Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c |   6
-rw-r--r--  drivers/block/null_blk.c          | 301
2 files changed, 233 insertions(+), 74 deletions(-)
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index a28a562f7b7f..3457ac8c03e2 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3810,7 +3810,6 @@ static int mtip_block_initialize(struct driver_data *dd)
sector_t capacity;
unsigned int index = 0;
struct kobject *kobj;
- unsigned char thd_name[16];
if (dd->disk)
goto skip_create_disk; /* hw init done, before rebuild */
@@ -3958,10 +3957,9 @@ skip_create_disk:
}
start_service_thread:
- sprintf(thd_name, "mtip_svc_thd_%02d", index);
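+ /* kthread_create_on_node() takes a printf-style name, so the fixed-size buffer is unnecessary */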
dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
- dd, dd->numa_node, "%s",
- thd_name);
+ dd, dd->numa_node,
+ "mtip_svc_thd_%02d", index);
if (IS_ERR(dd->mtip_svc_handler)) {
dev_err(&dd->pdev->dev, "service thread failed to start\n");
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 6255d1c4bba4..0c3940ec5e62 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
+#include <linux/lightnvm.h>
struct nullb_cmd {
struct list_head list;
@@ -17,6 +18,7 @@ struct nullb_cmd {
struct bio *bio;
unsigned int tag;
struct nullb_queue *nq;
+ struct hrtimer timer;
};
struct nullb_queue {
@@ -39,23 +41,14 @@ struct nullb {
struct nullb_queue *queues;
unsigned int nr_queues;
+ char disk_name[DISK_NAME_LEN];
};
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
-
-struct completion_queue {
- struct llist_head list;
- struct hrtimer timer;
-};
-
-/*
- * These are per-cpu for now, they will need to be configured by the
- * complete_queues parameter and appropriately mapped.
- */
-static DEFINE_PER_CPU(struct completion_queue, completion_queues);
+static struct kmem_cache *ppa_cache;
enum {
NULL_IRQ_NONE = 0,
@@ -119,6 +112,10 @@ static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
+static bool use_lightnvm;
+module_param(use_lightnvm, bool, S_IRUGO);
+MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
+
static int irqmode = NULL_IRQ_SOFTIRQ;
static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -135,8 +132,8 @@ static const struct kernel_param_ops null_irqmode_param_ops = {
device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
-static int completion_nsec = 10000;
-module_param(completion_nsec, int, S_IRUGO);
+static unsigned long completion_nsec = 10000;
+module_param(completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
static int hw_queue_depth = 64;
@@ -173,6 +170,8 @@ static void free_cmd(struct nullb_cmd *cmd)
put_tag(cmd->nq, cmd->tag);
}
+static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
+
static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
struct nullb_cmd *cmd;
@@ -183,6 +182,11 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
cmd = &nq->cmds[tag];
cmd->tag = tag;
cmd->nq = nq;
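+ /* each command now carries its own hrtimer, replacing the old shared per-CPU completion queues */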
+ if (irqmode == NULL_IRQ_TIMER) {
+ hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ cmd->timer.function = null_cmd_timer_expired;
+ }
return cmd;
}
@@ -213,6 +217,8 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
+ struct request_queue *q = NULL;
+
switch (queue_mode) {
case NULL_Q_MQ:
blk_mq_end_request(cmd->rq, 0);
@@ -223,55 +229,37 @@ static void end_cmd(struct nullb_cmd *cmd)
break;
case NULL_Q_BIO:
bio_endio(cmd->bio);
- break;
+ goto free_cmd;
}
+ if (cmd->rq)
+ q = cmd->rq->q;
+
+ /* Restart queue if needed, as we are freeing a tag */
+ if (q && !q->mq_ops && blk_queue_stopped(q)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (blk_queue_stopped(q))
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+free_cmd:
free_cmd(cmd);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
- struct completion_queue *cq;
- struct llist_node *entry;
- struct nullb_cmd *cmd;
-
- cq = &per_cpu(completion_queues, smp_processor_id());
-
- while ((entry = llist_del_all(&cq->list)) != NULL) {
- entry = llist_reverse_order(entry);
- do {
- struct request_queue *q = NULL;
-
- cmd = container_of(entry, struct nullb_cmd, ll_list);
- entry = entry->next;
- if (cmd->rq)
- q = cmd->rq->q;
- end_cmd(cmd);
-
- if (q && !q->mq_ops && blk_queue_stopped(q)) {
- spin_lock(q->queue_lock);
- if (blk_queue_stopped(q))
- blk_start_queue(q);
- spin_unlock(q->queue_lock);
- }
- } while (entry);
- }
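+ /* the timer is embedded in the command, so container_of() recovers its owner directly */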
+ end_cmd(container_of(timer, struct nullb_cmd, timer));
return HRTIMER_NORESTART;
}
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
- struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());
-
- cmd->ll_list.next = NULL;
- if (llist_add(&cmd->ll_list, &cq->list)) {
- ktime_t kt = ktime_set(0, completion_nsec);
+ ktime_t kt = ktime_set(0, completion_nsec);

- hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED);
- }
-
- put_cpu();
+ hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}
static void null_softirq_done_fn(struct request *rq)
@@ -369,6 +357,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
{
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
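+ /* blk-mq commands live in the request pdu rather than nq->cmds, so the timer is initialized per request */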
+ if (irqmode == NULL_IRQ_TIMER) {
+ hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cmd->timer.function = null_cmd_timer_expired;
+ }
cmd->rq = bd->rq;
cmd->nq = hctx->driver_data;
@@ -427,15 +419,156 @@ static void null_del_dev(struct nullb *nullb)
{
list_del_init(&nullb->list);
- del_gendisk(nullb->disk);
+ if (use_lightnvm)
+ nvm_unregister(nullb->disk_name);
+ else
+ del_gendisk(nullb->disk);
blk_cleanup_queue(nullb->q);
if (queue_mode == NULL_Q_MQ)
blk_mq_free_tag_set(&nullb->tag_set);
- put_disk(nullb->disk);
+ if (!use_lightnvm)
+ put_disk(nullb->disk);
cleanup_queues(nullb);
kfree(nullb);
}
+#ifdef CONFIG_NVM
+
+static void null_lnvm_end_io(struct request *rq, int error)
+{
+ struct nvm_rq *rqd = rq->end_io_data;
+ struct nvm_dev *dev = rqd->dev;
+
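+ /* hand the completion to the LightNVM media manager, then drop the request */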
+ dev->mt->end_io(rqd, error);
+
+ blk_put_request(rq);
+}
+
+static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
+{
+ struct request *rq;
+ struct bio *bio = rqd->bio;
+
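+ /* repackage the nvm_rq's bio as a driver-private request and execute it asynchronously */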
+ rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
+ if (IS_ERR(rq))
+ return -ENOMEM;
+
+ rq->cmd_type = REQ_TYPE_DRV_PRIV;
+ rq->__sector = bio->bi_iter.bi_sector;
+ rq->ioprio = bio_prio(bio);
+
+ if (bio_has_data(bio))
+ rq->nr_phys_segments = bio_phys_segments(q, bio);
+
+ rq->__data_len = bio->bi_iter.bi_size;
+ rq->bio = rq->biotail = bio;
+
+ rq->end_io_data = rqd;
+
+ blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);
+
+ return 0;
+}
+
+static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
+{
+ sector_t size = gb * 1024 * 1024 * 1024ULL;
+ sector_t blksize;
+ struct nvm_id_group *grp;
+
+ id->ver_id = 0x1;
+ id->vmnt = 0;
+ id->cgrps = 1;
+ id->cap = 0x3;
+ id->dom = 0x1;
+
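+ /* ppaf: bit offset and length of each field within the 64-bit physical page address */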
+ id->ppaf.blk_offset = 0;
+ id->ppaf.blk_len = 16;
+ id->ppaf.pg_offset = 16;
+ id->ppaf.pg_len = 16;
+ id->ppaf.sect_offset = 32;
+ id->ppaf.sect_len = 8;
+ id->ppaf.pln_offset = 40;
+ id->ppaf.pln_len = 8;
+ id->ppaf.lun_offset = 48;
+ id->ppaf.lun_len = 8;
+ id->ppaf.ch_offset = 56;
+ id->ppaf.ch_len = 8;
+
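+ /* do_div() divides its 64-bit first argument in place and returns the remainder */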
+ do_div(size, bs); /* convert size to pages */
+ do_div(size, 256); /* convert size to pages per block */
+ grp = &id->groups[0];
+ grp->mtype = 0;
+ grp->fmtype = 0;
+ grp->num_ch = 1;
+ grp->num_pg = 256;
+ blksize = size;
+ do_div(size, (1 << 16));
+ grp->num_lun = size + 1;
+ do_div(blksize, grp->num_lun);
+ grp->num_blk = blksize;
+ grp->num_pln = 1;
+
+ grp->fpg_sz = bs;
+ grp->csecs = bs;
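+ /* rough NAND-like timings in ns: 25us read, 500us program, 1.5ms erase */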
+ grp->trdt = 25000;
+ grp->trdm = 25000;
+ grp->tprt = 500000;
+ grp->tprm = 500000;
+ grp->tbet = 1500000;
+ grp->tbem = 1500000;
+ grp->mpos = 0x010101; /* single-plane read/write/erase */
+ grp->cpar = hw_queue_depth;
+
+ return 0;
+}
+
+static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
+{
+ mempool_t *virtmem_pool;
+
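+ /* no real DMA in null_blk; a slab-backed mempool of PPA lists stands in for a DMA pool */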
+ virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
+ if (!virtmem_pool) {
+ pr_err("null_blk: Unable to create virtual memory pool\n");
+ return NULL;
+ }
+
+ return virtmem_pool;
+}
+
+static void null_lnvm_destroy_dma_pool(void *pool)
+{
+ mempool_destroy(pool);
+}
+
+static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
+ gfp_t mem_flags, dma_addr_t *dma_handler)
+{
+ return mempool_alloc(pool, mem_flags);
+}
+
+static void null_lnvm_dev_dma_free(void *pool, void *entry,
+ dma_addr_t dma_handler)
+{
+ mempool_free(entry, pool);
+}
+
+static struct nvm_dev_ops null_lnvm_dev_ops = {
+ .identity = null_lnvm_id,
+ .submit_io = null_lnvm_submit_io,
+
+ .create_dma_pool = null_lnvm_create_dma_pool,
+ .destroy_dma_pool = null_lnvm_destroy_dma_pool,
+ .dev_dma_alloc = null_lnvm_dev_dma_alloc,
+ .dev_dma_free = null_lnvm_dev_dma_free,
+
+ /* Simulate an NVMe protocol restriction */
+ .max_phys_sect = 64,
+};
+#else
+static struct nvm_dev_ops null_lnvm_dev_ops;
+#endif /* CONFIG_NVM */
+
static int null_open(struct block_device *bdev, fmode_t mode)
{
return 0;
@@ -575,11 +708,6 @@ static int null_add_dev(void)
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
- disk = nullb->disk = alloc_disk_node(1, home_node);
- if (!disk) {
- rv = -ENOMEM;
- goto out_cleanup_blk_queue;
- }
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
@@ -589,6 +717,21 @@ static int null_add_dev(void)
blk_queue_logical_block_size(nullb->q, bs);
blk_queue_physical_block_size(nullb->q, bs);
+ sprintf(nullb->disk_name, "nullb%d", nullb->index);
+
+ if (use_lightnvm) {
+ rv = nvm_register(nullb->q, nullb->disk_name,
+ &null_lnvm_dev_ops);
+ if (rv)
+ goto out_cleanup_blk_queue;
+ goto done;
+ }
+
+ disk = nullb->disk = alloc_disk_node(1, home_node);
+ if (!disk) {
+ rv = -ENOMEM;
+ goto out_cleanup_lightnvm;
+ }
size = gb * 1024 * 1024 * 1024ULL;
set_capacity(disk, size >> 9);
@@ -598,10 +741,15 @@ static int null_add_dev(void)
disk->fops = &null_fops;
disk->private_data = nullb;
disk->queue = nullb->q;
- sprintf(disk->disk_name, "nullb%d", nullb->index);
+ strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+
add_disk(disk);
+done:
return 0;
+out_cleanup_lightnvm:
+ if (use_lightnvm)
+ nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q);
out_cleanup_tags:
@@ -625,6 +773,18 @@ static int __init null_init(void)
bs = PAGE_SIZE;
}
+ if (use_lightnvm && bs != 4096) {
+ pr_warn("null_blk: LightNVM only supports 4k block size\n");
+ pr_warn("null_blk: defaults block size to 4k\n");
+ bs = 4096;
+ }
+
+ if (use_lightnvm && queue_mode != NULL_Q_MQ) {
+ pr_warn("null_blk: LightNVM only supported for blk-mq\n");
+ pr_warn("null_blk: defaults queue mode to blk-mq\n");
+ queue_mode = NULL_Q_MQ;
+ }
+
if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
if (submit_queues < nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.",
@@ -638,32 +798,31 @@ static int __init null_init(void)
mutex_init(&lock);
- /* Initialize a separate list for each CPU for issuing softirqs */
- for_each_possible_cpu(i) {
- struct completion_queue *cq = &per_cpu(completion_queues, i);
-
- init_llist_head(&cq->list);
-
- if (irqmode != NULL_IRQ_TIMER)
- continue;
-
- hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cq->timer.function = null_cmd_timer_expired;
- }
-
null_major = register_blkdev(0, "nullb");
if (null_major < 0)
return null_major;
+ if (use_lightnvm) {
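+ /* each cache entry holds a PPA list of up to 64 u64s, matching max_phys_sect above */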
+ ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
+ 0, 0, NULL);
+ if (!ppa_cache) {
+ pr_err("null_blk: unable to create ppa cache\n");
+ unregister_blkdev(null_major, "nullb");
+ return -ENOMEM;
+ }
+ }
+
for (i = 0; i < nr_devices; i++) {
if (null_add_dev()) {
unregister_blkdev(null_major, "nullb");
- return -EINVAL;
+ goto err_ppa;
}
}
pr_info("null: module loaded\n");
return 0;
+err_ppa:
+ kmem_cache_destroy(ppa_cache);
+ return -EINVAL;
}
static void __exit null_exit(void)
@@ -678,6 +837,8 @@ static void __exit null_exit(void)
null_del_dev(nullb);
}
mutex_unlock(&lock);
+
+ kmem_cache_destroy(ppa_cache);
}
module_init(null_init);
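Usage note: with the parameters introduced above, a LightNVM-backed instance can be loaded with e.g. modprobe null_blk use_lightnvm=1 irqmode=2 completion_nsec=10000; as the warnings in null_init() indicate, block size and queue mode are then forced to 4k and blk-mq.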