author | Christoph Hellwig <hch@lst.de> | 2016-09-14 16:18:59 +0200 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2016-09-15 08:42:03 -0600 |
commit | 1b157939f92ae22d10b9d52baaa14f826927f5ff (patch) | |
tree | 8e0656c97a3d19bd8936e6484b91d03ed7965fac /block/blk-mq.c | |
parent | b5af7f2ff022a75eb0bbf2166007c4b8ddd02ef1 (diff) | |
blk-mq: get rid of the cpumask in struct blk_mq_tags
Unused now that NVMe sets up irq affinity before calling into blk-mq.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 25 |
1 file changed, 21 insertions, 4 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a3060078a8da..060b350d3f0c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1861,7 +1861,6 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 		hctx->tags = set->tags[i];
 		WARN_ON(!hctx->tags);
 
-		cpumask_copy(hctx->tags->cpumask, hctx->cpumask);
 		/*
 		 * Set the map size to the number of mapped software queues.
 		 * This is more accurate and more efficient than looping
@@ -2272,11 +2271,29 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	return 0;
 }
 
-struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags)
+static int blk_mq_create_mq_map(struct blk_mq_tag_set *set,
+		const struct cpumask *affinity_mask)
 {
-	return tags->cpumask;
+	int queue = -1, cpu = 0;
+
+	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
+			GFP_KERNEL, set->numa_node);
+	if (!set->mq_map)
+		return -ENOMEM;
+
+	if (!affinity_mask)
+		return 0;	/* map all cpus to queue 0 */
+
+	/* If cpus are offline, map them to first hctx */
+	for_each_online_cpu(cpu) {
+		if (cpumask_test_cpu(cpu, affinity_mask))
+			queue++;
+		if (queue >= 0)
+			set->mq_map[cpu] = queue;
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
 
 /*
  * Alloc a tag set to be associated with one or more request queues.
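
For illustration only (not part of the commit): a minimal user-space sketch of the queue-mapping loop that blk_mq_create_mq_map() introduces. The eight-CPU array and the hard-coded affinity bits are made-up stand-ins for nr_cpu_ids and the irq affinity cpumask; the loop mirrors the kernel logic, where each CPU found in the mask opens the next queue and CPUs ahead of (or outside) the mask fall back to the most recently opened queue, queue 0 until the first hit.

/*
 * Illustrative sketch only: NR_CPUS and the mask below are made-up
 * stand-ins for nr_cpu_ids and the irq affinity cpumask.
 */
#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	/* pretend CPUs 2 and 5 each start an irq affinity set */
	int affinity_mask[NR_CPUS] = { 0, 0, 1, 0, 0, 1, 0, 0 };
	unsigned int mq_map[NR_CPUS] = { 0 };
	int queue = -1, cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (affinity_mask[cpu])
			queue++;		/* this CPU opens the next queue */
		if (queue >= 0)
			mq_map[cpu] = queue;	/* earlier CPUs stay on queue 0 */
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> queue %u\n", cpu, mq_map[cpu]);
	return 0;
}

With the mask above the sketch prints cpus 0-4 mapped to queue 0 and cpus 5-7 mapped to queue 1, which is the same assignment the kernel loop would produce for that affinity layout.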