author		Ming Lei <ming.lei@redhat.com>	2022-05-22 20:23:50 +0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-06-14 18:36:09 +0200
commit		460aa288c5cd0544dcf933a2f0ad0e8c6d2d35ff (patch)
tree		cd36f0eb1f66e38f23fc1c560a15bb621565c039 /block
parent		7a4afd8a003d6abf1f5d159c2bb67e6b7cbde253 (diff)
blk-mq: don't touch ->tagset in blk_mq_get_sq_hctx
[ Upstream commit 5d05426e2d5fd7df8afc866b78c36b37b00188b7 ]

blk_mq_run_hw_queues() can be run when there is no queued request and
after the queue has been cleaned up. At that point the tagset has been
freed, because the tagset's lifetime is owned by the driver and it is
often freed after blk_cleanup_queue() returns. So don't touch ->tagset
when figuring out the current default hctx; use the mapping built in
the request queue instead, so a use-after-free on the tagset is
avoided. This is also faster than retrieving the mapping from the
tagset.

Cc: "yukuai (C)" <yukuai3@huawei.com>
Cc: Jan Kara <jack@suse.cz>
Fixes: b6e68ee82585 ("blk-mq: Improve performance of non-mq IO schedulers with multiple HW queues")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20220522122350.743103-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
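The reasoning is easiest to see from the two mapping helpers involved.
Below is a condensed sketch of their shape around this kernel version
(simplified from block/blk-mq.h; the read/poll hctx-type selection in
blk_mq_map_queue is elided, so treat this as illustration rather than
the verbatim source): the old path indexes through q->tag_set, while
the new path only touches per-ctx state owned by the request queue
itself.

	/*
	 * Old lookup used by blk_mq_get_sq_hctx(): resolves the hctx by
	 * walking through q->tag_set. The tagset's lifetime is owned by
	 * the driver and it is often freed right after blk_cleanup_queue()
	 * returns, so this dereference can be a use-after-free when
	 * blk_mq_run_hw_queues() races with queue teardown.
	 */
	static inline struct blk_mq_hw_ctx *
	blk_mq_map_queue_type(struct request_queue *q, enum hctx_type type,
			      unsigned int cpu)
	{
		return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
	}

	/*
	 * New lookup: the per-cpu software context already carries a
	 * ctx->hctxs[] table that is built inside the request queue, so
	 * resolving the default hctx never touches ->tag_set, and it
	 * also skips the extra mq_map[] indirection, which is why the
	 * commit message notes this path is faster as well.
	 */
	static inline struct blk_mq_hw_ctx *
	blk_mq_map_queue(struct request_queue *q, unsigned int flags,
			 struct blk_mq_ctx *ctx)
	{
		return ctx->hctxs[HCTX_TYPE_DEFAULT];	/* flags == 0 here */
	}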
Diffstat (limited to 'block')
 block/blk-mq.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 82de39926a9f..b70488e4db94 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1642,8 +1642,7 @@ static bool blk_mq_has_sqsched(struct request_queue *q)
  */
 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
 {
-	struct blk_mq_hw_ctx *hctx;
-
+	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 	/*
 	 * If the IO scheduler does not respect hardware queues when
 	 * dispatching, we just don't bother with multiple HW queues and
@@ -1651,8 +1650,8 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
 	 * just causes lock contention inside the scheduler and pointless cache
 	 * bouncing.
 	 */
-	hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
-			raw_smp_processor_id());
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx);
+
 	if (!blk_mq_hctx_stopped(hctx))
 		return hctx;
 	return NULL;