summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorFengnan Chang <fengnanchang@gmail.com>2025-11-28 16:53:13 +0800
committerJens Axboe <axboe@kernel.dk>2025-11-28 09:09:19 -0700
commitd0c98769ee7d5db8d699a270690639cde1766cd4 (patch)
tree736482908a880ed3a624edbcf4cec18c68cd044b /include
parentc6a45ee7607de3a350008630f4369b1b5ac80884 (diff)
blk-mq: use array manage hctx map instead of xarray
After commit 4e5cc99e1e48 ("blk-mq: manage hctx map via xarray"), we use an xarray instead of an array to store hctx, but in poll mode, each time in blk_mq_poll, we need to use xa_load to find the corresponding hctx, which introduces some cost. In my test, xa_load may cost 3.8% CPU. This patch reverts the previous change, eliminating the overhead of xa_load, and can result in a 3% performance improvement. Signed-off-by: Fengnan Chang <changfengnan@bytedance.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'include')
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blkdev.h2
2 files changed, 3 insertions, 2 deletions
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b54506b3b76d..9208ff90ae16 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -1016,7 +1016,8 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
}
#define queue_for_each_hw_ctx(q, hctx, i) \
- xa_for_each(&(q)->hctx_table, (i), (hctx))
+ for ((i) = 0; (i) < (q)->nr_hw_queues && \
+ ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
#define hctx_for_each_ctx(hctx, ctx, i) \
for ((i) = 0; (i) < (hctx)->nr_ctx && \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cb4ba09959ee..6195f89648db 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -503,7 +503,7 @@ struct request_queue {
/* hw dispatch queues */
unsigned int nr_hw_queues;
- struct xarray hctx_table;
+ struct blk_mq_hw_ctx **queue_hw_ctx;
struct percpu_ref q_usage_counter;
struct lock_class_key io_lock_cls_key;