author    Yu Kuai <yukuai@fnnas.com>  2026-02-03 16:19:45 +0800
committer Jens Axboe <axboe@kernel.dk>  2026-02-03 07:45:36 -0700
commit    f98afe4f31bb8b07fea318606c08030c2049587e (patch)
tree      652539f7de5272f1ba80dacebc9ec2109bd1a3e1
parent    cf02d7d41b064af3e2c3a3a1ea9042a5b565b0d8 (diff)
blk-mq: add a new queue sysfs attribute async_depth
Add a new field async_depth to request_queue, along with the related
APIs. It is currently unused; following patches will convert the
elevators to use it instead of their internal async_depth.

Signed-off-by: Yu Kuai <yukuai@fnnas.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 block/blk-core.c       |  1 +
 block/blk-mq.c         |  6 ++++++
 block/blk-sysfs.c      | 42 ++++++++++++++++++++++++++++++++++++++++++
 block/elevator.c       |  1 +
 include/linux/blkdev.h |  1 +
 5 files changed, 51 insertions(+), 0 deletions(-)
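The attribute behaves as a plain read-write file under /sys/block/<disk>/queue/. A minimal userspace sketch of exercising it, assuming a hypothetical device named sda; per the store handler in the blk-sysfs.c hunk below, the write fails with EINVAL when no elevator is attached, and values above nr_requests are clamped rather than rejected:

/* Minimal sketch: poke the new sysfs attribute from userspace.
 * "sda" is a placeholder device name, not something this patch assumes.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/async_depth";
	char buf[32];
	FILE *f;

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("64\n", f);
	/* stdio flushes at close, so the kernel's return value (e.g.
	 * EINVAL with no elevator) surfaces through fclose() here.
	 */
	if (fclose(f)) {
		perror(path);
		return 1;
	}

	f = fopen(path, "r");
	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("async_depth = %s", buf);
	return 0;
}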
diff --git a/block/blk-core.c b/block/blk-core.c
index d6732dc69dd9..474700ffaa1c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -463,6 +463,7 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
fs_reclaim_release(GFP_KERNEL);
q->nr_requests = BLKDEV_DEFAULT_RQ;
+ q->async_depth = BLKDEV_DEFAULT_RQ;
return q;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b7b272e856b8..0ad3dd3329db 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4662,6 +4662,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
spin_lock_init(&q->requeue_lock);
q->nr_requests = set->queue_depth;
+ q->async_depth = set->queue_depth;
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
blk_mq_map_swqueue(q);
@@ -5028,6 +5029,11 @@ struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
q->elevator->et = et;
}
+ /*
+ * Preserve the relative value; both nr and async_depth are at most
+ * 16-bit values, so the product cannot overflow.
+ */
+ q->async_depth = max(q->async_depth * nr / q->nr_requests, 1);
q->nr_requests = nr;
if (q->elevator && q->elevator->type->ops.depth_updated)
q->elevator->type->ops.depth_updated(q);
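The hunk above rescales async_depth whenever nr_requests changes, keeping the two in the same ratio; because both fit in 16 bits, the 32-bit product cannot overflow. A standalone sketch of the same arithmetic (the helper name is illustrative, not part of the patch):

/* Illustrative helper mirroring the scaling in blk_mq_update_nr_requests():
 * both inputs are at most 16-bit values, so the product fits in 32 bits.
 */
static unsigned int scale_async_depth(unsigned int async_depth,
				      unsigned int old_nr,
				      unsigned int new_nr)
{
	unsigned int depth = async_depth * new_nr / old_nr;

	return depth ? depth : 1;	/* keep at least one async tag */
}

For example, shrinking nr_requests from 128 to 64 with async_depth at 96 yields 48, preserving the 3/4 ratio.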
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index a580688c3ad5..003aa684e854 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -127,6 +127,46 @@ unlock:
return ret;
}
+static ssize_t queue_async_depth_show(struct gendisk *disk, char *page)
+{
+ guard(mutex)(&disk->queue->elevator_lock);
+
+ return queue_var_show(disk->queue->async_depth, page);
+}
+
+static ssize_t
+queue_async_depth_store(struct gendisk *disk, const char *page, size_t count)
+{
+ struct request_queue *q = disk->queue;
+ unsigned int memflags;
+ unsigned long nr;
+ int ret;
+
+ if (!queue_is_mq(q))
+ return -EINVAL;
+
+ ret = queue_var_store(&nr, page, count);
+ if (ret < 0)
+ return ret;
+
+ if (nr == 0)
+ return -EINVAL;
+
+ memflags = blk_mq_freeze_queue(q);
+ scoped_guard(mutex, &q->elevator_lock) {
+ if (q->elevator) {
+ q->async_depth = min(q->nr_requests, nr);
+ if (q->elevator->type->ops.depth_updated)
+ q->elevator->type->ops.depth_updated(q);
+ } else {
+ ret = -EINVAL;
+ }
+ }
+ blk_mq_unfreeze_queue(q, memflags);
+
+ return ret;
+}
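On a successful store, the value is clamped to nr_requests and the elevator's depth_updated() hook runs under a frozen queue; that hook is where the follow-up patches are expected to consume q->async_depth. A hedged sketch of what such a consumer might look like, loosely modeled on the sbitmap shallow-depth pattern used by existing schedulers (the function is hypothetical and not part of this series):

/* Hypothetical depth_updated() consumer, not from this series: push
 * q->async_depth into the scheduler tag bitmaps so async requests
 * cannot allocate tags beyond that depth.
 */
static void example_depth_updated(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
						q->async_depth);
}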
+
static ssize_t queue_ra_show(struct gendisk *disk, char *page)
{
ssize_t ret;
@@ -532,6 +572,7 @@ static struct queue_sysfs_entry _prefix##_entry = { \
}
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
+QUEUE_RW_ENTRY(queue_async_depth, "async_depth");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
@@ -719,6 +760,7 @@ static struct attribute *blk_mq_queue_attrs[] = {
*/
&elv_iosched_entry.attr,
&queue_requests_entry.attr,
+ &queue_async_depth_entry.attr,
#ifdef CONFIG_BLK_WBT
&queue_wb_lat_entry.attr,
#endif
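For readers unfamiliar with the macro: QUEUE_RW_ENTRY() ties the show/store pair defined earlier to the sysfs file name. Judging from the macro body visible in the hunk header above, the new entry expands to roughly the following (the 0644 mode is an assumption based on the usual read-write attribute convention):

/* Approximate expansion of QUEUE_RW_ENTRY(queue_async_depth, "async_depth"),
 * reconstructed for illustration; the mode is assumed.
 */
static struct queue_sysfs_entry queue_async_depth_entry = {
	.attr	= { .name = "async_depth", .mode = 0644 },
	.show	= queue_async_depth_show,
	.store	= queue_async_depth_store,
};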
diff --git a/block/elevator.c b/block/elevator.c
index a2f8b2251dc6..ebe2a1fcf011 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -589,6 +589,7 @@ static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
q->elevator = NULL;
q->nr_requests = q->tag_set->queue_depth;
+ q->async_depth = q->tag_set->queue_depth;
}
blk_add_trace_msg(q, "elv switch: %s", ctx->name);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 67d8d9e03abc..99ef8cd7673c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -551,6 +551,7 @@ struct request_queue {
* queue settings
*/
unsigned int nr_requests; /* Max # of requests */
+ unsigned int async_depth; /* Max # of async requests */
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct blk_crypto_profile *crypto_profile;