author     Damien Le Moal <dlemoal@kernel.org>   2026-01-29 16:27:15 +0900
committer  Jens Axboe <axboe@kernel.dk>          2026-01-29 13:15:50 -0700
commit     2719bd1ee1a1cd0535bc62e89b52822f2bbd14eb (patch)
tree       090a7ffca6432cadfd6b430fcc279b43458da691
parent     068f5b5ef5bf97e25568950f06ba32325bdc660b (diff)
block: introduce blk_queue_rot()
To check if a request queue is backed by a rotational device, callers
need a double negation with the pattern "!blk_queue_nonrot(q)".
Simplify this by introducing the helper blk_queue_rot(), which tests
if a request queue's limits have the BLK_FEAT_ROTATIONAL feature set.
All call sites of blk_queue_nonrot() are converted to use
blk_queue_rot() and the blk_queue_nonrot() definition is removed.
No functional changes.

Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Nitesh Shetty <nj.shetty@samsung.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
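As an illustration of the pattern change, here is a standalone sketch
(not kernel code: "struct queue_model" and the bit value chosen for
BLK_FEAT_ROTATIONAL below are simplified stand-ins for the kernel's
struct queue_limits and feature flag; only the check pattern matters):

#include <stdio.h>

/*
 * Standalone model of the helper change. "features" stands in for
 * q->limits.features; the bit value is arbitrary for this sketch.
 */
#define BLK_FEAT_ROTATIONAL	(1u << 1)

struct queue_model {
	unsigned int features;	/* models q->limits.features */
};

/* Old helper: asking "is this rotational?" needed !blk_queue_nonrot(q). */
#define blk_queue_nonrot(q)	(!((q)->features & BLK_FEAT_ROTATIONAL))

/* New helper: the rotational check reads directly. */
#define blk_queue_rot(q)	((q)->features & BLK_FEAT_ROTATIONAL)

int main(void)
{
	struct queue_model hdd = { .features = BLK_FEAT_ROTATIONAL };
	struct queue_model ssd = { .features = 0 };

	printf("hdd rotational, old pattern: %d\n", !blk_queue_nonrot(&hdd));
	printf("hdd rotational, new pattern: %d\n", !!blk_queue_rot(&hdd));
	printf("ssd rotational, new pattern: %d\n", !!blk_queue_rot(&ssd));
	return 0;
}

Note that blk_queue_rot() evaluates to the raw masked bits rather than
a normalized bool, which is why the bfq hunks below index
ref_rate[!blk_queue_rot(q)] to get a clean 0/1, just as the old code
indexed with blk_queue_nonrot(q).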
-rw-r--r--  block/bfq-iosched.c     20
-rw-r--r--  block/blk-iocost.c       2
-rw-r--r--  block/blk-iolatency.c    5
-rw-r--r--  block/blk-wbt.c          5
-rw-r--r--  include/linux/blkdev.h   4
5 files changed, 16 insertions(+), 20 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 6e54b1d3d8bc..3ebdec40e758 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -231,7 +231,7 @@ static struct kmem_cache *bfq_pool;
#define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
(get_sdist(last_pos, rq) > \
BFQQ_SEEK_THR && \
- (!blk_queue_nonrot(bfqd->queue) || \
+ (blk_queue_rot(bfqd->queue) || \
blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
@@ -4165,7 +4165,7 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
/* don't use too short time intervals */
if (delta_usecs < 1000) {
- if (blk_queue_nonrot(bfqd->queue))
+ if (!blk_queue_rot(bfqd->queue))
/*
* give same worst-case guarantees as idling
* for seeky
@@ -4487,7 +4487,7 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
bool rot_without_queueing =
- !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
+ blk_queue_rot(bfqd->queue) && !bfqd->hw_tag,
bfqq_sequential_and_IO_bound,
idling_boosts_thr;
@@ -4521,7 +4521,7 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
* flash-based device.
*/
idling_boosts_thr = rot_without_queueing ||
- ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
+ ((blk_queue_rot(bfqd->queue) || !bfqd->hw_tag) &&
bfqq_sequential_and_IO_bound);
/*
@@ -4722,7 +4722,7 @@ bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
* there is only one in-flight large request
* at a time.
*/
- if (blk_queue_nonrot(bfqd->queue) &&
+ if (!blk_queue_rot(bfqd->queue) &&
blk_rq_sectors(bfqq->next_rq) >=
BFQQ_SECT_THR_NONROT &&
bfqd->tot_rq_in_driver >= 1)
@@ -6340,7 +6340,7 @@ static void bfq_update_hw_tag(struct bfq_data *bfqd)
bfqd->hw_tag_samples = 0;
bfqd->nonrot_with_queueing =
- blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
+ !blk_queue_rot(bfqd->queue) && bfqd->hw_tag;
}
static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
@@ -7293,7 +7293,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
INIT_HLIST_HEAD(&bfqd->burst_list);
bfqd->hw_tag = -1;
- bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
+ bfqd->nonrot_with_queueing = !blk_queue_rot(bfqd->queue);
bfqd->bfq_max_budget = bfq_default_max_budget;
@@ -7328,9 +7328,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
* Begin by assuming, optimistically, that the device peak
* rate is equal to 2/3 of the highest reference rate.
*/
- bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
- ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
- bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
+ bfqd->rate_dur_prod = ref_rate[!blk_queue_rot(bfqd->queue)] *
+ ref_wr_duration[!blk_queue_rot(bfqd->queue)];
+ bfqd->peak_rate = ref_rate[!blk_queue_rot(bfqd->queue)] * 2 / 3;
/* see comments on the definition of next field inside bfq_data */
bfqd->actuator_load_threshold = 4;
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index a0416927d33d..ef543d163d46 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -812,7 +812,7 @@ static int ioc_autop_idx(struct ioc *ioc, struct gendisk *disk)
u64 now_ns;
/* rotational? */
- if (!blk_queue_nonrot(disk->queue))
+ if (blk_queue_rot(disk->queue))
return AUTOP_HDD;
/* handle SATA SSDs w/ broken NCQ */
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 45bd18f68541..f7434278cd29 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -988,10 +988,7 @@ static void iolatency_pd_init(struct blkg_policy_data *pd)
u64 now = blk_time_get_ns();
int cpu;
- if (blk_queue_nonrot(blkg->q))
- iolat->ssd = true;
- else
- iolat->ssd = false;
+ iolat->ssd = !blk_queue_rot(blkg->q);
for_each_possible_cpu(cpu) {
struct latency_stat *stat;
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 0974875f77bd..8e025834f2fb 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -747,10 +747,9 @@ u64 wbt_default_latency_nsec(struct request_queue *q)
* We default to 2msec for non-rotational storage, and 75msec
* for rotational storage.
*/
- if (blk_queue_nonrot(q))
- return 2000000ULL;
- else
+ if (blk_queue_rot(q))
return 75000000ULL;
+ return 2000000ULL;
}
static int wbt_data_dir(const struct request *rq)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4536211ff33c..1e5b5547929f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -680,7 +680,7 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
+#define blk_queue_rot(q) ((q)->limits.features & BLK_FEAT_ROTATIONAL)
#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_passthrough_stat(q) \
((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
@@ -1463,7 +1463,7 @@ bdev_write_zeroes_unmap_sectors(struct block_device *bdev)
static inline bool bdev_nonrot(struct block_device *bdev)
{
- return blk_queue_nonrot(bdev_get_queue(bdev));
+ return !blk_queue_rot(bdev_get_queue(bdev));
}
static inline bool bdev_synchronous(struct block_device *bdev)
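
For callers outside this patch, the conversion is mechanical. A hedged
sketch of a hypothetical caller (the function below is illustration
only and not part of this patch; it assumes the usual kernel context):

#include <linux/blkdev.h>

/* Hypothetical caller, shown only to illustrate the conversion. */
static bool wants_seek_friendly_dispatch(struct request_queue *q)
{
	/* Before this patch: return !blk_queue_nonrot(q); after: */
	return blk_queue_rot(q);
}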