author	Ming Lei <ming.lei@redhat.com>	2026-01-23 17:15:43 +0800
committer	Jens Axboe <axboe@kernel.dk>	2026-01-23 05:11:03 -0700
commit	e4c4bfec2bb8db9963d87e8ccdf89cd9e485d7b6 (patch)
tree	fa0ef8ef3279c1e1b43c1f7c20b1f820ac9f5dc8
parent	dbc635c4be7eba1d0e0fe0275a289ee3ccc63d72 (diff)
ublk: fix canceling flag handling in batch I/O recovery
Two issues with ubq->canceling flag handling:

1) In ublk_queue_reset_io_flags(), ubq->canceling is set outside
   cancel_lock, violating the locking requirement. Move it inside the
   spinlock-protected section.

2) In ublk_batch_unprep_io(), when rolling back after a batch prep
   failure, if the queue became ready during prep (which cleared
   canceling), the flag is not restored when the queue becomes
   not-ready again. This allows new requests to be queued to
   uninitialized IO slots.

Fix by restoring ubq->canceling = true under cancel_lock when the
queue transitions from ready to not-ready during rollback.

Reported-by: Jens Axboe <axboe@kernel.dk>
Fixes: 3f3850785594 ("ublk: fix batch I/O recovery -ENODEV error")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	drivers/block/ublk_drv.c	11

1 file changed, 8 insertions(+), 3 deletions(-)
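For context, here is a minimal sketch of the rule the first hunk enforces: ubq->canceling is only written while cancel_lock is held. The types below are simplified stand-ins invented for this sketch (the real struct ublk_queue, struct ublk_io and UBLK_IO_FLAG_CANCELED live in drivers/block/ublk_drv.c and are richer than this):

#include <linux/spinlock.h>
#include <linux/types.h>

/* Simplified stand-ins for this sketch only. */
struct ublk_io_sketch {
	unsigned int flags;
};

struct ublk_queue_sketch {
	spinlock_t		cancel_lock;
	bool			canceling;
	bool			fail_io;
	int			q_depth;
	struct ublk_io_sketch	*ios;
};

#define UBLK_IO_FLAG_CANCELED_SKETCH	(1U << 0)	/* placeholder bit */

/* Post-patch shape of the reset path: the per-IO CANCELED bits and the
 * queue-wide canceling flag are both cleared under cancel_lock. */
static void ublk_queue_reset_io_flags_sketch(struct ublk_queue_sketch *ubq)
{
	int j;

	spin_lock(&ubq->cancel_lock);
	for (j = 0; j < ubq->q_depth; j++)
		ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED_SKETCH;
	ubq->canceling = false;		/* issue 1: cleared inside the lock */
	spin_unlock(&ubq->cancel_lock);

	/* fail_io has no cancel_lock requirement and stays outside. */
	ubq->fail_io = false;
}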
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 31279a8238b8..31fda782c47c 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -2806,9 +2806,9 @@ static void ublk_queue_reset_io_flags(struct ublk_queue *ubq)
 	spin_lock(&ubq->cancel_lock);
 	for (j = 0; j < ubq->q_depth; j++)
 		ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
+	ubq->canceling = false;
 	spin_unlock(&ubq->cancel_lock);
 	ubq->fail_io = false;
-	ubq->canceling = false;
 }
 
 /* device can only be started after all IOs are ready */
@@ -3435,10 +3435,15 @@ static int ublk_batch_unprep_io(struct ublk_queue *ubq,
 	/*
 	 * If queue was ready before this decrement, it won't be anymore,
-	 * so we need to decrement the queue ready count too.
+	 * so we need to decrement the queue ready count and restore the
+	 * canceling flag to prevent new requests from being queued.
 	 */
-	if (ublk_queue_ready(ubq))
+	if (ublk_queue_ready(ubq)) {
 		data->ub->nr_queue_ready--;
+		spin_lock(&ubq->cancel_lock);
+		ubq->canceling = true;
+		spin_unlock(&ubq->cancel_lock);
+	}
 	ubq->nr_io_ready--;
 	ublk_io_lock(io);
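Why restoring the flag matters during rollback: while ubq->canceling is set, the driver keeps new requests away from the queue, which is what protects the now-uninitialized IO slots. The guard below is hypothetical and heavily simplified (the real check sits in the ublk dispatch path and is not written this way); it only illustrates why the rollback in the second hunk takes cancel_lock before setting canceling back to true, so readers of the flag see a consistent value.

/* Hypothetical guard, reusing the ublk_queue_sketch type from the
 * earlier sketch: callers would skip queueing work while the queue
 * is marked as canceling. */
static bool ublk_queue_may_accept_sketch(struct ublk_queue_sketch *ubq)
{
	bool accept;

	spin_lock(&ubq->cancel_lock);
	accept = !ubq->canceling;
	spin_unlock(&ubq->cancel_lock);

	return accept;
}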