summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCaleb Sander Mateos <csander@purestorage.com>2026-03-02 10:29:12 -0700
committerJens Axboe <axboe@kernel.dk>2026-03-16 16:14:14 -0600
commit3a5e96d47f7ea37fb6adf37882eec1521f8ca75e (patch)
tree2873fe6f7f67dda3c9b04a0d40fc587a9a1a145b
parent7995be40deb3ab8b5df7bdf0621f33aa546aefa7 (diff)
io_uring: count CQEs in io_iopoll_check()
A subsequent commit will allow uring_cmds that don't use iopoll on IORING_SETUP_IOPOLL io_urings. As a result, CQEs can be posted without setting the iopoll_completed flag for a request in iopoll_list or going through task work. For example, a UBLK_U_IO_FETCH_IO_CMDS command could call io_uring_mshot_cmd_post_cqe() to directly post a CQE. The io_iopoll_check() loop currently only counts completions posted in io_do_iopoll() when determining whether the min_events threshold has been met. It also exits early if there are any existing CQEs before polling, or if any CQEs are posted while running task work. CQEs posted via io_uring_mshot_cmd_post_cqe() or other mechanisms won't be counted against min_events. Explicitly check the available CQEs in each io_iopoll_check() loop iteration to account for CQEs posted in any fashion. Signed-off-by: Caleb Sander Mateos <csander@purestorage.com> Link: https://patch.msgid.link/20260302172914.2488599-4-csander@purestorage.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--io_uring/io_uring.c9
1 file changed, 2 insertions, 7 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 64ba359878a1..74cd62b44d94 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1186,7 +1186,6 @@ __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned int min_events)
{
- unsigned int nr_events = 0;
unsigned long check_cq;
min_events = min(min_events, ctx->cq_entries);
@@ -1229,8 +1228,6 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned int min_events)
* very same mutex.
*/
if (list_empty(&ctx->iopoll_list) || io_task_work_pending(ctx)) {
- u32 tail = ctx->cached_cq_tail;
-
(void) io_run_local_work_locked(ctx, min_events);
if (task_work_pending(current) || list_empty(&ctx->iopoll_list)) {
@@ -1239,7 +1236,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned int min_events)
mutex_lock(&ctx->uring_lock);
}
/* some requests don't go through iopoll_list */
- if (tail != ctx->cached_cq_tail || list_empty(&ctx->iopoll_list))
+ if (list_empty(&ctx->iopoll_list))
break;
}
ret = io_do_iopoll(ctx, !min_events);
@@ -1250,9 +1247,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned int min_events)
return -EINTR;
if (need_resched())
break;
-
- nr_events += ret;
- } while (nr_events < min_events);
+ } while (io_cqring_events(ctx) < min_events);
return 0;
}