summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorPavel Begunkov <asml.silence@gmail.com>2025-05-09 12:12:50 +0100
committerJens Axboe <axboe@kernel.dk>2025-05-09 08:01:01 -0600
commite91e4f692f7993d5d192228c5f8a9a2e12ff5250 (patch)
tree92780f0b76c3db856fb90ec674081d80c90a5ff1
parent05b334110fdc85f536d7dd573120d573801fb2d1 (diff)
io_uring: remove drain prealloc checks
Currently io_drain_req() has two steps. The first is a fast path checking sequence numbers. The second is allocation, rechecking and actual queuing. Further simplify it by removing the first step.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/4d06e89ed07611993d7bf89182de2300858379bd.1746788718.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--io_uring/io_uring.c15
1 files changed, 3 insertions, 12 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 6b606e9dffa3..f83abdf8a056 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1674,17 +1674,6 @@ static __cold void io_drain_req(struct io_kiocb *req)
struct io_defer_entry *de;
u32 seq = io_get_sequence(req);
- /* Still need defer if there is pending req in defer list. */
- spin_lock(&ctx->completion_lock);
- if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
- spin_unlock(&ctx->completion_lock);
-queue:
- ctx->drain_active = false;
- io_req_task_queue(req);
- return;
- }
- spin_unlock(&ctx->completion_lock);
-
io_prep_async_link(req);
de = kmalloc(sizeof(*de), GFP_KERNEL_ACCOUNT);
if (!de) {
@@ -1696,7 +1685,9 @@ queue:
if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
spin_unlock(&ctx->completion_lock);
kfree(de);
- goto queue;
+ ctx->drain_active = false;
+ io_req_task_queue(req);
+ return;
}
trace_io_uring_defer(req);