author	Linus Torvalds <torvalds@linux-foundation.org>	2022-12-13 10:33:08 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-12-13 10:33:08 -0800
commit	54e60e505d6144a22c787b5be1fdce996a27be1b (patch)
tree	a0b582fa8d9de216fef4fb6320199bb055125c65 /io_uring/kbuf.c
parent	d523ec4c6af4314575d6ab8b52629ae3e2039a50 (diff)
parent	5d772916855f593672de55c437925daccc8ecd73 (diff)
Merge tag 'for-6.2/io_uring-2022-12-08' of git://git.kernel.dk/linux
Pull io_uring updates from Jens Axboe:

 - Always ensure proper ordering in case of CQ ring overflow, which then
   means we can remove some work-arounds for that (Dylan)

 - Support completion batching for multishot, greatly increasing the
   efficiency for those (Dylan)

 - Flag epoll/eventfd wakeups done from io_uring, so that we can easily
   tell if we're recursing into io_uring again.

   Previously, this would have resulted in repeated multishot
   notifications if we had a dependency there. That could happen if an
   eventfd was registered as the ring eventfd, and we multishot polled
   for events on it. Or if an io_uring fd was added to epoll, and
   io_uring had a multishot request for the epoll fd (a minimal
   userspace sketch of the first scenario follows the shortlog below).

   Test cases here:

     https://git.kernel.dk/cgit/liburing/commit/?id=919755a7d0096fda08fb6d65ac54ad8d0fe027cd

   Previously these got terminated when the CQ ring eventually
   overflowed, now it's handled gracefully (me).

 - Tightening of the IOPOLL based completions (Pavel)

 - Optimizations of the networking zero-copy paths (Pavel)

 - Various tweaks and fixes (Dylan, Pavel)

* tag 'for-6.2/io_uring-2022-12-08' of git://git.kernel.dk/linux: (41 commits)
  io_uring: keep unlock_post inlined in hot path
  io_uring: don't use complete_post in kbuf
  io_uring: spelling fix
  io_uring: remove io_req_complete_post_tw
  io_uring: allow multishot polled reqs to defer completion
  io_uring: remove overflow param from io_post_aux_cqe
  io_uring: add lockdep assertion in io_fill_cqe_aux
  io_uring: make io_fill_cqe_aux static
  io_uring: add io_aux_cqe which allows deferred completion
  io_uring: allow defer completion for aux posted cqes
  io_uring: defer all io_req_complete_failed
  io_uring: always lock in io_apoll_task_func
  io_uring: remove iopoll spinlock
  io_uring: iopoll protect complete_post
  io_uring: inline __io_req_complete_put()
  io_uring: remove io_req_tw_post_queue
  io_uring: use io_req_task_complete() in timeout
  io_uring: hold locks for io_req_complete_failed
  io_uring: add completion locking for iopoll
  io_uring: kill io_cqring_ev_posted() and __io_cq_unlock_post()
  ...
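A minimal userspace sketch of that first recursion scenario, for illustration only and not part of the series: the eventfd registered to receive CQE notifications for the ring is also the fd the same ring multishot-polls, so every posted completion signals the eventfd and re-triggers the poll. It assumes an already initialized ring and the liburing helpers io_uring_register_eventfd() and io_uring_prep_poll_multishot().

/*
 * Sketch only: before this pull, the resulting wakeup loop eventually
 * overflowed the CQ ring and terminated the multishot request; with the
 * wakeups flagged, the recursion is detected and handled gracefully.
 */
#include <liburing.h>
#include <sys/eventfd.h>
#include <poll.h>

static int setup_recursive_poll(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;
	/* every CQE posted to 'ring' now signals efd */
	if (io_uring_register_eventfd(ring, efd))
		return -1;
	/* ...and the same ring repeatedly waits for efd to become readable */
	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -1;
	io_uring_prep_poll_multishot(sqe, efd, POLLIN);
	return io_uring_submit(ring);
}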
Diffstat (limited to 'io_uring/kbuf.c')
-rw-r--r--	io_uring/kbuf.c	14
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index e2c46889d5fa..4a6401080c1f 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -306,14 +306,11 @@ int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 		if (!bl->buf_nr_pages)
 			ret = __io_remove_buffers(ctx, bl, p->nbufs);
 	}
+	io_ring_submit_unlock(ctx, issue_flags);
 	if (ret < 0)
 		req_set_fail(req);
-
-	/* complete before unlock, IOPOLL may need the lock */
 	io_req_set_res(req, ret, 0);
-	__io_req_complete(req, issue_flags);
-	io_ring_submit_unlock(ctx, issue_flags);
-	return IOU_ISSUE_SKIP_COMPLETE;
+	return IOU_OK;
 }
 
 int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -458,13 +455,12 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 
 	ret = io_add_buffers(ctx, p, bl);
 err:
+	io_ring_submit_unlock(ctx, issue_flags);
+
 	if (ret < 0)
 		req_set_fail(req);
-	/* complete before unlock, IOPOLL may need the lock */
 	io_req_set_res(req, ret, 0);
-	__io_req_complete(req, issue_flags);
-	io_ring_submit_unlock(ctx, issue_flags);
-	return IOU_ISSUE_SKIP_COMPLETE;
+	return IOU_OK;
 }
 
 int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
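For reference, the tail of io_remove_buffers() with the first hunk applied reads as below (reassembled from the context and '+' lines of the diff; the start of the function is untouched and omitted). The ring lock is dropped before the result is set, and returning IOU_OK leaves the completion to the caller rather than having the handler complete the request itself under the lock, which is what IOU_ISSUE_SKIP_COMPLETE signalled.

		if (!bl->buf_nr_pages)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}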