author		Jens Axboe <axboe@kernel.dk>	2025-06-05 11:33:52 -0600
committer	Jens Axboe <axboe@kernel.dk>	2025-06-23 08:59:13 -0600
commit		af19388a973877b2349df46c4487a789cd3148ed (patch)
tree		5b5f77fdd6bf87818ff3d85fbdfcdba40f808ecb /io_uring/io_uring.c
parent		4d811e395bbe54ba2febb3940d4b6c4741f360a6 (diff)
io_uring: add struct io_cold_def->sqe_copy() method
Will be called by the core of io_uring, if inline issue is not going to be
tried for a request. Opcodes can define this handler to defer copying of
SQE data that should remain stable.

Only called if IO_URING_F_INLINE is set. If it isn't set, then there's a
bug in the core handling of this, and -EFAULT will be returned instead to
terminate the request. This will trigger a WARN_ON_ONCE(). Don't expect
this to ever trigger, and down the line this can be removed.

Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
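For illustration only (not part of this patch): the interface added here is the
void (*sqe_copy)(struct io_kiocb *) callback in struct io_cold_def, which the
core invokes at most once per request (guarded by REQ_F_SQE_COPIED) and only
while IO_URING_F_INLINE is set, i.e. while the SQE memory is still guaranteed
stable. A minimal sketch of how a hypothetical opcode might use it follows; the
names io_foo, io_foo_async and io_foo_sqe_copy are invented for this example
and do not exist in the tree.

/* Hypothetical opcode-side handler, e.g. in a fictional io_uring/foo.c */
#include "io_uring.h"
#include "opdef.h"

struct io_foo {
	struct file			*file;
	const struct io_uring_sqe	*sqe;	/* points into the SQ ring while inline */
};

struct io_foo_async {
	struct io_uring_sqe		sqe;	/* stable copy owned by the request */
};

static void io_foo_sqe_copy(struct io_kiocb *req)
{
	struct io_foo *foo = io_kiocb_to_cmd(req, struct io_foo);
	struct io_foo_async *fa = req->async_data;

	/* copy the ring SQE into async data and repoint before going async */
	fa->sqe = *foo->sqe;
	foo->sqe = &fa->sqe;
}

/*
 * Registration would go through the opcode's entry in io_cold_defs[]
 * (io_uring/opdef.c), e.g.:
 *
 *	[IORING_OP_FOO] = {
 *		.name		= "FOO",
 *		.sqe_copy	= io_foo_sqe_copy,
 *	},
 */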
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--	io_uring/io_uring.c	27
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index c60d1c286c87..3a23c8713f1b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1938,14 +1938,34 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 	return file;
 }
 
-static void io_queue_async(struct io_kiocb *req, int ret)
+static int io_req_sqe_copy(struct io_kiocb *req, unsigned int issue_flags)
+{
+	const struct io_cold_def *def = &io_cold_defs[req->opcode];
+
+	if (req->flags & REQ_F_SQE_COPIED)
+		return 0;
+	req->flags |= REQ_F_SQE_COPIED;
+	if (!def->sqe_copy)
+		return 0;
+	if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_INLINE)))
+		return -EFAULT;
+	def->sqe_copy(req);
+	return 0;
+}
+
+static void io_queue_async(struct io_kiocb *req, unsigned int issue_flags, int ret)
 	__must_hold(&req->ctx->uring_lock)
 {
 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
+fail:
 		io_req_defer_failed(req, ret);
 		return;
 	}
 
+	ret = io_req_sqe_copy(req, issue_flags);
+	if (unlikely(ret))
+		goto fail;
+
 	switch (io_arm_poll_handler(req, 0)) {
 	case IO_APOLL_READY:
 		io_kbuf_recycle(req, 0);
@@ -1974,7 +1994,7 @@ static inline void io_queue_sqe(struct io_kiocb *req, unsigned int extra_flags)
 	 * doesn't support non-blocking read/write attempts
 	 */
 	if (unlikely(ret))
-		io_queue_async(req, ret);
+		io_queue_async(req, issue_flags, ret);
 }
 
 static void io_queue_sqe_fallback(struct io_kiocb *req)
@@ -1989,6 +2009,8 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
 		req->flags |= REQ_F_LINK;
 		io_req_defer_failed(req, req->cqe.res);
 	} else {
+		/* can't fail with IO_URING_F_INLINE */
+		io_req_sqe_copy(req, IO_URING_F_INLINE);
 		if (unlikely(req->ctx->drain_active))
 			io_drain_req(req);
 		else
@@ -2200,6 +2222,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	 */
 	if (unlikely(link->head)) {
 		trace_io_uring_link(req, link->last);
+		io_req_sqe_copy(req, IO_URING_F_INLINE);
 		link->last->link = req;
 		link->last = req;