author | Jens Axboe <axboe@kernel.dk> | 2019-12-04 08:53:43 -0700
---|---|---
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2019-12-13 08:42:33 +0100
commit | 74dcfcd1d3e332607a985344316ccf5d0c6ef0af (patch) | |
tree | 9427fd2e1f914cb340b8394d3d33f3d1178f511d /fs/io_uring.c | |
parent | 1dec7fcac3c52f98fe7e8a419c0fd5d7595a6fd7 (diff) | |
io_uring: ensure req->submit is copied when req is deferred
There's an issue with deferred requests through drain, where if we do
need to defer, we're not copying over the sqe_submit state correctly.
This can result in using uninitialized data when we then later go and
submit the deferred request, like this check in __io_submit_sqe():
	if (unlikely(s->index >= ctx->sq_entries))
		return -EINVAL;
with 's' being uninitialized, we can randomly fail this check. Fix this
by copying sqe_submit state when we defer a request.
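To make the failure mode concrete, here is a minimal user-space sketch of the pattern (not the kernel code itself; my_sqe, my_submit, my_req and defer_request are hypothetical stand-ins for io_uring_sqe, sqe_submit, io_kiocb and io_req_defer()):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct my_sqe { int opcode; };

struct my_submit {
	const struct my_sqe *sqe;	/* points into the SQ ring */
	unsigned short index;		/* validated again at submit time */
};

struct my_req {
	struct my_submit submit;	/* garbage until someone fills it in */
};

/* The defer path must stash everything needed to submit later. */
static int defer_request(struct my_req *req, const struct my_submit *s)
{
	struct my_sqe *sqe_copy = malloc(sizeof(*sqe_copy));

	if (!sqe_copy)
		return -1;
	memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
	/*
	 * The bug: this struct copy was missing, so every field of
	 * req->submit other than ->sqe (e.g. ->index) was left
	 * uninitialized and then read at deferred-submission time.
	 */
	memcpy(&req->submit, s, sizeof(*s));
	/* Repoint at the stable copy only after the struct copy above,
	 * otherwise it would be clobbered by the stale ring pointer. */
	req->submit.sqe = sqe_copy;
	return 0;
}

int main(void)
{
	struct my_sqe ring_sqe = { .opcode = 1 };
	struct my_submit s = { .sqe = &ring_sqe, .index = 3 };
	struct my_req req;

	if (defer_request(&req, &s) == 0) {
		/* Deferred submission can now safely check ->index. */
		printf("deferred index = %hu\n", req.submit.index);
		free((void *)req.submit.sqe);
	}
	return 0;
}
```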
This was only fixed in mainline as part of a cleanup series, before
anyone realized we had this issue. That series removed the separate
states of ->index vs ->submit.sqe, and is not something I was
comfortable putting into stable, hence the much simpler addition.
Here's the patch in the series that fixes the same issue:
commit cf6fd4bd559ee61a4454b161863c8de6f30f8dca
Author: Pavel Begunkov <asml.silence@gmail.com>
Date: Mon Nov 25 23:14:39 2019 +0300
io_uring: inline struct sqe_submit
Reported-by: Andres Freund <andres@anarazel.de>
Reported-by: Tomáš Chaloupka
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
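For context on the mainline cleanup referenced above, the shape of that change is roughly the following sketch; the before/after struct names are illustrative, and the fields beyond ->sqe and ->index are elided rather than an exact copy of the kernel structures:

```c
/* Before (5.4-era, roughly): per-request submit state is an embedded
 * aggregate, which every defer/async path must remember to copy whole. */
struct sqe_submit {
	const struct io_uring_sqe *sqe;
	unsigned short index;
	/* ... */
};

struct io_kiocb_before {
	struct sqe_submit submit;
	/* ... */
};

/* After commit cf6fd4bd559e (roughly): the fields live directly in the
 * request, so there is no separate aggregate left to forget to copy. */
struct io_kiocb_after {
	const struct io_uring_sqe *sqe;
	/* ... former sqe_submit fields inlined here ... */
};
```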
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 9 |
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b75c292f5559..a340147387ec 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2039,7 +2039,7 @@ add:
 }
 
 static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			const struct io_uring_sqe *sqe)
+			struct sqe_submit *s)
 {
 	struct io_uring_sqe *sqe_copy;
 
@@ -2057,7 +2057,8 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		return 0;
 	}
 
-	memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
+	memcpy(&req->submit, s, sizeof(*s));
+	memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
 	req->submit.sqe = sqe_copy;
 
 	INIT_WORK(&req->work, io_sq_wq_submit_work);
@@ -2425,7 +2426,7 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 {
 	int ret;
 
-	ret = io_req_defer(ctx, req, s->sqe);
+	ret = io_req_defer(ctx, req, s);
 	if (ret) {
 		if (ret != -EIOCBQUEUED) {
 			io_free_req(req);
@@ -2452,7 +2453,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	 * list.
 	 */
 	req->flags |= REQ_F_IO_DRAIN;
-	ret = io_req_defer(ctx, req, s->sqe);
+	ret = io_req_defer(ctx, req, s);
 	if (ret) {
 		if (ret != -EIOCBQUEUED) {
 			io_free_req(req);