From b0380bf6dad4601d92025841e2b7a135d566c6e3 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 13 Jun 2022 06:32:44 +0100 Subject: io_uring: fix races with file table unregister Fixed file table quiesce might unlock ->uring_lock, potentially letting new requests be submitted; don't allow those requests to use the table, as they will race with unregistration. Reported-and-tested-by: van fantasy Fixes: 05f3fb3c53975 ("io_uring: avoid ring quiesce for fixed file set unregister and update") Signed-off-by: Pavel Begunkov --- fs/io_uring.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index ed3416a7b2e9..00d266746916 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -9768,11 +9768,19 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx) static int io_sqe_files_unregister(struct io_ring_ctx *ctx) { + unsigned nr = ctx->nr_user_files; int ret; if (!ctx->file_data) return -ENXIO; + + /* + * Quiesce may unlock ->uring_lock, and while it's not held + * prevent new requests using the table. + */ + ctx->nr_user_files = 0; ret = io_rsrc_ref_quiesce(ctx->file_data, ctx); + ctx->nr_user_files = nr; if (!ret) __io_sqe_files_unregister(ctx); return ret; -- cgit v1.2.3 From d11d31fc5d8a96f707facee0babdcffaafa38de2 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 13 Jun 2022 06:30:06 +0100 Subject: io_uring: fix races with buffer table unregister Fixed buffer table quiesce might unlock ->uring_lock, potentially letting new requests be submitted; don't allow those requests to use the table, as they will race with unregistration. Reported-and-tested-by: van fantasy Fixes: bd54b6fe3316ec ("io_uring: implement fixed buffers registration similar to fixed files") Signed-off-by: Pavel Begunkov --- fs/io_uring.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 00d266746916..be05f375a776 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -10680,12 +10680,19 @@ static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx) static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx) { + unsigned nr = ctx->nr_user_bufs; int ret; if (!ctx->buf_data) return -ENXIO; + /* + * Quiesce may unlock ->uring_lock, and while it's not held + * prevent new requests using the table. + */ + ctx->nr_user_bufs = 0; ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx); + ctx->nr_user_bufs = nr; if (!ret) __io_sqe_buffers_unregister(ctx); return ret; -- cgit v1.2.3 From 05b538c1765f8d14a71ccf5f85258dcbeaf189f7 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 9 Jun 2022 08:34:35 +0100 Subject: io_uring: fix not locked access to fixed buf table We can look inside the fixed buffer table only while holding ->uring_lock; however, in some cases we don't do the right async prep for IORING_OP_{WRITE,READ}_FIXED, ending up with a NULL req->imu and forcing an io-wq worker to try to resolve the fixed buffer without proper locking. Move req->imu setup into early req init paths, i.e. io_prep_rw(), which is called unconditionally for rw requests and under uring_lock.
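
As a rough illustration of the pattern (a standalone userspace sketch with invented names, not the io_uring code itself), the model below resolves a fixed-buffer slot once at prep time, while the table is known to be stable, so the later issue path never consults the table; it also shows why the two patches above can make concurrent lookups fail cleanly simply by publishing a zero table size while quiescing. The speculation barrier (array_index_nospec()) used by the kernel is omitted here.

/*
 * Sketch only: resolve the buffer at prep time, under stable-table
 * conditions, so a worker running later never touches the table.
 */
#include <stddef.h>
#include <stdio.h>

struct mapped_buf {
	void *addr;
	size_t len;
};

struct ctx {
	struct mapped_buf *bufs;	/* fixed buffer table */
	unsigned int nr_bufs;		/* 0 while the table is quiescing */
};

struct req {
	unsigned int buf_index;
	struct mapped_buf *imu;		/* resolved at prep time */
};

/* prep path: runs while the table cannot change */
static int prep_fixed(struct ctx *ctx, struct req *req, unsigned int idx)
{
	req->buf_index = idx;
	if (idx >= ctx->nr_bufs)
		return -1;		/* -EFAULT in the kernel */
	req->imu = &ctx->bufs[idx];
	return 0;
}

/* issue path: may run much later, from a worker; no table lookup needed */
static void issue_fixed(const struct req *req)
{
	printf("IO into %p, len %zu\n", req->imu->addr, req->imu->len);
}

int main(void)
{
	char payload[64];
	struct mapped_buf table[] = { { payload, sizeof(payload) } };
	struct ctx ctx = { .bufs = table, .nr_bufs = 1 };
	struct req req = { 0 };

	if (prep_fixed(&ctx, &req, 0) == 0)
		issue_fixed(&req);
	return 0;
}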
Fixes: 634d00df5e1cf ("io_uring: add full-fledged dynamic buffers support") Signed-off-by: Pavel Begunkov --- fs/io_uring.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index be05f375a776..fd8a1ffe6a1a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3636,6 +3636,20 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) int ret; kiocb->ki_pos = READ_ONCE(sqe->off); + /* used for fixed read/write too - just read unconditionally */ + req->buf_index = READ_ONCE(sqe->buf_index); + + if (req->opcode == IORING_OP_READ_FIXED || + req->opcode == IORING_OP_WRITE_FIXED) { + struct io_ring_ctx *ctx = req->ctx; + u16 index; + + if (unlikely(req->buf_index >= ctx->nr_user_bufs)) + return -EFAULT; + index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); + req->imu = ctx->user_bufs[index]; + io_req_set_rsrc_node(req, ctx, 0); + } ioprio = READ_ONCE(sqe->ioprio); if (ioprio) { @@ -3648,12 +3662,9 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) kiocb->ki_ioprio = get_current_ioprio(); } - req->imu = NULL; req->rw.addr = READ_ONCE(sqe->addr); req->rw.len = READ_ONCE(sqe->len); req->rw.flags = READ_ONCE(sqe->rw_flags); - /* used for fixed read/write too - just read unconditionally */ - req->buf_index = READ_ONCE(sqe->buf_index); return 0; } @@ -3785,20 +3796,9 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter, unsigned int issue_flags) { - struct io_mapped_ubuf *imu = req->imu; - u16 index, buf_index = req->buf_index; - - if (likely(!imu)) { - struct io_ring_ctx *ctx = req->ctx; - - if (unlikely(buf_index >= ctx->nr_user_bufs)) - return -EFAULT; - io_req_set_rsrc_node(req, ctx, issue_flags); - index = array_index_nospec(buf_index, ctx->nr_user_bufs); - imu = READ_ONCE(ctx->user_bufs[index]); - req->imu = imu; - } - return __io_import_fixed(req, rw, iter, imu); + if (WARN_ON_ONCE(!req->imu)) + return -EFAULT; + return __io_import_fixed(req, rw, iter, req->imu); } static int io_buffer_add_list(struct io_ring_ctx *ctx, -- cgit v1.2.3 From e71d7c56dd69f720169c1675f87a1d22d8167767 Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Sat, 11 Jun 2022 20:22:20 +0800 Subject: io_uring: openclose: fix bug of closing wrong fixed file Don't update ret until the fixed file is closed; otherwise the file slot becomes the error code. Fixes: a7c41b4687f5 ("io_uring: let IORING_OP_FILES_UPDATE support choosing fixed file slots") Signed-off-by: Hao Xu [pavel: 5.19 rebase] Signed-off-by: Pavel Begunkov --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index fd8a1ffe6a1a..e6d8cafdd28e 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -8035,8 +8035,8 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req, if (ret < 0) break; if (copy_to_user(&fds[done], &ret, sizeof(ret))) { - ret = -EFAULT; __io_close_fixed(req, issue_flags, ret); + ret = -EFAULT; break; } } -- cgit v1.2.3 From 42db0c00e275877eb92480beaa16b33507dc3bda Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Sat, 11 Jun 2022 20:29:52 +0800 Subject: io_uring: kbuf: fix bug of not consuming ring buffer in partial io case When we use a ring-mapped provided buffer, we should consume it before arming poll if partial IO has been done. Otherwise the buffer may be used by other requests and thus we lose the data.
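
As a rough standalone sketch (the ring and field names below are invented, not the kernel's io_uring_buf_ring), committing a buffer after partial IO is just an increment of the consumer head, after which the same entry can no longer be handed to another request:

/* Sketch only: commit a ring-provided buffer once partial IO has happened. */
#include <stdio.h>

#define RING_ENTRIES 8				/* power of two */

struct buf_ring {
	unsigned short head;			/* consumer (kernel) side */
	unsigned short tail;			/* producer (userspace) side */
	int bids[RING_ENTRIES];			/* buffer ids */
};

static int ring_peek(const struct buf_ring *r)	/* select without consuming */
{
	return r->bids[r->head & (RING_ENTRIES - 1)];
}

static void ring_commit(struct buf_ring *r)	/* consume after partial IO */
{
	r->head++;
}

int main(void)
{
	struct buf_ring r = { .tail = 2, .bids = { 100, 101 } };
	int bid = ring_peek(&r);	/* request A starts IO into buffer 100 */

	ring_commit(&r);		/* partial IO done: 100 is now taken */
	printf("A kept buffer %d, the next request gets %d\n",
	       bid, ring_peek(&r));
	return 0;
}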
Fixes: c7fb19428d67 ("io_uring: add support for ring mapped supplied buffers") Signed-off-by: Hao Xu [pavel: 5.19 rebase] Signed-off-by: Pavel Begunkov --- fs/io_uring.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index e6d8cafdd28e..84b45ed91b2d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1729,9 +1729,16 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))) return; - /* don't recycle if we already did IO to this buffer */ - if (req->flags & REQ_F_PARTIAL_IO) + /* + * For legacy provided buffer mode, don't recycle if we already did + * IO to this buffer. For ring-mapped provided buffer mode, we should + * increment ring->head to explicitly monopolize the buffer to avoid + * multiple use. + */ + if ((req->flags & REQ_F_BUFFER_SELECTED) && + (req->flags & REQ_F_PARTIAL_IO)) return; + /* * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear * the flag and hence ensure that bl->head doesn't get incremented. @@ -1739,8 +1746,13 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) */ if (req->flags & REQ_F_BUFFER_RING) { if (req->buf_list) { - req->buf_index = req->buf_list->bgid; - req->flags &= ~REQ_F_BUFFER_RING; + if (req->flags & REQ_F_PARTIAL_IO) { + req->buf_list->head++; + req->buf_list = NULL; + } else { + req->buf_index = req->buf_list->bgid; + req->flags &= ~REQ_F_BUFFER_RING; + } } return; } -- cgit v1.2.3 From fc9375e3f763b06c3c90c5f5b2b84d3e07c1f4c2 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sun, 12 Jun 2022 14:31:38 +0100 Subject: io_uring: fix double unlock for pbuf select io_buffer_select(), which is the only caller of io_ring_buffer_select(), fully handles locking, mutex unlock in io_ring_buffer_select() will lead to double unlock. Fixes: c7fb19428d67d ("io_uring: add support for ring mapped supplied buffers") Signed-off-by: Pavel Begunkov --- fs/io_uring.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 84b45ed91b2d..4719eaee3b45 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3849,10 +3849,8 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, struct io_uring_buf *buf; __u32 head = bl->head; - if (unlikely(smp_load_acquire(&br->tail) == head)) { - io_ring_submit_unlock(req->ctx, issue_flags); + if (unlikely(smp_load_acquire(&br->tail) == head)) return NULL; - } head &= bl->mask; if (head < IO_BUFFER_LIST_BUF_PER_PAGE) { -- cgit v1.2.3 From 97da4a537924d87e2261773f3ac9365abb191fc9 Mon Sep 17 00:00:00 2001 From: Dylan Yudaken Date: Mon, 13 Jun 2022 03:11:55 -0700 Subject: io_uring: fix index calculation When indexing into a provided buffer ring, do not subtract 1 from the index. 
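
The arithmetic can be seen in a tiny standalone example (BUFS_PER_PAGE is a stand-in constant; in the kernel the first page of entries is handled by a separate branch): with the stray "- 1", every entry beyond the first page resolved to the previous page.

/* Sketch only: page/offset split for a ring index, with and without "- 1". */
#include <stdio.h>

#define BUFS_PER_PAGE 64

int main(void)
{
	for (unsigned int head = 63; head <= 65; head++) {
		unsigned int off = head & (BUFS_PER_PAGE - 1);
		unsigned int page = head / BUFS_PER_PAGE;	/* fixed */
		int buggy = (int)(head / BUFS_PER_PAGE) - 1;	/* old code */

		printf("head=%2u -> page=%u off=%2u (buggy page=%d)\n",
		       head, page, off, buggy);
	}
	return 0;
}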
Fixes: c7fb19428d67 ("io_uring: add support for ring mapped supplied buffers") Signed-off-by: Dylan Yudaken Link: https://lore.kernel.org/r/20220613101157.3687-2-dylany@fb.com Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 3aab4182fd89..9cf9aff51b70 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3888,7 +3888,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, buf = &br->bufs[head]; } else { int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1); - int index = head / IO_BUFFER_LIST_BUF_PER_PAGE - 1; + int index = head / IO_BUFFER_LIST_BUF_PER_PAGE; buf = page_address(bl->buf_pages[index]); buf += off; } -- cgit v1.2.3 From c6e9fa5c0ab811f4bec36a96337f4b1bb77d142c Mon Sep 17 00:00:00 2001 From: Dylan Yudaken Date: Mon, 13 Jun 2022 03:11:56 -0700 Subject: io_uring: fix types in provided buffer ring The type of head needs to match that of tail in order for rollover and comparisons to work correctly. Without this change the comparison of tail to head might incorrectly allow io_uring to use a buffer that userspace had not given it. Fixes: c7fb19428d67 ("io_uring: add support for ring mapped supplied buffers") Signed-off-by: Dylan Yudaken Link: https://lore.kernel.org/r/20220613101157.3687-3-dylany@fb.com Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 9cf9aff51b70..6eea18e8330c 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -298,8 +298,8 @@ struct io_buffer_list { /* below is for ring provided buffers */ __u16 buf_nr_pages; __u16 nr_entries; - __u32 head; - __u32 mask; + __u16 head; + __u16 mask; }; struct io_buffer { @@ -3876,7 +3876,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, { struct io_uring_buf_ring *br = bl->buf_ring; struct io_uring_buf *buf; - __u32 head = bl->head; + __u16 head = bl->head; if (unlikely(smp_load_acquire(&br->tail) == head)) { io_ring_submit_unlock(req->ctx, issue_flags); -- cgit v1.2.3 From f9437ac0f851cea2374d53594f52fbbefdd977bd Mon Sep 17 00:00:00 2001 From: Dylan Yudaken Date: Mon, 13 Jun 2022 03:11:57 -0700 Subject: io_uring: limit size of provided buffer ring The type of head and tail do not allow more than 2^15 entries in a provided buffer ring, so do not allow this. At 2^16 while each entry can be indexed, there is no way to disambiguate full vs empty. Signed-off-by: Dylan Yudaken Link: https://lore.kernel.org/r/20220613101157.3687-4-dylany@fb.com Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 6eea18e8330c..85b116ddfd2a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -13002,6 +13002,10 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) if (!is_power_of_2(reg.ring_entries)) return -EINVAL; + /* cannot disambiguate full vs empty due to head/tail size */ + if (reg.ring_entries >= 65536) + return -EINVAL; + if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) { int ret = io_init_bl_list(ctx); if (ret) -- cgit v1.2.3 From 8899ce4b2f7364a90e3b9cf332dfd9993c61f46c Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 14 Jun 2022 17:51:16 +0100 Subject: Revert "io_uring: support CQE32 for nop operation" This reverts commit 2bb04df7c2af9dad5d28771c723bc39b01cf7df4. 
CQE32 nops were used for debugging and benchmarking but it doesn't target any real use case. Revert it, we can return it back if someone finds a good way to use it. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/5ff623d84ccb4b3f3b92a3ea41cdcfa612f3d96f.1655224415.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index ca6170a66e62..bf556f77d4ab 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -784,12 +784,6 @@ struct io_msg { u32 len; }; -struct io_nop { - struct file *file; - u64 extra1; - u64 extra2; -}; - struct io_async_connect { struct sockaddr_storage address; }; @@ -994,7 +988,6 @@ struct io_kiocb { struct io_msg msg; struct io_xattr xattr; struct io_socket sock; - struct io_nop nop; struct io_uring_cmd uring_cmd; }; @@ -5268,14 +5261,6 @@ done: static int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - /* - * If the ring is setup with CQE32, relay back addr/addr - */ - if (req->ctx->flags & IORING_SETUP_CQE32) { - req->nop.extra1 = READ_ONCE(sqe->addr); - req->nop.extra2 = READ_ONCE(sqe->addr2); - } - return 0; } @@ -5296,11 +5281,7 @@ static int io_nop(struct io_kiocb *req, unsigned int issue_flags) } cflags = io_put_kbuf(req, issue_flags); - if (!(req->ctx->flags & IORING_SETUP_CQE32)) - __io_req_complete(req, issue_flags, 0, cflags); - else - __io_req_complete32(req, issue_flags, 0, cflags, - req->nop.extra1, req->nop.extra2); + __io_req_complete(req, issue_flags, 0, cflags); return 0; } -- cgit v1.2.3 From aa165d6d2bb55f8b1bb5047fd634311681316fa2 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 14 Jun 2022 17:51:17 +0100 Subject: Revert "io_uring: add buffer selection support to IORING_OP_NOP" This reverts commit 3d200242a6c968af321913b635fc4014b238cba4. Buffer selection with nops was used for debugging and benchmarking but is useless in real life. Let's revert it before it's released. 
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/c5012098ca6b51dfbdcb190f8c4e3c0bf1c965dc.1655224415.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index bf556f77d4ab..1b95c6750a81 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1114,7 +1114,6 @@ static const struct io_op_def io_op_defs[] = { [IORING_OP_NOP] = { .audit_skip = 1, .iopoll = 1, - .buffer_select = 1, }, [IORING_OP_READV] = { .needs_file = 1, @@ -5269,19 +5268,7 @@ static int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) */ static int io_nop(struct io_kiocb *req, unsigned int issue_flags) { - unsigned int cflags; - void __user *buf; - - if (req->flags & REQ_F_BUFFER_SELECT) { - size_t len = 1; - - buf = io_buffer_select(req, &len, issue_flags); - if (!buf) - return -ENOBUFS; - } - - cflags = io_put_kbuf(req, issue_flags); - __io_req_complete(req, issue_flags, 0, cflags); + __io_req_complete(req, issue_flags, 0, 0); return 0; } -- cgit v1.2.3 From d884b6498d2f022098502e106d5a45ab635f2e9a Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 14 Jun 2022 17:51:18 +0100 Subject: io_uring: remove IORING_CLOSE_FD_AND_FILE_SLOT This partially reverts a7c41b4687f5902af70cd559806990930c8a307b. Even though IORING_CLOSE_FD_AND_FILE_SLOT might save cycles for some users, it tries to do two things at a time, and it's not clear how to handle errors and what to return in a single result field when one part fails and the other completes well. Kill it for now. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/837c745019b3795941eee4fcfd7de697886d645b.1655224415.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 1b95c6750a81..1b0b6099e717 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -576,7 +576,6 @@ struct io_close { struct file *file; int fd; u32 file_slot; - u32 flags; }; struct io_timeout_data { @@ -5966,18 +5965,14 @@ static int io_statx(struct io_kiocb *req, unsigned int issue_flags) static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - if (sqe->off || sqe->addr || sqe->len || sqe->buf_index) + if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index) return -EINVAL; if (req->flags & REQ_F_FIXED_FILE) return -EBADF; req->close.fd = READ_ONCE(sqe->fd); req->close.file_slot = READ_ONCE(sqe->file_index); - req->close.flags = READ_ONCE(sqe->close_flags); - if (req->close.flags & ~IORING_CLOSE_FD_AND_FILE_SLOT) - return -EINVAL; - if (!(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT) && - req->close.file_slot && req->close.fd) + if (req->close.file_slot && req->close.fd) return -EINVAL; return 0; @@ -5993,8 +5988,7 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags) if (req->close.file_slot) { ret = io_close_fixed(req, issue_flags); - if (ret || !(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT)) - goto err; + goto err; } spin_lock(&files->file_lock); -- cgit v1.2.3 From 91ef75a7db0d0855284b78d60d3fcec5c353ec5a Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 15 Jun 2022 11:23:02 +0100 Subject: io_uring: get rid of __io_fill_cqe{32}_req() There are too many CQE filling helpers; kill __io_fill_cqe{32}_req(), use __io_fill_cqe{32}_req_filled() instead, and then rename it. This will simplify the fixes in the following patches.
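
Roughly, the shape the following patches converge on looks like this sketch (simplified, with invented types, not the actual io_uring code): a single fill helper that writes either a 16B or a 32B completion depending on how the ring was set up, zeroing the extra fields when nothing supplied them.

/* Sketch only: one fill helper for both completion sizes. */
#include <stdint.h>
#include <stdio.h>

#define SETUP_CQE32 (1u << 0)

struct cqe {
	uint64_t user_data;
	int32_t res;
	uint32_t flags;
	uint64_t big_cqe[2];	/* only present on CQE32 rings */
};

struct request {
	uint64_t user_data;
	int32_t res;
	uint32_t cflags;
	uint64_t extra1, extra2;
};

static void fill_cqe(unsigned int ring_flags, struct cqe *slot,
		     const struct request *req)
{
	slot->user_data = req->user_data;
	slot->res = req->res;
	slot->flags = req->cflags;
	slot->big_cqe[0] = 0;
	slot->big_cqe[1] = 0;
	if (ring_flags & SETUP_CQE32) {
		slot->big_cqe[0] = req->extra1;
		slot->big_cqe[1] = req->extra2;
	}
}

int main(void)
{
	struct request req = { .user_data = 7, .res = 42, .extra1 = 0xfeed };
	struct cqe slot;

	fill_cqe(SETUP_CQE32, &slot, &req);
	printf("user_data=%llu res=%d big0=%#llx\n",
	       (unsigned long long)slot.user_data, slot.res,
	       (unsigned long long)slot.big_cqe[0]);
	return 0;
}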
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/c18e0d191014fb574f24721245e4e3fddd0b6917.1655287457.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 70 ++++++++++++++++++----------------------------------------- 1 file changed, 21 insertions(+), 49 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 1b0b6099e717..654c2f897497 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2464,8 +2464,8 @@ static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data, return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0); } -static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx, - struct io_kiocb *req) +static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx, + struct io_kiocb *req) { struct io_uring_cqe *cqe; @@ -2486,8 +2486,8 @@ static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx, req->cqe.res, req->cqe.flags, 0, 0); } -static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx, - struct io_kiocb *req) +static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx, + struct io_kiocb *req) { struct io_uring_cqe *cqe; u64 extra1 = req->extra1; @@ -2513,44 +2513,6 @@ static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx, req->cqe.flags, extra1, extra2); } -static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags) -{ - trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags, 0, 0); - return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags); -} - -static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags, - u64 extra1, u64 extra2) -{ - struct io_ring_ctx *ctx = req->ctx; - struct io_uring_cqe *cqe; - - if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_CQE32))) - return; - if (req->flags & REQ_F_CQE_SKIP) - return; - - trace_io_uring_complete(ctx, req, req->cqe.user_data, res, cflags, - extra1, extra2); - - /* - * If we can't get a cq entry, userspace overflowed the - * submission (by quite a lot). Increment the overflow count in - * the ring. 
- */ - cqe = io_get_cqe(ctx); - if (likely(cqe)) { - WRITE_ONCE(cqe->user_data, req->cqe.user_data); - WRITE_ONCE(cqe->res, res); - WRITE_ONCE(cqe->flags, cflags); - WRITE_ONCE(cqe->big_cqe[0], extra1); - WRITE_ONCE(cqe->big_cqe[1], extra2); - return; - } - - io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags, extra1, extra2); -} - static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) { @@ -2593,16 +2555,24 @@ static void __io_req_complete_put(struct io_kiocb *req) static void __io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags) { - if (!(req->flags & REQ_F_CQE_SKIP)) - __io_fill_cqe_req(req, res, cflags); + if (!(req->flags & REQ_F_CQE_SKIP)) { + req->cqe.res = res; + req->cqe.flags = cflags; + __io_fill_cqe_req(req->ctx, req); + } __io_req_complete_put(req); } static void __io_req_complete_post32(struct io_kiocb *req, s32 res, u32 cflags, u64 extra1, u64 extra2) { - if (!(req->flags & REQ_F_CQE_SKIP)) - __io_fill_cqe32_req(req, res, cflags, extra1, extra2); + if (!(req->flags & REQ_F_CQE_SKIP)) { + req->cqe.res = res; + req->cqe.flags = cflags; + req->extra1 = extra1; + req->extra2 = extra2; + __io_fill_cqe32_req(req->ctx, req); + } __io_req_complete_put(req); } @@ -3207,9 +3177,9 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx) if (!(req->flags & REQ_F_CQE_SKIP)) { if (!(ctx->flags & IORING_SETUP_CQE32)) - __io_fill_cqe_req_filled(ctx, req); + __io_fill_cqe_req(ctx, req); else - __io_fill_cqe32_req_filled(ctx, req); + __io_fill_cqe32_req(ctx, req); } } @@ -3329,7 +3299,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) nr_events++; if (unlikely(req->flags & REQ_F_CQE_SKIP)) continue; - __io_fill_cqe_req(req, req->cqe.res, io_put_kbuf(req, 0)); + + req->cqe.flags = io_put_kbuf(req, 0); + __io_fill_cqe_req(req->ctx, req); } if (unlikely(!nr_events)) -- cgit v1.2.3 From f43de1f88841d59f27f761219b6550bd6ce3dcc1 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 15 Jun 2022 11:23:03 +0100 Subject: io_uring: unite fill_cqe and the 32B version We want just one function that will handle both normal cqes and 32B cqes. Combine __io_fill_cqe_req() and __io_fill_cqe_req32(). It's still not entirely correct yet, but saves us from cases when we fill an CQE of a wrong size. Fixes: 76c68fbf1a1f9 ("io_uring: enable CQE32") Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/8085c5b2f74141520f60decd45334f87e389b718.1655287457.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 61 ++++++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 42 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 654c2f897497..eb858cf92af9 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2469,21 +2469,48 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx, { struct io_uring_cqe *cqe; - trace_io_uring_complete(req->ctx, req, req->cqe.user_data, - req->cqe.res, req->cqe.flags, 0, 0); + if (!(ctx->flags & IORING_SETUP_CQE32)) { + trace_io_uring_complete(req->ctx, req, req->cqe.user_data, + req->cqe.res, req->cqe.flags, 0, 0); - /* - * If we can't get a cq entry, userspace overflowed the - * submission (by quite a lot). Increment the overflow count in - * the ring. - */ - cqe = io_get_cqe(ctx); - if (likely(cqe)) { - memcpy(cqe, &req->cqe, sizeof(*cqe)); - return true; + /* + * If we can't get a cq entry, userspace overflowed the + * submission (by quite a lot). Increment the overflow count in + * the ring. 
+ */ + cqe = io_get_cqe(ctx); + if (likely(cqe)) { + memcpy(cqe, &req->cqe, sizeof(*cqe)); + return true; + } + + return io_cqring_event_overflow(ctx, req->cqe.user_data, + req->cqe.res, req->cqe.flags, + 0, 0); + } else { + u64 extra1 = req->extra1; + u64 extra2 = req->extra2; + + trace_io_uring_complete(req->ctx, req, req->cqe.user_data, + req->cqe.res, req->cqe.flags, extra1, extra2); + + /* + * If we can't get a cq entry, userspace overflowed the + * submission (by quite a lot). Increment the overflow count in + * the ring. + */ + cqe = io_get_cqe(ctx); + if (likely(cqe)) { + memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe)); + WRITE_ONCE(cqe->big_cqe[0], extra1); + WRITE_ONCE(cqe->big_cqe[1], extra2); + return true; + } + + return io_cqring_event_overflow(ctx, req->cqe.user_data, + req->cqe.res, req->cqe.flags, + extra1, extra2); } - return io_cqring_event_overflow(ctx, req->cqe.user_data, - req->cqe.res, req->cqe.flags, 0, 0); } static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx, @@ -3175,12 +3202,8 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx) struct io_kiocb *req = container_of(node, struct io_kiocb, comp_list); - if (!(req->flags & REQ_F_CQE_SKIP)) { - if (!(ctx->flags & IORING_SETUP_CQE32)) - __io_fill_cqe_req(ctx, req); - else - __io_fill_cqe32_req(ctx, req); - } + if (!(req->flags & REQ_F_CQE_SKIP)) + __io_fill_cqe_req(ctx, req); } io_commit_cqring(ctx); -- cgit v1.2.3 From 29ede2014c87576d2fc83680aa4c1d7403db0dfe Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 15 Jun 2022 11:23:04 +0100 Subject: io_uring: fill extra big cqe fields from req The only user of io_req_complete32()-like functions is cmd requests. Instead of keeping the whole complete32 family, remove them and provide the extras in already added for inline completions req->extra{1,2}. When fill_cqe_res() finds CQE32 option enabled it'll use those fields to fill a 32B cqe. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/af1319eb661b1f9a0abceb51cbbf72b8002e019d.1655287457.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 78 ++++++++--------------------------------------------------- 1 file changed, 10 insertions(+), 68 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index eb858cf92af9..10901db93f7e 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2513,33 +2513,6 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx, } } -static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx, - struct io_kiocb *req) -{ - struct io_uring_cqe *cqe; - u64 extra1 = req->extra1; - u64 extra2 = req->extra2; - - trace_io_uring_complete(req->ctx, req, req->cqe.user_data, - req->cqe.res, req->cqe.flags, extra1, extra2); - - /* - * If we can't get a cq entry, userspace overflowed the - * submission (by quite a lot). Increment the overflow count in - * the ring. 
- */ - cqe = io_get_cqe(ctx); - if (likely(cqe)) { - memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe)); - cqe->big_cqe[0] = extra1; - cqe->big_cqe[1] = extra2; - return true; - } - - return io_cqring_event_overflow(ctx, req->cqe.user_data, req->cqe.res, - req->cqe.flags, extra1, extra2); -} - static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) { @@ -2590,19 +2563,6 @@ static void __io_req_complete_post(struct io_kiocb *req, s32 res, __io_req_complete_put(req); } -static void __io_req_complete_post32(struct io_kiocb *req, s32 res, - u32 cflags, u64 extra1, u64 extra2) -{ - if (!(req->flags & REQ_F_CQE_SKIP)) { - req->cqe.res = res; - req->cqe.flags = cflags; - req->extra1 = extra1; - req->extra2 = extra2; - __io_fill_cqe32_req(req->ctx, req); - } - __io_req_complete_put(req); -} - static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags) { struct io_ring_ctx *ctx = req->ctx; @@ -2614,18 +2574,6 @@ static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags) io_cqring_ev_posted(ctx); } -static void io_req_complete_post32(struct io_kiocb *req, s32 res, - u32 cflags, u64 extra1, u64 extra2) -{ - struct io_ring_ctx *ctx = req->ctx; - - spin_lock(&ctx->completion_lock); - __io_req_complete_post32(req, res, cflags, extra1, extra2); - io_commit_cqring(ctx); - spin_unlock(&ctx->completion_lock); - io_cqring_ev_posted(ctx); -} - static inline void io_req_complete_state(struct io_kiocb *req, s32 res, u32 cflags) { @@ -2643,19 +2591,6 @@ static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, io_req_complete_post(req, res, cflags); } -static inline void __io_req_complete32(struct io_kiocb *req, - unsigned int issue_flags, s32 res, - u32 cflags, u64 extra1, u64 extra2) -{ - if (issue_flags & IO_URING_F_COMPLETE_DEFER) { - io_req_complete_state(req, res, cflags); - req->extra1 = extra1; - req->extra2 = extra2; - } else { - io_req_complete_post32(req, res, cflags, extra1, extra2); - } -} - static inline void io_req_complete(struct io_kiocb *req, s32 res) { if (res < 0) @@ -5079,6 +5014,13 @@ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, } EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task); +static inline void io_req_set_cqe32_extra(struct io_kiocb *req, + u64 extra1, u64 extra2) +{ + req->extra1 = extra1; + req->extra2 = extra2; +} + /* * Called by consumers of io_uring_cmd, if they originally returned * -EIOCBQUEUED upon receiving the command. @@ -5089,10 +5031,10 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2) if (ret < 0) req_set_fail(req); + if (req->ctx->flags & IORING_SETUP_CQE32) - __io_req_complete32(req, 0, ret, 0, res2, 0); - else - io_req_complete(req, ret); + io_req_set_cqe32_extra(req, res2, 0); + io_req_complete(req, ret); } EXPORT_SYMBOL_GPL(io_uring_cmd_done); -- cgit v1.2.3 From 2caf9822f0507463168a9e83f93c75b3e3fac971 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 15 Jun 2022 11:23:05 +0100 Subject: io_uring: fix ->extra{1,2} misuse We don't really know the state of req->extra{1,2] fields in __io_fill_cqe_req(), if an opcode handler is not aware of CQE32 option, it never sets them up properly. Track the state of those fields with a request flag. 
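
A minimal sketch of the pattern, with invented names rather than the real request flags: the optional fields are copied out only when a handler has explicitly marked them initialised, so opcodes that never set them cannot leak stale values.

/* Sketch only: gate optional fields behind an "initialised" flag bit. */
#include <stdint.h>
#include <stdio.h>

#define REQ_F_EXTRA_INIT (1u << 0)	/* stand-in for REQ_F_CQE32_INIT */

struct request {
	unsigned int flags;
	uint64_t extra1, extra2;	/* garbage unless the bit is set */
};

static void set_extra(struct request *req, uint64_t e1, uint64_t e2)
{
	req->extra1 = e1;
	req->extra2 = e2;
	req->flags |= REQ_F_EXTRA_INIT;
}

static void get_extra(const struct request *req, uint64_t out[2])
{
	out[0] = out[1] = 0;
	if (req->flags & REQ_F_EXTRA_INIT) {
		out[0] = req->extra1;
		out[1] = req->extra2;
	}
}

int main(void)
{
	struct request untouched = { .extra1 = 0xdeadbeef };	/* never set */
	struct request cmd = { 0 };
	uint64_t out[2];

	set_extra(&cmd, 1, 2);
	get_extra(&untouched, out);
	printf("untouched: %llx %llx\n",
	       (unsigned long long)out[0], (unsigned long long)out[1]);
	get_extra(&cmd, out);
	printf("cmd:       %llx %llx\n",
	       (unsigned long long)out[0], (unsigned long long)out[1]);
	return 0;
}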
Fixes: 76c68fbf1a1f9 ("io_uring: enable CQE32") Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/4b3e5be512fbf4debec7270fd485b8a3b014d464.1655287457.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 10901db93f7e..808b7f4ace0b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -844,6 +844,7 @@ enum { REQ_F_SINGLE_POLL_BIT, REQ_F_DOUBLE_POLL_BIT, REQ_F_PARTIAL_IO_BIT, + REQ_F_CQE32_INIT_BIT, REQ_F_APOLL_MULTISHOT_BIT, /* keep async read/write and isreg together and in order */ REQ_F_SUPPORT_NOWAIT_BIT, @@ -913,6 +914,8 @@ enum { REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT), /* fast poll multishot mode */ REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT), + /* ->extra1 and ->extra2 are initialised */ + REQ_F_CQE32_INIT = BIT(REQ_F_CQE32_INIT_BIT), }; struct async_poll { @@ -2488,8 +2491,12 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx, req->cqe.res, req->cqe.flags, 0, 0); } else { - u64 extra1 = req->extra1; - u64 extra2 = req->extra2; + u64 extra1 = 0, extra2 = 0; + + if (req->flags & REQ_F_CQE32_INIT) { + extra1 = req->extra1; + extra2 = req->extra2; + } trace_io_uring_complete(req->ctx, req, req->cqe.user_data, req->cqe.res, req->cqe.flags, extra1, extra2); @@ -5019,6 +5026,7 @@ static inline void io_req_set_cqe32_extra(struct io_kiocb *req, { req->extra1 = extra1; req->extra2 = extra2; + req->flags |= REQ_F_CQE32_INIT; } /* -- cgit v1.2.3 From cd94903d3ba50d7ae797c603f68996af8d1ba1a1 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 15 Jun 2022 11:23:06 +0100 Subject: io_uring: remove __io_fill_cqe() helper In preparation for the following patch, inline __io_fill_cqe(), there is only one user. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/71dab9afc3cde3f8b64d26f20d3b60bdc40726ff.1655287457.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 808b7f4ace0b..792e9c95d217 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2447,26 +2447,6 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, return true; } -static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data, - s32 res, u32 cflags) -{ - struct io_uring_cqe *cqe; - - /* - * If we can't get a cq entry, userspace overflowed the - * submission (by quite a lot). Increment the overflow count in - * the ring. - */ - cqe = io_get_cqe(ctx); - if (likely(cqe)) { - WRITE_ONCE(cqe->user_data, user_data); - WRITE_ONCE(cqe->res, res); - WRITE_ONCE(cqe->flags, cflags); - return true; - } - return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0); -} - static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx, struct io_kiocb *req) { @@ -2523,9 +2503,24 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx, static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) { + struct io_uring_cqe *cqe; + ctx->cq_extra++; trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0); - return __io_fill_cqe(ctx, user_data, res, cflags); + + /* + * If we can't get a cq entry, userspace overflowed the + * submission (by quite a lot). Increment the overflow count in + * the ring. 
+ */ + cqe = io_get_cqe(ctx); + if (likely(cqe)) { + WRITE_ONCE(cqe->user_data, user_data); + WRITE_ONCE(cqe->res, res); + WRITE_ONCE(cqe->flags, cflags); + return true; + } + return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0); } static void __io_req_complete_put(struct io_kiocb *req) -- cgit v1.2.3 From c5595975b53a487bf329eeba65b5c5f34605a4c0 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 15 Jun 2022 11:23:07 +0100 Subject: io_uring: make io_fill_cqe_aux honour CQE32 Don't let io_fill_cqe_aux() post 16B cqes for CQE32 rings, neither the kernel nor the userspace expect this to happen. Fixes: 76c68fbf1a1f9 ("io_uring: enable CQE32") Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/64fae669fae1b7083aa15d0cd807f692b0880b9a.1655287457.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 792e9c95d217..5d479428d8e5 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2518,6 +2518,11 @@ static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, WRITE_ONCE(cqe->user_data, user_data); WRITE_ONCE(cqe->res, res); WRITE_ONCE(cqe->flags, cflags); + + if (ctx->flags & IORING_SETUP_CQE32) { + WRITE_ONCE(cqe->big_cqe[0], 0); + WRITE_ONCE(cqe->big_cqe[1], 0); + } return true; } return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0); -- cgit v1.2.3 From a76c0b31eef50fdb8b21d53a6d050f59241fb88e Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 15 Jun 2022 19:51:11 -0600 Subject: io_uring: commit non-pollable provided mapped buffers upfront For recv/recvmsg, IO either completes immediately or gets queued for a retry. This isn't the case for read/readv, if eg a normal file or a block device is used. Here, an operation can get queued with the block layer. If this happens, ring mapped buffers must get committed immediately to avoid that the next read can consume the same buffer. Check if we're dealing with pollable file, when getting a new ring mapped provided buffer. If it's not, commit it immediately rather than wait post issue. If we don't wait, we can race with completions coming in, or just plain buffer reuse by committing after a retry where others could have grabbed the same buffer. Fixes: c7fb19428d67 ("io_uring: add support for ring mapped supplied buffers") Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 5d479428d8e5..b6e75f69c6b1 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3836,7 +3836,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, req->buf_list = bl; req->buf_index = buf->bid; - if (issue_flags & IO_URING_F_UNLOCKED) { + if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) { /* * If we came in unlocked, we have no choice but to consume the * buffer here. This does mean it'll be pinned until the IO -- cgit v1.2.3 From 32fc810b364f3dd30930c594e461ffa1761fef39 Mon Sep 17 00:00:00 2001 From: Dylan Yudaken Date: Thu, 16 Jun 2022 06:50:11 -0700 Subject: io_uring: do not use prio task_work_add in uring_cmd io_req_task_prio_work_add has a strict assumption that it will only be used with io_req_task_complete. There is a codepath that assumes this is the case and will not even call the completion function if it is hit. For uring_cmd with an arbitrary completion function change the call to the correct non-priority version. 
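
The failure mode can be modelled with a deliberately simplified sketch (not the io_uring task_work implementation): a fast path that batches work under the assumption of one specific callback will simply never run an arbitrary callback queued through it, so such work has to go through the plain path.

/* Sketch only: a fast queueing path that assumes one well-known callback. */
#include <stdio.h>

typedef void (*work_fn)(int id);

static void generic_complete(int id) { printf("completed %d\n", id); }
static void custom_cb(int id)        { printf("custom cb %d\n", id); }

struct work {
	work_fn fn;
	int id;
};

/* fast path: batches entries, assuming fn == generic_complete */
static void queue_prio(const struct work *w)
{
	if (w->fn == generic_complete) {
		printf("batched completion for %d\n", w->id);
		return;
	}
	/* in this model anything else simply never runs */
}

/* plain path: always runs the callback */
static void queue_normal(const struct work *w)
{
	w->fn(w->id);
}

int main(void)
{
	struct work a = { generic_complete, 1 };
	struct work b = { custom_cb, 2 };

	queue_prio(&a);		/* fine: matches the assumption */
	queue_prio(&b);		/* lost: callback is never invoked */
	queue_normal(&b);	/* correct path for arbitrary callbacks */
	return 0;
}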
Fixes: ee692a21e9bf8 ("fs,io_uring: add infrastructure for uring-cmd") Signed-off-by: Dylan Yudaken Reviewed-by: Pavel Begunkov Link: https://lore.kernel.org/r/20220616135011.441980-1-dylany@fb.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index b6e75f69c6b1..95a1a78d799a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5017,7 +5017,7 @@ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, req->uring_cmd.task_work_cb = task_work_cb; req->io_task_work.func = io_uring_cmd_work; - io_req_task_prio_work_add(req); + io_req_task_work_add(req); } EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task); -- cgit v1.2.3 From 6436c770f120a9ffeb4e791650467f30f1d062d1 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 17 Jun 2022 06:24:26 -0600 Subject: io_uring: recycle provided buffer if we punt to io-wq io_arm_poll_handler() will recycle the buffer appropriately if we end up arming poll (or if we're ready to retry), but not for the io-wq case if we have attempted poll first. Explicitly recycle the buffer to avoid both hanging on to it too long, but also to avoid multiple reads grabbing the same one. This can happen for ring mapped buffers, since it hasn't necessarily been committed. Fixes: c7fb19428d67 ("io_uring: add support for ring mapped supplied buffers") Link: https://github.com/axboe/liburing/issues/605 Signed-off-by: Jens Axboe --- fs/io_uring.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/io_uring.c b/fs/io_uring.c index 95a1a78d799a..d3ee4fc532fa 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -8690,6 +8690,7 @@ static void io_queue_async(struct io_kiocb *req, int ret) * Queued up for async execution, worker will release * submit reference when the iocb is actually submitted. */ + io_kbuf_recycle(req, 0); io_queue_iowq(req, NULL); break; case IO_APOLL_OK: -- cgit v1.2.3
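
A small sketch of the idea, using invented names rather than the kernel helpers: give the selected buffer back to the pool before the request is parked with a worker, so it is neither pinned for the whole wait nor handed out twice; it gets selected again when the request actually runs.

/* Sketch only: recycle a selected buffer before punting to a worker. */
#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 4

struct pool {
	bool in_use[POOL_SIZE];
};

struct request {
	int buf;		/* -1 when no buffer is currently selected */
};

static int buf_select(struct pool *p)
{
	for (int i = 0; i < POOL_SIZE; i++) {
		if (!p->in_use[i]) {
			p->in_use[i] = true;
			return i;
		}
	}
	return -1;
}

static void buf_recycle(struct pool *p, struct request *req)
{
	if (req->buf >= 0) {
		p->in_use[req->buf] = false;
		req->buf = -1;	/* will be re-selected at actual issue time */
	}
}

static void queue_worker(struct pool *p, struct request *req)
{
	buf_recycle(p, req);	/* give the buffer back before punting */
	printf("punted to worker, held buffer: %d\n", req->buf);
}

int main(void)
{
	struct pool pool = { { false } };
	struct request req = { .buf = -1 };

	req.buf = buf_select(&pool);
	printf("selected buffer %d\n", req.buf);
	queue_worker(&pool, &req);
	return 0;
}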