| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-09 09:07:28 +0900 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-09 09:07:28 +0900 |
| commit | cfd4039213e7b5a828c5b78e1b5235cac91af53d (patch) | |
| tree | 971782550d86b32e927be6188883753263e02415 /io_uring/rsrc.c | |
| parent | 4482ebb2970efa58173075c101426b2f3af40b41 (diff) | |
| parent | 55d57b3bcc7efcab812a8179e2dc17d781302997 (diff) | |
Merge tag 'io_uring-6.19-20251208' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull io_uring updates from Jens Axboe:
"Followup set of fixes for io_uring for this merge window. These are
either later fixes or cleanups that don't make sense to defer. This
pull request contains:
- Fix for a recent regression in io-wq worker creation
- Tracing cleanup
- Use READ_ONCE/WRITE_ONCE consistently for ring-mapped kbufs. Mostly
for documentation purposes, indicating that these fields are shared
with userspace (a minimal sketch of the pattern follows this message)
- Fix for POLL_ADD losing a completion if the request is updated and
is now triggerable - e.g., if POLLIN is set with the update and the
polled file is readable
- In conjunction with the above fix, also unify how poll wait queue
entries are deleted together with the head update. We had 3 different
spots doing both the list deletion and the head write, with only one
of them nicely documented. Abstract that into a helper and use it
consistently (sketched after the shortlog below)
- Small series from Joanne fixing an issue with buffer cloning, and
cleaning up the arg validation"
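
For the READ_ONCE/WRITE_ONCE item above, here is a minimal sketch of the pattern on a simplified stand-in for a userspace-mapped buffer ring. The struct layout and the example_* helper names are assumptions made for illustration, not the kernel's actual kbuf code; only the READ_ONCE()/WRITE_ONCE() usage mirrors what the pull message describes.

```c
/* Illustrative sketch only -- not the kernel's actual kbuf code. */
#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <linux/types.h>

/* Simplified stand-in for a buffer ring that userspace has mmap'ed. */
struct example_buf {
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 resv;
};

struct example_buf_ring {
	__u16 tail;
	struct example_buf bufs[];
};

/* Userspace may rewrite a slot at any time: load it exactly once. */
static inline __u64 example_read_addr(struct example_buf_ring *br,
				      unsigned int idx)
{
	return READ_ONCE(br->bufs[idx].addr);
}

/* The tail is polled by userspace: make the store a single, visible write. */
static inline void example_publish_tail(struct example_buf_ring *br, __u16 tail)
{
	WRITE_ONCE(br->tail, tail);
}
```

Besides documenting that userspace can race with these accesses, READ_ONCE()/WRITE_ONCE() also keep the compiler from tearing, re-reading, or caching them.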
* tag 'io_uring-6.19-20251208' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
io_uring/poll: unify poll waitqueue entry and list removal
io_uring/kbuf: use WRITE_ONCE() for userspace-shared buffer ring fields
io_uring/kbuf: use READ_ONCE() for userspace-mapped memory
io_uring/rsrc: fix lost entries after cloned range
io_uring/rsrc: rename misleading src_node variable in io_clone_buffers()
io_uring/rsrc: clean up buffer cloning arg validation
io_uring/trace: rename io_uring_queue_async_work event "rw" field
io_uring/io-wq: always retry worker create on ERESTART*
io_uring/poll: correctly handle io_poll_add() return value on update
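
The "io_uring/poll: unify poll waitqueue entry and list removal" commit above pairs the list deletion with the head write behind a single helper. Below is a minimal sketch of that pattern under assumed names (example_poll, example_poll_remove_entry); the helper actually merged in io_uring/poll.c may differ in naming and in its exact locking and memory-ordering choices.

```c
/* Illustrative sketch only -- names are hypothetical, not io_uring's. */
#include <linux/spinlock.h>
#include <linux/wait.h>

struct example_poll {
	struct wait_queue_head *head;	/* queue we are armed on, or NULL */
	struct wait_queue_entry wait;	/* our entry on that queue */
};

/*
 * Do the list deletion and the head write in one place, so every
 * call site detaches a poll entry in exactly the same way.
 */
static void example_poll_remove_entry(struct example_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (!head)
		return;		/* already detached */

	spin_lock_irq(&head->lock);
	list_del_init(&poll->wait.entry);	/* off the wait queue... */
	smp_store_release(&poll->head, NULL);	/* ...and forget the head */
	spin_unlock_irq(&head->lock);
}
```

Centralizing both steps in one helper is what removes the three subtly different open-coded copies the pull message mentions.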
Diffstat (limited to 'io_uring/rsrc.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | io_uring/rsrc.c | 47 |
1 file changed, 21 insertions, 26 deletions
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 3765a50329a8..a63474b331bf 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1186,12 +1186,16 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
                 return -EBUSY;

         nbufs = src_ctx->buf_table.nr;
+        if (!nbufs)
+                return -ENXIO;
         if (!arg->nr)
                 arg->nr = nbufs;
         else if (arg->nr > nbufs)
                 return -EINVAL;
         else if (arg->nr > IORING_MAX_REG_BUFFERS)
                 return -EINVAL;
+        if (check_add_overflow(arg->nr, arg->src_off, &off) || off > nbufs)
+                return -EOVERFLOW;
         if (check_add_overflow(arg->nr, arg->dst_off, &nbufs))
                 return -EOVERFLOW;
         if (nbufs > IORING_MAX_REG_BUFFERS)
@@ -1201,31 +1205,16 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
         if (ret)
                 return ret;

-        /* Fill entries in data from dst that won't overlap with src */
+        /* Copy original dst nodes from before the cloned range */
         for (i = 0; i < min(arg->dst_off, ctx->buf_table.nr); i++) {
-                struct io_rsrc_node *src_node = ctx->buf_table.nodes[i];
+                struct io_rsrc_node *node = ctx->buf_table.nodes[i];

-                if (src_node) {
-                        data.nodes[i] = src_node;
-                        src_node->refs++;
+                if (node) {
+                        data.nodes[i] = node;
+                        node->refs++;
                 }
         }

-        ret = -ENXIO;
-        nbufs = src_ctx->buf_table.nr;
-        if (!nbufs)
-                goto out_free;
-        ret = -EINVAL;
-        if (!arg->nr)
-                arg->nr = nbufs;
-        else if (arg->nr > nbufs)
-                goto out_free;
-        ret = -EOVERFLOW;
-        if (check_add_overflow(arg->nr, arg->src_off, &off))
-                goto out_free;
-        if (off > nbufs)
-                goto out_free;
-
         off = arg->dst_off;
         i = arg->src_off;
         nr = arg->nr;
@@ -1238,8 +1227,8 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
                 } else {
                         dst_node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
                         if (!dst_node) {
-                                ret = -ENOMEM;
-                                goto out_free;
+                                io_rsrc_data_free(ctx, &data);
+                                return -ENOMEM;
                         }

                         refcount_inc(&src_node->buf->refs);
@@ -1249,6 +1238,16 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
                 i++;
         }

+        /* Copy original dst nodes from after the cloned range */
+        for (i = nbufs; i < ctx->buf_table.nr; i++) {
+                struct io_rsrc_node *node = ctx->buf_table.nodes[i];
+
+                if (node) {
+                        data.nodes[i] = node;
+                        node->refs++;
+                }
+        }
+
         /*
          * If asked for replace, put the old table. data->nodes[] holds both
          * old and new nodes at this point.
@@ -1265,10 +1264,6 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
         WARN_ON_ONCE(ctx->buf_table.nr);
         ctx->buf_table = data;
         return 0;
-
-out_free:
-        io_rsrc_data_free(ctx, &data);
-        return ret;
 }

 /*
