diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-21 10:05:49 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-21 10:05:49 -0800 |
| commit | f9d66e64a2bcb979d47eb7d67aa7e9b454fd5d15 (patch) | |
| tree | ed6bc2b311de8f14f8d07678f44e3d8133e21569 | |
| parent | 817c16e5655958c34035a10c8c4b93e33e529624 (diff) | |
| parent | ea129e55c9e06a51a93c3f5ef3e32a6cfa3f8ec7 (diff) | |
Merge tag 'io_uring-20260221' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull io_uring fixes from Jens Axboe:
- A fix for a missing URING_CMD128 opcode check, fixing an issue with
the SQE mixed mode support introduced in 6.19. Merged late due to
having multiple dependencies
- Add sqe->cmd size checking for big SQEs, similar to what we have for
normal sized SQEs
- Fix a race condition in zcrx that leads to a double free
* tag 'io_uring-20260221' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
io_uring: Add size check for sqe->cmd
io_uring: add IORING_OP_URING_CMD128 to opcode checks
io_uring/zcrx: fix user_ref race between scrub and refill paths
| -rw-r--r-- | drivers/block/ublk_drv.c | 12 | ||||
| -rw-r--r-- | drivers/nvme/host/ioctl.c | 3 | ||||
| -rw-r--r-- | fs/fuse/dev_uring.c | 6 | ||||
| -rw-r--r-- | include/linux/io_uring/cmd.h | 15 | ||||
| -rw-r--r-- | io_uring/io_uring.h | 6 | ||||
| -rw-r--r-- | io_uring/kbuf.c | 2 | ||||
| -rw-r--r-- | io_uring/rw.c | 4 | ||||
| -rw-r--r-- | io_uring/zcrx.c | 10 |
8 files changed, 41 insertions, 17 deletions
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index c13cda58a7c6..46a785ce078d 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -3255,7 +3255,8 @@ static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd, unsigned int issue_flags) { /* May point to userspace-mapped memory */ - const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe); + const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe, + struct ublksrv_io_cmd); u16 buf_idx = UBLK_INVALID_BUF_IDX; struct ublk_device *ub = cmd->file->private_data; struct ublk_queue *ubq; @@ -3833,7 +3834,8 @@ static int ublk_validate_batch_fetch_cmd(struct ublk_batch_io_data *data) static int ublk_handle_non_batch_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) { - const struct ublksrv_io_cmd *ub_cmd = io_uring_sqe_cmd(cmd->sqe); + const struct ublksrv_io_cmd *ub_cmd = io_uring_sqe_cmd(cmd->sqe, + struct ublksrv_io_cmd); struct ublk_device *ub = cmd->file->private_data; unsigned tag = READ_ONCE(ub_cmd->tag); unsigned q_id = READ_ONCE(ub_cmd->q_id); @@ -3862,7 +3864,8 @@ static int ublk_handle_non_batch_cmd(struct io_uring_cmd *cmd, static int ublk_ch_batch_io_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) { - const struct ublk_batch_io *uc = io_uring_sqe_cmd(cmd->sqe); + const struct ublk_batch_io *uc = io_uring_sqe_cmd(cmd->sqe, + struct ublk_batch_io); struct ublk_device *ub = cmd->file->private_data; struct ublk_batch_io_data data = { .ub = ub, @@ -5253,7 +5256,8 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) { /* May point to userspace-mapped memory */ - const struct ublksrv_ctrl_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe); + const struct ublksrv_ctrl_cmd *ub_src = io_uring_sqe128_cmd(cmd->sqe, + struct ublksrv_ctrl_cmd); struct ublksrv_ctrl_cmd header; struct ublk_device *ub = NULL; u32 cmd_op = cmd->cmd_op; diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 
fb62633ccbb0..8844bbd39515 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -447,7 +447,8 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec) { struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); - const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe); + const struct nvme_uring_cmd *cmd = io_uring_sqe128_cmd(ioucmd->sqe, + struct nvme_uring_cmd); struct request_queue *q = ns ? ns->queue : ctrl->admin_q; struct nvme_uring_data d; struct nvme_command c; diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c index 5ceb217ced1b..60f2058feb74 100644 --- a/fs/fuse/dev_uring.c +++ b/fs/fuse/dev_uring.c @@ -879,7 +879,8 @@ static int fuse_ring_ent_set_commit(struct fuse_ring_ent *ent) static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags, struct fuse_conn *fc) { - const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe); + const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe128_cmd(cmd->sqe, + struct fuse_uring_cmd_req); struct fuse_ring_ent *ent; int err; struct fuse_ring *ring = fc->ring; @@ -1083,7 +1084,8 @@ fuse_uring_create_ring_ent(struct io_uring_cmd *cmd, static int fuse_uring_register(struct io_uring_cmd *cmd, unsigned int issue_flags, struct fuse_conn *fc) { - const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe); + const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe128_cmd(cmd->sqe, + struct fuse_uring_cmd_req); struct fuse_ring *ring = smp_load_acquire(&fc->ring); struct fuse_ring_queue *queue; struct fuse_ring_ent *ent; diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h index 375fd048c4cb..331dcbefe72f 100644 --- a/include/linux/io_uring/cmd.h +++ b/include/linux/io_uring/cmd.h @@ -20,10 +20,17 @@ struct io_uring_cmd { u8 unused[8]; }; -static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe) -{ - return sqe->cmd; -} +#define 
io_uring_sqe128_cmd(sqe, type) ({ \ + BUILD_BUG_ON(sizeof(type) > ((2 * sizeof(struct io_uring_sqe)) - \ + offsetof(struct io_uring_sqe, cmd))); \ + (const type *)(sqe)->cmd; \ +}) + +#define io_uring_sqe_cmd(sqe, type) ({ \ + BUILD_BUG_ON(sizeof(type) > (sizeof(struct io_uring_sqe) - \ + offsetof(struct io_uring_sqe, cmd))); \ + (const type *)(sqe)->cmd; \ +}) static inline void io_uring_cmd_private_sz_check(size_t cmd_sz) { diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h index 503663d6fd6d..0fa844faf287 100644 --- a/io_uring/io_uring.h +++ b/io_uring/io_uring.h @@ -530,6 +530,12 @@ static inline bool io_file_can_poll(struct io_kiocb *req) return false; } +static inline bool io_is_uring_cmd(const struct io_kiocb *req) +{ + return req->opcode == IORING_OP_URING_CMD || + req->opcode == IORING_OP_URING_CMD128; +} + static inline ktime_t io_get_time(struct io_ring_ctx *ctx) { if (ctx->clockid == CLOCK_MONOTONIC) diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c index 67d4fe576473..dae5b4ab3819 100644 --- a/io_uring/kbuf.c +++ b/io_uring/kbuf.c @@ -171,7 +171,7 @@ static bool io_should_commit(struct io_kiocb *req, unsigned int issue_flags) return true; /* uring_cmd commits kbuf upfront, no need to auto-commit */ - if (!io_file_can_poll(req) && req->opcode != IORING_OP_URING_CMD) + if (!io_file_can_poll(req) && !io_is_uring_cmd(req)) return true; return false; } diff --git a/io_uring/rw.c b/io_uring/rw.c index b3971171c342..1a5f262734e8 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -1254,7 +1254,7 @@ static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob { struct file *file = req->file; - if (req->opcode == IORING_OP_URING_CMD) { + if (io_is_uring_cmd(req)) { struct io_uring_cmd *ioucmd; ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); @@ -1380,7 +1380,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) wq_list_add_tail(&req->comp_list, &ctx->submit_state.compl_reqs); nr_events++; req->cqe.flags = io_put_kbuf(req, 
req->cqe.res, NULL); - if (req->opcode != IORING_OP_URING_CMD) + if (!io_is_uring_cmd(req)) io_req_rw_cleanup(req, 0); } if (nr_events) diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c index 28150c6578e3..97984a73a95d 100644 --- a/io_uring/zcrx.c +++ b/io_uring/zcrx.c @@ -349,10 +349,14 @@ static inline atomic_t *io_get_user_counter(struct net_iov *niov) static bool io_zcrx_put_niov_uref(struct net_iov *niov) { atomic_t *uref = io_get_user_counter(niov); + int old; + + old = atomic_read(uref); + do { + if (unlikely(old == 0)) + return false; + } while (!atomic_try_cmpxchg(uref, &old, old - 1)); - if (unlikely(!atomic_read(uref))) - return false; - atomic_dec(uref); return true; } |
