| | | |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-09 09:07:28 +0900 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-09 09:07:28 +0900 |
| commit | cfd4039213e7b5a828c5b78e1b5235cac91af53d (patch) | |
| tree | 971782550d86b32e927be6188883753263e02415 /io_uring/poll.c | |
| parent | 4482ebb2970efa58173075c101426b2f3af40b41 (diff) | |
| parent | 55d57b3bcc7efcab812a8179e2dc17d781302997 (diff) | |
Merge tag 'io_uring-6.19-20251208' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull io_uring updates from Jens Axboe:
"Followup set of fixes for io_uring for this merge window. These are
either later fixes, or cleanups that don't make sense to defer. This
pull request contains:
- Fix for a recent regression in io-wq worker creation
- Tracing cleanup
- Use READ_ONCE/WRITE_ONCE consistently for ring-mapped kbufs. Mostly
for documentation purposes, indicating that they are shared with
userspace (see the sketch after this message)
- Fix for POLL_ADD losing a completion, if the request is updated and
is now triggerable - e.g., if POLLIN is set with the update, and the
polled file is readable (see the userspace example after the diff below)
- In conjunction with the above fix, also unify how poll wait queue
entries are deleted with the head update. We had 3 different spots
doing both the list deletion and head write, with one of them
nicely documented. Abstract that into a helper and use it
consistently
- Small series from Joanne fixing an issue with buffer cloning, and
cleaning up the arg validation"
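
The READ_ONCE/WRITE_ONCE item above concerns fields that live in memory mapped into userspace, where plain loads and stores hide the fact that another party can modify or observe them concurrently. Below is a minimal sketch of that idiom using a made-up `struct shared_ring`; it is not the actual io_uring kbuf layout, only an illustration of annotating the userspace-shared accesses:

```c
#include <linux/types.h>
#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */

/* Hypothetical ring shared with userspace via mmap(); not the io_uring layout. */
struct shared_ring {
	u32 head;	/* written by the kernel, read by userspace */
	u32 tail;	/* written by userspace, read by the kernel */
};

static u32 ring_entries_ready(struct shared_ring *ring)
{
	/* Userspace can change tail at any time: fetch it exactly once. */
	u32 tail = READ_ONCE(ring->tail);

	return tail - ring->head;
}

static void ring_consume(struct shared_ring *ring, u32 nr)
{
	/* Publish the new head with a single store visible to userspace. */
	WRITE_ONCE(ring->head, ring->head + nr);
}
```

As the message says, the annotations are mostly documentation, but they also prevent the compiler from tearing or duplicating the shared accesses.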
* tag 'io_uring-6.19-20251208' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
io_uring/poll: unify poll waitqueue entry and list removal
io_uring/kbuf: use WRITE_ONCE() for userspace-shared buffer ring fields
io_uring/kbuf: use READ_ONCE() for userspace-mapped memory
io_uring/rsrc: fix lost entries after cloned range
io_uring/rsrc: rename misleading src_node variable in io_clone_buffers()
io_uring/rsrc: clean up buffer cloning arg validation
io_uring/trace: rename io_uring_queue_async_work event "rw" field
io_uring/io-wq: always retry worker create on ERESTART*
io_uring/poll: correctly handle io_poll_add() return value on update
Diffstat (limited to 'io_uring/poll.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | io_uring/poll.c | 52 |
1 file changed, 29 insertions, 23 deletions
```diff
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 8aa4e3a31e73..aac4b3b881fb 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -138,14 +138,32 @@ static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
 	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
 }
 
+static void io_poll_remove_waitq(struct io_poll *poll)
+{
+	/*
+	 * If the waitqueue is being freed early but someone is already holds
+	 * ownership over it, we have to tear down the request as best we can.
+	 * That means immediately removing the request from its waitqueue and
+	 * preventing all further accesses to the waitqueue via the request.
+	 */
+	list_del_init(&poll->wait.entry);
+
+	/*
+	 * Careful: this *must* be the last step, since as soon as req->head is
+	 * NULL'ed out, the request can be completed and freed, since
+	 * io_poll_remove_entry() will no longer need to take the waitqueue
+	 * lock.
+	 */
+	smp_store_release(&poll->head, NULL);
+}
+
 static inline void io_poll_remove_entry(struct io_poll *poll)
 {
 	struct wait_queue_head *head = smp_load_acquire(&poll->head);
 
 	if (head) {
 		spin_lock_irq(&head->lock);
-		list_del_init(&poll->wait.entry);
-		poll->head = NULL;
+		io_poll_remove_waitq(poll);
 		spin_unlock_irq(&head->lock);
 	}
 }
@@ -368,23 +386,7 @@ static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
 	io_poll_mark_cancelled(req);
 	/* we have to kick tw in case it's not already */
 	io_poll_execute(req, 0);
-
-	/*
-	 * If the waitqueue is being freed early but someone is already
-	 * holds ownership over it, we have to tear down the request as
-	 * best we can. That means immediately removing the request from
-	 * its waitqueue and preventing all further accesses to the
-	 * waitqueue via the request.
-	 */
-	list_del_init(&poll->wait.entry);
-
-	/*
-	 * Careful: this *must* be the last step, since as soon
-	 * as req->head is NULL'ed out, the request can be
-	 * completed and freed, since aio_poll_complete_work()
-	 * will no longer need to take the waitqueue lock.
-	 */
-	smp_store_release(&poll->head, NULL);
+	io_poll_remove_waitq(poll);
 
 	return 1;
 }
@@ -413,8 +415,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 
 	/* optional, saves extra locking for removal in tw handler */
 	if (mask && poll->events & EPOLLONESHOT) {
-		list_del_init(&poll->wait.entry);
-		poll->head = NULL;
+		io_poll_remove_waitq(poll);
 		if (wqe_is_double(wait))
 			req->flags &= ~REQ_F_DOUBLE_POLL;
 		else
@@ -937,12 +938,17 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 		ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
 
 		/* successfully updated, don't complete poll request */
-		if (!ret2 || ret2 == -EIOCBQUEUED)
+		if (ret2 == IOU_ISSUE_SKIP_COMPLETE)
 			goto out;
+		/* request completed as part of the update, complete it */
+		else if (ret2 == IOU_COMPLETE)
+			goto complete;
 	}
 
-	req_set_fail(preq);
 	io_req_set_res(preq, -ECANCELED, 0);
+complete:
+	if (preq->cqe.res < 0)
+		req_set_fail(preq);
 	preq->io_task_work.func = io_req_task_complete;
 	io_req_task_work_add(preq);
 out:
```
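
For context on the POLL_ADD fix, here is the userspace example referenced in the message above: a rough sequence that exercises the updated-and-now-triggerable case. It assumes liburing 2.2 or newer, where io_uring_prep_poll_update() takes 64-bit user_data values; the file descriptors and user_data values are arbitrary and error handling is omitted:

```c
#include <liburing.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fds[2];

	pipe(fds);
	write(fds[1], "x", 1);		/* the read end is already readable */
	io_uring_queue_init(8, &ring, 0);

	/* Arm a poll for POLLOUT on the read end: it will never trigger. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, fds[0], POLLOUT);
	sqe->user_data = 1;
	io_uring_submit(&ring);

	/*
	 * Update the poll mask to POLLIN, which is already satisfied. With
	 * the fix above, the completion for user_data 1 should be posted
	 * instead of being lost.
	 */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_update(sqe, 1, 1, POLLIN, IORING_POLL_UPDATE_EVENTS);
	sqe->user_data = 2;
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);	/* expect CQEs for both requests */
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}
```

Before the fix, updating the poll mask to an already-satisfied event could leave the original request without a completion; with the series above, the CQE for the updated request should arrive once the update is applied.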
