author		Linus Torvalds <torvalds@linux-foundation.org>	2026-01-23 12:51:00 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2026-01-23 12:51:00 -0800
commit		7907f673d0ea569b23274ce2fc75f479b905e547 (patch)
tree		097a4b3bad6ca599ff9272dd007c220cbf24674c /io_uring
parent		b33d70625977126d59f59dfd8c0bf9a75b4591c0 (diff)
parent		145e0074392587606aa5df353d0e761f0b8357d5 (diff)
Merge tag 'io_uring-6.19-20260122' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull io_uring fixes from Jens Axboe:

 - Fix for a potential leak of an iovec, if a specific cleanup path is
   used and the rw_cache is full at the time of the call

 - Fix for a regression added in this cycle, where waitid should be
   using proper release/acquire semantics for updating the wait queue
   head

 - Check for the cancelation bit being set for every work item processed
   by io-wq, not just at the start of the loop. Has no real practical
   implications other than to shut up syzbot doing crazy things that
   grossly overload a system, hence slowing down ring exit

 - A few selftest additions, updating the mini_liburing that selftests
   use

* tag 'io_uring-6.19-20260122' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  selftests/io_uring: support NO_SQARRAY in miniliburing
  selftests/io_uring: add io_uring_queue_init_params
  io_uring/io-wq: check IO_WQ_BIT_EXIT inside work run loop
  io_uring/waitid: fix KCSAN warning on io_waitid->head
  io_uring/rw: free potentially allocated iovec on cache put failure
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io-wq.c	2
-rw-r--r--	io_uring/rw.c	15
-rw-r--r--	io_uring/waitid.c	6
3 files changed, 15 insertions, 8 deletions
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 9fd9f6ab722c..2fa7d3601edb 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -598,9 +598,9 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
 	__releases(&acct->lock)
 {
 	struct io_wq *wq = worker->wq;
-	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
 
 	do {
+		bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
 		struct io_wq_work *work;
 
 		/*
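
The change above moves the IO_WQ_BIT_EXIT test inside the work loop, so a cancelation that lands mid-run is seen by the remaining work items rather than only on the next entry into the loop. As a rough user-space sketch of that difference (plain C11 atomics; the names wq_exit, queue_pop and worker_loop are invented for illustration and are not io_uring APIs):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the io-wq structures, for illustration only. */
static atomic_bool wq_exit;     /* models IO_WQ_BIT_EXIT in wq->state */
static int pending = 5;         /* models the per-acct work list      */

static bool queue_pop(int *item)
{
	if (pending <= 0)
		return false;
	*item = pending--;
	return true;
}

static void worker_loop(void)
{
	int item;

	do {
		/* Re-read the exit flag for every work item, as in the fix:
		 * a cancelation that arrives mid-run is observed by all of
		 * the remaining items, not just on the next loop entry. */
		bool do_kill = atomic_load(&wq_exit);

		if (!queue_pop(&item))
			break;
		printf("work item %d%s\n", item, do_kill ? " (cancelled)" : "");

		if (item == 4)
			atomic_store(&wq_exit, true);  /* simulate ring exit mid-run */
	} while (1);
}

int main(void)
{
	worker_loop();
	return 0;
}

In this model, sampling the flag once before the loop would leave items 3, 2 and 1 running uncancelled; re-reading it per iteration marks them as soon as the flag flips.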
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 70ca88cc1f54..28555bc85ba0 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -144,19 +144,22 @@ static inline int io_import_rw_buffer(int rw, struct io_kiocb *req,
 	return 0;
 }
 
-static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
+static bool io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_async_rw *rw = req->async_data;
 
 	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
-		return;
+		return false;
 
 	io_alloc_cache_vec_kasan(&rw->vec);
 	if (rw->vec.nr > IO_VEC_CACHE_SOFT_CAP)
 		io_vec_free(&rw->vec);
 
-	if (io_alloc_cache_put(&req->ctx->rw_cache, rw))
+	if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
 		io_req_async_data_clear(req, 0);
+		return true;
+	}
+	return false;
 }
 
 static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
@@ -190,7 +193,11 @@ static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
 	 */
 	if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
 		req->flags &= ~REQ_F_NEED_CLEANUP;
-		io_rw_recycle(req, issue_flags);
+		if (!io_rw_recycle(req, issue_flags)) {
+			struct io_async_rw *rw = req->async_data;
+
+			io_vec_free(&rw->vec);
+		}
 	}
 }
 
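
The rw.c change makes io_rw_recycle() report whether the async data actually made it into the rw_cache; when the cache is full, the caller now frees the iovec instead of leaking it. A minimal user-space model of that ownership rule (the cache, buffer and cleanup names below are hypothetical, not the io_uring ones):

#include <stdbool.h>
#include <stdlib.h>

#define CACHE_SLOTS 4			/* arbitrary cap for the model */

/* Hypothetical miniature of an alloc cache holding reusable buffers. */
struct xbuf {
	void *iov;			/* models the cached iovec */
};

static struct xbuf *cache[CACHE_SLOTS];
static int cached;

/* Returns false when the cache is full; ownership stays with the caller. */
static bool cache_put(struct xbuf *b)
{
	if (cached == CACHE_SLOTS)
		return false;
	cache[cached++] = b;
	return true;
}

/* Mirrors the fixed cleanup path: if the object cannot be parked in the
 * cache, the iovec it carries must be released here or it is leaked. */
static void xbuf_cleanup(struct xbuf *b)
{
	if (cache_put(b))
		return;			/* cache now owns b and b->iov */
	free(b->iov);
	free(b);
}

int main(void)
{
	for (int i = 0; i < 8; i++) {
		struct xbuf *b = malloc(sizeof(*b));

		b->iov = malloc(64);
		xbuf_cleanup(b);	/* the last four miss the cache */
	}

	/* Drain whatever the cache kept, as a ring teardown would. */
	while (cached) {
		struct xbuf *b = cache[--cached];

		free(b->iov);
		free(b);
	}
	return 0;
}

The point of returning a bool is that only the caller knows whether anything else still references the buffer; on a failed put, freeing it in the cleanup path is the last chance to reclaim it.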
diff --git a/io_uring/waitid.c b/io_uring/waitid.c
index 2d4cbd47c67c..d25d60aed6af 100644
--- a/io_uring/waitid.c
+++ b/io_uring/waitid.c
@@ -114,11 +114,11 @@ static void io_waitid_remove_wq(struct io_kiocb *req)
 	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
 	struct wait_queue_head *head;
 
-	head = READ_ONCE(iw->head);
+	head = smp_load_acquire(&iw->head);
 	if (head) {
 		struct io_waitid_async *iwa = req->async_data;
 
-		iw->head = NULL;
+		smp_store_release(&iw->head, NULL);
 		spin_lock_irq(&head->lock);
 		list_del_init(&iwa->wo.child_wait.entry);
 		spin_unlock_irq(&head->lock);
@@ -246,7 +246,7 @@ static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
 		return 0;
 
 	list_del_init(&wait->entry);
-	iw->head = NULL;
+	smp_store_release(&iw->head, NULL);
 
 	/* cancel is in progress */
 	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
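
The waitid change pairs an smp_load_acquire() on iw->head with smp_store_release() at the two places that clear it, giving the concurrent accesses proper ordering and annotation for KCSAN. A rough user-space analogue of that pairing using C11 acquire/release atomics (the thread functions and struct below are invented for illustration, not io_uring code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical miniature of the iw->head handoff between the wakeup
 * callback and the remove/cancel path. */
struct waitq { int dummy; };

static struct waitq wq_instance;
static _Atomic(struct waitq *) head = &wq_instance;

static void *wakeup_side(void *arg)
{
	(void)arg;
	/* Clearing the pointer with release ordering makes every write done
	 * before this store (the list removal in the real code) visible to
	 * an acquire reader that still observes the old non-NULL value. */
	atomic_store_explicit(&head, NULL, memory_order_release);
	return NULL;
}

static void *remove_side(void *arg)
{
	(void)arg;
	/* Acquire pairs with the release above; only touch the queue if the
	 * pointer is still set. */
	struct waitq *h = atomic_load_explicit(&head, memory_order_acquire);

	if (h)
		printf("head still set, safe to take its lock\n");
	else
		printf("head already cleared by the wakeup side\n");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, wakeup_side, NULL);
	pthread_create(&b, NULL, remove_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}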