Diffstat (limited to 'io_uring')
 io_uring/cancel.c    |  5 +++++
 io_uring/io-wq.c     | 11 ++++-------
 io_uring/io_uring.c  |  7 ++++++-
 io_uring/memmap.c    |  9 ++++-----
 io_uring/openclose.c |  2 +-
 io_uring/register.c  |  2 ++
 io_uring/rsrc.c      |  1 +
 io_uring/tctx.c      |  8 ++++----
 8 files changed, 27 insertions(+), 18 deletions(-)
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index ca12ac10c0ae..07b8d852218b 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -184,7 +184,9 @@ static int __io_async_cancel(struct io_cancel_data *cd,
} while (1);
/* slow path, try all io-wq's */
+ __set_current_state(TASK_RUNNING);
io_ring_submit_lock(ctx, issue_flags);
+ mutex_lock(&ctx->tctx_lock);
ret = -ENOENT;
list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
ret = io_async_cancel_one(node->task->io_uring, cd);
@@ -194,6 +196,7 @@ static int __io_async_cancel(struct io_cancel_data *cd,
nr++;
}
}
+ mutex_unlock(&ctx->tctx_lock);
io_ring_submit_unlock(ctx, issue_flags);
return all ? nr : ret;
}
@@ -484,6 +487,7 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
bool ret = false;
mutex_lock(&ctx->uring_lock);
+ mutex_lock(&ctx->tctx_lock);
list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
struct io_uring_task *tctx = node->task->io_uring;
@@ -496,6 +500,7 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
}
+ mutex_unlock(&ctx->tctx_lock);
mutex_unlock(&ctx->uring_lock);
return ret;
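
Both hunks above put the tctx_list walk under a new dedicated ctx->tctx_lock, nested inside uring_lock (the matching writer-side switch is in the tctx.c hunks at the end). The added __set_current_state(TASK_RUNNING) resets the task state before blocking on the mutexes, since the caller may reach this slow path in a prepared-to-wait state. A minimal sketch of the list-plus-dedicated-mutex idiom; my_ctx and my_node are illustrative stand-ins, not the kernel's structs:

#include <linux/list.h>
#include <linux/mutex.h>

struct my_ctx {
	struct mutex tctx_lock;		/* guards tctx_list */
	struct list_head tctx_list;
};

struct my_node {
	struct list_head ctx_node;	/* linked into my_ctx::tctx_list */
};

static void my_ctx_init(struct my_ctx *ctx)
{
	mutex_init(&ctx->tctx_lock);
	INIT_LIST_HEAD(&ctx->tctx_list);
}

/* Readers take the dedicated mutex, not uring_lock, to walk the list. */
static int my_count_nodes(struct my_ctx *ctx)
{
	struct my_node *node;
	int nr = 0;

	mutex_lock(&ctx->tctx_lock);
	list_for_each_entry(node, &ctx->tctx_list, ctx_node)
		nr++;
	mutex_unlock(&ctx->tctx_lock);
	return nr;
}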
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index cd13d8aac3d2..9fd9f6ab722c 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -947,16 +947,13 @@ static bool io_acct_for_each_worker(struct io_wq_acct *acct,
return ret;
}
-static bool io_wq_for_each_worker(struct io_wq *wq,
+static void io_wq_for_each_worker(struct io_wq *wq,
bool (*func)(struct io_worker *, void *),
void *data)
{
- for (int i = 0; i < IO_WQ_ACCT_NR; i++) {
- if (!io_acct_for_each_worker(&wq->acct[i], func, data))
- return false;
- }
-
- return true;
+ for (int i = 0; i < IO_WQ_ACCT_NR; i++)
+ if (io_acct_for_each_worker(&wq->acct[i], func, data))
+ break;
}
static bool io_wq_worker_wake(struct io_worker *worker, void *data)
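
Note the inverted contract here: io_acct_for_each_worker returning true now means "stop iterating", and io_wq_for_each_worker drops its return value, which the diff suggests no caller consumed. The idiom, sketched with hypothetical names (walk_group, walk_all_groups, visit_fn are stand-ins, not the io-wq API):

#include <linux/types.h>

#define GROUP_NR	2
#define GROUP_SIZE	4

struct worker;				/* opaque for this sketch */

/* Callback returns true once it has found what it wants. */
typedef bool (*visit_fn)(struct worker *w, void *data);

static struct worker *groups[GROUP_NR][GROUP_SIZE];	/* toy table */

static bool walk_group(int group, visit_fn fn, void *data)
{
	for (int i = 0; i < GROUP_SIZE; i++)
		if (groups[group][i] && fn(groups[group][i], data))
			return true;	/* propagate the stop request */
	return false;
}

/* Returns nothing: the verdict only matters for early termination. */
static void walk_all_groups(visit_fn fn, void *data)
{
	for (int i = 0; i < GROUP_NR; i++)
		if (walk_group(i, fn, data))
			break;
}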
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 6cb24cdf8e68..87a87396e940 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -340,6 +340,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->ltimeout_list);
init_llist_head(&ctx->work_llist);
INIT_LIST_HEAD(&ctx->tctx_list);
+ mutex_init(&ctx->tctx_lock);
ctx->submit_state.free_list.next = NULL;
INIT_HLIST_HEAD(&ctx->waitid_list);
xa_init_flags(&ctx->zcrx_ctxs, XA_FLAGS_ALLOC);
@@ -864,7 +865,7 @@ static __cold bool io_cqe_overflow_locked(struct io_ring_ctx *ctx,
{
struct io_overflow_cqe *ocqe;
- ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_ATOMIC);
+ ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_NOWAIT);
return io_cqring_add_overflow(ctx, ocqe);
}
@@ -3045,6 +3046,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
exit.ctx = ctx;
mutex_lock(&ctx->uring_lock);
+ mutex_lock(&ctx->tctx_lock);
while (!list_empty(&ctx->tctx_list)) {
WARN_ON_ONCE(time_after(jiffies, timeout));
@@ -3056,6 +3058,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
if (WARN_ON_ONCE(ret))
continue;
+ mutex_unlock(&ctx->tctx_lock);
mutex_unlock(&ctx->uring_lock);
/*
* See comment above for
@@ -3064,7 +3067,9 @@ static __cold void io_ring_exit_work(struct work_struct *work)
*/
wait_for_completion_interruptible(&exit.completion);
mutex_lock(&ctx->uring_lock);
+ mutex_lock(&ctx->tctx_lock);
}
+ mutex_unlock(&ctx->tctx_lock);
mutex_unlock(&ctx->uring_lock);
spin_lock(&ctx->completion_lock);
spin_unlock(&ctx->completion_lock);
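
Three changes here: the new tctx_lock is initialized at ctx allocation, the exit worker takes it around the tctx_list drain, and the overflow-CQE allocation switches from GFP_ATOMIC to GFP_NOWAIT. The flag swap is the subtle one: GFP_ATOMIC carries __GFP_HIGH and may dip into the emergency memory reserves, while GFP_NOWAIT simply fails fast without reclaiming or sleeping; since an overflow allocation failure is already handled, there is no reason to drain reserves for it. A sketch of the distinction (alloc_overflow_entry is a stand-in, not the tree's io_alloc_ocqe):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct overflow_entry {
	struct list_head list;
	u64 user_data;
	s32 res;
};

/*
 * GFP_NOWAIT never sleeps and never direct-reclaims, and unlike
 * GFP_ATOMIC it has no __GFP_HIGH, so a failure here cannot have
 * eaten into the atomic emergency reserves.
 */
static struct overflow_entry *alloc_overflow_entry(void)
{
	return kzalloc(sizeof(struct overflow_entry), GFP_NOWAIT);
}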
diff --git a/io_uring/memmap.c b/io_uring/memmap.c
index 18e574776ef6..7d3c5eb58480 100644
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -268,8 +268,7 @@ static void *io_region_validate_mmap(struct io_ring_ctx *ctx,
return io_region_get_ptr(mr);
}
-static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff,
- size_t sz)
+static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff)
{
struct io_ring_ctx *ctx = file->private_data;
struct io_mapped_region *region;
@@ -304,7 +303,7 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
guard(mutex)(&ctx->mmap_lock);
- ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
+ ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -336,7 +335,7 @@ unsigned long io_uring_get_unmapped_area(struct file *filp, unsigned long addr,
guard(mutex)(&ctx->mmap_lock);
- ptr = io_uring_validate_mmap_request(filp, pgoff, len);
+ ptr = io_uring_validate_mmap_request(filp, pgoff);
if (IS_ERR(ptr))
return -ENOMEM;
@@ -386,7 +385,7 @@ unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
guard(mutex)(&ctx->mmap_lock);
- ptr = io_uring_validate_mmap_request(file, pgoff, len);
+ ptr = io_uring_validate_mmap_request(file, pgoff);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
diff --git a/io_uring/openclose.c b/io_uring/openclose.c
index bfeb91b31bba..15dde9bd6ff6 100644
--- a/io_uring/openclose.c
+++ b/io_uring/openclose.c
@@ -73,13 +73,13 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
open->filename = NULL;
return ret;
}
+ req->flags |= REQ_F_NEED_CLEANUP;
open->file_slot = READ_ONCE(sqe->file_index);
if (open->file_slot && (open->how.flags & O_CLOEXEC))
return -EINVAL;
open->nofile = rlimit(RLIMIT_NOFILE);
- req->flags |= REQ_F_NEED_CLEANUP;
if (io_openat_force_async(open))
req->flags |= REQ_F_FORCE_ASYNC;
return 0;
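
The reorder matters because open->filename is already allocated at this point: REQ_F_NEED_CLEANUP must be set before the file_slot/O_CLOEXEC check can return -EINVAL, or the filename would leak on that path. The general pattern, with illustrative names (my_req, my_prep, MY_NEED_CLEANUP are stand-ins):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#define MY_NEED_CLEANUP (1u << 0)

struct my_req {
	unsigned int flags;
	char *name;
};

static int my_prep(struct my_req *req, bool bad_combo)
{
	req->name = kstrdup("example", GFP_KERNEL);
	if (!req->name)
		return -ENOMEM;
	/* Mark for cleanup *before* any further early return... */
	req->flags |= MY_NEED_CLEANUP;

	/* ...so a later validation failure cannot leak req->name. */
	if (bad_combo)
		return -EINVAL;
	return 0;
}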
diff --git a/io_uring/register.c b/io_uring/register.c
index 62d39b3ff317..3d3822ff3fd9 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -320,6 +320,7 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
return 0;
/* now propagate the restriction to all registered users */
+ mutex_lock(&ctx->tctx_lock);
list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
tctx = node->task->io_uring;
if (WARN_ON_ONCE(!tctx->io_wq))
@@ -330,6 +331,7 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
/* ignore errors, it always returns zero anyway */
(void)io_wq_max_workers(tctx->io_wq, new_count);
}
+ mutex_unlock(&ctx->tctx_lock);
return 0;
err:
if (sqd) {
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index a63474b331bf..41c89f5c616d 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1059,6 +1059,7 @@ static int io_import_kbuf(int ddir, struct iov_iter *iter,
if (count < imu->len) {
const struct bio_vec *bvec = iter->bvec;
+ len += iter->iov_offset;
while (len > bvec->bv_len) {
len -= bvec->bv_len;
bvec++;
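
The one-liner accounts for iter->iov_offset: when the iterator starts partway into its first bio_vec, the distance to walk must include that starting offset, or the skip loop stops one entry short. The walk in isolation (skip_bvecs is a stand-in helper; the bio_vec fields are the kernel's):

#include <linux/bvec.h>

/*
 * Advance to the bvec containing 'len' bytes measured from the
 * iterator's logical start, which sits 'iov_offset' bytes into
 * the first entry; hence the offset is added to len up front.
 */
static const struct bio_vec *skip_bvecs(const struct bio_vec *bvec,
					size_t len, size_t iov_offset)
{
	len += iov_offset;
	while (len > bvec->bv_len) {
		len -= bvec->bv_len;
		bvec++;
	}
	return bvec;
}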
diff --git a/io_uring/tctx.c b/io_uring/tctx.c
index 5b66755579c0..6d6f44215ec8 100644
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -136,9 +136,9 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
return ret;
}
- mutex_lock(&ctx->uring_lock);
+ mutex_lock(&ctx->tctx_lock);
list_add(&node->ctx_node, &ctx->tctx_list);
- mutex_unlock(&ctx->uring_lock);
+ mutex_unlock(&ctx->tctx_lock);
}
return 0;
}
@@ -176,9 +176,9 @@ __cold void io_uring_del_tctx_node(unsigned long index)
WARN_ON_ONCE(current != node->task);
WARN_ON_ONCE(list_empty(&node->ctx_node));
- mutex_lock(&node->ctx->uring_lock);
+ mutex_lock(&node->ctx->tctx_lock);
list_del(&node->ctx_node);
- mutex_unlock(&node->ctx->uring_lock);
+ mutex_unlock(&node->ctx->tctx_lock);
if (tctx->last == node->ctx)
tctx->last = NULL;
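
With these writer-side hunks, tctx_list additions and deletions take the same ctx->tctx_lock as the readers in cancel.c, io_uring.c and register.c, so uring_lock no longer has to be held just to touch the list. Completing the sketch from the cancel.c note, with the same illustrative my_ctx/my_node types:

static void my_node_add(struct my_ctx *ctx, struct my_node *node)
{
	mutex_lock(&ctx->tctx_lock);
	list_add(&node->ctx_node, &ctx->tctx_list);
	mutex_unlock(&ctx->tctx_lock);
}

static void my_node_del(struct my_ctx *ctx, struct my_node *node)
{
	mutex_lock(&ctx->tctx_lock);
	list_del(&node->ctx_node);
	mutex_unlock(&ctx->tctx_lock);
}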