Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--  io_uring/io_uring.c  108
1 file changed, 34 insertions(+), 74 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 7c98a820c8dd..55fd6d98fe12 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2601,6 +2601,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
if (!(ctx->flags & IORING_SETUP_IOPOLL))
return;
+ percpu_ref_get(&ctx->refs);
mutex_lock(&ctx->uring_lock);
while (!list_empty(&ctx->iopoll_list)) {
unsigned int nr_events = 0;
@@ -2622,6 +2623,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
}
}
mutex_unlock(&ctx->uring_lock);
+ percpu_ref_put(&ctx->refs);
}
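
The two added lines above are the pin-before-lock pattern: hold a percpu reference on the ring context so a concurrent teardown cannot free it while the reap loop holds uring_lock. A minimal sketch of that pattern with a hypothetical object type; percpu_ref_get()/percpu_ref_put() and the mutex calls are the real kernel APIs:

#include <linux/percpu-refcount.h>
#include <linux/mutex.h>

struct my_obj {				/* hypothetical stand-in for io_ring_ctx */
	struct percpu_ref refs;
	struct mutex lock;
};

static void use_safely(struct my_obj *obj)
{
	percpu_ref_get(&obj->refs);	/* pin: obj can't be freed under us */
	mutex_lock(&obj->lock);
	/* ... reap/operate on obj ... */
	mutex_unlock(&obj->lock);
	percpu_ref_put(&obj->refs);	/* may drop the final reference */
}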
static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
@@ -2668,6 +2670,11 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
break;
}
ret = io_do_iopoll(ctx, &nr_events, min);
+
+ if (task_sigpending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
} while (!ret && nr_events < min && !need_resched());
out:
mutex_unlock(&ctx->uring_lock);
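
The added task_sigpending() check gives the busy-poll loop an exit path when a signal arrives, returning -EINTR instead of spinning until min events complete. A sketch of the loop shape; struct my_ctx and do_one_pass() are hypothetical, task_sigpending() and need_resched() are the real helpers:

#include <linux/errno.h>
#include <linux/sched/signal.h>

static int poll_until(struct my_ctx *ctx, long min)
{
	unsigned int nr_events = 0;
	int ret;

	do {
		ret = do_one_pass(ctx, &nr_events, min);	/* hypothetical */
		if (task_sigpending(current))
			return -EINTR;	/* stop polling, let the signal be delivered */
	} while (!ret && nr_events < min && !need_resched());
	return ret;
}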
@@ -3147,7 +3154,7 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter
*/
const struct bio_vec *bvec = imu->bvec;
- if (offset <= bvec->bv_len) {
+ if (offset < bvec->bv_len) {
iov_iter_advance(iter, offset);
} else {
unsigned long seg_skip;
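
The comparison change fixes an off-by-one: an offset exactly equal to the first bvec's length starts at byte 0 of the second segment, so only offsets strictly inside the first segment may take the simple iov_iter_advance() fast path. Illustrative arithmetic only (assumes equal-sized, page-backed segments), not kernel code:

static void boundary_example(void)
{
	unsigned long bv_len = 4096;		/* length of the first segment */
	unsigned long offset = 4096;		/* == bv_len, allowed by the old '<=' */
	unsigned long seg = offset / bv_len;	/* 1: the *second* segment */
	unsigned long off = offset % bv_len;	/* 0: its first byte */
	/* so the fast path must require offset < bv_len */
	(void)seg; (void)off;
}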
@@ -3483,14 +3490,17 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
struct iovec *iov = iorw->fast_iov;
int ret;
+ iorw->bytes_done = 0;
+ iorw->free_iovec = NULL;
+
ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
if (unlikely(ret < 0))
return ret;
- iorw->bytes_done = 0;
- iorw->free_iovec = iov;
- if (iov)
+ if (iov) {
+ iorw->free_iovec = iov;
req->flags |= REQ_F_NEED_CLEANUP;
+ }
iov_iter_save_state(&iorw->iter, &iorw->iter_state);
return 0;
}
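
Moving the bytes_done/free_iovec initialization ahead of io_import_iovec() is the usual init-before-first-failure pattern: if the import fails and the request is unwound, cleanup reads well-defined fields instead of stale values. A generic sketch; struct rw_state and import_vectors() are hypothetical:

struct rw_state {
	size_t bytes_done;
	struct iovec *free_iovec;
	struct iovec *iov_heap;
	bool needs_cleanup;
};

static int prep_state(struct rw_state *st)
{
	int ret;

	st->bytes_done = 0;		/* safe defaults before anything can fail */
	st->free_iovec = NULL;

	ret = import_vectors(st);	/* may fail */
	if (ret < 0)
		return ret;		/* unwind sees NULL: nothing to free */

	if (st->iov_heap) {		/* publish only what cleanup must free */
		st->free_iovec = st->iov_heap;
		st->needs_cleanup = true;
	}
	return 0;
}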
@@ -4033,7 +4043,7 @@ static int io_linkat_prep(struct io_kiocb *req,
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+ if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
return -EINVAL;
if (unlikely(req->flags & REQ_F_FIXED_FILE))
return -EBADF;
@@ -4044,7 +4054,7 @@ static int io_linkat_prep(struct io_kiocb *req,
newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
lnk->flags = READ_ONCE(sqe->hardlink_flags);
- lnk->oldpath = getname(oldf);
+ lnk->oldpath = getname_uflags(oldf, lnk->flags);
if (IS_ERR(lnk->oldpath))
return PTR_ERR(lnk->oldpath);
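
Switching to getname_uflags() lets IORING_OP_LINKAT honor flags such as AT_EMPTY_PATH in the oldpath lookup, matching the plain linkat(2) syscall. A userspace illustration of what that flag means (file names are made up; the empty-oldpath form may need CAP_DAC_READ_SEARCH):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("data.tmp", O_RDWR | O_CREAT, 0600);
	if (fd < 0)
		return 1;
	/* AT_EMPTY_PATH + empty oldpath: link the file behind fd itself */
	if (linkat(fd, "", AT_FDCWD, "data.lnk", AT_EMPTY_PATH) < 0)
		perror("linkat");
	close(fd);
	return 0;
}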
@@ -5716,6 +5726,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
if (ret > 0)
return;
+ io_tw_lock(req->ctx, locked);
io_poll_remove_entries(req);
spin_lock(&ctx->completion_lock);
hash_del(&req->hash_node);
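
The added io_tw_lock() call ensures uring_lock is held before the error path tears down poll state; in this era of the code it is a small helper that takes the mutex only when the task-work run loop has not already done so. A sketch of that convention (the name tw_lock is mine):

static inline void tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;	/* the task-work run loop unlocks once at the end */
	}
}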
@@ -7068,7 +7079,8 @@ static void io_wq_submit_work(struct io_wq_work *work)
*/
if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL))
break;
-
+ if (io_wq_worker_stopped())
+ break;
/*
* If REQ_F_NOWAIT is set, then don't wait or retry with
* poll. -EAGAIN is final for that case.
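
The io_wq_worker_stopped() check keeps an exiting io-wq worker from retrying -EAGAIN forever on an IOPOLL ring. The loop shape, as a sketch; struct my_req, issue_request() and iopoll_enabled() are hypothetical, io_wq_worker_stopped() is the real helper:

static void submit_with_retry(struct my_req *req)
{
	int ret;

	do {
		ret = issue_request(req);
		if (ret != -EAGAIN || !iopoll_enabled(req))
			break;		/* done, or no IOPOLL retry needed */
		if (io_wq_worker_stopped())
			break;		/* worker exiting: don't spin on -EAGAIN */
		cond_resched();
	} while (1);
}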
@@ -7531,7 +7543,9 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
return &ctx->sq_sqes[head];
/* drop invalid entries */
+ spin_lock(&ctx->completion_lock);
ctx->cq_extra--;
+ spin_unlock(&ctx->completion_lock);
WRITE_ONCE(ctx->rings->sq_dropped,
READ_ONCE(ctx->rings->sq_dropped) + 1);
return NULL;
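
cq_extra is read and written under ->completion_lock elsewhere, so the decrement for a dropped SQE now takes the same lock instead of racing as a plain read-modify-write. In essence:

static void account_dropped(struct io_ring_ctx *ctx)
{
	spin_lock(&ctx->completion_lock);	/* same lock other cq_extra users hold */
	ctx->cq_extra--;
	spin_unlock(&ctx->completion_lock);
}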
@@ -8616,49 +8630,6 @@ out_free:
return ret;
}
-static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
- int index)
-{
-#if defined(CONFIG_UNIX)
- struct sock *sock = ctx->ring_sock->sk;
- struct sk_buff_head *head = &sock->sk_receive_queue;
- struct sk_buff *skb;
-
- /*
- * See if we can merge this file into an existing skb SCM_RIGHTS
- * file set. If there's no room, fall back to allocating a new skb
- * and filling it in.
- */
- spin_lock_irq(&head->lock);
- skb = skb_peek(head);
- if (skb) {
- struct scm_fp_list *fpl = UNIXCB(skb).fp;
-
- if (fpl->count < SCM_MAX_FD) {
- __skb_unlink(skb, head);
- spin_unlock_irq(&head->lock);
- fpl->fp[fpl->count] = get_file(file);
- unix_inflight(fpl->user, fpl->fp[fpl->count]);
- fpl->count++;
- spin_lock_irq(&head->lock);
- __skb_queue_head(head, skb);
- } else {
- skb = NULL;
- }
- }
- spin_unlock_irq(&head->lock);
-
- if (skb) {
- fput(file);
- return 0;
- }
-
- return __io_sqe_files_scm(ctx, 1, index);
-#else
- return 0;
-#endif
-}
-
static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
struct io_rsrc_node *node, void *rsrc)
{
@@ -8716,12 +8687,6 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
*io_get_tag_slot(ctx->file_data, slot_index) = 0;
io_fixed_file_set(file_slot, file);
- ret = io_sqe_file_register(ctx, file, slot_index);
- if (ret) {
- file_slot->file_ptr = 0;
- goto err;
- }
-
ret = 0;
err:
if (needs_switch)
@@ -8835,12 +8800,6 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
}
*io_get_tag_slot(data, i) = tag;
io_fixed_file_set(file_slot, file);
- err = io_sqe_file_register(ctx, file, i);
- if (err) {
- file_slot->file_ptr = 0;
- fput(file);
- break;
- }
}
}
@@ -9762,12 +9721,7 @@ static void io_ring_exit_work(struct work_struct *work)
init_completion(&exit.completion);
init_task_work(&exit.task_work, io_tctx_exit_cb);
exit.ctx = ctx;
- /*
- * Some may use context even when all refs and requests have been put,
- * and they are free to do so while still holding uring_lock or
- * completion_lock, see io_req_task_submit(). Apart from other work,
- * this lock/unlock section also waits them to finish.
- */
+
mutex_lock(&ctx->uring_lock);
while (!list_empty(&ctx->tctx_list)) {
WARN_ON_ONCE(time_after(jiffies, timeout));
@@ -10402,7 +10356,7 @@ static int io_uring_show_cred(struct seq_file *m, unsigned int id,
static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
- struct io_sq_data *sq = NULL;
+ int sq_pid = -1, sq_cpu = -1;
bool has_lock;
int i;
@@ -10415,13 +10369,19 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
has_lock = mutex_trylock(&ctx->uring_lock);
if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
- sq = ctx->sq_data;
- if (!sq->thread)
- sq = NULL;
+ struct io_sq_data *sq = ctx->sq_data;
+
+ if (mutex_trylock(&sq->lock)) {
+ if (sq->thread) {
+ sq_pid = task_pid_nr(sq->thread);
+ sq_cpu = task_cpu(sq->thread);
+ }
+ mutex_unlock(&sq->lock);
+ }
}
- seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
- seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
+ seq_printf(m, "SqThread:\t%d\n", sq_pid);
+ seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
struct file *f = io_file_from_index(ctx, i);
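
The rewritten fdinfo path snapshots the SQPOLL thread's pid and cpu into locals under a trylock on sq->lock and prints afterwards; if either lock is contended the output degrades to -1 instead of blocking the seq_file read or dereferencing a thread that may be exiting. The resulting shape, as a sketch:

static void show_sq_thread(struct seq_file *m, struct io_sq_data *sq)
{
	int sq_pid = -1, sq_cpu = -1;

	if (mutex_trylock(&sq->lock)) {
		if (sq->thread) {
			sq_pid = task_pid_nr(sq->thread);
			sq_cpu = task_cpu(sq->thread);
		}
		mutex_unlock(&sq->lock);
	}
	seq_printf(m, "SqThread:\t%d\n", sq_pid);
	seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
}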