Diffstat (limited to 'fs')
-rw-r--r--  fs/block_dev.c  58
-rw-r--r--  fs/io_uring.c   81
2 files changed, 114 insertions, 25 deletions
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4707dfff991b..c2a85b587922 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -345,15 +345,24 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
struct bio *bio;
bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
+ bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
loff_t pos = iocb->ki_pos;
blk_qc_t qc = BLK_QC_T_NONE;
- int ret = 0;
+ gfp_t gfp;
+ ssize_t ret;
if ((pos | iov_iter_alignment(iter)) &
(bdev_logical_block_size(bdev) - 1))
return -EINVAL;
- bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
+ if (nowait)
+ gfp = GFP_NOWAIT;
+ else
+ gfp = GFP_KERNEL;
+
+ bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
+ if (!bio)
+ return -EAGAIN;
dio = container_of(bio, struct blkdev_dio, bio);
dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -375,7 +384,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
if (!is_poll)
blk_start_plug(&plug);
+ ret = 0;
for (;;) {
+ int err;
+
bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = pos >> 9;
bio->bi_write_hint = iocb->ki_hint;
@@ -383,8 +395,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
bio->bi_end_io = blkdev_bio_end_io;
bio->bi_ioprio = iocb->ki_ioprio;
- ret = bio_iov_iter_get_pages(bio, iter);
- if (unlikely(ret)) {
+ err = bio_iov_iter_get_pages(bio, iter);
+ if (unlikely(err)) {
+ if (!ret)
+ ret = err;
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
break;
@@ -399,6 +413,14 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
task_io_account_write(bio->bi_iter.bi_size);
}
+ /*
+ * Tell the underlying layer not to block on a resource shortage,
+ * and if we would have blocked, return the error inline instead
+ * of through the bio->bi_end_io() callback.
+ */
+ if (nowait)
+ bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);
+
dio->size += bio->bi_iter.bi_size;
pos += bio->bi_iter.bi_size;
@@ -412,6 +434,11 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
}
qc = submit_bio(bio);
+ if (qc == BLK_QC_T_EAGAIN) {
+ if (!ret)
+ ret = -EAGAIN;
+ goto error;
+ }
if (polled)
WRITE_ONCE(iocb->ki_cookie, qc);
@@ -432,8 +459,20 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
atomic_inc(&dio->ref);
}
- submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL, nr_pages);
+ qc = submit_bio(bio);
+ if (qc == BLK_QC_T_EAGAIN) {
+ if (!ret)
+ ret = -EAGAIN;
+ goto error;
+ }
+ ret += bio->bi_iter.bi_size;
+
+ bio = bio_alloc(gfp, nr_pages);
+ if (!bio) {
+ if (!ret)
+ ret = -EAGAIN;
+ goto error;
+ }
}
if (!is_poll)
@@ -453,13 +492,16 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
}
__set_current_state(TASK_RUNNING);
+out:
if (!ret)
ret = blk_status_to_errno(dio->bio.bi_status);
- if (likely(!ret))
- ret = dio->size;
bio_put(&dio->bio);
return ret;
+error:
+ if (!is_poll)
+ blk_finish_plug(&plug);
+ goto out;
}
static ssize_t
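
The block_dev.c hunks above wire IOCB_NOWAIT through __blkdev_direct_IO(): allocations switch to GFP_NOWAIT, every bio is flagged REQ_NOWAIT | REQ_NOWAIT_INLINE, and a BLK_QC_T_EAGAIN result from submit_bio() becomes -EAGAIN, or a short byte count if earlier bios in the chain already went out. From user space the kiocb flag is normally reached with preadv2() and RWF_NOWAIT on an O_DIRECT block device fd; the following is only a minimal sketch under that assumption, with /dev/sdX as a placeholder device and 4096-byte alignment assumed for the logical block size.

/*
 * Hedged user-space sketch: a nonblocking direct read against a block
 * device, the path served by the __blkdev_direct_IO() changes above.
 * /dev/sdX is a placeholder; adjust alignment to the device's logical
 * block size.  Requires a kernel and glibc that expose RWF_NOWAIT.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct iovec iov;
	void *buf;
	ssize_t ret;
	int fd;

	fd = open("/dev/sdX", O_RDONLY | O_DIRECT);	/* placeholder device */
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	iov.iov_base = buf;
	iov.iov_len = 4096;

	/* RWF_NOWAIT: return -EAGAIN instead of blocking on resources */
	ret = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (ret < 0 && errno == EAGAIN)
		printf("would block; retry from a context that may sleep\n");
	else if (ret < 0)
		perror("preadv2");
	else
		printf("read %zd bytes without blocking\n", ret);

	free(buf);
	close(fd);
	return 0;
}
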
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e2a66e12fbc6..012bc0efb9d3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -202,7 +202,7 @@ struct async_list {
struct file *file;
off_t io_end;
- size_t io_pages;
+ size_t io_len;
};
struct io_ring_ctx {
@@ -333,7 +333,8 @@ struct io_kiocb {
#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
#define REQ_F_IO_DRAINED 32 /* drain done */
#define REQ_F_LINK 64 /* linked sqes */
-#define REQ_F_FAIL_LINK 128 /* fail rest of links */
+#define REQ_F_LINK_DONE 128 /* linked sqes done */
+#define REQ_F_FAIL_LINK 256 /* fail rest of links */
u64 user_data;
u32 result;
u32 sequence;
@@ -429,7 +430,7 @@ static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
return false;
- return req->sequence > ctx->cached_cq_tail + ctx->sq_ring->dropped;
+ return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped;
}
static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
@@ -632,6 +633,7 @@ static void io_req_link_next(struct io_kiocb *req)
nxt->flags |= REQ_F_LINK;
}
+ nxt->flags |= REQ_F_LINK_DONE;
INIT_WORK(&nxt->work, io_sq_wq_submit_work);
queue_work(req->ctx->sqo_wq, &nxt->work);
}
@@ -1064,8 +1066,44 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
*/
offset = buf_addr - imu->ubuf;
iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
- if (offset)
- iov_iter_advance(iter, offset);
+
+ if (offset) {
+ /*
+ * Don't use iov_iter_advance() here, as it's really slow for
+ * using the latter parts of a big fixed buffer - it iterates
+ * over each segment manually. We can cheat a bit here, because
+ * we know that:
+ *
+ * 1) it's a BVEC iter, we set it up
+ * 2) all bvecs are PAGE_SIZE in size, except potentially the
+ * first and last bvec
+ *
+ * So just find our index, and adjust the iterator afterwards.
+ * If the offset is within the first bvec (or the whole first
+ * bvec), just use iov_iter_advance(). This makes it easier
+ * since we can just skip the first segment, which may not
+ * be PAGE_SIZE aligned.
+ */
+ const struct bio_vec *bvec = imu->bvec;
+
+ if (offset <= bvec->bv_len) {
+ iov_iter_advance(iter, offset);
+ } else {
+ unsigned long seg_skip;
+
+ /* skip first vec */
+ offset -= bvec->bv_len;
+ seg_skip = 1 + (offset >> PAGE_SHIFT);
+
+ iter->bvec = bvec + seg_skip;
+ iter->nr_segs -= seg_skip;
+ iter->count -= (seg_skip << PAGE_SHIFT);
+ iter->iov_offset = offset & ~PAGE_MASK;
+ if (iter->iov_offset)
+ iter->count -= iter->iov_offset;
+ }
+ }
+
return 0;
}
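
The comment in this hunk replaces a linear iov_iter_advance() with direct index arithmetic: since every bvec of a registered buffer except possibly the first and last spans exactly PAGE_SIZE, the segment holding a given offset, and the leftover offset inside it, fall out of one shift and one mask. A stand-alone sketch of the same arithmetic, assuming 4K pages and illustrative input values:

/*
 * Hedged sketch of the O(1) fixed-buffer seek above: skip the (possibly
 * short) first bvec, then index directly into the PAGE_SIZE segments.
 * A 4K page size and the sample values below are assumptions for the
 * example only.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long first_len = 1024;	/* first bvec may be unaligned/short */
	unsigned long offset = 10000;	/* byte offset into the fixed buffer */

	if (offset <= first_len) {
		/* inside the first segment: a plain advance is cheap enough */
		printf("segment 0, offset %lu\n", offset);
	} else {
		unsigned long rem = offset - first_len;
		unsigned long seg_skip = 1 + (rem >> PAGE_SHIFT);
		unsigned long seg_off = rem & (PAGE_SIZE - 1);

		/* prints "segment 3, offset 784" for the values above */
		printf("segment %lu, offset %lu\n", seg_skip, seg_off);
	}
	return 0;
}
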
@@ -1120,28 +1158,26 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
off_t io_end = kiocb->ki_pos + len;
if (filp == async_list->file && kiocb->ki_pos == async_list->io_end) {
- unsigned long max_pages;
+ unsigned long max_bytes;
/* Use 8x RA size as a decent limiter for both reads/writes */
- max_pages = filp->f_ra.ra_pages;
- if (!max_pages)
- max_pages = VM_READAHEAD_PAGES;
- max_pages *= 8;
-
- /* If max pages are exceeded, reset the state */
- len >>= PAGE_SHIFT;
- if (async_list->io_pages + len <= max_pages) {
+ max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
+ if (!max_bytes)
+ max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);
+
+ /* If the max len is exceeded, reset the state */
+ if (async_list->io_len + len <= max_bytes) {
req->flags |= REQ_F_SEQ_PREV;
- async_list->io_pages += len;
+ async_list->io_len += len;
} else {
io_end = 0;
- async_list->io_pages = 0;
+ async_list->io_len = 0;
}
}
/* New file? Reset state. */
if (async_list->file != filp) {
- async_list->io_pages = 0;
+ async_list->io_len = 0;
async_list->file = filp;
}
async_list->io_end = io_end;
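
With the switch from page to byte units, the limiter above becomes ra_pages << (PAGE_SHIFT + 3), i.e. eight times the readahead window in bytes, and the old len >>= PAGE_SHIFT truncation that let sub-page lengths count as zero goes away. A stand-alone sketch of the arithmetic, assuming 4K pages and the kernel's 128KB default readahead window:

/*
 * Hedged sketch of the byte-based limiter above: eight times the file's
 * readahead window, falling back to the 128KB default when the window is
 * unset.  4K pages are assumed for the example.
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define VM_READAHEAD_PAGES	(128 * 1024 >> PAGE_SHIFT)	/* 32 pages */

int main(void)
{
	unsigned long ra_pages = 0;	/* e.g. filp->f_ra.ra_pages unset */
	unsigned long max_bytes;

	max_bytes = ra_pages << (PAGE_SHIFT + 3);
	if (!max_bytes)
		max_bytes = (unsigned long)VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);

	/* 32 pages * 4096 bytes * 8 = 1048576 bytes */
	printf("sequential async limit: %lu bytes\n", max_bytes);
	return 0;
}
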
@@ -1630,6 +1666,8 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
INIT_LIST_HEAD(&poll->wait.entry);
init_waitqueue_func_entry(&poll->wait, io_poll_wake);
+ INIT_LIST_HEAD(&req->list);
+
mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
spin_lock_irq(&ctx->completion_lock);
@@ -1844,6 +1882,10 @@ restart:
/* async context always use a copy of the sqe */
kfree(sqe);
+ /* reqs from the defer and link lists needn't decrease the async cnt */
+ if (req->flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
+ goto out;
+
if (!async_list)
break;
if (!list_empty(&req_list)) {
@@ -1891,6 +1933,7 @@ restart:
}
}
+out:
if (cur_mm) {
set_fs(old_fs);
unuse_mm(cur_mm);
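
The goto out above lets requests that arrived via the defer list or as finished link members skip the async_list handling at the bottom of the worker loop; as the comment notes, such requests were never counted into async_list->cnt when queued, so letting them fall through would unbalance the counter the worker-exit logic relies on. A hedged, user-space sketch of that accounting rule; the type, flag, and function names below are illustrative, not the io_uring ones:

/*
 * Illustrative sketch: only items that were counted into a list's live
 * counter on the way in may decrement it on the way out, otherwise the
 * counter underflows.  Mirrors the REQ_F_IO_DRAINED / REQ_F_LINK_DONE
 * bail-out above in spirit only; names are made up for the example.
 */
#include <stdio.h>

#define FROM_ASYNC_LIST	0x1	/* counted into list->cnt at queue time */

struct work_list {
	int cnt;		/* items the worker still owes this list */
};

struct work_item {
	unsigned int flags;
};

static void queue_via_list(struct work_list *list, struct work_item *item)
{
	item->flags |= FROM_ASYNC_LIST;
	list->cnt++;
}

static void complete(struct work_list *list, struct work_item *item)
{
	/* deferred/linked items were never counted: skip the accounting */
	if (!(item->flags & FROM_ASYNC_LIST))
		return;
	list->cnt--;
}

int main(void)
{
	struct work_list list = { 0 };
	struct work_item seq = { 0 }, linked = { 0 };

	queue_via_list(&list, &seq);	/* counted */
	complete(&list, &linked);	/* never counted: must not decrement */
	complete(&list, &seq);

	printf("cnt = %d (expect 0)\n", list.cnt);
	return 0;
}
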
@@ -1917,6 +1960,10 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
ret = true;
spin_lock(&list->lock);
list_add_tail(&req->list, &list->list);
+ /*
+ * Ensure we see a concurrent modification of ->cnt from io_sq_wq_submit_work()
+ */
+ smp_mb();
if (!atomic_read(&list->cnt)) {
list_del_init(&req->list);
ret = false;
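
The smp_mb() above orders the list_add_tail() before the re-read of list->cnt, pairing with the worker's decrement-then-recheck exit path in io_sq_wq_submit_work(): with full ordering on both sides, at least one side must observe the other's update, so a request is neither stranded on the list after the worker has left nor double-handled. A simplified user-space rendering of that pattern with C11 atomics; sequentially consistent operations stand in for the kernel's barriers and the names are illustrative:

/*
 * Hedged sketch of the store/load pairing behind the smp_mb() above,
 * reduced to user space.  Each side publishes its own state (the queued
 * flag / the live counter) and then reads the other side's with full
 * ordering, so at least one of them sees the other's update.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int worker_cnt = 1;	/* stands in for async_list->cnt */
static atomic_bool queued;		/* stands in for the request list */

/* submitter side, analogous to io_add_to_prev_work() */
static bool try_hand_off(void)
{
	atomic_store(&queued, true);			/* "list_add_tail()" */
	/* seq_cst ops supply the full barrier smp_mb() provides in the kernel */
	if (atomic_load(&worker_cnt) == 0) {
		atomic_store(&queued, false);		/* "list_del_init()" */
		return false;				/* run it ourselves */
	}
	return true;					/* worker will pick it up */
}

/* worker side, analogous to the exit path of io_sq_wq_submit_work() */
static bool worker_exit(void)
{
	atomic_fetch_sub(&worker_cnt, 1);		/* announce we are leaving */
	if (atomic_load(&queued)) {
		atomic_fetch_add(&worker_cnt, 1);	/* take the work back */
		return false;				/* keep running */
	}
	return true;					/* safe to exit */
}

int main(void)
{
	printf("hand-off accepted: %d\n", try_hand_off());
	printf("worker may exit:   %d\n", worker_exit());
	return 0;
}
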