path: root/io_uring/rw.c
Diffstat (limited to 'io_uring/rw.c')
 io_uring/rw.c | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 75b001febb4d..4ff3442ac2ee 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -28,9 +28,19 @@ struct io_rw {
 	rwf_t				flags;
 };
 
-static inline bool io_file_supports_nowait(struct io_kiocb *req)
+static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
 {
-	return req->flags & REQ_F_SUPPORT_NOWAIT;
+	/* If FMODE_NOWAIT is set for a file, we're golden */
+	if (req->flags & REQ_F_SUPPORT_NOWAIT)
+		return true;
+	/* No FMODE_NOWAIT, if we can poll, check the status */
+	if (io_file_can_poll(req)) {
+		struct poll_table_struct pt = { ._key = mask };
+
+		return vfs_poll(req->file, &pt) & mask;
+	}
+	/* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
+	return false;
 }
 
 #ifdef CONFIG_COMPAT
@@ -629,7 +639,7 @@ static bool io_rw_should_retry(struct io_kiocb *req)
 	 * just use poll if we can, and don't attempt if the fs doesn't
 	 * support callback based unlocks
 	 */
-	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
+	if (io_file_can_poll(req) || !(req->file->f_mode & FMODE_BUF_RASYNC))
 		return false;
 
 	wait->wait.func = io_async_buf_func;
@@ -685,8 +695,8 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
 	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
 	 * reliably. If not, or it IOCB_NOWAIT is set, don't retry.
 	 */
-	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
-	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
+	if (kiocb->ki_flags & IOCB_NOWAIT ||
+	    ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
 		req->flags |= REQ_F_NOWAIT;
 
 	if (ctx->flags & IORING_SETUP_IOPOLL) {
@@ -752,7 +762,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (force_nonblock) {
 		/* If the file doesn't support async, just async punt */
-		if (unlikely(!io_file_supports_nowait(req))) {
+		if (unlikely(!io_file_supports_nowait(req, EPOLLIN))) {
 			ret = io_setup_async_rw(req, iovec, s, true);
 			return ret ?: -EAGAIN;
 		}
@@ -783,7 +793,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
 		req->flags &= ~REQ_F_REISSUE;
 		/* if we can poll, just do that */
-		if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
+		if (req->opcode == IORING_OP_READ && io_file_can_poll(req))
 			return -EAGAIN;
 		/* IOPOLL retry should happen for io-wq threads */
 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
@@ -927,7 +937,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (force_nonblock) {
 		/* If the file doesn't support async, just async punt */
-		if (unlikely(!io_file_supports_nowait(req)))
+		if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
 			goto copy_iov;
 
 		/* File path supports NOWAIT for non-direct_IO only for block devices. */
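
Note on the readiness probe introduced above: the poll_table passed to vfs_poll() has only ._key set and no _qproc callback, so the file's ->poll() handler reports current readiness without registering the caller on a waitqueue. A minimal sketch of that pattern in isolation (the helper name file_is_ready() is made up here for illustration and is not part of this change):

#include <linux/fs.h>
#include <linux/poll.h>

/*
 * Illustrative sketch only: report whether @file is currently ready for
 * the events in @mask. Because pt._qproc is NULL, poll_wait() inside the
 * driver's ->poll() is a no-op, so this only samples readiness and never
 * adds the caller to a waitqueue.
 */
static bool file_is_ready(struct file *file, __poll_t mask)
{
	struct poll_table_struct pt = { ._key = mask };

	return vfs_poll(file, &pt) & mask;
}

This is the same one-shot probe io_file_supports_nowait() now performs with EPOLLIN for reads and EPOLLOUT for writes when a pollable file lacks FMODE_NOWAIT.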