| author | Hao Xu <howeyxu@tencent.com> | 2022-06-23 21:01:26 +0800 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2022-07-24 18:39:16 -0600 |
| commit | 795bbbc8a9a1bbbafce762c706bfb5733c9d0426 | |
| tree | 1cf02750064a966d1b195266e4caaa450f81d12a /io_uring/kbuf.h | |
| parent | 49f1c68e048f1706b71c8255faf8110113d1cc48 | |
io_uring: kbuf: inline io_kbuf_recycle_ring()
Make io_kbuf_recycle_ring() inline since it is on the fast path for
provided buffers.
Signed-off-by: Hao Xu <howeyxu@tencent.com>
Link: https://lore.kernel.org/r/20220623130126.179232-1-hao.xu@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/kbuf.h')
| -rw-r--r-- | io_uring/kbuf.h | 28 | 
1 file changed, 27 insertions(+), 1 deletion(-)
```diff
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 721465c5d809..b3e8c6c5fee1 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -49,7 +49,33 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
 
 void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
-void io_kbuf_recycle_ring(struct io_kiocb *req);
+
+static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
+{
+	/*
+	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
+	 * the flag and hence ensure that bl->head doesn't get incremented.
+	 * If the tail has already been incremented, hang on to it.
+	 * The exception is partial io, that case we should increment bl->head
+	 * to monopolize the buffer.
+	 */
+	if (req->buf_list) {
+		if (req->flags & REQ_F_PARTIAL_IO) {
+			/*
+			 * If we end up here, then the io_uring_lock has
+			 * been kept held since we retrieved the buffer.
+			 * For the io-wq case, we already cleared
+			 * req->buf_list when the buffer was retrieved,
+			 * hence it cannot be set here for that case.
+			 */
+			req->buf_list->head++;
+			req->buf_list = NULL;
+		} else {
+			req->buf_index = req->buf_list->bgid;
+			req->flags &= ~REQ_F_BUFFER_RING;
+		}
+	}
+}
 
 static inline bool io_do_buffer_select(struct io_kiocb *req)
 {
```
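
For context on why the inline move helps: recycling runs whenever a request that has already picked a provided buffer must be retried instead of completed, which makes it a hot path for ring-provided buffers. Below is a minimal sketch of how a caller-side dispatch helper in the style of io_uring/kbuf.h could route between the legacy and ring variants; the wrapper name io_kbuf_recycle() and the REQ_F_BUFFER_SELECTED check are not part of the diff above and are shown only as assumptions for illustration.

```c
/*
 * Illustrative sketch, not taken from the commit above: a caller-side
 * dispatch helper in the style of io_uring/kbuf.h. With io_kbuf_recycle_ring()
 * made static inline in the header, the ring-provided-buffer branch compiles
 * down to a few flag tests and stores, with no out-of-line call on the
 * fast path.
 */
static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	/* classic (legacy) provided buffers keep their out-of-line helper */
	if (req->flags & REQ_F_BUFFER_SELECTED)
		io_kbuf_recycle_legacy(req, issue_flags);

	/* ring-provided buffers: fully inlined after this patch */
	if (req->flags & REQ_F_BUFFER_RING)
		io_kbuf_recycle_ring(req);
}
```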
