| author | Pavel Begunkov <asml.silence@gmail.com> | 2022-06-16 10:22:00 +0100 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2022-07-24 18:39:13 -0600 |
| commit | 53ccf69bda6f51e462f3c4ab7eb9c0ec34e78be4 (patch) | |
| tree | b029fa2b5fa1dd1483f489b699b612e08fbe9ec3 /io_uring/kbuf.h | |
| parent | 7012c81593d5a3bc6d0b8720345cf887ef1df914 (diff) | |
io_uring: don't inline io_put_kbuf
io_put_kbuf() is huge, don't bloat the kernel with inlining.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/2e21ccf0be471ffa654032914b9430813cae53f8.1655371007.git.asml.silence@gmail.com
Reviewed-by: Hao Xu <howeyxu@tencent.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/kbuf.h')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | io_uring/kbuf.h | 38 |

1 file changed, 6 insertions(+), 32 deletions(-)
```diff
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 9da3a933ef40..304e7139d835 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -47,6 +47,8 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
 int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 
+unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+
 static inline bool io_do_buffer_select(struct io_kiocb *req)
 {
 	if (!(req->flags & REQ_F_BUFFER_SELECT))
@@ -79,7 +81,8 @@ static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	__io_kbuf_recycle(req, issue_flags);
 }
 
-static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list)
+static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
+					      struct list_head *list)
 {
 	if (req->flags & REQ_F_BUFFER_RING) {
 		if (req->buf_list)
@@ -99,44 +102,15 @@ static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
 	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
 		return 0;
 
-	return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
+	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
 }
 
 static inline unsigned int io_put_kbuf(struct io_kiocb *req,
 				       unsigned issue_flags)
 {
-	unsigned int cflags;
 	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
 		return 0;
-
-	/*
-	 * We can add this buffer back to two lists:
-	 *
-	 * 1) The io_buffers_cache list. This one is protected by the
-	 *    ctx->uring_lock. If we already hold this lock, add back to this
-	 *    list as we can grab it from issue as well.
-	 * 2) The io_buffers_comp list. This one is protected by the
-	 *    ctx->completion_lock.
-	 *
-	 * We migrate buffers from the comp_list to the issue cache list
-	 * when we need one.
-	 */
-	if (req->flags & REQ_F_BUFFER_RING) {
-		/* no buffers to recycle for this case */
-		cflags = __io_put_kbuf(req, NULL);
-	} else if (issue_flags & IO_URING_F_UNLOCKED) {
-		struct io_ring_ctx *ctx = req->ctx;
-
-		spin_lock(&ctx->completion_lock);
-		cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
-		spin_unlock(&ctx->completion_lock);
-	} else {
-		lockdep_assert_held(&req->ctx->uring_lock);
-
-		cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
-	}
-
-	return cflags;
+	return __io_put_kbuf(req, issue_flags);
 }
 
 #endif
```
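The diffstat above is limited to io_uring/kbuf.h, so only the header half of the patch is visible: __io_put_kbuf() loses its inline body and becomes an out-of-line declaration, while the old list-parameter helper stays inline under the new name __io_put_kbuf_list(). The counterpart is an out-of-line definition of __io_put_kbuf() in io_uring/kbuf.c, not shown here. A sketch of what that definition looks like, reconstructed from the inline body deleted above (the committed code may differ in minor details):

```c
/*
 * io_uring/kbuf.c (sketch): the slow path moved out of the header,
 * reconstructed from the inline body removed in this patch.
 */
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	unsigned int cflags;

	/*
	 * The buffer can go back to two lists:
	 *
	 * 1) io_buffers_cache, protected by ctx->uring_lock. If we already
	 *    hold that lock, recycle here so issue can grab it again.
	 * 2) io_buffers_comp, protected by ctx->completion_lock.
	 *
	 * Buffers migrate from the comp list to the issue cache as needed.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* ring-provided buffers have nothing to recycle */
		cflags = __io_put_kbuf_list(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
	return cflags;
}
```

With this split, each io_put_kbuf() call site inlines only the cheap REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING test plus a function call; the lock-juggling slow path is emitted once in kbuf.c instead of being duplicated at every caller, which is the code-size win the commit message refers to.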
