| author | Pavel Begunkov <asml.silence@gmail.com> | 2025-09-16 15:27:59 +0100 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2025-09-16 12:37:21 -0600 |
| commit | c95257f336556de05f26dc88a890fb2a59364939 | |
| tree | 4ce9a0f4993e37cdc4d78862b412bd41d193d0a8 | |
| parent | 73fa880effc5644aaf746596acb1b1efa44606df | |
io_uring/zcrx: use guards for the refill lock
Use guards for rq_lock in io_zcrx_ring_refill(); it makes the function a tad simpler.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| -rw-r--r-- | io_uring/zcrx.c | 7 |

1 file changed, 2 insertions(+), 5 deletions(-)
```diff
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 5f99fc7b43ee..630b19ebb47e 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -756,14 +756,12 @@ static void io_zcrx_ring_refill(struct page_pool *pp,
 	unsigned int mask = ifq->rq_entries - 1;
 	unsigned int entries;
 
-	spin_lock_bh(&ifq->rq_lock);
+	guard(spinlock_bh)(&ifq->rq_lock);
 
 	entries = io_zcrx_rqring_entries(ifq);
 	entries = min_t(unsigned, entries, PP_ALLOC_CACHE_REFILL - pp->alloc.count);
-	if (unlikely(!entries)) {
-		spin_unlock_bh(&ifq->rq_lock);
+	if (unlikely(!entries))
 		return;
-	}
 
 	do {
 		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(ifq, mask);
@@ -801,7 +799,6 @@ static void io_zcrx_ring_refill(struct page_pool *pp,
 	} while (--entries);
 
 	smp_store_release(&ifq->rq_ring->head, ifq->cached_rq_head);
-	spin_unlock_bh(&ifq->rq_lock);
 }
 
 static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
```
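For context, `guard(spinlock_bh)` comes from the kernel's scope-based cleanup helpers (`<linux/cleanup.h>`, with the spinlock guards defined via `<linux/spinlock.h>`): the guard takes the lock at its declaration and releases it automatically when it goes out of scope, on every return path. That is what lets the early `return` above drop its explicit `spin_unlock_bh()`. A minimal sketch of the pattern, using a hypothetical `struct demo` and `demo_consume()` (names invented for illustration):

```c
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

/* Hypothetical example: a counter protected by a spinlock. */
struct demo {
	spinlock_t	lock;
	unsigned int	count;
};

static int demo_consume(struct demo *d)
{
	/*
	 * Takes d->lock via spin_lock_bh(); the matching
	 * spin_unlock_bh() runs automatically when the guard
	 * leaves scope, on every return path.
	 */
	guard(spinlock_bh)(&d->lock);

	if (!d->count)
		return -EAGAIN;		/* unlocks automatically */

	d->count--;
	return 0;			/* unlocks automatically */
}
```

Where the critical section should not span the rest of the function, `scoped_guard(spinlock_bh, &d->lock) { ... }` limits it to the braced block instead.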
