| author | Pavel Begunkov <asml.silence@gmail.com> | 2026-03-23 12:43:57 +0000 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2026-04-01 10:21:12 -0600 |
| commit | 898ad80d1207cbdb22b21bafb6de4adfd7627bd0 | |
| tree | 8a5a98e1b0c67ed1a31c2d82adb20698bfb0373e /io_uring | |
| parent | 6a55a0a7ebcc8496c81827a2e9287de80f86dd57 | |
io_uring/zcrx: use guards for locking
Convert the last several places still using manual locking over to
guards to simplify the code.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://patch.msgid.link/eb4667cfaf88c559700f6399da9e434889f5b04a.1774261953.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring')
| -rw-r--r-- | io_uring/zcrx.c | 15 |

1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 2112b652a699..6457690e1af4 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -586,9 +586,8 @@ static void io_zcrx_return_niov_freelist(struct net_iov *niov)
 {
 	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
 
-	spin_lock_bh(&area->freelist_lock);
+	guard(spinlock_bh)(&area->freelist_lock);
 	area->freelist[area->free_count++] = net_iov_idx(niov);
-	spin_unlock_bh(&area->freelist_lock);
 }
 
 static void io_zcrx_return_niov(struct net_iov *niov)
@@ -1053,7 +1052,8 @@ static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
 {
 	struct io_zcrx_area *area = ifq->area;
 
-	spin_lock_bh(&area->freelist_lock);
+	guard(spinlock_bh)(&area->freelist_lock);
+
 	while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
 		struct net_iov *niov = __io_zcrx_get_free_niov(area);
 		netmem_ref netmem = net_iov_to_netmem(niov);
@@ -1062,7 +1062,6 @@ static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
 		io_zcrx_sync_for_device(pp, niov);
 		net_mp_netmem_place_in_cache(pp, netmem);
 	}
-	spin_unlock_bh(&area->freelist_lock);
 }
 
 static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
@@ -1285,10 +1284,10 @@ static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
 	if (area->mem.is_dmabuf)
 		return NULL;
 
-	spin_lock_bh(&area->freelist_lock);
-	if (area->free_count)
-		niov = __io_zcrx_get_free_niov(area);
-	spin_unlock_bh(&area->freelist_lock);
+	scoped_guard(spinlock_bh, &area->freelist_lock) {
+		if (area->free_count)
+			niov = __io_zcrx_get_free_niov(area);
+	}
 
 	if (niov)
 		page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);
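For context: guard() and scoped_guard() come from the kernel's include/linux/cleanup.h and release the lock automatically when the enclosing scope ends, so every exit path (including early returns) drops the lock without an explicit unlock call. Below is a minimal user-space sketch of the same scope-based pattern, built on GCC/Clang's __attribute__((cleanup)) with a pthread mutex standing in for the spinlock; struct mutex_guard and MUTEX_GUARD are illustrative names invented for this sketch, not kernel API.

```c
/*
 * Minimal user-space sketch of the scope-based locking pattern that the
 * kernel's guard()/scoped_guard() macros provide. pthread_mutex_t stands
 * in for the kernel spinlock; mutex_guard/MUTEX_GUARD are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

struct mutex_guard {
	pthread_mutex_t *lock;
};

static inline struct mutex_guard mutex_guard_init(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	return (struct mutex_guard){ .lock = lock };
}

/* Invoked automatically when the guard variable goes out of scope. */
static inline void mutex_guard_release(struct mutex_guard *g)
{
	pthread_mutex_unlock(g->lock);
}

#define MUTEX_GUARD(name, lock) \
	struct mutex_guard name __attribute__((cleanup(mutex_guard_release))) = \
		mutex_guard_init(lock)

static pthread_mutex_t freelist_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_count = 2;

static int take_free_slot(void)
{
	/* Lock taken here; dropped on every return path below. */
	MUTEX_GUARD(g, &freelist_lock);

	if (!free_count)
		return -1;	/* early return: unlock still happens */
	return --free_count;
}

int main(void)
{
	printf("slot: %d\n", take_free_slot());
	printf("slot: %d\n", take_free_slot());
	printf("slot: %d\n", take_free_slot());
	return 0;
}
```

The kernel's guard types are generated by similar macros in cleanup.h that pair a lock function with a cleanup handler, which is why the patch can delete the spin_unlock_bh() calls outright and why scoped_guard() is used in io_alloc_fallback_niov(), where the critical section must end before page_pool_fragment_netmem() runs.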
