author    Jakub Kicinski <kuba@kernel.org>    2025-03-25 10:04:55 -0700
committer Jakub Kicinski <kuba@kernel.org>    2025-03-25 10:06:49 -0700
commit    7bd2e6b74ad56a49459ba84e8d4fa3730055ab5e (patch)
tree      f3a934ee1001a31e62f38e7cef6c014efa7f6f56 /net/core/page_pool.c
parent    51068769cc8c699eaba7d411f214bc969b35708b (diff)
parent    b52458652eca5a551ddb55605201b136f091b04d (diff)
Merge branch 'net-skip-taking-rtnl_lock-for-queue-get'
Jakub Kicinski says:

====================
net: skip taking rtnl_lock for queue GET (prep)

Skip taking rtnl_lock for queue GET ops on devices which opt into
running all ops under the instance lock. In preparation for performing
queue ops without the rtnl lock, clarify the protection of
queue-related fields.

v1: https://lore.kernel.org/20250312223507.805719-1-kuba@kernel.org
====================

Link: https://patch.msgid.link/20250324224537.248800-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
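For context, here is a minimal sketch of what the new expectation looks like from a page pool creator's point of view. It is illustrative only and not taken from this series: the function name, rxq_idx, and the parameter values are assumptions, and it is not a complete memory-provider configuration. The point is that with PP_FLAG_ALLOW_UNREADABLE_NETMEM, page_pool_init() now asserts the netdev instance lock rather than the rtnl lock, so a caller that is not already running inside a locked netdev op would hold that lock across creation:

/* Illustrative sketch only -- names and values are assumptions.
 * With PP_FLAG_ALLOW_UNREADABLE_NETMEM set, page_pool_init() asserts
 * the netdev instance lock instead of ASSERT_RTNL().
 */
static struct page_pool *sketch_create_mp_pool(struct net_device *netdev,
					       struct device *dma_dev,
					       unsigned int rxq_idx)
{
	struct page_pool_params pp = {
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,
		.netdev		= netdev,
		.queue_idx	= rxq_idx,
		.flags		= PP_FLAG_ALLOW_UNREADABLE_NETMEM,
	};
	struct page_pool *pool;

	netdev_lock(netdev);		/* was: rtnl_lock() */
	pool = page_pool_create(&pp);	/* page_pool_init() checks the lock */
	netdev_unlock(netdev);		/* was: rtnl_unlock() */

	return pool;
}

In-tree callers (e.g. queue management ops) are reached with the instance lock already held, so they would not take it again; the explicit lock/unlock above only illustrates the rule the assertion enforces.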
Diffstat (limited to 'net/core/page_pool.c')
-rw-r--r--	net/core/page_pool.c	| 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index acef1fcd8ddc..7745ad924ae2 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -11,6 +11,7 @@
 #include <linux/slab.h>
 #include <linux/device.h>
 
+#include <net/netdev_lock.h>
 #include <net/netdev_rx_queue.h>
 #include <net/page_pool/helpers.h>
 #include <net/page_pool/memory_provider.h>
@@ -279,11 +280,7 @@ static int page_pool_init(struct page_pool *pool,
 	get_device(pool->p.dev);
 
 	if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) {
-		/* We rely on rtnl_lock()ing to make sure netdev_rx_queue
-		 * configuration doesn't change while we're initializing
-		 * the page_pool.
-		 */
-		ASSERT_RTNL();
+		netdev_assert_locked(pool->slow.netdev);
 		rxq = __netif_get_rx_queue(pool->slow.netdev,
 					   pool->slow.queue_idx);
 		pool->mp_priv = rxq->mp_params.mp_priv;
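The assertion swap above relies on the per-netdev instance lock that <net/netdev_lock.h> wraps. As a rough paraphrase of those helpers (an assumption for illustration, not the verbatim header -- see include/net/netdev_lock.h for the authoritative definitions), they boil down to mutex and lockdep operations on the device's lock:

/* Rough paraphrase of the <net/netdev_lock.h> helpers used above. */
static inline void netdev_lock(struct net_device *dev)
{
	mutex_lock(&dev->lock);
}

static inline void netdev_unlock(struct net_device *dev)
{
	mutex_unlock(&dev->lock);
}

static inline void netdev_assert_locked(struct net_device *dev)
{
	lockdep_assert_held(&dev->lock);
}

So for devices that run all ops under the instance lock, holding dev->lock is what guards the netdev_rx_queue configuration during page_pool_init(), replacing the previous reliance on rtnl_lock().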