author	Jakub Kicinski <kuba@kernel.org>	2025-03-25 10:04:55 -0700
committer	Jakub Kicinski <kuba@kernel.org>	2025-03-25 10:06:49 -0700
commit	7bd2e6b74ad56a49459ba84e8d4fa3730055ab5e
tree	f3a934ee1001a31e62f38e7cef6c014efa7f6f56 /include/net
parent	51068769cc8c699eaba7d411f214bc969b35708b
parent	b52458652eca5a551ddb55605201b136f091b04d
Merge branch 'net-skip-taking-rtnl_lock-for-queue-get'
Jakub Kicinski says:

====================
net: skip taking rtnl_lock for queue GET (prep)

Skip taking rtnl_lock for queue GET ops on devices which opt into
running all ops under the instance lock. In preparation for performing
queue ops without rtnl_lock, clarify the protection of queue-related
fields.

v1: https://lore.kernel.org/20250312223507.805719-1-kuba@kernel.org
====================

Link: https://patch.msgid.link/20250324224537.248800-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
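For orientation, a minimal sketch of what the opt-in looks like from a driver's side, assuming only the request_ops_lock and queue_mgmt_ops fields that appear in the diff below; the function name example_opt_in is hypothetical:

static void example_opt_in(struct net_device *dev)
{
	/* Hypothetical setup step: with request_ops_lock set (or with
	 * queue_mgmt_ops populated), netdev_need_ops_lock() returns
	 * true, so ops run under dev->lock and queue GET ops no longer
	 * depend on the global rtnl_lock.
	 */
	dev->request_ops_lock = true;
}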
Diffstat (limited to 'include/net')
-rw-r--r--	include/net/netdev_lock.h	20
-rw-r--r--	include/net/netdev_rx_queue.h	2
2 files changed, 17 insertions(+), 5 deletions(-)
diff --git a/include/net/netdev_lock.h b/include/net/netdev_lock.h
index 99631fbd7f54..1c0c9a94cc22 100644
--- a/include/net/netdev_lock.h
+++ b/include/net/netdev_lock.h
@@ -5,25 +5,27 @@
#include <linux/lockdep.h>
#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
static inline bool netdev_trylock(struct net_device *dev)
{
return mutex_trylock(&dev->lock);
}
-static inline void netdev_assert_locked(struct net_device *dev)
+static inline void netdev_assert_locked(const struct net_device *dev)
{
lockdep_assert_held(&dev->lock);
}
-static inline void netdev_assert_locked_or_invisible(struct net_device *dev)
+static inline void
+netdev_assert_locked_or_invisible(const struct net_device *dev)
{
if (dev->reg_state == NETREG_REGISTERED ||
dev->reg_state == NETREG_UNREGISTERING)
netdev_assert_locked(dev);
}
-static inline bool netdev_need_ops_lock(struct net_device *dev)
+static inline bool netdev_need_ops_lock(const struct net_device *dev)
{
bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops;
@@ -46,10 +48,20 @@ static inline void netdev_unlock_ops(struct net_device *dev)
netdev_unlock(dev);
}
-static inline void netdev_ops_assert_locked(struct net_device *dev)
+static inline void netdev_ops_assert_locked(const struct net_device *dev)
{
if (netdev_need_ops_lock(dev))
lockdep_assert_held(&dev->lock);
+ else
+ ASSERT_RTNL();
+}
+
+static inline void
+netdev_ops_assert_locked_or_invisible(const struct net_device *dev)
+{
+ if (dev->reg_state == NETREG_REGISTERED ||
+ dev->reg_state == NETREG_UNREGISTERING)
+ netdev_ops_assert_locked(dev);
}
static inline int netdev_lock_cmp_fn(const struct lockdep_map *a,
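A hedged sketch of how the new assert might be used by code reading an "ops protected" queue field: get_queue_napi() is illustrative and not part of this series, while __netif_get_rx_queue() is the existing accessor from netdev_rx_queue.h.

/* Caller must hold dev->lock on opted-in devices, rtnl_lock
 * otherwise; netdev_ops_assert_locked() checks whichever applies.
 */
static struct napi_struct *
get_queue_napi(struct net_device *dev, unsigned int idx)
{
	netdev_ops_assert_locked(dev);
	return __netif_get_rx_queue(dev, idx)->napi;
}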
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index af40842f229d..b2238b551dce 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -24,7 +24,7 @@ struct netdev_rx_queue {
struct xsk_buff_pool *pool;
#endif
/* NAPI instance for the queue
- * Readers and writers must hold RTNL
+ * "ops protected", see comment about net_device::lock
*/
struct napi_struct *napi;
struct pp_memory_provider_params mp_params;
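With the napi pointer now "ops protected", a GET-style reader can pair the existing netdev_lock_ops()/netdev_unlock_ops() helpers around the access. A sketch, assuming the caller still holds rtnl_lock for devices that did not opt in:

	/* Hypothetical read path: takes dev->lock only when the device
	 * opted in; legacy devices remain covered by the caller's
	 * rtnl_lock, which netdev_ops_assert_locked() verifies.
	 */
	netdev_lock_ops(dev);
	napi = __netif_get_rx_queue(dev, i)->napi;
	/* ... inspect napi state ... */
	netdev_unlock_ops(dev);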