author     Jakub Kicinski <kuba@kernel.org>  2025-10-16 16:25:16 -0700
committer  Jakub Kicinski <kuba@kernel.org>  2025-10-16 16:25:16 -0700
commit     2df75cc5bdc48f8a6f393eaa9d18480aeddac7f2 (patch)
tree       474b900af346b7131228a19382b88a04694b0fd9 /include/linux
parent     01b6aca22bb9f8fbbebbf8bdbb80aadf11318e3d (diff)
parent     100dfa74cad9d4665cdcf0cc8e673b123a3ea910 (diff)
Merge branch 'net-optimize-tx-throughput-and-efficiency'
Eric Dumazet says:

====================
net: optimize TX throughput and efficiency

In this series, I replace the busylock spinlock we have in
__dev_queue_xmit() with a lockless list (llist), reducing spinlock
contention to the minimum. The idea is that only one cpu might spin
on the qdisc spinlock, while the others simply add their skb to the
llist.

After this series, we get a 300% (4x) efficiency improvement on heavy
TX workloads: twice the number of packets per second, for half the
cpu cycles.
====================

Link: https://patch.msgid.link/20251014171907.3554413-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
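For illustration, a minimal sketch of the enqueue pattern the cover letter
describes, assuming a hypothetical sketch_qdisc/sketch_xmit/sketch_transmit
trio: those names and the struct layout are invented here, and the real
series must also handle races (e.g. the draining cpu finishing just as
another cpu adds an skb) that this sketch glosses over. Only llist_add(),
llist_del_all(), llist_reverse_order(), llist_for_each_safe()/llist_entry()
and the sk_buff ll_node member are actual kernel primitives.

#include <linux/llist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for the qdisc state touched in __dev_queue_xmit(). */
struct sketch_qdisc {
	spinlock_t		lock;		/* the qdisc spinlock */
	struct llist_head	defer_list;	/* lockless list of pending skbs */
};

/* Placeholder for the real qdisc enqueue + device transmit path. */
static void sketch_transmit(struct sk_buff *skb)
{
	consume_skb(skb);
}

static void sketch_xmit(struct sketch_qdisc *q, struct sk_buff *skb)
{
	struct llist_node *batch, *node, *tmp;

	/*
	 * Publish the skb on the lockless list. llist_add() returns true
	 * only when the list was empty beforehand, so at most one cpu per
	 * batch goes on to contend for the qdisc spinlock; the others are
	 * done after a single atomic operation.
	 */
	if (!llist_add(&skb->ll_node, &q->defer_list))
		return;		/* another cpu will drain our skb for us */

	spin_lock(&q->lock);
	batch = llist_del_all(&q->defer_list);	/* take everything queued so far */
	batch = llist_reverse_order(batch);	/* llist is LIFO; restore FIFO */
	llist_for_each_safe(node, tmp, batch)
		sketch_transmit(llist_entry(node, struct sk_buff, ll_node));
	spin_unlock(&q->lock);
}

This is where the quoted numbers come from: instead of every cpu spinning
on busylock and then the qdisc lock, all but one pay a single atomic
operation each, and the lone lock holder transmits the whole batch.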
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/netdevice_xmit.h  9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/include/linux/netdevice_xmit.h b/include/linux/netdevice_xmit.h
index 813a19122ebb..cc232508e695 100644
--- a/include/linux/netdevice_xmit.h
+++ b/include/linux/netdevice_xmit.h
@@ -2,6 +2,12 @@
 #ifndef _LINUX_NETDEVICE_XMIT_H
 #define _LINUX_NETDEVICE_XMIT_H
 
+#if IS_ENABLED(CONFIG_NET_ACT_MIRRED)
+#define MIRRED_NEST_LIMIT 4
+#endif
+
+struct net_device;
+
 struct netdev_xmit {
 	u16 recursion;
 	u8 more;
@@ -9,7 +15,8 @@ struct netdev_xmit {
 	u8 skip_txqueue;
 #endif
 #if IS_ENABLED(CONFIG_NET_ACT_MIRRED)
-	u8 sched_mirred_nest;
+	u8 sched_mirred_nest;
+	struct net_device *sched_mirred_dev[MIRRED_NEST_LIMIT];
 #endif
 #if IS_ENABLED(CONFIG_NF_DUP_NETDEV)
 	u8 nf_dup_skb_recursion;
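For context on the hunk above: sched_mirred_nest counts how deeply
act_mirred redirects currently nest on this cpu, and the new
sched_mirred_dev[] array (bounded by MIRRED_NEST_LIMIT, i.e. 4 levels)
additionally records which device was entered at each level, making
genuine redirect loops detectable rather than merely depth-limited.
A hedged sketch of how such an array could be consulted follows;
sketch_mirred_enter() is an invented name, and the actual act_mirred
change may be structured differently.

/* Record 'dev' at the current nesting level, or refuse on loop/overflow. */
static bool sketch_mirred_enter(struct netdev_xmit *x, struct net_device *dev)
{
	int i;

	if (x->sched_mirred_nest >= MIRRED_NEST_LIMIT)
		return false;			/* nested too deeply: drop */

	for (i = 0; i < x->sched_mirred_nest; i++)
		if (x->sched_mirred_dev[i] == dev)
			return false;		/* same device at an outer level: loop */

	x->sched_mirred_dev[x->sched_mirred_nest++] = dev;
	return true;
}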