author	Eric Dumazet <edumazet@google.com>	2025-09-09 12:19:42 +0000
committer	Jakub Kicinski <kuba@kernel.org>	2025-09-14 11:35:17 -0700
commit	fdae0ab67d57d480dc61e9fb45678bbdc3786711 (patch)
tree	9a45268e106c211e75c55a285db8569a9849747b /include/linux/netdevice.h
parent	278289bcec901663868048497e36c92560bd1b14 (diff)
net: use NUMA drop counters for softnet_data.dropped
Hosts under DoS attack can suffer from false sharing in enqueue_to_backlog(): atomic_inc(&sd->dropped). This is because sd->dropped can be touched from many cpus, possibly residing on different NUMA nodes.

Generalize the sk_drop_counters infrastructure added in commit c51613fa276f ("net: add sk->sk_drop_counters") and use it to replace softnet_data.dropped with the NUMA-friendly softnet_data.drop_counters.

This adds 64 bytes per cpu, maybe more in the future if we increase the number of counters (currently 2) per 'struct numa_drop_counters'.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250909121942.1202585-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
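For illustration, a minimal userspace sketch of the pattern this commit introduces. The 64-byte alignment constant and the numa_node_id() stub are assumptions standing in for the kernel's ____cacheline_aligned_in_smp and per-CPU node lookup; the real enqueue_to_backlog() call site lives in net/core/dev.c and is not part of this header diff.

#include <stdatomic.h>
#include <stdio.h>

/* Two counters on separate cache lines: writers on even- and odd-numbered
 * NUMA nodes update different lines, so they do not false-share. */
struct numa_drop_counters {
	_Alignas(64) atomic_int drops0;
	_Alignas(64) atomic_int drops1;
};

/* Stub: the kernel's numa_node_id() returns the node of the executing CPU. */
static int numa_node_id(void)
{
	return 0;
}

static void numa_drop_add(struct numa_drop_counters *ndc, int val)
{
	if (numa_node_id() % 2)
		atomic_fetch_add(&ndc->drops1, val);
	else
		atomic_fetch_add(&ndc->drops0, val);
}

static int numa_drop_read(const struct numa_drop_counters *ndc)
{
	return atomic_load(&ndc->drops0) + atomic_load(&ndc->drops1);
}

int main(void)
{
	struct numa_drop_counters ndc = { 0 };

	numa_drop_add(&ndc, 1);	/* record one dropped packet */
	printf("total drops: %d\n", numa_drop_read(&ndc));
	return 0;
}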
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--	include/linux/netdevice.h	28	+++++++++++++++++++++++++++-
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f3a3b761abfb..f5a840c07cf1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3459,6 +3459,32 @@ static inline bool dev_has_header(const struct net_device *dev)
return dev->header_ops && dev->header_ops->create;
}
+struct numa_drop_counters {
+ atomic_t drops0 ____cacheline_aligned_in_smp;
+ atomic_t drops1 ____cacheline_aligned_in_smp;
+};
+
+static inline int numa_drop_read(const struct numa_drop_counters *ndc)
+{
+ return atomic_read(&ndc->drops0) + atomic_read(&ndc->drops1);
+}
+
+static inline void numa_drop_add(struct numa_drop_counters *ndc, int val)
+{
+ int n = numa_node_id() % 2;
+
+ if (n)
+ atomic_add(val, &ndc->drops1);
+ else
+ atomic_add(val, &ndc->drops0);
+}
+
+static inline void numa_drop_reset(struct numa_drop_counters *ndc)
+{
+ atomic_set(&ndc->drops0, 0);
+ atomic_set(&ndc->drops1, 0);
+}
+
/*
* Incoming packets are placed on per-CPU queues
*/
@@ -3504,7 +3530,7 @@ struct softnet_data {
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
- atomic_t dropped ____cacheline_aligned_in_smp;
+ struct numa_drop_counters drop_counters;
/* Another possibly contended cache line */
spinlock_t defer_lock ____cacheline_aligned_in_smp;
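The read side simply sums both per-node counters via numa_drop_read(), so consumers that previously read sd->dropped see the same aggregate. A hedged sketch of such a reader (the actual softnet_stat reporting change happens outside this header, and the helper name here is hypothetical):

/* Hypothetical reader: total drops for one softnet_data instance,
 * summing both per-node counters through numa_drop_read() above. */
static unsigned int softnet_total_drops(const struct softnet_data *sd)
{
	return (unsigned int)numa_drop_read(&sd->drop_counters);
}

Note the node-parity split in numa_drop_add(): numa_node_id() % 2 maps all even nodes to drops0 and all odd nodes to drops1, which bounds the footprint at two cache lines per CPU regardless of how many NUMA nodes the machine has.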