author     Jakub Kicinski <kuba@kernel.org>   2026-01-29 19:38:27 -0800
committer  Jakub Kicinski <kuba@kernel.org>   2026-01-31 12:35:27 -0800
commit     fdf3f6800be36377e045e2448087f12132b88d2f (patch)
tree       820b5ab869b1c58ad0408e2de3102b3e49a6b80b /net
parent     6d06bc83a5ae8777a5f7a81c32dd75b8d9b2fe04 (diff)
net: don't touch dev->stats in BPF redirect paths
Gal reports that BPF redirect increments dev->stats.tx_errors on failure.
This is not correct: most modern drivers completely ignore dev->stats, so
these drops will be invisible to the user. Core code should use the
dedicated core stats, which are folded into device stats in dev_get_stats().

Note that we're switching from tx_errors to tx_dropped. Core only has
tx_dropped, hence presumably users already expect that counter to increment
for "stack" Tx issues.

Reported-by: Gal Pressman <gal@nvidia.com>
Link: https://lore.kernel.org/c5df3b60-246a-4030-9c9a-0a35cd1ca924@nvidia.com
Fixes: b4ab31414970 ("bpf: Add redirect_neigh helper as redirect drop-in")
Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260130033827.698841-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
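For context, a minimal sketch of the two counter paths the message contrasts. The helpers DEV_STATS_INC() and dev_core_stats_tx_dropped_inc() are the real kernel ones, but the surrounding function is an invented illustration, not code from this patch:

#include <linux/netdevice.h>

/* Illustrative only; not part of this patch. */
static void tx_fail_example(struct net_device *dev)
{
	/* Old pattern: bumps dev->stats.tx_errors directly.  A driver that
	 * implements ndo_get_stats64() reports its own counters and never
	 * folds dev->stats in, so this increment can be invisible.
	 */
	DEV_STATS_INC(dev, tx_errors);

	/* New pattern: per-CPU core stats.  dev_get_stats() always adds
	 * these on top of whatever the driver reports, so the drop shows
	 * up as tx_dropped regardless of the driver.
	 */
	dev_core_stats_tx_dropped_inc(dev);
}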
Diffstat (limited to 'net')
-rw-r--r--  net/core/filter.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index bcd73d9bd764..029e560e32ce 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2289,12 +2289,12 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
 	err = bpf_out_neigh_v6(net, skb, dev, nh);
 	if (unlikely(net_xmit_eval(err)))
-		DEV_STATS_INC(dev, tx_errors);
+		dev_core_stats_tx_dropped_inc(dev);
 	else
 		ret = NET_XMIT_SUCCESS;
 	goto out_xmit;
 out_drop:
-	DEV_STATS_INC(dev, tx_errors);
+	dev_core_stats_tx_dropped_inc(dev);
 	kfree_skb(skb);
 out_xmit:
 	return ret;
@@ -2396,12 +2396,12 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
 	err = bpf_out_neigh_v4(net, skb, dev, nh);
 	if (unlikely(net_xmit_eval(err)))
-		DEV_STATS_INC(dev, tx_errors);
+		dev_core_stats_tx_dropped_inc(dev);
 	else
 		ret = NET_XMIT_SUCCESS;
 	goto out_xmit;
 out_drop:
-	DEV_STATS_INC(dev, tx_errors);
+	dev_core_stats_tx_dropped_inc(dev);
 	kfree_skb(skb);
 out_xmit:
 	return ret;
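As a rough illustration of why the new counter is always visible, here is a hedged paraphrase of the core-stats folding performed by dev_get_stats() in net/core/dev.c. The function name fold_core_stats_example and the locals are invented, and the real kernel code differs in detail:

#include <linux/netdevice.h>

/* Sketch only; not the actual dev_get_stats() implementation. */
static void fold_core_stats_example(struct net_device *dev,
				    struct rtnl_link_stats64 *storage)
{
	const struct net_device_core_stats __percpu *p =
		READ_ONCE(dev->core_stats);
	int cpu;

	if (!p)		/* no core-stats event ever recorded on this dev */
		return;

	/* Sum the per-CPU counters on top of the driver-provided totals,
	 * so a dev_core_stats_tx_dropped_inc() from the BPF redirect path
	 * is reported even if the driver ignores dev->stats.
	 */
	for_each_possible_cpu(cpu) {
		const struct net_device_core_stats *cs = per_cpu_ptr(p, cpu);

		storage->rx_dropped   += READ_ONCE(cs->rx_dropped);
		storage->tx_dropped   += READ_ONCE(cs->tx_dropped);
		storage->rx_nohandler += READ_ONCE(cs->rx_nohandler);
	}
}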