From 4510d140524ca7d6e772db962e013f26f09a63b1 Mon Sep 17 00:00:00 2001 From: Dudu Lu Date: Mon, 13 Apr 2026 16:49:27 +0800 Subject: net/sched: act_mirred: fix wrong device for mac_header_xmit check in tcf_blockcast_redir MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In tcf_blockcast_redir(), when iterating block ports to redirect packets to multiple devices, the mac_header_xmit flag is queried from the wrong device. The loop sends to dev_prev but queries dev_is_mac_header_xmit(dev) — which is the NEXT device in the iteration, not the one being sent to. This causes tcf_mirred_to_dev() to make incorrect decisions about whether to push or pull the MAC header. When the block contains mixed device types (e.g., an ethernet veth and a tunnel device), intermediate devices get the wrong mac_header_xmit flag, leading to skb header corruption. In the worst case, skb_push_rcsum with an incorrect mac_len can exhaust headroom and panic. The last device in the loop is handled correctly (line 365-366 uses dev_is_mac_header_xmit(dev_prev)), confirming this is a copy-paste oversight for the intermediate devices. Fix by using dev_prev instead of dev for the mac_header_xmit query, consistent with the device actually being sent to. Fixes: 42f39036cda8 ("net/sched: act_mirred: Allow mirred to block") Signed-off-by: Dudu Lu Reviewed-by: Simon Horman Link: https://patch.msgid.link/20260413084927.71353-1-phx0fer@gmail.com Signed-off-by: Paolo Abeni --- net/sched/act_mirred.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 05e0b14b5773..2c5a7a321a94 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -354,7 +354,7 @@ static int tcf_blockcast_redir(struct sk_buff *skb, struct tcf_mirred *m, goto assign_prev; tcf_mirred_to_dev(skb, m, dev_prev, - dev_is_mac_header_xmit(dev), + dev_is_mac_header_xmit(dev_prev), mirred_eaction, retval); assign_prev: dev_prev = dev; -- cgit v1.2.3 From df4601653201de21b487c3e7fffd464790cab808 Mon Sep 17 00:00:00 2001 From: Zhengchuan Liang Date: Mon, 13 Apr 2026 17:08:46 +0800 Subject: net: bridge: use a stable FDB dst snapshot in RCU readers Local FDB entries can be rewritten in place by `fdb_delete_local()`, which updates `f->dst` to another port or to `NULL` while keeping the entry alive. Several bridge RCU readers inspect `f->dst`, including `br_fdb_fillbuf()` through the `brforward_read()` sysfs path. These readers currently load `f->dst` multiple times and can therefore observe inconsistent values across the check and later dereference. In `br_fdb_fillbuf()`, this means a concurrent local-FDB update can change `f->dst` after the NULL check and before the `port_no` dereference, leading to a NULL-ptr-deref. Fix this by taking a single `READ_ONCE()` snapshot of `f->dst` in each affected RCU reader and using that snapshot for the rest of the access sequence. Also publish the in-place `f->dst` updates in `fdb_delete_local()` with `WRITE_ONCE()` so the readers and writer use matching access patterns. 
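For readers unfamiliar with the pattern, the hazard is the classic double load of an RCU-protected pointer. A minimal userspace sketch of the before/after shape follows; the stand-in macros, struct names and main() are illustrative only, not the kernel's real definitions:

#include <stdio.h>

/* Userspace stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

struct port { int port_no; };
struct fdb_entry { struct port *dst; };

/* Buggy shape: f->dst is loaded twice, so a concurrent
 * fdb_delete_local() can rewrite it to NULL between the check
 * and the dereference. */
static int port_no_racy(const struct fdb_entry *f)
{
        if (!f->dst)                    /* load #1 */
                return -1;
        return f->dst->port_no;         /* load #2 may see NULL */
}

/* Fixed shape: one snapshot used for the whole access sequence. */
static int port_no_safe(const struct fdb_entry *f)
{
        struct port *dst = READ_ONCE(f->dst);

        if (!dst)
                return -1;
        return dst->port_no;
}

int main(void)
{
        struct port p = { .port_no = 7 };
        struct fdb_entry f = { .dst = &p };

        printf("%d %d\n", port_no_racy(&f), port_no_safe(&f));
        WRITE_ONCE(f.dst, NULL);        /* writer publishes the update */
        printf("%d\n", port_no_safe(&f));
        return 0;
}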
Fixes: 960b589f86c7 ("bridge: Properly check if local fdb entry can be deleted in br_fdb_change_mac_address") Cc: stable@kernel.org Reported-by: Yifan Wu Reported-by: Juefei Pu Co-developed-by: Yuan Tan Signed-off-by: Yuan Tan Suggested-by: Xin Liu Tested-by: Ren Wei Signed-off-by: Zhengchuan Liang Signed-off-by: Ren Wei Reviewed-by: Ido Schimmel Acked-by: Nikolay Aleksandrov Link: https://patch.msgid.link/6570fabb85ecadb8baaf019efe856f407711c7b9.1776043229.git.zcliangcn@gmail.com Signed-off-by: Paolo Abeni --- net/bridge/br_arp_nd_proxy.c | 8 +++++--- net/bridge/br_fdb.c | 28 ++++++++++++++++++---------- 2 files changed, 23 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c index 0c8a06cdd46f..deb1ab1f24b0 100644 --- a/net/bridge/br_arp_nd_proxy.c +++ b/net/bridge/br_arp_nd_proxy.c @@ -201,11 +201,12 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br, f = br_fdb_find_rcu(br, n->ha, vid); if (f) { + const struct net_bridge_port *dst = READ_ONCE(f->dst); bool replied = false; if ((p && (p->flags & BR_PROXYARP)) || - (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)) || - br_is_neigh_suppress_enabled(f->dst, vid)) { + (dst && (dst->flags & BR_PROXYARP_WIFI)) || + br_is_neigh_suppress_enabled(dst, vid)) { if (!vid) br_arp_send(br, p, skb->dev, sip, tip, sha, n->ha, sha, 0, 0); @@ -469,9 +470,10 @@ void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br, f = br_fdb_find_rcu(br, n->ha, vid); if (f) { + const struct net_bridge_port *dst = READ_ONCE(f->dst); bool replied = false; - if (br_is_neigh_suppress_enabled(f->dst, vid)) { + if (br_is_neigh_suppress_enabled(dst, vid)) { if (vid != 0) br_nd_send(br, p, skb, n, skb->vlan_proto, diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index e2c17f620f00..6eb3ab69a514 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -236,6 +236,7 @@ struct net_device *br_fdb_find_port(const struct net_device *br_dev, const unsigned char *addr, __u16 vid) { + const struct net_bridge_port *dst; struct net_bridge_fdb_entry *f; struct net_device *dev = NULL; struct net_bridge *br; @@ -248,8 +249,11 @@ struct net_device *br_fdb_find_port(const struct net_device *br_dev, br = netdev_priv(br_dev); rcu_read_lock(); f = br_fdb_find_rcu(br, addr, vid); - if (f && f->dst) - dev = f->dst->dev; + if (f) { + dst = READ_ONCE(f->dst); + if (dst) + dev = dst->dev; + } rcu_read_unlock(); return dev; @@ -346,7 +350,7 @@ static void fdb_delete_local(struct net_bridge *br, vg = nbp_vlan_group(op); if (op != p && ether_addr_equal(op->dev->dev_addr, addr) && (!vid || br_vlan_find(vg, vid))) { - f->dst = op; + WRITE_ONCE(f->dst, op); clear_bit(BR_FDB_ADDED_BY_USER, &f->flags); return; } @@ -357,7 +361,7 @@ static void fdb_delete_local(struct net_bridge *br, /* Maybe bridge device has same hw addr? 
*/ if (p && ether_addr_equal(br->dev->dev_addr, addr) && (!vid || (v && br_vlan_should_use(v)))) { - f->dst = NULL; + WRITE_ONCE(f->dst, NULL); clear_bit(BR_FDB_ADDED_BY_USER, &f->flags); return; } @@ -928,6 +932,7 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr) int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long maxnum, unsigned long skip) { + const struct net_bridge_port *dst; struct net_bridge_fdb_entry *f; struct __fdb_entry *fe = buf; unsigned long delta; @@ -944,7 +949,8 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, continue; /* ignore pseudo entry for local MAC address */ - if (!f->dst) + dst = READ_ONCE(f->dst); + if (!dst) continue; if (skip) { @@ -956,8 +962,8 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN); /* due to ABI compat need to split into hi/lo */ - fe->port_no = f->dst->port_no; - fe->port_hi = f->dst->port_no >> 8; + fe->port_no = dst->port_no; + fe->port_hi = dst->port_no >> 8; fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags); if (!test_bit(BR_FDB_STATIC, &f->flags)) { @@ -1083,9 +1089,11 @@ int br_fdb_dump(struct sk_buff *skb, rcu_read_lock(); hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) { + const struct net_bridge_port *dst = READ_ONCE(f->dst); + if (*idx < ctx->fdb_idx) goto skip; - if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) { + if (filter_dev && (!dst || dst->dev != filter_dev)) { if (filter_dev != dev) goto skip; /* !f->dst is a special case for bridge @@ -1093,10 +1101,10 @@ int br_fdb_dump(struct sk_buff *skb, * Therefore need a little more filtering * we only want to dump the !f->dst case */ - if (f->dst) + if (dst) goto skip; } - if (!filter_dev && f->dst) + if (!filter_dev && dst) goto skip; err = fdb_fill_info(skb, br, f, -- cgit v1.2.3 From f9e40664706927d7ae22a448a3383e23c38a4c0b Mon Sep 17 00:00:00 2001 From: Dudu Lu Date: Mon, 13 Apr 2026 19:00:41 +0800 Subject: net/sched: sch_cake: fix NAT destination port not being updated in cake_update_flowkeys MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit cake_update_flowkeys() is supposed to update the flow dissector keys with the NAT-translated addresses and ports from conntrack, so that CAKE's per-flow fairness correctly identifies post-NAT flows as belonging to the same connection. For the source port, this works correctly: keys->ports.src = port; But for the destination port, the assignment is reversed: port = keys->ports.dst; This means the NAT destination port is never updated in the flow keys. As a result, when multiple connections are NATed to the same destination, CAKE treats them as separate flows because the original (pre-NAT) destination ports differ. This breaks CAKE's NAT-aware flow isolation when using the "nat" mode. The bug was introduced in commit b0c19ed6088a ("sch_cake: Take advantage of skb->hash where appropriate") which refactored the original direct assignment into a compare-and-conditionally-update pattern, but wrote the destination port update backwards. Fix by reversing the assignment direction to match the source port pattern. 
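As a reduced, standalone illustration of the intended compare-and-conditionally-update shape (the struct and names below are illustrative, not the real sch_cake types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ports { uint16_t src, dst; };

/* Copy the post-NAT port from conntrack into the flow key and
 * report whether the key changed, mirroring the fixed logic. */
static bool update_dst_port(struct ports *keys, uint16_t nat_port)
{
        if (nat_port != keys->dst) {
                keys->dst = nat_port;   /* key <- conntrack, not the reverse */
                return true;            /* upd = true */
        }
        return false;
}

int main(void)
{
        struct ports keys = { .src = 40000, .dst = 8080 };

        /* conntrack reports the real (post-NAT) destination port, 443 */
        bool upd = update_dst_port(&keys, 443);

        printf("updated=%d dst=%u\n", upd, (unsigned)keys.dst);
        return 0;
}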
Fixes: b0c19ed6088a ("sch_cake: Take advantage of skb->hash where appropriate") Signed-off-by: Dudu Lu Acked-by: Toke Høiland-Jørgensen Link: https://patch.msgid.link/20260413110041.44704-1-phx0fer@gmail.com Signed-off-by: Paolo Abeni --- net/sched/sch_cake.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index ffea9fbd522d..02e1fa4577ae 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -619,7 +619,7 @@ static bool cake_update_flowkeys(struct flow_keys *keys, } port = rev ? tuple.src.u.all : tuple.dst.u.all; if (port != keys->ports.dst) { - port = keys->ports.dst; + keys->ports.dst = port; upd = true; } } -- cgit v1.2.3 From 29c95185ba32b621fbc3800fb86e7dc3edf5c2be Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Mon, 13 Apr 2026 19:45:19 +0800 Subject: nexthop: fix IPv6 route referencing IPv4 nexthop syzbot reported a panic [1] [2]. When an IPv6 nexthop is replaced with an IPv4 nexthop, the has_v4 flag of all groups containing this nexthop is not updated. This is because nh_group_v4_update is only called when replacing AF_INET to AF_INET6, but the reverse direction (AF_INET6 to AF_INET) is missed. This allows a stale has_v4=false to bypass fib6_check_nexthop, causing IPv6 routes to be attached to groups that effectively contain only AF_INET members. Subsequent route lookups then call nexthop_fib6_nh() which returns NULL for the AF_INET member, leading to a NULL pointer dereference. Fix by calling nh_group_v4_update whenever the family changes, not just AF_INET to AF_INET6. Reproducer: # AF_INET6 blackhole ip -6 nexthop add id 1 blackhole # group with has_v4=false ip nexthop add id 100 group 1 # replace with AF_INET (no -6), has_v4 stays false ip nexthop replace id 1 blackhole # pass stale has_v4 check ip -6 route add 2001:db8::/64 nhid 100 # panic ping -6 2001:db8::1 [1] https://syzkaller.appspot.com/bug?id=e17283eb2f8dcf3dd9b47fe6f67a95f71faadad0 [2] https://syzkaller.appspot.com/bug?id=8699b6ae54c9f35837d925686208402949e12ef3 Fixes: 7bf4796dd099 ("nexthops: add support for replace") Signed-off-by: Jiayuan Chen Reviewed-by: David Ahern Link: https://patch.msgid.link/20260413114522.147784-1-jiayuan.chen@linux.dev Signed-off-by: Paolo Abeni --- net/ipv4/nexthop.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 904a060a7330..f92fcc39fc4c 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -2469,10 +2469,10 @@ static int replace_nexthop_single(struct net *net, struct nexthop *old, goto err_notify; } - /* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially + /* When replacing a nexthop with one of a different family, potentially * update IPv4 indication in all the groups using the nexthop. */ - if (oldi->family == AF_INET && newi->family == AF_INET6) { + if (oldi->family != newi->family) { list_for_each_entry(nhge, &old->grp_list, nh_list) { struct nexthop *nhp = nhge->nh_parent; struct nh_group *nhg; -- cgit v1.2.3 From 52bcb57a4e8a0865a76c587c2451906342ae1b2d Mon Sep 17 00:00:00 2001 From: Dudu Lu Date: Mon, 13 Apr 2026 21:14:09 +0800 Subject: vsock/virtio: fix accept queue count leak on transport mismatch virtio_transport_recv_listen() calls sk_acceptq_added() before vsock_assign_transport(). If vsock_assign_transport() fails or selects a different transport, the error path returns without calling sk_acceptq_removed(), permanently incrementing sk_ack_backlog. 
After approximately backlog+1 such failures, sk_acceptq_is_full() returns true, causing the listener to reject all new connections. Fix by moving sk_acceptq_added() to after the transport validation, matching the pattern used by vmci_transport and hyperv_transport. Fixes: c0cfa2d8a788 ("vsock: add multi-transports support") Signed-off-by: Dudu Lu Reviewed-by: Bobby Eshleman Reviewed-by: Luigi Leonardi Reviewed-by: Stefano Garzarella Acked-by: Michael S. Tsirkin Link: https://patch.msgid.link/20260413131409.19022-1-phx0fer@gmail.com Signed-off-by: Paolo Abeni --- net/vmw_vsock/virtio_transport_common.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'net') diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index a152a9e208d0..e96e9893b21b 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -1558,8 +1558,6 @@ virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb, return -ENOMEM; } - sk_acceptq_added(sk); - lock_sock_nested(child, SINGLE_DEPTH_NESTING); child->sk_state = TCP_ESTABLISHED; @@ -1581,6 +1579,7 @@ virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb, return ret; } + sk_acceptq_added(sk); if (virtio_transport_space_update(child, skb)) child->sk_write_space(child); -- cgit v1.2.3 From 105425b1969c5affe532713cfac1c0b320d7ac2b Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Fri, 10 Apr 2026 18:57:57 -0700 Subject: net/sched: taprio: fix use-after-free in advance_sched() on schedule switch In advance_sched(), when should_change_schedules() returns true, switch_schedules() is called to promote the admin schedule to oper. switch_schedules() queues the old oper schedule for RCU freeing via call_rcu(), but 'next' still points into an entry of the old oper schedule. The subsequent 'next->end_time = end_time' and rcu_assign_pointer(q->current_entry, next) are use-after-free. Fix this by selecting 'next' from the new oper schedule immediately after switch_schedules(), and using its pre-calculated end_time. setup_first_end_time() sets the first entry's end_time to base_time + interval when the schedule is installed, so the value is already correct. The deleted 'end_time = sched_base_time(admin)' assignment was also harmful independently: it would overwrite the new first entry's pre-calculated end_time with just base_time. Fixes: a3d43c0d56f1 ("taprio: Add support adding an admin schedule") Reported-by: Junxi Qian Signed-off-by: Vinicius Costa Gomes Signed-off-by: Jakub Kicinski --- net/sched/sch_taprio.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 8e3752811950..a47a09d76400 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -972,11 +972,12 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer) } if (should_change_schedules(admin, oper, end_time)) { - /* Set things so the next time this runs, the new - * schedule runs. - */ - end_time = sched_base_time(admin); switch_schedules(q, &admin, &oper); + /* After changing schedules, the next entry is the first one + * in the new schedule, with a pre-calculated end_time. 
+ */ + next = list_first_entry(&oper->entries, struct sched_entry, list); + end_time = next->end_time; } next->end_time = end_time; -- cgit v1.2.3 From 0f99e0c3e19badaf3fdced0d3feba623e59eed41 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 14 Apr 2026 16:10:35 -0700 Subject: net: dsa: remove redundant netdev_lock_ops() from conduit ethtool ops DSA replaces the conduit (master) device's ethtool_ops with its own wrappers that aggregate stats from both the conduit and DSA switch ports. Taking the lock again inside the DSA wrappers causes a deadlock. Stumbled upon this when booting qemu with fbnic and CONFIG_NET_DSA_LOOP=y (which looks like some kind of testing device that auto-populates the ports of eth0). `ethtool -i` is enough to deadlock. This means we have basically zero coverage for DSA stuff with real ops locked devs. Remove the redundant netdev_lock_ops()/netdev_unlock_ops() calls from the DSA conduit ethtool wrappers. Fixes: 2bcf4772e45a ("net: ethtool: try to protect all callback with netdev instance lock") Signed-off-by: Stanislav Fomichev Reviewed-by: Maxime Chevallier Link: https://patch.msgid.link/20260414231035.1917035-1-sdf@fomichev.me Signed-off-by: Jakub Kicinski --- net/dsa/conduit.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) (limited to 'net') diff --git a/net/dsa/conduit.c b/net/dsa/conduit.c index a1b044467bd6..8398d72d7e4d 100644 --- a/net/dsa/conduit.c +++ b/net/dsa/conduit.c @@ -27,9 +27,7 @@ static int dsa_conduit_get_regs_len(struct net_device *dev) int len; if (ops && ops->get_regs_len) { - netdev_lock_ops(dev); len = ops->get_regs_len(dev); - netdev_unlock_ops(dev); if (len < 0) return len; ret += len; @@ -60,15 +58,11 @@ static void dsa_conduit_get_regs(struct net_device *dev, int len; if (ops && ops->get_regs_len && ops->get_regs) { - netdev_lock_ops(dev); len = ops->get_regs_len(dev); - if (len < 0) { - netdev_unlock_ops(dev); + if (len < 0) return; - } regs->len = len; ops->get_regs(dev, regs, data); - netdev_unlock_ops(dev); data += regs->len; } @@ -115,10 +109,8 @@ static void dsa_conduit_get_ethtool_stats(struct net_device *dev, int count, mcount = 0; if (ops && ops->get_sset_count && ops->get_ethtool_stats) { - netdev_lock_ops(dev); mcount = ops->get_sset_count(dev, ETH_SS_STATS); ops->get_ethtool_stats(dev, stats, data); - netdev_unlock_ops(dev); } list_for_each_entry(dp, &dst->ports, list) { @@ -149,10 +141,8 @@ static void dsa_conduit_get_ethtool_phy_stats(struct net_device *dev, if (count >= 0) phy_ethtool_get_stats(dev->phydev, stats, data); } else if (ops && ops->get_sset_count && ops->get_ethtool_phy_stats) { - netdev_lock_ops(dev); count = ops->get_sset_count(dev, ETH_SS_PHY_STATS); ops->get_ethtool_phy_stats(dev, stats, data); - netdev_unlock_ops(dev); } if (count < 0) @@ -176,13 +166,11 @@ static int dsa_conduit_get_sset_count(struct net_device *dev, int sset) struct dsa_switch_tree *dst = cpu_dp->dst; int count = 0; - netdev_lock_ops(dev); if (sset == ETH_SS_PHY_STATS && dev->phydev && (!ops || !ops->get_ethtool_phy_stats)) count = phy_ethtool_get_sset_count(dev->phydev); else if (ops && ops->get_sset_count) count = ops->get_sset_count(dev, sset); - netdev_unlock_ops(dev); if (count < 0) count = 0; @@ -239,7 +227,6 @@ static void dsa_conduit_get_strings(struct net_device *dev, u32 stringset, struct dsa_switch_tree *dst = cpu_dp->dst; int count, mcount = 0; - netdev_lock_ops(dev); if (stringset == ETH_SS_PHY_STATS && dev->phydev && !ops->get_ethtool_phy_stats) { mcount = 
phy_ethtool_get_sset_count(dev->phydev); @@ -253,7 +240,6 @@ static void dsa_conduit_get_strings(struct net_device *dev, u32 stringset, mcount = 0; ops->get_strings(dev, stringset, data); } - netdev_unlock_ops(dev); list_for_each_entry(dp, &dst->ports, list) { if (!dsa_port_is_dsa(dp) && !dsa_port_is_cpu(dp)) -- cgit v1.2.3 From 080f22f5d30233faf3d83be3098f35b8be9b7a00 Mon Sep 17 00:00:00 2001 From: Luigi Leonardi Date: Wed, 15 Apr 2026 17:09:28 +0200 Subject: vsock/virtio: fix MSG_PEEK ignoring skb offset when calculating bytes to copy `virtio_transport_stream_do_peek()` does not account for the skb offset when computing the number of bytes to copy. This means that, after a partial recv() that advances the offset, a peek requesting more bytes than are available in the sk_buff causes `skb_copy_datagram_iter()` to go past the valid payload, resulting in a -EFAULT. The dequeue path already handles this correctly. Apply the same logic to the peek path. Fixes: 0df7cd3c13e4 ("vsock/virtio/vhost: read data from non-linear skb") Reviewed-by: Stefano Garzarella Acked-by: Arseniy Krasnov Signed-off-by: Luigi Leonardi Link: https://patch.msgid.link/20260415-fix_peek-v4-1-8207e872759e@redhat.com Signed-off-by: Jakub Kicinski --- net/vmw_vsock/virtio_transport_common.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index e96e9893b21b..0742091beae7 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -545,9 +545,8 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk, skb_queue_walk(&vvs->rx_queue, skb) { size_t bytes; - bytes = len - total; - if (bytes > skb->len) - bytes = skb->len; + bytes = min_t(size_t, len - total, + skb->len - VIRTIO_VSOCK_SKB_CB(skb)->offset); spin_unlock_bh(&vvs->rx_lock); -- cgit v1.2.3 From 267bf3cf9a6f0ffb98b8afd983c1950e835f07c9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:06 +0000 Subject: tcp: annotate data-races in tcp_get_info_chrono_stats() tcp_get_timestamping_opt_stats() does not own the socket lock, this is intentional. It calls tcp_get_info_chrono_stats() while other threads could change chrono fields in tcp_chrono_set(). I do not think we need coherent TCP socket state snapshot in tcp_get_timestamping_opt_stats(), I chose to only add annotations to keep KCSAN happy. Fixes: 1c885808e456 ("tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-2-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1a494d18c5fd..7b7812cb710f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4191,12 +4191,18 @@ static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, struct tcp_info *info) { u64 stats[__TCP_CHRONO_MAX], total = 0; - enum tcp_chrono i; + enum tcp_chrono i, cur; + /* Following READ_ONCE()s pair with WRITE_ONCE()s in tcp_chrono_set(). + * This is because socket lock might not be owned by us at this point. + * This is best effort, tcp_get_timestamping_opt_stats() can + * see wrong values. A real fix would be too costly for TCP fast path. 
+ */ + cur = READ_ONCE(tp->chrono_type); for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) { - stats[i] = tp->chrono_stat[i - 1]; - if (i == tp->chrono_type) - stats[i] += tcp_jiffies32 - tp->chrono_start; + stats[i] = READ_ONCE(tp->chrono_stat[i - 1]); + if (i == cur) + stats[i] += tcp_jiffies32 - READ_ONCE(tp->chrono_start); stats[i] *= USEC_PER_SEC / HZ; total += stats[i]; } -- cgit v1.2.3 From 21e92a38cfd891538598ba8f805e0165a820d532 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:07 +0000 Subject: tcp: add data-race annotations around tp->data_segs_out and tp->total_retrans tcp_get_timestamping_opt_stats() intentionally runs lockless, we must add READ_ONCE() and WRITE_ONCE() annotations to keep KCSAN happy. Fixes: 7e98102f4897 ("tcp: record pkts sent and retransmistted") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-3-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 4 ++-- net/ipv4/tcp_output.c | 8 +++++--- 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 7b7812cb710f..e39e0734d958 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4434,9 +4434,9 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED, info.tcpi_sndbuf_limited, TCP_NLA_PAD); nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT, - tp->data_segs_out, TCP_NLA_PAD); + READ_ONCE(tp->data_segs_out), TCP_NLA_PAD); nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, - tp->total_retrans, TCP_NLA_PAD); + READ_ONCE(tp->total_retrans), TCP_NLA_PAD); rate = READ_ONCE(sk->sk_pacing_rate); rate64 = (rate != ~0UL) ? rate : ~0ULL; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 8e99687526a6..d8e8bba2d03a 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1688,7 +1688,8 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, if (skb->len != tcp_header_size) { tcp_event_data_sent(tp, sk); - tp->data_segs_out += tcp_skb_pcount(skb); + WRITE_ONCE(tp->data_segs_out, + tp->data_segs_out + tcp_skb_pcount(skb)); tp->bytes_sent += skb->len - tcp_header_size; } @@ -3642,7 +3643,7 @@ start: TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); - tp->total_retrans += segs; + WRITE_ONCE(tp->total_retrans, tp->total_retrans + segs); tp->bytes_retrans += skb->len; /* make sure skb->data is aligned on arches that require it @@ -4646,7 +4647,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) * However in this case, we are dealing with a passive fastopen * socket thus we can change total_retrans value. */ - tcp_sk_rw(sk)->total_retrans++; + WRITE_ONCE(tcp_sk_rw(sk)->total_retrans, + tcp_sk_rw(sk)->total_retrans + 1); } trace_tcp_retransmit_synack(sk, req); WRITE_ONCE(req->num_retrans, req->num_retrans + 1); -- cgit v1.2.3 From 829ba1f329cb7cbd56d599a6d225997fba66dc32 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:08 +0000 Subject: tcp: add data-races annotations around tp->reordering, tp->snd_cwnd tcp_get_timestamping_opt_stats() intentionally runs lockless, we must add READ_ONCE(), WRITE_ONCE() data_race() annotations to keep KCSAN happy. 
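Since the same annotation pattern recurs throughout this series, a generic userspace sketch may help (stand-in macros and an illustrative field name, not the real tcp_sock layout). The annotations do not make the lockless snapshot coherent; they only keep each access single and untorn at the compiler level, and mark the data race as intentional for KCSAN:

#include <stdio.h>

/* Userspace stand-ins for the kernel accessors. */
#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

struct stats { unsigned int reordering; };

/* Writer, serialized by the socket lock: only the store needs the
 * annotation, since no other writer can race with the computation. */
static void writer_update(struct stats *s, unsigned int val)
{
        WRITE_ONCE(s->reordering, val);
}

/* Lockless reader (the tcp_get_timestamping_opt_stats() role): the
 * single annotated load cannot be torn or refetched by the compiler. */
static unsigned int reader_sample(const struct stats *s)
{
        return READ_ONCE(s->reordering);
}

int main(void)
{
        struct stats s = { .reordering = 3 };

        writer_update(&s, 300);
        printf("%u\n", reader_sample(&s));
        return 0;
}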
Fixes: bb7c19f96012 ("tcp: add related fields into SCM_TIMESTAMPING_OPT_STATS") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-4-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 8 ++++---- net/ipv4/tcp_input.c | 14 ++++++++------ net/ipv4/tcp_metrics.c | 2 +- 3 files changed, 13 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index e39e0734d958..24ba80d244b1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4445,13 +4445,13 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, rate64 = tcp_compute_delivery_rate(tp); nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD); - nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp)); - nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); - nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); + nla_put_u32(stats, TCP_NLA_SND_CWND, READ_ONCE(tp->snd_cwnd)); + nla_put_u32(stats, TCP_NLA_REORDERING, READ_ONCE(tp->reordering)); + nla_put_u32(stats, TCP_NLA_MIN_RTT, data_race(tcp_min_rtt(tp))); nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, READ_ONCE(inet_csk(sk)->icsk_retransmits)); - nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); + nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, data_race(!!tp->rate_app_limited)); nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 021f745747c5..6bb6bf049a35 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1293,8 +1293,9 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq, tp->sacked_out, tp->undo_marker ? tp->undo_retrans : 0); #endif - tp->reordering = min_t(u32, (metric + mss - 1) / mss, - READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)); + WRITE_ONCE(tp->reordering, + min_t(u32, (metric + mss - 1) / mss, + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering))); } /* This exciting event is worth to be remembered. 
8) */ @@ -2439,8 +2440,9 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend) if (!tcp_limit_reno_sacked(tp)) return; - tp->reordering = min_t(u32, tp->packets_out + addend, - READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)); + WRITE_ONCE(tp->reordering, + min_t(u32, tp->packets_out + addend, + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering))); tp->reord_seen++; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER); } @@ -2579,8 +2581,8 @@ void tcp_enter_loss(struct sock *sk) reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering); if (icsk->icsk_ca_state <= TCP_CA_Disorder && tp->sacked_out >= reordering) - tp->reordering = min_t(unsigned int, tp->reordering, - reordering); + WRITE_ONCE(tp->reordering, + min_t(unsigned int, tp->reordering, reordering)); tcp_set_ca_state(sk, TCP_CA_Loss); tp->high_seq = tp->snd_nxt; diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 06b1d5d3b6df..7a9d6d9006f6 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -496,7 +496,7 @@ void tcp_init_metrics(struct sock *sk) } val = tcp_metric_get(tm, TCP_METRIC_REORDERING); if (val && tp->reordering != val) - tp->reordering = val; + WRITE_ONCE(tp->reordering, val); crtt = tcp_metric_get(tm, TCP_METRIC_RTT); rcu_read_unlock(); -- cgit v1.2.3 From fd571afb05ebaeac5d8f09460a0640d4cf6755f8 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:09 +0000 Subject: tcp: annotate data-races around tp->snd_ssthresh tcp_get_timestamping_opt_stats() intentionally runs lockless, we must add READ_ONCE() and WRITE_ONCE() annotations to keep KCSAN happy. Fixes: 7156d194a077 ("tcp: add snd_ssthresh stat in SCM_TIMESTAMPING_OPT_STATS") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-5-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/core/filter.c | 2 +- net/ipv4/tcp.c | 4 ++-- net/ipv4/tcp_bbr.c | 6 +++--- net/ipv4/tcp_bic.c | 2 +- net/ipv4/tcp_cdg.c | 4 ++-- net/ipv4/tcp_cubic.c | 6 +++--- net/ipv4/tcp_dctcp.c | 2 +- net/ipv4/tcp_input.c | 8 ++++---- net/ipv4/tcp_metrics.c | 4 ++-- net/ipv4/tcp_nv.c | 4 ++-- net/ipv4/tcp_output.c | 4 ++-- net/ipv4/tcp_vegas.c | 9 +++++---- net/ipv4/tcp_westwood.c | 4 ++-- net/ipv4/tcp_yeah.c | 3 ++- 14 files changed, 32 insertions(+), 30 deletions(-) (limited to 'net') diff --git a/net/core/filter.c b/net/core/filter.c index fcfcb72663ca..3b5609fb96de 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5396,7 +5396,7 @@ static int bpf_sol_tcp_setsockopt(struct sock *sk, int optname, if (val <= 0) return -EINVAL; tp->snd_cwnd_clamp = val; - tp->snd_ssthresh = val; + WRITE_ONCE(tp->snd_ssthresh, val); break; case TCP_BPF_DELACK_MAX: timeout = usecs_to_jiffies(val); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 24ba80d244b1..802a9ea05211 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3425,7 +3425,7 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_rto = TCP_TIMEOUT_INIT; WRITE_ONCE(icsk->icsk_rto_min, TCP_RTO_MIN); WRITE_ONCE(icsk->icsk_delack_max, TCP_DELACK_MAX); - tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; + WRITE_ONCE(tp->snd_ssthresh, TCP_INFINITE_SSTHRESH); tcp_snd_cwnd_set(tp, TCP_INIT_CWND); tp->snd_cwnd_cnt = 0; tp->is_cwnd_limited = 0; @@ -4452,7 +4452,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, READ_ONCE(inet_csk(sk)->icsk_retransmits)); nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, data_race(!!tp->rate_app_limited)); - nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, 
tp->snd_ssthresh); + nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, READ_ONCE(tp->snd_ssthresh)); nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 1ddc20a399b0..aec7805b1d37 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -897,8 +897,8 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs) if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) { bbr->mode = BBR_DRAIN; /* drain queue we created */ - tcp_sk(sk)->snd_ssthresh = - bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT); + WRITE_ONCE(tcp_sk(sk)->snd_ssthresh, + bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT)); } /* fall through to check if in-flight is already small: */ if (bbr->mode == BBR_DRAIN && bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <= @@ -1043,7 +1043,7 @@ __bpf_kfunc static void bbr_init(struct sock *sk) struct bbr *bbr = inet_csk_ca(sk); bbr->prior_cwnd = 0; - tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; + WRITE_ONCE(tp->snd_ssthresh, TCP_INFINITE_SSTHRESH); bbr->rtt_cnt = 0; bbr->next_rtt_delivered = tp->delivered; bbr->prev_ca_state = TCP_CA_Open; diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index 58358bf92e1b..65444ff14241 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -74,7 +74,7 @@ static void bictcp_init(struct sock *sk) bictcp_reset(ca); if (initial_ssthresh) - tcp_sk(sk)->snd_ssthresh = initial_ssthresh; + WRITE_ONCE(tcp_sk(sk)->snd_ssthresh, initial_ssthresh); } /* diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c index ceabfd690a29..0812c390aee5 100644 --- a/net/ipv4/tcp_cdg.c +++ b/net/ipv4/tcp_cdg.c @@ -162,7 +162,7 @@ static void tcp_cdg_hystart_update(struct sock *sk) NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPHYSTARTTRAINCWND, tcp_snd_cwnd(tp)); - tp->snd_ssthresh = tcp_snd_cwnd(tp); + WRITE_ONCE(tp->snd_ssthresh, tcp_snd_cwnd(tp)); return; } } @@ -181,7 +181,7 @@ static void tcp_cdg_hystart_update(struct sock *sk) NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPHYSTARTDELAYCWND, tcp_snd_cwnd(tp)); - tp->snd_ssthresh = tcp_snd_cwnd(tp); + WRITE_ONCE(tp->snd_ssthresh, tcp_snd_cwnd(tp)); } } } diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index ab78b5ae8d0e..119bf8cbb007 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -136,7 +136,7 @@ __bpf_kfunc static void cubictcp_init(struct sock *sk) bictcp_hystart_reset(sk); if (!hystart && initial_ssthresh) - tcp_sk(sk)->snd_ssthresh = initial_ssthresh; + WRITE_ONCE(tcp_sk(sk)->snd_ssthresh, initial_ssthresh); } __bpf_kfunc static void cubictcp_cwnd_event_tx_start(struct sock *sk) @@ -420,7 +420,7 @@ static void hystart_update(struct sock *sk, u32 delay) NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPHYSTARTTRAINCWND, tcp_snd_cwnd(tp)); - tp->snd_ssthresh = tcp_snd_cwnd(tp); + WRITE_ONCE(tp->snd_ssthresh, tcp_snd_cwnd(tp)); } } } @@ -440,7 +440,7 @@ static void hystart_update(struct sock *sk, u32 delay) NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPHYSTARTDELAYCWND, tcp_snd_cwnd(tp)); - tp->snd_ssthresh = tcp_snd_cwnd(tp); + WRITE_ONCE(tp->snd_ssthresh, tcp_snd_cwnd(tp)); } } } diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 96c99999e09d..274e628e7cf8 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -177,7 +177,7 @@ static void dctcp_react_to_loss(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); ca->loss_cwnd = tcp_snd_cwnd(tp); - tp->snd_ssthresh = max(tcp_snd_cwnd(tp) >> 1U, 2U); + WRITE_ONCE(tp->snd_ssthresh, max(tcp_snd_cwnd(tp) >> 1U, 2U)); } 
__bpf_kfunc static void dctcp_state(struct sock *sk, u8 new_state) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 6bb6bf049a35..c6361447535f 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2567,7 +2567,7 @@ void tcp_enter_loss(struct sock *sk) (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->prior_cwnd = tcp_snd_cwnd(tp); - tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); + WRITE_ONCE(tp->snd_ssthresh, icsk->icsk_ca_ops->ssthresh(sk)); tcp_ca_event(sk, CA_EVENT_LOSS); tcp_init_undo(tp); } @@ -2860,7 +2860,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss) tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk)); if (tp->prior_ssthresh > tp->snd_ssthresh) { - tp->snd_ssthresh = tp->prior_ssthresh; + WRITE_ONCE(tp->snd_ssthresh, tp->prior_ssthresh); tcp_ecn_withdraw_cwr(tp); } } @@ -2978,7 +2978,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk) tp->prior_cwnd = tcp_snd_cwnd(tp); tp->prr_delivered = 0; tp->prr_out = 0; - tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); + WRITE_ONCE(tp->snd_ssthresh, inet_csk(sk)->icsk_ca_ops->ssthresh(sk)); tcp_ecn_queue_cwr(tp); } @@ -3120,7 +3120,7 @@ static void tcp_non_congestion_loss_retransmit(struct sock *sk) if (icsk->icsk_ca_state != TCP_CA_Loss) { tp->high_seq = tp->snd_nxt; - tp->snd_ssthresh = tcp_current_ssthresh(sk); + WRITE_ONCE(tp->snd_ssthresh, tcp_current_ssthresh(sk)); tp->prior_ssthresh = 0; tp->undo_marker = 0; tcp_set_ca_state(sk, TCP_CA_Loss); diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 7a9d6d9006f6..dc0c081fc1f3 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -490,9 +490,9 @@ void tcp_init_metrics(struct sock *sk) val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ? 
0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH); if (val) { - tp->snd_ssthresh = val; + WRITE_ONCE(tp->snd_ssthresh, val); if (tp->snd_ssthresh > tp->snd_cwnd_clamp) - tp->snd_ssthresh = tp->snd_cwnd_clamp; + WRITE_ONCE(tp->snd_ssthresh, tp->snd_cwnd_clamp); } val = tcp_metric_get(tm, TCP_METRIC_REORDERING); if (val && tp->reordering != val) diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c index a60662f4bdf9..f345897a68df 100644 --- a/net/ipv4/tcp_nv.c +++ b/net/ipv4/tcp_nv.c @@ -396,8 +396,8 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample) /* We have enough data to determine we are congested */ ca->nv_allow_cwnd_growth = 0; - tp->snd_ssthresh = - (nv_ssthresh_factor * max_win) >> 3; + WRITE_ONCE(tp->snd_ssthresh, + (nv_ssthresh_factor * max_win) >> 3); if (tcp_snd_cwnd(tp) - max_win > 2) { /* gap > 2, we do exponential cwnd decrease */ int dec; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index d8e8bba2d03a..2663505a0dd7 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -171,7 +171,7 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta) tcp_ca_event(sk, CA_EVENT_CWND_RESTART); - tp->snd_ssthresh = tcp_current_ssthresh(sk); + WRITE_ONCE(tp->snd_ssthresh, tcp_current_ssthresh(sk)); restart_cwnd = min(restart_cwnd, cwnd); while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) @@ -2143,7 +2143,7 @@ static void tcp_cwnd_application_limited(struct sock *sk) u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); u32 win_used = max(tp->snd_cwnd_used, init_win); if (win_used < tcp_snd_cwnd(tp)) { - tp->snd_ssthresh = tcp_current_ssthresh(sk); + WRITE_ONCE(tp->snd_ssthresh, tcp_current_ssthresh(sk)); tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1); } tp->snd_cwnd_used = 0; diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index 950a66966059..574453af6bc0 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c @@ -245,7 +245,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) */ tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), (u32)target_cwnd + 1)); - tp->snd_ssthresh = tcp_vegas_ssthresh(tp); + WRITE_ONCE(tp->snd_ssthresh, + tcp_vegas_ssthresh(tp)); } else if (tcp_in_slow_start(tp)) { /* Slow start. */ @@ -261,8 +262,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) * we slow down. */ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1); - tp->snd_ssthresh - = tcp_vegas_ssthresh(tp); + WRITE_ONCE(tp->snd_ssthresh, + tcp_vegas_ssthresh(tp)); } else if (diff < alpha) { /* We don't have enough extra packets * in the network, so speed up. @@ -280,7 +281,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp) tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp); - tp->snd_ssthresh = tcp_current_ssthresh(sk); + WRITE_ONCE(tp->snd_ssthresh, tcp_current_ssthresh(sk)); } /* Wipe the slate clean for the next RTT. 
*/ diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c index c6e97141eef2..b5a42adfd6ca 100644 --- a/net/ipv4/tcp_westwood.c +++ b/net/ipv4/tcp_westwood.c @@ -244,11 +244,11 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event) switch (event) { case CA_EVENT_COMPLETE_CWR: - tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); + WRITE_ONCE(tp->snd_ssthresh, tcp_westwood_bw_rttmin(sk)); tcp_snd_cwnd_set(tp, tp->snd_ssthresh); break; case CA_EVENT_LOSS: - tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); + WRITE_ONCE(tp->snd_ssthresh, tcp_westwood_bw_rttmin(sk)); /* Update RTT_min when next ack arrives */ w->reset_rtt_min = 1; break; diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index b22b3dccd05e..9e581154f18f 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -147,7 +147,8 @@ do_vegas: tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp), yeah->reno_count)); - tp->snd_ssthresh = tcp_snd_cwnd(tp); + WRITE_ONCE(tp->snd_ssthresh, + tcp_snd_cwnd(tp)); } if (yeah->reno_count <= 2) -- cgit v1.2.3 From faa886ad3ce5fc8f5156493491fe189b2b726bc9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:10 +0000 Subject: tcp: annotate data-races around tp->delivered and tp->delivered_ce tcp_get_timestamping_opt_stats() intentionally runs lockless, we must add READ_ONCE() and WRITE_ONCE() annotations to keep KCSAN happy. Fixes: feb5f2ec6464 ("tcp: export packets delivery info") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-6-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 4 ++-- net/ipv4/tcp_input.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 802a9ea05211..0aabd02d4496 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4453,8 +4453,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, READ_ONCE(inet_csk(sk)->icsk_retransmits)); nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, data_race(!!tp->rate_app_limited)); nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, READ_ONCE(tp->snd_ssthresh)); - nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); - nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); + nla_put_u32(stats, TCP_NLA_DELIVERED, READ_ONCE(tp->delivered)); + nla_put_u32(stats, TCP_NLA_DELIVERED_CE, READ_ONCE(tp->delivered_ce)); nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c6361447535f..63ff89210a72 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -476,14 +476,14 @@ static bool tcp_accecn_process_option(struct tcp_sock *tp, static void tcp_count_delivered_ce(struct tcp_sock *tp, u32 ecn_count) { - tp->delivered_ce += ecn_count; + WRITE_ONCE(tp->delivered_ce, tp->delivered_ce + ecn_count); } /* Updates the delivered and delivered_ce counts */ static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, bool ece_ack) { - tp->delivered += delivered; + WRITE_ONCE(tp->delivered, tp->delivered + delivered); if (tcp_ecn_mode_rfc3168(tp) && ece_ack) tcp_count_delivered_ce(tp, delivered); } @@ -6779,7 +6779,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); /* SYN-data is counted as two separate packets in tcp_ack() */ if (tp->delivered > 1) - --tp->delivered; + WRITE_ONCE(tp->delivered, tp->delivered - 1); } tcp_fastopen_add_skb(sk, 
synack); @@ -7212,7 +7212,7 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) SKB_DR_SET(reason, NOT_SPECIFIED); switch (sk->sk_state) { case TCP_SYN_RECV: - tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */ + WRITE_ONCE(tp->delivered, tp->delivered + 1); /* SYN-ACK delivery isn't tracked in tcp_ack */ if (!tp->srtt_us) tcp_synack_rtt_meas(sk, req); -- cgit v1.2.3 From 124199444de467767175a9004e1574dc42523e62 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:11 +0000 Subject: tcp: add data-race annotations for TCP_NLA_SNDQ_SIZE tcp_get_timestamping_opt_stats() intentionally runs lockless, we must add READ_ONCE() and WRITE_ONCE() annotations to keep KCSAN happy. Fixes: 87ecc95d81d9 ("tcp: add send queue size stat in SCM_TIMESTAMPING_OPT_STATS") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-7-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 4 +++- net/ipv4/tcp_input.c | 4 ++-- net/ipv4/tcp_output.c | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 0aabd02d4496..729936d13a5c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4456,7 +4456,9 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, nla_put_u32(stats, TCP_NLA_DELIVERED, READ_ONCE(tp->delivered)); nla_put_u32(stats, TCP_NLA_DELIVERED_CE, READ_ONCE(tp->delivered_ce)); - nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); + nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, + max_t(int, 0, + READ_ONCE(tp->write_seq) - READ_ONCE(tp->snd_una))); nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 63ff89210a72..edb5013873e0 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3912,7 +3912,7 @@ static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) sock_owned_by_me((struct sock *)tp); tp->bytes_acked += delta; tcp_snd_sne_update(tp, ack); - tp->snd_una = ack; + WRITE_ONCE(tp->snd_una, ack); } static void tcp_rcv_sne_update(struct tcp_sock *tp, u32 seq) @@ -7240,7 +7240,7 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) if (sk->sk_socket) sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); - tp->snd_una = TCP_SKB_CB(skb)->ack_seq; + WRITE_ONCE(tp->snd_una, TCP_SKB_CB(skb)->ack_seq); tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2663505a0dd7..9f83c7e4acab 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -4153,7 +4153,7 @@ static void tcp_connect_init(struct sock *sk) tp->snd_wnd = 0; tcp_init_wl(tp, 0); tcp_write_queue_purge(sk); - tp->snd_una = tp->write_seq; + WRITE_ONCE(tp->snd_una, tp->write_seq); tp->snd_sml = tp->write_seq; tp->snd_up = tp->write_seq; WRITE_ONCE(tp->snd_nxt, tp->write_seq); -- cgit v1.2.3 From ee43e957ce2ec77b2ec47fef28f3c0df6ab01a31 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:12 +0000 Subject: tcp: annotate data-races around tp->bytes_sent tcp_get_timestamping_opt_stats() intentionally runs lockless, we must add READ_ONCE() and WRITE_ONCE() annotations to keep KCSAN happy. 
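Note that in the read-modify-write updates in this patch, only the store is annotated: the plain read of tp->bytes_sent on the writer side is serialized by the socket lock, so WRITE_ONCE() exists solely for the benefit of the lockless reader. A minimal sketch of that asymmetry (illustrative names, userspace stand-in macros):

#include <stdio.h>

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

struct counters { unsigned long long bytes_sent; };

/* Called with the socket lock held: the unannotated read cannot race
 * with another writer, so only the store is marked for KCSAN. */
static void account_sent(struct counters *c, unsigned long long len)
{
        WRITE_ONCE(c->bytes_sent, c->bytes_sent + len);
}

int main(void)
{
        struct counters c = { 0 };

        account_sent(&c, 1500);
        printf("%llu\n", READ_ONCE(c.bytes_sent)); /* lockless-reader view */
        return 0;
}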
Fixes: ba113c3aa79a ("tcp: add data bytes sent stats") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-8-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 2 +- net/ipv4/tcp_output.c | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 729936d13a5c..f999b86851cd 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4461,7 +4461,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, READ_ONCE(tp->write_seq) - READ_ONCE(tp->snd_una))); nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); - nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, + nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, READ_ONCE(tp->bytes_sent), TCP_NLA_PAD); nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, TCP_NLA_PAD); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 9f83c7e4acab..87af4731df87 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1690,7 +1690,8 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, tcp_event_data_sent(tp, sk); WRITE_ONCE(tp->data_segs_out, tp->data_segs_out + tcp_skb_pcount(skb)); - tp->bytes_sent += skb->len - tcp_header_size; + WRITE_ONCE(tp->bytes_sent, + tp->bytes_sent + skb->len - tcp_header_size); } if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) -- cgit v1.2.3 From 5efc7b9f7cbd43401f1af81d3d7f2be00f93390d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:13 +0000 Subject: tcp: annotate data-races around tp->bytes_retrans tcp_get_timestamping_opt_stats() intentionally runs lockless, we must add READ_ONCE() and WRITE_ONCE() annotations to keep KCSAN happy. Fixes: fb31c9b9f6c8 ("tcp: add data bytes retransmitted stats") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-9-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 4 ++-- net/ipv4/tcp_output.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f999b86851cd..8c84639dc54b 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4463,8 +4463,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, READ_ONCE(tp->bytes_sent), TCP_NLA_PAD); - nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, - TCP_NLA_PAD); + nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, + READ_ONCE(tp->bytes_retrans), TCP_NLA_PAD); nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 87af4731df87..f9d8755705f7 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3645,7 +3645,7 @@ start: if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); WRITE_ONCE(tp->total_retrans, tp->total_retrans + segs); - tp->bytes_retrans += skb->len; + WRITE_ONCE(tp->bytes_retrans, tp->bytes_retrans + skb->len); /* make sure skb->data is aligned on arches that require it * and check if ack-trimming & collapsing extended the headroom -- cgit v1.2.3 From a984705ca88b976bf1087978fd98b7f3993da88c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:14 +0000 Subject: tcp: annotate data-races around tp->dsack_dups tcp_get_timestamping_opt_stats() intentionally runs lockless, 
we must add READ_ONCE() and WRITE_ONCE() annotations to keep KCSAN happy. Fixes: 7e10b6554ff2 ("tcp: add dsack blocks received stats") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-10-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 2 +- net/ipv4/tcp_input.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 8c84639dc54b..57c4dcc8bfe9 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4465,7 +4465,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, TCP_NLA_PAD); nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, READ_ONCE(tp->bytes_retrans), TCP_NLA_PAD); - nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); + nla_put_u32(stats, TCP_NLA_DSACK_DUPS, READ_ONCE(tp->dsack_dups)); nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index edb5013873e0..65b3ecc6be4b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1246,7 +1246,7 @@ static u32 tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq, else if (tp->tlp_high_seq && tp->tlp_high_seq == end_seq) state->flag |= FLAG_DSACK_TLP; - tp->dsack_dups += dup_segs; + WRITE_ONCE(tp->dsack_dups, tp->dsack_dups + dup_segs); /* Skip the DSACK if dup segs weren't retransmitted by sender */ if (tp->dsack_dups > tp->total_retrans) return 0; -- cgit v1.2.3 From 62585690e6b2a112c408fe25f142b246ac833c42 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:15 +0000 Subject: tcp: annotate data-races around tp->reord_seen tcp_get_timestamping_opt_stats() intentionally runs lockless, we must add READ_ONCE() and WRITE_ONCE() annotations to keep KCSAN happy. Fixes: 7ec65372ca53 ("tcp: add stat of data packet reordering events") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-11-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 2 +- net/ipv4/tcp_input.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 57c4dcc8bfe9..39a4b06e36bb 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4466,7 +4466,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, READ_ONCE(tp->bytes_retrans), TCP_NLA_PAD); nla_put_u32(stats, TCP_NLA_DSACK_DUPS, READ_ONCE(tp->dsack_dups)); - nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); + nla_put_u32(stats, TCP_NLA_REORD_SEEN, READ_ONCE(tp->reord_seen)); nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 65b3ecc6be4b..896a5a5a6b1a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1299,7 +1299,7 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq, } /* This exciting event is worth to be remembered. 8) */ - tp->reord_seen++; + WRITE_ONCE(tp->reord_seen, tp->reord_seen + 1); NET_INC_STATS(sock_net(sk), ts ? 
LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER); } @@ -2443,7 +2443,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend) WRITE_ONCE(tp->reordering, min_t(u32, tp->packets_out + addend, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering))); - tp->reord_seen++; + WRITE_ONCE(tp->reord_seen, tp->reord_seen + 1); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER); } -- cgit v1.2.3 From 290b693ce7c9d48588d88b15a782a3efc6fa036b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:16 +0000 Subject: tcp: annotate data-races around tp->srtt_us tcp_get_timestamping_opt_stats() intentionally runs lockless, we must add READ_ONCE() and WRITE_ONCE() annotations to keep KCSAN happy. Fixes: e8bd8fca6773 ("tcp: add SRTT to SCM_TIMESTAMPING_OPT_STATS") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-12-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 5 +++-- net/ipv4/tcp_input.c | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 39a4b06e36bb..541bd0d2d8c4 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3623,7 +3623,8 @@ static void tcp_enable_tx_delay(struct sock *sk, int val) if (delta && sk->sk_state == TCP_ESTABLISHED) { s64 srtt = (s64)tp->srtt_us + delta; - tp->srtt_us = clamp_t(s64, srtt, 1, ~0U); + WRITE_ONCE(tp->srtt_us, + clamp_t(s64, srtt, 1, ~0U)); /* Note: does not deal with non zero icsk_backoff */ tcp_set_rto(sk); @@ -4467,7 +4468,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, READ_ONCE(tp->bytes_retrans), TCP_NLA_PAD); nla_put_u32(stats, TCP_NLA_DSACK_DUPS, READ_ONCE(tp->dsack_dups)); nla_put_u32(stats, TCP_NLA_REORD_SEEN, READ_ONCE(tp->reord_seen)); - nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); + nla_put_u32(stats, TCP_NLA_SRTT, READ_ONCE(tp->srtt_us) >> 3); nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, max_t(int, 0, tp->write_seq - tp->snd_nxt)); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 896a5a5a6b1a..e04ae105893c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1132,7 +1132,7 @@ static void tcp_rtt_estimator(struct sock *sk, long mrtt_us) tcp_bpf_rtt(sk, mrtt_us, srtt); } - tp->srtt_us = max(1U, srtt); + WRITE_ONCE(tp->srtt_us, max(1U, srtt)); } void tcp_update_pacing_rate(struct sock *sk) -- cgit v1.2.3 From 71c675358b711bbfd8528949249419dc2dfa4ce1 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:17 +0000 Subject: tcp: annotate data-races around tp->timeout_rehash tcp_get_timestamping_opt_stats() intentionally runs lockless, we must add READ_ONCE() and WRITE_ONCE() annotations to keep KCSAN happy. 
Fixes: 32efcc06d2a1 ("tcp: export count for rehash attempts") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-13-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 3 ++- net/ipv4/tcp_timer.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 541bd0d2d8c4..192e95b71ce9 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4469,7 +4469,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, nla_put_u32(stats, TCP_NLA_DSACK_DUPS, READ_ONCE(tp->dsack_dups)); nla_put_u32(stats, TCP_NLA_REORD_SEEN, READ_ONCE(tp->reord_seen)); nla_put_u32(stats, TCP_NLA_SRTT, READ_ONCE(tp->srtt_us) >> 3); - nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); + nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, + READ_ONCE(tp->timeout_rehash)); nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, max_t(int, 0, tp->write_seq - tp->snd_nxt)); nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index ea99988795e7..8d791a954cd6 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -297,7 +297,7 @@ static int tcp_write_timeout(struct sock *sk) } if (sk_rethink_txhash(sk)) { - tp->timeout_rehash++; + WRITE_ONCE(tp->timeout_rehash, tp->timeout_rehash + 1); __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH); } -- cgit v1.2.3 From 3a63b3d160560ef51e43fb4c880a5cde8078053c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:18 +0000 Subject: tcp: annotate data-races around (tp->write_seq - tp->snd_nxt) tcp_get_timestamping_opt_stats() intentionally runs lockless, we must add READ_ONCE() annotations to keep KCSAN happy. WRITE_ONCE() annotations are already present. Fixes: e08ab0b377a1 ("tcp: add bytes not sent to SCM_TIMESTAMPING_OPT_STATS") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-14-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 192e95b71ce9..68894c03f262 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4472,7 +4472,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, READ_ONCE(tp->timeout_rehash)); nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, - max_t(int, 0, tp->write_seq - tp->snd_nxt)); + max_t(int, 0, + READ_ONCE(tp->write_seq) - READ_ONCE(tp->snd_nxt))); nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, TCP_NLA_PAD); if (ack_skb) -- cgit v1.2.3 From 9e89b9d03a2d2e30dcca166d5af52f9a8eceab25 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 20:03:19 +0000 Subject: tcp: annotate data-races around tp->plb_rehash tcp_get_timestamping_opt_stats() intentionally runs lockless, we must add READ_ONCE() and WRITE_ONCE() annotations to keep KCSAN happy. 
Fixes: 29c1c44646ae ("tcp: add u32 counter in tcp_sock and an SNMP counter for PLB") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260416200319.3608680-15-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 3 ++- net/ipv4/tcp_plb.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 68894c03f262..7fbf2fca5eb2 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4480,7 +4480,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, nla_put_u8(stats, TCP_NLA_TTL, tcp_skb_ttl_or_hop_limit(ack_skb)); - nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash); + nla_put_u32(stats, TCP_NLA_REHASH, + READ_ONCE(tp->plb_rehash) + READ_ONCE(tp->timeout_rehash)); return stats; } diff --git a/net/ipv4/tcp_plb.c b/net/ipv4/tcp_plb.c index 68ccdb9a5412..c11a0cd3f8fe 100644 --- a/net/ipv4/tcp_plb.c +++ b/net/ipv4/tcp_plb.c @@ -80,7 +80,7 @@ void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb) sk_rethink_txhash(sk); plb->consec_cong_rounds = 0; - tcp_sk(sk)->plb_rehash++; + WRITE_ONCE(tcp_sk(sk)->plb_rehash, tcp_sk(sk)->plb_rehash + 1); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPLBREHASH); } EXPORT_SYMBOL_GPL(tcp_plb_check_rehash); -- cgit v1.2.3 From f996edd7615e686ada141b7f3395025729ff8ccb Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Apr 2026 10:35:05 +0000 Subject: ipv6: fix possible UAF in icmpv6_rcv() Caching saddr and daddr before pskb_pull() is problematic since skb->head can change. Remove these temporary variables: - We only access &ipv6_hdr(skb)->saddr and &ipv6_hdr(skb)->daddr when net_dbg_ratelimited() is called in the slow path. - Avoid potential future misuse after pskb_pull() call. Fixes: 4b3418fba0fe ("ipv6: icmp: include addresses in debug messages") Signed-off-by: Eric Dumazet Reviewed-by: Fernando Fernandez Mancera Reviewed-by: Joe Damato Reviewed-by: Ido Schimmel Link: https://patch.msgid.link/20260416103505.2380753-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv6/icmp.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 799d9e9ac45d..efb23807a026 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -1104,7 +1104,6 @@ static int icmpv6_rcv(struct sk_buff *skb) struct net *net = dev_net_rcu(skb->dev); struct net_device *dev = icmp6_dev(skb); struct inet6_dev *idev = __in6_dev_get(dev); - const struct in6_addr *saddr, *daddr; struct icmp6hdr *hdr; u8 type; @@ -1135,12 +1134,10 @@ static int icmpv6_rcv(struct sk_buff *skb) __ICMP6_INC_STATS(dev_net_rcu(dev), idev, ICMP6_MIB_INMSGS); - saddr = &ipv6_hdr(skb)->saddr; - daddr = &ipv6_hdr(skb)->daddr; - if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) { net_dbg_ratelimited("ICMPv6 checksum failed [%pI6c > %pI6c]\n", - saddr, daddr); + &ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr); goto csum_error; } @@ -1220,7 +1217,8 @@ static int icmpv6_rcv(struct sk_buff *skb) break; net_dbg_ratelimited("icmpv6: msg of unknown type [%pI6c > %pI6c]\n", - saddr, daddr); + &ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr); /* * error of unknown type. -- cgit v1.2.3 From 965dc93481d1b80d341bdd16c27b16fe197175ee Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 15 Apr 2026 18:48:29 +0000 Subject: af_unix: Drop all SCM attributes for SOCKMAP. SOCKMAP can hide inflight fd from AF_UNIX GC. 
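[Editor's sketch] The icmpv6_rcv() fix above hinges on pskb_pull() being allowed to reallocate skb->head, which turns any pointer cached into the old buffer into a dangling pointer. The same hazard exists in userspace with realloc(), and the safe pattern is the same one the patch adopts: re-derive pointers from the (possibly new) base at each use, which is what re-reading ipv6_hdr(skb) accomplishes. The struct and helper names below are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt {
	char *head;	/* models skb->head */
	size_t len;
};

static char *pkt_hdr(const struct pkt *p)
{
	return p->head;	/* models ipv6_hdr(skb): always derived from head */
}

static int pkt_grow(struct pkt *p, size_t newlen)	/* may move the buffer */
{
	char *nhead = realloc(p->head, newlen);

	if (!nhead)
		return -1;
	p->head = nhead;
	p->len = newlen;
	return 0;
}

int main(void)
{
	struct pkt p = { .head = malloc(64), .len = 64 };

	if (!p.head)
		return 1;
	strcpy(p.head, "hdr");

	/* BAD (what the removed saddr/daddr locals did): caching
	 * char *cached = pkt_hdr(&p) here dangles once pkt_grow()
	 * moves the buffer.
	 */
	if (pkt_grow(&p, 4096))
		return 1;

	/* GOOD: re-derive from the current base after the call. */
	printf("%s\n", pkt_hdr(&p));
	free(p.head);
	return 0;
}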
When a socket in SOCKMAP receives an skb with an inflight fd,
sk_psock_verdict_data_ready() looks up the mapped socket and enqueues
the skb to its psock->ingress_skb. Since neither the old nor the new GC
can inspect the psock queue, the hidden skb leaks the inflight sockets.

Note that this cannot be detected via kmemleak because inflight sockets
are linked to a global list.

In addition, SOCKMAP redirect breaks the Tarjan-based GC's assumption
that unix_edge.successor is always alive, which is no longer true once
the skb is redirected, resulting in use-after-free below. [0]

Moreover, SOCKMAP does not call scm_stat_del() properly, so
unix_show_fdinfo() could report an incorrect fd count.

sk_msg_recvmsg() does not support any SCM attributes in the first
place. Let's drop all SCM attributes before passing the skb to the
SOCKMAP layer.

[0]:
BUG: KASAN: slab-use-after-free in unix_del_edges (net/unix/garbage.c:118 net/unix/garbage.c:181 net/unix/garbage.c:251)
Read of size 8 at addr ffff888125362670 by task kworker/56:1/496

CPU: 56 UID: 0 PID: 496 Comm: kworker/56:1 Not tainted 7.0.0-rc7-00263-gb9d8b856689d #3 PREEMPT(lazy)
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.17.0-debian-1.17.0-1 04/01/2014
Workqueue: events sk_psock_backlog
Call Trace:
 dump_stack_lvl (lib/dump_stack.c:122)
 print_report (mm/kasan/report.c:379)
 kasan_report (mm/kasan/report.c:597)
 unix_del_edges (net/unix/garbage.c:118 net/unix/garbage.c:181 net/unix/garbage.c:251)
 unix_destroy_fpl (net/unix/garbage.c:317)
 unix_destruct_scm (./include/net/scm.h:80 ./include/net/scm.h:86 net/unix/af_unix.c:1976)
 sk_psock_backlog (./include/linux/skbuff.h:?)
 process_scheduled_works (kernel/workqueue.c:?)
 worker_thread (kernel/workqueue.c:?)
 kthread (kernel/kthread.c:438)
 ret_from_fork (arch/x86/kernel/process.c:164)
 ret_from_fork_asm (arch/x86/entry/entry_64.S:258)

Allocated by task 955:
 kasan_save_track (mm/kasan/common.c:58 mm/kasan/common.c:78)
 __kasan_slab_alloc (mm/kasan/common.c:369)
 kmem_cache_alloc_noprof (mm/slub.c:4539)
 sk_prot_alloc (net/core/sock.c:2240)
 sk_alloc (net/core/sock.c:2301)
 unix_create1 (net/unix/af_unix.c:1099)
 unix_create (net/unix/af_unix.c:1169)
 __sock_create (net/socket.c:1606)
 __sys_socketpair (net/socket.c:1811)
 __x64_sys_socketpair (net/socket.c:1863 net/socket.c:1860 net/socket.c:1860)
 do_syscall_64 (arch/x86/entry/syscall_64.c:?)
 entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:130)

Freed by task 496:
 kasan_save_track (mm/kasan/common.c:58 mm/kasan/common.c:78)
 kasan_save_free_info (mm/kasan/generic.c:587)
 __kasan_slab_free (mm/kasan/common.c:287)
 kmem_cache_free (mm/slub.c:6165)
 __sk_destruct (net/core/sock.c:2282 net/core/sock.c:2384)
 sk_psock_destroy (./include/net/sock.h:?)
 process_scheduled_works (kernel/workqueue.c:?)
 worker_thread (kernel/workqueue.c:?)
kthread (kernel/kthread.c:438) ret_from_fork (arch/x86/kernel/process.c:164) ret_from_fork_asm (arch/x86/entry/entry_64.S:258) Fixes: c63829182c37 ("af_unix: Implement ->psock_update_sk_prot()") Fixes: 77462de14a43 ("af_unix: Add read_sock for stream socket types") Reported-by: Xingyu Jin Signed-off-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20260415184830.3988432-1-kuniyu@google.com Signed-off-by: Jakub Kicinski --- net/unix/af_unix.c | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 4c4a8d23ddd2..fa34c7aec88d 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1968,16 +1968,19 @@ static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) static void unix_destruct_scm(struct sk_buff *skb) { - struct scm_cookie scm; + struct scm_cookie scm = {}; + + swap(scm.pid, UNIXCB(skb).pid); - memset(&scm, 0, sizeof(scm)); - scm.pid = UNIXCB(skb).pid; if (UNIXCB(skb).fp) unix_detach_fds(&scm, skb); - /* Alas, it calls VFS */ - /* So fscking what? fput() had been SMP-safe since the last Summer */ scm_destroy(&scm); +} + +static void unix_wfree(struct sk_buff *skb) +{ + unix_destruct_scm(skb); sock_wfree(skb); } @@ -1993,7 +1996,7 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen if (scm->fp && send_fds) err = unix_attach_fds(scm, skb); - skb->destructor = unix_destruct_scm; + skb->destructor = unix_wfree; return err; } @@ -2070,6 +2073,13 @@ static void scm_stat_del(struct sock *sk, struct sk_buff *skb) } } +static void unix_orphan_scm(struct sock *sk, struct sk_buff *skb) +{ + scm_stat_del(sk, skb); + unix_destruct_scm(skb); + skb->destructor = sock_wfree; +} + /* * Send AF_UNIX data. */ @@ -2683,10 +2693,16 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor) int err; mutex_lock(&u->iolock); + skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err); - mutex_unlock(&u->iolock); - if (!skb) + if (!skb) { + mutex_unlock(&u->iolock); return err; + } + + unix_orphan_scm(sk, skb); + + mutex_unlock(&u->iolock); return recv_actor(sk, skb); } @@ -2886,6 +2902,9 @@ static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor) #endif spin_unlock(&queue->lock); + + unix_orphan_scm(sk, skb); + mutex_unlock(&u->iolock); return recv_actor(sk, skb); -- cgit v1.2.3 From 0cf004ffb61cd32d140531c3a84afe975f9fc7ea Mon Sep 17 00:00:00 2001 From: Michael Bommarito Date: Wed, 15 Apr 2026 23:19:03 -0400 Subject: sctp: fix OOB write to userspace in sctp_getsockopt_peer_auth_chunks sctp_getsockopt_peer_auth_chunks() checks that the caller's optval buffer is large enough for the peer AUTH chunk list with if (len < num_chunks) return -EINVAL; but then writes num_chunks bytes to p->gauth_chunks, which lives at offset offsetof(struct sctp_authchunks, gauth_chunks) == 8 inside optval. The check is missing the sizeof(struct sctp_authchunks) = 8-byte header. When the caller supplies len == num_chunks (for any num_chunks > 0) the test passes but copy_to_user() writes sizeof(struct sctp_authchunks) = 8 bytes past the declared buffer. The sibling function sctp_getsockopt_local_auth_chunks() at the next line already has the correct check: if (len < sizeof(struct sctp_authchunks) + num_chunks) return -EINVAL; Align the peer variant with its sibling. 
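[Editor's sketch] A minimal userspace model of the arithmetic just described. The struct mirrors the uapi layout named above (association id plus chunk count, then the flexible chunk array, so the chunks start at offset 8); field names are copied from the commit message, the numeric example is illustrative.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct authchunks {
	int32_t  gauth_assoc_id;
	uint32_t gauth_number_of_chunks;
	uint8_t  gauth_chunks[];	/* chunk bytes start here */
};

int main(void)
{
	size_t len = 16;		/* caller-declared optval size */
	size_t num_chunks = 16;		/* peer-controlled chunk list size */

	printf("header ends at offset %zu\n",
	       offsetof(struct authchunks, gauth_chunks));	/* 8 */

	/* Old check: passes because len == num_chunks ... */
	printf("old check rejects: %d\n", len < num_chunks);	/* 0 */

	/* ... yet the copy covers [8, 8 + num_chunks), i.e. 8 bytes
	 * past the declared buffer end.
	 */
	printf("bytes written past buffer: %zu\n",
	       offsetof(struct authchunks, gauth_chunks) + num_chunks - len);

	/* Fixed check accounts for the header. */
	printf("new check rejects: %d\n",
	       len < sizeof(struct authchunks) + num_chunks);	/* 1 */
	return 0;
}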
Reproducer confirms on v7.0-13-generic: an unprivileged userspace
caller that opens a loopback SCTP association with AUTH enabled,
queries num_chunks with a short optval, and then issues the real
getsockopt with len == num_chunks and sentinel bytes painted past the
buffer observes those sentinel bytes overwritten with the peer's AUTH
chunk type. The bytes written are under the peer's control but land in
the caller's own userspace; this is not a kernel memory corruption,
but it is a kernel-side contract violation that can silently corrupt
adjacent userspace data.

Fixes: 65b07e5d0d09 ("[SCTP]: API updates to suport SCTP-AUTH extensions.")
Assisted-by: Claude:claude-opus-4-6
Signed-off-by: Michael Bommarito
Acked-by: Xin Long
Link: https://patch.msgid.link/20260416031903.1447072-1-michael.bommarito@gmail.com
Signed-off-by: Jakub Kicinski
---
 net/sctp/socket.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d2665bbd41a2..f52fe90d3e00 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -7033,7 +7033,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 	/* See if the user provided enough room for all the data */
 	num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
-	if (len < num_chunks)
+	if (len < sizeof(struct sctp_authchunks) + num_chunks)
 		return -EINVAL;
 
 	if (copy_to_user(to, ch->chunks, num_chunks))
-- cgit v1.2.3


From d6c19b31a3c1d519fabdcf0aa239e6b6109b9473 Mon Sep 17 00:00:00 2001
From: Qingfang Deng
Date: Wed, 15 Apr 2026 10:24:50 +0800
Subject: flow_dissector: do not dissect PPPoE PFC frames

RFC 2516 Section 7 states that Protocol Field Compression (PFC) is NOT
RECOMMENDED for PPPoE. In practice, pppd does not support negotiating
PFC for PPPoE sessions, and the flow dissector code had assumed an
uncompressed frame until the blamed commit.

During the review process of that commit [1], support for PFC was
suggested. However, having a compressed (1-byte) protocol field means
the subsequent PPP payload is shifted by one byte, causing 4-byte
misalignment for the network header and an unaligned access exception
on some architectures.
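[Editor's sketch] A standalone illustration of the PFC detection and the one-byte shift described above. Assumptions: PPPOE_SES_HLEN == 8 as in the kernel header (6-byte PPPoE header plus 2-byte PPP protocol field), a starting offset chosen so the uncompressed payload would land 4-byte aligned, and illustrative frame bytes.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define PPPOE_SES_HLEN 8

int main(void)
{
	/* A PFC sender emits PPP_IP (0x0021) as the single odd byte
	 * 0x21, so the two bytes at the protocol field read 0x21 0x45,
	 * 0x45 already being the first IPv4 header byte.
	 */
	uint16_t proto = ntohs(htons(0x2145));
	unsigned int nhoff = 16; /* assume: uncompressed payload would be 4-byte aligned */

	if (proto & 0x0100) {	/* LSB of the most significant octet */
		/* Old behaviour: accept PFC, advance one byte less. */
		nhoff += PPPOE_SES_HLEN - 1;
		printf("PFC: IP header at offset %u, offset %% 4 = %u (misaligned)\n",
		       nhoff, nhoff % 4);	/* 23, 3 */
	} else {
		nhoff += PPPOE_SES_HLEN;
		printf("uncompressed: IP header at offset %u (aligned)\n", nhoff);
	}
	return 0;
}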
The exception can be reproduced by sending a PPPoE PFC frame to an ethernet interface of a MIPS board, with RPS enabled, even if no PPPoE session is active on that interface: $ 0 : 00000000 80c40000 00000000 85144817 $ 4 : 00000008 00000100 80a75758 81dc9bb8 $ 8 : 00000010 8087ae2c 0000003d 00000000 $12 : 000000e0 00000039 00000000 00000000 $16 : 85043240 80a75758 81dc9bb8 00006488 $20 : 0000002f 00000007 85144810 80a70000 $24 : 81d1bda0 00000000 $28 : 81dc8000 81dc9aa8 00000000 805ead08 Hi : 00009d51 Lo : 2163358a epc : 805e91f0 __skb_flow_dissect+0x1b0/0x1b50 ra : 805ead08 __skb_get_hash_net+0x74/0x12c Status: 11000403 KERNEL EXL IE Cause : 40800010 (ExcCode 04) BadVA : 85144817 PrId : 0001992f (MIPS 1004Kc) Call Trace: [<805e91f0>] __skb_flow_dissect+0x1b0/0x1b50 [<805ead08>] __skb_get_hash_net+0x74/0x12c [<805ef330>] get_rps_cpu+0x1b8/0x3fc [<805fca70>] netif_receive_skb_list_internal+0x324/0x364 [<805fd120>] napi_complete_done+0x68/0x2a4 [<8058de5c>] mtk_napi_rx+0x228/0xfec [<805fd398>] __napi_poll+0x3c/0x1c4 [<805fd754>] napi_threaded_poll_loop+0x234/0x29c [<805fd848>] napi_threaded_poll+0x8c/0xb0 [<80053544>] kthread+0x104/0x12c [<80002bd8>] ret_from_kernel_thread+0x14/0x1c Code: 02d51821 1060045b 00000000 <8c640000> 3084000f 2c820005 144001a2 00042080 8e220000 To reduce the attack surface and maintain performance, do not process PPPoE PFC frames. [1] https://lore.kernel.org/r/20220630231016.GA392@debian.home Fixes: 46126db9c861 ("flow_dissector: Add PPPoE dissectors") Signed-off-by: Qingfang Deng Link: https://patch.msgid.link/20260415022456.141758-1-qingfang.deng@linux.dev Signed-off-by: Jakub Kicinski --- net/core/flow_dissector.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 1b61bb25ba0e..2a98f5fa74eb 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -1374,16 +1374,13 @@ proto_again: break; } - /* least significant bit of the most significant octet - * indicates if protocol field was compressed + /* PFC (compressed 1-byte protocol) frames are not processed. + * A compressed protocol field has the least significant bit of + * the most significant octet set, which will fail the following + * ppp_proto_is_valid(), returning FLOW_DISSECT_RET_OUT_BAD. */ ppp_proto = ntohs(hdr->proto); - if (ppp_proto & 0x0100) { - ppp_proto = ppp_proto >> 8; - nhoff += PPPOE_SES_HLEN - 1; - } else { - nhoff += PPPOE_SES_HLEN; - } + nhoff += PPPOE_SES_HLEN; if (ppp_proto == PPP_IP) { proto = htons(ETH_P_IP); -- cgit v1.2.3 From 2091c6aa0df6aba47deb5c8ab232b1cb60af3519 Mon Sep 17 00:00:00 2001 From: Weiming Shi Date: Wed, 15 Apr 2026 19:46:54 -0700 Subject: openvswitch: cap upcall PID array size and pre-size vport replies The vport netlink reply helpers allocate a fixed-size skb with nlmsg_new(NLMSG_DEFAULT_SIZE, ...) but serialize the full upcall PID array via ovs_vport_get_upcall_portids(). Since ovs_vport_set_upcall_portids() accepts any non-zero multiple of sizeof(u32) with no upper bound, a CAP_NET_ADMIN user can install a PID array large enough to overflow the reply buffer, causing nla_put() to fail with -EMSGSIZE and hitting BUG_ON(err < 0). On systems with unprivileged user namespaces enabled (e.g., Ubuntu default), this is reachable via unshare -Urn since OVS vport mutation operations use GENL_UNS_ADMIN_PERM. kernel BUG at net/openvswitch/datapath.c:2414! 
Oops: invalid opcode: 0000 [#1] SMP KASAN NOPTI CPU: 1 UID: 0 PID: 65 Comm: poc Not tainted 7.0.0-rc7-00195-geb216e422044 #1 RIP: 0010:ovs_vport_cmd_set+0x34c/0x400 Call Trace: genl_family_rcv_msg_doit (net/netlink/genetlink.c:1116) genl_rcv_msg (net/netlink/genetlink.c:1194) netlink_rcv_skb (net/netlink/af_netlink.c:2550) genl_rcv (net/netlink/genetlink.c:1219) netlink_unicast (net/netlink/af_netlink.c:1344) netlink_sendmsg (net/netlink/af_netlink.c:1894) __sys_sendto (net/socket.c:2206) __x64_sys_sendto (net/socket.c:2209) do_syscall_64 (arch/x86/entry/syscall_64.c:63) entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:130) Kernel panic - not syncing: Fatal exception Reject attempts to set more PIDs than nr_cpu_ids in ovs_vport_set_upcall_portids(), and pre-compute the worst-case reply size in ovs_vport_cmd_msg_size() based on that bound, similar to the existing ovs_dp_cmd_msg_size(). nr_cpu_ids matches the cap already used by the per-CPU dispatch configuration on the datapath side (ovs_dp_cmd_fill_info() serialises at most nr_cpu_ids PIDs), so the two sides stay consistent. Fixes: 5cd667b0a456 ("openvswitch: Allow each vport to have an array of 'port_id's.") Reported-by: Xiang Mei Assisted-by: Claude:claude-opus-4-6 Signed-off-by: Weiming Shi Reviewed-by: Ilya Maximets Link: https://patch.msgid.link/20260416024653.153456-2-bestswngs@gmail.com Signed-off-by: Jakub Kicinski --- net/openvswitch/datapath.c | 35 +++++++++++++++++++++++++++++++++-- net/openvswitch/vport.c | 3 +++ 2 files changed, 36 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e209099218b4..bbbde50fc649 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -2184,9 +2184,40 @@ error: return err; } +static size_t ovs_vport_cmd_msg_size(void) +{ + size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header)); + + msgsize += nla_total_size(sizeof(u32)); /* OVS_VPORT_ATTR_PORT_NO */ + msgsize += nla_total_size(sizeof(u32)); /* OVS_VPORT_ATTR_TYPE */ + msgsize += nla_total_size(IFNAMSIZ); /* OVS_VPORT_ATTR_NAME */ + msgsize += nla_total_size(sizeof(u32)); /* OVS_VPORT_ATTR_IFINDEX */ + msgsize += nla_total_size(sizeof(s32)); /* OVS_VPORT_ATTR_NETNSID */ + + /* OVS_VPORT_ATTR_STATS */ + msgsize += nla_total_size_64bit(sizeof(struct ovs_vport_stats)); + + /* OVS_VPORT_ATTR_UPCALL_STATS(OVS_VPORT_UPCALL_ATTR_SUCCESS + + * OVS_VPORT_UPCALL_ATTR_FAIL) + */ + msgsize += nla_total_size(nla_total_size_64bit(sizeof(u64)) + + nla_total_size_64bit(sizeof(u64))); + + /* OVS_VPORT_ATTR_UPCALL_PID */ + msgsize += nla_total_size(nr_cpu_ids * sizeof(u32)); + + /* OVS_VPORT_ATTR_OPTIONS(OVS_TUNNEL_ATTR_DST_PORT + + * OVS_TUNNEL_ATTR_EXTENSION(OVS_VXLAN_EXT_GBP)) + */ + msgsize += nla_total_size(nla_total_size(sizeof(u16)) + + nla_total_size(nla_total_size(0))); + + return msgsize; +} + static struct sk_buff *ovs_vport_cmd_alloc_info(void) { - return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + return genlmsg_new(ovs_vport_cmd_msg_size(), GFP_KERNEL); } /* Called with ovs_mutex, only via ovs_dp_notify_wq(). 
*/
@@ -2196,7 +2227,7 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
 	struct sk_buff *skb;
 	int retval;
 
-	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	skb = ovs_vport_cmd_alloc_info();
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 23f629e94a36..56b2e2d1a749 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -406,6 +406,9 @@ int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
 	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
 		return -EINVAL;
 
+	if (nla_len(ids) / sizeof(u32) > nr_cpu_ids)
+		return -EINVAL;
+
 	old = ovsl_dereference(vport->upcall_portids);
 
 	vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
-- cgit v1.2.3


From f6315295899415f1ddcf39f7c9cb46d25e2c6c6a Mon Sep 17 00:00:00 2001
From: Dexuan Cui
Date: Thu, 16 Apr 2026 12:14:33 -0700
Subject: hv_sock: Report EOF instead of -EIO for FIN

Commit f0c5827d07cb unluckily causes a regression for the FIN packet,
and the final read syscall gets an error rather than 0.

Ideally, we would want to fix hvs_channel_readable_payload() so that
it could return 0 in the FIN scenario, but it's not good for the
hv_sock driver to use the VMBus ringbuffer's cached priv_read_index,
which is internal data in the VMBus driver.

Fix the regression in hv_sock by returning 0 rather than -EIO.

Fixes: f0c5827d07cb ("hv_sock: Return the readable bytes in hvs_stream_has_data()")
Cc: stable@vger.kernel.org
Reported-by: Ben Hillis
Reported-by: Mitchell Levy
Signed-off-by: Dexuan Cui
Acked-by: Stefano Garzarella
Link: https://patch.msgid.link/20260416191433.840637-1-decui@microsoft.com
Signed-off-by: Jakub Kicinski
---
 net/vmw_vsock/hyperv_transport.c | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

(limited to 'net')

diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 2b7c0b5896ed..76e78c83fdbc 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -694,7 +694,6 @@ out:
 static s64 hvs_stream_has_data(struct vsock_sock *vsk)
 {
 	struct hvsock *hvs = vsk->trans;
-	bool need_refill;
 	s64 ret;
 
 	if (hvs->recv_data_len > 0)
@@ -702,9 +701,22 @@ static s64 hvs_stream_has_data(struct vsock_sock *vsk)
 
 	switch (hvs_channel_readable_payload(hvs->chan)) {
 	case 1:
-		need_refill = !hvs->recv_desc;
-		if (!need_refill)
-			return -EIO;
+		if (hvs->recv_desc) {
+			/* Here hvs->recv_data_len is 0, so hvs->recv_desc must
+			 * be NULL unless it points to the 0-byte-payload FIN
+			 * packet: see hvs_update_recv_data().
+			 *
+			 * Here all the payload has been dequeued, but
+			 * hvs_channel_readable_payload() still returns 1,
+			 * because the VMBus ringbuffer's read_index is not
+			 * updated for the FIN packet: hvs_stream_dequeue() ->
+			 * hv_pkt_iter_next() updates the cached priv_read_index
+			 * but has no opportunity to update the read_index in
+			 * hv_pkt_iter_close() as hvs_stream_has_data() returns
+			 * 0 for the FIN packet, so it won't get dequeued.
+			 */
+			return 0;
+		}
 
 		hvs->recv_desc = hv_pkt_iter_first(hvs->chan);
 		if (!hvs->recv_desc)
-- cgit v1.2.3


From a663bac71a2f0b3ac6c373168ca57b2a6e6381aa Mon Sep 17 00:00:00 2001
From: Yuan Zhaoming
Date: Fri, 17 Apr 2026 22:13:40 +0800
Subject: net: mctp: don't require received header reserved bits to be zero

From the MCTP Base specification (DSP0236 v1.2.1), the first byte of
the MCTP header contains a 4-bit reserved field and a 4-bit version.
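[Editor's sketch] A toy model of that first header byte and of the masking the hunk below applies. HDR_VER_MASK, VER_MIN and VER_MAX are illustrative stand-ins for the kernel's MCTP_HDR_VER_MASK / MCTP_VER_MIN / MCTP_VER_MAX; the byte values are made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HDR_VER_MASK 0x0f
#define VER_MIN      1
#define VER_MAX      1

static bool ver_ok_old(uint8_t b) { return b == 1; }	/* whole byte */

static bool ver_ok_new(uint8_t b)
{
	uint8_t ver = b & HDR_VER_MASK;	/* ignore the reserved nibble */

	return ver >= VER_MIN && ver <= VER_MAX;
}

int main(void)
{
	uint8_t conformant = 0x01;	/* reserved = 0, version = 1 */
	uint8_t nonconform = 0x11;	/* reserved bit set, version = 1 */

	printf("old: %d %d\n", ver_ok_old(conformant), ver_ok_old(nonconform)); /* 1 0 */
	printf("new: %d %d\n", ver_ok_new(conformant), ver_ok_new(nonconform)); /* 1 1 */
	return 0;
}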
On our current receive path, we require those 4 reserved bits to be
zero, but the 9500-8i card is non-conformant, and may set these
reserved bits.

DSP0236 states that the reserved bits must be written as zero, and
ignored when read. While the device might not conform to the former,
we should accept these messages to conform to the latter.

Relax our check on the MCTP version byte to allow non-zero bits in the
reserved field.

Fixes: 889b7da23abf ("mctp: Add initial routing framework")
Signed-off-by: Yuan Zhaoming
Cc: stable@vger.kernel.org
Acked-by: Jeremy Kerr
Link: https://patch.msgid.link/20260417141340.5306-1-yuanzhaoming901030@126.com
Signed-off-by: Jakub Kicinski
---
 net/mctp/route.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'net')

diff --git a/net/mctp/route.c b/net/mctp/route.c
index 26fb8c6bbad2..1f3dccbb7aed 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -441,6 +441,7 @@ static int mctp_dst_input(struct mctp_dst *dst, struct sk_buff *skb)
 	unsigned long f;
 	u8 tag, flags;
 	int rc;
+	u8 ver;
 
 	msk = NULL;
 	rc = -EINVAL;
@@ -467,7 +468,8 @@ static int mctp_dst_input(struct mctp_dst *dst, struct sk_buff *skb)
 	netid = mctp_cb(skb)->net;
 	skb_pull(skb, sizeof(struct mctp_hdr));
 
-	if (mh->ver != 1)
+	ver = mh->ver & MCTP_HDR_VER_MASK;
+	if (ver < MCTP_VER_MIN || ver > MCTP_VER_MAX)
 		goto out;
 
 	flags = mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
@@ -1317,6 +1319,7 @@ static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
 	struct mctp_dst dst;
 	struct mctp_hdr *mh;
 	int rc;
+	u8 ver;
 
 	rcu_read_lock();
 	mdev = __mctp_dev_get(dev);
@@ -1334,7 +1337,8 @@ static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
 
 	/* We have enough for a header; decode and route */
 	mh = mctp_hdr(skb);
-	if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX)
+	ver = mh->ver & MCTP_HDR_VER_MASK;
+	if (ver < MCTP_VER_MIN || ver > MCTP_VER_MAX)
 		goto err_drop;
 
 	/* source must be valid unicast or null; drop reserved ranges and
-- cgit v1.2.3


From b336fdbb7103fb1484e1dcb6741151d4b5a41e35 Mon Sep 17 00:00:00 2001
From: Pablo Neira Ayuso
Date: Tue, 14 Apr 2026 13:06:38 +0200
Subject: netfilter: nft_osf: restrict it to ipv4

This expression only supports ipv4, so restrict it.
Fixes: b96af92d6eaf ("netfilter: nf_tables: implement Passive OS fingerprint module in nft_osf") Acked-by: Florian Westphal Reviewed-by: Fernando Fernandez Mancera Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_osf.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c index 18003433476c..c02d5cb52143 100644 --- a/net/netfilter/nft_osf.c +++ b/net/netfilter/nft_osf.c @@ -28,6 +28,11 @@ static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs, struct nf_osf_data data; struct tcphdr _tcph; + if (nft_pf(pkt) != NFPROTO_IPV4) { + regs->verdict.code = NFT_BREAK; + return; + } + if (pkt->tprot != IPPROTO_TCP) { regs->verdict.code = NFT_BREAK; return; @@ -114,7 +119,6 @@ static int nft_osf_validate(const struct nft_ctx *ctx, switch (ctx->family) { case NFPROTO_IPV4: - case NFPROTO_IPV6: case NFPROTO_INET: hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_PRE_ROUTING) | -- cgit v1.2.3 From 2195574dc6d9017d32ac346987e12659f931d932 Mon Sep 17 00:00:00 2001 From: Xiang Mei Date: Tue, 14 Apr 2026 15:14:01 -0700 Subject: netfilter: nfnetlink_osf: fix divide-by-zero in OSF_WSS_MODULO nf_osf_match_one() computes ctx->window % f->wss.val in the OSF_WSS_MODULO branch with no guard for f->wss.val == 0. A CAP_NET_ADMIN user can add such a fingerprint via nfnetlink; a subsequent matching TCP SYN divides by zero and panics the kernel. Reject the bogus fingerprint in nfnl_osf_add_callback() above the per-option for-loop. f->wss is per-fingerprint, not per-option, so the check must run regardless of f->opt_num (including 0). Also reject wss.wc >= OSF_WSS_MAX; nf_osf_match_one() already treats that as "should not happen". Crash: Oops: divide error: 0000 [#1] SMP KASAN NOPTI RIP: 0010:nf_osf_match_one (net/netfilter/nfnetlink_osf.c:98) Call Trace: nf_osf_match (net/netfilter/nfnetlink_osf.c:220) xt_osf_match_packet (net/netfilter/xt_osf.c:32) ipt_do_table (net/ipv4/netfilter/ip_tables.c:348) nf_hook_slow (net/netfilter/core.c:622) ip_local_deliver (net/ipv4/ip_input.c:265) ip_rcv (include/linux/skbuff.h:1162) __netif_receive_skb_one_core (net/core/dev.c:6181) process_backlog (net/core/dev.c:6642) __napi_poll (net/core/dev.c:7710) net_rx_action (net/core/dev.c:7945) handle_softirqs (kernel/softirq.c:622) Fixes: 11eeef41d5f6 ("netfilter: passive OS fingerprint xtables match") Reported-by: Weiming Shi Suggested-by: Florian Westphal Suggested-by: Pablo Neira Ayuso Signed-off-by: Xiang Mei Reviewed-by: Fernando Fernandez Mancera Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nfnetlink_osf.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net') diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c index d64ce21c7b55..9de91fdd107c 100644 --- a/net/netfilter/nfnetlink_osf.c +++ b/net/netfilter/nfnetlink_osf.c @@ -320,6 +320,10 @@ static int nfnl_osf_add_callback(struct sk_buff *skb, if (f->opt_num > ARRAY_SIZE(f->opt)) return -EINVAL; + if (f->wss.wc >= OSF_WSS_MAX || + (f->wss.wc == OSF_WSS_MODULO && f->wss.val == 0)) + return -EINVAL; + for (i = 0; i < f->opt_num; i++) { if (!f->opt[i].length || f->opt[i].length > MAX_IPOPTLEN) return -EINVAL; -- cgit v1.2.3 From 6e7066bdb481a87fe88c4fa563e348c03b2d373d Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 14 Apr 2026 19:13:46 +0200 Subject: netfilter: conntrack: remove sprintf usage Replace it with scnprintf, the buffer sizes are expected to be large enough to hold the result, no need for snprintf+overflow check. 
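[Editor's sketch] A userspace illustration of the return-value difference that makes the overflow checks unnecessary. libc has no scnprintf(), so a small model of it is defined here; the buffer size and format are illustrative. snprintf() reports the length the output would have needed, while the scnprintf model reports only what was actually stored, so a caller can advance a cursor by the return value without further checks.

#include <stdarg.h>
#include <stdio.h>

static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int ret;

	if (!size)
		return 0;
	va_start(ap, fmt);
	ret = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (ret < 0)
		return 0;
	/* Clamp to the bytes actually written, excluding the NUL. */
	return (size_t)ret >= size ? (int)(size - 1) : ret;
}

int main(void)
{
	char small[6];

	printf("snprintf  -> %d\n",
	       snprintf(small, sizeof(small), "%u", 4294967295u));	/* 10 */
	printf("scnprintf -> %d (buf=\"%s\")\n",
	       my_scnprintf(small, sizeof(small), "%u", 4294967295u),
	       small);							/* 5, "42949" */
	return 0;
}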
Increase buffer size in mangle_content_len() while at it. BUG: KASAN: stack-out-of-bounds in vsnprintf+0xea5/0x1270 Write of size 1 at addr [..] vsnprintf+0xea5/0x1270 sprintf+0xb1/0xe0 mangle_content_len+0x1ac/0x280 nf_nat_sdp_session+0x1cc/0x240 process_sdp+0x8f8/0xb80 process_invite_request+0x108/0x2b0 process_sip_msg+0x5da/0xf50 sip_help_tcp+0x45e/0x780 nf_confirm+0x34d/0x990 [..] Fixes: 9fafcd7b2032 ("[NETFILTER]: nf_conntrack/nf_nat: add SIP helper port") Reported-by: Yiming Qian Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_nat_amanda.c | 2 +- net/netfilter/nf_nat_sip.c | 33 ++++++++++++++++++--------------- 2 files changed, 19 insertions(+), 16 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_nat_amanda.c b/net/netfilter/nf_nat_amanda.c index 98deef6cde69..8f1054920a85 100644 --- a/net/netfilter/nf_nat_amanda.c +++ b/net/netfilter/nf_nat_amanda.c @@ -50,7 +50,7 @@ static unsigned int help(struct sk_buff *skb, return NF_DROP; } - sprintf(buffer, "%u", port); + snprintf(buffer, sizeof(buffer), "%u", port); if (!nf_nat_mangle_udp_packet(skb, exp->master, ctinfo, protoff, matchoff, matchlen, buffer, strlen(buffer))) { diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c index cf4aeb299bde..c845b6d1a2bd 100644 --- a/net/netfilter/nf_nat_sip.c +++ b/net/netfilter/nf_nat_sip.c @@ -68,25 +68,27 @@ static unsigned int mangle_packet(struct sk_buff *skb, unsigned int protoff, } static int sip_sprintf_addr(const struct nf_conn *ct, char *buffer, + size_t size, const union nf_inet_addr *addr, bool delim) { if (nf_ct_l3num(ct) == NFPROTO_IPV4) - return sprintf(buffer, "%pI4", &addr->ip); + return scnprintf(buffer, size, "%pI4", &addr->ip); else { if (delim) - return sprintf(buffer, "[%pI6c]", &addr->ip6); + return scnprintf(buffer, size, "[%pI6c]", &addr->ip6); else - return sprintf(buffer, "%pI6c", &addr->ip6); + return scnprintf(buffer, size, "%pI6c", &addr->ip6); } } static int sip_sprintf_addr_port(const struct nf_conn *ct, char *buffer, + size_t size, const union nf_inet_addr *addr, u16 port) { if (nf_ct_l3num(ct) == NFPROTO_IPV4) - return sprintf(buffer, "%pI4:%u", &addr->ip, port); + return scnprintf(buffer, size, "%pI4:%u", &addr->ip, port); else - return sprintf(buffer, "[%pI6c]:%u", &addr->ip6, port); + return scnprintf(buffer, size, "[%pI6c]:%u", &addr->ip6, port); } static int map_addr(struct sk_buff *skb, unsigned int protoff, @@ -119,7 +121,7 @@ static int map_addr(struct sk_buff *skb, unsigned int protoff, if (nf_inet_addr_cmp(&newaddr, addr) && newport == port) return 1; - buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, ntohs(newport)); + buflen = sip_sprintf_addr_port(ct, buffer, sizeof(buffer), &newaddr, ntohs(newport)); return mangle_packet(skb, protoff, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen); } @@ -212,7 +214,7 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff, &addr, true) > 0 && nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3) && !nf_inet_addr_cmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3)) { - buflen = sip_sprintf_addr(ct, buffer, + buflen = sip_sprintf_addr(ct, buffer, sizeof(buffer), &ct->tuplehash[!dir].tuple.dst.u3, true); if (!mangle_packet(skb, protoff, dataoff, dptr, datalen, @@ -229,7 +231,7 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff, &addr, false) > 0 && nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.dst.u3) && !nf_inet_addr_cmp(&addr, &ct->tuplehash[!dir].tuple.src.u3)) { - buflen = 
sip_sprintf_addr(ct, buffer, + buflen = sip_sprintf_addr(ct, buffer, sizeof(buffer), &ct->tuplehash[!dir].tuple.src.u3, false); if (!mangle_packet(skb, protoff, dataoff, dptr, datalen, @@ -247,7 +249,7 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff, htons(n) == ct->tuplehash[dir].tuple.dst.u.udp.port && htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) { __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port; - buflen = sprintf(buffer, "%u", ntohs(p)); + buflen = scnprintf(buffer, sizeof(buffer), "%u", ntohs(p)); if (!mangle_packet(skb, protoff, dataoff, dptr, datalen, poff, plen, buffer, buflen)) { nf_ct_helper_log(skb, ct, "cannot mangle rport"); @@ -418,7 +420,8 @@ static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff, if (!nf_inet_addr_cmp(&exp->tuple.dst.u3, &exp->saved_addr) || exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) { - buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, port); + buflen = sip_sprintf_addr_port(ct, buffer, sizeof(buffer), + &newaddr, port); if (!mangle_packet(skb, protoff, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen)) { nf_ct_helper_log(skb, ct, "cannot mangle packet"); @@ -438,8 +441,8 @@ static int mangle_content_len(struct sk_buff *skb, unsigned int protoff, { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); + char buffer[sizeof("4294967295")]; unsigned int matchoff, matchlen; - char buffer[sizeof("65536")]; int buflen, c_len; /* Get actual SDP length */ @@ -454,7 +457,7 @@ static int mangle_content_len(struct sk_buff *skb, unsigned int protoff, &matchoff, &matchlen) <= 0) return 0; - buflen = sprintf(buffer, "%u", c_len); + buflen = scnprintf(buffer, sizeof(buffer), "%u", c_len); return mangle_packet(skb, protoff, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen); } @@ -491,7 +494,7 @@ static unsigned int nf_nat_sdp_addr(struct sk_buff *skb, unsigned int protoff, char buffer[INET6_ADDRSTRLEN]; unsigned int buflen; - buflen = sip_sprintf_addr(ct, buffer, addr, false); + buflen = sip_sprintf_addr(ct, buffer, sizeof(buffer), addr, false); if (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen, sdpoff, type, term, buffer, buflen)) return 0; @@ -509,7 +512,7 @@ static unsigned int nf_nat_sdp_port(struct sk_buff *skb, unsigned int protoff, char buffer[sizeof("nnnnn")]; unsigned int buflen; - buflen = sprintf(buffer, "%u", port); + buflen = scnprintf(buffer, sizeof(buffer), "%u", port); if (!mangle_packet(skb, protoff, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen)) return 0; @@ -529,7 +532,7 @@ static unsigned int nf_nat_sdp_session(struct sk_buff *skb, unsigned int protoff unsigned int buflen; /* Mangle session description owner and contact addresses */ - buflen = sip_sprintf_addr(ct, buffer, addr, false); + buflen = sip_sprintf_addr(ct, buffer, sizeof(buffer), addr, false); if (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen, sdpoff, SDP_HDR_OWNER, SDP_HDR_MEDIA, buffer, buflen)) return 0; -- cgit v1.2.3 From b6fe26f86a1649f84e057f3f15605b08eda15497 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 15 Apr 2026 12:21:00 +0200 Subject: netfilter: xtables: restrict several matches to inet family This is a partial revert of: commit ab4f21e6fb1c ("netfilter: xtables: use NFPROTO_UNSPEC in more extensions") to allow ipv4 and ipv6 only. - xt_mac - xt_owner - xt_physdev These extensions are not used by ebtables in userspace. Moreover, xt_realm is only for ipv4, since dst->tclassid is ipv4 specific. 
Fixes: ab4f21e6fb1c ("netfilter: xtables: use NFPROTO_UNSPEC in more extensions") Reported-by: "Kito Xu (veritas501)" Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_mac.c | 34 +++++++++++++++++++++++----------- net/netfilter/xt_owner.c | 37 +++++++++++++++++++++++++------------ net/netfilter/xt_physdev.c | 29 +++++++++++++++++++---------- net/netfilter/xt_realm.c | 2 +- 4 files changed, 68 insertions(+), 34 deletions(-) (limited to 'net') diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c index 4798cd2ca26e..7fc5156825e4 100644 --- a/net/netfilter/xt_mac.c +++ b/net/netfilter/xt_mac.c @@ -36,25 +36,37 @@ static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par) return ret; } -static struct xt_match mac_mt_reg __read_mostly = { - .name = "mac", - .revision = 0, - .family = NFPROTO_UNSPEC, - .match = mac_mt, - .matchsize = sizeof(struct xt_mac_info), - .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN) | - (1 << NF_INET_FORWARD), - .me = THIS_MODULE, +static struct xt_match mac_mt_reg[] __read_mostly = { + { + .name = "mac", + .family = NFPROTO_IPV4, + .match = mac_mt, + .matchsize = sizeof(struct xt_mac_info), + .hooks = (1 << NF_INET_PRE_ROUTING) | + (1 << NF_INET_LOCAL_IN) | + (1 << NF_INET_FORWARD), + .me = THIS_MODULE, + }, + { + .name = "mac", + .family = NFPROTO_IPV6, + .match = mac_mt, + .matchsize = sizeof(struct xt_mac_info), + .hooks = (1 << NF_INET_PRE_ROUTING) | + (1 << NF_INET_LOCAL_IN) | + (1 << NF_INET_FORWARD), + .me = THIS_MODULE, + }, }; static int __init mac_mt_init(void) { - return xt_register_match(&mac_mt_reg); + return xt_register_matches(mac_mt_reg, ARRAY_SIZE(mac_mt_reg)); } static void __exit mac_mt_exit(void) { - xt_unregister_match(&mac_mt_reg); + xt_unregister_matches(mac_mt_reg, ARRAY_SIZE(mac_mt_reg)); } module_init(mac_mt_init); diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c index 5bfb4843df66..8f2e57b2a586 100644 --- a/net/netfilter/xt_owner.c +++ b/net/netfilter/xt_owner.c @@ -127,26 +127,39 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par) return true; } -static struct xt_match owner_mt_reg __read_mostly = { - .name = "owner", - .revision = 1, - .family = NFPROTO_UNSPEC, - .checkentry = owner_check, - .match = owner_mt, - .matchsize = sizeof(struct xt_owner_match_info), - .hooks = (1 << NF_INET_LOCAL_OUT) | - (1 << NF_INET_POST_ROUTING), - .me = THIS_MODULE, +static struct xt_match owner_mt_reg[] __read_mostly = { + { + .name = "owner", + .revision = 1, + .family = NFPROTO_IPV4, + .checkentry = owner_check, + .match = owner_mt, + .matchsize = sizeof(struct xt_owner_match_info), + .hooks = (1 << NF_INET_LOCAL_OUT) | + (1 << NF_INET_POST_ROUTING), + .me = THIS_MODULE, + }, + { + .name = "owner", + .revision = 1, + .family = NFPROTO_IPV6, + .checkentry = owner_check, + .match = owner_mt, + .matchsize = sizeof(struct xt_owner_match_info), + .hooks = (1 << NF_INET_LOCAL_OUT) | + (1 << NF_INET_POST_ROUTING), + .me = THIS_MODULE, + } }; static int __init owner_mt_init(void) { - return xt_register_match(&owner_mt_reg); + return xt_register_matches(owner_mt_reg, ARRAY_SIZE(owner_mt_reg)); } static void __exit owner_mt_exit(void) { - xt_unregister_match(&owner_mt_reg); + xt_unregister_matches(owner_mt_reg, ARRAY_SIZE(owner_mt_reg)); } module_init(owner_mt_init); diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index 53997771013f..d2b0b52434fa 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c @@ -137,24 +137,33 @@ static int 
physdev_mt_check(const struct xt_mtchk_param *par)
 	return 0;
 }
 
-static struct xt_match physdev_mt_reg __read_mostly = {
-	.name       = "physdev",
-	.revision   = 0,
-	.family     = NFPROTO_UNSPEC,
-	.checkentry = physdev_mt_check,
-	.match      = physdev_mt,
-	.matchsize  = sizeof(struct xt_physdev_info),
-	.me         = THIS_MODULE,
+static struct xt_match physdev_mt_reg[] __read_mostly = {
+	{
+		.name       = "physdev",
+		.family     = NFPROTO_IPV4,
+		.checkentry = physdev_mt_check,
+		.match      = physdev_mt,
+		.matchsize  = sizeof(struct xt_physdev_info),
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "physdev",
+		.family     = NFPROTO_IPV6,
+		.checkentry = physdev_mt_check,
+		.match      = physdev_mt,
+		.matchsize  = sizeof(struct xt_physdev_info),
+		.me         = THIS_MODULE,
+	},
 };
 
 static int __init physdev_mt_init(void)
 {
-	return xt_register_match(&physdev_mt_reg);
+	return xt_register_matches(physdev_mt_reg, ARRAY_SIZE(physdev_mt_reg));
 }
 
 static void __exit physdev_mt_exit(void)
 {
-	xt_unregister_match(&physdev_mt_reg);
+	xt_unregister_matches(physdev_mt_reg, ARRAY_SIZE(physdev_mt_reg));
 }
 
 module_init(physdev_mt_init);
diff --git a/net/netfilter/xt_realm.c b/net/netfilter/xt_realm.c
index 6df485f4403d..61b2f1e58d15 100644
--- a/net/netfilter/xt_realm.c
+++ b/net/netfilter/xt_realm.c
@@ -33,7 +33,7 @@ static struct xt_match realm_mt_reg __read_mostly = {
 	.matchsize	= sizeof(struct xt_realm_info),
 	.hooks		= (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_FORWARD) |
 			  (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_LOCAL_IN),
-	.family		= NFPROTO_UNSPEC,
+	.family		= NFPROTO_IPV4,
 	.me		= THIS_MODULE
 };
 
-- cgit v1.2.3


From 6eda0d771f94267f73f57c94630aa47e90957915 Mon Sep 17 00:00:00 2001
From: Pablo Neira Ayuso
Date: Wed, 15 Apr 2026 17:29:45 +0200
Subject: netfilter: nat: use kfree_rcu to release ops

Florian Westphal says: "Historically this is not an issue, even for
normal base hooks: the data path doesn't use the original nf_hook_ops
that are used to register the callbacks. However, in v5.14 I added the
ability to dump the active netfilter hooks from userspace. This code
will peek back into the nf_hook_ops that are available at the tail of
the pointer-array blob used by the datapath. The nat hooks are special,
because they are called indirectly from the central nat dispatcher
hook. They are currently invisible to the nfnl hook dump subsystem
though. But once that changes the nat ops structures have to be
deferred too."

Update nf_nat_register_fn() to deal with partial exposure of the hooks
from the error path, which can also be an issue for nfnetlink_hook.
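[Editor's sketch] A deliberately simplified, single-threaded toy (not liburcu, not kernel RCU; all names are made up) of the discipline the kfree_rcu() conversion enforces: an object that lockless readers may still be traversing must not be freed immediately, so frees are queued and executed only once the grace period guarantees no pre-existing reader can still hold the pointer.

#include <stdio.h>
#include <stdlib.h>

struct hook_ops {
	int priv;
};

#define MAX_DEFERRED 16
static void *deferred[MAX_DEFERRED];
static int n_deferred;

static void kfree_rcu_model(void *p)
{
	/* The unsafe equivalent would be free(p) right here, while a
	 * hook-dump reader may still hold a snapshot pointer to p.
	 */
	deferred[n_deferred++] = p;
}

static void synchronize_rcu_model(void)
{
	/* "Grace period over": no reader can still see the old pointer,
	 * so the queued frees are now safe to execute.
	 */
	for (int i = 0; i < n_deferred; i++)
		free(deferred[i]);
	n_deferred = 0;
}

int main(void)
{
	struct hook_ops *ops = malloc(sizeof(*ops));

	if (!ops)
		return 1;
	ops->priv = 42;

	kfree_rcu_model(ops);	/* writer unpublishes, defers the free */
	synchronize_rcu_model();
	printf("freed after grace period\n");
	return 0;
}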
Fixes: e2cf17d3774c ("netfilter: add new hook nfnl subsystem") Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/iptable_nat.c | 4 ++-- net/ipv6/netfilter/ip6table_nat.c | 4 ++-- net/netfilter/nf_nat_core.c | 10 ++++++---- 3 files changed, 10 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c index a5db7c67d61b..625a1ca13b1b 100644 --- a/net/ipv4/netfilter/iptable_nat.c +++ b/net/ipv4/netfilter/iptable_nat.c @@ -79,7 +79,7 @@ static int ipt_nat_register_lookups(struct net *net) while (i) nf_nat_ipv4_unregister_fn(net, &ops[--i]); - kfree(ops); + kfree_rcu(ops, rcu); return ret; } } @@ -100,7 +100,7 @@ static void ipt_nat_unregister_lookups(struct net *net) for (i = 0; i < ARRAY_SIZE(nf_nat_ipv4_ops); i++) nf_nat_ipv4_unregister_fn(net, &ops[i]); - kfree(ops); + kfree_rcu(ops, rcu); } static int iptable_nat_table_init(struct net *net) diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c index e119d4f090cc..5be723232df8 100644 --- a/net/ipv6/netfilter/ip6table_nat.c +++ b/net/ipv6/netfilter/ip6table_nat.c @@ -81,7 +81,7 @@ static int ip6t_nat_register_lookups(struct net *net) while (i) nf_nat_ipv6_unregister_fn(net, &ops[--i]); - kfree(ops); + kfree_rcu(ops, rcu); return ret; } } @@ -102,7 +102,7 @@ static void ip6t_nat_unregister_lookups(struct net *net) for (i = 0; i < ARRAY_SIZE(nf_nat_ipv6_ops); i++) nf_nat_ipv6_unregister_fn(net, &ops[i]); - kfree(ops); + kfree_rcu(ops, rcu); } static int ip6table_nat_table_init(struct net *net) diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 83b2b5e9759a..74ec224ce0d6 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -1222,9 +1222,11 @@ int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops, ret = nf_register_net_hooks(net, nat_ops, ops_count); if (ret < 0) { mutex_unlock(&nf_nat_proto_mutex); - for (i = 0; i < ops_count; i++) - kfree(nat_ops[i].priv); - kfree(nat_ops); + for (i = 0; i < ops_count; i++) { + priv = nat_ops[i].priv; + kfree_rcu(priv, rcu_head); + } + kfree_rcu(nat_ops, rcu); return ret; } @@ -1288,7 +1290,7 @@ void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops, } nat_proto_net->nat_hook_ops = NULL; - kfree(nat_ops); + kfree_rcu(nat_ops, rcu); } unlock: mutex_unlock(&nf_nat_proto_mutex); -- cgit v1.2.3 From 67bf42cae41d847fd6e5749eb68278ca5d748b25 Mon Sep 17 00:00:00 2001 From: Yingnan Zhang <342144303@qq.com> Date: Wed, 15 Apr 2026 22:40:29 +0800 Subject: ipvs: fix MTU check for GSO packets in tunnel mode Currently, IPVS skips MTU checks for GSO packets by excluding them with the !skb_is_gso(skb) condition. This creates problems when IPVS tunnel mode encapsulates GSO packets with IPIP headers. The issue manifests in two ways: 1. MTU violation after encapsulation: When a GSO packet passes through IPVS tunnel mode, the original MTU check is bypassed. After adding the IPIP tunnel header, the packet size may exceed the outgoing interface MTU, leading to unexpected fragmentation at the IP layer. 2. Fragmentation with problematic IP IDs: When net.ipv4.vs.pmtu_disc=1 and a GSO packet with multiple segments is fragmented after encapsulation, each segment gets a sequentially incremented IP ID (0, 1, 2, ...). 
This happens because: a) The GSO packet bypasses MTU check and gets encapsulated b) At __ip_finish_output, the oversized GSO packet is split into separate SKBs (one per segment), with IP IDs incrementing c) Each SKB is then fragmented again based on the actual MTU This sequential IP ID allocation differs from the expected behavior and can cause issues with fragment reassembly and packet tracking. Fix this by properly validating GSO packets using skb_gso_validate_network_len(). This function correctly validates whether the GSO segments will fit within the MTU after segmentation. If validation fails, send an ICMP Fragmentation Needed message to enable proper PMTU discovery. Fixes: 4cdd34084d53 ("netfilter: nf_conntrack_ipv6: improve fragmentation handling") Signed-off-by: Yingnan Zhang <342144303@qq.com> Acked-by: Julian Anastasov Signed-off-by: Pablo Neira Ayuso --- net/netfilter/ipvs/ip_vs_xmit.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 0fb5162992e5..ce542ed4b013 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -102,6 +102,18 @@ __ip_vs_dst_check(struct ip_vs_dest *dest) return dest_dst; } +/* Based on ip_exceeds_mtu(). */ +static bool ip_vs_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) +{ + if (skb->len <= mtu) + return false; + + if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) + return false; + + return true; +} + static inline bool __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu) { @@ -111,10 +123,9 @@ __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu) */ if (IP6CB(skb)->frag_max_size > mtu) return true; /* largest fragment violate MTU */ - } - else if (skb->len > mtu && !skb_is_gso(skb)) { + } else if (ip_vs_exceeds_mtu(skb, mtu)) return true; /* Packet size violate MTU size */ - } + return false; } @@ -232,7 +243,7 @@ static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af, return true; if (unlikely(ip_hdr(skb)->frag_off & htons(IP_DF) && - skb->len > mtu && !skb_is_gso(skb) && + ip_vs_exceeds_mtu(skb, mtu) && !ip_vs_iph_icmp(ipvsh))) { icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); -- cgit v1.2.3 From f5ca450087c3baf3651055e7a6de92600f827af3 Mon Sep 17 00:00:00 2001 From: Fernando Fernandez Mancera Date: Fri, 17 Apr 2026 18:20:56 +0200 Subject: netfilter: nfnetlink_osf: fix out-of-bounds read on option matching In nf_osf_match(), the nf_osf_hdr_ctx structure is initialized once and passed by reference to nf_osf_match_one() for each fingerprint checked. During TCP option parsing, nf_osf_match_one() advances the shared ctx->optp pointer. If a fingerprint perfectly matches, the function returns early without restoring ctx->optp to its initial state. If the user has configured NF_OSF_LOGLEVEL_ALL, the loop continues to the next fingerprint. However, because ctx->optp was not restored, the next call to nf_osf_match_one() starts parsing from the end of the options buffer. This causes subsequent matches to read garbage data and fail immediately, making it impossible to log more than one match or logging incorrect matches. Instead of using a shared ctx->optp pointer, pass the context as a constant pointer and use a local pointer (optp) for TCP option traversal. This makes nf_osf_match_one() strictly stateless from the caller's perspective, ensuring every fingerprint check starts at the correct option offset. 
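[Editor's sketch] A standalone model of the cursor bug and the fix, with a deliberately simplified matcher (real option walking, lengths and fingerprints are elided; the MSS option bytes are illustrative). The buggy variant advances a caller-owned cursor and leaves it at the end of the options, so in log-all mode every later fingerprint is compared against stale bytes; keeping the cursor local makes each call stateless.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ctx {
	const uint8_t *optp;
	size_t len;
};

/* Buggy: walks and permanently advances ctx->optp. */
static bool match_shared(struct ctx *ctx, uint8_t want)
{
	bool ok = ctx->len && ctx->optp[0] == want;

	ctx->optp += ctx->len;	/* cursor left at the end of the options */
	ctx->len = 0;
	return ok;
}

/* Fixed: the identical walk, but over a local copy of the cursor. */
static bool match_local(const struct ctx *ctx, uint8_t want)
{
	const uint8_t *optp = ctx->optp;

	return ctx->len && optp[0] == want;
}

int main(void)
{
	const uint8_t opts[] = { 0x02, 0x04, 0x05, 0xb4 };	/* MSS option */
	struct ctx ctx = { opts, sizeof(opts) };

	/* Log-all mode checks several fingerprints in a row. */
	int a = match_shared(&ctx, 0x02);
	int b = match_shared(&ctx, 0x02);

	printf("shared cursor: %d then %d\n", a, b);	/* 1 then 0 */

	ctx.optp = opts;
	ctx.len = sizeof(opts);
	a = match_local(&ctx, 0x02);
	b = match_local(&ctx, 0x02);
	printf("local cursor : %d then %d\n", a, b);	/* 1 then 1 */
	return 0;
}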
Fixes: 1a6a0951fc00 ("netfilter: nfnetlink_osf: add missing fmatch check")
Suggested-by: Florian Westphal
Signed-off-by: Fernando Fernandez Mancera
Reviewed-by: Pablo Neira Ayuso
Signed-off-by: Pablo Neira Ayuso
---
 net/netfilter/nfnetlink_osf.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

(limited to 'net')

diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index 9de91fdd107c..9b209241029b 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -64,9 +64,9 @@ struct nf_osf_hdr_ctx {
 static bool nf_osf_match_one(const struct sk_buff *skb,
 			     const struct nf_osf_user_finger *f,
 			     int ttl_check,
-			     struct nf_osf_hdr_ctx *ctx)
+			     const struct nf_osf_hdr_ctx *ctx)
 {
-	const __u8 *optpinit = ctx->optp;
+	const __u8 *optp = ctx->optp;
 	unsigned int check_WSS = 0;
 	int fmatch = FMATCH_WRONG;
 	int foptsize, optnum;
@@ -95,17 +95,17 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
 	check_WSS = f->wss.wc;
 
 	for (optnum = 0; optnum < f->opt_num; ++optnum) {
-		if (f->opt[optnum].kind == *ctx->optp) {
+		if (f->opt[optnum].kind == *optp) {
 			__u32 len = f->opt[optnum].length;
-			const __u8 *optend = ctx->optp + len;
+			const __u8 *optend = optp + len;
 
 			fmatch = FMATCH_OK;
 
-			switch (*ctx->optp) {
+			switch (*optp) {
 			case OSFOPT_MSS:
-				mss = ctx->optp[3];
+				mss = optp[3];
 				mss <<= 8;
-				mss |= ctx->optp[2];
+				mss |= optp[2];
 
 				mss = ntohs((__force __be16)mss);
 				break;
@@ -113,7 +113,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
 				break;
 			}
 
-			ctx->optp = optend;
+			optp = optend;
 		} else
 			fmatch = FMATCH_OPT_WRONG;
 
@@ -156,9 +156,6 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
 		}
 	}
 
-	if (fmatch != FMATCH_OK)
-		ctx->optp = optpinit;
-
 	return fmatch == FMATCH_OK;
 }
 
-- cgit v1.2.3


From 711987ba281fd806322a7cd244e98e2a81903114 Mon Sep 17 00:00:00 2001
From: Fernando Fernandez Mancera
Date: Fri, 17 Apr 2026 18:20:57 +0200
Subject: netfilter: nfnetlink_osf: fix potential NULL dereference in ttl check

The nf_osf_ttl() function accessed skb->dev to perform a local
interface address lookup without verifying that the device pointer was
valid. Additionally, the implementation utilized an
in_dev_for_each_ifa_rcu loop to match the packet source address
against local interface addresses. It assumed that packets from the
same subnet should not see a decrement on the initial TTL. A packet
might appear to be from the same subnet when it actually isn't,
especially in modern environments with containers and virtual
switching.

Remove the device dereference and interface loop. Replace the logic
with a switch statement that evaluates the TTL according to the
ttl_check.
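[Editor's sketch] A minimal standalone version of that TTL policy, runnable in userspace. The enum values are illustrative stand-ins for the uapi NF_OSF_TTL_* constants, and the TTLs in main() are made up: a fingerprint recording an initial TTL of 64 against a packet that arrived three hops later with TTL 61.

#include <stdint.h>
#include <stdio.h>

enum { TTL_TRUE, TTL_LESS, TTL_NOCHECK };

static int ttl_ok(int ttl_check, uint8_t pkt_ttl, uint8_t f_ttl)
{
	switch (ttl_check) {
	case TTL_TRUE:
		return pkt_ttl == f_ttl;	/* exact match only */
	case TTL_NOCHECK:
		return 1;			/* fingerprint TTL ignored */
	case TTL_LESS:
	default:
		return pkt_ttl <= f_ttl;	/* allow decremented TTLs */
	}
}

int main(void)
{
	printf("TRUE:    %d\n", ttl_ok(TTL_TRUE, 61, 64));	/* 0 */
	printf("LESS:    %d\n", ttl_ok(TTL_LESS, 61, 64));	/* 1 */
	printf("NOCHECK: %d\n", ttl_ok(TTL_NOCHECK, 61, 64));	/* 1 */
	return 0;
}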
Fixes: 11eeef41d5f6 ("netfilter: passive OS fingerprint xtables match") Reported-by: Kito Xu (veritas501) Closes: https://lore.kernel.org/netfilter-devel/20260414074556.2512750-1-hxzene@gmail.com/ Signed-off-by: Fernando Fernandez Mancera Reviewed-by: Pablo Neira Ayuso Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nfnetlink_osf.c | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) (limited to 'net') diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c index 9b209241029b..acb753ec5697 100644 --- a/net/netfilter/nfnetlink_osf.c +++ b/net/netfilter/nfnetlink_osf.c @@ -31,26 +31,18 @@ EXPORT_SYMBOL_GPL(nf_osf_fingers); static inline int nf_osf_ttl(const struct sk_buff *skb, int ttl_check, unsigned char f_ttl) { - struct in_device *in_dev = __in_dev_get_rcu(skb->dev); const struct iphdr *ip = ip_hdr(skb); - const struct in_ifaddr *ifa; - int ret = 0; - if (ttl_check == NF_OSF_TTL_TRUE) + switch (ttl_check) { + case NF_OSF_TTL_TRUE: return ip->ttl == f_ttl; - if (ttl_check == NF_OSF_TTL_NOCHECK) - return 1; - else if (ip->ttl <= f_ttl) + break; + case NF_OSF_TTL_NOCHECK: return 1; - - in_dev_for_each_ifa_rcu(ifa, in_dev) { - if (inet_ifa_match(ip->saddr, ifa)) { - ret = (ip->ttl == f_ttl); - break; - } + case NF_OSF_TTL_LESS: + default: + return ip->ttl <= f_ttl; } - - return ret; } struct nf_osf_hdr_ctx { -- cgit v1.2.3 From db9e726525e45dbd713c07897a4d20bc18333ccc Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 16 Apr 2026 11:56:58 -0700 Subject: net: add address list snapshot and reconciliation infrastructure Introduce __hw_addr_list_snapshot() and __hw_addr_list_reconcile() for use by the upcoming ndo_set_rx_mode_async callback. The async rx_mode path needs to snapshot the device's unicast and multicast address lists under the addr_lock, hand those snapshots to the driver (which may sleep), and then propagate any sync_cnt changes back to the real lists. Two identical snapshots are taken: a work copy for the driver to pass to __hw_addr_sync_dev() and a reference copy to compute deltas against. __hw_addr_list_reconcile() walks the reference snapshot comparing each entry against the work snapshot to determine what the driver synced or unsynced. It then applies those deltas to the real list, handling concurrent modifications: - If the real entry was concurrently removed but the driver synced it to hardware (delta > 0), re-insert a stale entry so the next work run properly unsyncs it from hardware. - If the entry still exists, apply the delta normally. An entry whose refcount drops to zero is removed. 
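[Editor's sketch] A compact model of the reconcile arithmetic just described, with the rbtree and list machinery elided and only the sync_cnt/refcount bookkeeping kept; the field names mirror struct netdev_hw_addr, everything else is illustrative. The delta between the work snapshot's sync_cnt and the reference snapshot's sync_cnt is applied to the live entry, and an entry whose refcount reaches zero is dropped.

#include <stdbool.h>
#include <stdio.h>

struct hw_addr {
	int sync_cnt;
	int refcount;
	bool present;
};

static void reconcile(struct hw_addr *real, int work_sync, int ref_sync)
{
	int delta = work_sync - ref_sync;

	if (!delta || !real->present)
		return;
	real->sync_cnt += delta;
	real->refcount += delta;
	if (!real->refcount)
		real->present = false;	/* drop from the real list */
}

int main(void)
{
	/* Driver synced a fresh entry: ref sync_cnt 0, work sync_cnt 1. */
	struct hw_addr a = { .sync_cnt = 0, .refcount = 1, .present = true };

	reconcile(&a, 1, 0);
	printf("synced: sync_cnt=%d refcount=%d\n", a.sync_cnt, a.refcount);
	/* -> sync_cnt=1 refcount=2, matching the snapshot_sync kunit case */

	/* Driver unsynced a stale entry (ref 1 -> work 0): the refcount
	 * hits zero and the entry disappears, as in the kunit cases below.
	 */
	struct hw_addr b = { .sync_cnt = 1, .refcount = 1, .present = true };

	reconcile(&b, 0, 1);
	printf("stale removed: present=%d\n", b.present);	/* 0 */
	return 0;
}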
# dev_addr_test_snapshot_benchmark: 1024 addrs x 1000 snapshots: 89872802 ns total, 89872 ns/iter # dev_addr_test_snapshot_benchmark.speed: slow Reviewed-by: Aleksandr Loktionov Signed-off-by: Stanislav Fomichev Link: https://patch.msgid.link/20260416185712.2155425-2-sdf@fomichev.me Signed-off-by: Paolo Abeni --- net/core/dev.h | 1 + net/core/dev_addr_lists.c | 109 ++++++++++++- net/core/dev_addr_lists_test.c | 363 ++++++++++++++++++++++++++++++++++++++++- 3 files changed, 470 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/core/dev.h b/net/core/dev.h index 628bdaebf0ca..585b6d7e88df 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -78,6 +78,7 @@ void linkwatch_run_queue(void); void dev_addr_flush(struct net_device *dev); int dev_addr_init(struct net_device *dev); void dev_addr_check(struct net_device *dev); +void __hw_addr_flush(struct netdev_hw_addr_list *list); #if IS_ENABLED(CONFIG_NET_SHAPER) void net_shaper_flush_netdev(struct net_device *dev); diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 76c91f224886..bb4851bc55ce 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "dev.h" @@ -481,7 +482,7 @@ void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, } EXPORT_SYMBOL(__hw_addr_unsync_dev); -static void __hw_addr_flush(struct netdev_hw_addr_list *list) +void __hw_addr_flush(struct netdev_hw_addr_list *list) { struct netdev_hw_addr *ha, *tmp; @@ -492,6 +493,7 @@ static void __hw_addr_flush(struct netdev_hw_addr_list *list) } list->count = 0; } +EXPORT_SYMBOL_IF_KUNIT(__hw_addr_flush); void __hw_addr_init(struct netdev_hw_addr_list *list) { @@ -501,6 +503,111 @@ void __hw_addr_init(struct netdev_hw_addr_list *list) } EXPORT_SYMBOL(__hw_addr_init); +/** + * __hw_addr_list_snapshot - create a snapshot copy of an address list + * @snap: destination snapshot list (needs to be __hw_addr_init-initialized) + * @list: source address list to snapshot + * @addr_len: length of addresses + * + * Creates a copy of @list with individually allocated entries suitable + * for use with __hw_addr_sync_dev() and other list manipulation helpers. + * Each entry is allocated with GFP_ATOMIC; must be called under a spinlock. + * + * Return: 0 on success, -errno on failure. + */ +int __hw_addr_list_snapshot(struct netdev_hw_addr_list *snap, + const struct netdev_hw_addr_list *list, + int addr_len) +{ + struct netdev_hw_addr *ha, *entry; + + list_for_each_entry(ha, &list->list, list) { + entry = __hw_addr_create(ha->addr, addr_len, ha->type, + false, false); + if (!entry) { + __hw_addr_flush(snap); + return -ENOMEM; + } + entry->sync_cnt = ha->sync_cnt; + entry->refcount = ha->refcount; + + list_add_tail(&entry->list, &snap->list); + __hw_addr_insert(snap, entry, addr_len); + snap->count++; + } + + return 0; +} +EXPORT_SYMBOL_IF_KUNIT(__hw_addr_list_snapshot); + +/** + * __hw_addr_list_reconcile - sync snapshot changes back and free snapshots + * @real_list: the real address list to update + * @work: the working snapshot (modified by driver via __hw_addr_sync_dev) + * @ref: the reference snapshot (untouched copy of original state) + * @addr_len: length of addresses + * + * Walks the reference snapshot and compares each entry against the work + * snapshot to compute sync_cnt deltas. Applies those deltas to @real_list. + * Frees both snapshots when done. + * Caller must hold netif_addr_lock_bh. 
+ */ +void __hw_addr_list_reconcile(struct netdev_hw_addr_list *real_list, + struct netdev_hw_addr_list *work, + struct netdev_hw_addr_list *ref, int addr_len) +{ + struct netdev_hw_addr *ref_ha, *tmp, *work_ha, *real_ha; + int delta; + + list_for_each_entry_safe(ref_ha, tmp, &ref->list, list) { + work_ha = __hw_addr_lookup(work, ref_ha->addr, addr_len, + ref_ha->type); + if (work_ha) + delta = work_ha->sync_cnt - ref_ha->sync_cnt; + else + delta = -1; + + if (delta == 0) + continue; + + real_ha = __hw_addr_lookup(real_list, ref_ha->addr, addr_len, + ref_ha->type); + if (!real_ha) { + /* The real entry was concurrently removed. If the + * driver synced this addr to hardware (delta > 0), + * re-insert it as a stale entry so the next work + * run unsyncs it from hardware. + */ + if (delta > 0) { + rb_erase(&ref_ha->node, &ref->tree); + list_del(&ref_ha->list); + ref->count--; + ref_ha->sync_cnt = delta; + ref_ha->refcount = delta; + list_add_tail_rcu(&ref_ha->list, + &real_list->list); + __hw_addr_insert(real_list, ref_ha, + addr_len); + real_list->count++; + } + continue; + } + + real_ha->sync_cnt += delta; + real_ha->refcount += delta; + if (!real_ha->refcount) { + rb_erase(&real_ha->node, &real_list->tree); + list_del_rcu(&real_ha->list); + kfree_rcu(real_ha, rcu_head); + real_list->count--; + } + } + + __hw_addr_flush(work); + __hw_addr_flush(ref); +} +EXPORT_SYMBOL_IF_KUNIT(__hw_addr_list_reconcile); + /* * Device addresses handling functions */ diff --git a/net/core/dev_addr_lists_test.c b/net/core/dev_addr_lists_test.c index 8e1dba825e94..fba926d5ec0d 100644 --- a/net/core/dev_addr_lists_test.c +++ b/net/core/dev_addr_lists_test.c @@ -2,22 +2,31 @@ #include #include +#include #include #include static const struct net_device_ops dummy_netdev_ops = { }; +#define ADDR_A 1 +#define ADDR_B 2 +#define ADDR_C 3 + struct dev_addr_test_priv { u32 addr_seen; + u32 addr_synced; + u32 addr_unsynced; }; static int dev_addr_test_sync(struct net_device *netdev, const unsigned char *a) { struct dev_addr_test_priv *datp = netdev_priv(netdev); - if (a[0] < 31 && !memchr_inv(a, a[0], ETH_ALEN)) + if (a[0] < 31 && !memchr_inv(a, a[0], ETH_ALEN)) { datp->addr_seen |= 1 << a[0]; + datp->addr_synced |= 1 << a[0]; + } return 0; } @@ -26,11 +35,22 @@ static int dev_addr_test_unsync(struct net_device *netdev, { struct dev_addr_test_priv *datp = netdev_priv(netdev); - if (a[0] < 31 && !memchr_inv(a, a[0], ETH_ALEN)) + if (a[0] < 31 && !memchr_inv(a, a[0], ETH_ALEN)) { datp->addr_seen &= ~(1 << a[0]); + datp->addr_unsynced |= 1 << a[0]; + } return 0; } +static void dev_addr_test_reset(struct net_device *netdev) +{ + struct dev_addr_test_priv *datp = netdev_priv(netdev); + + datp->addr_seen = 0; + datp->addr_synced = 0; + datp->addr_unsynced = 0; +} + static int dev_addr_test_init(struct kunit *test) { struct dev_addr_test_priv *datp; @@ -225,6 +245,339 @@ static void dev_addr_test_add_excl(struct kunit *test) rtnl_unlock(); } +/* Snapshot test: basic sync with no concurrent modifications. + * Add one address, snapshot, driver syncs it, reconcile propagates + * sync_cnt delta back to real list. 
+ */ +static void dev_addr_test_snapshot_sync(struct kunit *test) +{ + struct net_device *netdev = test->priv; + struct netdev_hw_addr_list snap, ref; + struct dev_addr_test_priv *datp; + struct netdev_hw_addr *ha; + u8 addr[ETH_ALEN]; + + datp = netdev_priv(netdev); + + rtnl_lock(); + + memset(addr, ADDR_A, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_uc_add(netdev, addr)); + + /* Snapshot: ADDR_A has sync_cnt=0, refcount=1 (new) */ + netif_addr_lock_bh(netdev); + __hw_addr_init(&snap); + __hw_addr_init(&ref); + KUNIT_EXPECT_EQ(test, 0, + __hw_addr_list_snapshot(&snap, &netdev->uc, ETH_ALEN)); + KUNIT_EXPECT_EQ(test, 0, + __hw_addr_list_snapshot(&ref, &netdev->uc, ETH_ALEN)); + netif_addr_unlock_bh(netdev); + + /* Driver syncs ADDR_A to hardware */ + dev_addr_test_reset(netdev); + __hw_addr_sync_dev(&snap, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 1 << ADDR_A, datp->addr_synced); + KUNIT_EXPECT_EQ(test, 0, datp->addr_unsynced); + + /* Reconcile: delta=+1 applied to real entry */ + netif_addr_lock_bh(netdev); + __hw_addr_list_reconcile(&netdev->uc, &snap, &ref, ETH_ALEN); + netif_addr_unlock_bh(netdev); + + /* Real entry should now reflect the sync: sync_cnt=1, refcount=2 */ + KUNIT_EXPECT_EQ(test, 1, netdev->uc.count); + ha = list_first_entry(&netdev->uc.list, struct netdev_hw_addr, list); + KUNIT_EXPECT_MEMEQ(test, ha->addr, addr, ETH_ALEN); + KUNIT_EXPECT_EQ(test, 1, ha->sync_cnt); + KUNIT_EXPECT_EQ(test, 2, ha->refcount); + + /* Second work run: already synced, nothing to do */ + dev_addr_test_reset(netdev); + __hw_addr_sync_dev(&netdev->uc, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 0, datp->addr_synced); + KUNIT_EXPECT_EQ(test, 0, datp->addr_unsynced); + KUNIT_EXPECT_EQ(test, 1, netdev->uc.count); + + rtnl_unlock(); +} + +/* Snapshot test: ADDR_A synced to hardware, then concurrently removed + * from the real list before reconcile runs. Reconcile re-inserts ADDR_A as + * a stale entry so the next work run unsyncs it from hardware. + */ +static void dev_addr_test_snapshot_remove_during_sync(struct kunit *test) +{ + struct net_device *netdev = test->priv; + struct netdev_hw_addr_list snap, ref; + struct dev_addr_test_priv *datp; + struct netdev_hw_addr *ha; + u8 addr[ETH_ALEN]; + + datp = netdev_priv(netdev); + + rtnl_lock(); + + memset(addr, ADDR_A, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_uc_add(netdev, addr)); + + /* Snapshot: ADDR_A is new (sync_cnt=0, refcount=1) */ + netif_addr_lock_bh(netdev); + __hw_addr_init(&snap); + __hw_addr_init(&ref); + KUNIT_EXPECT_EQ(test, 0, + __hw_addr_list_snapshot(&snap, &netdev->uc, ETH_ALEN)); + KUNIT_EXPECT_EQ(test, 0, + __hw_addr_list_snapshot(&ref, &netdev->uc, ETH_ALEN)); + netif_addr_unlock_bh(netdev); + + /* Driver syncs ADDR_A to hardware */ + dev_addr_test_reset(netdev); + __hw_addr_sync_dev(&snap, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 1 << ADDR_A, datp->addr_synced); + KUNIT_EXPECT_EQ(test, 0, datp->addr_unsynced); + + /* Concurrent removal: user deletes ADDR_A while driver was working */ + memset(addr, ADDR_A, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_uc_del(netdev, addr)); + KUNIT_EXPECT_EQ(test, 0, netdev->uc.count); + + /* Reconcile: ADDR_A gone from real list but driver synced it, + * so it gets re-inserted as stale (sync_cnt=1, refcount=1). 
+ */ + netif_addr_lock_bh(netdev); + __hw_addr_list_reconcile(&netdev->uc, &snap, &ref, ETH_ALEN); + netif_addr_unlock_bh(netdev); + + KUNIT_EXPECT_EQ(test, 1, netdev->uc.count); + ha = list_first_entry(&netdev->uc.list, struct netdev_hw_addr, list); + KUNIT_EXPECT_MEMEQ(test, ha->addr, addr, ETH_ALEN); + KUNIT_EXPECT_EQ(test, 1, ha->sync_cnt); + KUNIT_EXPECT_EQ(test, 1, ha->refcount); + + /* Second work run: stale entry gets unsynced from HW and removed */ + dev_addr_test_reset(netdev); + __hw_addr_sync_dev(&netdev->uc, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 0, datp->addr_synced); + KUNIT_EXPECT_EQ(test, 1 << ADDR_A, datp->addr_unsynced); + KUNIT_EXPECT_EQ(test, 0, netdev->uc.count); + + rtnl_unlock(); +} + +/* Snapshot test: ADDR_A was stale (unsynced from hardware by driver), + * but concurrently re-added by the user. The re-add bumps refcount of + * the existing stale entry. Reconcile applies delta=-1, leaving ADDR_A + * as a fresh entry (sync_cnt=0, refcount=1) for the next work run. + */ +static void dev_addr_test_snapshot_readd_during_unsync(struct kunit *test) +{ + struct net_device *netdev = test->priv; + struct netdev_hw_addr_list snap, ref; + struct dev_addr_test_priv *datp; + struct netdev_hw_addr *ha; + u8 addr[ETH_ALEN]; + + datp = netdev_priv(netdev); + + rtnl_lock(); + + memset(addr, ADDR_A, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_uc_add(netdev, addr)); + + /* Sync ADDR_A to hardware: sync_cnt=1, refcount=2 */ + dev_addr_test_reset(netdev); + __hw_addr_sync_dev(&netdev->uc, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 1 << ADDR_A, datp->addr_synced); + KUNIT_EXPECT_EQ(test, 0, datp->addr_unsynced); + + /* User removes ADDR_A: refcount=1, sync_cnt=1 -> stale */ + KUNIT_EXPECT_EQ(test, 0, dev_uc_del(netdev, addr)); + + /* Snapshot: ADDR_A is stale (sync_cnt=1, refcount=1) */ + netif_addr_lock_bh(netdev); + __hw_addr_init(&snap); + __hw_addr_init(&ref); + KUNIT_EXPECT_EQ(test, 0, + __hw_addr_list_snapshot(&snap, &netdev->uc, ETH_ALEN)); + KUNIT_EXPECT_EQ(test, 0, + __hw_addr_list_snapshot(&ref, &netdev->uc, ETH_ALEN)); + netif_addr_unlock_bh(netdev); + + /* Driver unsyncs stale ADDR_A from hardware */ + dev_addr_test_reset(netdev); + __hw_addr_sync_dev(&snap, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 0, datp->addr_synced); + KUNIT_EXPECT_EQ(test, 1 << ADDR_A, datp->addr_unsynced); + + /* Concurrent: user re-adds ADDR_A. dev_uc_add finds the existing + * stale entry and bumps refcount from 1 -> 2. sync_cnt stays 1. + */ + KUNIT_EXPECT_EQ(test, 0, dev_uc_add(netdev, addr)); + KUNIT_EXPECT_EQ(test, 1, netdev->uc.count); + + /* Reconcile: ref sync_cnt=1 matches real sync_cnt=1, delta=-1 + * applied. Result: sync_cnt=0, refcount=1 (fresh). 
+ */ + netif_addr_lock_bh(netdev); + __hw_addr_list_reconcile(&netdev->uc, &snap, &ref, ETH_ALEN); + netif_addr_unlock_bh(netdev); + + /* Entry survives as fresh: needs re-sync to HW */ + KUNIT_EXPECT_EQ(test, 1, netdev->uc.count); + ha = list_first_entry(&netdev->uc.list, struct netdev_hw_addr, list); + KUNIT_EXPECT_MEMEQ(test, ha->addr, addr, ETH_ALEN); + KUNIT_EXPECT_EQ(test, 0, ha->sync_cnt); + KUNIT_EXPECT_EQ(test, 1, ha->refcount); + + /* Second work run: fresh entry gets synced to HW */ + dev_addr_test_reset(netdev); + __hw_addr_sync_dev(&netdev->uc, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 1 << ADDR_A, datp->addr_synced); + KUNIT_EXPECT_EQ(test, 0, datp->addr_unsynced); + + rtnl_unlock(); +} + +/* Snapshot test: ADDR_A is new (synced by driver), and independent ADDR_B + * is concurrently removed from the real list. A's sync delta propagates + * normally; B's absence doesn't interfere. + */ +static void dev_addr_test_snapshot_add_and_remove(struct kunit *test) +{ + struct net_device *netdev = test->priv; + struct netdev_hw_addr_list snap, ref; + struct dev_addr_test_priv *datp; + struct netdev_hw_addr *ha; + u8 addr[ETH_ALEN]; + + datp = netdev_priv(netdev); + + rtnl_lock(); + + /* Add ADDR_A and ADDR_B (will be synced then removed) */ + memset(addr, ADDR_A, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_uc_add(netdev, addr)); + memset(addr, ADDR_B, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_uc_add(netdev, addr)); + + /* Sync both to hardware: sync_cnt=1, refcount=2 */ + __hw_addr_sync_dev(&netdev->uc, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + + /* Add ADDR_C (new, will be synced by snapshot) */ + memset(addr, ADDR_C, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_uc_add(netdev, addr)); + + /* Snapshot: A,B synced (sync_cnt=1,refcount=2); C new (0,1) */ + netif_addr_lock_bh(netdev); + __hw_addr_init(&snap); + __hw_addr_init(&ref); + KUNIT_EXPECT_EQ(test, 0, + __hw_addr_list_snapshot(&snap, &netdev->uc, ETH_ALEN)); + KUNIT_EXPECT_EQ(test, 0, + __hw_addr_list_snapshot(&ref, &netdev->uc, ETH_ALEN)); + netif_addr_unlock_bh(netdev); + + /* Driver syncs snapshot: ADDR_C is new -> synced; A,B already synced */ + dev_addr_test_reset(netdev); + __hw_addr_sync_dev(&snap, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 1 << ADDR_C, datp->addr_synced); + KUNIT_EXPECT_EQ(test, 0, datp->addr_unsynced); + + /* Concurrent: user removes addr B while driver was working */ + memset(addr, ADDR_B, sizeof(addr)); + KUNIT_EXPECT_EQ(test, 0, dev_uc_del(netdev, addr)); + + /* Reconcile: ADDR_C's delta=+1 applied to real list. + * ADDR_B's delta=0 (unchanged in snapshot), + * so nothing to apply to ADDR_B. 
+ */ + netif_addr_lock_bh(netdev); + __hw_addr_list_reconcile(&netdev->uc, &snap, &ref, ETH_ALEN); + netif_addr_unlock_bh(netdev); + + /* ADDR_A: unchanged (sync_cnt=1, refcount=2) + * ADDR_B: refcount went from 2->1 via dev_uc_del (still present, stale) + * ADDR_C: sync propagated (sync_cnt=1, refcount=2) + */ + KUNIT_EXPECT_EQ(test, 3, netdev->uc.count); + netdev_hw_addr_list_for_each(ha, &netdev->uc) { + u8 id = ha->addr[0]; + + if (!memchr_inv(ha->addr, id, ETH_ALEN)) { + if (id == ADDR_A) { + KUNIT_EXPECT_EQ(test, 1, ha->sync_cnt); + KUNIT_EXPECT_EQ(test, 2, ha->refcount); + } else if (id == ADDR_B) { + /* B: still present but now stale */ + KUNIT_EXPECT_EQ(test, 1, ha->sync_cnt); + KUNIT_EXPECT_EQ(test, 1, ha->refcount); + } else if (id == ADDR_C) { + KUNIT_EXPECT_EQ(test, 1, ha->sync_cnt); + KUNIT_EXPECT_EQ(test, 2, ha->refcount); + } + } + } + + /* Second work run: ADDR_B is stale, gets unsynced and removed */ + dev_addr_test_reset(netdev); + __hw_addr_sync_dev(&netdev->uc, netdev, dev_addr_test_sync, + dev_addr_test_unsync); + KUNIT_EXPECT_EQ(test, 0, datp->addr_synced); + KUNIT_EXPECT_EQ(test, 1 << ADDR_B, datp->addr_unsynced); + KUNIT_EXPECT_EQ(test, 2, netdev->uc.count); + + rtnl_unlock(); +} + +static void dev_addr_test_snapshot_benchmark(struct kunit *test) +{ + struct net_device *netdev = test->priv; + struct netdev_hw_addr_list snap; + u8 addr[ETH_ALEN]; + s64 duration = 0; + ktime_t start; + int i, iter; + + rtnl_lock(); + + for (i = 0; i < 1024; i++) { + memset(addr, 0, sizeof(addr)); + addr[0] = (i >> 8) & 0xff; + addr[1] = i & 0xff; + KUNIT_EXPECT_EQ(test, 0, dev_uc_add(netdev, addr)); + } + + for (iter = 0; iter < 1000; iter++) { + netif_addr_lock_bh(netdev); + __hw_addr_init(&snap); + + start = ktime_get(); + KUNIT_EXPECT_EQ(test, 0, + __hw_addr_list_snapshot(&snap, &netdev->uc, + ETH_ALEN)); + duration += ktime_to_ns(ktime_sub(ktime_get(), start)); + + netif_addr_unlock_bh(netdev); + __hw_addr_flush(&snap); + } + + kunit_info(test, + "1024 addrs x 1000 snapshots: %lld ns total, %lld ns/iter", + duration, div_s64(duration, 1000)); + + rtnl_unlock(); +} + static struct kunit_case dev_addr_test_cases[] = { KUNIT_CASE(dev_addr_test_basic), KUNIT_CASE(dev_addr_test_sync_one), @@ -232,6 +585,11 @@ static struct kunit_case dev_addr_test_cases[] = { KUNIT_CASE(dev_addr_test_del_main), KUNIT_CASE(dev_addr_test_add_set), KUNIT_CASE(dev_addr_test_add_excl), + KUNIT_CASE(dev_addr_test_snapshot_sync), + KUNIT_CASE(dev_addr_test_snapshot_remove_during_sync), + KUNIT_CASE(dev_addr_test_snapshot_readd_during_unsync), + KUNIT_CASE(dev_addr_test_snapshot_add_and_remove), + KUNIT_CASE_SLOW(dev_addr_test_snapshot_benchmark), {} }; @@ -243,5 +601,6 @@ static struct kunit_suite dev_addr_test_suite = { }; kunit_test_suite(dev_addr_test_suite); +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); MODULE_DESCRIPTION("KUnit tests for struct netdev_hw_addr_list"); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 3554b4345d855089ab7af5e3557f5dc3262d14c9 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 16 Apr 2026 11:56:59 -0700 Subject: net: introduce ndo_set_rx_mode_async and netdev_rx_mode_work Add ndo_set_rx_mode_async callback that drivers can implement instead of the legacy ndo_set_rx_mode. The legacy callback runs under the netif_addr_lock spinlock with BHs disabled, preventing drivers from sleeping. The async variant runs from a work queue with rtnl_lock and netdev_lock_ops held, in fully sleepable context. 
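As a rough driver-side sketch (no in-tree driver conversion is shown in
this series; foo_* and the firmware helpers are hypothetical, only the
callback signature and the __hw_addr_sync_dev() calls follow this patch):

  static int foo_sync_addr(struct net_device *dev, const unsigned char *addr)
  {
          /* Sleepable context: firmware calls, mutexes etc. are fine. */
          return foo_fw_add_filter(netdev_priv(dev), addr);
  }

  static int foo_unsync_addr(struct net_device *dev, const unsigned char *addr)
  {
          return foo_fw_del_filter(netdev_priv(dev), addr);
  }

  static void foo_set_rx_mode_async(struct net_device *dev,
                                    struct netdev_hw_addr_list *uc,
                                    struct netdev_hw_addr_list *mc)
  {
          /* @uc/@mc are private snapshots; the addr_lock is NOT held. */
          __hw_addr_sync_dev(uc, dev, foo_sync_addr, foo_unsync_addr);
          __hw_addr_sync_dev(mc, dev, foo_sync_addr, foo_unsync_addr);
  }

Because the driver operates on snapshots, any list mutations it performs
(sync_cnt updates from __hw_addr_sync_dev()) are folded back into the
real lists by the reconcile step described below.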
When __dev_set_rx_mode() sees ndo_set_rx_mode_async, it schedules
netdev_rx_mode_work instead of calling the driver inline. The work
function takes two snapshots of each address list (uc/mc) under the
addr_lock, then drops the lock and calls the driver with the work
copies. After the driver returns, it reconciles the snapshots back
to the real lists under the lock.

Add netif_rx_mode_sync() to opportunistically execute the pending
workqueue update inline, so that rx mode changes are committed before
returning to userspace:
- dev_change_flags (SIOCSIFFLAGS / RTM_NEWLINK)
- dev_set_promiscuity
- dev_set_allmulti
- dev_ifsioc SIOCADDMULTI / SIOCDELMULTI
- do_setlink (RTM_SETLINK)

Note that some deep hierarchies can still skip the lower-level updates
via:
- dev_uc_sync
- dev_mc_sync

If we do end up hitting user-visible issues, we can add more calls to
netif_rx_mode_sync in specific places. Hopefully we will not need to:
the user-visible address lists are still synced immediately; it is only
the HW state that may lag behind.

Signed-off-by: Stanislav Fomichev
Link: https://patch.msgid.link/20260416185712.2155425-3-sdf@fomichev.me
Signed-off-by: Paolo Abeni
---
 net/core/dev.c            |  43 +---------
 net/core/dev.h            |   3 +
 net/core/dev_addr_lists.c | 209 ++++++++++++++++++++++++++++++++++++++++++++++
 net/core/dev_api.c        |   3 +
 net/core/dev_ioctl.c      |   6 +-
 net/core/rtnetlink.c      |   1 +
 6 files changed, 222 insertions(+), 43 deletions(-)

(limited to 'net')

diff --git a/net/core/dev.c b/net/core/dev.c
index e59f6025067c..b37061238a25 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -9593,7 +9593,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
 		ops->ndo_change_rx_flags(dev, flags);
 }
 
-static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
+int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
 {
 	unsigned int old_flags = dev->flags;
 	unsigned int promiscuity, flags;
@@ -9697,46 +9697,6 @@ int netif_set_allmulti(struct net_device *dev, int inc, bool notify)
 	return 0;
 }
 
-/*
- * Upload unicast and multicast address lists to device and
- * configure RX filtering. When the device doesn't support unicast
- * filtering it is put in promiscuous mode while unicast addresses
- * are present.
- */
-void __dev_set_rx_mode(struct net_device *dev)
-{
-	const struct net_device_ops *ops = dev->netdev_ops;
-
-	/* dev_open will call this function so the list will stay sane. */
-	if (!(dev->flags&IFF_UP))
-		return;
-
-	if (!netif_device_present(dev))
-		return;
-
-	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
-		/* Unicast addresses changes may only happen under the rtnl,
-		 * therefore calling __dev_set_promiscuity here is safe.
- */ - if (!netdev_uc_empty(dev) && !dev->uc_promisc) { - __dev_set_promiscuity(dev, 1, false); - dev->uc_promisc = true; - } else if (netdev_uc_empty(dev) && dev->uc_promisc) { - __dev_set_promiscuity(dev, -1, false); - dev->uc_promisc = false; - } - } - - if (ops->ndo_set_rx_mode) - ops->ndo_set_rx_mode(dev); -} - -void dev_set_rx_mode(struct net_device *dev) -{ - netif_addr_lock_bh(dev); - __dev_set_rx_mode(dev); - netif_addr_unlock_bh(dev); -} /** * netif_get_flags() - get flags reported to userspace @@ -12127,6 +12087,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, #endif mutex_init(&dev->lock); + INIT_LIST_HEAD(&dev->rx_mode_node); dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; setup(dev); diff --git a/net/core/dev.h b/net/core/dev.h index 585b6d7e88df..0cf24b8f5008 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -165,6 +165,9 @@ int netif_change_carrier(struct net_device *dev, bool new_carrier); int dev_change_carrier(struct net_device *dev, bool new_carrier); void __dev_set_rx_mode(struct net_device *dev); +int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify); +bool netif_rx_mode_clean(struct net_device *dev); +void netif_rx_mode_sync(struct net_device *dev); void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, unsigned int gchanges, u32 portid, diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index bb4851bc55ce..056bca6fce12 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -11,10 +11,18 @@ #include #include #include +#include +#include #include #include "dev.h" +static void netdev_rx_mode_work(struct work_struct *work); + +static LIST_HEAD(rx_mode_list); +static DEFINE_SPINLOCK(rx_mode_lock); +static DECLARE_WORK(rx_mode_work, netdev_rx_mode_work); + /* * General list handling functions */ @@ -1156,3 +1164,204 @@ void dev_mc_init(struct net_device *dev) __hw_addr_init(&dev->mc); } EXPORT_SYMBOL(dev_mc_init); + +static int netif_addr_lists_snapshot(struct net_device *dev, + struct netdev_hw_addr_list *uc_snap, + struct netdev_hw_addr_list *mc_snap, + struct netdev_hw_addr_list *uc_ref, + struct netdev_hw_addr_list *mc_ref) +{ + int err; + + err = __hw_addr_list_snapshot(uc_snap, &dev->uc, dev->addr_len); + if (!err) + err = __hw_addr_list_snapshot(uc_ref, &dev->uc, dev->addr_len); + if (!err) + err = __hw_addr_list_snapshot(mc_snap, &dev->mc, + dev->addr_len); + if (!err) + err = __hw_addr_list_snapshot(mc_ref, &dev->mc, dev->addr_len); + + if (err) { + __hw_addr_flush(uc_snap); + __hw_addr_flush(uc_ref); + __hw_addr_flush(mc_snap); + } + + return err; +} + +static void netif_addr_lists_reconcile(struct net_device *dev, + struct netdev_hw_addr_list *uc_snap, + struct netdev_hw_addr_list *mc_snap, + struct netdev_hw_addr_list *uc_ref, + struct netdev_hw_addr_list *mc_ref) +{ + __hw_addr_list_reconcile(&dev->uc, uc_snap, uc_ref, dev->addr_len); + __hw_addr_list_reconcile(&dev->mc, mc_snap, mc_ref, dev->addr_len); +} + +static void netif_rx_mode_run(struct net_device *dev) +{ + struct netdev_hw_addr_list uc_snap, mc_snap, uc_ref, mc_ref; + const struct net_device_ops *ops = dev->netdev_ops; + int err; + + might_sleep(); + netdev_ops_assert_locked(dev); + + __hw_addr_init(&uc_snap); + __hw_addr_init(&mc_snap); + __hw_addr_init(&uc_ref); + __hw_addr_init(&mc_ref); + + if (!(dev->flags & IFF_UP) || !netif_device_present(dev)) + return; + + netif_addr_lock_bh(dev); + err = netif_addr_lists_snapshot(dev, &uc_snap, &mc_snap, + &uc_ref, &mc_ref); + if (err) { 
+		netdev_WARN(dev, "failed to sync uc/mc addresses\n");
+		netif_addr_unlock_bh(dev);
+		return;
+	}
+	netif_addr_unlock_bh(dev);
+
+	ops->ndo_set_rx_mode_async(dev, &uc_snap, &mc_snap);
+
+	netif_addr_lock_bh(dev);
+	netif_addr_lists_reconcile(dev, &uc_snap, &mc_snap,
+				   &uc_ref, &mc_ref);
+	netif_addr_unlock_bh(dev);
+}
+
+static void netdev_rx_mode_work(struct work_struct *work)
+{
+	struct net_device *dev;
+
+	rtnl_lock();
+
+	while (true) {
+		spin_lock_bh(&rx_mode_lock);
+		if (list_empty(&rx_mode_list)) {
+			spin_unlock_bh(&rx_mode_lock);
+			break;
+		}
+		dev = list_first_entry(&rx_mode_list, struct net_device,
+				       rx_mode_node);
+		list_del_init(&dev->rx_mode_node);
+		/* We must free the netdev tracker
+		 * under spinlock protection.
+		 */
+		netdev_tracker_free(dev, &dev->rx_mode_tracker);
+		spin_unlock_bh(&rx_mode_lock);
+
+		netdev_lock_ops(dev);
+		netif_rx_mode_run(dev);
+		netdev_unlock_ops(dev);
+		/* Use __dev_put() because netdev_tracker_free() was already
+		 * called above. Must be after netdev_unlock_ops() to prevent
+		 * netdev_run_todo() from freeing the device while still in use.
+		 */
+		__dev_put(dev);
+	}
+
+	rtnl_unlock();
+}
+
+static void netif_rx_mode_queue(struct net_device *dev)
+{
+	spin_lock_bh(&rx_mode_lock);
+	if (list_empty(&dev->rx_mode_node)) {
+		list_add_tail(&dev->rx_mode_node, &rx_mode_list);
+		netdev_hold(dev, &dev->rx_mode_tracker, GFP_ATOMIC);
+	}
+	spin_unlock_bh(&rx_mode_lock);
+	schedule_work(&rx_mode_work);
+}
+
+/**
+ * __dev_set_rx_mode() - upload unicast and multicast address lists to device
+ * and configure RX filtering.
+ * @dev: device
+ *
+ * When the device doesn't support unicast filtering it is put in promiscuous
+ * mode while unicast addresses are present.
+ */
+void __dev_set_rx_mode(struct net_device *dev)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	/* dev_open will call this function so the list will stay sane. */
+	if (!(dev->flags & IFF_UP))
+		return;
+
+	if (!netif_device_present(dev))
+		return;
+
+	if (ops->ndo_set_rx_mode_async) {
+		netif_rx_mode_queue(dev);
+		return;
+	}
+
+	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
+		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
+			__dev_set_promiscuity(dev, 1, false);
+			dev->uc_promisc = true;
+		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
+			__dev_set_promiscuity(dev, -1, false);
+			dev->uc_promisc = false;
+		}
+	}
+
+	if (ops->ndo_set_rx_mode)
+		ops->ndo_set_rx_mode(dev);
+}
+
+void dev_set_rx_mode(struct net_device *dev)
+{
+	netif_addr_lock_bh(dev);
+	__dev_set_rx_mode(dev);
+	netif_addr_unlock_bh(dev);
+}
+
+bool netif_rx_mode_clean(struct net_device *dev)
+{
+	bool clean = false;
+
+	spin_lock_bh(&rx_mode_lock);
+	if (!list_empty(&dev->rx_mode_node)) {
+		list_del_init(&dev->rx_mode_node);
+		clean = true;
+		/* We must release the netdev tracker
+		 * under spinlock protection.
+		 */
+		netdev_tracker_free(dev, &dev->rx_mode_tracker);
+	}
+	spin_unlock_bh(&rx_mode_lock);
+
+	return clean;
+}
+
+/**
+ * netif_rx_mode_sync() - sync rx mode inline
+ * @dev: network device
+ *
+ * Drivers implementing ndo_set_rx_mode_async() have their rx mode callback
+ * executed from a workqueue. This allows the callback to sleep, but means
+ * the hardware update is deferred and may not be visible to userspace
+ * by the time the initiating syscall returns. netif_rx_mode_sync() steals
+ * the pending workqueue update and executes it inline. This preserves the
+ * atomicity of these operations as observed by userspace.
+ */ +void netif_rx_mode_sync(struct net_device *dev) +{ + if (netif_rx_mode_clean(dev)) { + netif_rx_mode_run(dev); + /* Use __dev_put() because netdev_tracker_free() was already + * called inside netif_rx_mode_clean(). + */ + __dev_put(dev); + } +} diff --git a/net/core/dev_api.c b/net/core/dev_api.c index f28852078aa6..437947dd08ed 100644 --- a/net/core/dev_api.c +++ b/net/core/dev_api.c @@ -66,6 +66,7 @@ int dev_change_flags(struct net_device *dev, unsigned int flags, netdev_lock_ops(dev); ret = netif_change_flags(dev, flags, extack); + netif_rx_mode_sync(dev); netdev_unlock_ops(dev); return ret; @@ -285,6 +286,7 @@ int dev_set_promiscuity(struct net_device *dev, int inc) netdev_lock_ops(dev); ret = netif_set_promiscuity(dev, inc); + netif_rx_mode_sync(dev); netdev_unlock_ops(dev); return ret; @@ -311,6 +313,7 @@ int dev_set_allmulti(struct net_device *dev, int inc) netdev_lock_ops(dev); ret = netif_set_allmulti(dev, inc, true); + netif_rx_mode_sync(dev); netdev_unlock_ops(dev); return ret; diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 7a8966544c9d..f3979b276090 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -586,24 +586,26 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, return err; case SIOCADDMULTI: - if (!ops->ndo_set_rx_mode || + if ((!ops->ndo_set_rx_mode && !ops->ndo_set_rx_mode_async) || ifr->ifr_hwaddr.sa_family != AF_UNSPEC) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; netdev_lock_ops(dev); err = dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); + netif_rx_mode_sync(dev); netdev_unlock_ops(dev); return err; case SIOCDELMULTI: - if (!ops->ndo_set_rx_mode || + if ((!ops->ndo_set_rx_mode && !ops->ndo_set_rx_mode_async) || ifr->ifr_hwaddr.sa_family != AF_UNSPEC) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; netdev_lock_ops(dev); err = dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); + netif_rx_mode_sync(dev); netdev_unlock_ops(dev); return err; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 69daba3ddaf0..b613bb6e07df 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3431,6 +3431,7 @@ errout: dev->name); } + netif_rx_mode_sync(dev); netdev_unlock_ops(dev); return err; -- cgit v1.2.3 From a4c833278144917982510ca43a3438155756122a Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 16 Apr 2026 11:57:00 -0700 Subject: net: cache snapshot entries for ndo_set_rx_mode_async Add a per-device netdev_hw_addr_list cache (rx_mode_addr_cache) that allows __hw_addr_list_snapshot() and __hw_addr_list_reconcile() to reuse previously allocated entries instead of hitting GFP_ATOMIC on every snapshot cycle. snapshot pops entries from the cache when available, falling back to __hw_addr_create(). reconcile splices both snapshot lists back into the cache via __hw_addr_splice(). The cache is flushed in free_netdev(). 
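A full cycle with the cache then looks roughly like this (error
handling elided; a usage sketch that mirrors the shapes of
netif_addr_lists_snapshot() and netif_addr_lists_reconcile() below):

  netif_addr_lock_bh(dev);
  __hw_addr_init(&snap);
  __hw_addr_init(&ref);
  /* First cycle: cache is empty, entries come from __hw_addr_create() */
  __hw_addr_list_snapshot(&snap, &dev->uc, dev->addr_len,
                          &dev->rx_mode_addr_cache);
  __hw_addr_list_snapshot(&ref, &dev->uc, dev->addr_len,
                          &dev->rx_mode_addr_cache);
  netif_addr_unlock_bh(dev);

  /* ... sleepable driver work on &snap ... */

  netif_addr_lock_bh(dev);
  /* Entries from both lists are spliced into the cache here, so the
   * next snapshot pops them instead of calling kmalloc(GFP_ATOMIC).
   */
  __hw_addr_list_reconcile(&dev->uc, &snap, &ref, dev->addr_len,
                           &dev->rx_mode_addr_cache);
  netif_addr_unlock_bh(dev);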
Signed-off-by: Stanislav Fomichev Link: https://patch.msgid.link/20260416185712.2155425-4-sdf@fomichev.me Signed-off-by: Paolo Abeni --- net/core/dev.c | 3 ++ net/core/dev_addr_lists.c | 66 ++++++++++++++++++++++++++++++------------ net/core/dev_addr_lists_test.c | 60 ++++++++++++++++++++++++++------------ 3 files changed, 92 insertions(+), 37 deletions(-) (limited to 'net') diff --git a/net/core/dev.c b/net/core/dev.c index b37061238a25..8597ec56fd64 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -12088,6 +12088,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, mutex_init(&dev->lock); INIT_LIST_HEAD(&dev->rx_mode_node); + __hw_addr_init(&dev->rx_mode_addr_cache); dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; setup(dev); @@ -12192,6 +12193,8 @@ void free_netdev(struct net_device *dev) kfree(rcu_dereference_protected(dev->ingress_queue, 1)); + __hw_addr_flush(&dev->rx_mode_addr_cache); + /* Flush device addresses */ dev_addr_flush(dev); diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 056bca6fce12..7bab2ed0f625 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -511,30 +511,50 @@ void __hw_addr_init(struct netdev_hw_addr_list *list) } EXPORT_SYMBOL(__hw_addr_init); +static void __hw_addr_splice(struct netdev_hw_addr_list *dst, + struct netdev_hw_addr_list *src) +{ + src->tree = RB_ROOT; + list_splice_init(&src->list, &dst->list); + dst->count += src->count; + src->count = 0; +} + /** * __hw_addr_list_snapshot - create a snapshot copy of an address list * @snap: destination snapshot list (needs to be __hw_addr_init-initialized) * @list: source address list to snapshot * @addr_len: length of addresses + * @cache: entry cache to reuse entries from; falls back to GFP_ATOMIC * - * Creates a copy of @list with individually allocated entries suitable - * for use with __hw_addr_sync_dev() and other list manipulation helpers. - * Each entry is allocated with GFP_ATOMIC; must be called under a spinlock. + * Creates a copy of @list reusing entries from @cache when available. + * Must be called under a spinlock. * * Return: 0 on success, -errno on failure. */ int __hw_addr_list_snapshot(struct netdev_hw_addr_list *snap, const struct netdev_hw_addr_list *list, - int addr_len) + int addr_len, struct netdev_hw_addr_list *cache) { struct netdev_hw_addr *ha, *entry; list_for_each_entry(ha, &list->list, list) { - entry = __hw_addr_create(ha->addr, addr_len, ha->type, - false, false); - if (!entry) { - __hw_addr_flush(snap); - return -ENOMEM; + if (cache->count) { + entry = list_first_entry(&cache->list, + struct netdev_hw_addr, list); + list_del(&entry->list); + cache->count--; + memcpy(entry->addr, ha->addr, addr_len); + entry->type = ha->type; + entry->global_use = false; + entry->synced = 0; + } else { + entry = __hw_addr_create(ha->addr, addr_len, ha->type, + false, false); + if (!entry) { + __hw_addr_flush(snap); + return -ENOMEM; + } } entry->sync_cnt = ha->sync_cnt; entry->refcount = ha->refcount; @@ -554,15 +574,17 @@ EXPORT_SYMBOL_IF_KUNIT(__hw_addr_list_snapshot); * @work: the working snapshot (modified by driver via __hw_addr_sync_dev) * @ref: the reference snapshot (untouched copy of original state) * @addr_len: length of addresses + * @cache: entry cache to return snapshot entries to for reuse * * Walks the reference snapshot and compares each entry against the work * snapshot to compute sync_cnt deltas. Applies those deltas to @real_list. - * Frees both snapshots when done. 
+ * Returns snapshot entries to @cache for reuse; frees both snapshots. * Caller must hold netif_addr_lock_bh. */ void __hw_addr_list_reconcile(struct netdev_hw_addr_list *real_list, struct netdev_hw_addr_list *work, - struct netdev_hw_addr_list *ref, int addr_len) + struct netdev_hw_addr_list *ref, int addr_len, + struct netdev_hw_addr_list *cache) { struct netdev_hw_addr *ref_ha, *tmp, *work_ha, *real_ha; int delta; @@ -611,8 +633,8 @@ void __hw_addr_list_reconcile(struct netdev_hw_addr_list *real_list, } } - __hw_addr_flush(work); - __hw_addr_flush(ref); + __hw_addr_splice(cache, work); + __hw_addr_splice(cache, ref); } EXPORT_SYMBOL_IF_KUNIT(__hw_addr_list_reconcile); @@ -1173,14 +1195,18 @@ static int netif_addr_lists_snapshot(struct net_device *dev, { int err; - err = __hw_addr_list_snapshot(uc_snap, &dev->uc, dev->addr_len); + err = __hw_addr_list_snapshot(uc_snap, &dev->uc, dev->addr_len, + &dev->rx_mode_addr_cache); if (!err) - err = __hw_addr_list_snapshot(uc_ref, &dev->uc, dev->addr_len); + err = __hw_addr_list_snapshot(uc_ref, &dev->uc, dev->addr_len, + &dev->rx_mode_addr_cache); if (!err) err = __hw_addr_list_snapshot(mc_snap, &dev->mc, - dev->addr_len); + dev->addr_len, + &dev->rx_mode_addr_cache); if (!err) - err = __hw_addr_list_snapshot(mc_ref, &dev->mc, dev->addr_len); + err = __hw_addr_list_snapshot(mc_ref, &dev->mc, dev->addr_len, + &dev->rx_mode_addr_cache); if (err) { __hw_addr_flush(uc_snap); @@ -1197,8 +1223,10 @@ static void netif_addr_lists_reconcile(struct net_device *dev, struct netdev_hw_addr_list *uc_ref, struct netdev_hw_addr_list *mc_ref) { - __hw_addr_list_reconcile(&dev->uc, uc_snap, uc_ref, dev->addr_len); - __hw_addr_list_reconcile(&dev->mc, mc_snap, mc_ref, dev->addr_len); + __hw_addr_list_reconcile(&dev->uc, uc_snap, uc_ref, dev->addr_len, + &dev->rx_mode_addr_cache); + __hw_addr_list_reconcile(&dev->mc, mc_snap, mc_ref, dev->addr_len, + &dev->rx_mode_addr_cache); } static void netif_rx_mode_run(struct net_device *dev) diff --git a/net/core/dev_addr_lists_test.c b/net/core/dev_addr_lists_test.c index fba926d5ec0d..260e71a2399f 100644 --- a/net/core/dev_addr_lists_test.c +++ b/net/core/dev_addr_lists_test.c @@ -251,8 +251,8 @@ static void dev_addr_test_add_excl(struct kunit *test) */ static void dev_addr_test_snapshot_sync(struct kunit *test) { + struct netdev_hw_addr_list snap, ref, cache; struct net_device *netdev = test->priv; - struct netdev_hw_addr_list snap, ref; struct dev_addr_test_priv *datp; struct netdev_hw_addr *ha; u8 addr[ETH_ALEN]; @@ -268,10 +268,13 @@ static void dev_addr_test_snapshot_sync(struct kunit *test) netif_addr_lock_bh(netdev); __hw_addr_init(&snap); __hw_addr_init(&ref); + __hw_addr_init(&cache); KUNIT_EXPECT_EQ(test, 0, - __hw_addr_list_snapshot(&snap, &netdev->uc, ETH_ALEN)); + __hw_addr_list_snapshot(&snap, &netdev->uc, ETH_ALEN, + &cache)); KUNIT_EXPECT_EQ(test, 0, - __hw_addr_list_snapshot(&ref, &netdev->uc, ETH_ALEN)); + __hw_addr_list_snapshot(&ref, &netdev->uc, ETH_ALEN, + &cache)); netif_addr_unlock_bh(netdev); /* Driver syncs ADDR_A to hardware */ @@ -283,7 +286,8 @@ static void dev_addr_test_snapshot_sync(struct kunit *test) /* Reconcile: delta=+1 applied to real entry */ netif_addr_lock_bh(netdev); - __hw_addr_list_reconcile(&netdev->uc, &snap, &ref, ETH_ALEN); + __hw_addr_list_reconcile(&netdev->uc, &snap, &ref, ETH_ALEN, + &cache); netif_addr_unlock_bh(netdev); /* Real entry should now reflect the sync: sync_cnt=1, refcount=2 */ @@ -301,6 +305,7 @@ static void dev_addr_test_snapshot_sync(struct kunit *test) 
KUNIT_EXPECT_EQ(test, 0, datp->addr_unsynced); KUNIT_EXPECT_EQ(test, 1, netdev->uc.count); + __hw_addr_flush(&cache); rtnl_unlock(); } @@ -310,8 +315,8 @@ static void dev_addr_test_snapshot_sync(struct kunit *test) */ static void dev_addr_test_snapshot_remove_during_sync(struct kunit *test) { + struct netdev_hw_addr_list snap, ref, cache; struct net_device *netdev = test->priv; - struct netdev_hw_addr_list snap, ref; struct dev_addr_test_priv *datp; struct netdev_hw_addr *ha; u8 addr[ETH_ALEN]; @@ -327,10 +332,13 @@ static void dev_addr_test_snapshot_remove_during_sync(struct kunit *test) netif_addr_lock_bh(netdev); __hw_addr_init(&snap); __hw_addr_init(&ref); + __hw_addr_init(&cache); KUNIT_EXPECT_EQ(test, 0, - __hw_addr_list_snapshot(&snap, &netdev->uc, ETH_ALEN)); + __hw_addr_list_snapshot(&snap, &netdev->uc, ETH_ALEN, + &cache)); KUNIT_EXPECT_EQ(test, 0, - __hw_addr_list_snapshot(&ref, &netdev->uc, ETH_ALEN)); + __hw_addr_list_snapshot(&ref, &netdev->uc, ETH_ALEN, + &cache)); netif_addr_unlock_bh(netdev); /* Driver syncs ADDR_A to hardware */ @@ -349,7 +357,8 @@ static void dev_addr_test_snapshot_remove_during_sync(struct kunit *test) * so it gets re-inserted as stale (sync_cnt=1, refcount=1). */ netif_addr_lock_bh(netdev); - __hw_addr_list_reconcile(&netdev->uc, &snap, &ref, ETH_ALEN); + __hw_addr_list_reconcile(&netdev->uc, &snap, &ref, ETH_ALEN, + &cache); netif_addr_unlock_bh(netdev); KUNIT_EXPECT_EQ(test, 1, netdev->uc.count); @@ -366,6 +375,7 @@ static void dev_addr_test_snapshot_remove_during_sync(struct kunit *test) KUNIT_EXPECT_EQ(test, 1 << ADDR_A, datp->addr_unsynced); KUNIT_EXPECT_EQ(test, 0, netdev->uc.count); + __hw_addr_flush(&cache); rtnl_unlock(); } @@ -376,8 +386,8 @@ static void dev_addr_test_snapshot_remove_during_sync(struct kunit *test) */ static void dev_addr_test_snapshot_readd_during_unsync(struct kunit *test) { + struct netdev_hw_addr_list snap, ref, cache; struct net_device *netdev = test->priv; - struct netdev_hw_addr_list snap, ref; struct dev_addr_test_priv *datp; struct netdev_hw_addr *ha; u8 addr[ETH_ALEN]; @@ -403,10 +413,13 @@ static void dev_addr_test_snapshot_readd_during_unsync(struct kunit *test) netif_addr_lock_bh(netdev); __hw_addr_init(&snap); __hw_addr_init(&ref); + __hw_addr_init(&cache); KUNIT_EXPECT_EQ(test, 0, - __hw_addr_list_snapshot(&snap, &netdev->uc, ETH_ALEN)); + __hw_addr_list_snapshot(&snap, &netdev->uc, ETH_ALEN, + &cache)); KUNIT_EXPECT_EQ(test, 0, - __hw_addr_list_snapshot(&ref, &netdev->uc, ETH_ALEN)); + __hw_addr_list_snapshot(&ref, &netdev->uc, ETH_ALEN, + &cache)); netif_addr_unlock_bh(netdev); /* Driver unsyncs stale ADDR_A from hardware */ @@ -426,7 +439,8 @@ static void dev_addr_test_snapshot_readd_during_unsync(struct kunit *test) * applied. Result: sync_cnt=0, refcount=1 (fresh). 
*/ netif_addr_lock_bh(netdev); - __hw_addr_list_reconcile(&netdev->uc, &snap, &ref, ETH_ALEN); + __hw_addr_list_reconcile(&netdev->uc, &snap, &ref, ETH_ALEN, + &cache); netif_addr_unlock_bh(netdev); /* Entry survives as fresh: needs re-sync to HW */ @@ -443,6 +457,7 @@ static void dev_addr_test_snapshot_readd_during_unsync(struct kunit *test) KUNIT_EXPECT_EQ(test, 1 << ADDR_A, datp->addr_synced); KUNIT_EXPECT_EQ(test, 0, datp->addr_unsynced); + __hw_addr_flush(&cache); rtnl_unlock(); } @@ -452,8 +467,8 @@ static void dev_addr_test_snapshot_readd_during_unsync(struct kunit *test) */ static void dev_addr_test_snapshot_add_and_remove(struct kunit *test) { + struct netdev_hw_addr_list snap, ref, cache; struct net_device *netdev = test->priv; - struct netdev_hw_addr_list snap, ref; struct dev_addr_test_priv *datp; struct netdev_hw_addr *ha; u8 addr[ETH_ALEN]; @@ -480,10 +495,13 @@ static void dev_addr_test_snapshot_add_and_remove(struct kunit *test) netif_addr_lock_bh(netdev); __hw_addr_init(&snap); __hw_addr_init(&ref); + __hw_addr_init(&cache); KUNIT_EXPECT_EQ(test, 0, - __hw_addr_list_snapshot(&snap, &netdev->uc, ETH_ALEN)); + __hw_addr_list_snapshot(&snap, &netdev->uc, ETH_ALEN, + &cache)); KUNIT_EXPECT_EQ(test, 0, - __hw_addr_list_snapshot(&ref, &netdev->uc, ETH_ALEN)); + __hw_addr_list_snapshot(&ref, &netdev->uc, ETH_ALEN, + &cache)); netif_addr_unlock_bh(netdev); /* Driver syncs snapshot: ADDR_C is new -> synced; A,B already synced */ @@ -502,7 +520,8 @@ static void dev_addr_test_snapshot_add_and_remove(struct kunit *test) * so nothing to apply to ADDR_B. */ netif_addr_lock_bh(netdev); - __hw_addr_list_reconcile(&netdev->uc, &snap, &ref, ETH_ALEN); + __hw_addr_list_reconcile(&netdev->uc, &snap, &ref, ETH_ALEN, + &cache); netif_addr_unlock_bh(netdev); /* ADDR_A: unchanged (sync_cnt=1, refcount=2) @@ -536,13 +555,14 @@ static void dev_addr_test_snapshot_add_and_remove(struct kunit *test) KUNIT_EXPECT_EQ(test, 1 << ADDR_B, datp->addr_unsynced); KUNIT_EXPECT_EQ(test, 2, netdev->uc.count); + __hw_addr_flush(&cache); rtnl_unlock(); } static void dev_addr_test_snapshot_benchmark(struct kunit *test) { struct net_device *netdev = test->priv; - struct netdev_hw_addr_list snap; + struct netdev_hw_addr_list snap, cache; u8 addr[ETH_ALEN]; s64 duration = 0; ktime_t start; @@ -557,6 +577,8 @@ static void dev_addr_test_snapshot_benchmark(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, dev_uc_add(netdev, addr)); } + __hw_addr_init(&cache); + for (iter = 0; iter < 1000; iter++) { netif_addr_lock_bh(netdev); __hw_addr_init(&snap); @@ -564,13 +586,15 @@ static void dev_addr_test_snapshot_benchmark(struct kunit *test) start = ktime_get(); KUNIT_EXPECT_EQ(test, 0, __hw_addr_list_snapshot(&snap, &netdev->uc, - ETH_ALEN)); + ETH_ALEN, &cache)); duration += ktime_to_ns(ktime_sub(ktime_get(), start)); netif_addr_unlock_bh(netdev); __hw_addr_flush(&snap); } + __hw_addr_flush(&cache); + kunit_info(test, "1024 addrs x 1000 snapshots: %lld ns total, %lld ns/iter", duration, div_s64(duration, 1000)); -- cgit v1.2.3 From 7ef83bf1712b5c441a21d5df844202433f6b0b05 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 16 Apr 2026 11:57:01 -0700 Subject: net: move promiscuity handling into netdev_rx_mode_work Move unicast promiscuity tracking into netdev_rx_mode_work so it runs under netdev_ops_lock instead of under the addr_lock spinlock. This is required because __dev_set_promiscuity calls dev_change_rx_flags and __dev_notify_flags, both of which may need to sleep. 
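For illustration only (foo_* is a made-up driver, not part of this
series), this is the kind of rx-flags handler that makes the old
spinlocked, BH-disabled context unusable:

  struct foo_priv {
          struct mutex fw_mutex;
          /* ... */
  };

  static void foo_change_rx_flags(struct net_device *dev, int flags)
  {
          struct foo_priv *priv = netdev_priv(dev);

          if (flags & IFF_PROMISC) {
                  mutex_lock(&priv->fw_mutex);    /* may sleep */
                  foo_fw_set_promisc(priv, !!(dev->flags & IFF_PROMISC));
                  mutex_unlock(&priv->fw_mutex);
          }
  }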
Change ASSERT_RTNL() to netdev_ops_assert_locked() in __dev_set_promiscuity, netif_set_allmulti and __dev_change_flags since these are now called from the work queue under the ops lock. Link: https://lore.kernel.org/netdev/20260214033859.43857-1-jiayuan.chen@linux.dev/ Fixes: 78cd408356fe ("net: add missing instance lock to dev_set_promiscuity") Reported-by: syzbot+2b3391f44313b3983e91@syzkaller.appspotmail.com Reviewed-by: Aleksandr Loktionov Signed-off-by: Stanislav Fomichev Link: https://patch.msgid.link/20260416185712.2155425-5-sdf@fomichev.me Signed-off-by: Paolo Abeni --- net/core/dev.c | 16 +++------ net/core/dev_addr_lists.c | 82 ++++++++++++++++++++++++++++++++++------------- 2 files changed, 64 insertions(+), 34 deletions(-) (limited to 'net') diff --git a/net/core/dev.c b/net/core/dev.c index 8597ec56fd64..8a69aed56fca 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -9600,7 +9600,7 @@ int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) kuid_t uid; kgid_t gid; - ASSERT_RTNL(); + netdev_ops_assert_locked(dev); promiscuity = dev->promiscuity + inc; if (promiscuity == 0) { @@ -9636,16 +9636,8 @@ int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) dev_change_rx_flags(dev, IFF_PROMISC); } - if (notify) { - /* The ops lock is only required to ensure consistent locking - * for `NETDEV_CHANGE` notifiers. This function is sometimes - * called without the lock, even for devices that are ops - * locked, such as in `dev_uc_sync_multiple` when using - * bonding or teaming. - */ - netdev_ops_assert_locked(dev); + if (notify) __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL); - } return 0; } @@ -9667,7 +9659,7 @@ int netif_set_allmulti(struct net_device *dev, int inc, bool notify) unsigned int old_flags = dev->flags, old_gflags = dev->gflags; unsigned int allmulti, flags; - ASSERT_RTNL(); + netdev_ops_assert_locked(dev); allmulti = dev->allmulti + inc; if (allmulti == 0) { @@ -9735,7 +9727,7 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags, unsigned int old_flags = dev->flags; int ret; - ASSERT_RTNL(); + netdev_ops_assert_locked(dev); /* * Set the flags on our device. diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 7bab2ed0f625..4c9e8a69493f 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -1229,10 +1229,34 @@ static void netif_addr_lists_reconcile(struct net_device *dev, &dev->rx_mode_addr_cache); } +/** + * netif_uc_promisc_update() - evaluate whether uc_promisc should be toggled. + * @dev: device + * + * Must be called under netif_addr_lock_bh. + * Return: +1 to enter promisc, -1 to leave, 0 for no change. 
+ */ +static int netif_uc_promisc_update(struct net_device *dev) +{ + if (dev->priv_flags & IFF_UNICAST_FLT) + return 0; + + if (!netdev_uc_empty(dev) && !dev->uc_promisc) { + dev->uc_promisc = true; + return 1; + } + if (netdev_uc_empty(dev) && dev->uc_promisc) { + dev->uc_promisc = false; + return -1; + } + return 0; +} + static void netif_rx_mode_run(struct net_device *dev) { struct netdev_hw_addr_list uc_snap, mc_snap, uc_ref, mc_ref; const struct net_device_ops *ops = dev->netdev_ops; + int promisc_inc; int err; might_sleep(); @@ -1246,22 +1270,39 @@ static void netif_rx_mode_run(struct net_device *dev) if (!(dev->flags & IFF_UP) || !netif_device_present(dev)) return; - netif_addr_lock_bh(dev); - err = netif_addr_lists_snapshot(dev, &uc_snap, &mc_snap, - &uc_ref, &mc_ref); - if (err) { - netdev_WARN(dev, "failed to sync uc/mc addresses\n"); + if (ops->ndo_set_rx_mode_async) { + netif_addr_lock_bh(dev); + err = netif_addr_lists_snapshot(dev, &uc_snap, &mc_snap, + &uc_ref, &mc_ref); + if (err) { + netdev_WARN(dev, "failed to sync uc/mc addresses\n"); + netif_addr_unlock_bh(dev); + return; + } + + promisc_inc = netif_uc_promisc_update(dev); + netif_addr_unlock_bh(dev); + } else { + netif_addr_lock_bh(dev); + promisc_inc = netif_uc_promisc_update(dev); netif_addr_unlock_bh(dev); - return; } - netif_addr_unlock_bh(dev); - ops->ndo_set_rx_mode_async(dev, &uc_snap, &mc_snap); + if (promisc_inc) + __dev_set_promiscuity(dev, promisc_inc, false); - netif_addr_lock_bh(dev); - netif_addr_lists_reconcile(dev, &uc_snap, &mc_snap, - &uc_ref, &mc_ref); - netif_addr_unlock_bh(dev); + if (ops->ndo_set_rx_mode_async) { + ops->ndo_set_rx_mode_async(dev, &uc_snap, &mc_snap); + + netif_addr_lock_bh(dev); + netif_addr_lists_reconcile(dev, &uc_snap, &mc_snap, + &uc_ref, &mc_ref); + netif_addr_unlock_bh(dev); + } else if (ops->ndo_set_rx_mode) { + netif_addr_lock_bh(dev); + ops->ndo_set_rx_mode(dev); + netif_addr_unlock_bh(dev); + } } static void netdev_rx_mode_work(struct work_struct *work) @@ -1320,6 +1361,7 @@ static void netif_rx_mode_queue(struct net_device *dev) void __dev_set_rx_mode(struct net_device *dev) { const struct net_device_ops *ops = dev->netdev_ops; + int promisc_inc; /* dev_open will call this function so the list will stay sane. */ if (!(dev->flags & IFF_UP)) @@ -1328,20 +1370,16 @@ void __dev_set_rx_mode(struct net_device *dev) if (!netif_device_present(dev)) return; - if (ops->ndo_set_rx_mode_async) { + if (ops->ndo_set_rx_mode_async || ops->ndo_change_rx_flags) { netif_rx_mode_queue(dev); return; } - if (!(dev->priv_flags & IFF_UNICAST_FLT)) { - if (!netdev_uc_empty(dev) && !dev->uc_promisc) { - __dev_set_promiscuity(dev, 1, false); - dev->uc_promisc = true; - } else if (netdev_uc_empty(dev) && dev->uc_promisc) { - __dev_set_promiscuity(dev, -1, false); - dev->uc_promisc = false; - } - } + /* Legacy path for non-ops-locked HW devices. */ + + promisc_inc = netif_uc_promisc_update(dev); + if (promisc_inc) + __dev_set_promiscuity(dev, promisc_inc, false); if (ops->ndo_set_rx_mode) ops->ndo_set_rx_mode(dev); -- cgit v1.2.3 From 3cbd22938877e0c0c7c193a91d5a6d5149c39490 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 16 Apr 2026 11:57:10 -0700 Subject: net: warn ops-locked drivers still using ndo_set_rx_mode Now that all in-tree ops-locked drivers have been converted to ndo_set_rx_mode_async, add a warning in register_netdevice to catch any remaining or newly added drivers that use ndo_set_rx_mode with ops locking. 
This ensures future driver authors are guided toward the async path.

Also route ops-locked devices through netdev_rx_mode_work even if they
lack rx_mode NDOs, to ensure netdev_ops_assert_locked() does not fire
on the legacy path where only RTNL is held.

Reviewed-by: Aleksandr Loktionov
Signed-off-by: Stanislav Fomichev
Link: https://patch.msgid.link/20260416185712.2155425-14-sdf@fomichev.me
Signed-off-by: Paolo Abeni
---
 net/core/dev.c            | 5 +++++
 net/core/dev_addr_lists.c | 3 ++-
 2 files changed, 7 insertions(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/core/dev.c b/net/core/dev.c
index 8a69aed56fca..d426c1beeb76 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -11360,6 +11360,11 @@ int register_netdevice(struct net_device *dev)
 		goto err_uninit;
 	}
 
+	if (netdev_need_ops_lock(dev) &&
+	    dev->netdev_ops->ndo_set_rx_mode &&
+	    !dev->netdev_ops->ndo_set_rx_mode_async)
+		netdev_WARN(dev, "ops-locked drivers should use ndo_set_rx_mode_async\n");
+
 	ret = netdev_do_alloc_pcpu_stats(dev);
 	if (ret)
 		goto err_uninit;
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 4c9e8a69493f..d73fcb0c6785 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -1370,7 +1370,8 @@ void __dev_set_rx_mode(struct net_device *dev)
 	if (!netif_device_present(dev))
 		return;
 
-	if (ops->ndo_set_rx_mode_async || ops->ndo_change_rx_flags) {
+	if (ops->ndo_set_rx_mode_async || ops->ndo_change_rx_flags ||
+	    netdev_need_ops_lock(dev)) {
 		netif_rx_mode_queue(dev);
 		return;
 	}
--
cgit v1.2.3


From 478ed6b7d2577439c610f91fa8759a4c878a4264 Mon Sep 17 00:00:00 2001
From: Chia-Yu Chang
Date: Fri, 17 Apr 2026 17:25:51 +0200
Subject: net/sched: sch_dualpi2: drain both C-queue and L-queue in dualpi2_change()

Fix dualpi2_change() to correctly enforce updated limit and memlimit
values after a configuration change of the dualpi2 qdisc.

Before this patch, dualpi2_change() always attempted to dequeue packets
via the root qdisc (C-queue) when reducing backlog or memory usage, and
unconditionally assumed that a valid skb would be returned. When traffic
classification results in packets being queued in the L-queue while the
C-queue is empty, this leads to a NULL skb dereference during limit or
memlimit enforcement.

This is fixed by first dequeuing from the C-queue path if it is
non-empty. Once the C-queue is empty, packets are dequeued directly from
the L-queue. Return values from qdisc_dequeue_internal() are checked for
both queues. When dequeuing from the L-queue, the parent qdisc qlen and
backlog counters are updated explicitly to keep overall qdisc statistics
consistent.
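Since L-queue packets are accounted in both sch->q.qlen and the
L-queue's own qlen (see the comment in the hunk below), the drain order
follows from comparing the two counters; schematically (drain_one() is
shorthand for the dequeue-and-drop sequence, not a real helper):

  while (qdisc_qlen(sch) > sch->limit ||
         q->memory_used > q->memory_limit) {
          if (qdisc_qlen(sch) > qdisc_qlen(q->l_queue))
                  drain_one(sch);         /* C-queue still holds packets */
          else if (qdisc_qlen(q->l_queue))
                  drain_one(q->l_queue);  /* only L-queue packets remain */
          else
                  break;                  /* counters say empty: stop */
  }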
Fixes: 320d031ad6e4 ("sched: Struct definition and parsing of dualpi2 qdisc") Reported-by: "Kito Xu (veritas501)" Closes: https://lore.kernel.org/netdev/20260413075740.2234828-1-hxzene@gmail.com/ Signed-off-by: Chia-Yu Chang Link: https://patch.msgid.link/20260417152551.71648-1-chia-yu.chang@nokia-bell-labs.com Signed-off-by: Paolo Abeni --- net/sched/sch_dualpi2.c | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/sched/sch_dualpi2.c b/net/sched/sch_dualpi2.c index fe6f5e889625..241e6a46bd00 100644 --- a/net/sched/sch_dualpi2.c +++ b/net/sched/sch_dualpi2.c @@ -868,11 +868,35 @@ static int dualpi2_change(struct Qdisc *sch, struct nlattr *opt, old_backlog = sch->qstats.backlog; while (qdisc_qlen(sch) > sch->limit || q->memory_used > q->memory_limit) { - struct sk_buff *skb = qdisc_dequeue_internal(sch, true); + struct sk_buff *skb = NULL; - q->memory_used -= skb->truesize; - qdisc_qstats_backlog_dec(sch, skb); - rtnl_qdisc_drop(skb, sch); + if (qdisc_qlen(sch) > qdisc_qlen(q->l_queue)) { + skb = qdisc_dequeue_internal(sch, true); + if (unlikely(!skb)) { + WARN_ON_ONCE(1); + break; + } + q->memory_used -= skb->truesize; + rtnl_qdisc_drop(skb, sch); + } else if (qdisc_qlen(q->l_queue)) { + skb = qdisc_dequeue_internal(q->l_queue, true); + if (unlikely(!skb)) { + WARN_ON_ONCE(1); + break; + } + /* L-queue packets are counted in both sch and + * l_queue on enqueue; qdisc_dequeue_internal() + * handled l_queue, so we further account for sch. + */ + --sch->q.qlen; + qdisc_qstats_backlog_dec(sch, skb); + q->memory_used -= skb->truesize; + rtnl_qdisc_drop(skb, q->l_queue); + qdisc_qstats_drop(sch); + } else { + WARN_ON_ONCE(1); + break; + } } qdisc_tree_reduce_backlog(sch, old_qlen - qdisc_qlen(sch), old_backlog - sch->qstats.backlog); -- cgit v1.2.3 From 2c054e17d9d41f1020376806c7f750834ced4dc5 Mon Sep 17 00:00:00 2001 From: Bingquan Chen Date: Sat, 18 Apr 2026 19:20:06 +0800 Subject: net/packet: fix TOCTOU race on mmap'd vnet_hdr in tpacket_snd() In tpacket_snd(), when PACKET_VNET_HDR is enabled, vnet_hdr points directly into the mmap'd TX ring buffer shared with userspace. The kernel validates the header via __packet_snd_vnet_parse() but then re-reads all fields later in virtio_net_hdr_to_skb(). A concurrent userspace thread can modify the vnet_hdr fields between validation and use, bypassing all safety checks. The non-TPACKET path (packet_snd()) already correctly copies vnet_hdr to a stack-local variable. All other vnet_hdr consumers in the kernel (tun.c, tap.c, virtio_net.c) also use stack copies. The TPACKET TX path is the only caller of virtio_net_hdr_to_skb() that reads directly from user-controlled shared memory. Fix this by copying vnet_hdr from the mmap'd ring buffer to a stack-local variable before validation and use, consistent with the approach used in packet_snd() and all other callers. 
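Schematically (simplified fragments, not the literal function bodies;
ring_slot stands for the mmap'd TX ring entry):

  /* before: hdr aliases the writable, shared ring slot */
  struct virtio_net_hdr *hdr = ring_slot;

  if (__packet_snd_vnet_parse(hdr, tp_len))        /* validate */
          goto tpacket_error;
  if (virtio_net_hdr_to_skb(skb, hdr, vio_le()))   /* re-reads fields a
                                                    * concurrent thread
                                                    * may have rewritten */
          goto tpacket_error;

  /* after: validate and consume one immutable stack copy */
  struct virtio_net_hdr hdr;

  memcpy(&hdr, ring_slot, sizeof(hdr));
  if (__packet_snd_vnet_parse(&hdr, tp_len))
          goto tpacket_error;
  if (virtio_net_hdr_to_skb(skb, &hdr, vio_le()))
          goto tpacket_error;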
Fixes: 1d036d25e560 ("packet: tpacket_snd gso and checksum offload") Signed-off-by: Bingquan Chen Reviewed-by: Willem de Bruijn Link: https://patch.msgid.link/20260418112006.78823-1-patzilla007@gmail.com Signed-off-by: Jakub Kicinski --- net/packet/af_packet.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4b043241fd56..8e6f3a734ba0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2718,7 +2718,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) { struct sk_buff *skb = NULL; struct net_device *dev; - struct virtio_net_hdr *vnet_hdr = NULL; + struct virtio_net_hdr vnet_hdr; + bool has_vnet_hdr = false; struct sockcm_cookie sockc; __be16 proto; int err, reserve = 0; @@ -2819,16 +2820,20 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; if (vnet_hdr_sz) { - vnet_hdr = data; data += vnet_hdr_sz; tp_len -= vnet_hdr_sz; - if (tp_len < 0 || - __packet_snd_vnet_parse(vnet_hdr, tp_len)) { + if (tp_len < 0) { + tp_len = -EINVAL; + goto tpacket_error; + } + memcpy(&vnet_hdr, data - vnet_hdr_sz, sizeof(vnet_hdr)); + if (__packet_snd_vnet_parse(&vnet_hdr, tp_len)) { tp_len = -EINVAL; goto tpacket_error; } copylen = __virtio16_to_cpu(vio_le(), - vnet_hdr->hdr_len); + vnet_hdr.hdr_len); + has_vnet_hdr = true; } copylen = max_t(int, copylen, dev->hard_header_len); skb = sock_alloc_send_skb(&po->sk, @@ -2865,12 +2870,12 @@ tpacket_error: } } - if (vnet_hdr_sz) { - if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) { + if (has_vnet_hdr) { + if (virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le())) { tp_len = -EINVAL; goto tpacket_error; } - virtio_net_hdr_set_proto(skb, vnet_hdr); + virtio_net_hdr_set_proto(skb, &vnet_hdr); } skb->destructor = tpacket_destruct_skb; -- cgit v1.2.3 From 7c9b012d6367a335f1e91da28401a7c612305a46 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Apr 2026 17:09:40 -0400 Subject: sctp: fix sockets_allocated imbalance after sk_clone() sk_clone() increments sockets_allocated and sets the socket refcount to 2. SCTP performs additional accounting in sctp_clone_sock(), so the clone-time increment must be undone to avoid double counting. Note we cannot simply remove the SCTP-side increment, because the SCTP destroy path in sctp_destroy_sock() only decrements sockets_allocated when sp->ep is set, which may not be true for all failure paths in sctp_clone_sock(). 
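The resulting accounting, schematically:

  newsk = sk_clone(sk, ...);        /* refcnt = 2, sockets_allocated += 1 */
  sock_put(newsk);                  /* refcnt = 1                         */
  sk_sockets_allocated_dec(newsk);  /* undo the clone-time increment so   */
                                    /* only SCTP's own accounting remains,*/
                                    /* matching sctp_destroy_sock()       */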
Fixes: 16942cf4d3e3 ("sctp: Use sk_clone() in sctp_accept().") Signed-off-by: Xin Long Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/af8d66f928dec3e9fcbee8d4a85b7d5a6b86f515.1776460180.git.lucien.xin@gmail.com Signed-off-by: Jakub Kicinski --- net/sctp/socket.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f52fe90d3e00..58d0d9747f0b 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4855,8 +4855,9 @@ static struct sock *sctp_clone_sock(struct sock *sk, if (!newsk) return ERR_PTR(err); - /* sk_clone() sets refcnt to 2 */ + /* sk_clone() sets refcnt to 2 and increments sockets_allocated */ sock_put(newsk); + sk_sockets_allocated_dec(newsk); newinet = inet_sk(newsk); newsp = sctp_sk(newsk); -- cgit v1.2.3 From ade67d5f588832c7ba131aadd4215a94ce0a15c8 Mon Sep 17 00:00:00 2001 From: Andrea Mayer Date: Sat, 18 Apr 2026 18:28:38 +0200 Subject: seg6: fix seg6 lwtunnel output redirect for L2 reduced encap mode When SEG6_IPTUN_MODE_L2ENCAP_RED (L2ENCAP_RED) was introduced, the condition in seg6_build_state() that excludes L2 encap modes from setting LWTUNNEL_STATE_OUTPUT_REDIRECT was not updated to account for the new mode. As a consequence, L2ENCAP_RED routes incorrectly trigger seg6_output() on the output path, where the packet is silently dropped because skb_mac_header_was_set() fails on L3 packets. Extend the check to also exclude L2ENCAP_RED, consistent with L2ENCAP. Fixes: 13f0296be8ec ("seg6: add support for SRv6 H.L2Encaps.Red behavior") Cc: stable@vger.kernel.org Signed-off-by: Andrea Mayer Reviewed-by: Justin Iurman Link: https://patch.msgid.link/20260418162838.31979-1-andrea.mayer@uniroma2.it Signed-off-by: Jakub Kicinski --- net/ipv6/seg6_iptunnel.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 97b50d9b1365..9b64343ebad6 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -746,7 +746,8 @@ static int seg6_build_state(struct net *net, struct nlattr *nla, newts->type = LWTUNNEL_ENCAP_SEG6; newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT; - if (tuninfo->mode != SEG6_IPTUN_MODE_L2ENCAP) + if (tuninfo->mode != SEG6_IPTUN_MODE_L2ENCAP && + tuninfo->mode != SEG6_IPTUN_MODE_L2ENCAP_RED) newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT; newts->headroom = seg6_lwt_headroom(tuninfo); -- cgit v1.2.3 From c88eb7e8d8397a8c1db59c425332c5a30b2a1682 Mon Sep 17 00:00:00 2001 From: Michael Bommarito Date: Sat, 18 Apr 2026 10:10:47 -0400 Subject: net/rds: zero per-item info buffer before handing it to visitors rds_for_each_conn_info() and rds_walk_conn_path_info() both hand a caller-allocated on-stack u64 buffer to a per-connection visitor and then copy the full item_len bytes back to user space via rds_info_copy() regardless of how much of the buffer the visitor actually wrote. rds_ib_conn_info_visitor() and rds6_ib_conn_info_visitor() only write a subset of their output struct when the underlying rds_connection is not in state RDS_CONN_UP (src/dst addr, tos, sl and the two GIDs via explicit memsets). Several u32 fields (max_send_wr, max_recv_wr, max_send_sge, rdma_mr_max, rdma_mr_size, cache_allocs) and the 2-byte alignment hole between sl and cache_allocs remain as whatever stack contents preceded the visitor call and are then memcpy_to_user()'d out to user space. 
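Reduced to its essentials, the leak pattern looks like this (buffer
sizing and the iterator setup are simplified):

  u64 buffer[(item_len + 7) / 8];            /* on-stack, never zeroed */

  hlist_for_each_entry_rcu(conn, head, c_hash_node) {
          if (!visitor(conn, buffer))        /* may write only part of
                                              * the buffer             */
                  continue;
          rds_info_copy(iter, buffer, item_len); /* copies ALL item_len
                                                  * bytes, stack garbage
                                                  * included            */
  }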
struct rds_info_rdma_connection and struct rds6_info_rdma_connection are the only rds_info_* structs in include/uapi/linux/rds.h that are not marked __attribute__((packed)), so they have a real alignment hole. The other info visitors (rds_conn_info_visitor, rds6_conn_info_visitor, rds_tcp_tc_info, ...) write all fields of their packed output struct today and are not known to be vulnerable, but a future visitor that adds a conditional write-path would have the same bug. Reproduction on a kernel built without CONFIG_INIT_STACK_ALL_ZERO=y: a local unprivileged user opens AF_RDS, sets SO_RDS_TRANSPORT=IB, binds to a local address on an RDMA-capable netdev (rxe soft-RoCE on any netdev is sufficient), sendto()'s any peer on the same subnet (fails cleanly but installs an rds_connection in the global hash in RDS_CONN_CONNECTING), then calls getsockopt(SOL_RDS, RDS_INFO_IB_CONNECTIONS). The returned 68-byte item contains 26 bytes of stack garbage including kernel text/data pointers: 0..7 0a 63 00 01 0a 63 00 02 src=10.99.0.1 dst=10.99.0.2 8..39 00 ... gids (memset-zeroed) 40..47 e0 92 a3 81 ff ff ff ff kernel pointer (max_send_wr) 48..55 7f 37 b5 81 ff ff ff ff kernel pointer (rdma_mr_max) 56..59 01 00 08 00 rdma_mr_size (garbage) 60..61 00 00 tos, sl 62..63 00 00 alignment padding 64..67 18 00 00 00 cache_allocs (garbage) Fix by zeroing the per-item buffer in both rds_for_each_conn_info() and rds_walk_conn_path_info() before invoking the visitor. This covers the IPv4/IPv6 IB visitors and hardens all current and future visitors against the same class of bug. No functional change for visitors that fully populate their output. Changes in v2: - retarget at the net tree (subject prefix "[PATCH net v2]", net/rds: prefix in the title) - pick up Reviewed-by tags from Sharath Srinivasan and Allison Henderson Fixes: ec16227e1414 ("RDS/IB: Infiniband transport") Signed-off-by: Michael Bommarito Reviewed-by: Sharath Srinivasan Reviewed-by: Allison Henderson Assisted-by: Claude:claude-opus-4-7 Link: https://patch.msgid.link/20260418141047.3398203-1-michael.bommarito@gmail.com Signed-off-by: Jakub Kicinski --- net/rds/connection.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'net') diff --git a/net/rds/connection.c b/net/rds/connection.c index 412441aaa298..c10b7ed06c49 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -701,6 +701,13 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len, i++, head++) { hlist_for_each_entry_rcu(conn, head, c_hash_node) { + /* Zero the per-item buffer before handing it to the + * visitor so any field the visitor does not write - + * including implicit alignment padding - cannot leak + * stack contents to user space via rds_info_copy(). + */ + memset(buffer, 0, item_len); + /* XXX no c_lock usage.. */ if (!visitor(conn, buffer)) continue; @@ -750,6 +757,13 @@ static void rds_walk_conn_path_info(struct socket *sock, unsigned int len, */ cp = conn->c_path; + /* Zero the per-item buffer for the same reason as + * rds_for_each_conn_info(): any byte the visitor + * does not write (including alignment padding) must + * not leak stack contents via rds_info_copy(). + */ + memset(buffer, 0, item_len); + /* XXX no cp_lock usage.. 
*/ if (!visitor(cp, buffer)) continue; -- cgit v1.2.3 From a6edf2cd4156b71e07258876b7626692e158f7e8 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 21 Apr 2026 14:33:49 +0000 Subject: net_sched: sch_hhf: annotate data-races in hhf_dump_stats() hhf_dump_stats() only runs with RTNL held, reading fields that can be changed in qdisc fast path. Add READ_ONCE()/WRITE_ONCE() annotations. Fixes: edb09eb17ed8 ("net: sched: do not acquire qdisc spinlock in qdisc/class stats dump") Signed-off-by: Eric Dumazet Reviewed-by: Jamal Hadi Salim Link: https://patch.msgid.link/20260421143349.4052215-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/sched/sch_hhf.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 95e5d9bfd9c8..96021f52d835 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -198,7 +198,8 @@ static struct hh_flow_state *seek_list(const u32 hash, return NULL; list_del(&flow->flowchain); kfree(flow); - q->hh_flows_current_cnt--; + WRITE_ONCE(q->hh_flows_current_cnt, + q->hh_flows_current_cnt - 1); } else if (flow->hash_id == hash) { return flow; } @@ -226,7 +227,7 @@ static struct hh_flow_state *alloc_new_hh(struct list_head *head, } if (q->hh_flows_current_cnt >= q->hh_flows_limit) { - q->hh_flows_overlimit++; + WRITE_ONCE(q->hh_flows_overlimit, q->hh_flows_overlimit + 1); return NULL; } /* Create new entry. */ @@ -234,7 +235,7 @@ static struct hh_flow_state *alloc_new_hh(struct list_head *head, if (!flow) return NULL; - q->hh_flows_current_cnt++; + WRITE_ONCE(q->hh_flows_current_cnt, q->hh_flows_current_cnt + 1); INIT_LIST_HEAD(&flow->flowchain); list_add_tail(&flow->flowchain, head); @@ -309,7 +310,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch) return WDRR_BUCKET_FOR_NON_HH; flow->hash_id = hash; flow->hit_timestamp = now; - q->hh_flows_total_cnt++; + WRITE_ONCE(q->hh_flows_total_cnt, q->hh_flows_total_cnt + 1); /* By returning without updating counters in q->hhf_arrays, * we implicitly implement "shielding" (see Optimization O1). @@ -403,7 +404,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch, return NET_XMIT_SUCCESS; prev_backlog = sch->qstats.backlog; - q->drop_overlimit++; + WRITE_ONCE(q->drop_overlimit, q->drop_overlimit + 1); /* Return Congestion Notification only if we dropped a packet from this * bucket. */ @@ -686,10 +687,10 @@ static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct hhf_sched_data *q = qdisc_priv(sch); struct tc_hhf_xstats st = { - .drop_overlimit = q->drop_overlimit, - .hh_overlimit = q->hh_flows_overlimit, - .hh_tot_count = q->hh_flows_total_cnt, - .hh_cur_count = q->hh_flows_current_cnt, + .drop_overlimit = READ_ONCE(q->drop_overlimit), + .hh_overlimit = READ_ONCE(q->hh_flows_overlimit), + .hh_tot_count = READ_ONCE(q->hh_flows_total_cnt), + .hh_cur_count = READ_ONCE(q->hh_flows_current_cnt), }; return gnet_stats_copy_app(d, &st, sizeof(st)); -- cgit v1.2.3 From 5154561d9b119f781249f8e845fecf059b38b483 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 21 Apr 2026 14:29:44 +0000 Subject: net/sched: sch_pie: annotate data-races in pie_dump_stats() pie_dump_stats() only runs with RTNL held, reading fields that can be changed in qdisc fast path. Add READ_ONCE()/WRITE_ONCE() annotations. Alternative would be to acquire the qdisc spinlock, but our long-term goal is to make qdisc dump operations lockless as much as we can. 
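For readers outside the tree, the annotation pattern applied throughout this series reduces to the following sketch (the _ONCE macros here are simplified stand-ins for the kernel's <linux/compiler.h> versions):

	/* Simplified stand-ins; the kernel versions also handle wider types. */
	#define READ_ONCE(x)	 (*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

	struct stats {
		unsigned int drop_overlimit;
	};

	/* Fast path: sole writer, runs under the qdisc lock. */
	static void on_drop(struct stats *s)
	{
		WRITE_ONCE(s->drop_overlimit, s->drop_overlimit + 1);
	}

	/* Dump path: lockless reader under RTNL; gets a tear-free snapshot. */
	static unsigned int dump_drops(const struct stats *s)
	{
		return READ_ONCE(s->drop_overlimit);
	}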
tc_pie_xstats fields don't need to be latched atomically, otherwise this bug would have been caught earlier. Fixes: edb09eb17ed8 ("net: sched: do not acquire qdisc spinlock in qdisc/class stats dump") Signed-off-by: Eric Dumazet Reviewed-by: Jamal Hadi Salim Link: https://patch.msgid.link/20260421142944.4009941-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/sched/sch_pie.c | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) (limited to 'net') diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index 16f3f629cb8e..fb53fbf0e328 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -90,7 +90,7 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, bool enqueue = false; if (unlikely(qdisc_qlen(sch) >= sch->limit)) { - q->stats.overlimit++; + WRITE_ONCE(q->stats.overlimit, q->stats.overlimit + 1); goto out; } @@ -104,7 +104,7 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, /* If packet is ecn capable, mark it if drop probability * is lower than 10%, else drop it. */ - q->stats.ecn_mark++; + WRITE_ONCE(q->stats.ecn_mark, q->stats.ecn_mark + 1); enqueue = true; } @@ -114,15 +114,15 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (!q->params.dq_rate_estimator) pie_set_enqueue_time(skb); - q->stats.packets_in++; + WRITE_ONCE(q->stats.packets_in, q->stats.packets_in + 1); if (qdisc_qlen(sch) > q->stats.maxq) - q->stats.maxq = qdisc_qlen(sch); + WRITE_ONCE(q->stats.maxq, qdisc_qlen(sch)); return qdisc_enqueue_tail(skb, sch); } out: - q->stats.dropped++; + WRITE_ONCE(q->stats.dropped, q->stats.dropped + 1); q->vars.accu_prob = 0; return qdisc_drop_reason(skb, sch, to_free, reason); } @@ -267,11 +267,11 @@ void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params, count = count / dtime; if (vars->avg_dq_rate == 0) - vars->avg_dq_rate = count; + WRITE_ONCE(vars->avg_dq_rate, count); else - vars->avg_dq_rate = + WRITE_ONCE(vars->avg_dq_rate, (vars->avg_dq_rate - - (vars->avg_dq_rate >> 3)) + (count >> 3); + (vars->avg_dq_rate >> 3)) + (count >> 3)); /* If the queue has receded below the threshold, we hold * on to the last drain rate calculated, else we reset @@ -381,7 +381,7 @@ void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars, if (delta > 0) { /* prevent overflow */ if (vars->prob < oldprob) { - vars->prob = MAX_PROB; + WRITE_ONCE(vars->prob, MAX_PROB); /* Prevent normalization error. 
If probability is at * maximum value already, we normalize it here, and * skip the check to do a non-linear drop in the next @@ -392,7 +392,7 @@ void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars, } else { /* prevent underflow */ if (vars->prob > oldprob) - vars->prob = 0; + WRITE_ONCE(vars->prob, 0); } /* Non-linear drop in probability: Reduce drop probability quickly if @@ -403,7 +403,7 @@ void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars, /* Reduce drop probability to 98.4% */ vars->prob -= vars->prob / 64; - vars->qdelay = qdelay; + WRITE_ONCE(vars->qdelay, qdelay); vars->backlog_old = backlog; /* We restart the measurement cycle if the following conditions are met @@ -502,21 +502,21 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d) struct pie_sched_data *q = qdisc_priv(sch); struct tc_pie_xstats st = { .prob = q->vars.prob << BITS_PER_BYTE, - .delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) / + .delay = ((u32)PSCHED_TICKS2NS(READ_ONCE(q->vars.qdelay))) / NSEC_PER_USEC, - .packets_in = q->stats.packets_in, - .overlimit = q->stats.overlimit, - .maxq = q->stats.maxq, - .dropped = q->stats.dropped, - .ecn_mark = q->stats.ecn_mark, + .packets_in = READ_ONCE(q->stats.packets_in), + .overlimit = READ_ONCE(q->stats.overlimit), + .maxq = READ_ONCE(q->stats.maxq), + .dropped = READ_ONCE(q->stats.dropped), + .ecn_mark = READ_ONCE(q->stats.ecn_mark), }; /* avg_dq_rate is only valid if dq_rate_estimator is enabled */ st.dq_rate_estimating = q->params.dq_rate_estimator; /* unscale and return dq_rate in bytes per sec */ - if (q->params.dq_rate_estimator) - st.avg_dq_rate = q->vars.avg_dq_rate * + if (st.dq_rate_estimating) + st.avg_dq_rate = READ_ONCE(q->vars.avg_dq_rate) * (PSCHED_TICKS_PER_SEC) >> PIE_SCALE; return gnet_stats_copy_app(d, &st, sizeof(st)); -- cgit v1.2.3 From bbfaa73ea6871db03dc05d7f05f00557a8981f25 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 21 Apr 2026 14:25:09 +0000 Subject: net/sched: sch_fq_codel: remove data-races from fq_codel_dump_stats() fq_codel_dump_stats() acquires the qdisc spinlock a bit too late. Move this acquisition before we fill st.qdisc_stats with live data. 
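The shape of the fix, as a userspace analogy (POSIX threads; hypothetical names, not the kernel code):

	#include <pthread.h>

	/* Take the lock before reading ANY field that goes into the
	 * snapshot, so the dumped values are mutually consistent with
	 * each other and with the flow-list walk done under the lock. */
	struct xstats {
		long drop_overlimit;
		long ecn_mark;
	};

	static struct xstats live_stats;
	static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

	static struct xstats dump_stats(void)
	{
		struct xstats snap;

		pthread_mutex_lock(&tree_lock);	/* moved up, as in the patch */
		snap = live_stats;		/* fill while protected */
		pthread_mutex_unlock(&tree_lock);
		return snap;
	}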
Fixes: edb09eb17ed8 ("net: sched: do not acquire qdisc spinlock in qdisc/class stats dump") Signed-off-by: Eric Dumazet Reviewed-by: Jamal Hadi Salim Link: https://patch.msgid.link/20260421142509.3967231-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/sched/sch_fq_codel.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 2a3d758f67ab..0664b2f2d6f2 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -585,6 +585,8 @@ static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d) }; struct list_head *pos; + sch_tree_lock(sch); + st.qdisc_stats.maxpacket = q->cstats.maxpacket; st.qdisc_stats.drop_overlimit = q->drop_overlimit; st.qdisc_stats.ecn_mark = q->cstats.ecn_mark; @@ -593,7 +595,6 @@ static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d) st.qdisc_stats.memory_usage = q->memory_usage; st.qdisc_stats.drop_overmemory = q->drop_overmemory; - sch_tree_lock(sch); list_for_each(pos, &q->new_flows) st.qdisc_stats.new_flows_len++; -- cgit v1.2.3 From a8f5192809caf636d05ba47c144f282cfd0e3839 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 21 Apr 2026 14:23:09 +0000 Subject: net/sched: sch_red: annotate data-races in red_dump_stats() red_dump_stats() only runs with RTNL held, reading fields that can be changed in qdisc fast path. Add READ_ONCE()/WRITE_ONCE() annotations. Alternative would be to acquire the qdisc spinlock, but our long-term goal is to make qdisc dump operations lockless as much as we can. tc_red_xstats fields don't need to be latched atomically, otherwise this bug would have been caught earlier. Fixes: edb09eb17ed8 ("net: sched: do not acquire qdisc spinlock in qdisc/class stats dump") Signed-off-by: Eric Dumazet Reviewed-by: Jamal Hadi Salim Link: https://patch.msgid.link/20260421142309.3964322-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/sched/sch_red.c | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index c8d3d09f15e3..432b8a3000a5 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -90,17 +90,20 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, case RED_PROB_MARK: qdisc_qstats_overlimit(sch); if (!red_use_ecn(q)) { - q->stats.prob_drop++; + WRITE_ONCE(q->stats.prob_drop, + q->stats.prob_drop + 1); goto congestion_drop; } if (INET_ECN_set_ce(skb)) { - q->stats.prob_mark++; + WRITE_ONCE(q->stats.prob_mark, + q->stats.prob_mark + 1); skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret); if (!skb) return NET_XMIT_CN | ret; } else if (!red_use_nodrop(q)) { - q->stats.prob_drop++; + WRITE_ONCE(q->stats.prob_drop, + q->stats.prob_drop + 1); goto congestion_drop; } @@ -111,17 +114,20 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, reason = QDISC_DROP_OVERLIMIT; qdisc_qstats_overlimit(sch); if (red_use_harddrop(q) || !red_use_ecn(q)) { - q->stats.forced_drop++; + WRITE_ONCE(q->stats.forced_drop, + q->stats.forced_drop + 1); goto congestion_drop; } if (INET_ECN_set_ce(skb)) { - q->stats.forced_mark++; + WRITE_ONCE(q->stats.forced_mark, + q->stats.forced_mark + 1); skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret); if (!skb) return NET_XMIT_CN | ret; } else if (!red_use_nodrop(q)) { - q->stats.forced_drop++; + WRITE_ONCE(q->stats.forced_drop, + q->stats.forced_drop + 1); goto congestion_drop; } @@ -135,7 +141,8 @@ static int red_enqueue(struct 
sk_buff *skb, struct Qdisc *sch, sch->qstats.backlog += len; sch->q.qlen++; } else if (net_xmit_drop_count(ret)) { - q->stats.pdrop++; + WRITE_ONCE(q->stats.pdrop, + q->stats.pdrop + 1); qdisc_qstats_drop(sch); } return ret; @@ -463,9 +470,13 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d) dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &hw_stats_request); } - st.early = q->stats.prob_drop + q->stats.forced_drop; - st.pdrop = q->stats.pdrop; - st.marked = q->stats.prob_mark + q->stats.forced_mark; + st.early = READ_ONCE(q->stats.prob_drop) + + READ_ONCE(q->stats.forced_drop); + + st.pdrop = READ_ONCE(q->stats.pdrop); + + st.marked = READ_ONCE(q->stats.prob_mark) + + READ_ONCE(q->stats.forced_mark); return gnet_stats_copy_app(d, &st, sizeof(st)); } -- cgit v1.2.3 From 1ada03fdef82d3d7d2edb9dcd3acc91917675e48 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 21 Apr 2026 14:16:55 +0000 Subject: net/sched: sch_sfb: annotate data-races in sfb_dump_stats() sfb_dump_stats() only runs with RTNL held, reading fields that can be changed in qdisc fast path. Add READ_ONCE()/WRITE_ONCE() annotations. Alternative would be to acquire the qdisc spinlock, but our long-term goal is to make qdisc dump operations lockless as much as we can. tc_sfb_xstats fields don't need to be latched atomically, otherwise this bug would have been caught earlier. Fixes: edb09eb17ed8 ("net: sched: do not acquire qdisc spinlock in qdisc/class stats dump") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20260421141655.3953721-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/sched/sch_sfb.c | 54 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 32 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 013738662128..bd5ef561030f 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -130,7 +130,7 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) sfbhash >>= SFB_BUCKET_SHIFT; if (b[hash].qlen < 0xFFFF) - b[hash].qlen++; + WRITE_ONCE(b[hash].qlen, b[hash].qlen + 1); b += SFB_NUMBUCKETS; /* next level */ } } @@ -159,7 +159,7 @@ static void decrement_one_qlen(u32 sfbhash, u32 slot, sfbhash >>= SFB_BUCKET_SHIFT; if (b[hash].qlen > 0) - b[hash].qlen--; + WRITE_ONCE(b[hash].qlen, b[hash].qlen - 1); b += SFB_NUMBUCKETS; /* next level */ } } @@ -179,12 +179,12 @@ static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q) { - b->p_mark = prob_minus(b->p_mark, q->decrement); + WRITE_ONCE(b->p_mark, prob_minus(b->p_mark, q->decrement)); } static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q) { - b->p_mark = prob_plus(b->p_mark, q->increment); + WRITE_ONCE(b->p_mark, prob_plus(b->p_mark, q->increment)); } static void sfb_zero_all_buckets(struct sfb_sched_data *q) @@ -202,11 +202,14 @@ static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_da const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0]; for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) { - if (qlen < b->qlen) - qlen = b->qlen; - totalpm += b->p_mark; - if (prob < b->p_mark) - prob = b->p_mark; + u32 b_qlen = READ_ONCE(b->qlen); + u32 b_mark = READ_ONCE(b->p_mark); + + if (qlen < b_qlen) + qlen = b_qlen; + totalpm += b_mark; + if (prob < b_mark) + prob = b_mark; b++; } *prob_r = prob; @@ -295,7 +298,8 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, if 
(unlikely(sch->q.qlen >= q->limit)) { qdisc_qstats_overlimit(sch); - q->stats.queuedrop++; + WRITE_ONCE(q->stats.queuedrop, + q->stats.queuedrop + 1); goto drop; } @@ -348,7 +352,8 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (unlikely(minqlen >= q->max)) { qdisc_qstats_overlimit(sch); - q->stats.bucketdrop++; + WRITE_ONCE(q->stats.bucketdrop, + q->stats.bucketdrop + 1); goto drop; } @@ -374,7 +379,8 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, } if (sfb_rate_limit(skb, q)) { qdisc_qstats_overlimit(sch); - q->stats.penaltydrop++; + WRITE_ONCE(q->stats.penaltydrop, + q->stats.penaltydrop + 1); goto drop; } goto enqueue; @@ -390,14 +396,17 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, * In either case, we want to start dropping packets. */ if (r < (p_min - SFB_MAX_PROB / 2) * 2) { - q->stats.earlydrop++; + WRITE_ONCE(q->stats.earlydrop, + q->stats.earlydrop + 1); goto drop; } } if (INET_ECN_set_ce(skb)) { - q->stats.marked++; + WRITE_ONCE(q->stats.marked, + q->stats.marked + 1); } else { - q->stats.earlydrop++; + WRITE_ONCE(q->stats.earlydrop, + q->stats.earlydrop + 1); goto drop; } } @@ -410,7 +419,8 @@ enqueue: sch->q.qlen++; increment_qlen(&cb, q); } else if (net_xmit_drop_count(ret)) { - q->stats.childdrop++; + WRITE_ONCE(q->stats.childdrop, + q->stats.childdrop + 1); qdisc_qstats_drop(sch); } return ret; @@ -599,12 +609,12 @@ static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct sfb_sched_data *q = qdisc_priv(sch); struct tc_sfb_xstats st = { - .earlydrop = q->stats.earlydrop, - .penaltydrop = q->stats.penaltydrop, - .bucketdrop = q->stats.bucketdrop, - .queuedrop = q->stats.queuedrop, - .childdrop = q->stats.childdrop, - .marked = q->stats.marked, + .earlydrop = READ_ONCE(q->stats.earlydrop), + .penaltydrop = READ_ONCE(q->stats.penaltydrop), + .bucketdrop = READ_ONCE(q->stats.bucketdrop), + .queuedrop = READ_ONCE(q->stats.queuedrop), + .childdrop = READ_ONCE(q->stats.childdrop), + .marked = READ_ONCE(q->stats.marked), }; st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q); -- cgit v1.2.3 From fc69decc811b155a0ed8eef17ee940f28c4f6dbc Mon Sep 17 00:00:00 2001 From: Longxuan Yu Date: Mon, 20 Apr 2026 11:18:45 +0800 Subject: 8021q: use RCU for egress QoS mappings The TX fast path and reporting paths walk egress QoS mappings without RTNL. Convert the mapping lists to RCU-protected pointers, use RCU reader annotations in readers, and defer freeing mapping nodes with an embedded rcu_head. This prepares the egress QoS mapping code for safe removal of mapping nodes in a follow-up change while preserving the current behavior. 
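Condensed, the resulting reader/writer shape looks like this (kernel RCU primitives; field and function names are abbreviated, so treat it as a sketch of the pattern rather than the patch itself):

	struct map_node {
		u32			 priority;
		u16			 vlan_qos;
		struct map_node __rcu	*next;
		struct rcu_head		 rcu;	/* lets removal use kfree_rcu() */
	};

	/* TX fast path: lockless traversal under RCU. */
	static u16 egress_qos(struct map_node __rcu **head, u32 prio)
	{
		struct map_node *m;
		u16 qos = 0;

		rcu_read_lock();
		for (m = rcu_dereference(*head); m; m = rcu_dereference(m->next)) {
			if (m->priority == prio) {
				qos = READ_ONCE(m->vlan_qos);
				break;
			}
		}
		rcu_read_unlock();
		return qos;
	}

	/* Writer (RTNL held): publish a fully initialized node at the head. */
	static void publish(struct map_node __rcu **head, struct map_node *np)
	{
		RCU_INIT_POINTER(np->next, rtnl_dereference(*head));
		rcu_assign_pointer(*head, np);
	}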
Co-developed-by: Yuan Tan Signed-off-by: Yuan Tan Signed-off-by: Longxuan Yu Signed-off-by: Ren Wei Link: https://patch.msgid.link/9136768189f8c6d3f824f476c62d2fa1111688e8.1776647968.git.yuantan098@gmail.com Signed-off-by: Paolo Abeni --- net/8021q/vlan_dev.c | 31 ++++++++++++++++--------------- net/8021q/vlan_netlink.c | 10 ++++++---- net/8021q/vlanproc.c | 12 ++++++++---- 3 files changed, 30 insertions(+), 23 deletions(-) (limited to 'net') diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index c40f7d5c4fca..a5340932b657 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -172,39 +172,34 @@ int vlan_dev_set_egress_priority(const struct net_device *dev, u32 skb_prio, u16 vlan_prio) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); - struct vlan_priority_tci_mapping *mp = NULL; + struct vlan_priority_tci_mapping *mp; struct vlan_priority_tci_mapping *np; + u32 bucket = skb_prio & 0xF; u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; /* See if a priority mapping exists.. */ - mp = vlan->egress_priority_map[skb_prio & 0xF]; + mp = rtnl_dereference(vlan->egress_priority_map[bucket]); while (mp) { if (mp->priority == skb_prio) { if (mp->vlan_qos && !vlan_qos) vlan->nr_egress_mappings--; else if (!mp->vlan_qos && vlan_qos) vlan->nr_egress_mappings++; - mp->vlan_qos = vlan_qos; + WRITE_ONCE(mp->vlan_qos, vlan_qos); return 0; } - mp = mp->next; + mp = rtnl_dereference(mp->next); } /* Create a new mapping then. */ - mp = vlan->egress_priority_map[skb_prio & 0xF]; np = kmalloc_obj(struct vlan_priority_tci_mapping); if (!np) return -ENOBUFS; - np->next = mp; np->priority = skb_prio; np->vlan_qos = vlan_qos; - /* Before inserting this element in hash table, make sure all its fields - * are committed to memory. - * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask() - */ - smp_wmb(); - vlan->egress_priority_map[skb_prio & 0xF] = np; + RCU_INIT_POINTER(np->next, rtnl_dereference(vlan->egress_priority_map[bucket])); + rcu_assign_pointer(vlan->egress_priority_map[bucket], np); if (vlan_qos) vlan->nr_egress_mappings++; return 0; @@ -604,11 +599,17 @@ void vlan_dev_free_egress_priority(const struct net_device *dev) int i; for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { - while ((pm = vlan->egress_priority_map[i]) != NULL) { - vlan->egress_priority_map[i] = pm->next; - kfree(pm); + pm = rtnl_dereference(vlan->egress_priority_map[i]); + RCU_INIT_POINTER(vlan->egress_priority_map[i], NULL); + while (pm) { + struct vlan_priority_tci_mapping *next; + + next = rtnl_dereference(pm->next); + kfree_rcu(pm, rcu); + pm = next; } } + vlan->nr_egress_mappings = 0; } static void vlan_dev_uninit(struct net_device *dev) diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index a000b1ef0520..a5b16833e2ce 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -260,13 +260,15 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { - for (pm = vlan->egress_priority_map[i]; pm; - pm = pm->next) { - if (!pm->vlan_qos) + for (pm = rcu_dereference_rtnl(vlan->egress_priority_map[i]); pm; + pm = rcu_dereference_rtnl(pm->next)) { + u16 vlan_qos = READ_ONCE(pm->vlan_qos); + + if (!vlan_qos) continue; m.from = pm->priority; - m.to = (pm->vlan_qos >> 13) & 0x7; + m.to = (vlan_qos >> 13) & 0x7; if (nla_put(skb, IFLA_VLAN_QOS_MAPPING, sizeof(m), &m)) goto nla_put_failure; diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index 
fa67374bda49..0e424e0895b7 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c @@ -262,15 +262,19 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset) vlan->ingress_priority_map[7]); seq_printf(seq, " EGRESS priority mappings: "); + rcu_read_lock(); for (i = 0; i < 16; i++) { - const struct vlan_priority_tci_mapping *mp - = vlan->egress_priority_map[i]; + const struct vlan_priority_tci_mapping *mp = + rcu_dereference(vlan->egress_priority_map[i]); while (mp) { + u16 vlan_qos = READ_ONCE(mp->vlan_qos); + seq_printf(seq, "%u:%d ", - mp->priority, ((mp->vlan_qos >> 13) & 0x7)); - mp = mp->next; + mp->priority, ((vlan_qos >> 13) & 0x7)); + mp = rcu_dereference(mp->next); } } + rcu_read_unlock(); seq_puts(seq, "\n"); return 0; -- cgit v1.2.3 From 7dddc74af369478ba7f9bc136d0fc1dc4570cb66 Mon Sep 17 00:00:00 2001 From: Longxuan Yu Date: Mon, 20 Apr 2026 11:18:46 +0800 Subject: 8021q: delete cleared egress QoS mappings vlan_dev_set_egress_priority() currently keeps cleared egress priority mappings in the hash as tombstones. Repeated set/clear cycles with distinct skb priorities therefore accumulate mapping nodes until device teardown and leak memory. Delete mappings when vlan_prio is cleared instead of keeping tombstones. Now that the egress mapping lists are RCU protected, the node can be unlinked safely and freed after a grace period. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: stable@kernel.org Reported-by: Yifan Wu Reported-by: Juefei Pu Reported-by: Xin Liu Co-developed-by: Yuan Tan Signed-off-by: Yuan Tan Signed-off-by: Longxuan Yu Signed-off-by: Ren Wei Link: https://patch.msgid.link/ecfa6f6ce2467a42647ff4c5221238ae85b79a59.1776647968.git.yuantan098@gmail.com Signed-off-by: Paolo Abeni --- net/8021q/vlan_dev.c | 20 ++++++++++++++------ net/8021q/vlan_netlink.c | 4 ---- 2 files changed, 14 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index a5340932b657..7aa3af8b10ea 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -172,26 +172,34 @@ int vlan_dev_set_egress_priority(const struct net_device *dev, u32 skb_prio, u16 vlan_prio) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_priority_tci_mapping __rcu **mpp; struct vlan_priority_tci_mapping *mp; struct vlan_priority_tci_mapping *np; u32 bucket = skb_prio & 0xF; u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; /* See if a priority mapping exists.. */ - mp = rtnl_dereference(vlan->egress_priority_map[bucket]); + mpp = &vlan->egress_priority_map[bucket]; + mp = rtnl_dereference(*mpp); while (mp) { if (mp->priority == skb_prio) { - if (mp->vlan_qos && !vlan_qos) + if (!vlan_qos) { + rcu_assign_pointer(*mpp, rtnl_dereference(mp->next)); vlan->nr_egress_mappings--; - else if (!mp->vlan_qos && vlan_qos) - vlan->nr_egress_mappings++; - WRITE_ONCE(mp->vlan_qos, vlan_qos); + kfree_rcu(mp, rcu); + } else { + WRITE_ONCE(mp->vlan_qos, vlan_qos); + } return 0; } - mp = rtnl_dereference(mp->next); + mpp = &mp->next; + mp = rtnl_dereference(*mpp); } /* Create a new mapping then. 
*/ + if (!vlan_qos) + return 0; + np = kmalloc_obj(struct vlan_priority_tci_mapping); if (!np) return -ENOBUFS; diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index a5b16833e2ce..368d53ca7d87 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -263,10 +263,6 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) for (pm = rcu_dereference_rtnl(vlan->egress_priority_map[i]); pm; pm = rcu_dereference_rtnl(pm->next)) { u16 vlan_qos = READ_ONCE(pm->vlan_qos); - - if (!vlan_qos) - continue; - m.from = pm->priority; m.to = (vlan_qos >> 13) & 0x7; if (nla_put(skb, IFLA_VLAN_QOS_MAPPING, -- cgit v1.2.3 From 1cb36e252211506f51095fe7ced8286cc77b4c80 Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Mon, 20 Apr 2026 15:20:51 +0200 Subject: vsock/virtio: fix MSG_ZEROCOPY pinned-pages accounting virtio_transport_init_zcopy_skb() uses iter->count as the size argument for msg_zerocopy_realloc(), which in turn passes it to mm_account_pinned_pages() for RLIMIT_MEMLOCK accounting. However, this function is called after virtio_transport_fill_skb() has already consumed the iterator via __zerocopy_sg_from_iter(), so on the last skb, iter->count will be 0, skipping the RLIMIT_MEMLOCK enforcement. Pass pkt_len (the total bytes being sent) as an explicit parameter to virtio_transport_init_zcopy_skb() instead of reading the already-consumed iter->count. This matches TCP and UDP, which both call msg_zerocopy_realloc() with the original message size. Fixes: 581512a6dc93 ("vsock/virtio: MSG_ZEROCOPY flag support") Reported-by: Yiming Qian Signed-off-by: Stefano Garzarella Reviewed-by: Bobby Eshleman Link: https://patch.msgid.link/20260420132051.217589-1-sgarzare@redhat.com Signed-off-by: Paolo Abeni --- net/vmw_vsock/virtio_transport_common.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 0742091beae7..416d533f493d 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -73,6 +73,7 @@ static bool virtio_transport_can_zcopy(const struct virtio_transport *t_ops, static int virtio_transport_init_zcopy_skb(struct vsock_sock *vsk, struct sk_buff *skb, struct msghdr *msg, + size_t pkt_len, bool zerocopy) { struct ubuf_info *uarg; @@ -81,12 +82,10 @@ static int virtio_transport_init_zcopy_skb(struct vsock_sock *vsk, uarg = msg->msg_ubuf; net_zcopy_get(uarg); } else { - struct iov_iter *iter = &msg->msg_iter; struct ubuf_info_msgzc *uarg_zc; uarg = msg_zerocopy_realloc(sk_vsock(vsk), - iter->count, - NULL, false); + pkt_len, NULL, false); if (!uarg) return -1; @@ -398,11 +397,17 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk, * each iteration. If this is last skb for this buffer * and MSG_ZEROCOPY mode is in use - we must allocate * completion for the current syscall. + * + * Pass pkt_len because msg iter is already consumed + * by virtio_transport_fill_skb(), so iter->count + * can not be used for RLIMIT_MEMLOCK pinned-pages + * accounting done by msg_zerocopy_realloc(). 
*/ if (info->msg && info->msg->msg_flags & MSG_ZEROCOPY && skb_len == rest_len && info->op == VIRTIO_VSOCK_OP_RW) { if (virtio_transport_init_zcopy_skb(vsk, skb, info->msg, + pkt_len, can_zcopy)) { kfree_skb(skb); ret = -ENOMEM; -- cgit v1.2.3 From fcf04b14334641f4b0b8647824480935e9416d52 Mon Sep 17 00:00:00 2001 From: Gang Yan Date: Mon, 20 Apr 2026 18:19:23 +0200 Subject: mptcp: sync the msk->sndbuf at accept() time On passive MPTCP connections, the msk sndbuf is not updated correctly. The root cause is an order issue in the accept path: - tcp_check_req() -> subflow_syn_recv_sock() -> mptcp_sk_clone_init() calls __mptcp_propagate_sndbuf() to copy the ssk sndbuf into msk - Later, tcp_child_process() -> tcp_init_transfer() -> tcp_sndbuf_expand() grows the ssk sndbuf. So __mptcp_propagate_sndbuf() runs before the ssk sndbuf has been expanded and the msk ends up with a much smaller sndbuf than the subflow: MPTCP: msk->sndbuf:20480, msk->first->sndbuf:2626560 Fix this by moving the __mptcp_propagate_sndbuf() call from mptcp_sk_clone_init() -- the ssk sndbuf is not yet finalized there -- to mptcp_stream_accept() at accept() time, when the ssk sndbuf has been fully expanded by tcp_sndbuf_expand(). Fixes: 8005184fd1ca ("mptcp: refactor sndbuf auto-tuning") Cc: stable@vger.kernel.org Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/602 Signed-off-by: Gang Yan Acked-by: Paolo Abeni Reviewed-by: Matthieu Baerts (NGI0) Signed-off-by: Matthieu Baerts (NGI0) Link: https://patch.msgid.link/20260420-net-mptcp-sync-sndbuf-accept-v1-1-e3523e3aeb44@kernel.org Signed-off-by: Paolo Abeni --- net/mptcp/protocol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index fbffd3a43fe8..718e910ff23f 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -3594,7 +3594,6 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk, * uses the correct data */ mptcp_copy_inaddrs(nsk, ssk); - __mptcp_propagate_sndbuf(nsk, ssk); mptcp_rcv_space_init(msk, ssk); msk->rcvq_space.time = mptcp_stamp(); @@ -4252,6 +4251,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, mptcp_graft_subflows(newsk); mptcp_rps_record_subflows(msk); + __mptcp_propagate_sndbuf(newsk, mptcp_subflow_tcp_sock(subflow)); /* Do late cleanup for the first subflow as necessary. Also * deal with bad peers not doing a complete shutdown. -- cgit v1.2.3 From 3d1f20727a635811f6b77801a7b57b8995268abd Mon Sep 17 00:00:00 2001 From: Dexuan Cui Date: Wed, 22 Apr 2026 23:48:11 -0700 Subject: hv_sock: Return -EIO for malformed/short packets Commit f63152958994 fixes a regression; however, it fails to report an error for malformed/short packets -- normally we should never see such packets, but let's report an error for them just in case.
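The new decision boils down to this condensed helper (hypothetical name; the real logic lives inline in hvs_stream_has_data()):

	/* A leftover 0-payload descriptor is either the peer's FIN (EOF) or
	 * a malformed/short packet (error); peer_shutdown tells them apart. */
	static ssize_t leftover_desc_status(unsigned int peer_shutdown)
	{
		if (!(peer_shutdown & SEND_SHUTDOWN))
			return -EIO;	/* malformed/short packet */
		return 0;		/* clean EOF after FIN */
	}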
Fixes: f63152958994 ("hv_sock: Report EOF instead of -EIO for FIN") Cc: stable@vger.kernel.org Signed-off-by: Dexuan Cui Acked-by: Stefano Garzarella Link: https://patch.msgid.link/20260423064811.1371749-1-decui@microsoft.com Signed-off-by: Jakub Kicinski --- net/vmw_vsock/hyperv_transport.c | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c index 76e78c83fdbc..f862988c1e86 100644 --- a/net/vmw_vsock/hyperv_transport.c +++ b/net/vmw_vsock/hyperv_transport.c @@ -704,17 +704,26 @@ static s64 hvs_stream_has_data(struct vsock_sock *vsk) if (hvs->recv_desc) { /* Here hvs->recv_data_len is 0, so hvs->recv_desc must * be NULL unless it points to the 0-byte-payload FIN - * packet: see hvs_update_recv_data(). + * packet or a malformed/short packet: see + * hvs_update_recv_data(). * - * Here all the payload has been dequeued, but - * hvs_channel_readable_payload() still returns 1, - * because the VMBus ringbuffer's read_index is not - * updated for the FIN packet: hvs_stream_dequeue() -> - * hv_pkt_iter_next() updates the cached priv_read_index - * but has no opportunity to update the read_index in - * hv_pkt_iter_close() as hvs_stream_has_data() returns - * 0 for the FIN packet, so it won't get dequeued. + * If hvs->recv_desc points to the FIN packet, here all + * the payload has been dequeued and the peer_shutdown + * flag is set, but hvs_channel_readable_payload() still + * returns 1, because the VMBus ringbuffer's read_index + * is not updated for the FIN packet: + * hvs_stream_dequeue() -> hv_pkt_iter_next() updates + * the cached priv_read_index but has no opportunity to + * update the read_index in hv_pkt_iter_close() as + * hvs_stream_has_data() returns 0 for the FIN packet, + * so it won't get dequeued. + * + * In case hvs->recv_desc points to a malformed/short + * packet, return -EIO. */ + if (!(vsk->peer_shutdown & SEND_SHUTDOWN)) + return -EIO; + return 0; } -- cgit v1.2.3 From 5a8db80f721deee8e916c2cfdee78decda02ce4f Mon Sep 17 00:00:00 2001 From: Ruijie Li Date: Wed, 22 Apr 2026 23:40:18 +0800 Subject: net/smc: avoid early lgr access in smc_clc_wait_msg A CLC decline can be received while the handshake is still in an early stage, before the connection has been associated with a link group. The decline handling in smc_clc_wait_msg() updates link-group level sync state for first-contact declines, but that state only exists after link group setup has completed. Guard the link-group update accordingly and keep the per-socket peer diagnosis handling unchanged. This preserves the existing sync_err handling for established link-group contexts and avoids touching link-group state before it is available. 
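In miniature, the defensive shape being applied (a sketch using the names from the patch, not the literal code):

	/* Early-handshake handlers must treat the link group as optional:
	 * smc->conn.lgr is assigned only once link-group setup completes. */
	static void handle_first_contact_decline(struct smc_connection *conn)
	{
		struct smc_link_group *lgr = conn->lgr;	/* NULL before setup */

		if (lgr) {
			lgr->sync_err = 1;
			smc_lgr_terminate_sched(lgr);
		}
	}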
Fixes: 0cfdd8f92cac ("smc: connection and link group creation") Cc: stable@kernel.org Reported-by: Yuan Tan Reported-by: Yifan Wu Reported-by: Juefei Pu Reported-by: Xin Liu Signed-off-by: Ruijie Li Signed-off-by: Ren Wei Reviewed-by: Dust Li Link: https://patch.msgid.link/08c68a5c817acf198cce63d22517e232e8d60718.1776850759.git.ruijieli51@gmail.com Signed-off-by: Jakub Kicinski --- net/smc/smc_clc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index c38fc7bf0a7e..014d527d5462 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -788,8 +788,8 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, dclc = (struct smc_clc_msg_decline *)clcm; reason_code = SMC_CLC_DECL_PEERDECL; smc->peer_diagnosis = ntohl(dclc->peer_diagnosis); - if (((struct smc_clc_msg_decline *)buf)->hdr.typev2 & - SMC_FIRST_CONTACT_MASK) { + if ((dclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK) && + smc->conn.lgr) { smc->conn.lgr->sync_err = 1; smc_lgr_terminate_sched(smc->conn.lgr); } -- cgit v1.2.3 From 42726ec644cbdde0035c3e0417fee8ed9547e120 Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Wed, 22 Apr 2026 20:35:38 +0800 Subject: tcp: send a challenge ACK on SEG.ACK > SND.NXT RFC 5961 Section 5.2 validates an incoming segment's ACK value against the range [SND.UNA - MAX.SND.WND, SND.NXT] and states: "All incoming segments whose ACK value doesn't satisfy the above condition MUST be discarded and an ACK sent back." Commit 354e4aa391ed ("tcp: RFC 5961 5.2 Blind Data Injection Attack Mitigation") opted Linux into this mitigation and implements the challenge ACK on the lower side (SEG.ACK < SND.UNA - MAX.SND.WND), but the symmetric upper side (SEG.ACK > SND.NXT) still takes the pre-RFC-5961 path and silently returns SKB_DROP_REASON_TCP_ACK_UNSENT_DATA, even though RFC 793 Section 3.9 (now RFC 9293 Section 3.10.7.4) has always required: "If the ACK acknowledges something not yet sent (SEG.ACK > SND.NXT) then send an ACK, drop the segment, and return." Complete the mitigation by sending a challenge ACK on that branch, reusing the existing tcp_send_challenge_ack() path which already enforces the per-socket RFC 5961 Section 7 rate limit via __tcp_oow_rate_limited(). FLAG_NO_CHALLENGE_ACK is honoured for symmetry with the lower-edge case. Update the existing tcp_ts_recent_invalid_ack.pkt selftest, which drives this exact path, to consume the new challenge ACK. Fixes: 354e4aa391ed ("tcp: RFC 5961 5.2 Blind Data Injection Attack Mitigation") Signed-off-by: Jiayuan Chen Reviewed-by: Eric Dumazet Link: https://patch.msgid.link/20260422123605.320000-2-jiayuan.chen@linux.dev Signed-off-by: Jakub Kicinski --- net/ipv4/tcp_input.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e04ae105893c..d5c9e65d9760 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4286,11 +4286,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) goto old_ack; } - /* If the ack includes data we haven't sent yet, discard - * this segment (RFC793 Section 3.9). + /* If the ack includes data we haven't sent yet, drop the + * segment. RFC 793 Section 3.9 and RFC 5961 Section 5.2 + * require us to send an ACK back in that case. 
*/ - if (after(ack, tp->snd_nxt)) + if (after(ack, tp->snd_nxt)) { + if (!(flag & FLAG_NO_CHALLENGE_ACK)) + tcp_send_challenge_ack(sk, false); return -SKB_DROP_REASON_TCP_ACK_UNSENT_DATA; + } if (after(ack, prior_snd_una)) { flag |= FLAG_SND_UNA_ADVANCED; -- cgit v1.2.3 From 67bf002a2d7387a6312138210d0bd06e3cf4879b Mon Sep 17 00:00:00 2001 From: Ruide Cao Date: Tue, 21 Apr 2026 12:16:31 +0800 Subject: ipv4: icmp: validate reply type before using icmp_pointers Extended echo replies use ICMP_EXT_ECHOREPLY as the outbound reply type. That value is outside the range covered by icmp_pointers[], which only describes the traditional ICMP types up to NR_ICMP_TYPES. Avoid consulting icmp_pointers[] for reply types outside that range, and use array_index_nospec() for the remaining in-range lookup. Normal ICMP replies keep their existing behavior unchanged. Fixes: d329ea5bd884 ("icmp: add response to RFC 8335 PROBE messages") Cc: stable@kernel.org Reported-by: Yuan Tan Reported-by: Yifan Wu Reported-by: Juefei Pu Reported-by: Xin Liu Signed-off-by: Ruide Cao Signed-off-by: Ren Wei Reviewed-by: Simon Horman Link: https://patch.msgid.link/0dace90c01a5978e829ca741ef684dbd7304ce62.1776628519.git.caoruide123@gmail.com Signed-off-by: Jakub Kicinski --- net/ipv4/icmp.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 2f4fac22d1ab..7eeff658b467 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -64,6 +64,7 @@ #include #include #include +#include <linux/nospec.h> #include #include #include @@ -371,7 +372,9 @@ static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd, to, len); skb->csum = csum_block_add(skb->csum, csum, odd); - if (icmp_pointers[icmp_param->data.icmph.type].error) + if (icmp_param->data.icmph.type <= NR_ICMP_TYPES && + icmp_pointers[array_index_nospec(icmp_param->data.icmph.type, + NR_ICMP_TYPES + 1)].error) nf_ct_attach(skb, icmp_param->skb); return 0; } -- cgit v1.2.3 From 864ba40c80edae2b98f47d46f2c39399126aa3d6 Mon Sep 17 00:00:00 2001 From: Ernestas Kulik Date: Tue, 21 Apr 2026 09:02:26 +0300 Subject: llc: Return -EINPROGRESS from llc_ui_connect() Given a zero sk_sndtimeo, llc_ui_connect() skips waiting for a state change and returns 0, confusing userspace applications that assume the socket is connected and causing e.g. getpeername() calls to error out. More specifically, the issue was discovered in libcoap, where newly-added AF_LLC socket support was behaving differently from AF_INET connections due to EINPROGRESS handling being skipped. Set rc to -EINPROGRESS if connect() would not block, akin to AF_INET sockets.
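For application authors, the calling convention this aligns AF_LLC with is the usual nonblocking-connect dance (minimal sketch; error handling trimmed):

	#include <sys/socket.h>
	#include <fcntl.h>
	#include <poll.h>
	#include <errno.h>

	static int connect_nonblock(int fd, const struct sockaddr *sa, socklen_t len)
	{
		fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);

		if (connect(fd, sa, len) == 0)
			return 0;		/* connected immediately */
		if (errno != EINPROGRESS)
			return -1;		/* hard failure */

		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
		if (poll(&pfd, 1, 5000) <= 0)
			return -1;		/* timeout or poll error */

		int err = 0;
		socklen_t elen = sizeof(err);
		if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &elen) < 0 || err)
			return -1;		/* connect failed asynchronously */
		return 0;
	}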
Signed-off-by: Ernestas Kulik Reviewed-by: Simon Horman Link: https://patch.msgid.link/20260421060304.285419-1-ernestas.k@iconn-networks.com Signed-off-by: Jakub Kicinski --- net/llc/af_llc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 59d593bb5d18..1b210db3119e 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -520,8 +520,10 @@ static int llc_ui_connect(struct socket *sock, struct sockaddr_unsized *uaddr, if (sk->sk_state == TCP_SYN_SENT) { const long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); - if (!timeo || !llc_ui_wait_for_conn(sk, timeo)) + if (!timeo || !llc_ui_wait_for_conn(sk, timeo)) { + rc = -EINPROGRESS; goto out; + } rc = sock_intr_errno(timeo); if (signal_pending(current)) -- cgit v1.2.3 From d293ca716e7d5dffdaecaf6b9b2f857a33dc3d3a Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Tue, 21 Apr 2026 13:45:26 +0100 Subject: tipc: fix double-free in tipc_buf_append() tipc_msg_validate() can potentially reallocate the skb it is validating, freeing the old one. In tipc_buf_append(), it was being called with a pointer to a local variable which was a copy of the caller's skb pointer. If the skb was reallocated and validation subsequently failed, the error handling path would free the original skb pointer, which had already been freed, leading to double-free. Fix this by checking if head now points to a newly allocated reassembled skb. If it does, reassign *headbuf for later freeing operations. Fixes: d618d09a68e4 ("tipc: enforce valid ratio between skb truesize and contents") Suggested-by: Tung Nguyen Signed-off-by: Lee Jones Reviewed-by: Tung Nguyen Signed-off-by: Jakub Kicinski --- net/tipc/msg.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 76284fc538eb..b0bba0feef56 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -177,8 +177,20 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) if (fragid == LAST_FRAGMENT) { TIPC_SKB_CB(head)->validated = 0; - if (unlikely(!tipc_msg_validate(&head))) + + /* If the reassembled skb has been freed in + * tipc_msg_validate() because of an invalid truesize, + * then head will point to a newly allocated reassembled + * skb, while *headbuf points to freed reassembled skb. + * In such cases, correct *headbuf for freeing the newly + * allocated reassembled skb later. + */ + if (unlikely(!tipc_msg_validate(&head))) { + if (head != *headbuf) + *headbuf = head; goto err; + } + *buf = head; TIPC_SKB_CB(head)->tail = NULL; *headbuf = NULL; -- cgit v1.2.3 From 076b8cad77aa96557719fb5effe8703bfb64df00 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 21 Apr 2026 22:24:06 +0200 Subject: ipv6: Cap TLV scan in ip6_tnl_parse_tlv_enc_lim Commit 47d3d7ac656a ("ipv6: Implement limits on Hop-by-Hop and Destination options") added net.ipv6.max_{hbh,dst}_opts_{cnt,len} and applied them in ip6_parse_tlv(), the generic TLV walker invoked from ipv6_destopt_rcv() and ipv6_parse_hopopts(). ip6_tnl_parse_tlv_enc_lim() does not go through ip6_parse_tlv(); it has its own hand-rolled TLV scanner inside its NEXTHDR_DEST branch which looks for IPV6_TLV_TNL_ENCAP_LIMIT. That inner loop is bounded only by optlen, which can be up to 2048 bytes. Stuffing the Destination Options header with 2046 Pad1 (type=0) entries advances the scanner a single byte at a time, yielding ~2000 TLV iterations per extension header. 
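To make the scan shape concrete, here is a self-contained version of a bounded destination-option walk (simplified from the patch; helper name invented):

	#define MAX_DEST_TLVS	8

	/* Walk TLVs after the 2-byte option header: Pad1 (type 0) advances
	 * one byte, everything else advances 2 + length. The iteration cap
	 * stops an attacker from buying ~2000 rounds with 1-byte pads. */
	static int find_tlv(const unsigned char *opt, int optlen, int type)
	{
		int i = 2, cnt = 0;

		while (i < optlen) {
			if (cnt++ >= MAX_DEST_TLVS)
				return -1;
			if (opt[i] == 0) {		/* Pad1 */
				i++;
				continue;
			}
			if (i + 2 > optlen || i + 2 + opt[i + 1] > optlen)
				return -1;		/* truncated TLV */
			if (opt[i] == type)
				return i;
			i += 2 + opt[i + 1];
		}
		return -1;
	}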
Reusing max_dst_opts_cnt to bound the TLV iterations, matching the semantics from 47d3d7ac656a, would require duplicating ip6_parse_tlv() to also validate Pad1/PadN payload. It would also mandate enforcing max_dst_opts_len, since otherwise an attacker shifts the axis to few options with a giant PadN and recovers the original DoS. Allowing up to 8 options before the tunnel encapsulation limit TLV is liberal enough; in practice encap limit is the first TLV. Thus, go with a hard-coded limit IP6_TUNNEL_MAX_DEST_TLVS (8). Signed-off-by: Daniel Borkmann Reviewed-by: Ido Schimmel Reviewed-by: Justin Iurman Signed-off-by: Jakub Kicinski --- net/ipv6/ip6_tunnel.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'net') diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 46bc06506470..c468c83af0f2 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -62,6 +62,8 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("ip6tnl"); MODULE_ALIAS_NETDEV("ip6tnl0"); +#define IP6_TUNNEL_MAX_DEST_TLVS 8 + #define IP6_TUNNEL_HASH_SIZE_SHIFT 5 #define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT) @@ -425,11 +427,15 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) break; } if (nexthdr == NEXTHDR_DEST) { + int tlv_cnt = 0; u16 i = 2; while (1) { struct ipv6_tlv_tnl_enc_lim *tel; + if (unlikely(tlv_cnt++ >= IP6_TUNNEL_MAX_DEST_TLVS)) + break; + /* No more room for encapsulation limit */ if (i + sizeof(*tel) > optlen) break; -- cgit v1.2.3 From 3864c6ba1e041bc75342353a70fa2a2c6f909923 Mon Sep 17 00:00:00 2001 From: Zhenzhong Wu Date: Wed, 22 Apr 2026 10:45:53 +0800 Subject: tcp: call sk_data_ready() after listener migration When inet_csk_listen_stop() migrates an established child socket from a closing listener to another socket in the same SO_REUSEPORT group, the target listener gets a new accept-queue entry via inet_csk_reqsk_queue_add(), but that path never notifies the target listener's waiters. A nonblocking accept() still works because it checks the queue directly, but poll()/epoll_wait() waiters and blocking accept() callers can also remain asleep indefinitely. Call READ_ONCE(nsk->sk_data_ready)(nsk) after a successful migration in inet_csk_listen_stop(). However, after inet_csk_reqsk_queue_add() succeeds, the ref acquired in reuseport_migrate_sock() is effectively transferred to nreq->rsk_listener. Another CPU can then dequeue nreq via accept() or listener shutdown, hit reqsk_put(), and drop that listener ref. Since listeners are SOCK_RCU_FREE, wrap the post-queue_add() dereferences of nsk in rcu_read_lock()/rcu_read_unlock(), which also covers the existing sock_net(nsk) access in that path. The reqsk_timer_handler() path does not need the same changes for two reasons: half-open requests become readable only after the final ACK, where tcp_child_process() already wakes the listener; and once nreq is visible via inet_ehash_insert(), the success path no longer touches nsk directly. 
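The missing-wakeup failure mode generalizes; as a userspace analogy (POSIX threads), a producer that enqueues without signaling leaves blocked consumers asleep, just as the migrated child sat unnoticed in the accept queue:

	#include <pthread.h>

	static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
	static int queued;

	static void produce(void)
	{
		pthread_mutex_lock(&mu);
		queued++;			/* item visible on the queue */
		pthread_mutex_unlock(&mu);
		pthread_cond_signal(&cv);	/* omit this and consumers hang */
	}

	static void consume(void)
	{
		pthread_mutex_lock(&mu);
		while (!queued)
			pthread_cond_wait(&cv, &mu);
		queued--;
		pthread_mutex_unlock(&mu);
	}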
Fixes: 54b92e841937 ("tcp: Migrate TCP_ESTABLISHED/TCP_SYN_RECV sockets in accept queues.") Cc: stable@vger.kernel.org Suggested-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Signed-off-by: Zhenzhong Wu Reviewed-by: Eric Dumazet Link: https://patch.msgid.link/20260422024554.130346-2-jt26wzz@gmail.com Signed-off-by: Jakub Kicinski --- net/ipv4/inet_connection_sock.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 4ac3ae1bc1af..928654c34156 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -1479,16 +1479,19 @@ void inet_csk_listen_stop(struct sock *sk) if (nreq) { refcount_set(&nreq->rsk_refcnt, 1); + rcu_read_lock(); if (inet_csk_reqsk_queue_add(nsk, nreq, child)) { __NET_INC_STATS(sock_net(nsk), LINUX_MIB_TCPMIGRATEREQSUCCESS); reqsk_migrate_reset(req); + READ_ONCE(nsk->sk_data_ready)(nsk); } else { __NET_INC_STATS(sock_net(nsk), LINUX_MIB_TCPMIGRATEREQFAILURE); reqsk_migrate_reset(nreq); __reqsk_free(nreq); } + rcu_read_unlock(); /* inet_csk_reqsk_queue_add() has already * called inet_child_forget() on failure case. -- cgit v1.2.3 From 8141a2dc70080eda1aedc0389ed2db2b292af5bd Mon Sep 17 00:00:00 2001 From: Ao Zhou Date: Wed, 22 Apr 2026 22:52:07 +0800 Subject: net: rds: fix MR cleanup on copy error __rds_rdma_map() hands sg/pages ownership to the transport after get_mr() succeeds. If copying the generated cookie back to user space fails after that point, the error path must not free those resources again before dropping the MR reference. Remove the duplicate unpin/free from the put_user() failure branch so that MR teardown is handled only through the existing final cleanup path. Fixes: 0d4597c8c5ab ("net/rds: Track user mapped pages through special API") Cc: stable@kernel.org Reported-by: Yuan Tan Reported-by: Yifan Wu Reported-by: Juefei Pu Reported-by: Xin Liu Signed-off-by: Ao Zhou Signed-off-by: Ren Wei Reviewed-by: Allison Henderson Link: https://patch.msgid.link/79c8ef73ec8e5844d71038983940cc2943099baf.1776764247.git.draw51280@163.com Signed-off-by: Jakub Kicinski --- net/rds/rdma.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'net') diff --git a/net/rds/rdma.c b/net/rds/rdma.c index aa6465dc742c..61fb6e45281b 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -326,10 +326,6 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) { - if (!need_odp) { - unpin_user_pages(pages, nr_pages); - kfree(sg); - } ret = -EFAULT; goto out; } -- cgit v1.2.3 From 34f61a07e0cdefaecd3ec03bb5fb22215643678f Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 22 Apr 2026 17:14:30 +0100 Subject: rxrpc: Fix memory leaks in rxkad_verify_response() Fix rxkad_verify_response() to free the ticket and the server key under all circumstances by initialising the ticket pointer to NULL and then making all paths through the function after the first allocation has been done go through a single common epilogue that just releases everything - where all the releases skip on a NULL pointer. 
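The idiom in miniature, with userspace stand-ins (free(NULL) plays the role of the NULL-safe kfree()/key_put()):

	#include <stdlib.h>

	/* Initialize every pointer the epilogue releases, route all
	 * failures through one label, and rely on free(NULL) being a
	 * no-op, so no path can double-free or leak. */
	static int do_verify(size_t n)
	{
		char *resp = NULL, *ticket = NULL;
		int ret = -1;

		resp = calloc(1, n);
		if (!resp)
			goto out;
		ticket = malloc(n);
		if (!ticket)
			goto out;
		ret = 0;		/* real work would go here */
	out:
		free(ticket);		/* NULL-safe */
		free(resp);
		return ret;
	}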
Fixes: 57af281e5389 ("rxrpc: Tidy up abort generation infrastructure") Fixes: ec832bd06d6f ("rxrpc: Don't retain the server key in the connection") Closes: https://sashiko.dev/#/patchset/20260408121252.2249051-1-dhowells%40redhat.com Signed-off-by: David Howells cc: Marc Dionne cc: Jeffrey Altman cc: Simon Horman cc: linux-afs@lists.infradead.org cc: stable@kernel.org Link: https://patch.msgid.link/20260422161438.2593376-2-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/rxrpc/rxkad.c | 103 ++++++++++++++++++++++-------------------------------- 1 file changed, 42 insertions(+), 61 deletions(-) (limited to 'net') diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index eb7f2769d2b1..5a720222854f 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -1136,7 +1136,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, struct rxrpc_crypt session_key; struct key *server_key; time64_t expiry; - void *ticket; + void *ticket = NULL; u32 version, kvno, ticket_len, level; __be32 csum; int ret, i; @@ -1162,13 +1162,13 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, ret = -ENOMEM; response = kzalloc_obj(struct rxkad_response, GFP_NOFS); if (!response) - goto temporary_error; + goto error; if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), response, sizeof(*response)) < 0) { - rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO, - rxkad_abort_resp_short); - goto protocol_error; + ret = rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO, + rxkad_abort_resp_short); + goto error; } version = ntohl(response->version); @@ -1178,62 +1178,62 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, trace_rxrpc_rx_response(conn, sp->hdr.serial, version, kvno, ticket_len); if (version != RXKAD_VERSION) { - rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO, - rxkad_abort_resp_version); - goto protocol_error; + ret = rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO, + rxkad_abort_resp_version); + goto error; } if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN) { - rxrpc_abort_conn(conn, skb, RXKADTICKETLEN, -EPROTO, - rxkad_abort_resp_tkt_len); - goto protocol_error; + ret = rxrpc_abort_conn(conn, skb, RXKADTICKETLEN, -EPROTO, + rxkad_abort_resp_tkt_len); + goto error; } if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5) { - rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, -EPROTO, - rxkad_abort_resp_unknown_tkt); - goto protocol_error; + ret = rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, -EPROTO, + rxkad_abort_resp_unknown_tkt); + goto error; } /* extract the kerberos ticket and decrypt and decode it */ ret = -ENOMEM; ticket = kmalloc(ticket_len, GFP_NOFS); if (!ticket) - goto temporary_error_free_resp; + goto error; if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response), ticket, ticket_len) < 0) { - rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO, - rxkad_abort_resp_short_tkt); - goto protocol_error; + ret = rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO, + rxkad_abort_resp_short_tkt); + goto error; } ret = rxkad_decrypt_ticket(conn, server_key, skb, ticket, ticket_len, &session_key, &expiry); if (ret < 0) - goto temporary_error_free_ticket; + goto error; /* use the session key from inside the ticket to decrypt the * response */ ret = rxkad_decrypt_response(conn, response, &session_key); if (ret < 0) - goto temporary_error_free_ticket; + goto error; if (ntohl(response->encrypted.epoch) != conn->proto.epoch || ntohl(response->encrypted.cid) != conn->proto.cid || ntohl(response->encrypted.securityIndex) != 
conn->security_ix) { - rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, - rxkad_abort_resp_bad_param); - goto protocol_error_free; + ret = rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, + rxkad_abort_resp_bad_param); + goto error; } csum = response->encrypted.checksum; response->encrypted.checksum = 0; rxkad_calc_response_checksum(response); if (response->encrypted.checksum != csum) { - rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, - rxkad_abort_resp_bad_checksum); - goto protocol_error_free; + ret = rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, + rxkad_abort_resp_bad_checksum); + goto error; } for (i = 0; i < RXRPC_MAXCALLS; i++) { @@ -1241,38 +1241,38 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, u32 counter = READ_ONCE(conn->channels[i].call_counter); if (call_id > INT_MAX) { - rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, - rxkad_abort_resp_bad_callid); - goto protocol_error_free; + ret = rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, + rxkad_abort_resp_bad_callid); + goto error; } if (call_id < counter) { - rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, - rxkad_abort_resp_call_ctr); - goto protocol_error_free; + ret = rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, + rxkad_abort_resp_call_ctr); + goto error; } if (call_id > counter) { if (conn->channels[i].call) { - rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, + ret = rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, rxkad_abort_resp_call_state); - goto protocol_error_free; + goto error; } conn->channels[i].call_counter = call_id; } } if (ntohl(response->encrypted.inc_nonce) != conn->rxkad.nonce + 1) { - rxrpc_abort_conn(conn, skb, RXKADOUTOFSEQUENCE, -EPROTO, - rxkad_abort_resp_ooseq); - goto protocol_error_free; + ret = rxrpc_abort_conn(conn, skb, RXKADOUTOFSEQUENCE, -EPROTO, + rxkad_abort_resp_ooseq); + goto error; } level = ntohl(response->encrypted.level); if (level > RXRPC_SECURITY_ENCRYPT) { - rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EPROTO, - rxkad_abort_resp_level); - goto protocol_error_free; + ret = rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EPROTO, + rxkad_abort_resp_level); + goto error; } conn->security_level = level; @@ -1280,31 +1280,12 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, * this the connection security can be handled in exactly the same way * as for a client connection */ ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno); - if (ret < 0) - goto temporary_error_free_ticket; - - kfree(ticket); - kfree(response); - _leave(" = 0"); - return 0; -protocol_error_free: - kfree(ticket); -protocol_error: - kfree(response); - key_put(server_key); - return -EPROTO; - -temporary_error_free_ticket: +error: kfree(ticket); -temporary_error_free_resp: kfree(response); -temporary_error: - /* Ignore the response packet if we got a temporary error such as - * ENOMEM. We just want to send the challenge again. Note that we - * also come out this way if the ticket decryption fails. - */ key_put(server_key); + _leave(" = %d", ret); return ret; } -- cgit v1.2.3 From def304aae2edf321d2671fd6ca766a93c21f877e Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 22 Apr 2026 17:14:31 +0100 Subject: rxrpc: Fix rxkad crypto unalignment handling Fix handling of a packet with a misaligned crypto length. Also handle non-ENOMEM errors from decryption by aborting. Further, remove the WARN_ON_ONCE() so that it can't be remotely triggered (a trace line can still be emitted). 
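For reference, round_down() on a power-of-two alignment reduces to a mask, so capping the length is a single AND (simplified from the kernel's include/linux/math.h version):

	/* Simplified: valid only when 'a' is a power of two. */
	#define round_down(x, a)	((x) & ~((a) - 1))

	/* e.g. round_down(1499, 8) == 1496: the trailing partial cipher
	 * block is discarded before the data reaches the crypto code. */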
Fixes: f93af41b9f5f ("rxrpc: Fix missing error checks for rxkad encryption/decryption failure") Closes: https://sashiko.dev/#/patchset/20260408121252.2249051-1-dhowells%40redhat.com Signed-off-by: David Howells cc: Marc Dionne cc: Jeffrey Altman cc: Simon Horman cc: linux-afs@lists.infradead.org cc: stable@kernel.org Link: https://patch.msgid.link/20260422161438.2593376-3-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/rxrpc/rxkad.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 5a720222854f..cba7935977f0 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -510,6 +510,9 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON, rxkad_abort_2_short_header); + /* Don't let the crypto algo see a misaligned length. */ + sp->len = round_down(sp->len, 8); + /* Decrypt the skbuff in-place. TODO: We really want to decrypt * directly into the target buffer. */ @@ -543,8 +546,10 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, if (sg != _sg) kfree(sg); if (ret < 0) { - WARN_ON_ONCE(ret != -ENOMEM); - return ret; + if (ret == -ENOMEM) + return ret; + return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON, + rxkad_abort_2_crypto_unaligned); } /* Extract the decrypted packet length */ -- cgit v1.2.3 From 1f2740150f904bfa60e4bad74d65add3ccb5e7f8 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 22 Apr 2026 17:14:32 +0100 Subject: rxrpc: Fix potential UAF after skb_unshare() failure If skb_unshare() fails to unshare a packet due to allocation failure in rxrpc_input_packet(), the skb pointer in the parent (rxrpc_io_thread()) will be NULL'd out. This will likely cause the call to trace_rxrpc_rx_done() to oops. Fix this by moving the unsharing down to where rxrpc_input_call_event() calls rxrpc_input_call_packet(). There are a number of places prior to that where we ignore DATA packets for a variety of reasons (such as the call already being complete) for which an unshare is then avoided. And with that, rxrpc_input_packet() doesn't need to take a pointer to the pointer to the packet, so change that to just a pointer. 
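The underlying hazard generalizes beyond rxrpc; in miniature, with userspace stand-ins and hypothetical names:

	#include <stdlib.h>
	#include <string.h>

	/* Old shape: the callee may free and NULL the caller's pointer, so
	 * any caller-side use after the call (tracing, stats) risks a NULL
	 * dereference or use-after-free. */
	static int process_inplace(char **bufp, size_t n)
	{
		char *priv = malloc(n);

		if (!priv) {
			free(*bufp);
			*bufp = NULL;	/* caller must now re-check */
			return -1;
		}
		memcpy(priv, *bufp, n);
		free(*bufp);
		*bufp = priv;
		return 0;
	}

	/* New shape: copy privately at the point of use; the caller's
	 * pointer is never disturbed, so its later tracing stays valid. */
	static int process_copy(const char *buf, size_t n)
	{
		char *priv = malloc(n);

		if (!priv)
			return -1;	/* just drop this item */
		memcpy(priv, buf, n);
		/* ... work on priv ... */
		free(priv);
		return 0;
	}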
Fixes: 2d1faf7a0ca3 ("rxrpc: Simplify skbuff accounting in receive path") Closes: https://sashiko.dev/#/patchset/20260408121252.2249051-1-dhowells%40redhat.com Signed-off-by: David Howells cc: Marc Dionne cc: Jeffrey Altman cc: Simon Horman cc: linux-afs@lists.infradead.org cc: stable@kernel.org Link: https://patch.msgid.link/20260422161438.2593376-4-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/rxrpc/ar-internal.h | 1 - net/rxrpc/call_event.c | 19 ++++++++++++++++++- net/rxrpc/io_thread.c | 24 ++---------------------- net/rxrpc/skbuff.c | 9 --------- 4 files changed, 20 insertions(+), 33 deletions(-) (limited to 'net') diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 96ecb83c9071..27c2aa2dd023 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -1486,7 +1486,6 @@ int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int); void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *); void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace); -void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_purge_queue(struct sk_buff_head *); diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index fec59d9338b9..cc8f9dfa44e8 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -332,7 +332,24 @@ bool rxrpc_input_call_event(struct rxrpc_call *call) saw_ack |= sp->hdr.type == RXRPC_PACKET_TYPE_ACK; - rxrpc_input_call_packet(call, skb); + if (sp->hdr.securityIndex != 0 && + skb_cloned(skb)) { + /* Unshare the packet so that it can be + * modified by in-place decryption. + */ + struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); + + if (nskb) { + rxrpc_new_skb(nskb, rxrpc_skb_new_unshared); + rxrpc_input_call_packet(call, nskb); + rxrpc_free_skb(nskb, rxrpc_skb_put_call_rx); + } else { + /* OOM - Drop the packet. */ + rxrpc_see_skb(skb, rxrpc_skb_see_unshare_nomem); + } + } else { + rxrpc_input_call_packet(call, skb); + } rxrpc_free_skb(skb, rxrpc_skb_put_call_rx); did_receive = true; } diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c index 697956931925..dc5184a2fa9d 100644 --- a/net/rxrpc/io_thread.c +++ b/net/rxrpc/io_thread.c @@ -192,13 +192,12 @@ static bool rxrpc_extract_abort(struct sk_buff *skb) /* * Process packets received on the local endpoint */ -static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb) +static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff *skb) { struct rxrpc_connection *conn; struct sockaddr_rxrpc peer_srx; struct rxrpc_skb_priv *sp; struct rxrpc_peer *peer = NULL; - struct sk_buff *skb = *_skb; bool ret = false; skb_pull(skb, sizeof(struct udphdr)); @@ -244,25 +243,6 @@ static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb) return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call); if (sp->hdr.seq == 0) return rxrpc_bad_message(skb, rxrpc_badmsg_zero_seq); - - /* Unshare the packet so that it can be modified for in-place - * decryption. 
- */
- if (sp->hdr.securityIndex != 0) {
- skb = skb_unshare(skb, GFP_ATOMIC);
- if (!skb) {
- rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare_nomem);
- *_skb = NULL;
- return just_discard;
- }
-
- if (skb != *_skb) {
- rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare);
- *_skb = skb;
- rxrpc_new_skb(skb, rxrpc_skb_new_unshared);
- sp = rxrpc_skb(skb);
- }
- }
 break;
 case RXRPC_PACKET_TYPE_CHALLENGE:
@@ -494,7 +474,7 @@ int rxrpc_io_thread(void *data)
 switch (skb->mark) {
 case RXRPC_SKB_MARK_PACKET:
 skb->priority = 0;
- if (!rxrpc_input_packet(local, &skb))
+ if (!rxrpc_input_packet(local, skb))
 rxrpc_reject_packet(local, skb);
 trace_rxrpc_rx_done(skb->mark, skb->priority);
 rxrpc_free_skb(skb, rxrpc_skb_put_input);
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index 3bcd6ee80396..e2169d1a14b5 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -46,15 +46,6 @@ void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
 skb_get(skb);
 }
 
-/*
- * Note the dropping of a ref on a socket buffer by the core.
- */
-void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
-{
- int n = atomic_inc_return(&rxrpc_n_rx_skbs);
- trace_rxrpc_skb(skb, 0, n, why);
-}
-
 /*
  * Note the destruction of a socket buffer.
  */
-- 
cgit v1.2.3


From 24481a7f573305706054c59e275371f8d0fe919f Mon Sep 17 00:00:00 2001
From: David Howells
Date: Wed, 22 Apr 2026 17:14:33 +0100
Subject: rxrpc: Fix conn-level packet handling to unshare RESPONSE packets

The security operations that verify the RESPONSE packets decrypt bits of
them in place - however, the sk_buff may be shared with a packet sniffer,
which would lead to the sniffer seeing an apparently corrupt packet
(actually decrypted).

Fix this by handing a copy of the packet off to the specific security
handler if the packet was cloned.

Fixes: 17926a79320a ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both")
Closes: https://sashiko.dev/#/patchset/20260408121252.2249051-1-dhowells%40redhat.com
Signed-off-by: David Howells
cc: Marc Dionne
cc: Jeffrey Altman
cc: Simon Horman
cc: linux-afs@lists.infradead.org
cc: stable@kernel.org
Link: https://patch.msgid.link/20260422161438.2593376-5-dhowells@redhat.com
Signed-off-by: Jakub Kicinski
---
 net/rxrpc/conn_event.c | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 9a41ec708aeb..aee977291d90 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -240,6 +240,33 @@ static void rxrpc_call_is_secure(struct rxrpc_call *call)
 rxrpc_notify_socket(call);
 }
 
+static int rxrpc_verify_response(struct rxrpc_connection *conn,
+ struct sk_buff *skb)
+{
+ int ret;
+
+ if (skb_cloned(skb)) {
+ /* Copy the packet if shared so that we can do in-place
+ * decryption.
+ */
+ struct sk_buff *nskb = skb_copy(skb, GFP_NOFS);
+
+ if (nskb) {
+ rxrpc_new_skb(nskb, rxrpc_skb_new_unshared);
+ ret = conn->security->verify_response(conn, nskb);
+ rxrpc_free_skb(nskb, rxrpc_skb_put_response_copy);
+ } else {
+ /* OOM - Drop the packet.
*/ + rxrpc_see_skb(skb, rxrpc_skb_see_unshare_nomem); + ret = -ENOMEM; + } + } else { + ret = conn->security->verify_response(conn, skb); + } + + return ret; +} + /* * connection-level Rx packet processor */ @@ -270,7 +297,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, } spin_unlock_irq(&conn->state_lock); - ret = conn->security->verify_response(conn, skb); + ret = rxrpc_verify_response(conn, skb); if (ret < 0) return ret; -- cgit v1.2.3 From 6929350080f4da292d111a3b33e53138fee51cec Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 22 Apr 2026 17:14:34 +0100 Subject: rxgk: Fix potential integer overflow in length check Fix potential integer overflow in rxgk_extract_token() when checking the length of the ticket. Rather than rounding up the value to be tested (which might overflow), round down the size of the available data. Fixes: 2429a1976481 ("rxrpc: Fix untrusted unsigned subtract") Closes: https://sashiko.dev/#/patchset/20260408121252.2249051-1-dhowells%40redhat.com Signed-off-by: David Howells cc: Marc Dionne cc: Jeffrey Altman cc: Simon Horman cc: linux-afs@lists.infradead.org cc: stable@kernel.org Link: https://patch.msgid.link/20260422161438.2593376-6-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/rxrpc/rxgk_app.c | 2 +- net/rxrpc/rxgk_common.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/rxrpc/rxgk_app.c b/net/rxrpc/rxgk_app.c index 30275cb5ba3e..5587639d60c5 100644 --- a/net/rxrpc/rxgk_app.c +++ b/net/rxrpc/rxgk_app.c @@ -214,7 +214,7 @@ int rxgk_extract_token(struct rxrpc_connection *conn, struct sk_buff *skb, ticket_len = ntohl(container.token_len); ticket_offset = token_offset + sizeof(container); - if (xdr_round_up(ticket_len) > token_len - sizeof(container)) + if (ticket_len > xdr_round_down(token_len - sizeof(container))) goto short_packet; _debug("KVNO %u", kvno); diff --git a/net/rxrpc/rxgk_common.h b/net/rxrpc/rxgk_common.h index 80164d89e19c..1e257d7ab8ec 100644 --- a/net/rxrpc/rxgk_common.h +++ b/net/rxrpc/rxgk_common.h @@ -34,6 +34,7 @@ struct rxgk_context { }; #define xdr_round_up(x) (round_up((x), sizeof(__be32))) +#define xdr_round_down(x) (round_down((x), sizeof(__be32))) #define xdr_object_len(x) (4 + xdr_round_up(x)) /* -- cgit v1.2.3 From ac33733b10b484d666f97688561670afd5861383 Mon Sep 17 00:00:00 2001 From: Anderson Nascimento Date: Wed, 22 Apr 2026 17:14:35 +0100 Subject: rxrpc: Fix missing validation of ticket length in non-XDR key preparsing In rxrpc_preparse(), there are two paths for parsing key payloads: the XDR path (for large payloads) and the non-XDR path (for payloads <= 28 bytes). While the XDR path (rxrpc_preparse_xdr_rxkad()) correctly validates the ticket length against AFSTOKEN_RK_TIX_MAX, the non-XDR path fails to do so. This allows an unprivileged user to provide a very large ticket length. When this key is later read via rxrpc_read(), the total token size (toksize) calculation results in a value that exceeds AFSTOKEN_LENGTH_MAX, triggering a WARN_ON(). [ 2001.302904] WARNING: CPU: 2 PID: 2108 at net/rxrpc/key.c:778 rxrpc_read+0x109/0x5c0 [rxrpc] Fix this by adding a check in the non-XDR parsing path of rxrpc_preparse() to ensure the ticket length does not exceed AFSTOKEN_RK_TIX_MAX, bringing it into parity with the XDR parsing logic. 
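
To illustrate the shape of the fix - validate the attacker-controlled
length before it feeds any size computation - here is a small userspace
sketch. The struct layout and the constant value are stand-ins, not the
kernel definitions; EKEYREJECTED is the Linux errno used above:

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    #define AFSTOKEN_RK_TIX_MAX 12000   /* stand-in for the kernel limit */

    struct rxrpc_key_data_v1 {          /* stand-in for the v1 payload */
        uint32_t security_index;
        uint32_t ticket_length;
    };

    static int preparse(const struct rxrpc_key_data_v1 *v1)
    {
        /* Bound the user-supplied length first, so the later
         * header-plus-ticket size can never exceed the token limit.
         */
        if (v1->ticket_length > AFSTOKEN_RK_TIX_MAX)
            return -EKEYREJECTED;
        printf("plen=%zu\n", (size_t)8 + v1->ticket_length);
        return 0;
    }

    int main(void)
    {
        struct rxrpc_key_data_v1 bad = { 2, 0xffffffff };
        struct rxrpc_key_data_v1 ok  = { 2, 56 };

        printf("bad=%d\n", preparse(&bad));
        return preparse(&ok) ? 1 : 0;
    }

Rejecting at preparse time means the oversized length can never reach the
toksize computation that trips the WARN_ON() in rxrpc_read().
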
Fixes: 8a7a3eb4ddbe ("KEYS: RxRPC: Use key preparsing")
Fixes: 84924aac08a4 ("rxrpc: Fix checker warning")
Reported-by: Anderson Nascimento
Signed-off-by: Anderson Nascimento
Signed-off-by: David Howells
cc: Marc Dionne
cc: Jeffrey Altman
cc: Simon Horman
cc: linux-afs@lists.infradead.org
cc: stable@kernel.org
Link: https://patch.msgid.link/20260422161438.2593376-7-dhowells@redhat.com
Signed-off-by: Jakub Kicinski
---
 net/rxrpc/key.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'net')

diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 6301d79ee35a..3ec3d89fdf14 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -502,6 +502,10 @@ static int rxrpc_preparse(struct key_preparsed_payload *prep)
 if (v1->security_index != RXRPC_SECURITY_RXKAD)
 goto error;
 
+ ret = -EKEYREJECTED;
+ if (v1->ticket_length > AFSTOKEN_RK_TIX_MAX)
+ goto error;
+
 plen = sizeof(*token->kad) + v1->ticket_length;
 prep->quotalen += plen + sizeof(*token);
 
-- 
cgit v1.2.3


From 55b2984c96c37f909bbfe8851f13152693951382 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Thu, 23 Apr 2026 21:09:06 +0100
Subject: rxrpc: Fix rxrpc_input_call_event() to only unshare DATA packets

Fix rxrpc_input_call_event() to only unshare DATA packets and not ACK,
ABORT, etc. Only DATA packets on a secured connection are decrypted in
place, so only they need a private copy; the unsharing introduced by the
previous fix was applied to every packet type on a secured connection.

Fixes: 1f2740150f90 ("rxrpc: Fix potential UAF after skb_unshare() failure")
Closes: https://sashiko.dev/#/patchset/20260422161438.2593376-4-dhowells@redhat.com
Signed-off-by: David Howells
cc: Marc Dionne
cc: Jeffrey Altman
cc: Simon Horman
cc: linux-afs@lists.infradead.org
cc: stable@kernel.org
Link: https://patch.msgid.link/20260423200909.3049438-2-dhowells@redhat.com
Signed-off-by: Jakub Kicinski
---
 net/rxrpc/call_event.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index cc8f9dfa44e8..fdd683261226 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -332,7 +332,8 @@ bool rxrpc_input_call_event(struct rxrpc_call *call)
 
 saw_ack |= sp->hdr.type == RXRPC_PACKET_TYPE_ACK;
 
- if (sp->hdr.securityIndex != 0 &&
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+ sp->hdr.securityIndex != 0 &&
 skb_cloned(skb)) {
 /* Unshare the packet so that it can be
 * modified by in-place decryption.
-- 
cgit v1.2.3


From 0422e7a4883f25101903f3e8105c0808aa5f4ce9 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Thu, 23 Apr 2026 21:09:07 +0100
Subject: rxrpc: Fix re-decryption of RESPONSE packets

If a RESPONSE packet gets a temporary failure during processing, it may
end up in a partially decrypted state - and then get requeued for a
retry. Fix this by just discarding the packet; we will send another
CHALLENGE packet and thereby elicit a further response.

Similarly, discard an incoming CHALLENGE packet if we get an error whilst
generating a RESPONSE; the server will send another CHALLENGE.
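
Discarding rather than requeuing matters because in-place decryption is
not idempotent. A toy demonstration, where XOR stands in for the real
cipher (illustrative only, not the rxkad code):

    #include <stdio.h>
    #include <string.h>

    static void xor_crypt(unsigned char *buf, size_t len)
    {
        for (size_t i = 0; i < len; i++)
            buf[i] ^= 0x5a;     /* stand-in for fcrypt/AES */
    }

    int main(void)
    {
        unsigned char pkt[8];

        memcpy(pkt, "RESPONSE", 8);
        xor_crypt(pkt, 8);      /* sender encrypts for the wire */
        xor_crypt(pkt, 8);      /* first processing pass decrypts... */
        xor_crypt(pkt, 8);      /* ...a retry "decrypts" again */
        for (int i = 0; i < 8; i++)
            printf("%02x", pkt[i]);     /* garbage, not "RESPONSE" */
        printf("\n");
        return 0;
    }

A requeued packet would hit the third pass above: the verifier would then
be checksumming ciphertext-of-plaintext and the negotiation would fail
hard. Dropping the packet and letting the peer answer a fresh CHALLENGE is
the simpler and safer recovery.
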
Fixes: 17926a79320a ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both") Closes: https://sashiko.dev/#/patchset/20260422161438.2593376-4-dhowells@redhat.com Signed-off-by: David Howells cc: Marc Dionne cc: Jeffrey Altman cc: Simon Horman cc: linux-afs@lists.infradead.org cc: stable@kernel.org Link: https://patch.msgid.link/20260423200909.3049438-3-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/rxrpc/conn_event.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index aee977291d90..a2130d25aaa9 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -389,7 +389,6 @@ again: static void rxrpc_do_process_connection(struct rxrpc_connection *conn) { struct sk_buff *skb; - int ret; if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events)) rxrpc_secure_connection(conn); @@ -398,17 +397,8 @@ static void rxrpc_do_process_connection(struct rxrpc_connection *conn) * connection that each one has when we've finished with it */ while ((skb = skb_dequeue(&conn->rx_queue))) { rxrpc_see_skb(skb, rxrpc_skb_see_conn_work); - ret = rxrpc_process_event(conn, skb); - switch (ret) { - case -ENOMEM: - case -EAGAIN: - skb_queue_head(&conn->rx_queue, skb); - rxrpc_queue_conn(conn, rxrpc_conn_queue_retry_work); - break; - default: - rxrpc_free_skb(skb, rxrpc_skb_put_conn_work); - break; - } + rxrpc_process_event(conn, skb); + rxrpc_free_skb(skb, rxrpc_skb_put_conn_work); } } -- cgit v1.2.3 From 3476c8bb960f48e49355d6f93fb7673211e0163f Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 23 Apr 2026 21:09:08 +0100 Subject: rxrpc: Fix error handling in rxgk_extract_token() Fix a missing bit of error handling in rxgk_extract_token(): in the event that rxgk_decrypt_skb() returns -ENOMEM, it should just return that rather than continuing on (for anything else, it generates an abort). Fixes: 64863f4ca494 ("rxrpc: Fix unhandled errors in rxgk_verify_packet_integrity()") Closes: https://sashiko.dev/#/patchset/20260422161438.2593376-4-dhowells@redhat.com Signed-off-by: David Howells cc: Marc Dionne cc: Jeffrey Altman cc: Simon Horman cc: linux-afs@lists.infradead.org cc: stable@kernel.org Link: https://patch.msgid.link/20260423200909.3049438-4-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/rxrpc/rxgk_app.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/rxrpc/rxgk_app.c b/net/rxrpc/rxgk_app.c index 5587639d60c5..0ef2a29eb695 100644 --- a/net/rxrpc/rxgk_app.c +++ b/net/rxrpc/rxgk_app.c @@ -245,6 +245,7 @@ int rxgk_extract_token(struct rxrpc_connection *conn, struct sk_buff *skb, if (ret != -ENOMEM) return rxrpc_abort_conn(conn, skb, ec, ret, rxgk_abort_resp_tok_dec); + return ret; } ret = conn->security->default_decode_ticket(conn, skb, ticket_offset, -- cgit v1.2.3
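
Taken together, the last two patches converge on one error-triage rule for
the security handlers: -ENOMEM is temporary and must propagate to the
caller, while anything else aborts the connection. A compact sketch of the
rule, using hypothetical names rather than the rxrpc API:

    #include <stdio.h>
    #include <errno.h>

    static int abort_conn(int err)
    {
        printf("abort conn: %d\n", err);
        return err;
    }

    static int extract_token(int decrypt_ret)
    {
        if (decrypt_ret < 0) {
            if (decrypt_ret != -ENOMEM)
                return abort_conn(decrypt_ret);
            return decrypt_ret; /* the previously missing return */
        }
        return 0;               /* carry on decoding the ticket */
    }

    int main(void)
    {
        printf("oom:  %d\n", extract_token(-ENOMEM));   /* retryable */
        printf("bad:  %d\n", extract_token(-EPROTO));   /* aborts */
        printf("good: %d\n", extract_token(0));
        return 0;
    }

Before the fix, the -ENOMEM case fell through into the ticket-decode path
with a failed decryption behind it; with the explicit return, the caller
sees the temporary error and the packet can be retried or dropped cleanly.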