Diffstat (limited to 'net')
197 files changed, 3839 insertions, 1781 deletions
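Editor's note: the dominant mechanical change in the diff below is a tree-wide conversion of socket-layer entry points (proto_ops bind/connect implementations, kernel_bind(), kernel_connect()) from `struct sockaddr *` to the kernel-internal `struct sockaddr_unsized *`, which drops the implied fixed-size `sa_data` at those boundaries. As a purely illustrative sketch — the kernel type is not visible to userspace, and every file and identifier name here is invented — this is the userspace analogue of the same contract: a large generic storage type passed through a size-less pointer, with correctness carried entirely by the explicit length argument.

/* Illustrative only: userspace analogue of the storage-plus-explicit-length
 * pattern that the sockaddr_unsized conversion makes explicit in-kernel.
 * Build: cc -o bindstor bindstor.c
 */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct sockaddr_storage stor;   /* big enough for any address family */
    struct sockaddr_in *sin = (struct sockaddr_in *)&stor;
    int fd = socket(AF_INET, SOCK_STREAM, 0);

    if (fd < 0)
        return 1;

    memset(&stor, 0, sizeof(stor));
    sin->sin_family = AF_INET;
    sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    sin->sin_port = htons(0);       /* let the kernel pick a port */

    /* The generic pointer carries no size at all; only the explicit
     * length argument does. That is the contract the kernel's new
     * sockaddr_unsized type makes impossible to forget.
     */
    if (bind(fd, (struct sockaddr *)&stor, sizeof(struct sockaddr_in)) < 0) {
        perror("bind");
        return 1;
    }
    close(fd);
    return 0;
}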
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index a516745f732f..49d674f5e73a 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -966,7 +966,7 @@ static int p9_bind_privport(struct socket *sock)
             ((struct sockaddr_in *)&stor)->sin_port = htons((ushort)port);
         else
             ((struct sockaddr_in6 *)&stor)->sin6_port = htons((ushort)port);
-        err = kernel_bind(sock, (struct sockaddr *)&stor, sizeof(stor));
+        err = kernel_bind(sock, (struct sockaddr_unsized *)&stor, sizeof(stor));
         if (err != -EADDRINUSE)
             break;
     }
@@ -1018,7 +1018,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
     }
 
     err = READ_ONCE(csocket->ops)->connect(csocket,
-                                           (struct sockaddr *)&stor,
+                                           (struct sockaddr_unsized *)&stor,
                                            sizeof(stor), 0);
     if (err < 0) {
         pr_err("%s (%d): problem connecting socket to %s\n",
@@ -1058,8 +1058,8 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
         return err;
     }
 
-    err = READ_ONCE(csocket->ops)->connect(csocket, (struct sockaddr *)&sun_server,
-                                           sizeof(struct sockaddr_un) - 1, 0);
+    err = READ_ONCE(csocket->ops)->connect(csocket, (struct sockaddr_unsized *)&sun_server,
+                                           sizeof(struct sockaddr_un) - 1, 0);
     if (err < 0) {
         pr_err("%s (%d): problem connecting socket: %s: %d\n",
                __func__, task_pid_nr(current), addr, err);
diff --git a/net/Kconfig b/net/Kconfig
index 1d3f757d4b07..62266eaf0e95 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -400,15 +400,15 @@ config NET_PKTGEN
       module will be called pktgen.
 
 config NET_DROP_MONITOR
-    tristate "Network packet drop alerting service"
+    tristate "Legacy network packet drop alerting service"
     depends on INET && TRACEPOINTS
     help
       This feature provides an alerting service to userspace in the
       event that packets are discarded in the network stack. Alerts
       are broadcast via netlink socket to any listening user space
-      process. If you don't need network drop alerts, or if you are ok
-      just checking the various proc files and other utilities for
-      drop statistics, say N here.
+      process. This feature is NOT related to "perf" based drop monitoring.
+      Say N here unless you need to support older userspace tools like
+      "dropwatch".
 endmenu # Network testing
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 30242fe10341..2a01fff46c9d 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1149,7 +1149,7 @@ out:
 }
 
 /* Set the address 'our end' of the connection */
-static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+static int atalk_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
 {
     struct sockaddr_at *addr = (struct sockaddr_at *)uaddr;
     struct sock *sk = sock->sk;
@@ -1204,7 +1204,7 @@ out:
 }
 
 /* Set the address we talk to */
-static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
+static int atalk_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
              int addr_len, int flags)
 {
     struct sock *sk = sock->sk;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index f7a5565e794e..8f152e5fa659 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -168,10 +168,10 @@ static int neigh_check_cb(struct neighbour *n)
 
 static void idle_timer_check(struct timer_list *unused)
 {
-    write_lock(&arp_tbl.lock);
+    spin_lock(&arp_tbl.lock);
     __neigh_for_each_release(&arp_tbl, neigh_check_cb);
     mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
-    write_unlock(&arp_tbl.lock);
+    spin_unlock(&arp_tbl.lock);
 }
 
 static int clip_arp_rcv(struct sk_buff *skb)
diff --git a/net/atm/common.c b/net/atm/common.c
index 881c7f259dbd..cecc71a8bee1 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -157,7 +157,7 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family, i
     memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
     memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
     vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
-    refcount_set(&sk->sk_wmem_alloc, 1);
+    refcount_set(&sk->sk_wmem_alloc, SK_WMEM_ALLOC_BIAS);
     atomic_set(&sk->sk_rmem_alloc, 0);
     vcc->push = NULL;
     vcc->pop = NULL;
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index 66d9a9bd5896..8f5e76f5dd9e 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -24,7 +24,7 @@ static int pvc_shutdown(struct socket *sock, int how)
     return 0;
 }
 
-static int pvc_bind(struct socket *sock, struct sockaddr *sockaddr,
+static int pvc_bind(struct socket *sock, struct sockaddr_unsized *sockaddr,
             int sockaddr_len)
 {
     struct sock *sk = sock->sk;
@@ -56,7 +56,7 @@ out:
     return error;
 }
 
-static int pvc_connect(struct socket *sock, struct sockaddr *sockaddr,
+static int pvc_connect(struct socket *sock, struct sockaddr_unsized *sockaddr,
                int sockaddr_len, int flags)
 {
     return pvc_bind(sock, sockaddr, sockaddr_len);
diff --git a/net/atm/svc.c b/net/atm/svc.c
index f8137ae693b0..005964250ecd 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -97,7 +97,7 @@ static int svc_release(struct socket *sock)
     return 0;
 }
 
-static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
+static int svc_bind(struct socket *sock, struct sockaddr_unsized *sockaddr,
             int sockaddr_len)
 {
     DEFINE_WAIT(wait);
@@ -153,7 +153,7 @@ out:
     return error;
 }
 
-static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
+static int svc_connect(struct socket *sock, struct sockaddr_unsized *sockaddr,
                int sockaddr_len, int flags)
 {
     DEFINE_WAIT(wait);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 6ef8b2a57a9b..7ebbff2f0020 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1094,7 +1094,7 @@ static int ax25_release(struct socket *sock)
  * that we've implemented support for SO_BINDTODEVICE. It is however small
  * and trivially backward compatible.
  */
-static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+static int ax25_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
 {
     struct sock *sk = sock->sk;
     struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
@@ -1175,7 +1175,7 @@ out:
  *  FIXME: nonblock behaviour looks like it may have a bug.
  */
 static int __must_check ax25_connect(struct socket *sock,
-    struct sockaddr *uaddr, int addr_len, int flags)
+    struct sockaddr_unsized *uaddr, int addr_len, int flags)
 {
     struct sock *sk = sock->sk;
     ax25_cb *ax25 = sk_to_ax25(sk), *ax25t;
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index c299e2bc87ed..58c408b7a7d9 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -35,6 +35,7 @@ config BATMAN_ADV_BLA
     bool "Bridge Loop Avoidance"
     depends on BATMAN_ADV && INET
     select CRC16
+    select NET_CRC32C
     default y
     help
       This option enables BLA (Bridge Loop Avoidance), a mechanism
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index b992ba12aa24..3dc791c15bf7 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -12,7 +12,6 @@
 #include <linux/compiler.h>
 #include <linux/container_of.h>
 #include <linux/crc16.h>
-#include <linux/crc32.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
@@ -1586,44 +1585,10 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_skb_crc32() - calculate CRC32 of the whole packet and skip bytes in
- *  the header
- * @skb: skb pointing to fragmented socket buffers
- * @payload_ptr: Pointer to position inside the head buffer of the skb
- *  marking the start of the data to be CRC'ed
- *
- * payload_ptr must always point to an address in the skb head buffer and not to
- * a fragment.
- *
- * Return: big endian crc32c of the checksummed data
- */
-static __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
-{
-    unsigned int to = skb->len;
-    unsigned int consumed = 0;
-    struct skb_seq_state st;
-    unsigned int from;
-    unsigned int len;
-    const u8 *data;
-    u32 crc = 0;
-
-    from = (unsigned int)(payload_ptr - skb->data);
-
-    skb_prepare_seq_read(skb, from, to, &st);
-    while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
-        crc = crc32c(crc, data, len);
-        consumed += len;
-    }
-
-    return htonl(crc);
-}
-
-/**
  * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup.
  * @bat_priv: the bat priv with all the mesh interface information
  * @skb: contains the multicast packet to be checked
- * @payload_ptr: pointer to position inside the head buffer of the skb
- *  marking the start of the data to be CRC'ed
+ * @payload_offset: offset in the skb, marking the start of the data to be CRC'ed
  * @orig: originator mac address, NULL if unknown
  *
  * Check if it is on our broadcast list. Another gateway might have sent the
@@ -1638,16 +1603,18 @@ static __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
  * Return: true if a packet is in the duplicate list, false otherwise.
  */
 static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv,
-                     struct sk_buff *skb, u8 *payload_ptr,
+                     struct sk_buff *skb, int payload_offset,
                      const u8 *orig)
 {
     struct batadv_bcast_duplist_entry *entry;
     bool ret = false;
+    int payload_len;
     int i, curr;
-    __be32 crc;
+    u32 crc;
 
     /* calculate the crc ...
     */
-    crc = batadv_skb_crc32(skb, payload_ptr);
+    payload_len = skb->len - payload_offset;
+    crc = skb_crc32c(skb, payload_offset, payload_len, 0);
 
     spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
@@ -1727,7 +1694,7 @@ out:
 static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv,
                        struct sk_buff *skb)
 {
-    return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL);
+    return batadv_bla_check_duplist(bat_priv, skb, 0, NULL);
 }
 
 /**
@@ -1745,12 +1712,10 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                     struct sk_buff *skb)
 {
     struct batadv_bcast_packet *bcast_packet;
-    u8 *payload_ptr;
 
     bcast_packet = (struct batadv_bcast_packet *)skb->data;
-    payload_ptr = (u8 *)(bcast_packet + 1);
 
-    return batadv_bla_check_duplist(bat_priv, skb, payload_ptr,
+    return batadv_bla_check_duplist(bat_priv, skb, sizeof(*bcast_packet),
                     bcast_packet->orig);
 }
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 2be1ac17acaa..af230b017bc1 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2025.4"
+#define BATADV_SOURCE_VERSION "2025.5"
 #endif
 
 /* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index ae1d7a8dc480..8fc5fe0e9b05 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -734,7 +734,7 @@ struct batadv_bcast_duplist_entry {
     u8 orig[ETH_ALEN];
 
     /** @crc: crc32 checksum of broadcast payload */
-    __be32 crc;
+    u32 crc;
 
     /** @entrytime: time when the broadcast packet was received */
     unsigned long entrytime;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index fc866759910d..ba9f48771e11 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1185,7 +1185,7 @@ static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
 }
 #endif
 
-static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
+static int hci_sock_bind(struct socket *sock, struct sockaddr_unsized *addr,
              int addr_len)
 {
     struct sockaddr_hci haddr;
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 3d98cb6291da..243505b89733 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -944,7 +944,7 @@ static int iso_sock_create(struct net *net, struct socket *sock, int protocol,
     return 0;
 }
 
-static int iso_sock_bind_bc(struct socket *sock, struct sockaddr *addr,
+static int iso_sock_bind_bc(struct socket *sock, struct sockaddr_unsized *addr,
                 int addr_len)
 {
     struct sockaddr_iso *sa = (struct sockaddr_iso *)addr;
@@ -1022,7 +1022,7 @@ done:
     return err;
 }
 
-static int iso_sock_bind(struct socket *sock, struct sockaddr *addr,
+static int iso_sock_bind(struct socket *sock, struct sockaddr_unsized *addr,
              int addr_len)
 {
     struct sockaddr_iso *sa = (struct sockaddr_iso *)addr;
@@ -1080,7 +1080,7 @@ done:
     return err;
 }
 
-static int iso_sock_connect(struct socket *sock, struct sockaddr *addr,
+static int iso_sock_connect(struct socket *sock, struct sockaddr_unsized *addr,
                 int alen, int flags)
 {
     struct sockaddr_iso *sa = (struct sockaddr_iso *)addr;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 814fb8610ac4..9ee189c815d4 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -80,7 +80,7 @@ static int l2cap_validate_le_psm(u16 psm)
     return 0;
 }
 
-static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+static int l2cap_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, int alen)
 {
     struct sock *sk = sock->sk;
     struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -178,7 +178,7 @@ done:
     return err;
 }
 
-static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
+static int l2cap_sock_connect(struct socket *sock, struct sockaddr_unsized *addr,
                   int alen, int flags)
 {
     struct sock *sk = sock->sk;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 96250807b32b..57b1dca8141f 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -781,7 +781,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
     addr.l2_psm = 0;
     addr.l2_cid = 0;
     addr.l2_bdaddr_type = BDADDR_BREDR;
-    *err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
+    *err = kernel_bind(sock, (struct sockaddr_unsized *)&addr, sizeof(addr));
     if (*err < 0)
         goto failed;
 
@@ -808,7 +808,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
     addr.l2_psm = cpu_to_le16(L2CAP_PSM_RFCOMM);
     addr.l2_cid = 0;
     addr.l2_bdaddr_type = BDADDR_BREDR;
-    *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
+    *err = kernel_connect(sock, (struct sockaddr_unsized *)&addr, sizeof(addr), O_NONBLOCK);
     if (*err == 0 || *err == -EINPROGRESS)
         return s;
 
@@ -2068,7 +2068,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
     addr.l2_psm = cpu_to_le16(L2CAP_PSM_RFCOMM);
     addr.l2_cid = 0;
     addr.l2_bdaddr_type = BDADDR_BREDR;
-    err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
+    err = kernel_bind(sock, (struct sockaddr_unsized *)&addr, sizeof(addr));
     if (err < 0) {
         BT_ERR("Bind failed %d", err);
         goto failed;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 913402806fa0..be6639cd6f59 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -324,7 +324,7 @@ static int rfcomm_sock_create(struct net *net, struct socket *sock,
     return 0;
 }
 
-static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int rfcomm_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len)
 {
     struct sockaddr_rc sa;
     struct sock *sk = sock->sk;
@@ -371,7 +371,8 @@ done:
     return err;
 }
 
-static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
+static int rfcomm_sock_connect(struct socket *sock, struct sockaddr_unsized *addr,
+                   int alen, int flags)
 {
     struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
     struct sock *sk = sock->sk;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index ab0cf442d57b..7afe65e7ff37 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -605,7 +605,7 @@ static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
     return 0;
 }
 
-static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
+static int sco_sock_bind(struct socket *sock, struct sockaddr_unsized *addr,
              int addr_len)
 {
     struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
@@ -639,7 +639,7 @@ done:
     return err;
 }
 
-static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
+static int sco_sock_connect(struct socket *sock, struct sockaddr_unsized *addr, int alen, int flags)
 {
     struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
     struct sock *sk = sock->sk;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ca3a637d7cca..4c67a32745f6 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -526,20 +526,6 @@ void br_mtu_auto_adjust(struct net_bridge *br)
     br_opt_toggle(br, BROPT_MTU_SET_BY_USER, false);
 }
 
-static void br_set_gso_limits(struct net_bridge *br)
-{
-    unsigned int tso_max_size = TSO_MAX_SIZE;
-    const struct net_bridge_port *p;
-    u16 tso_max_segs = TSO_MAX_SEGS;
-
-    list_for_each_entry(p, &br->port_list, list) {
-        tso_max_size = min(tso_max_size, p->dev->tso_max_size);
-        tso_max_segs = min(tso_max_segs, p->dev->tso_max_segs);
-    }
-    netif_set_tso_max_size(br->dev, tso_max_size);
-    netif_set_tso_max_segs(br->dev, tso_max_segs);
-}
-
 /*
  * Recomputes features using slave's features
  */
@@ -653,8 +639,6 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
             netdev_err(dev, "failed to sync bridge static fdb addresses to this port\n");
     }
 
-    netdev_update_features(br->dev);
-
     br_hr = br->dev->needed_headroom;
     dev_hr = netdev_get_fwd_headroom(dev);
     if (br_hr < dev_hr)
@@ -695,7 +679,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
         call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
 
     br_mtu_auto_adjust(br);
-    br_set_gso_limits(br);
+
+    netdev_compute_master_upper_features(br->dev, false);
 
     kobject_uevent(&p->kobj, KOBJ_ADD);
 
@@ -741,7 +726,6 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
     del_nbp(p);
 
     br_mtu_auto_adjust(br);
-    br_set_gso_limits(br);
 
     spin_lock_bh(&br->lock);
     changed_addr = br_stp_recalculate_bridge_id(br);
@@ -750,7 +734,7 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
     if (changed_addr)
         call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
 
-    netdev_update_features(br->dev);
+    netdev_compute_master_upper_features(br->dev, false);
 
     return 0;
 }
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 22d12e545966..d55a4ab87837 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -4649,6 +4649,14 @@ static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
     rcu_read_unlock();
 }
 
+static void br_multicast_del_grps(struct net_bridge *br)
+{
+    struct net_bridge_port *port;
+
+    list_for_each_entry(port, &br->port_list, list)
+        __br_multicast_disable_port_ctx(&port->multicast_ctx);
+}
+
 int br_multicast_toggle(struct net_bridge *br, unsigned long val,
             struct netlink_ext_ack *extack)
 {
@@ -4669,6 +4677,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val,
     br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
     if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
         change_snoopers = true;
+        br_multicast_del_grps(br);
         goto unlock;
     }
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 4e2d53b27221..0264730938f4 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -467,7 +467,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
     else
         br = netdev_priv(dev);
 
-    br_debug(br, "br_fill_info event %d port %s master %s\n",
+    br_debug(br, "br_fill_ifinfo event %d port %s master %s\n",
          event, dev->name, br->dev->name);
 
     nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 039dfbd367c9..af218742af5a 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -734,7 +734,7 @@ bad_sol:
 * o sock->state: holds the SS_* socket state and is updated by connect and
 *   disconnect.
 */
-static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
+static int caif_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
             int addr_len, int flags)
 {
     struct sock *sk = sock->sk;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 5e690a2377e4..7eba8ae01a5b 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1657,7 +1657,7 @@ static int bcm_release(struct socket *sock)
     return 0;
 }
 
-static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
+static int bcm_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int len,
                int flags)
 {
     struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 74ee1e52249b..ce588b85665a 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -1246,7 +1246,7 @@ static int isotp_release(struct socket *sock)
     return 0;
 }
 
-static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+static int isotp_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int len)
 {
     struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
     struct sock *sk = sock->sk;
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 88e7160d4248..6272326dd614 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -440,7 +440,7 @@ static int j1939_sk_sanity_check(struct sockaddr_can *addr, int len)
     return 0;
 }
 
-static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+static int j1939_sk_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int len)
 {
     struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
     struct j1939_sock *jsk = j1939_sk(sock->sk);
@@ -535,7 +535,7 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
     return ret;
 }
 
-static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
+static int j1939_sk_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
                 int len, int flags)
 {
     struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
diff --git a/net/can/raw.c b/net/can/raw.c
index a53853f5e9af..f36a83d3447c 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -449,7 +449,7 @@ static int raw_release(struct socket *sock)
     return 0;
 }
 
-static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+static int raw_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int len)
 {
     struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
     struct sock *sk = sock->sk;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index f8181acaf870..70b25f4ecba6 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -460,7 +460,7 @@ int ceph_tcp_connect(struct ceph_connection *con)
     set_sock_callbacks(sock, con);
 
     con_sock_state_connecting(con);
-    ret = kernel_connect(sock, (struct sockaddr *)&ss, sizeof(ss),
+    ret = kernel_connect(sock, (struct sockaddr_unsized *)&ss, sizeof(ss),
                  O_NONBLOCK);
     if (ret == -EINPROGRESS) {
         dout("connect %s EINPROGRESS sk_state = %u\n",
diff --git a/net/core/dev.c b/net/core/dev.c
index 2acfa44927da..69515edd17bc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1163,6 +1163,7 @@ void netdev_copy_name(struct net_device *dev, char *name)
         strscpy(name, dev->name, IFNAMSIZ);
     } while (read_seqretry(&netdev_rename_lock, seq));
 }
+EXPORT_IPV6_MOD_GPL(netdev_copy_name);
 
 /**
  * netdev_get_name - get a netdevice name, knowing its ifindex.
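Editor's note: several net/core/dev.c hunks below annotate the rarely-true `pt_prev` checks with unlikely() and de-inline deliver_skb(). A minimal sketch of what such branch hints mean, assuming the GCC/Clang __builtin_expect builtin (all identifiers invented for illustration):

/* Illustrative only: the likely()/unlikely() annotations added below
 * compile down to __builtin_expect() hints like these.
 * Build: cc -O2 -o hint hint.c
 */
#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static int deliver(int pkt)
{
    return pkt + 1; /* stand-in for handing the skb to an extra tap */
}

int main(void)
{
    int delivered = 0;

    for (int pkt = 0; pkt < 1000; pkt++) {
        /* Most packets have no extra protocol tap registered, so the
         * branch is hinted as rarely taken and kept off the hot path.
         */
        if (unlikely(pkt % 256 == 0))
            delivered += deliver(pkt);
    }
    printf("%d\n", delivered);
    return 0;
}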
@@ -2462,9 +2463,9 @@ int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
     return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
 }
 
-static inline int deliver_skb(struct sk_buff *skb,
-                  struct packet_type *pt_prev,
-                  struct net_device *orig_dev)
+static int deliver_skb(struct sk_buff *skb,
+               struct packet_type *pt_prev,
+               struct net_device *orig_dev)
 {
     if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
         return -ENOMEM;
@@ -2483,7 +2484,7 @@ static inline void deliver_ptype_list_skb(struct sk_buff *skb,
     list_for_each_entry_rcu(ptype, ptype_list, list) {
         if (ptype->type != type)
             continue;
-        if (pt_prev)
+        if (unlikely(pt_prev))
             deliver_skb(skb, pt_prev, orig_dev);
         pt_prev = ptype;
     }
@@ -2544,7 +2545,7 @@ again:
         if (skb_loop_sk(ptype, skb))
             continue;
 
-        if (pt_prev) {
+        if (unlikely(pt_prev)) {
             deliver_skb(skb2, pt_prev, skb->dev);
             pt_prev = ptype;
             continue;
         }
@@ -3373,6 +3374,13 @@ static void __netif_reschedule(struct Qdisc *q)
 
 void __netif_schedule(struct Qdisc *q)
 {
+    /* If q->defer_list is not empty, at least one thread is
+     * in __dev_xmit_skb() before llist_del_all(&q->defer_list).
+     * This thread will attempt to run the queue.
+     */
+    if (!llist_empty(&q->defer_list))
+        return;
+
     if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
         __netif_reschedule(q);
 }
@@ -4125,9 +4133,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                  struct net_device *dev,
                  struct netdev_queue *txq)
 {
+    struct sk_buff *next, *to_free = NULL;
     spinlock_t *root_lock = qdisc_lock(q);
-    struct sk_buff *to_free = NULL;
-    bool contended;
+    struct llist_node *ll_list, *first_n;
+    unsigned long defer_count = 0;
     int rc;
 
     qdisc_calculate_pkt_len(skb, q);
@@ -4167,67 +4176,81 @@ no_lock_out:
         return rc;
     }
 
-    if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) {
-        kfree_skb_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
-        return NET_XMIT_DROP;
-    }
-    /*
-     * Heuristic to force contended enqueues to serialize on a
-     * separate lock before trying to get qdisc main lock.
-     * This permits qdisc->running owner to get the lock more
-     * often and dequeue packets faster.
-     * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
-     * and then other tasks will only enqueue packets. The packets will be
-     * sent after the qdisc owner is scheduled again. To prevent this
-     * scenario the task always serialize on the lock.
+    /* Open code llist_add(&skb->ll_node, &q->defer_list) + queue limit.
+     * In the try_cmpxchg() loop, we want to increment q->defer_count
+     * at most once to limit the number of skbs in defer_list.
+     * We perform the defer_count increment only if the list is not empty,
+     * because some arches have slow atomic_long_inc_return().
      */
-    contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
-    if (unlikely(contended))
-        spin_lock(&q->busylock);
+    first_n = READ_ONCE(q->defer_list.first);
+    do {
+        if (first_n && !defer_count) {
+            defer_count = atomic_long_inc_return(&q->defer_count);
+            if (unlikely(defer_count > q->limit)) {
+                kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_DROP);
+                return NET_XMIT_DROP;
+            }
+        }
+        skb->ll_node.next = first_n;
+    } while (!try_cmpxchg(&q->defer_list.first, &first_n, &skb->ll_node));
+
+    /* If defer_list was not empty, we know the cpu which queued
+     * the first skb will process the whole list for us.
+     */
+    if (first_n)
+        return NET_XMIT_SUCCESS;
 
     spin_lock(root_lock);
+
+    ll_list = llist_del_all(&q->defer_list);
+    /* There is a small race because we clear defer_count not atomically
+     * with the prior llist_del_all(). This means defer_list could grow
+     * over q->limit.
+     */
+    atomic_long_set(&q->defer_count, 0);
+
+    ll_list = llist_reverse_order(ll_list);
+
     if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
-        __qdisc_drop(skb, &to_free);
+        llist_for_each_entry_safe(skb, next, ll_list, ll_node)
+            __qdisc_drop(skb, &to_free);
         rc = NET_XMIT_DROP;
-    } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
-           qdisc_run_begin(q)) {
+        goto unlock;
+    }
+    if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
+        !llist_next(ll_list) && qdisc_run_begin(q)) {
         /*
          * This is a work-conserving queue; there are no old skbs
          * waiting to be sent out; and the qdisc is not running -
          * xmit the skb directly.
          */
+        DEBUG_NET_WARN_ON_ONCE(skb != llist_entry(ll_list,
+                              struct sk_buff,
+                              ll_node));
         qdisc_bstats_update(q, skb);
-
-        if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
-            if (unlikely(contended)) {
-                spin_unlock(&q->busylock);
-                contended = false;
-            }
+        if (sch_direct_xmit(skb, q, dev, txq, root_lock, true))
             __qdisc_run(q);
-        }
-
         qdisc_run_end(q);
         rc = NET_XMIT_SUCCESS;
     } else {
-        WRITE_ONCE(q->owner, smp_processor_id());
-        rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
-        WRITE_ONCE(q->owner, -1);
-        if (qdisc_run_begin(q)) {
-            if (unlikely(contended)) {
-                spin_unlock(&q->busylock);
-                contended = false;
-            }
-            __qdisc_run(q);
-            qdisc_run_end(q);
+        int count = 0;
+
+        llist_for_each_entry_safe(skb, next, ll_list, ll_node) {
+            prefetch(next);
+            skb_mark_not_on_list(skb);
+            rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
+            count++;
         }
+        qdisc_run(q);
+        if (count != 1)
+            rc = NET_XMIT_SUCCESS;
     }
+unlock:
     spin_unlock(root_lock);
     if (unlikely(to_free))
         kfree_skb_list_reason(to_free, tcf_get_drop_reason(to_free));
-    if (unlikely(contended))
-        spin_unlock(&q->busylock);
     return rc;
 }
@@ -4398,7 +4421,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
         return skb;
 
     bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
-    if (*pt_prev) {
+    if (unlikely(*pt_prev)) {
         *ret = deliver_skb(skb, *pt_prev, orig_dev);
         *pt_prev = NULL;
     }
@@ -4591,6 +4614,32 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(dev_pick_tx_zero);
 
+int sk_tx_queue_get(const struct sock *sk)
+{
+    int resel, val;
+
+    if (!sk)
+        return -1;
+    /* Paired with WRITE_ONCE() in sk_tx_queue_clear()
+     * and sk_tx_queue_set().
+     */
+    val = READ_ONCE(sk->sk_tx_queue_mapping);
+
+    if (val == NO_QUEUE_MAPPING)
+        return -1;
+
+    if (!sk_fullsock(sk))
+        return val;
+
+    resel = READ_ONCE(sock_net(sk)->core.sysctl_txq_reselection);
+    if (resel && time_is_before_jiffies(
+            READ_ONCE(sk->sk_tx_queue_mapping_jiffies) + resel))
+        return -1;
+
+    return val;
+}
+EXPORT_SYMBOL(sk_tx_queue_get);
+
 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
            struct net_device *sb_dev)
 {
@@ -4606,8 +4655,7 @@ u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
         if (new_index < 0)
             new_index = skb_tx_hash(dev, sb_dev, skb);
 
-        if (queue_index != new_index && sk &&
-            sk_fullsock(sk) &&
+        if (sk && sk_fullsock(sk) &&
             rcu_access_pointer(sk->sk_dst_cache))
             sk_tx_queue_set(sk, new_index);
@@ -5202,14 +5250,15 @@ void kick_defer_list_purge(unsigned int cpu)
 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
 #endif
 
-static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
+static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen,
+               int max_backlog)
 {
 #ifdef CONFIG_NET_FLOW_LIMIT
-    struct sd_flow_limit *fl;
-    struct softnet_data *sd;
     unsigned int old_flow, new_flow;
+    const struct softnet_data *sd;
+    struct sd_flow_limit *fl;
 
-    if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
+    if (likely(qlen < (max_backlog >> 1)))
         return false;
 
     sd = this_cpu_ptr(&softnet_data);
@@ -5254,19 +5303,19 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
     u32 tail;
 
     reason = SKB_DROP_REASON_DEV_READY;
-    if (!netif_running(skb->dev))
+    if (unlikely(!netif_running(skb->dev)))
         goto bad_dev;
 
-    reason = SKB_DROP_REASON_CPU_BACKLOG;
     sd = &per_cpu(softnet_data, cpu);
 
     qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
     max_backlog = READ_ONCE(net_hotdata.max_backlog);
-    if (unlikely(qlen > max_backlog))
+    if (unlikely(qlen > max_backlog) ||
+        skb_flow_limit(skb, qlen, max_backlog))
         goto cpu_backlog_drop;
 
     backlog_lock_irq_save(sd, &flags);
     qlen = skb_queue_len(&sd->input_pkt_queue);
-    if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) {
+    if (likely(qlen <= max_backlog)) {
         if (!qlen) {
             /* Schedule NAPI for backlog device. We can use
              * non atomic operation as we own the queue lock.
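Editor's note: a minimal userspace sketch of the lock-free hand-off that the reworked __dev_xmit_skb() above uses in place of the old busylock heuristic. Producers CAS their node onto a shared singly linked list; only the producer that observed an empty list detaches the whole batch and processes it, reversing it to FIFO order first, mirroring llist_del_all() + llist_reverse_order(). All identifiers are invented, and the q->defer_count queue-limit accounting is omitted for brevity.

/* Illustrative only. Build: cc -O2 -pthread -o defer defer.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int val; };

static _Atomic(struct node *) defer_head;

static void drain(struct node *first)
{
    /* Reverse the LIFO segment to FIFO order, then process it. */
    struct node *rev = NULL;

    while (first) {
        struct node *n = first->next;
        first->next = rev;
        rev = first;
        first = n;
    }
    while (rev) {
        struct node *n = rev->next;
        printf("tx %d\n", rev->val);
        free(rev);
        rev = n;
    }
}

static void enqueue(int val)
{
    struct node *n = malloc(sizeof(*n));
    struct node *first = atomic_load(&defer_head);

    n->val = val;
    do {
        n->next = first;
    } while (!atomic_compare_exchange_weak(&defer_head, &first, n));

    if (first)  /* list was non-empty: the earlier producer drains */
        return;

    /* We pushed onto an empty list: detach everything and process it. */
    drain(atomic_exchange(&defer_head, NULL));
}

static void *producer(void *arg)
{
    for (int i = 0; i < 1000; i++)
        enqueue((int)(long)arg * 1000 + i);
    return NULL;
}

int main(void)
{
    pthread_t t[4];

    for (long i = 0; i < 4; i++)
        pthread_create(&t[i], NULL, producer, (void *)i);
    for (int i = 0; i < 4; i++)
        pthread_join(t[i], NULL);
    drain(atomic_exchange(&defer_head, NULL)); /* safety net */
    return 0;
}

Each drainer only ever touches nodes it detached itself, so no further locking is needed in this toy; the kernel version additionally serializes the actual qdisc work under the root lock.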
@@ -5287,6 +5336,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
     backlog_unlock_irq_restore(sd, &flags);
 
 cpu_backlog_drop:
+    reason = SKB_DROP_REASON_CPU_BACKLOG;
     numa_drop_add(&sd->drop_counters, 1);
 bad_dev:
     dev_core_stats_rx_dropped_inc(skb->dev);
@@ -5833,7 +5883,7 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
     if (nf_hook_ingress_active(skb)) {
         int ingress_retval;
 
-        if (*pt_prev) {
+        if (unlikely(*pt_prev)) {
             *ret = deliver_skb(skb, *pt_prev, orig_dev);
             *pt_prev = NULL;
         }
@@ -5910,13 +5960,13 @@ another_round:
 
     list_for_each_entry_rcu(ptype, &dev_net_rcu(skb->dev)->ptype_all,
                 list) {
-        if (pt_prev)
+        if (unlikely(pt_prev))
             ret = deliver_skb(skb, pt_prev, orig_dev);
         pt_prev = ptype;
     }
 
     list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
-        if (pt_prev)
+        if (unlikely(pt_prev))
             ret = deliver_skb(skb, pt_prev, orig_dev);
         pt_prev = ptype;
     }
@@ -5947,7 +5997,7 @@ skip_classify:
     }
 
     if (skb_vlan_tag_present(skb)) {
-        if (pt_prev) {
+        if (unlikely(pt_prev)) {
             ret = deliver_skb(skb, pt_prev, orig_dev);
             pt_prev = NULL;
         }
@@ -5959,7 +6009,7 @@ skip_classify:
 
     rx_handler = rcu_dereference(skb->dev->rx_handler);
     if (rx_handler) {
-        if (pt_prev) {
+        if (unlikely(pt_prev)) {
             ret = deliver_skb(skb, pt_prev, orig_dev);
             pt_prev = NULL;
         }
@@ -6732,6 +6782,7 @@ static void skb_defer_free_flush(void)
         free_list = llist_del_all(&sdn->defer_list);
 
         llist_for_each_entry_safe(skb, next, free_list, ll_node) {
+            prefetch(next);
             napi_consume_skb(skb, 1);
         }
     }
@@ -7039,7 +7090,8 @@ static void napi_stop_kthread(struct napi_struct *napi)
          */
        if ((val & NAPIF_STATE_SCHED_THREADED) ||
            !(val & NAPIF_STATE_SCHED)) {
-            new = val & (~NAPIF_STATE_THREADED);
+            new = val & (~(NAPIF_STATE_THREADED |
+                       NAPIF_STATE_THREADED_BUSY_POLL));
         } else {
             msleep(20);
             continue;
@@ -7063,6 +7115,16 @@ static void napi_stop_kthread(struct napi_struct *napi)
     napi->thread = NULL;
 }
 
+static void napi_set_threaded_state(struct napi_struct *napi,
+                    enum netdev_napi_threaded threaded_mode)
+{
+    bool threaded = threaded_mode != NETDEV_NAPI_THREADED_DISABLED;
+    bool busy_poll = threaded_mode == NETDEV_NAPI_THREADED_BUSY_POLL;
+
+    assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
+    assign_bit(NAPI_STATE_THREADED_BUSY_POLL, &napi->state, busy_poll);
+}
+
 int napi_set_threaded(struct napi_struct *napi,
               enum netdev_napi_threaded threaded)
 {
@@ -7089,7 +7151,7 @@ int napi_set_threaded(struct napi_struct *napi,
     } else {
         /* Make sure kthread is created before THREADED bit is set.
          */
         smp_mb__before_atomic();
-        assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
+        napi_set_threaded_state(napi, threaded);
     }
 
     return 0;
@@ -7481,7 +7543,9 @@ void napi_disable_locked(struct napi_struct *n)
         }
 
         new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
-        new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
+        new &= ~(NAPIF_STATE_THREADED |
+             NAPIF_STATE_THREADED_BUSY_POLL |
+             NAPIF_STATE_PREFER_BUSY_POLL);
     } while (!try_cmpxchg(&n->state, &val, new));
 
     hrtimer_cancel(&n->timer);
@@ -7693,7 +7757,7 @@ static int napi_thread_wait(struct napi_struct *napi)
     return -1;
 }
 
-static void napi_threaded_poll_loop(struct napi_struct *napi)
+static void napi_threaded_poll_loop(struct napi_struct *napi, bool busy_poll)
 {
     struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
     struct softnet_data *sd;
@@ -7722,22 +7786,47 @@ static void napi_threaded_poll_loop(struct napi_struct *napi)
         }
         skb_defer_free_flush();
         bpf_net_ctx_clear(bpf_net_ctx);
+
+        /* When busy poll is enabled, the old packets are not flushed in
+         * napi_complete_done. So flush them here.
+         */
+        if (busy_poll)
+            gro_flush_normal(&napi->gro, HZ >= 1000);
         local_bh_enable();
 
+        /* Call cond_resched here to avoid watchdog warnings. */
+        if (repoll || busy_poll) {
+            rcu_softirq_qs_periodic(last_qs);
+            cond_resched();
+        }
+
         if (!repoll)
             break;
-
-        rcu_softirq_qs_periodic(last_qs);
-        cond_resched();
     }
 }
 
 static int napi_threaded_poll(void *data)
 {
     struct napi_struct *napi = data;
+    bool want_busy_poll;
+    bool in_busy_poll;
+    unsigned long val;
+
+    while (!napi_thread_wait(napi)) {
+        val = READ_ONCE(napi->state);
+
+        want_busy_poll = val & NAPIF_STATE_THREADED_BUSY_POLL;
+        in_busy_poll = val & NAPIF_STATE_IN_BUSY_POLL;
 
-    while (!napi_thread_wait(napi))
-        napi_threaded_poll_loop(napi);
+        if (unlikely(val & NAPIF_STATE_DISABLE))
+            want_busy_poll = false;
+
+        if (want_busy_poll != in_busy_poll)
+            assign_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state,
+                   want_busy_poll);
+
+        napi_threaded_poll_loop(napi, want_busy_poll);
+    }
 
     return 0;
 }
@@ -9885,7 +9974,7 @@ DECLARE_RWSEM(dev_addr_sem);
 /* "sa" is a true struct sockaddr with limited "sa_data" member. */
 int netif_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
 {
-    size_t size = sizeof(sa->sa_data_min);
+    size_t size = sizeof(sa->sa_data);
     struct net_device *dev;
     int ret = 0;
@@ -12646,6 +12735,94 @@ netdev_features_t netdev_increment_features(netdev_features_t all,
 }
 EXPORT_SYMBOL(netdev_increment_features);
 
+/**
+ * netdev_compute_master_upper_features - compute feature from lowers
+ * @dev: the upper device
+ * @update_header: whether to update upper device's header_len/headroom/tailroom
+ *
+ * Recompute the upper device's feature based on all lower devices.
+ */
+void netdev_compute_master_upper_features(struct net_device *dev, bool update_header)
+{
+    unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
+    netdev_features_t gso_partial_features = MASTER_UPPER_DEV_GSO_PARTIAL_FEATURES;
+    netdev_features_t xfrm_features = MASTER_UPPER_DEV_XFRM_FEATURES;
+    netdev_features_t mpls_features = MASTER_UPPER_DEV_MPLS_FEATURES;
+    netdev_features_t vlan_features = MASTER_UPPER_DEV_VLAN_FEATURES;
+    netdev_features_t enc_features = MASTER_UPPER_DEV_ENC_FEATURES;
+    unsigned short max_header_len = ETH_HLEN;
+    unsigned int tso_max_size = TSO_MAX_SIZE;
+    unsigned short max_headroom = 0;
+    unsigned short max_tailroom = 0;
+    u16 tso_max_segs = TSO_MAX_SEGS;
+    struct net_device *lower_dev;
+    struct list_head *iter;
+
+    mpls_features = netdev_base_features(mpls_features);
+    vlan_features = netdev_base_features(vlan_features);
+    enc_features = netdev_base_features(enc_features);
+
+    netdev_for_each_lower_dev(dev, lower_dev, iter) {
+        gso_partial_features = netdev_increment_features(gso_partial_features,
+                                 lower_dev->gso_partial_features,
+                                 MASTER_UPPER_DEV_GSO_PARTIAL_FEATURES);
+
+        vlan_features = netdev_increment_features(vlan_features,
+                              lower_dev->vlan_features,
+                              MASTER_UPPER_DEV_VLAN_FEATURES);
+
+        enc_features = netdev_increment_features(enc_features,
+                             lower_dev->hw_enc_features,
+                             MASTER_UPPER_DEV_ENC_FEATURES);
+
+        if (IS_ENABLED(CONFIG_XFRM_OFFLOAD))
+            xfrm_features = netdev_increment_features(xfrm_features,
+                                  lower_dev->hw_enc_features,
+                                  MASTER_UPPER_DEV_XFRM_FEATURES);
+
+        mpls_features = netdev_increment_features(mpls_features,
+                              lower_dev->mpls_features,
+                              MASTER_UPPER_DEV_MPLS_FEATURES);
+
+        dst_release_flag &= lower_dev->priv_flags;
+
+        if (update_header) {
+            max_header_len = max(max_header_len, lower_dev->hard_header_len);
+            max_headroom = max(max_headroom, lower_dev->needed_headroom);
+            max_tailroom = max(max_tailroom, lower_dev->needed_tailroom);
+        }
+
+        tso_max_size = min(tso_max_size, lower_dev->tso_max_size);
+        tso_max_segs = min(tso_max_segs, lower_dev->tso_max_segs);
+    }
+
+    dev->gso_partial_features = gso_partial_features;
+    dev->vlan_features = vlan_features;
+    dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+                   NETIF_F_HW_VLAN_CTAG_TX |
+                   NETIF_F_HW_VLAN_STAG_TX;
+    if (IS_ENABLED(CONFIG_XFRM_OFFLOAD))
+        dev->hw_enc_features |= xfrm_features;
+    dev->mpls_features = mpls_features;
+
+    dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+    if ((dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
+        dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
+        dev->priv_flags |= IFF_XMIT_DST_RELEASE;
+
+    if (update_header) {
+        dev->hard_header_len = max_header_len;
+        dev->needed_headroom = max_headroom;
+        dev->needed_tailroom = max_tailroom;
+    }
+
+    netif_set_tso_max_segs(dev, tso_max_segs);
+    netif_set_tso_max_size(dev, tso_max_size);
+
+    netdev_change_features(dev);
+}
+EXPORT_SYMBOL(netdev_compute_master_upper_features);
+
 static struct hlist_head * __net_init netdev_create_hash(void)
 {
     int i;
@@ -12959,7 +13136,7 @@ static void run_backlog_napi(unsigned int cpu)
 {
     struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
 
-    napi_threaded_poll_loop(&sd->backlog);
+    napi_threaded_poll_loop(&sd->backlog, false);
 }
 
 static void backlog_napi_setup(unsigned int cpu)
diff --git a/net/core/dev.h b/net/core/dev.h
index 900880e8b5b4..4d872a79bafb 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -317,6 +317,9 @@ static inline void napi_set_irq_suspend_timeout(struct napi_struct *n,
 static inline enum netdev_napi_threaded napi_get_threaded(struct napi_struct *n)
 {
+    if (test_bit(NAPI_STATE_THREADED_BUSY_POLL, &n->state))
+        return NETDEV_NAPI_THREADED_BUSY_POLL;
+
     if (test_bit(NAPI_STATE_THREADED, &n->state))
         return NETDEV_NAPI_THREADED_ENABLED;
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index ad54b12d4b4c..b3ce0fb24a69 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -596,7 +596,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data,
         if (ifr->ifr_hwaddr.sa_family != dev->type)
             return -EINVAL;
         memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
-               min(sizeof(ifr->ifr_hwaddr.sa_data_min),
+               min(sizeof(ifr->ifr_hwaddr.sa_data),
                (size_t)dev->addr_len));
         netdev_lock_ops(dev);
         call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
diff --git a/net/core/devmem.h b/net/core/devmem.h
index 101150d761af..0b43a648cd2e 100644
--- a/net/core/devmem.h
+++ b/net/core/devmem.h
@@ -94,7 +94,6 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
 int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
                     struct net_devmem_dmabuf_binding *binding,
                     struct netlink_ext_ack *extack);
-void net_devmem_bind_tx_release(struct sock *sk);
 
 static inline struct dmabuf_genpool_chunk_owner *
 net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
diff --git a/net/core/filter.c b/net/core/filter.c
index fa06c5a08e22..4124becf8604 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3253,11 +3253,11 @@ static void bpf_skb_change_protocol(struct sk_buff *skb, u16 proto)
 
 static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
 {
-    /* Caller already did skb_cow() with len as headroom,
+    /* Caller already did skb_cow() with meta_len+len as headroom,
     * so no need to do it here.
     */
    skb_push(skb, len);
-    memmove(skb->data, skb->data + len, off);
+    skb_postpush_data_move(skb, len, off);
    memset(skb->data + off, 0, len);
 
    /* No skb_postpush_rcsum(skb, skb->data + off, len)
@@ -3281,7 +3281,7 @@ static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
    old_data = skb->data;
    __skb_pull(skb, len);
    skb_postpull_rcsum(skb, old_data + off, len);
-    memmove(skb->data, old_data, off);
+    skb_postpull_data_move(skb, len, off);
 
    return 0;
 }
@@ -3326,10 +3326,11 @@ static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
 static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
 {
    const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
+    const u8 meta_len = skb_metadata_len(skb);
    u32 off = skb_mac_header_len(skb);
    int ret;
 
-    ret = skb_cow(skb, len_diff);
+    ret = skb_cow(skb, meta_len + len_diff);
    if (unlikely(ret < 0))
        return ret;
@@ -3489,6 +3490,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
    u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT;
    bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
    u16 mac_len = 0, inner_net = 0, inner_trans = 0;
+    const u8 meta_len = skb_metadata_len(skb);
    unsigned int gso_type = SKB_GSO_DODGY;
    int ret;
@@ -3499,7 +3501,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
        return -ENOTSUPP;
    }
 
-    ret = skb_cow_head(skb, len_diff);
+    ret = skb_cow_head(skb, meta_len + len_diff);
    if (unlikely(ret < 0))
        return ret;
@@ -3873,6 +3875,7 @@ static const struct bpf_func_proto sk_skb_change_tail_proto = {
 static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
                    u64 flags)
 {
+    const u8 meta_len = skb_metadata_len(skb);
    u32 max_len = BPF_SKB_MAX_LEN;
    u32 new_len = skb->len + head_room;
    int ret;
@@ -3882,7 +3885,7 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
             new_len < skb->len))
        return -EINVAL;
 
-    ret = skb_cow(skb, head_room);
+    ret = skb_cow(skb, meta_len + head_room);
    if (likely(!ret)) {
        /* Idea for this helper is that we currently only
         * allow to expand on mac header. This means that
@@ -3894,6 +3897,7 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
         * for redirection into L2 device.
         */
        __skb_push(skb, head_room);
+        skb_postpush_data_move(skb, head_room, 0);
        memset(skb->data, 0, head_room);
        skb_reset_mac_header(skb);
        skb_reset_mac_len(skb);
@@ -5734,6 +5738,77 @@ static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
    .arg5_type = ARG_CONST_SIZE,
 };
 
+static int sk_bpf_set_get_bypass_prot_mem(struct sock *sk,
+                      char *optval, int optlen,
+                      bool getopt)
+{
+    int val;
+
+    if (optlen != sizeof(int))
+        return -EINVAL;
+
+    if (!sk_has_account(sk))
+        return -EOPNOTSUPP;
+
+    if (getopt) {
+        *(int *)optval = sk->sk_bypass_prot_mem;
+        return 0;
+    }
+
+    val = *(int *)optval;
+    if (val < 0 || val > 1)
+        return -EINVAL;
+
+    sk->sk_bypass_prot_mem = val;
+    return 0;
+}
+
+BPF_CALL_5(bpf_sock_create_setsockopt, struct sock *, sk, int, level,
+       int, optname, char *, optval, int, optlen)
+{
+    if (level == SOL_SOCKET && optname == SK_BPF_BYPASS_PROT_MEM)
+        return sk_bpf_set_get_bypass_prot_mem(sk, optval, optlen, false);
+
+    return __bpf_setsockopt(sk, level, optname, optval, optlen);
+}
+
+static const struct bpf_func_proto bpf_sock_create_setsockopt_proto = {
+    .func = bpf_sock_create_setsockopt,
+    .gpl_only = false,
+    .ret_type = RET_INTEGER,
+    .arg1_type = ARG_PTR_TO_CTX,
+    .arg2_type = ARG_ANYTHING,
+    .arg3_type = ARG_ANYTHING,
+    .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
+    .arg5_type = ARG_CONST_SIZE,
+};
+
+BPF_CALL_5(bpf_sock_create_getsockopt, struct sock *, sk, int, level,
+       int, optname, char *, optval, int, optlen)
+{
+    if (level == SOL_SOCKET && optname == SK_BPF_BYPASS_PROT_MEM) {
+        int err = sk_bpf_set_get_bypass_prot_mem(sk, optval, optlen, true);
+
+        if (err)
+            memset(optval, 0, optlen);
+
+        return err;
+    }
+
+    return __bpf_getsockopt(sk, level, optname, optval, optlen);
+}
+
+static const struct bpf_func_proto bpf_sock_create_getsockopt_proto = {
+    .func = bpf_sock_create_getsockopt,
+    .gpl_only = false,
+    .ret_type = RET_INTEGER,
+    .arg1_type = ARG_PTR_TO_CTX,
+    .arg2_type = ARG_ANYTHING,
+    .arg3_type = ARG_ANYTHING,
+    .arg4_type = ARG_PTR_TO_UNINIT_MEM,
+    .arg5_type = ARG_CONST_SIZE,
+};
+
 BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
        int, level, int, optname, char *, optval, int, optlen)
 {
@@ -5908,7 +5983,7 @@ BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
            return err;
        if (((struct sockaddr_in *)addr)->sin_port == htons(0))
            flags |= BIND_FORCE_ADDRESS_NO_PORT;
-        return __inet_bind(sk, addr, addr_len, flags);
+        return __inet_bind(sk, (struct sockaddr_unsized *)addr, addr_len, flags);
 #if IS_ENABLED(CONFIG_IPV6)
    } else if (addr->sa_family == AF_INET6) {
        if (addr_len < SIN6_LEN_RFC2133)
@@ -5918,7 +5993,8 @@ BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
        /* ipv6_bpf_stub cannot be NULL, since it's called from
         * bpf_cgroup_inet6_connect hook and ipv6 is already loaded
         */
-        return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, flags);
+        return ipv6_bpf_stub->inet6_bind(sk, (struct sockaddr_unsized *)addr,
+                         addr_len, flags);
 #endif /* CONFIG_IPV6 */
    }
 #endif /* CONFIG_INET */
@@ -8063,6 +8139,20 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        return &bpf_sk_storage_get_cg_sock_proto;
    case BPF_FUNC_ktime_get_coarse_ns:
        return &bpf_ktime_get_coarse_ns_proto;
+    case BPF_FUNC_setsockopt:
+        switch (prog->expected_attach_type) {
+        case BPF_CGROUP_INET_SOCK_CREATE:
+            return &bpf_sock_create_setsockopt_proto;
+        default:
+            return NULL;
+        }
+    case BPF_FUNC_getsockopt:
+        switch (prog->expected_attach_type) {
+        case BPF_CGROUP_INET_SOCK_CREATE:
+            return &bpf_sock_create_getsockopt_proto;
+        default:
+            return NULL;
+        }
    default:
        return bpf_base_func_proto(func_id, prog);
    }
@@ -12016,6 +12106,18 @@ void *bpf_skb_meta_pointer(struct sk_buff *skb, u32 offset)
    return skb_metadata_end(skb) - skb_metadata_len(skb) + offset;
 }
 
+int __bpf_skb_meta_store_bytes(struct sk_buff *skb, u32 offset,
+                   const void *from, u32 len, u64 flags)
+{
+    if (unlikely(flags))
+        return -EINVAL;
+    if (unlikely(bpf_try_make_writable(skb, 0)))
+        return -EFAULT;
+
+    memmove(bpf_skb_meta_pointer(skb, offset), from, len);
+    return 0;
+}
+
 __bpf_kfunc_start_defs();
 
 __bpf_kfunc int bpf_dynptr_from_skb(struct __sk_buff *s, u64 flags,
                    struct bpf_dynptr *ptr__uninit)
@@ -12043,9 +12145,6 @@ __bpf_kfunc int bpf_dynptr_from_skb(struct __sk_buff *s, u64 flags,
 * XDP context with bpf_xdp_adjust_meta(). Serves as an alternative to
 * &__sk_buff->data_meta.
 *
- * If passed @skb_ is a clone which shares the data with the original, the
- * dynptr will be read-only. This limitation may be lifted in the future.
- *
 * Return:
 * * %0       - dynptr ready to use
 * * %-EINVAL - invalid flags, dynptr set to null
@@ -12063,9 +12162,6 @@ __bpf_kfunc int bpf_dynptr_from_skb_meta(struct __sk_buff *skb_, u64 flags,
    bpf_dynptr_init(ptr, skb, BPF_DYNPTR_TYPE_SKB_META, 0,
            skb_metadata_len(skb));
 
-    if (skb_cloned(skb))
-        bpf_dynptr_set_rdonly(ptr);
-
    return 0;
 }
diff --git a/net/core/hotdata.c b/net/core/hotdata.c
index 95d0a4df1006..dddd5c287cf0 100644
--- a/net/core/hotdata.c
+++ b/net/core/hotdata.c
@@ -20,7 +20,7 @@ struct net_hotdata net_hotdata __cacheline_aligned = {
    .dev_tx_weight = 64,
    .dev_rx_weight = 64,
    .sysctl_max_skb_frags = MAX_SKB_FRAGS,
-    .sysctl_skb_defer_max = 64,
+    .sysctl_skb_defer_max = 128,
    .sysctl_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE
 };
 EXPORT_SYMBOL(net_hotdata);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index bddfa389effa..96a3b1a93252 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -81,7 +81,7 @@ static struct hlist_head *neigh_get_dev_table(struct net_device *dev, int family
 }
 
 /*
-   Neighbour hash table buckets are protected with rwlock tbl->lock.
+   Neighbour hash table buckets are protected with tbl->lock.
 
   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
@@ -149,7 +149,7 @@ static void neigh_update_gc_list(struct neighbour *n)
 {
    bool on_gc_list, exempt_from_gc;
 
-    write_lock_bh(&n->tbl->lock);
+    spin_lock_bh(&n->tbl->lock);
    write_lock(&n->lock);
    if (n->dead)
        goto out;
@@ -172,14 +172,14 @@ static void neigh_update_gc_list(struct neighbour *n)
    }
 out:
    write_unlock(&n->lock);
-    write_unlock_bh(&n->tbl->lock);
+    spin_unlock_bh(&n->tbl->lock);
 }
 
 static void neigh_update_managed_list(struct neighbour *n)
 {
    bool on_managed_list, add_to_managed;
 
-    write_lock_bh(&n->tbl->lock);
+    spin_lock_bh(&n->tbl->lock);
    write_lock(&n->lock);
    if (n->dead)
        goto out;
@@ -193,7 +193,7 @@ static void neigh_update_managed_list(struct neighbour *n)
        list_add_tail(&n->managed_list, &n->tbl->managed_list);
 out:
    write_unlock(&n->lock);
-    write_unlock_bh(&n->tbl->lock);
+    spin_unlock_bh(&n->tbl->lock);
 }
 
 static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
@@ -263,7 +263,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 
    NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 
-    write_lock_bh(&tbl->lock);
+    spin_lock_bh(&tbl->lock);
 
    list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
        if (refcount_read(&n->refcnt) == 1) {
@@ -292,7 +292,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
    WRITE_ONCE(tbl->last_flush, jiffies);
 unlock:
-    write_unlock_bh(&tbl->lock);
+    spin_unlock_bh(&tbl->lock);
 
    return shrunk;
 }
@@ -454,23 +454,23 @@ static void neigh_flush_table(struct neigh_table *tbl)
 
 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
 {
-    write_lock_bh(&tbl->lock);
+    spin_lock_bh(&tbl->lock);
    neigh_flush_dev(tbl, dev, false);
-    write_unlock_bh(&tbl->lock);
+    spin_unlock_bh(&tbl->lock);
 }
 EXPORT_SYMBOL(neigh_changeaddr);
 
 static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
              bool skip_perm)
 {
-    write_lock_bh(&tbl->lock);
+    spin_lock_bh(&tbl->lock);
    if (likely(dev)) {
        neigh_flush_dev(tbl, dev, skip_perm);
    } else {
        DEBUG_NET_WARN_ON_ONCE(skip_perm);
        neigh_flush_table(tbl);
    }
-    write_unlock_bh(&tbl->lock);
+    spin_unlock_bh(&tbl->lock);
 
    pneigh_ifdown(tbl, dev, skip_perm);
    pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
@@ -687,7 +687,7 @@ ___neigh_create(struct neigh_table *tbl, const void *pkey,
    n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
 
-    write_lock_bh(&tbl->lock);
+    spin_lock_bh(&tbl->lock);
    nht = rcu_dereference_protected(tbl->nht,
                    lockdep_is_held(&tbl->lock));
@@ -722,13 +722,13 @@ ___neigh_create(struct neigh_table *tbl, const void *pkey,
    hlist_add_head_rcu(&n->dev_list,
               neigh_get_dev_table(dev, tbl->family));
 
-    write_unlock_bh(&tbl->lock);
+    spin_unlock_bh(&tbl->lock);
    neigh_dbg(2, "neigh %p is created\n", n);
    rc = n;
 out:
    return rc;
 out_tbl_unlock:
-    write_unlock_bh(&tbl->lock);
+    spin_unlock_bh(&tbl->lock);
 out_neigh_release:
    if (!exempt_from_gc)
        atomic_dec(&tbl->gc_entries);
@@ -982,7 +982,7 @@ static void neigh_periodic_work(struct work_struct *work)
 
    NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 
-    write_lock_bh(&tbl->lock);
+    spin_lock_bh(&tbl->lock);
    nht = rcu_dereference_protected(tbl->nht,
                    lockdep_is_held(&tbl->lock));
@@ -995,8 +995,7 @@ static void neigh_periodic_work(struct work_struct *work)
        WRITE_ONCE(tbl->last_rand, jiffies);
        list_for_each_entry(p, &tbl->parms_list, list)
-            p->reachable_time =
-                neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+            neigh_set_reach_time(p);
    }
 
    if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
@@ -1037,9 +1036,9 @@ static void neigh_periodic_work(struct work_struct *work)
         * It's fine to release lock here, even if hash table
         * grows while we are preempted.
         */
-        write_unlock_bh(&tbl->lock);
+        spin_unlock_bh(&tbl->lock);
        cond_resched();
-        write_lock_bh(&tbl->lock);
+        spin_lock_bh(&tbl->lock);
        nht = rcu_dereference_protected(tbl->nht,
                        lockdep_is_held(&tbl->lock));
    }
@@ -1050,7 +1049,7 @@ out:
     */
    queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
               NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
-    write_unlock_bh(&tbl->lock);
+    spin_unlock_bh(&tbl->lock);
 }
 
 static __inline__ int neigh_max_probes(struct neighbour *n)
@@ -1642,12 +1641,12 @@ static void neigh_managed_work(struct work_struct *work)
                           managed_work.work);
    struct neighbour *neigh;
 
-    write_lock_bh(&tbl->lock);
+    spin_lock_bh(&tbl->lock);
    list_for_each_entry(neigh, &tbl->managed_list, managed_list)
        neigh_event_send_probe(neigh, NULL, false);
    queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
               NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
-    write_unlock_bh(&tbl->lock);
+    spin_unlock_bh(&tbl->lock);
 }
 
 static void neigh_proxy_process(struct timer_list *t)
@@ -1749,8 +1748,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
    if (p) {
        p->tbl = tbl;
        refcount_set(&p->refcnt, 1);
-        p->reachable_time =
-                neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+        neigh_set_reach_time(p);
        p->qlen = 0;
        netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
        p->dev = dev;
@@ -1763,9 +1761,9 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
            return NULL;
        }
 
-        write_lock_bh(&tbl->lock);
-        list_add(&p->list, &tbl->parms.list);
-        write_unlock_bh(&tbl->lock);
+        spin_lock_bh(&tbl->lock);
+        list_add_rcu(&p->list, &tbl->parms.list);
+        spin_unlock_bh(&tbl->lock);
 
        neigh_parms_data_state_cleanall(p);
    }
@@ -1785,10 +1783,12 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
 {
    if (!parms || parms == &tbl->parms)
        return;
-    write_lock_bh(&tbl->lock);
-    list_del(&parms->list);
+
+    spin_lock_bh(&tbl->lock);
+    list_del_rcu(&parms->list);
    parms->dead = 1;
-    write_unlock_bh(&tbl->lock);
+    spin_unlock_bh(&tbl->lock);
+
    netdev_put(parms->dev, &parms->dev_tracker);
    call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
 }
@@ -1810,8 +1810,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
    list_add(&tbl->parms.list, &tbl->parms_list);
    write_pnet(&tbl->parms.net, &init_net);
    refcount_set(&tbl->parms.refcnt, 1);
-    tbl->parms.reachable_time =
-            neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
+    neigh_set_reach_time(&tbl->parms);
    tbl->parms.qlen = 0;
 
    tbl->stats = alloc_percpu(struct neigh_statistics);
@@ -1838,7 +1837,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
    else
        WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
 
-    rwlock_init(&tbl->lock);
+    spin_lock_init(&tbl->lock);
    mutex_init(&tbl->phash_lock);
    INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
@@ -1981,10 +1980,10 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
    err = __neigh_update(neigh, NULL, NUD_FAILED,
                 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
                 NETLINK_CB(skb).portid, extack);
-    write_lock_bh(&tbl->lock);
+    spin_lock_bh(&tbl->lock);
    neigh_release(neigh);
    neigh_remove_one(neigh);
-    write_unlock_bh(&tbl->lock);
+    spin_unlock_bh(&tbl->lock);
 
 out:
    return err;
@@ -2179,7 +2178,7 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
        return -ENOBUFS;
 
    if ((parms->dev &&
-         nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
+         nla_put_u32(skb, NDTPA_IFINDEX, READ_ONCE(parms->dev->ifindex))) ||
        nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
        nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
            NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
@@ -2194,7 +2193,7 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
            NEIGH_VAR(parms, MCAST_PROBES)) ||
        nla_put_u32(skb, NDTPA_MCAST_REPROBES,
            NEIGH_VAR(parms, MCAST_REPROBES)) ||
-        nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
+        nla_put_msecs(skb, NDTPA_REACHABLE_TIME, READ_ONCE(parms->reachable_time),
              NDTPA_PAD) ||
        nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
              NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
@@ -2231,8 +2230,6 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
        return -EMSGSIZE;
 
    ndtmsg = nlmsg_data(nlh);
-
-    read_lock_bh(&tbl->lock);
    ndtmsg->ndtm_family = tbl->family;
    ndtmsg->ndtm_pad1   = 0;
    ndtmsg->ndtm_pad2   = 0;
@@ -2258,11 +2255,9 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
            .ndtc_proxy_qlen = READ_ONCE(tbl->proxy_queue.qlen),
        };
 
-        rcu_read_lock();
        nht = rcu_dereference(tbl->nht);
        ndc.ndtc_hash_rnd = nht->hash_rnd[0];
        ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
-        rcu_read_unlock();
 
        if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
            goto nla_put_failure;
@@ -2300,12 +2295,10 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
    if (neightbl_fill_parms(skb, &tbl->parms) < 0)
        goto nla_put_failure;
 
-    read_unlock_bh(&tbl->lock);
    nlmsg_end(skb, nlh);
    return 0;
 
 nla_put_failure:
-    read_unlock_bh(&tbl->lock);
    nlmsg_cancel(skb, nlh);
    return -EMSGSIZE;
 }
@@ -2324,8 +2317,6 @@ static int neightbl_fill_param_info(struct sk_buff *skb,
        return -EMSGSIZE;
 
    ndtmsg = nlmsg_data(nlh);
-
-    read_lock_bh(&tbl->lock);
    ndtmsg->ndtm_family = tbl->family;
    ndtmsg->ndtm_pad1   = 0;
    ndtmsg->ndtm_pad2   = 0;
@@ -2334,11 +2325,9 @@ static int neightbl_fill_param_info(struct sk_buff *skb,
        neightbl_fill_parms(skb, parms) < 0)
        goto errout;
 
-    read_unlock_bh(&tbl->lock);
    nlmsg_end(skb, nlh);
    return 0;
 errout:
-    read_unlock_bh(&tbl->lock);
    nlmsg_cancel(skb, nlh);
    return -EMSGSIZE;
 }
@@ -2375,9 +2364,9 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
            struct netlink_ext_ack *extack)
 {
    struct net *net = sock_net(skb->sk);
+    struct nlattr *tb[NDTA_MAX + 1];
    struct neigh_table *tbl;
    struct ndtmsg *ndtmsg;
-    struct nlattr *tb[NDTA_MAX+1];
    bool found = false;
    int err, tidx;
@@ -2393,26 +2382,33 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
 
    ndtmsg = nlmsg_data(nlh);
 
+    rcu_read_lock();
+
    for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
-        tbl = rcu_dereference_rtnl(neigh_tables[tidx]);
+        tbl = rcu_dereference(neigh_tables[tidx]);
        if (!tbl)
            continue;
+
        if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
            continue;
+
        if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
            found = true;
            break;
        }
    }
 
-    if (!found)
-        return -ENOENT;
+    if (!found) {
+        rcu_read_unlock();
+        err = -ENOENT;
+        goto errout;
+    }
 
    /*
     * We acquire tbl->lock to be nice to the periodic timers and
     * make sure they always see a consistent set of values.
     */
-    write_lock_bh(&tbl->lock);
+    spin_lock_bh(&tbl->lock);
 
    if (tb[NDTA_PARMS]) {
        struct nlattr *tbp[NDTPA_MAX+1];
@@ -2475,8 +2471,7 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
                 * only be effective after the next time neigh_periodic_work
                 * decides to recompute it (can be multiple minutes)
                 */
-                p->reachable_time =
-                    neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+                neigh_set_reach_time(p);
                break;
            case NDTPA_GC_STALETIME:
                NEIGH_VAR_SET(p, GC_STALETIME,
@@ -2532,7 +2527,8 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
    err = 0;
 
 errout_tbl_lock:
-    write_unlock_bh(&tbl->lock);
+    spin_unlock_bh(&tbl->lock);
+    rcu_read_unlock();
 errout:
    return err;
 }
@@ -2579,10 +2575,12 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 
    family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
 
+    rcu_read_lock();
+
    for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
        struct neigh_parms *p;
 
-        tbl = rcu_dereference_rtnl(neigh_tables[tidx]);
+        tbl = rcu_dereference(neigh_tables[tidx]);
        if (!tbl)
            continue;
 
@@ -2596,7 +2594,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 
        nidx = 0;
        p = list_next_entry(&tbl->parms, list);
-        list_for_each_entry_from(p, &tbl->parms_list, list) {
+        list_for_each_entry_from_rcu(p, &tbl->parms_list, list) {
            if (!net_eq(neigh_parms_net(p), net))
                continue;
 
@@ -2616,6 +2614,8 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
        neigh_skip = 0;
    }
 out:
+    rcu_read_unlock();
+
    cb->args[0] = tidx;
    cb->args[1] = nidx;
 
@@ -3127,14 +3127,14 @@ void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void
    rcu_read_lock();
    nht = rcu_dereference(tbl->nht);
 
-    read_lock_bh(&tbl->lock); /* avoid resizes */
+    spin_lock_bh(&tbl->lock); /* avoid resizes */
    for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
        struct neighbour *n;
 
        neigh_for_each_in_bucket(n, &nht->hash_heads[chain])
            cb(n, cookie);
    }
-    read_unlock_bh(&tbl->lock);
+    spin_unlock_bh(&tbl->lock);
    rcu_read_unlock();
 }
 EXPORT_SYMBOL(neigh_for_each);
@@ -3404,7 +3404,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
    rcu_read_lock();
    state->nht = rcu_dereference(tbl->nht);
-    read_lock_bh(&tbl->lock);
+    spin_lock_bh(&tbl->lock);
 
    return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
 }
@@ -3444,7 +3444,7 @@ void neigh_seq_stop(struct seq_file *seq, void *v)
    struct neigh_seq_state *state = seq->private;
    struct neigh_table *tbl = state->tbl;
 
-    read_unlock_bh(&tbl->lock);
+    spin_unlock_bh(&tbl->lock);
    rcu_read_unlock();
 }
 EXPORT_SYMBOL(neigh_seq_stop);
@@ -3721,8 +3721,7 @@ static int neigh_proc_base_reachable_time(const struct ctl_table *ctl, int write
         * only be effective after the next time neigh_periodic_work
         * decides to recompute it
         */
-        p->reachable_time =
-            neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+        neigh_set_reach_time(p);
    }
    return ret;
 }
@@ -3918,8 +3917,10 @@ static const struct rtnl_msg_handler neigh_rtnl_msg_handlers[] __initconst = {
    {.msgtype = RTM_DELNEIGH, .doit = neigh_delete},
    {.msgtype = RTM_GETNEIGH, .doit = neigh_get, .dumpit = neigh_dump_info,
     .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
-    {.msgtype = RTM_GETNEIGHTBL, .dumpit = neightbl_dump_info},
-    {.msgtype = RTM_SETNEIGHTBL, .doit = neightbl_set},
+    {.msgtype = RTM_GETNEIGHTBL, .dumpit = neightbl_dump_info,
+     .flags = RTNL_FLAG_DUMP_UNLOCKED},
+    {.msgtype = RTM_SETNEIGHTBL, .doit = neightbl_set,
+     .flags = RTNL_FLAG_DOIT_UNLOCKED},
 };
 
 static int __init neigh_init(void)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b0e0f22d7b21..adcfef55a66f 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -395,6 +395,7 @@ static __net_init void preinit_net_sysctl(struct net *net)
    net->core.sysctl_optmem_max = 128 * 1024;
    net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
    net->core.sysctl_tstamp_allow_data = 1;
+    net->core.sysctl_txq_reselection = msecs_to_jiffies(1000);
 }
 
 /* init code that must occur even if setup_net() is not called. */
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index e9a2a6f26cb7..ff20435c45d2 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -97,7 +97,7 @@ static const struct nla_policy netdev_napi_set_nl_policy[NETDEV_A_NAPI_THREADED
    [NETDEV_A_NAPI_DEFER_HARD_IRQS] = NLA_POLICY_FULL_RANGE(NLA_U32, &netdev_a_napi_defer_hard_irqs_range),
    [NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT] = { .type = NLA_UINT, },
    [NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT] = { .type = NLA_UINT, },
-    [NETDEV_A_NAPI_THREADED] = NLA_POLICY_MAX(NLA_U32, 1),
+    [NETDEV_A_NAPI_THREADED] = NLA_POLICY_MAX(NLA_U32, 2),
 };
 
 /* NETDEV_CMD_BIND_TX - do */
diff --git a/net/core/netmem_priv.h b/net/core/netmem_priv.h
index cd95394399b4..23175cb2bd86 100644
--- a/net/core/netmem_priv.h
+++ b/net/core/netmem_priv.h
@@ -5,19 +5,19 @@
 
 static inline unsigned long netmem_get_pp_magic(netmem_ref netmem)
 {
-    return __netmem_clear_lsb(netmem)->pp_magic & ~PP_DMA_INDEX_MASK;
+    return netmem_to_nmdesc(netmem)->pp_magic & ~PP_DMA_INDEX_MASK;
 }
 
 static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic)
 {
-    __netmem_clear_lsb(netmem)->pp_magic |= pp_magic;
+    netmem_to_nmdesc(netmem)->pp_magic |= pp_magic;
 }
 
 static inline void netmem_clear_pp_magic(netmem_ref netmem)
 {
-    WARN_ON_ONCE(__netmem_clear_lsb(netmem)->pp_magic & PP_DMA_INDEX_MASK);
+    WARN_ON_ONCE(netmem_to_nmdesc(netmem)->pp_magic & PP_DMA_INDEX_MASK);
 
-    __netmem_clear_lsb(netmem)->pp_magic = 0;
+    netmem_to_nmdesc(netmem)->pp_magic = 0;
 }
 
 static inline bool netmem_is_pp(netmem_ref netmem)
@@ -27,13 +27,13 @@ static inline bool netmem_is_pp(netmem_ref netmem)
 
 static inline void netmem_set_pp(netmem_ref netmem, struct page_pool *pool)
 {
-    __netmem_clear_lsb(netmem)->pp =
pool; } static inline void netmem_set_dma_addr(netmem_ref netmem, unsigned long dma_addr) { - __netmem_clear_lsb(netmem)->dma_addr = dma_addr; + netmem_to_nmdesc(netmem)->dma_addr = dma_addr; } static inline unsigned long netmem_get_dma_index(netmem_ref netmem) @@ -43,7 +43,7 @@ static inline unsigned long netmem_get_dma_index(netmem_ref netmem) if (WARN_ON_ONCE(netmem_is_net_iov(netmem))) return 0; - magic = __netmem_clear_lsb(netmem)->pp_magic; + magic = netmem_to_nmdesc(netmem)->pp_magic; return (magic & PP_DMA_INDEX_MASK) >> PP_DMA_INDEX_SHIFT; } @@ -57,6 +57,6 @@ static inline void netmem_set_dma_index(netmem_ref netmem, return; magic = netmem_get_pp_magic(netmem) | (id << PP_DMA_INDEX_SHIFT); - __netmem_clear_lsb(netmem)->pp_magic = magic; + netmem_to_nmdesc(netmem)->pp_magic = magic; } #endif diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 576d5ec3bb36..b1ed55141d8a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1270,13 +1270,13 @@ static size_t rtnl_dpll_pin_size(const struct net_device *dev) static noinline size_t if_nlmsg_size(const struct net_device *dev, u32 ext_filter_mask) { - return NLMSG_ALIGN(sizeof(struct ifinfomsg)) + size_t size; + + size = NLMSG_ALIGN(sizeof(struct ifinfomsg)) + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */ + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap)) - + nla_total_size(sizeof(struct rtnl_link_stats)) - + nla_total_size_64bit(sizeof(struct rtnl_link_stats64)) + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ + nla_total_size(4) /* IFLA_TXQLEN */ @@ -1329,6 +1329,12 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(2) /* IFLA_HEADROOM */ + nla_total_size(2) /* IFLA_TAILROOM */ + 0; + + if (!(ext_filter_mask & RTEXT_FILTER_SKIP_STATS)) + size += nla_total_size(sizeof(struct rtnl_link_stats)) + + nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); + + return size; } static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) @@ -2123,7 +2129,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, if (rtnl_phys_switch_id_fill(skb, dev)) goto nla_put_failure; - if (rtnl_fill_stats(skb, dev)) + if (!(ext_filter_mask & RTEXT_FILTER_SKIP_STATS) && + rtnl_fill_stats(skb, dev)) goto nla_put_failure; if (rtnl_fill_vf(skb, dev, ext_filter_mask)) diff --git a/net/core/selftests.c b/net/core/selftests.c index 3d79133a91a6..8b81feb82c4a 100644 --- a/net/core/selftests.c +++ b/net/core/selftests.c @@ -14,46 +14,10 @@ #include <net/tcp.h> #include <net/udp.h> -struct net_packet_attrs { - const unsigned char *src; - const unsigned char *dst; - u32 ip_src; - u32 ip_dst; - bool tcp; - u16 sport; - u16 dport; - int timeout; - int size; - int max_size; - u8 id; - u16 queue_mapping; - bool bad_csum; -}; - -struct net_test_priv { - struct net_packet_attrs *packet; - struct packet_type pt; - struct completion comp; - int double_vlan; - int vlan_id; - int ok; -}; - -struct netsfhdr { - __be32 version; - __be64 magic; - u8 id; -} __packed; - static u8 net_test_next_id; -#define NET_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \ - sizeof(struct netsfhdr)) -#define NET_TEST_PKT_MAGIC 0xdeadcafecafedeadULL -#define NET_LB_TIMEOUT msecs_to_jiffies(200) - -static struct sk_buff *net_test_get_skb(struct net_device *ndev, - struct net_packet_attrs *attr) +struct sk_buff *net_test_get_skb(struct net_device *ndev, u8 id, + struct 
net_packet_attrs *attr) { struct sk_buff *skb = NULL; struct udphdr *uhdr = NULL; @@ -142,8 +106,8 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev, shdr = skb_put(skb, sizeof(*shdr)); shdr->version = 0; shdr->magic = cpu_to_be64(NET_TEST_PKT_MAGIC); - attr->id = net_test_next_id; - shdr->id = net_test_next_id++; + attr->id = id; + shdr->id = id; if (attr->size) { void *payload = skb_put(skb, attr->size); @@ -190,6 +154,7 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev, return skb; } +EXPORT_SYMBOL_GPL(net_test_get_skb); static int net_test_loopback_validate(struct sk_buff *skb, struct net_device *ndev, @@ -286,12 +251,13 @@ static int __net_test_loopback(struct net_device *ndev, tpriv->packet = attr; dev_add_pack(&tpriv->pt); - skb = net_test_get_skb(ndev, attr); + skb = net_test_get_skb(ndev, net_test_next_id, attr); if (!skb) { ret = -ENOMEM; goto cleanup; } + net_test_next_id++; ret = dev_direct_xmit(skb, attr->queue_mapping); if (ret < 0) { goto cleanup; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 6be01454f262..f34372666e67 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -81,6 +81,7 @@ #include <net/page_pool/helpers.h> #include <net/psp/types.h> #include <net/dropreason.h> +#include <net/xdp_sock.h> #include <linux/uaccess.h> #include <trace/events/skb.h> @@ -274,6 +275,11 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) } EXPORT_SYMBOL(__netdev_alloc_frag_align); +/* Cache kmem_cache_size(net_hotdata.skbuff_cache) to help the compiler + * remove dead code (and skbuff_cache_size) when CONFIG_KASAN is unset. + */ +static u32 skbuff_cache_size __read_mostly; + static struct sk_buff *napi_skb_cache_get(void) { struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); @@ -293,7 +299,7 @@ static struct sk_buff *napi_skb_cache_get(void) skb = nc->skb_cache[--nc->skb_count]; local_unlock_nested_bh(&napi_alloc_cache.bh_lock); - kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache)); + kasan_mempool_unpoison_object(skb, skbuff_cache_size); return skb; } @@ -345,11 +351,9 @@ u32 napi_skb_cache_get_bulk(void **skbs, u32 n) get: for (u32 base = nc->skb_count - n, i = 0; i < n; i++) { - u32 cache_size = kmem_cache_size(net_hotdata.skbuff_cache); - skbs[i] = nc->skb_cache[base + i]; - kasan_mempool_unpoison_object(skbs[i], cache_size); + kasan_mempool_unpoison_object(skbs[i], skbuff_cache_size); memset(skbs[i], 0, offsetof(struct sk_buff, tail)); } @@ -1136,12 +1140,22 @@ void skb_release_head_state(struct sk_buff *skb) skb_dst_drop(skb); if (skb->destructor) { DEBUG_NET_WARN_ON_ONCE(in_hardirq()); - skb->destructor(skb); - } -#if IS_ENABLED(CONFIG_NF_CONNTRACK) - nf_conntrack_put(skb_nfct(skb)); +#ifdef CONFIG_INET + INDIRECT_CALL_4(skb->destructor, + tcp_wfree, __sock_wfree, sock_wfree, + xsk_destruct_skb, + skb); +#else + INDIRECT_CALL_2(skb->destructor, + sock_wfree, xsk_destruct_skb, + skb); + #endif - skb_ext_put(skb); + skb->destructor = NULL; + skb->sk = NULL; + } + nf_reset_ct(skb); + skb_ext_reset(skb); } /* Free everything but the sk_buff shell. 
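[Editor's note] The skb_release_head_state() hunk just above replaces the plain indirect call through skb->destructor with INDIRECT_CALL_4()/INDIRECT_CALL_2(), trading one indirect branch for a short chain of pointer compares plus direct calls, which is considerably cheaper when retpolines are enabled. A minimal sketch of the idea, simplified from include/linux/indirect_call_wrapper.h (the real macros fall back to a plain indirect call when retpolines are disabled):

/* Illustration only: try the likely targets with direct calls first,
 * fall back to the indirect call for anything else. */
#define EX_INDIRECT_CALL_1(f, f1, ...)				\
	(likely((f) == (f1)) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))
#define EX_INDIRECT_CALL_2(f, f2, f1, ...)			\
	(likely((f) == (f2)) ? f2(__VA_ARGS__)			\
			     : EX_INDIRECT_CALL_1(f, f1, __VA_ARGS__))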
*/ @@ -1428,7 +1442,7 @@ static void napi_skb_cache_put(struct sk_buff *skb) if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++) kasan_mempool_unpoison_object(nc->skb_cache[i], - kmem_cache_size(net_hotdata.skbuff_cache)); + skbuff_cache_size); kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF, nc->skb_cache + NAPI_SKB_CACHE_HALF); @@ -1465,6 +1479,11 @@ void napi_consume_skb(struct sk_buff *skb, int budget) DEBUG_NET_WARN_ON_ONCE(!in_softirq()); + if (skb->alloc_cpu != smp_processor_id() && !skb_shared(skb)) { + skb_release_head_state(skb); + return skb_attempt_defer_free(skb); + } + if (!skb_unref(skb)) return; @@ -2218,6 +2237,10 @@ EXPORT_SYMBOL(__pskb_copy_fclone); * * All the pointers pointing into skb header may change and must be * reloaded after call to this function. + * + * Note: If you skb_push() the start of the buffer after reallocating the + * header, call skb_postpush_data_move() first to move the metadata out of + * the way before writing to &sk_buff->data. */ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, @@ -2289,8 +2312,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb->nohdr = 0; atomic_set(&skb_shinfo(skb)->dataref, 1); - skb_metadata_clear(skb); - /* It is not generally safe to change skb->truesize. * For the moment, we really care of rx path, or * when skb is orphaned (not attached to a socket). @@ -5116,6 +5137,8 @@ void __init skb_init(void) offsetof(struct sk_buff, cb), sizeof_field(struct sk_buff, cb), NULL); + skbuff_cache_size = kmem_cache_size(net_hotdata.skbuff_cache); + net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", sizeof(struct sk_buff_fclones), 0, diff --git a/net/core/sock.c b/net/core/sock.c index dc03d4b5909a..3b74fc71f51c 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -155,7 +155,7 @@ static DEFINE_MUTEX(proto_list_mutex); static LIST_HEAD(proto_list); -static void sock_def_write_space_wfree(struct sock *sk); +static void sock_def_write_space_wfree(struct sock *sk, int wmem_alloc); static void sock_def_write_space(struct sock *sk); /** @@ -1046,9 +1046,13 @@ static int sock_reserve_memory(struct sock *sk, int bytes) if (!charged) return -ENOMEM; + if (sk->sk_bypass_prot_mem) + goto success; + /* pre-charge to forward_alloc */ sk_memory_allocated_add(sk, pages); allocated = sk_memory_allocated(sk); + /* If the system goes into memory pressure with this * precharge, give up and return error. */ @@ -1057,6 +1061,8 @@ static int sock_reserve_memory(struct sock *sk, int bytes) mem_cgroup_sk_uncharge(sk, pages); return -ENOMEM; } + +success: sk_forward_alloc_add(sk, pages << PAGE_SHIFT); WRITE_ONCE(sk->sk_reserved_mem, @@ -2300,8 +2306,13 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, * why we need sk_prot_creator -acme */ sk->sk_prot = sk->sk_prot_creator = prot; + + if (READ_ONCE(net->core.sysctl_bypass_prot_mem)) + sk->sk_bypass_prot_mem = 1; + sk->sk_kern_sock = kern; sock_lock_init(sk); + sk->sk_net_refcnt = kern ? 
0 : 1; if (likely(sk->sk_net_refcnt)) { get_net_track(net, &sk->ns_tracker, priority); @@ -2313,7 +2324,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, } sock_net_set(sk, net); - refcount_set(&sk->sk_wmem_alloc, 1); + refcount_set(&sk->sk_wmem_alloc, SK_WMEM_ALLOC_BIAS); mem_cgroup_sk_alloc(sk); cgroup_sk_alloc(&sk->sk_cgrp_data); @@ -2451,13 +2462,16 @@ static void sk_init_common(struct sock *sk) } /** - * sk_clone_lock - clone a socket, and lock its clone - * @sk: the socket to clone - * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) + * sk_clone - clone a socket + * @sk: the socket to clone + * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) + * @lock: if true, lock the cloned sk * - * Caller must unlock socket even in error path (bh_unlock_sock(newsk)) + * If @lock is true, the clone is locked by bh_lock_sock(), and + * caller must unlock socket even in error path by bh_unlock_sock(). */ -struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) +struct sock *sk_clone(const struct sock *sk, const gfp_t priority, + bool lock) { struct proto *prot = READ_ONCE(sk->sk_prot); struct sk_filter *filter; @@ -2486,16 +2500,19 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) __netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker, false, priority); } + sk_node_init(&newsk->sk_node); sock_lock_init(newsk); - bh_lock_sock(newsk); + + if (lock) + bh_lock_sock(newsk); + newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; newsk->sk_backlog.len = 0; atomic_set(&newsk->sk_rmem_alloc, 0); - /* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */ - refcount_set(&newsk->sk_wmem_alloc, 1); + refcount_set(&newsk->sk_wmem_alloc, SK_WMEM_ALLOC_BIAS); atomic_set(&newsk->sk_omem_alloc, 0); sk_init_common(newsk); @@ -2580,12 +2597,13 @@ free: * destructor and make plain sk_free() */ newsk->sk_destruct = NULL; - bh_unlock_sock(newsk); + if (lock) + bh_unlock_sock(newsk); sk_free(newsk); newsk = NULL; goto out; } -EXPORT_SYMBOL_GPL(sk_clone_lock); +EXPORT_SYMBOL_GPL(sk_clone); static u32 sk_dst_gso_max_size(struct sock *sk, const struct net_device *dev) { @@ -2649,16 +2667,18 @@ EXPORT_SYMBOL_GPL(sk_setup_caps); */ void sock_wfree(struct sk_buff *skb) { - struct sock *sk = skb->sk; unsigned int len = skb->truesize; + struct sock *sk = skb->sk; bool free; + int old; if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { if (sock_flag(sk, SOCK_RCU_FREE) && sk->sk_write_space == sock_def_write_space) { rcu_read_lock(); - free = refcount_sub_and_test(len, &sk->sk_wmem_alloc); - sock_def_write_space_wfree(sk); + free = __refcount_sub_and_test(len, &sk->sk_wmem_alloc, + &old); + sock_def_write_space_wfree(sk, old - len); rcu_read_unlock(); if (unlikely(free)) __sk_free(sk); @@ -2695,6 +2715,8 @@ void __sock_wfree(struct sk_buff *skb) void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) { + int old_wmem; + skb_orphan(skb); #ifdef CONFIG_INET if (unlikely(!sk_fullsock(sk))) @@ -2708,7 +2730,15 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) * is enough to guarantee sk_free() won't free this sock until * all in-flight packets are completed */ - refcount_add(skb->truesize, &sk->sk_wmem_alloc); + __refcount_add(skb->truesize, &sk->sk_wmem_alloc, &old_wmem); + + /* (old_wmem == SK_WMEM_ALLOC_BIAS) if no other TX packet for this socket + * is in a host queue (qdisc, NIC queue). + * Set skb->ooo_okay so that netdev_pick_tx() can choose a TX queue + * based on XPS for better performance. 
+ * Otherwise clear ooo_okay to not risk Out Of Order delivery. + */ + skb->ooo_okay = (old_wmem == SK_WMEM_ALLOC_BIAS); } EXPORT_SYMBOL(skb_set_owner_w); @@ -3136,8 +3166,11 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation))) return true; - sk_enter_memory_pressure(sk); + if (!sk->sk_bypass_prot_mem) + sk_enter_memory_pressure(sk); + sk_stream_moderate_sndbuf(sk); + return false; } EXPORT_SYMBOL(sk_page_frag_refill); @@ -3254,10 +3287,12 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) { bool memcg_enabled = false, charged = false; struct proto *prot = sk->sk_prot; - long allocated; + long allocated = 0; - sk_memory_allocated_add(sk, amt); - allocated = sk_memory_allocated(sk); + if (!sk->sk_bypass_prot_mem) { + sk_memory_allocated_add(sk, amt); + allocated = sk_memory_allocated(sk); + } if (mem_cgroup_sk_enabled(sk)) { memcg_enabled = true; @@ -3266,6 +3301,9 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) goto suppress_allocation; } + if (!allocated) + return 1; + /* Under limit. */ if (allocated <= sk_prot_mem_limits(sk, 0)) { sk_leave_memory_pressure(sk); @@ -3344,7 +3382,8 @@ suppress_allocation: trace_sock_exceed_buf_limit(sk, prot, allocated, kind); - sk_memory_allocated_sub(sk, amt); + if (allocated) + sk_memory_allocated_sub(sk, amt); if (charged) mem_cgroup_sk_uncharge(sk, amt); @@ -3383,11 +3422,14 @@ EXPORT_SYMBOL(__sk_mem_schedule); */ void __sk_mem_reduce_allocated(struct sock *sk, int amount) { - sk_memory_allocated_sub(sk, amount); - if (mem_cgroup_sk_enabled(sk)) mem_cgroup_sk_uncharge(sk, amount); + if (sk->sk_bypass_prot_mem) + return; + + sk_memory_allocated_sub(sk, amount); + if (sk_under_global_memory_pressure(sk) && (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) sk_leave_memory_pressure(sk); @@ -3420,13 +3462,13 @@ EXPORT_SYMBOL_GPL(sk_set_peek_off); * function, some default processing is provided. */ -int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len) +int sock_no_bind(struct socket *sock, struct sockaddr_unsized *saddr, int len) { return -EOPNOTSUPP; } EXPORT_SYMBOL(sock_no_bind); -int sock_no_connect(struct socket *sock, struct sockaddr *saddr, +int sock_no_connect(struct socket *sock, struct sockaddr_unsized *saddr, int len, int flags) { return -EOPNOTSUPP; @@ -3580,12 +3622,12 @@ static void sock_def_write_space(struct sock *sk) * for SOCK_RCU_FREE sockets under RCU read section and after putting * ->sk_wmem_alloc. */ -static void sock_def_write_space_wfree(struct sock *sk) +static void sock_def_write_space_wfree(struct sock *sk, int wmem_alloc) { /* Do not wake up a writer until he can make "significant" * progress. 
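[Editor's note] The sk_wmem_alloc changes in the net/core/sock.c hunks above all serve one trick: the counter now starts at SK_WMEM_ALLOC_BIAS rather than 1, and __refcount_add()/__refcount_sub_and_test() return the pre-operation value, so skb_set_owner_w() learns for free whether this skb is the only TX data in flight (old value equal to the bare bias, hence safe to set ooo_okay), and sock_wfree() can pass the post-subtraction level straight to sock_def_write_space_wfree(). A userspace toy model of that accounting; the names and the bias value below are stand-ins, not the kernel's:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define WMEM_BIAS 1			/* stand-in for SK_WMEM_ALLOC_BIAS */

static atomic_int wmem_alloc = WMEM_BIAS;	/* set at socket creation */

/* charge one skb; true means no other TX bytes were in flight, i.e. the
 * fetched pre-add value was still the bare bias (the ooo_okay case) */
static bool charge(int truesize)
{
	return atomic_fetch_add(&wmem_alloc, truesize) == WMEM_BIAS;
}

/* uncharge on TX completion; the pre-sub value minus truesize is what
 * the kernel hands down as the current write-memory level */
static int uncharge(int truesize)
{
	return atomic_fetch_sub(&wmem_alloc, truesize) - truesize;
}

int main(void)
{
	assert(charge(512));		/* first packet: ooo_okay */
	assert(!charge(256));		/* overlapping packet: keep order */
	assert(uncharge(256) == WMEM_BIAS + 512);
	assert(uncharge(512) == WMEM_BIAS);
	return 0;
}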
--DaveM */ - if (sock_writeable(sk)) { + if (__sock_writeable(sk, wmem_alloc)) { struct socket_wq *wq = rcu_dereference(sk->sk_wq); /* rely on refcount_sub from sock_wfree() */ @@ -4353,7 +4395,7 @@ bool sk_busy_loop_end(void *p, unsigned long start_time) EXPORT_SYMBOL(sk_busy_loop_end); #endif /* CONFIG_NET_RX_BUSY_POLL */ -int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len) +int sock_bind_add(struct sock *sk, struct sockaddr_unsized *addr, int addr_len) { if (!sk->sk_prot->bind_add) return -EOPNOTSUPP; diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 8cf04b57ade1..8d4decb2606f 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -668,6 +668,13 @@ static struct ctl_table netns_core_table[] = { .proc_handler = proc_dou8vec_minmax, }, { + .procname = "txq_reselection_ms", + .data = &init_net.core.sysctl_txq_reselection, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_ms_jiffies, + }, + { .procname = "tstamp_allow_data", .data = &init_net.core.sysctl_tstamp_allow_data, .maxlen = sizeof(u8), @@ -676,6 +683,15 @@ static struct ctl_table netns_core_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE }, + { + .procname = "bypass_prot_mem", + .data = &init_net.core.sysctl_bypass_prot_mem, + .maxlen = sizeof(u8), + .mode = 0644, + .proc_handler = proc_dou8vec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE + }, /* sysctl_core_net_init() will set the values after this * to readonly in network namespaces */ diff --git a/net/devlink/netlink_gen.c b/net/devlink/netlink_gen.c index 9fd00977d59e..5ad435aee29d 100644 --- a/net/devlink/netlink_gen.c +++ b/net/devlink/netlink_gen.c @@ -229,7 +229,7 @@ static const struct nla_policy devlink_eswitch_get_nl_policy[DEVLINK_ATTR_DEV_NA static const struct nla_policy devlink_eswitch_set_nl_policy[DEVLINK_ATTR_ESWITCH_ENCAP_MODE + 1] = { [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, }, [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, }, - [DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_MAX(NLA_U16, 1), + [DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_MAX(NLA_U16, 2), [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = NLA_POLICY_MAX(NLA_U8, 3), [DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = NLA_POLICY_MAX(NLA_U8, 1), }; diff --git a/net/devlink/param.c b/net/devlink/param.c index 70e69523412c..6b233b13b69a 100644 --- a/net/devlink/param.c +++ b/net/devlink/param.c @@ -112,6 +112,11 @@ static const struct devlink_param devlink_param_generic[] = { .name = DEVLINK_PARAM_GENERIC_NUM_DOORBELLS_NAME, .type = DEVLINK_PARAM_GENERIC_NUM_DOORBELLS_TYPE, }, + { + .id = DEVLINK_PARAM_GENERIC_ID_MAX_MAC_PER_VF, + .name = DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_NAME, + .type = DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_TYPE, + }, }; static int devlink_param_generic_verify(const struct devlink_param *param) diff --git a/net/devlink/region.c b/net/devlink/region.c index 63fb297f6d67..d6e5805cf3a0 100644 --- a/net/devlink/region.c +++ b/net/devlink/region.c @@ -50,7 +50,7 @@ devlink_port_region_get_by_name(struct devlink_port *port, struct devlink_region *region; list_for_each_entry(region, &port->region_list, list) - if (!strcmp(region->ops->name, region_name)) + if (!strcmp(region->port_ops->name, region_name)) return region; return NULL; diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 869cbe57162f..f86b30742122 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -104,6 +104,14 @@ config NET_DSA_TAG_MTK Say Y or M if you want to enable support for tagging frames for Mediatek switches. 
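[Editor's note] The netns_core_table hunk above (net/core/sysctl_net_core.c) stores txq_reselection_ms internally in jiffies; proc_dointvec_ms_jiffies converts to and from milliseconds at the proc interface, and preinit_net_sysctl() seeds it with msecs_to_jiffies(1000). A hedged sketch of how a TX path could consume the knob; the helper name and call site are invented for illustration, only the field name comes from the patch:

/* hypothetical consumer: let the stack re-run queue selection for a
 * flow once the configured interval has passed since the last pick */
static bool txq_reselection_due(const struct net *net,
				unsigned long last_pick)
{
	unsigned long interval = READ_ONCE(net->core.sysctl_txq_reselection);

	return interval && time_after(jiffies, last_pick + interval);
}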
+config NET_DSA_TAG_MXL_GSW1XX + tristate "Tag driver for MaxLinear GSW1xx switches" + help + The GSW1xx family of switches supports an 8-byte special tag which + can be used on the CPU port of the switch. + Say Y or M if you want to enable support for tagging frames for + MaxLinear GSW1xx switches. + config NET_DSA_TAG_KSZ tristate "Tag driver for Microchip 8795/937x/9477/9893 families of switches" help @@ -190,4 +198,10 @@ config NET_DSA_TAG_XRS700X Say Y or M if you want to enable support for tagging frames for Arrow SpeedChips XRS700x switches that use a single byte tag trailer. +config NET_DSA_TAG_YT921X + tristate "Tag driver for Motorcomm YT921x switches" + help + Say Y or M if you want to enable support for tagging frames for + Motorcomm YT921x switches. + endif diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 555c07cfeb71..42d173f5a701 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_NET_DSA_TAG_HELLCREEK) += tag_hellcreek.o obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o +obj-$(CONFIG_NET_DSA_TAG_MXL_GSW1XX) += tag_mxl-gsw1xx.o obj-$(CONFIG_NET_DSA_TAG_NONE) += tag_none.o obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o obj-$(CONFIG_NET_DSA_TAG_OCELOT_8021Q) += tag_ocelot_8021q.o @@ -39,6 +40,7 @@ obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o obj-$(CONFIG_NET_DSA_TAG_VSC73XX_8021Q) += tag_vsc73xx_8021q.o obj-$(CONFIG_NET_DSA_TAG_XRS700X) += tag_xrs700x.o +obj-$(CONFIG_NET_DSA_TAG_YT921X) += tag_yt921x.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/net/dsa/tag_mxl-gsw1xx.c b/net/dsa/tag_mxl-gsw1xx.c new file mode 100644 index 000000000000..701a079955f2 --- /dev/null +++ b/net/dsa/tag_mxl-gsw1xx.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * DSA driver Special Tag support for MaxLinear GSW1xx switch chips + * + * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org> + * Copyright (C) 2023 - 2024 MaxLinear Inc. + */ + +#include <linux/bitops.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <net/dsa.h> + +#include "tag.h" + +/* To define the outgoing port and to discover the incoming port a special + * tag is used by the GSW1xx. + * + * Dest MAC Src MAC special TAG EtherType + * ...| 1 2 3 4 5 6 | 1 2 3 4 5 6 | 1 2 3 4 5 6 7 8 | 1 2 |... 
+ * |<--------------->| + */ + +#define GSW1XX_TAG_NAME "gsw1xx" + +/* special tag header length (RX and TX) */ +#define GSW1XX_HEADER_LEN 8 + +/* Word 0 = Ethertype -> 0x88C3 */ + +/* Word 1 */ +#define GSW1XX_TX_PORT_MAP GENMASK(7, 0) +#define GSW1XX_TX_PORT_MAP_EN BIT(15) +#define GSW1XX_TX_CLASS_EN BIT(14) +#define GSW1XX_TX_TIME_STAMP_EN BIT(13) +#define GSW1XX_TX_LRN_DIS BIT(12) +#define GSW1XX_TX_CLASS GENMASK(11, 8) + +/* special tag in RX path header */ +/* Word 2 */ +#define GSW1XX_RX_PORT_MAP GENMASK(15, 8) + +static struct sk_buff *gsw1xx_tag_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + __be16 *gsw1xx_tag; + + /* provide additional space 'GSW1XX_HEADER_LEN' bytes */ + skb_push(skb, GSW1XX_HEADER_LEN); + + /* add space between MAC address and Ethertype */ + dsa_alloc_etype_header(skb, GSW1XX_HEADER_LEN); + + /* special tag ingress */ + gsw1xx_tag = dsa_etype_header_pos_tx(skb); + gsw1xx_tag[0] = htons(ETH_P_MXLGSW); + gsw1xx_tag[1] = htons(GSW1XX_TX_PORT_MAP_EN | GSW1XX_TX_LRN_DIS | + FIELD_PREP(GSW1XX_TX_PORT_MAP, BIT(dp->index))); + + gsw1xx_tag[2] = 0; + gsw1xx_tag[3] = 0; + + return skb; +} + +static struct sk_buff *gsw1xx_tag_rcv(struct sk_buff *skb, + struct net_device *dev) +{ + int port; + __be16 *gsw1xx_tag; + + if (unlikely(!pskb_may_pull(skb, GSW1XX_HEADER_LEN))) { + dev_warn_ratelimited(&dev->dev, "Dropping packet, cannot pull SKB\n"); + return NULL; + } + + gsw1xx_tag = dsa_etype_header_pos_rx(skb); + + if (unlikely(ntohs(gsw1xx_tag[0]) != ETH_P_MXLGSW)) { + dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid special tag\n"); + dev_warn_ratelimited(&dev->dev, "Tag: %8ph\n", gsw1xx_tag); + return NULL; + } + + /* Get source port information */ + port = FIELD_GET(GSW1XX_RX_PORT_MAP, ntohs(gsw1xx_tag[1])); + skb->dev = dsa_conduit_find_user(dev, 0, port); + if (!skb->dev) { + dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid source port\n"); + dev_warn_ratelimited(&dev->dev, "Tag: %8ph\n", gsw1xx_tag); + return NULL; + } + + /* remove the GSW1xx special tag between MAC addresses and the current + * ethertype field. + */ + skb_pull_rcsum(skb, GSW1XX_HEADER_LEN); + dsa_strip_etype_header(skb, GSW1XX_HEADER_LEN); + + return skb; +} + +static const struct dsa_device_ops gsw1xx_netdev_ops = { + .name = GSW1XX_TAG_NAME, + .proto = DSA_TAG_PROTO_MXL_GSW1XX, + .xmit = gsw1xx_tag_xmit, + .rcv = gsw1xx_tag_rcv, + .needed_headroom = GSW1XX_HEADER_LEN, +}; + +MODULE_DESCRIPTION("DSA tag driver for MaxLinear GSW1xx 8 byte protocol"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_MXL_GSW1XX, GSW1XX_TAG_NAME); + +module_dsa_tag_driver(gsw1xx_netdev_ops); diff --git a/net/dsa/tag_yt921x.c b/net/dsa/tag_yt921x.c new file mode 100644 index 000000000000..995da44f0a2a --- /dev/null +++ b/net/dsa/tag_yt921x.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Motorcomm YT921x Switch Extended CPU Port Tagging + * + * Copyright (c) 2025 David Yang <mmyangfl@gmail.com> + * + * +----+----+-------+-----+----+--------- + * | DA | SA | TagET | Tag | ET | Payload ... + * +----+----+-------+-----+----+--------- + * 6 6 2 6 2 N + * + * Tag Ethertype: CPU_TAG_TPID_TPID (default: ETH_P_YT921X = 0x9988) + * * Hardcoded for the moment, but still configurable. Discuss it if there + * are conflicts somewhere and/or you want to change it for some reason. + * Tag: + * 2: VLAN Tag + * 2: Rx Port + * 15b: Rx Port Valid + * 14b-11b: Rx Port + * 10b-0b: Cmd? 
+ * 2: Tx Port(s) + * 15b: Tx Port(s) Valid + * 10b-0b: Tx Port(s) Mask + */ + +#include <linux/etherdevice.h> + +#include "tag.h" + +#define YT921X_TAG_NAME "yt921x" + +#define YT921X_TAG_LEN 8 + +#define YT921X_TAG_PORT_EN BIT(15) +#define YT921X_TAG_RX_PORT_M GENMASK(14, 11) +#define YT921X_TAG_RX_CMD_M GENMASK(10, 0) +#define YT921X_TAG_RX_CMD(x) FIELD_PREP(YT921X_TAG_RX_CMD_M, (x)) +#define YT921X_TAG_RX_CMD_FORWARDED 0x80 +#define YT921X_TAG_RX_CMD_UNK_UCAST 0xb2 +#define YT921X_TAG_RX_CMD_UNK_MCAST 0xb4 +#define YT921X_TAG_TX_PORTS_M GENMASK(10, 0) +#define YT921X_TAG_TX_PORTn(port) BIT(port) + +static struct sk_buff * +yt921x_tag_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct dsa_port *dp = dsa_user_to_port(netdev); + unsigned int port = dp->index; + __be16 *tag; + u16 tx; + + skb_push(skb, YT921X_TAG_LEN); + dsa_alloc_etype_header(skb, YT921X_TAG_LEN); + + tag = dsa_etype_header_pos_tx(skb); + + tag[0] = htons(ETH_P_YT921X); + /* VLAN tag unrelated when TX */ + tag[1] = 0; + tag[2] = 0; + tx = YT921X_TAG_PORT_EN | YT921X_TAG_TX_PORTn(port); + tag[3] = htons(tx); + + return skb; +} + +static struct sk_buff * +yt921x_tag_rcv(struct sk_buff *skb, struct net_device *netdev) +{ + unsigned int port; + __be16 *tag; + u16 cmd; + u16 rx; + + if (unlikely(!pskb_may_pull(skb, YT921X_TAG_LEN))) + return NULL; + + tag = dsa_etype_header_pos_rx(skb); + + if (unlikely(tag[0] != htons(ETH_P_YT921X))) { + dev_warn_ratelimited(&netdev->dev, + "Unexpected EtherType 0x%04x\n", + ntohs(tag[0])); + return NULL; + } + + /* Locate which port this is coming from */ + rx = ntohs(tag[2]); + if (unlikely((rx & YT921X_TAG_PORT_EN) == 0)) { + dev_warn_ratelimited(&netdev->dev, + "Unexpected rx tag 0x%04x\n", rx); + return NULL; + } + + port = FIELD_GET(YT921X_TAG_RX_PORT_M, rx); + skb->dev = dsa_conduit_find_user(netdev, 0, port); + if (unlikely(!skb->dev)) { + dev_warn_ratelimited(&netdev->dev, + "Couldn't decode source port %u\n", port); + return NULL; + } + + cmd = FIELD_GET(YT921X_TAG_RX_CMD_M, rx); + switch (cmd) { + case YT921X_TAG_RX_CMD_FORWARDED: + /* Already forwarded by hardware */ + dsa_default_offload_fwd_mark(skb); + break; + case YT921X_TAG_RX_CMD_UNK_UCAST: + case YT921X_TAG_RX_CMD_UNK_MCAST: + /* NOTE: hardware doesn't distinguish between TRAP (copy to CPU + * only) and COPY (forward and copy to CPU). In order to perform + * a soft switch, NEVER use COPY action in the switch driver. 
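[Editor's note] As a concrete check of the bit layout used by yt921x_tag_xmit()/yt921x_tag_rcv() above: for user port 3, the TX word is YT921X_TAG_PORT_EN | BIT(3) = 0x8008, while an RX word carrying port 3 in bits 14-11 decodes back to 3 via FIELD_GET(). The standalone snippet below redoes that arithmetic outside the kernel; GENMASK/FIELD_PREP/FIELD_GET are reimplemented minimally (constant masks only) so it compiles as plain C:

#include <assert.h>
#include <stdint.h>

#define BIT(n)			(1u << (n))
#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
/* minimal FIELD_PREP/FIELD_GET for constant masks */
#define LOW_BIT(m)		((m) & -(m))
#define FIELD_PREP(m, v)	(((v) * LOW_BIT(m)) & (m))
#define FIELD_GET(m, r)		(((r) & (m)) / LOW_BIT(m))

#define TAG_PORT_EN	BIT(15)
#define TAG_RX_PORT_M	GENMASK(14, 11)	/* 0x7800 */
#define TAG_TX_PORTn(p)	BIT(p)

int main(void)
{
	uint16_t tx = TAG_PORT_EN | TAG_TX_PORTn(3);
	uint16_t rx = TAG_PORT_EN | FIELD_PREP(TAG_RX_PORT_M, 3);

	assert(tx == 0x8008);			/* port mask in bits 10-0 */
	assert(FIELD_GET(TAG_RX_PORT_M, rx) == 3);	/* port in bits 14-11 */
	return 0;
}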
+ */ + break; + default: + dev_warn_ratelimited(&netdev->dev, + "Unexpected rx cmd 0x%02x\n", cmd); + break; + } + + /* Remove YT921x tag and update checksum */ + skb_pull_rcsum(skb, YT921X_TAG_LEN); + dsa_strip_etype_header(skb, YT921X_TAG_LEN); + + return skb; +} + +static const struct dsa_device_ops yt921x_netdev_ops = { + .name = YT921X_TAG_NAME, + .proto = DSA_TAG_PROTO_YT921X, + .xmit = yt921x_tag_xmit, + .rcv = yt921x_tag_rcv, + .needed_headroom = YT921X_TAG_LEN, +}; + +MODULE_DESCRIPTION("DSA tag driver for Motorcomm YT921x switches"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_YT921X, YT921X_TAG_NAME); + +module_dsa_tag_driver(yt921x_netdev_ops); diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile index 1e493553b977..629c10916670 100644 --- a/net/ethtool/Makefile +++ b/net/ethtool/Makefile @@ -9,4 +9,4 @@ ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o rss.o \ channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \ tunnels.o fec.o eeprom.o stats.o phc_vclocks.o mm.o \ module.o cmis_fw_update.o cmis_cdb.o pse-pd.o plca.o \ - phy.o tsconfig.o + phy.o tsconfig.o mse.o diff --git a/net/ethtool/mse.c b/net/ethtool/mse.c new file mode 100644 index 000000000000..6aac004c3ffc --- /dev/null +++ b/net/ethtool/mse.c @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/ethtool.h> +#include <linux/phy.h> +#include <linux/slab.h> + +#include "netlink.h" +#include "common.h" + +/* Channels A-D only; WORST and LINK are exclusive alternatives */ +#define PHY_MSE_CHANNEL_COUNT 4 + +struct mse_req_info { + struct ethnl_req_info base; +}; + +struct mse_snapshot_entry { + struct phy_mse_snapshot snapshot; + int channel; +}; + +struct mse_reply_data { + struct ethnl_reply_data base; + struct phy_mse_capability capability; + struct mse_snapshot_entry *snapshots; + unsigned int num_snapshots; +}; + +static struct mse_reply_data * +mse_repdata(const struct ethnl_reply_data *reply_base) +{ + return container_of(reply_base, struct mse_reply_data, base); +} + +const struct nla_policy ethnl_mse_get_policy[] = { + [ETHTOOL_A_MSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy_phy), +}; + +static int get_snapshot_if_supported(struct phy_device *phydev, + struct mse_reply_data *data, + unsigned int *idx, u32 cap_bit, + enum phy_mse_channel channel) +{ + int ret; + + if (data->capability.supported_caps & cap_bit) { + ret = phydev->drv->get_mse_snapshot(phydev, channel, + &data->snapshots[*idx].snapshot); + if (ret) + return ret; + data->snapshots[*idx].channel = channel; + (*idx)++; + } + + return 0; +} + +static int mse_get_channels(struct phy_device *phydev, + struct mse_reply_data *data) +{ + unsigned int i = 0; + int ret; + + if (!data->capability.supported_caps) + return 0; + + data->snapshots = kcalloc(PHY_MSE_CHANNEL_COUNT, + sizeof(*data->snapshots), GFP_KERNEL); + if (!data->snapshots) + return -ENOMEM; + + /* Priority 1: Individual channels */ + ret = get_snapshot_if_supported(phydev, data, &i, PHY_MSE_CAP_CHANNEL_A, + PHY_MSE_CHANNEL_A); + if (ret) + return ret; + ret = get_snapshot_if_supported(phydev, data, &i, PHY_MSE_CAP_CHANNEL_B, + PHY_MSE_CHANNEL_B); + if (ret) + return ret; + ret = get_snapshot_if_supported(phydev, data, &i, PHY_MSE_CAP_CHANNEL_C, + PHY_MSE_CHANNEL_C); + if (ret) + return ret; + ret = get_snapshot_if_supported(phydev, data, &i, PHY_MSE_CAP_CHANNEL_D, + PHY_MSE_CHANNEL_D); + if (ret) + return ret; + + /* If any individual channels were found, we are done. 
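[Editor's note] mse_get_channels() implements a strict fallback order, continued just below: individual channels A-D are reported when any of them is supported, otherwise the worst-channel snapshot, otherwise the link-wide one, so a reply never mixes granularities. A toy restatement of that selection rule with plain flags (flag values invented for illustration):

#include <assert.h>

enum {
	CAP_A = 1, CAP_B = 2, CAP_C = 4, CAP_D = 8,
	CAP_WORST = 16, CAP_LINK = 32,
};

/* returns the subset of capabilities that would be reported */
static int mse_select(int caps)
{
	if (caps & (CAP_A | CAP_B | CAP_C | CAP_D))
		return caps & (CAP_A | CAP_B | CAP_C | CAP_D);
	if (caps & CAP_WORST)
		return CAP_WORST;
	return caps & CAP_LINK;
}

int main(void)
{
	assert(mse_select(CAP_A | CAP_WORST) == CAP_A);	/* per-channel wins */
	assert(mse_select(CAP_WORST | CAP_LINK) == CAP_WORST);
	assert(mse_select(CAP_LINK) == CAP_LINK);
	return 0;
}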
*/ + if (i > 0) { + data->num_snapshots = i; + return 0; + } + + /* Priority 2: Worst channel, if no individual channels supported. */ + ret = get_snapshot_if_supported(phydev, data, &i, + PHY_MSE_CAP_WORST_CHANNEL, + PHY_MSE_CHANNEL_WORST); + if (ret) + return ret; + + /* If worst channel was found, we are done. */ + if (i > 0) { + data->num_snapshots = i; + return 0; + } + + /* Priority 3: Link-wide, if nothing else is supported. */ + ret = get_snapshot_if_supported(phydev, data, &i, PHY_MSE_CAP_LINK, + PHY_MSE_CHANNEL_LINK); + if (ret) + return ret; + + data->num_snapshots = i; + return 0; +} + +static int mse_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + const struct genl_info *info) +{ + struct mse_reply_data *data = mse_repdata(reply_base); + struct net_device *dev = reply_base->dev; + struct phy_device *phydev; + int ret; + + phydev = ethnl_req_get_phydev(req_base, info->attrs, + ETHTOOL_A_MSE_HEADER, info->extack); + if (IS_ERR(phydev)) + return PTR_ERR(phydev); + if (!phydev) + return -EOPNOTSUPP; + + ret = ethnl_ops_begin(dev); + if (ret) + return ret; + + mutex_lock(&phydev->lock); + + if (!phydev->drv || !phydev->drv->get_mse_capability || + !phydev->drv->get_mse_snapshot) { + ret = -EOPNOTSUPP; + goto out_unlock; + } + if (!phydev->link) { + ret = -ENETDOWN; + goto out_unlock; + } + + ret = phydev->drv->get_mse_capability(phydev, &data->capability); + if (ret) + goto out_unlock; + + ret = mse_get_channels(phydev, data); + +out_unlock: + mutex_unlock(&phydev->lock); + ethnl_ops_complete(dev); + if (ret) + kfree(data->snapshots); + return ret; +} + +static void mse_cleanup_data(struct ethnl_reply_data *reply_base) +{ + struct mse_reply_data *data = mse_repdata(reply_base); + + kfree(data->snapshots); +} + +static int mse_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct mse_reply_data *data = mse_repdata(reply_base); + size_t len = 0; + unsigned int i; + + /* ETHTOOL_A_MSE_CAPABILITIES */ + len += nla_total_size(0); + if (data->capability.supported_caps & PHY_MSE_CAP_AVG) + /* ETHTOOL_A_MSE_CAPABILITIES_MAX_AVERAGE_MSE */ + len += nla_total_size(sizeof(u64)); + if (data->capability.supported_caps & (PHY_MSE_CAP_PEAK | + PHY_MSE_CAP_WORST_PEAK)) + /* ETHTOOL_A_MSE_CAPABILITIES_MAX_PEAK_MSE */ + len += nla_total_size(sizeof(u64)); + /* ETHTOOL_A_MSE_CAPABILITIES_REFRESH_RATE_PS */ + len += nla_total_size(sizeof(u64)); + /* ETHTOOL_A_MSE_CAPABILITIES_NUM_SYMBOLS */ + len += nla_total_size(sizeof(u64)); + + for (i = 0; i < data->num_snapshots; i++) { + size_t snapshot_len = 0; + + /* Per-channel nest (e.g., ETHTOOL_A_MSE_CHANNEL_A / _B / _C / + * _D / _WORST_CHANNEL / _LINK) + */ + snapshot_len += nla_total_size(0); + + if (data->capability.supported_caps & PHY_MSE_CAP_AVG) + snapshot_len += nla_total_size(sizeof(u64)); + if (data->capability.supported_caps & PHY_MSE_CAP_PEAK) + snapshot_len += nla_total_size(sizeof(u64)); + if (data->capability.supported_caps & PHY_MSE_CAP_WORST_PEAK) + snapshot_len += nla_total_size(sizeof(u64)); + + len += snapshot_len; + } + + return len; +} + +static int mse_channel_to_attr(int ch) +{ + switch (ch) { + case PHY_MSE_CHANNEL_A: + return ETHTOOL_A_MSE_CHANNEL_A; + case PHY_MSE_CHANNEL_B: + return ETHTOOL_A_MSE_CHANNEL_B; + case PHY_MSE_CHANNEL_C: + return ETHTOOL_A_MSE_CHANNEL_C; + case PHY_MSE_CHANNEL_D: + return ETHTOOL_A_MSE_CHANNEL_D; + case PHY_MSE_CHANNEL_WORST: + return ETHTOOL_A_MSE_WORST_CHANNEL; + case PHY_MSE_CHANNEL_LINK: + 
return ETHTOOL_A_MSE_LINK; + default: + return -EINVAL; + } +} + +static int mse_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct mse_reply_data *data = mse_repdata(reply_base); + struct nlattr *nest; + unsigned int i; + int ret; + + nest = nla_nest_start(skb, ETHTOOL_A_MSE_CAPABILITIES); + if (!nest) + return -EMSGSIZE; + + if (data->capability.supported_caps & PHY_MSE_CAP_AVG) { + ret = nla_put_uint(skb, + ETHTOOL_A_MSE_CAPABILITIES_MAX_AVERAGE_MSE, + data->capability.max_average_mse); + if (ret < 0) + goto nla_put_nest_failure; + } + + if (data->capability.supported_caps & (PHY_MSE_CAP_PEAK | + PHY_MSE_CAP_WORST_PEAK)) { + ret = nla_put_uint(skb, ETHTOOL_A_MSE_CAPABILITIES_MAX_PEAK_MSE, + data->capability.max_peak_mse); + if (ret < 0) + goto nla_put_nest_failure; + } + + ret = nla_put_uint(skb, ETHTOOL_A_MSE_CAPABILITIES_REFRESH_RATE_PS, + data->capability.refresh_rate_ps); + if (ret < 0) + goto nla_put_nest_failure; + + ret = nla_put_uint(skb, ETHTOOL_A_MSE_CAPABILITIES_NUM_SYMBOLS, + data->capability.num_symbols); + if (ret < 0) + goto nla_put_nest_failure; + + nla_nest_end(skb, nest); + + for (i = 0; i < data->num_snapshots; i++) { + const struct mse_snapshot_entry *s = &data->snapshots[i]; + int chan_attr; + + chan_attr = mse_channel_to_attr(s->channel); + if (chan_attr < 0) + return chan_attr; + + nest = nla_nest_start(skb, chan_attr); + if (!nest) + return -EMSGSIZE; + + if (data->capability.supported_caps & PHY_MSE_CAP_AVG) { + ret = nla_put_uint(skb, + ETHTOOL_A_MSE_SNAPSHOT_AVERAGE_MSE, + s->snapshot.average_mse); + if (ret) + goto nla_put_nest_failure; + } + if (data->capability.supported_caps & PHY_MSE_CAP_PEAK) { + ret = nla_put_uint(skb, ETHTOOL_A_MSE_SNAPSHOT_PEAK_MSE, + s->snapshot.peak_mse); + if (ret) + goto nla_put_nest_failure; + } + if (data->capability.supported_caps & PHY_MSE_CAP_WORST_PEAK) { + ret = nla_put_uint(skb, + ETHTOOL_A_MSE_SNAPSHOT_WORST_PEAK_MSE, + s->snapshot.worst_peak_mse); + if (ret) + goto nla_put_nest_failure; + } + + nla_nest_end(skb, nest); + } + + return 0; + +nla_put_nest_failure: + nla_nest_cancel(skb, nest); + return ret; +} + +const struct ethnl_request_ops ethnl_mse_request_ops = { + .request_cmd = ETHTOOL_MSG_MSE_GET, + .reply_cmd = ETHTOOL_MSG_MSE_GET_REPLY, + .hdr_attr = ETHTOOL_A_MSE_HEADER, + .req_info_size = sizeof(struct mse_req_info), + .reply_data_size = sizeof(struct mse_reply_data), + + .prepare_data = mse_prepare_data, + .cleanup_data = mse_cleanup_data, + .reply_size = mse_reply_size, + .fill_reply = mse_fill_reply, +}; diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index 2f813f25f07e..6e5f0f4f815a 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -420,6 +420,7 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = { [ETHTOOL_MSG_TSCONFIG_GET] = ðnl_tsconfig_request_ops, [ETHTOOL_MSG_TSCONFIG_SET] = ðnl_tsconfig_request_ops, [ETHTOOL_MSG_PHY_GET] = ðnl_phy_request_ops, + [ETHTOOL_MSG_MSE_GET] = ðnl_mse_request_ops, }; static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb) @@ -1534,6 +1535,15 @@ static const struct genl_ops ethtool_genl_ops[] = { .policy = ethnl_rss_delete_policy, .maxattr = ARRAY_SIZE(ethnl_rss_delete_policy) - 1, }, + { + .cmd = ETHTOOL_MSG_MSE_GET, + .doit = ethnl_default_doit, + .start = ethnl_perphy_start, + .dumpit = ethnl_perphy_dumpit, + .done = ethnl_perphy_done, + .policy = ethnl_mse_get_policy, + .maxattr = ARRAY_SIZE(ethnl_mse_get_policy) - 1, + }, }; static const 
struct genl_multicast_group ethtool_nl_mcgrps[] = { diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index 1d4f9ecb3d26..89010eaa67df 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -442,6 +442,7 @@ extern const struct ethnl_request_ops ethnl_plca_status_request_ops; extern const struct ethnl_request_ops ethnl_mm_request_ops; extern const struct ethnl_request_ops ethnl_phy_request_ops; extern const struct ethnl_request_ops ethnl_tsconfig_request_ops; +extern const struct ethnl_request_ops ethnl_mse_request_ops; extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1]; extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1]; @@ -497,6 +498,7 @@ extern const struct nla_policy ethnl_module_fw_flash_act_policy[ETHTOOL_A_MODULE extern const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1]; extern const struct nla_policy ethnl_tsconfig_get_policy[ETHTOOL_A_TSCONFIG_HEADER + 1]; extern const struct nla_policy ethnl_tsconfig_set_policy[ETHTOOL_A_TSCONFIG_MAX + 1]; +extern const struct nla_policy ethnl_mse_get_policy[ETHTOOL_A_MSE_HEADER + 1]; int ethnl_set_features(struct sk_buff *skb, struct genl_info *info); int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info); diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index c96b63adf96f..db0b0af7a692 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -166,12 +166,20 @@ static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; } + port = hsr_port_get_hsr(hsr, HSR_PT_INTERLINK); + if (port) { + if (nla_put_u32(skb, IFLA_HSR_INTERLINK, port->dev->ifindex)) + goto nla_put_failure; + } + if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN, hsr->sup_multicast_addr) || nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr)) goto nla_put_failure; if (hsr->prot_version == PRP_V1) proto = HSR_PROTOCOL_PRP; + else if (nla_put_u8(skb, IFLA_HSR_VERSION, hsr->prot_version)) + goto nla_put_failure; if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto)) goto nla_put_failure; diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 18d267921bb5..e542fbe113e7 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -96,7 +96,7 @@ static int ieee802154_sock_sendmsg(struct socket *sock, struct msghdr *msg, return sk->sk_prot->sendmsg(sk, msg, len); } -static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr, +static int ieee802154_sock_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { struct sock *sk = sock->sk; @@ -107,7 +107,7 @@ static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr, return sock_no_bind(sock, uaddr, addr_len); } -static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr, +static int ieee802154_sock_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; @@ -193,7 +193,7 @@ static void raw_close(struct sock *sk, long timeout) sk_common_release(sk); } -static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len) +static int raw_bind(struct sock *sk, struct sockaddr_unsized *_uaddr, int len) { struct ieee802154_addr addr; struct sockaddr_ieee802154 *uaddr = (struct sockaddr_ieee802154 *)_uaddr; @@ -227,7 +227,7 @@ out: return err; } -static int raw_connect(struct sock *sk, struct sockaddr *uaddr, +static int raw_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { return -ENOTSUPP; @@ -485,7 
+485,7 @@ static void dgram_close(struct sock *sk, long timeout) sk_common_release(sk); } -static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len) +static int dgram_bind(struct sock *sk, struct sockaddr_unsized *uaddr, int len) { struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr; struct ieee802154_addr haddr; @@ -563,7 +563,7 @@ static int dgram_ioctl(struct sock *sk, int cmd, int *karg) } /* FIXME: autobind */ -static int dgram_connect(struct sock *sk, struct sockaddr *uaddr, +static int dgram_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int len) { struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr; diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 12850a277251..b71c22475c51 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -760,9 +760,7 @@ config TCP_AO config TCP_MD5SIG bool "TCP: MD5 Signature Option support (RFC2385)" - select CRYPTO - select CRYPTO_MD5 - select TCP_SIGPOOL + select CRYPTO_LIB_MD5 help RFC2385 specifies a method of giving MD5 protection to TCP sessions. Its main (only?) use is to protect BGP sessions between core routers diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 3109c5ec38f3..a31b94ce8968 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -441,7 +441,7 @@ int inet_release(struct socket *sock) } EXPORT_SYMBOL(inet_release); -int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len) +int inet_bind_sk(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { u32 flags = BIND_WITH_LOCK; int err; @@ -464,13 +464,13 @@ int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len) return __inet_bind(sk, uaddr, addr_len, flags); } -int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +int inet_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { return inet_bind_sk(sock->sk, uaddr, addr_len); } EXPORT_SYMBOL(inet_bind); -int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, +int __inet_bind(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len, u32 flags) { struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; @@ -567,7 +567,7 @@ out: return err; } -int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, +int inet_dgram_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; @@ -623,7 +623,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) * Connect to a remote host. There is regrettably still a little * TCP 'magic' in here. */ -int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, +int __inet_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len, int flags, int is_sendmsg) { struct sock *sk = sock->sk; @@ -741,7 +741,7 @@ sock_error: } EXPORT_SYMBOL(__inet_stream_connect); -int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, +int inet_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len, int flags) { int err; @@ -755,6 +755,26 @@ EXPORT_SYMBOL(inet_stream_connect); void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk) { + if (mem_cgroup_sockets_enabled) { + gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL; + + mem_cgroup_sk_alloc(newsk); + + if (mem_cgroup_from_sk(newsk)) { + int amt; + + /* The socket has not been accepted yet, no need + * to look at newsk->sk_wmem_queued. 
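[Editor's note] The memcg charge this hunk moves into __inet_accept() is sized with sk_mem_pages(), which include/net/sock.h defines as a round-up to whole pages: (amt + PAGE_SIZE - 1) >> PAGE_SHIFT. With 4 KiB pages, a socket holding 5000 bytes of receive memory plus 1 byte of forward_alloc is therefore charged two pages. A standalone restatement:

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1 << PAGE_SHIFT)

/* mirrors sk_mem_pages() from include/net/sock.h */
static int sk_mem_pages(int amt)
{
	return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	assert(sk_mem_pages(0) == 0);		/* nothing queued: no charge */
	assert(sk_mem_pages(1 + 5000) == 2);	/* forward_alloc + rmem_alloc */
	return 0;
}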
+ */ + amt = sk_mem_pages(newsk->sk_forward_alloc + + atomic_read(&newsk->sk_rmem_alloc)); + if (amt) + mem_cgroup_sk_charge(newsk, amt, gfp); + } + + kmem_cache_charge(newsk, gfp); + } + sock_rps_record_flow(newsk); WARN_ON(!((1 << newsk->sk_state) & (TCPF_ESTABLISHED | TCPF_SYN_RECV | @@ -768,6 +788,7 @@ void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *new newsock->state = SS_CONNECTED; } +EXPORT_SYMBOL_GPL(__inet_accept); /* * Accept a pending connection. The TCP layer now gives BSD semantics. @@ -813,7 +834,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr, } sin->sin_port = inet->inet_dport; sin->sin_addr.s_addr = inet->inet_daddr; - BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len, + BPF_CGROUP_RUN_SA_PROG(sk, sin, &sin_addr_len, CGROUP_INET4_GETPEERNAME); } else { __be32 addr = inet->inet_rcv_saddr; @@ -821,7 +842,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr, addr = inet->inet_saddr; sin->sin_port = inet->inet_sport; sin->sin_addr.s_addr = addr; - BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len, + BPF_CGROUP_RUN_SA_PROG(sk, sin, &sin_addr_len, CGROUP_INET4_GETSOCKNAME); } release_sock(sk); diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 833f2cf97178..7f3863daaa40 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1189,7 +1189,7 @@ static int arp_req_get(struct net *net, struct arpreq *r) read_lock_bh(&neigh->lock); memcpy(r->arp_ha.sa_data, neigh->ha, - min(dev->addr_len, sizeof(r->arp_ha.sa_data_min))); + min(dev->addr_len, sizeof(r->arp_ha.sa_data))); r->arp_flags = arp_state_to_flags(neigh); read_unlock_bh(&neigh->lock); @@ -1217,10 +1217,10 @@ int arp_invalidate(struct net_device *dev, __be32 ip, bool force) err = neigh_update(neigh, NULL, NUD_FAILED, NEIGH_UPDATE_F_OVERRIDE| NEIGH_UPDATE_F_ADMIN, 0); - write_lock_bh(&tbl->lock); + spin_lock_bh(&tbl->lock); neigh_release(neigh); neigh_remove_one(neigh); - write_unlock_bh(&tbl->lock); + spin_unlock_bh(&tbl->lock); } return err; diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index c2b2cda1a7e5..1614593b6d72 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -16,7 +16,7 @@ #include <net/tcp_states.h> #include <net/sock_reuseport.h> -int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +int __ip4_datagram_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; @@ -84,7 +84,7 @@ out: } EXPORT_SYMBOL(__ip4_datagram_connect); -int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +int ip4_datagram_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { int res; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 1b7fb5d935ed..4abbec2f47ef 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -582,6 +582,185 @@ relookup_failed: return ERR_PTR(err); } +struct icmp_ext_iio_addr4_subobj { + __be16 afi; + __be16 reserved; + __be32 addr4; +}; + +static unsigned int icmp_ext_iio_len(void) +{ + return sizeof(struct icmp_extobj_hdr) + + /* ifIndex */ + sizeof(__be32) + + /* Interface Address Sub-Object */ + sizeof(struct icmp_ext_iio_addr4_subobj) + + /* Interface Name Sub-Object. Length must be a multiple of 4 + * bytes. 
+ */ + ALIGN(sizeof(struct icmp_ext_iio_name_subobj), 4) + + /* MTU */ + sizeof(__be32); +} + +static unsigned int icmp_ext_max_len(u8 ext_objs) +{ + unsigned int ext_max_len; + + ext_max_len = sizeof(struct icmp_ext_hdr); + + if (ext_objs & BIT(ICMP_ERR_EXT_IIO_IIF)) + ext_max_len += icmp_ext_iio_len(); + + return ext_max_len; +} + +static __be32 icmp_ext_iio_addr4_find(const struct net_device *dev) +{ + struct in_device *in_dev; + struct in_ifaddr *ifa; + + in_dev = __in_dev_get_rcu(dev); + if (!in_dev) + return 0; + + /* It is unclear from RFC 5837 which IP address should be chosen, but + * it makes sense to choose a global unicast address. + */ + in_dev_for_each_ifa_rcu(ifa, in_dev) { + if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY) + continue; + if (ifa->ifa_scope != RT_SCOPE_UNIVERSE || + ipv4_is_multicast(ifa->ifa_address)) + continue; + return ifa->ifa_address; + } + + return 0; +} + +static void icmp_ext_iio_iif_append(struct net *net, struct sk_buff *skb, + int iif) +{ + struct icmp_ext_iio_name_subobj *name_subobj; + struct icmp_extobj_hdr *objh; + struct net_device *dev; + __be32 data; + + if (!iif) + return; + + /* Add the fields in the order specified by RFC 5837. */ + objh = skb_put(skb, sizeof(*objh)); + objh->class_num = ICMP_EXT_OBJ_CLASS_IIO; + objh->class_type = ICMP_EXT_CTYPE_IIO_ROLE(ICMP_EXT_CTYPE_IIO_ROLE_IIF); + + data = htonl(iif); + skb_put_data(skb, &data, sizeof(__be32)); + objh->class_type |= ICMP_EXT_CTYPE_IIO_IFINDEX; + + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, iif); + if (!dev) + goto out; + + data = icmp_ext_iio_addr4_find(dev); + if (data) { + struct icmp_ext_iio_addr4_subobj *addr4_subobj; + + addr4_subobj = skb_put_zero(skb, sizeof(*addr4_subobj)); + addr4_subobj->afi = htons(ICMP_AFI_IP); + addr4_subobj->addr4 = data; + objh->class_type |= ICMP_EXT_CTYPE_IIO_IPADDR; + } + + name_subobj = skb_put_zero(skb, ALIGN(sizeof(*name_subobj), 4)); + name_subobj->len = ALIGN(sizeof(*name_subobj), 4); + netdev_copy_name(dev, name_subobj->name); + objh->class_type |= ICMP_EXT_CTYPE_IIO_NAME; + + data = htonl(READ_ONCE(dev->mtu)); + skb_put_data(skb, &data, sizeof(__be32)); + objh->class_type |= ICMP_EXT_CTYPE_IIO_MTU; + +out: + rcu_read_unlock(); + objh->length = htons(skb_tail_pointer(skb) - (unsigned char *)objh); +} + +static void icmp_ext_objs_append(struct net *net, struct sk_buff *skb, + u8 ext_objs, int iif) +{ + if (ext_objs & BIT(ICMP_ERR_EXT_IIO_IIF)) + icmp_ext_iio_iif_append(net, skb, iif); +} + +static struct sk_buff * +icmp_ext_append(struct net *net, struct sk_buff *skb_in, struct icmphdr *icmph, + unsigned int room, int iif) +{ + unsigned int payload_len, ext_max_len, ext_len; + struct icmp_ext_hdr *ext_hdr; + struct sk_buff *skb; + u8 ext_objs; + int nhoff; + + switch (icmph->type) { + case ICMP_DEST_UNREACH: + case ICMP_TIME_EXCEEDED: + case ICMP_PARAMETERPROB: + break; + default: + return NULL; + } + + ext_objs = READ_ONCE(net->ipv4.sysctl_icmp_errors_extension_mask); + if (!ext_objs) + return NULL; + + ext_max_len = icmp_ext_max_len(ext_objs); + if (ICMP_EXT_ORIG_DGRAM_MIN_LEN + ext_max_len > room) + return NULL; + + skb = skb_clone(skb_in, GFP_ATOMIC); + if (!skb) + return NULL; + + nhoff = skb_network_offset(skb); + payload_len = min(skb->len - nhoff, ICMP_EXT_ORIG_DGRAM_MIN_LEN); + + if (!pskb_network_may_pull(skb, payload_len)) + goto free_skb; + + if (pskb_trim(skb, nhoff + ICMP_EXT_ORIG_DGRAM_MIN_LEN) || + __skb_put_padto(skb, nhoff + ICMP_EXT_ORIG_DGRAM_MIN_LEN, false)) + goto free_skb; + + if (pskb_expand_head(skb, 0, 
ext_max_len, GFP_ATOMIC)) + goto free_skb; + + ext_hdr = skb_put_zero(skb, sizeof(*ext_hdr)); + ext_hdr->version = ICMP_EXT_VERSION_2; + + icmp_ext_objs_append(net, skb, ext_objs, iif); + + /* Do not send an empty extension structure. */ + ext_len = skb_tail_pointer(skb) - (unsigned char *)ext_hdr; + if (ext_len == sizeof(*ext_hdr)) + goto free_skb; + + ext_hdr->checksum = ip_compute_csum(ext_hdr, ext_len); + /* The length of the original datagram in 32-bit words (RFC 4884). */ + icmph->un.reserved[1] = ICMP_EXT_ORIG_DGRAM_MIN_LEN / sizeof(u32); + + return skb; + +free_skb: + consume_skb(skb); + return NULL; +} + /* * Send an ICMP message in response to a situation * @@ -601,6 +780,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, struct icmp_bxm icmp_param; struct rtable *rt = skb_rtable(skb_in); bool apply_ratelimit = false; + struct sk_buff *ext_skb; struct ipcm_cookie ipc; struct flowi4 fl4; __be32 saddr; @@ -770,7 +950,12 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, if (room <= (int)sizeof(struct iphdr)) goto ende; - icmp_param.data_len = skb_in->len - icmp_param.offset; + ext_skb = icmp_ext_append(net, skb_in, &icmp_param.data.icmph, room, + parm->iif); + if (ext_skb) + icmp_param.skb = ext_skb; + + icmp_param.data_len = icmp_param.skb->len - icmp_param.offset; if (icmp_param.data_len > room) icmp_param.data_len = room; icmp_param.head_len = sizeof(struct icmphdr); @@ -785,6 +970,9 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, trace_icmp_send(skb_in, type, code); icmp_push_reply(sk, &icmp_param, &fl4, &ipc, &rt); + + if (ext_skb) + consume_skb(ext_skb); ende: ip_rt_put(rt); out_unlock: @@ -1502,6 +1690,7 @@ static int __net_init icmp_sk_init(struct net *net) net->ipv4.sysctl_icmp_ratelimit = 1 * HZ; net->ipv4.sysctl_icmp_ratemask = 0x1818; net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0; + net->ipv4.sysctl_icmp_errors_extension_mask = 0; net->ipv4.sysctl_icmp_msgs_per_sec = 1000; net->ipv4.sysctl_icmp_msgs_burst = 50; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index cdd1e12aac8c..b4eae731c9ba 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -712,31 +712,6 @@ struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg) release_sock(sk); - if (mem_cgroup_sockets_enabled) { - gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL; - int amt = 0; - - /* atomically get the memory usage, set and charge the - * newsk->sk_memcg. - */ - lock_sock(newsk); - - mem_cgroup_sk_alloc(newsk); - if (mem_cgroup_from_sk(newsk)) { - /* The socket has not been accepted yet, no need - * to look at newsk->sk_wmem_queued. 
- */ - amt = sk_mem_pages(newsk->sk_forward_alloc + - atomic_read(&newsk->sk_rmem_alloc)); - } - - if (amt) - mem_cgroup_sk_charge(newsk, amt, gfp); - kmem_cache_charge(newsk, gfp); - - release_sock(newsk); - } - if (req) reqsk_put(req); @@ -910,7 +885,6 @@ reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener, sk_tx_queue_clear(req_to_sk(req)); req->saved_syn = NULL; req->syncookie = 0; - req->timeout = 0; req->num_timeout = 0; req->num_retrans = 0; req->sk = NULL; @@ -938,7 +912,6 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, ireq->ireq_state = TCP_NEW_SYN_RECV; write_pnet(&ireq->ireq_net, sock_net(sk_listener)); ireq->ireq_family = sk_listener->sk_family; - req->timeout = TCP_TIMEOUT_INIT; } return req; @@ -1121,16 +1094,18 @@ static void reqsk_timer_handler(struct timer_list *t) young <<= 1; } } + syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept), &expire, &resend); - req->rsk_ops->syn_ack_timeout(req); + tcp_syn_ack_timeout(req); + if (!expire && (!resend || !tcp_rtx_synack(sk_listener, req) || inet_rsk(req)->acked)) { if (req->num_timeout++ == 0) atomic_dec(&queue->young); - mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX)); + mod_timer(&req->rsk_timer, jiffies + tcp_reqsk_timeout(req)); if (!nreq) return; @@ -1167,8 +1142,7 @@ drop: reqsk_put(oreq); } -static bool reqsk_queue_hash_req(struct request_sock *req, - unsigned long timeout) +static bool reqsk_queue_hash_req(struct request_sock *req) { bool found_dup_sk = false; @@ -1176,8 +1150,9 @@ static bool reqsk_queue_hash_req(struct request_sock *req, return false; /* The timer needs to be setup after a successful insertion. */ + req->timeout = tcp_timeout_init((struct sock *)req); timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED); - mod_timer(&req->rsk_timer, jiffies + timeout); + mod_timer(&req->rsk_timer, jiffies + req->timeout); /* before letting lookups find us, make sure all req fields * are committed to memory and refcnt initialized. 
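The two hunks above and below complete a refactor visible across this file: reqsk_queue_hash_req() now seeds req->timeout from tcp_timeout_init() at hash time, and the timer arming paths call tcp_reqsk_timeout(req) in place of the old reqsk_timeout(req, TCP_RTO_MAX). The helper itself is not shown in this diff; a minimal sketch, assuming it keeps the shift-based backoff of the reqsk_timeout() it replaces (illustrative only, not quoted from the series):

static inline unsigned long tcp_reqsk_timeout(struct request_sock *req)
{
	/* Double the stored base SYN-ACK period once per expired
	 * retransmit timer, clamped to the retransmission ceiling.
	 */
	u64 timeout = (u64)req->timeout << req->num_timeout;

	return (unsigned long)min_t(u64, timeout, TCP_RTO_MAX);
}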
@@ -1187,10 +1162,9 @@ static bool reqsk_queue_hash_req(struct request_sock *req, return true; } -bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, - unsigned long timeout) +bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req) { - if (!reqsk_queue_hash_req(req, timeout)) + if (!reqsk_queue_hash_req(req)) return false; inet_csk_reqsk_queue_added(sk); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index b7024e3d9ac3..f5826ec4bcaa 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -720,8 +720,11 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk) spin_lock(lock); if (osk) { WARN_ON_ONCE(sk->sk_hash != osk->sk_hash); - ret = sk_nulls_del_node_init_rcu(osk); - } else if (found_dup_sk) { + ret = sk_nulls_replace_node_init_rcu(osk, sk); + goto unlock; + } + + if (found_dup_sk) { *found_dup_sk = inet_ehash_lookup_by_sk(sk, list); if (*found_dup_sk) ret = false; @@ -730,6 +733,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk) if (ret) __sk_nulls_add_node_rcu(sk, list); +unlock: spin_unlock(lock); return ret; diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index c96d61d08854..d4c781a0667f 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -88,12 +88,6 @@ void inet_twsk_put(struct inet_timewait_sock *tw) } EXPORT_SYMBOL_GPL(inet_twsk_put); -static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw, - struct hlist_nulls_head *list) -{ - hlist_nulls_add_head_rcu(&tw->tw_node, list); -} - static void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo) { __inet_twsk_schedule(tw, timeo, false); @@ -113,13 +107,12 @@ void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw, { const struct inet_sock *inet = inet_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); - struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash); spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash); struct inet_bind_hashbucket *bhead, *bhead2; - /* Step 1: Put TW into bind hash. Original socket stays there too. - Note, that any socket with inet->num != 0 MUST be bound in - binding cache, even if it is closed. + /* Put TW into bind hash. Original socket stays there too. + * Note, that any socket with inet->num != 0 MUST be bound in + * binding cache, even if it is closed. */ bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num, hashinfo->bhash_size)]; @@ -141,19 +134,6 @@ void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw, spin_lock(lock); - /* Step 2: Hash TW into tcp ehash chain */ - inet_twsk_add_node_rcu(tw, &ehead->chain); - - /* Step 3: Remove SK from hash chain */ - if (__sk_nulls_del_node_init_rcu(sk)) - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); - - - /* Ensure above writes are committed into memory before updating the - * refcount. - * Provides ordering vs later refcount_inc(). - */ - smp_wmb(); /* tw_refcnt is set to 3 because we have : * - one reference for bhash chain. * - one reference for ehash chain. @@ -163,6 +143,15 @@ void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw, */ refcount_set(&tw->tw_refcnt, 3); + /* Ensure tw_refcnt has been set before tw is published. + * smp_wmb() provides the necessary memory barrier to enforce this + * ordering. 
+ */ + smp_wmb(); + + hlist_nulls_replace_init_rcu(&sk->sk_nulls_node, &tw->tw_node); + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); + inet_twsk_schedule(tw, timeo); spin_unlock(lock); diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 273578579a6b..19d3141dad1f 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -141,6 +141,8 @@ #include <linux/mroute.h> #include <linux/netlink.h> #include <net/dst_metadata.h> +#include <net/udp.h> +#include <net/tcp.h> /* * Process Router Attention IP option (RFC 2113) @@ -317,8 +319,6 @@ static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph, ip_hdr(hint)->tos == iph->tos; } -int tcp_v4_early_demux(struct sk_buff *skb); -enum skb_drop_reason udp_v4_early_demux(struct sk_buff *skb); static int ip_rcv_finish_core(struct net *net, struct sk_buff *skb, struct net_device *dev, const struct sk_buff *hint) diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 5321c5801c64..ad56588107cc 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -286,7 +286,7 @@ void ping_close(struct sock *sk, long timeout) } EXPORT_IPV6_MOD_GPL(ping_close); -static int ping_pre_connect(struct sock *sk, struct sockaddr *uaddr, +static int ping_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { /* This check is replicated from __ip4_datagram_connect() and @@ -301,7 +301,7 @@ static int ping_pre_connect(struct sock *sk, struct sockaddr *uaddr, /* Checks the bind address and possibly modifies sk->sk_bound_dev_if. */ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, - struct sockaddr *uaddr, int addr_len) + struct sockaddr_unsized *uaddr, int addr_len) { struct net *net = sock_net(sk); if (sk->sk_family == AF_INET) { @@ -387,7 +387,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, return 0; } -static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr) +static void ping_set_saddr(struct sock *sk, struct sockaddr_unsized *saddr) { if (saddr->sa_family == AF_INET) { struct inet_sock *isk = inet_sk(sk); @@ -407,7 +407,7 @@ static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr) * Moreover, we don't allow binding to multi- and broadcast addresses. */ -int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) +int ping_bind(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { struct inet_sock *isk = inet_sk(sk); unsigned short snum; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index d54ebb7df966..5998c4cc6f47 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -697,7 +697,8 @@ static void raw_destroy(struct sock *sk) } /* This gets rid of all the nasties in af_inet. 
-DaveM */ -static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) +static int raw_bind(struct sock *sk, struct sockaddr_unsized *uaddr, + int addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 24dbc603cc44..35367f8e2da3 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -48,6 +48,8 @@ static int tcp_plb_max_rounds = 31; static int tcp_plb_max_cong_thresh = 256; static unsigned int tcp_tw_reuse_delay_max = TCP_PAWS_MSL * MSEC_PER_SEC; static int tcp_ecn_mode_max = 2; +static u32 icmp_errors_extension_mask_all = + GENMASK_U8(ICMP_ERR_EXT_COUNT - 1, 0); /* obsolete */ static int sysctl_tcp_low_latency __read_mostly; @@ -675,6 +677,15 @@ static struct ctl_table ipv4_net_table[] = { .extra2 = SYSCTL_ONE }, { + .procname = "icmp_errors_extension_mask", + .data = &init_net.ipv4.sysctl_icmp_errors_extension_mask, + .maxlen = sizeof(u8), + .mode = 0644, + .proc_handler = proc_dou8vec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &icmp_errors_extension_mask_all, + }, + { .procname = "icmp_ratelimit", .data = &init_net.ipv4.sysctl_icmp_ratelimit, .maxlen = sizeof(int), @@ -1441,6 +1452,15 @@ static struct ctl_table ipv4_net_table[] = { .proc_handler = proc_doulongvec_minmax, }, { + .procname = "tcp_comp_sack_rtt_percent", + .data = &init_net.ipv4.sysctl_tcp_comp_sack_rtt_percent, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ONE, + .extra2 = SYSCTL_ONE_THOUSAND, + }, + { .procname = "tcp_comp_sack_slack_ns", .data = &init_net.ipv4.sysctl_tcp_comp_sack_slack_ns, .maxlen = sizeof(unsigned long), diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 8a18aeca7ab0..dee578aad690 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -243,7 +243,7 @@ #define pr_fmt(fmt) "TCP: " fmt -#include <crypto/hash.h> +#include <crypto/md5.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> @@ -253,7 +253,6 @@ #include <linux/init.h> #include <linux/fs.h> #include <linux/skbuff.h> -#include <linux/scatterlist.h> #include <linux/splice.h> #include <linux/net.h> #include <linux/socket.h> @@ -425,7 +424,6 @@ void tcp_md5_destruct_sock(struct sock *sk) tcp_clear_md5_list(sk); kfree(rcu_replace_pointer(tp->md5sig_info, NULL, 1)); static_branch_slow_dec_deferred(&tcp_md5_needed); - tcp_md5_release_sigpool(); } } EXPORT_IPV6_MOD_GPL(tcp_md5_destruct_sock); @@ -928,7 +926,8 @@ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp, } __kfree_skb(skb); } else { - sk->sk_prot->enter_memory_pressure(sk); + if (!sk->sk_bypass_prot_mem) + tcp_enter_memory_pressure(sk); sk_stream_moderate_sndbuf(sk); } return NULL; @@ -1062,7 +1061,7 @@ int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied, } } flags = (msg->msg_flags & MSG_DONTWAIT) ? 
O_NONBLOCK : 0; - err = __inet_stream_connect(sk->sk_socket, uaddr, + err = __inet_stream_connect(sk->sk_socket, (struct sockaddr_unsized *)uaddr, msg->msg_namelen, flags, 1); /* fastopen_req could already be freed in __inet_stream_connect * if the connection times out or gets rst @@ -1557,8 +1556,10 @@ void __tcp_cleanup_rbuf(struct sock *sk, int copied) time_to_ack = true; } } - if (time_to_ack) + if (time_to_ack) { + tcp_mstamp_refresh(tp); tcp_send_ack(sk); + } } void tcp_cleanup_rbuf(struct sock *sk, int copied) @@ -3583,9 +3584,12 @@ static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); EXPORT_IPV6_MOD(tcp_tx_delay_enabled); -static void tcp_enable_tx_delay(void) +static void tcp_enable_tx_delay(struct sock *sk, int val) { - if (!static_branch_unlikely(&tcp_tx_delay_enabled)) { + struct tcp_sock *tp = tcp_sk(sk); + s32 delta = (val - tp->tcp_tx_delay) << 3; + + if (val && !static_branch_unlikely(&tcp_tx_delay_enabled)) { static int __tcp_tx_delay_enabled = 0; if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) { @@ -3593,6 +3597,22 @@ static void tcp_enable_tx_delay(void) pr_info("TCP_TX_DELAY enabled\n"); } } + /* If we change tcp_tx_delay on a live flow, adjust tp->srtt_us, + * tp->rtt_min, icsk_rto and sk->sk_pacing_rate. + * This is best effort. + */ + if (delta && sk->sk_state == TCP_ESTABLISHED) { + s64 srtt = (s64)tp->srtt_us + delta; + + tp->srtt_us = clamp_t(s64, srtt, 1, ~0U); + + /* Note: does not deal with non zero icsk_backoff */ + tcp_set_rto(sk); + + minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U); + + tcp_update_pacing_rate(sk); + } } /* When set indicates to always queue non-full frames. Later the user clears @@ -4119,8 +4139,12 @@ ao_parse: tp->recvmsg_inq = val; break; case TCP_TX_DELAY: - if (val) - tcp_enable_tx_delay(); + /* tp->srtt_us is u32, and is shifted by 3 */ + if (val < 0 || val >= (1U << (31 - 3))) { + err = -EINVAL; + break; + } + tcp_enable_tx_delay(sk, val); WRITE_ONCE(tp->tcp_tx_delay, val); break; default: @@ -4815,52 +4839,45 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, EXPORT_IPV6_MOD(tcp_getsockopt); #ifdef CONFIG_TCP_MD5SIG -int tcp_md5_sigpool_id = -1; -EXPORT_IPV6_MOD_GPL(tcp_md5_sigpool_id); - -int tcp_md5_alloc_sigpool(void) +void tcp_md5_hash_skb_data(struct md5_ctx *ctx, const struct sk_buff *skb, + unsigned int header_len) { - size_t scratch_size; - int ret; + const unsigned int head_data_len = skb_headlen(skb) > header_len ? + skb_headlen(skb) - header_len : 0; + const struct skb_shared_info *shi = skb_shinfo(skb); + struct sk_buff *frag_iter; + unsigned int i; - scratch_size = sizeof(union tcp_md5sum_block) + sizeof(struct tcphdr); - ret = tcp_sigpool_alloc_ahash("md5", scratch_size); - if (ret >= 0) { - /* As long as any md5 sigpool was allocated, the return - * id would stay the same. Re-write the id only for the case - * when previously all MD5 keys were deleted and this call - * allocates the first MD5 key, which may return a different - * sigpool id than was used previously. 
- */ - WRITE_ONCE(tcp_md5_sigpool_id, ret); /* Avoids the compiler potentially being smart here */ - return 0; - } - return ret; -} + md5_update(ctx, (const u8 *)tcp_hdr(skb) + header_len, head_data_len); -void tcp_md5_release_sigpool(void) -{ - tcp_sigpool_release(READ_ONCE(tcp_md5_sigpool_id)); -} + for (i = 0; i < shi->nr_frags; ++i) { + const skb_frag_t *f = &shi->frags[i]; + u32 p_off, p_len, copied; + const void *vaddr; + struct page *p; -void tcp_md5_add_sigpool(void) -{ - tcp_sigpool_get(READ_ONCE(tcp_md5_sigpool_id)); + skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), + p, p_off, p_len, copied) { + vaddr = kmap_local_page(p); + md5_update(ctx, vaddr + p_off, p_len); + kunmap_local(vaddr); + } + } + + skb_walk_frags(skb, frag_iter) + tcp_md5_hash_skb_data(ctx, frag_iter, 0); } +EXPORT_IPV6_MOD(tcp_md5_hash_skb_data); -int tcp_md5_hash_key(struct tcp_sigpool *hp, - const struct tcp_md5sig_key *key) +void tcp_md5_hash_key(struct md5_ctx *ctx, + const struct tcp_md5sig_key *key) { u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ - struct scatterlist sg; - - sg_init_one(&sg, key->key, keylen); - ahash_request_set_crypt(hp->req, &sg, NULL, keylen); /* We use data_race() because tcp_md5_do_add() might change * key->key under us */ - return data_race(crypto_ahash_update(hp->req)); + data_race(({ md5_update(ctx, key->key, keylen), 0; })); } EXPORT_IPV6_MOD(tcp_md5_hash_key); @@ -4871,19 +4888,16 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, int family, int l3index, const __u8 *hash_location) { /* This gets called for each TCP segment that has TCP-MD5 option. - * We have 3 drop cases: - * o No MD5 hash and one expected. - * o MD5 hash and we're not expecting one. - * o MD5 hash and its wrong. + * We have 2 drop cases: + * o An MD5 signature is present, but we're not expecting one. + * o The MD5 signature is wrong. */ const struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_key *key; u8 newhash[16]; - int genhash; key = tcp_md5_do_lookup(sk, l3index, saddr, family); - - if (!key && hash_location) { + if (!key) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); trace_tcp_hash_md5_unexpected(sk, skb); return SKB_DROP_REASON_TCP_MD5UNEXPECTED; @@ -4894,11 +4908,10 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, * IPv4-mapped case. */ if (family == AF_INET) - genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb); + tcp_v4_md5_hash_skb(newhash, key, NULL, skb); else - genhash = tp->af_specific->calc_md5_hash(newhash, key, - NULL, skb); - if (genhash || memcmp(hash_location, newhash, 16) != 0) { + tp->af_specific->calc_md5_hash(newhash, key, NULL, skb); + if (memcmp(hash_location, newhash, 16) != 0) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); trace_tcp_hash_md5_mismatch(sk, skb); return SKB_DROP_REASON_TCP_MD5FAILURE; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e4a979b75cc6..9df5d7515605 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -937,9 +937,15 @@ void tcp_rcv_space_adjust(struct sock *sk) trace_tcp_rcv_space_adjust(sk); - tcp_mstamp_refresh(tp); + if (unlikely(!tp->rcv_rtt_est.rtt_us)) + return; + + /* We do not refresh tp->tcp_mstamp here. + * Some platforms have expensive ktime_get() implementations. + * Using the last cached value is enough for DRS. 
+ */ time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time); - if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0) + if (time < (tp->rcv_rtt_est.rtt_us >> 3)) return; /* Number of bytes copied to user in last RTT */ @@ -1102,7 +1108,7 @@ static void tcp_rtt_estimator(struct sock *sk, long mrtt_us) tp->srtt_us = max(1U, srtt); } -static void tcp_update_pacing_rate(struct sock *sk) +void tcp_update_pacing_rate(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); u64 rate; @@ -1139,7 +1145,7 @@ static void tcp_update_pacing_rate(struct sock *sk) /* Calculate rto without backoff. This is the second half of Van Jacobson's * routine referred to above. */ -static void tcp_set_rto(struct sock *sk) +void tcp_set_rto(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); /* Old crap is replaced with new one. 8) @@ -5887,7 +5893,9 @@ static inline void tcp_data_snd_check(struct sock *sk) static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) { struct tcp_sock *tp = tcp_sk(sk); - unsigned long rtt, delay; + struct net *net = sock_net(sk); + unsigned long rtt; + u64 delay; /* More than one full frame received... */ if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && @@ -5906,7 +5914,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) * Defer the ack until tcp_release_cb(). */ if (sock_owned_by_user_nocheck(sk) && - READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_backlog_ack_defer)) { + READ_ONCE(net->ipv4.sysctl_tcp_backlog_ack_defer)) { set_bit(TCP_ACK_DEFERRED, &sk->sk_tsq_flags); return; } @@ -5921,7 +5929,7 @@ send_now: } if (!tcp_is_sack(tp) || - tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)) + tp->compressed_ack >= READ_ONCE(net->ipv4.sysctl_tcp_comp_sack_nr)) goto send_now; if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) { @@ -5936,18 +5944,26 @@ send_now: if (hrtimer_is_queued(&tp->compressed_ack_timer)) return; - /* compress ack timer : 5 % of rtt, but no more than tcp_comp_sack_delay_ns */ + /* compress ack timer : comp_sack_rtt_percent of rtt, + * but no more than tcp_comp_sack_delay_ns. + */ rtt = tp->rcv_rtt_est.rtt_us; if (tp->srtt_us && tp->srtt_us < rtt) rtt = tp->srtt_us; - delay = min_t(unsigned long, - READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns), - rtt * (NSEC_PER_USEC >> 3)/20); + /* delay = (rtt >> 3) * NSEC_PER_USEC * comp_sack_rtt_percent / 100 + * -> + * delay = rtt * 1.25 * comp_sack_rtt_percent + */ + delay = (u64)(rtt + (rtt >> 2)) * + READ_ONCE(net->ipv4.sysctl_tcp_comp_sack_rtt_percent); + + delay = min(delay, READ_ONCE(net->ipv4.sysctl_tcp_comp_sack_delay_ns)); + sock_hold(sk); hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay), - READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns), + READ_ONCE(net->ipv4.sysctl_tcp_comp_sack_slack_ns), HRTIMER_MODE_REL_PINNED_SOFT); } @@ -7525,15 +7541,11 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, sock_put(fastopen_sk); } else { tcp_rsk(req)->tfo_listener = false; - if (!want_cookie) { - req->timeout = tcp_timeout_init((struct sock *)req); - if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, - req->timeout))) { - reqsk_free(req); - dst_release(dst); - return 0; - } - + if (!want_cookie && + unlikely(!inet_csk_reqsk_queue_hash_add(sk, req))) { + reqsk_free(req); + dst_release(dst); + return 0; } af_ops->send_synack(sk, dst, &fl, req, &foc, !want_cookie ? 
TCP_SYNACK_NORMAL : diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index b1fcf3e4e1ce..a7d9fec2950b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -53,6 +53,7 @@ #include <linux/module.h> #include <linux/random.h> #include <linux/cache.h> +#include <linux/fips.h> #include <linux/jhash.h> #include <linux/init.h> #include <linux/times.h> @@ -86,14 +87,13 @@ #include <linux/btf_ids.h> #include <linux/skbuff_ref.h> -#include <crypto/hash.h> -#include <linux/scatterlist.h> +#include <crypto/md5.h> #include <trace/events/tcp.h> #ifdef CONFIG_TCP_MD5SIG -static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, - __be32 daddr, __be32 saddr, const struct tcphdr *th); +static void tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, + __be32 daddr, __be32 saddr, const struct tcphdr *th); #endif struct inet_hashinfo tcp_hashinfo; @@ -205,7 +205,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) } EXPORT_IPV6_MOD_GPL(tcp_twsk_unique); -static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr, +static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { /* This check is replicated from tcp_v4_connect() and intended to @@ -221,7 +221,7 @@ static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr, } /* This will initiate an outgoing connection. */ -int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +int tcp_v4_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; struct inet_timewait_death_row *tcp_death_row; @@ -754,7 +754,6 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb, struct tcp_md5sig_key *key = NULL; unsigned char newhash[16]; struct sock *sk1 = NULL; - int genhash; #endif u64 transmit_time = 0; struct sock *ctl_sk; @@ -840,11 +839,9 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb, if (!key) goto out; - - genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb); - if (genhash || memcmp(md5_hash_location, newhash, 16) != 0) + tcp_v4_md5_hash_skb(newhash, key, NULL, skb); + if (memcmp(md5_hash_location, newhash, 16) != 0) goto out; - } if (key) { @@ -1425,13 +1422,13 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, struct tcp_sock *tp = tcp_sk(sk); if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) { - if (tcp_md5_alloc_sigpool()) - return -ENOMEM; + if (fips_enabled) { + pr_warn_once("TCP-MD5 support is disabled due to FIPS\n"); + return -EOPNOTSUPP; + } - if (tcp_md5sig_info_add(sk, GFP_KERNEL)) { - tcp_md5_release_sigpool(); + if (tcp_md5sig_info_add(sk, GFP_KERNEL)) return -ENOMEM; - } if (!static_branch_inc(&tcp_md5_needed.key)) { struct tcp_md5sig_info *md5sig; @@ -1439,7 +1436,6 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk)); rcu_assign_pointer(tp->md5sig_info, NULL); kfree_rcu(md5sig, rcu); - tcp_md5_release_sigpool(); return -EUSERS; } } @@ -1456,12 +1452,9 @@ int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr, struct tcp_sock *tp = tcp_sk(sk); if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) { - tcp_md5_add_sigpool(); - if (tcp_md5sig_info_add(sk, sk_gfp_mask(sk, GFP_ATOMIC))) { - tcp_md5_release_sigpool(); + if (tcp_md5sig_info_add(sk, sk_gfp_mask(sk, GFP_ATOMIC))) return -ENOMEM; - } if 
(!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key)) { struct tcp_md5sig_info *md5sig; @@ -1470,7 +1463,6 @@ int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr, net_warn_ratelimited("Too many TCP-MD5 keys in the system\n"); rcu_assign_pointer(tp->md5sig_info, NULL); kfree_rcu(md5sig, rcu); - tcp_md5_release_sigpool(); return -EUSERS; } } @@ -1578,66 +1570,44 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname, cmd.tcpm_key, cmd.tcpm_keylen); } -static int tcp_v4_md5_hash_headers(struct tcp_sigpool *hp, - __be32 daddr, __be32 saddr, - const struct tcphdr *th, int nbytes) +static void tcp_v4_md5_hash_headers(struct md5_ctx *ctx, + __be32 daddr, __be32 saddr, + const struct tcphdr *th, int nbytes) { - struct tcp4_pseudohdr *bp; - struct scatterlist sg; - struct tcphdr *_th; - - bp = hp->scratch; - bp->saddr = saddr; - bp->daddr = daddr; - bp->pad = 0; - bp->protocol = IPPROTO_TCP; - bp->len = cpu_to_be16(nbytes); - - _th = (struct tcphdr *)(bp + 1); - memcpy(_th, th, sizeof(*th)); - _th->check = 0; + struct { + struct tcp4_pseudohdr ip; + struct tcphdr tcp; + } h; - sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th)); - ahash_request_set_crypt(hp->req, &sg, NULL, - sizeof(*bp) + sizeof(*th)); - return crypto_ahash_update(hp->req); + h.ip.saddr = saddr; + h.ip.daddr = daddr; + h.ip.pad = 0; + h.ip.protocol = IPPROTO_TCP; + h.ip.len = cpu_to_be16(nbytes); + h.tcp = *th; + h.tcp.check = 0; + md5_update(ctx, (const u8 *)&h, sizeof(h.ip) + sizeof(h.tcp)); } -static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, - __be32 daddr, __be32 saddr, const struct tcphdr *th) +static noinline_for_stack void +tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, + __be32 daddr, __be32 saddr, const struct tcphdr *th) { - struct tcp_sigpool hp; + struct md5_ctx ctx; - if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp)) - goto clear_hash_nostart; - - if (crypto_ahash_init(hp.req)) - goto clear_hash; - if (tcp_v4_md5_hash_headers(&hp, daddr, saddr, th, th->doff << 2)) - goto clear_hash; - if (tcp_md5_hash_key(&hp, key)) - goto clear_hash; - ahash_request_set_crypt(hp.req, NULL, md5_hash, 0); - if (crypto_ahash_final(hp.req)) - goto clear_hash; - - tcp_sigpool_end(&hp); - return 0; - -clear_hash: - tcp_sigpool_end(&hp); -clear_hash_nostart: - memset(md5_hash, 0, 16); - return 1; + md5_init(&ctx); + tcp_v4_md5_hash_headers(&ctx, daddr, saddr, th, th->doff << 2); + tcp_md5_hash_key(&ctx, key); + md5_final(&ctx, md5_hash); } -int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, - const struct sock *sk, - const struct sk_buff *skb) +noinline_for_stack void +tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, + const struct sock *sk, const struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); - struct tcp_sigpool hp; __be32 saddr, daddr; + struct md5_ctx ctx; if (sk) { /* valid for establish/request sockets */ saddr = sk->sk_rcv_saddr; @@ -1648,30 +1618,11 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, daddr = iph->daddr; } - if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp)) - goto clear_hash_nostart; - - if (crypto_ahash_init(hp.req)) - goto clear_hash; - - if (tcp_v4_md5_hash_headers(&hp, daddr, saddr, th, skb->len)) - goto clear_hash; - if (tcp_sigpool_hash_skb_data(&hp, skb, th->doff << 2)) - goto clear_hash; - if (tcp_md5_hash_key(&hp, key)) - goto clear_hash; - ahash_request_set_crypt(hp.req, NULL, md5_hash, 0); - if (crypto_ahash_final(hp.req)) - goto clear_hash; - 
- tcp_sigpool_end(&hp); - return 0; - -clear_hash: - tcp_sigpool_end(&hp); -clear_hash_nostart: - memset(md5_hash, 0, 16); - return 1; + md5_init(&ctx); + tcp_v4_md5_hash_headers(&ctx, daddr, saddr, th, skb->len); + tcp_md5_hash_skb_data(&ctx, skb, th->doff << 2); + tcp_md5_hash_key(&ctx, key); + md5_final(&ctx, md5_hash); } EXPORT_IPV6_MOD(tcp_v4_md5_hash_skb); @@ -1709,7 +1660,6 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = { .send_ack = tcp_v4_reqsk_send_ack, .destructor = tcp_v4_reqsk_destructor, .send_reset = tcp_v4_send_reset, - .syn_ack_timeout = tcp_syn_ack_timeout, }; const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { @@ -3645,6 +3595,7 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC; net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC; net->ipv4.sysctl_tcp_comp_sack_nr = 44; + net->ipv4.sysctl_tcp_comp_sack_rtt_percent = 33; net->ipv4.sysctl_tcp_backlog_ack_defer = 1; net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE; net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0; diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c index 52fe17167460..976b56644a8a 100644 --- a/net/ipv4/tcp_lp.c +++ b/net/ipv4/tcp_lp.c @@ -23,9 +23,9 @@ * Original Author: * Aleksandar Kuzmanovic <akuzma@northwestern.edu> * Available from: - * http://www.ece.rice.edu/~akuzma/Doc/akuzma/TCP-LP.pdf + * https://users.cs.northwestern.edu/~akuzma/doc/TCP-LP-ToN.pdf * Original implementation for 2.4.19: - * http://www-ece.rice.edu/networks/TCP-LP/ + * https://users.cs.northwestern.edu/~akuzma/rice/TCP-LP/linux/tcp-lp-linux.htm * * 2.6.x module Authors: * Wong Hoi Sing, Edison <hswong3i@gmail.com> @@ -113,6 +113,8 @@ static void tcp_lp_init(struct sock *sk) /** * tcp_lp_cong_avoid * @sk: socket to avoid congesting + * @ack: current ack sequence number + * @acked: number of ACKed packets * * Implementation of cong_avoid. * Will only call newReno CA when away from inference. @@ -261,6 +263,7 @@ static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt) /** * tcp_lp_pkts_acked * @sk: socket requiring congestion avoidance calculations + * @sample: ACK sample containing timing and rate information * * Implementation of pkts_acked. * Deal with active drop under Early Congestion Indication. diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 2ec8c6f1cdcc..d8f4d813e8dd 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -312,7 +312,6 @@ static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw) return; if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key)) goto out_free; - tcp_md5_add_sigpool(); } return; out_free: @@ -406,7 +405,6 @@ void tcp_twsk_destructor(struct sock *sk) if (twsk->tw_md5_key) { kfree(twsk->tw_md5_key); static_branch_slow_dec_deferred(&tcp_md5_needed); - tcp_md5_release_sigpool(); } } #endif @@ -716,7 +714,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, * it can be estimated (approximately) * from another data. 
*/ - tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ; + tmp_opt.ts_recent_stamp = ktime_get_seconds() - + tcp_reqsk_timeout(req) / HZ; paws_reject = tcp_paws_reject(&tmp_opt, th->rst); } } @@ -755,7 +754,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, !tcp_rtx_synack(sk, req)) { unsigned long expires = jiffies; - expires += reqsk_timeout(req, TCP_RTO_MAX); + expires += tcp_reqsk_timeout(req); if (!fastopen) mod_timer_pending(&req->rsk_timer, expires); else diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index b94efb3050d2..479afb714bdf 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -40,6 +40,7 @@ #include <net/tcp.h> #include <net/tcp_ecn.h> #include <net/mptcp.h> +#include <net/smc.h> #include <net/proto_memory.h> #include <net/psp.h> @@ -802,34 +803,36 @@ static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp, mptcp_options_write(th, ptr, tp, opts); } -static void smc_set_option(const struct tcp_sock *tp, +static void smc_set_option(struct tcp_sock *tp, struct tcp_out_options *opts, unsigned int *remaining) { #if IS_ENABLED(CONFIG_SMC) - if (static_branch_unlikely(&tcp_have_smc)) { - if (tp->syn_smc) { - if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) { - opts->options |= OPTION_SMC; - *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; - } + if (static_branch_unlikely(&tcp_have_smc) && tp->syn_smc) { + tp->syn_smc = !!smc_call_hsbpf(1, tp, syn_option); + /* re-check syn_smc */ + if (tp->syn_smc && + *remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) { + opts->options |= OPTION_SMC; + *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; } } #endif } static void smc_set_option_cond(const struct tcp_sock *tp, - const struct inet_request_sock *ireq, + struct inet_request_sock *ireq, struct tcp_out_options *opts, unsigned int *remaining) { #if IS_ENABLED(CONFIG_SMC) - if (static_branch_unlikely(&tcp_have_smc)) { - if (tp->syn_smc && ireq->smc_ok) { - if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) { - opts->options |= OPTION_SMC; - *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; - } + if (static_branch_unlikely(&tcp_have_smc) && tp->syn_smc && ireq->smc_ok) { + ireq->smc_ok = !!smc_call_hsbpf(1, tp, synack_option, ireq); + /* re-check smc_ok */ + if (ireq->smc_ok && + *remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) { + opts->options |= OPTION_SMC; + *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; } } #endif @@ -3743,12 +3746,17 @@ void sk_forced_mem_schedule(struct sock *sk, int size) delta = size - sk->sk_forward_alloc; if (delta <= 0) return; + amt = sk_mem_pages(delta); sk_forward_alloc_add(sk, amt << PAGE_SHIFT); - sk_memory_allocated_add(sk, amt); if (mem_cgroup_sk_enabled(sk)) mem_cgroup_sk_charge(sk, amt, gfp_memcg_charge() | __GFP_NOFAIL); + + if (sk->sk_bypass_prot_mem) + return; + + sk_memory_allocated_add(sk, amt); } /* Send a FIN. The caller locks the socket for us. diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 2dd73a4e8e51..0672c3d8f4f1 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -458,7 +458,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req) struct tcp_sock *tp = tcp_sk(sk); int max_retries; - req->rsk_ops->syn_ack_timeout(req); + tcp_syn_ack_timeout(req); /* Add one more retry for fastopen. 
* Paired with WRITE_ONCE() in tcp_sock_set_syncnt() @@ -752,7 +752,6 @@ void tcp_syn_ack_timeout(const struct request_sock *req) __NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS); } -EXPORT_IPV6_MOD(tcp_syn_ack_timeout); void tcp_reset_keepalive_timer(struct sock *sk, unsigned long len) { diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 30dfbf73729d..ffe074cb5865 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2159,7 +2159,8 @@ csum_copy_err: goto try_again; } -int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +int udp_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, + int addr_len) { /* This check is replicated from __ip4_datagram_connect() and * intended to prevent BPF program called below from accessing bytes @@ -2172,7 +2173,8 @@ int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) } EXPORT_IPV6_MOD(udp_pre_connect); -static int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +static int udp_connect(struct sock *sk, struct sockaddr_unsized *uaddr, + int addr_len) { int res; diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c index 54386e06a813..b1f667c52cb2 100644 --- a/net/ipv4/udp_tunnel_core.c +++ b/net/ipv4/udp_tunnel_core.c @@ -29,7 +29,7 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, udp_addr.sin_family = AF_INET; udp_addr.sin_addr = cfg->local_ip; udp_addr.sin_port = cfg->local_udp_port; - err = kernel_bind(sock, (struct sockaddr *)&udp_addr, + err = kernel_bind(sock, (struct sockaddr_unsized *)&udp_addr, sizeof(udp_addr)); if (err < 0) goto error; @@ -38,7 +38,7 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, udp_addr.sin_family = AF_INET; udp_addr.sin_addr = cfg->peer_ip; udp_addr.sin_port = cfg->peer_udp_port; - err = kernel_connect(sock, (struct sockaddr *)&udp_addr, + err = kernel_connect(sock, (struct sockaddr_unsized *)&udp_addr, sizeof(udp_addr), 0); if (err < 0) goto error; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 1b0314644e0c..b705751eb73c 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -277,7 +277,7 @@ out_sk_release: goto out; } -static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, +static int __inet6_bind(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len, u32 flags) { struct sockaddr_in6 *addr = (struct sockaddr_in6 *)uaddr; @@ -438,7 +438,7 @@ out_unlock: goto out; } -int inet6_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len) +int inet6_bind_sk(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { u32 flags = BIND_WITH_LOCK; const struct proto *prot; @@ -465,7 +465,7 @@ int inet6_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len) } /* bind for INET6 API */ -int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +int inet6_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { return inet6_bind_sk(sock->sk, uaddr, addr_len); } @@ -960,6 +960,7 @@ static int __net_init inet6_net_init(struct net *net) net->ipv6.sysctl.icmpv6_echo_ignore_multicast = 0; net->ipv6.sysctl.icmpv6_echo_ignore_anycast = 0; net->ipv6.sysctl.icmpv6_error_anycast_as_unicast = 0; + net->ipv6.sysctl.icmpv6_errors_extension_mask = 0; /* By default, rate limit error messages. * Except for pmtu discovery, it would break it. 
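The icmpv6_errors_extension_mask default initialized above gates the IPv6 counterpart (net/ipv6/icmp.c, below) of the ICMPv4 extension logic earlier in this diff. Both encode the RFC 4884 length field in words: the padded original datagram length divided by sizeof(u32) lands in icmph->un.reserved[1] for ICMPv4, and divided by sizeof(u64) in icmp6h->icmp6_datagram_len for ICMPv6. A receiver reverses that encoding to find the extension structure; a minimal sketch, assuming only the layouts shown in those hunks (the flat header view and helper are illustrative, not part of the patch):

#include <stddef.h>
#include <stdint.h>

/* Illustrative flat view of the 4-byte extension header; the kernel's
 * struct icmp_ext_hdr expresses the version field with bitfields.
 */
struct ext_hdr_view {
	uint8_t  version;	/* ICMP_EXT_VERSION_2 in the high nibble */
	uint8_t  reserved;
	uint16_t checksum;	/* Internet checksum over the structure */
};

/* payload points at the embedded original datagram inside the ICMP
 * error; dgram_words is the advertised length field; word_size is 4
 * for ICMPv4 (32-bit words) and 8 for ICMPv6 (64-bit words).
 */
static const struct ext_hdr_view *
ext_locate(const uint8_t *payload, size_t len,
	   unsigned int dgram_words, unsigned int word_size)
{
	size_t dgram_len = (size_t)dgram_words * word_size;

	if (!dgram_words)
		return NULL;	/* zero length: no extension structure */
	if (len < dgram_len + sizeof(struct ext_hdr_view))
		return NULL;	/* truncated message */
	return (const struct ext_hdr_view *)(payload + dgram_len);
}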
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 33ebe93d80e3..83e03176819c 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -138,7 +138,7 @@ void ip6_datagram_release_cb(struct sock *sk) } EXPORT_SYMBOL_GPL(ip6_datagram_release_cb); -int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, +int __ip6_datagram_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; @@ -194,7 +194,7 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, sin.sin_port = usin->sin6_port; err = __ip4_datagram_connect(sk, - (struct sockaddr *) &sin, + (struct sockaddr_unsized *)&sin, sizeof(sin)); ipv4_connected: @@ -271,7 +271,7 @@ out: } EXPORT_SYMBOL_GPL(__ip6_datagram_connect); -int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +int ip6_datagram_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { int res; @@ -282,7 +282,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) } EXPORT_SYMBOL_GPL(ip6_datagram_connect); -int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, +int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, uaddr); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 56c974cf75d1..5d2f90babaa5 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -444,6 +444,193 @@ static int icmp6_iif(const struct sk_buff *skb) return icmp6_dev(skb)->ifindex; } +struct icmp6_ext_iio_addr6_subobj { + __be16 afi; + __be16 reserved; + struct in6_addr addr6; +}; + +static unsigned int icmp6_ext_iio_len(void) +{ + return sizeof(struct icmp_extobj_hdr) + + /* ifIndex */ + sizeof(__be32) + + /* Interface Address Sub-Object */ + sizeof(struct icmp6_ext_iio_addr6_subobj) + + /* Interface Name Sub-Object. Length must be a multiple of 4 + * bytes. + */ + ALIGN(sizeof(struct icmp_ext_iio_name_subobj), 4) + + /* MTU */ + sizeof(__be32); +} + +static unsigned int icmp6_ext_max_len(u8 ext_objs) +{ + unsigned int ext_max_len; + + ext_max_len = sizeof(struct icmp_ext_hdr); + + if (ext_objs & BIT(ICMP_ERR_EXT_IIO_IIF)) + ext_max_len += icmp6_ext_iio_len(); + + return ext_max_len; +} + +static struct in6_addr *icmp6_ext_iio_addr6_find(const struct net_device *dev) +{ + struct inet6_dev *in6_dev; + struct inet6_ifaddr *ifa; + + in6_dev = __in6_dev_get(dev); + if (!in6_dev) + return NULL; + + /* It is unclear from RFC 5837 which IP address should be chosen, but + * it makes sense to choose a global unicast address. + */ + list_for_each_entry_rcu(ifa, &in6_dev->addr_list, if_list) { + if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DADFAILED)) + continue; + if (ipv6_addr_type(&ifa->addr) != IPV6_ADDR_UNICAST || + ipv6_addr_src_scope(&ifa->addr) != IPV6_ADDR_SCOPE_GLOBAL) + continue; + return &ifa->addr; + } + + return NULL; +} + +static void icmp6_ext_iio_iif_append(struct net *net, struct sk_buff *skb, + int iif) +{ + struct icmp_ext_iio_name_subobj *name_subobj; + struct icmp_extobj_hdr *objh; + struct net_device *dev; + struct in6_addr *addr6; + __be32 data; + + if (!iif) + return; + + /* Add the fields in the order specified by RFC 5837. 
*/ + objh = skb_put(skb, sizeof(*objh)); + objh->class_num = ICMP_EXT_OBJ_CLASS_IIO; + objh->class_type = ICMP_EXT_CTYPE_IIO_ROLE(ICMP_EXT_CTYPE_IIO_ROLE_IIF); + + data = htonl(iif); + skb_put_data(skb, &data, sizeof(__be32)); + objh->class_type |= ICMP_EXT_CTYPE_IIO_IFINDEX; + + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, iif); + if (!dev) + goto out; + + addr6 = icmp6_ext_iio_addr6_find(dev); + if (addr6) { + struct icmp6_ext_iio_addr6_subobj *addr6_subobj; + + addr6_subobj = skb_put_zero(skb, sizeof(*addr6_subobj)); + addr6_subobj->afi = htons(ICMP_AFI_IP6); + addr6_subobj->addr6 = *addr6; + objh->class_type |= ICMP_EXT_CTYPE_IIO_IPADDR; + } + + name_subobj = skb_put_zero(skb, ALIGN(sizeof(*name_subobj), 4)); + name_subobj->len = ALIGN(sizeof(*name_subobj), 4); + netdev_copy_name(dev, name_subobj->name); + objh->class_type |= ICMP_EXT_CTYPE_IIO_NAME; + + data = htonl(READ_ONCE(dev->mtu)); + skb_put_data(skb, &data, sizeof(__be32)); + objh->class_type |= ICMP_EXT_CTYPE_IIO_MTU; + +out: + rcu_read_unlock(); + objh->length = htons(skb_tail_pointer(skb) - (unsigned char *)objh); +} + +static void icmp6_ext_objs_append(struct net *net, struct sk_buff *skb, + u8 ext_objs, int iif) +{ + if (ext_objs & BIT(ICMP_ERR_EXT_IIO_IIF)) + icmp6_ext_iio_iif_append(net, skb, iif); +} + +static struct sk_buff * +icmp6_ext_append(struct net *net, struct sk_buff *skb_in, + struct icmp6hdr *icmp6h, unsigned int room, int iif) +{ + unsigned int payload_len, ext_max_len, ext_len; + struct icmp_ext_hdr *ext_hdr; + struct sk_buff *skb; + u8 ext_objs; + int nhoff; + + switch (icmp6h->icmp6_type) { + case ICMPV6_DEST_UNREACH: + case ICMPV6_TIME_EXCEED: + break; + default: + return NULL; + } + + /* Do not overwrite existing extensions. This can happen when we + * receive an ICMPv4 message with extensions from a tunnel and + * translate it to an ICMPv6 message towards an IPv6 host in the + * overlay network. + */ + if (icmp6h->icmp6_datagram_len) + return NULL; + + ext_objs = READ_ONCE(net->ipv6.sysctl.icmpv6_errors_extension_mask); + if (!ext_objs) + return NULL; + + ext_max_len = icmp6_ext_max_len(ext_objs); + if (ICMP_EXT_ORIG_DGRAM_MIN_LEN + ext_max_len > room) + return NULL; + + skb = skb_clone(skb_in, GFP_ATOMIC); + if (!skb) + return NULL; + + nhoff = skb_network_offset(skb); + payload_len = min(skb->len - nhoff, ICMP_EXT_ORIG_DGRAM_MIN_LEN); + + if (!pskb_network_may_pull(skb, payload_len)) + goto free_skb; + + if (pskb_trim(skb, nhoff + ICMP_EXT_ORIG_DGRAM_MIN_LEN) || + __skb_put_padto(skb, nhoff + ICMP_EXT_ORIG_DGRAM_MIN_LEN, false)) + goto free_skb; + + if (pskb_expand_head(skb, 0, ext_max_len, GFP_ATOMIC)) + goto free_skb; + + ext_hdr = skb_put_zero(skb, sizeof(*ext_hdr)); + ext_hdr->version = ICMP_EXT_VERSION_2; + + icmp6_ext_objs_append(net, skb, ext_objs, iif); + + /* Do not send an empty extension structure. */ + ext_len = skb_tail_pointer(skb) - (unsigned char *)ext_hdr; + if (ext_len == sizeof(*ext_hdr)) + goto free_skb; + + ext_hdr->checksum = ip_compute_csum(ext_hdr, ext_len); + /* The length of the original datagram in 64-bit words (RFC 4884). 
*/ + icmp6h->icmp6_datagram_len = ICMP_EXT_ORIG_DGRAM_MIN_LEN / sizeof(u64); + + return skb; + +free_skb: + consume_skb(skb); + return NULL; +} + /* * Send an ICMP message in response to a packet in error */ @@ -458,7 +645,9 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, struct ipv6_pinfo *np; const struct in6_addr *saddr = NULL; bool apply_ratelimit = false; + struct sk_buff *ext_skb; struct dst_entry *dst; + unsigned int room; struct icmp6hdr tmp_hdr; struct flowi6 fl6; struct icmpv6_msg msg; @@ -612,8 +801,13 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, msg.offset = skb_network_offset(skb); msg.type = type; - len = skb->len - msg.offset; - len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr)); + room = IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr); + ext_skb = icmp6_ext_append(net, skb, &tmp_hdr, room, parm->iif); + if (ext_skb) + msg.skb = ext_skb; + + len = msg.skb->len - msg.offset; + len = min_t(unsigned int, len, room); if (len < 0) { net_dbg_ratelimited("icmp: len problem [%pI6c > %pI6c]\n", &hdr->saddr, &hdr->daddr); @@ -635,6 +829,8 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, } out_dst_release: + if (ext_skb) + consume_skb(ext_skb); dst_release(dst); out_unlock: icmpv6_xmit_unlock(sk); @@ -1171,6 +1367,10 @@ int icmpv6_err_convert(u8 type, u8 code, int *err) EXPORT_SYMBOL(icmpv6_err_convert); #ifdef CONFIG_SYSCTL + +static u32 icmpv6_errors_extension_mask_all = + GENMASK_U8(ICMP_ERR_EXT_COUNT - 1, 0); + static struct ctl_table ipv6_icmp_table_template[] = { { .procname = "ratelimit", @@ -1216,6 +1416,15 @@ static struct ctl_table ipv6_icmp_table_template[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, + { + .procname = "errors_extension_mask", + .data = &init_net.ipv6.sysctl.icmpv6_errors_extension_mask, + .maxlen = sizeof(u8), + .mode = 0644, + .proc_handler = proc_dou8vec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &icmpv6_errors_extension_mask_all, + }, }; struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net) @@ -1233,6 +1442,7 @@ struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net) table[3].data = &net->ipv6.sysctl.icmpv6_echo_ignore_anycast; table[4].data = &net->ipv6.sysctl.icmpv6_ratemask_ptr; table[5].data = &net->ipv6.sysctl.icmpv6_error_anycast_as_unicast; + table[6].data = &net->ipv6.sysctl.icmpv6_errors_extension_mask; } return table; } diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index a3ff575798dd..60d0be47a9f3 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -66,8 +66,8 @@ EXPORT_SYMBOL(ipv6_flowlabel_exclusive); fl != NULL; \ fl = rcu_dereference(fl->next)) -#define for_each_sk_fl_rcu(np, sfl) \ - for (sfl = rcu_dereference(np->ipv6_fl_list); \ +#define for_each_sk_fl_rcu(sk, sfl) \ + for (sfl = rcu_dereference(inet_sk(sk)->ipv6_fl_list); \ sfl != NULL; \ sfl = rcu_dereference(sfl->next)) @@ -262,12 +262,11 @@ static struct ip6_flowlabel *fl_intern(struct net *net, struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label) { struct ipv6_fl_socklist *sfl; - struct ipv6_pinfo *np = inet6_sk(sk); label &= IPV6_FLOWLABEL_MASK; rcu_read_lock(); - for_each_sk_fl_rcu(np, sfl) { + for_each_sk_fl_rcu(sk, sfl) { struct ip6_flowlabel *fl = sfl->fl; if (fl->label == label && atomic_inc_not_zero(&fl->users)) { @@ -283,16 +282,16 @@ EXPORT_SYMBOL_GPL(__fl6_sock_lookup); void fl6_free_socklist(struct sock *sk) { - struct ipv6_pinfo *np = inet6_sk(sk); + 
struct inet_sock *inet = inet_sk(sk); struct ipv6_fl_socklist *sfl; - if (!rcu_access_pointer(np->ipv6_fl_list)) + if (!rcu_access_pointer(inet->ipv6_fl_list)) return; spin_lock_bh(&ip6_sk_fl_lock); - while ((sfl = rcu_dereference_protected(np->ipv6_fl_list, + while ((sfl = rcu_dereference_protected(inet->ipv6_fl_list, lockdep_is_held(&ip6_sk_fl_lock))) != NULL) { - np->ipv6_fl_list = sfl->next; + inet->ipv6_fl_list = sfl->next; spin_unlock_bh(&ip6_sk_fl_lock); fl_release(sfl->fl); @@ -470,16 +469,15 @@ done: static int mem_check(struct sock *sk) { - struct ipv6_pinfo *np = inet6_sk(sk); - struct ipv6_fl_socklist *sfl; int room = FL_MAX_SIZE - atomic_read(&fl_size); + struct ipv6_fl_socklist *sfl; int count = 0; if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK) return 0; rcu_read_lock(); - for_each_sk_fl_rcu(np, sfl) + for_each_sk_fl_rcu(sk, sfl) count++; rcu_read_unlock(); @@ -492,13 +490,15 @@ static int mem_check(struct sock *sk) return 0; } -static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl, - struct ip6_flowlabel *fl) +static inline void fl_link(struct sock *sk, struct ipv6_fl_socklist *sfl, + struct ip6_flowlabel *fl) { + struct inet_sock *inet = inet_sk(sk); + spin_lock_bh(&ip6_sk_fl_lock); sfl->fl = fl; - sfl->next = np->ipv6_fl_list; - rcu_assign_pointer(np->ipv6_fl_list, sfl); + sfl->next = inet->ipv6_fl_list; + rcu_assign_pointer(inet->ipv6_fl_list, sfl); spin_unlock_bh(&ip6_sk_fl_lock); } @@ -520,7 +520,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, rcu_read_lock(); - for_each_sk_fl_rcu(np, sfl) { + for_each_sk_fl_rcu(sk, sfl) { if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) { spin_lock_bh(&ip6_fl_lock); freq->flr_label = sfl->fl->label; @@ -559,7 +559,7 @@ static int ipv6_flowlabel_put(struct sock *sk, struct in6_flowlabel_req *freq) } spin_lock_bh(&ip6_sk_fl_lock); - for (sflp = &np->ipv6_fl_list; + for (sflp = &inet_sk(sk)->ipv6_fl_list; (sfl = socklist_dereference(*sflp)) != NULL; sflp = &sfl->next) { if (sfl->fl->label == freq->flr_label) @@ -579,13 +579,12 @@ found: static int ipv6_flowlabel_renew(struct sock *sk, struct in6_flowlabel_req *freq) { - struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); struct ipv6_fl_socklist *sfl; int err; rcu_read_lock(); - for_each_sk_fl_rcu(np, sfl) { + for_each_sk_fl_rcu(sk, sfl) { if (sfl->fl->label == freq->flr_label) { err = fl6_renew(sfl->fl, freq->flr_linger, freq->flr_expires); @@ -614,7 +613,6 @@ static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq, { struct ipv6_fl_socklist *sfl, *sfl1 = NULL; struct ip6_flowlabel *fl, *fl1 = NULL; - struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); int err; @@ -645,7 +643,7 @@ static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq, if (freq->flr_label) { err = -EEXIST; rcu_read_lock(); - for_each_sk_fl_rcu(np, sfl) { + for_each_sk_fl_rcu(sk, sfl) { if (sfl->fl->label == freq->flr_label) { if (freq->flr_flags & IPV6_FL_F_EXCL) { rcu_read_unlock(); @@ -682,7 +680,7 @@ recheck: fl1->linger = fl->linger; if ((long)(fl->expires - fl1->expires) > 0) fl1->expires = fl->expires; - fl_link(np, sfl1, fl1); + fl_link(sk, sfl1, fl1); fl_free(fl); return 0; @@ -716,7 +714,7 @@ release: } } - fl_link(np, sfl1, fl); + fl_link(sk, sfl1, fl); return 0; done: fl_free(fl); diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c index 0ff547a4bff7..cef3e0210744 100644 --- a/net/ipv6/ip6_udp_tunnel.c +++ b/net/ipv6/ip6_udp_tunnel.c @@ -40,7 +40,7 @@ 
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6, sizeof(udp6_addr.sin6_addr)); udp6_addr.sin6_port = cfg->local_udp_port; - err = kernel_bind(sock, (struct sockaddr *)&udp6_addr, + err = kernel_bind(sock, (struct sockaddr_unsized *)&udp6_addr, sizeof(udp6_addr)); if (err < 0) goto error; @@ -52,7 +52,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, sizeof(udp6_addr.sin6_addr)); udp6_addr.sin6_port = cfg->peer_udp_port; err = kernel_connect(sock, - (struct sockaddr *)&udp6_addr, + (struct sockaddr_unsized *)&udp6_addr, sizeof(udp6_addr), 0); } if (err < 0) diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index f427e41e9c49..59d17b6f06bf 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1449,7 +1449,7 @@ skip_defrtr: BASE_REACHABLE_TIME, rtime); NEIGH_VAR_SET(in6_dev->nd_parms, GC_STALETIME, 3 * rtime); - in6_dev->nd_parms->reachable_time = neigh_rand_reach_time(rtime); + neigh_set_reach_time(in6_dev->nd_parms); in6_dev->tstamp = jiffies; send_ifinfo_notify = true; } @@ -1948,9 +1948,9 @@ int ndisc_ifinfo_sysctl_change(const struct ctl_table *ctl, int write, void *buf ret = -1; if (write && ret == 0 && dev && (idev = in6_dev_get(dev)) != NULL) { - if (ctl->data == &NEIGH_VAR(idev->nd_parms, BASE_REACHABLE_TIME)) - idev->nd_parms->reachable_time = - neigh_rand_reach_time(NEIGH_VAR(idev->nd_parms, BASE_REACHABLE_TIME)); + if (ctl->data == NEIGH_VAR_PTR(idev->nd_parms, BASE_REACHABLE_TIME)) + neigh_set_reach_time(idev->nd_parms); + WRITE_ONCE(idev->tstamp, jiffies); inet6_ifinfo_notify(RTM_NEWLINK, idev); in6_dev_put(idev); diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index d7a2cdaa2631..e4afc651731a 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -45,7 +45,7 @@ static int dummy_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, return 0; } -static int ping_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr, +static int ping_v6_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { /* This check is replicated from __ip6_datagram_connect() and diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index e369f54844dd..b4cd05dba9b6 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -214,7 +214,8 @@ bool raw6_local_deliver(struct sk_buff *skb, int nexthdr) } /* This cleans up af_inet6 a bit. 
-DaveM */ -static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) +static int rawv6_bind(struct sock *sk, struct sockaddr_unsized *uaddr, + int addr_len) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 59c4977a811a..08113f430124 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -67,8 +67,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <crypto/hash.h> -#include <linux/scatterlist.h> +#include <crypto/md5.h> #include <trace/events/tcp.h> @@ -119,7 +118,7 @@ static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb) ipv6_hdr(skb)->saddr.s6_addr32); } -static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr, +static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { /* This check is replicated from tcp_v6_connect() and intended to @@ -134,7 +133,7 @@ static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr, return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, &addr_len); } -static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, +static int tcp_v6_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; @@ -239,7 +238,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, tp->af_specific = &tcp_sock_ipv6_mapped_specific; #endif - err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); + err = tcp_v4_connect(sk, (struct sockaddr_unsized *)&sin, sizeof(sin)); if (err) { icsk->icsk_ext_hdr_len = exthdrlen; @@ -691,69 +690,45 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname, cmd.tcpm_key, cmd.tcpm_keylen); } -static int tcp_v6_md5_hash_headers(struct tcp_sigpool *hp, - const struct in6_addr *daddr, - const struct in6_addr *saddr, - const struct tcphdr *th, int nbytes) +static void tcp_v6_md5_hash_headers(struct md5_ctx *ctx, + const struct in6_addr *daddr, + const struct in6_addr *saddr, + const struct tcphdr *th, int nbytes) { - struct tcp6_pseudohdr *bp; - struct scatterlist sg; - struct tcphdr *_th; - - bp = hp->scratch; - /* 1. 
TCP pseudo-header (RFC2460) */ - bp->saddr = *saddr; - bp->daddr = *daddr; - bp->protocol = cpu_to_be32(IPPROTO_TCP); - bp->len = cpu_to_be32(nbytes); - - _th = (struct tcphdr *)(bp + 1); - memcpy(_th, th, sizeof(*th)); - _th->check = 0; - - sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th)); - ahash_request_set_crypt(hp->req, &sg, NULL, - sizeof(*bp) + sizeof(*th)); - return crypto_ahash_update(hp->req); + struct { + struct tcp6_pseudohdr ip; /* TCP pseudo-header (RFC2460) */ + struct tcphdr tcp; + } h; + + h.ip.saddr = *saddr; + h.ip.daddr = *daddr; + h.ip.protocol = cpu_to_be32(IPPROTO_TCP); + h.ip.len = cpu_to_be32(nbytes); + h.tcp = *th; + h.tcp.check = 0; + md5_update(ctx, (const u8 *)&h, sizeof(h.ip) + sizeof(h.tcp)); } -static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, - const struct in6_addr *daddr, struct in6_addr *saddr, - const struct tcphdr *th) +static noinline_for_stack void +tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, + const struct in6_addr *daddr, struct in6_addr *saddr, + const struct tcphdr *th) { - struct tcp_sigpool hp; - - if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp)) - goto clear_hash_nostart; - - if (crypto_ahash_init(hp.req)) - goto clear_hash; - if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, th->doff << 2)) - goto clear_hash; - if (tcp_md5_hash_key(&hp, key)) - goto clear_hash; - ahash_request_set_crypt(hp.req, NULL, md5_hash, 0); - if (crypto_ahash_final(hp.req)) - goto clear_hash; - - tcp_sigpool_end(&hp); - return 0; + struct md5_ctx ctx; -clear_hash: - tcp_sigpool_end(&hp); -clear_hash_nostart: - memset(md5_hash, 0, 16); - return 1; + md5_init(&ctx); + tcp_v6_md5_hash_headers(&ctx, daddr, saddr, th, th->doff << 2); + tcp_md5_hash_key(&ctx, key); + md5_final(&ctx, md5_hash); } -static int tcp_v6_md5_hash_skb(char *md5_hash, - const struct tcp_md5sig_key *key, - const struct sock *sk, - const struct sk_buff *skb) +static noinline_for_stack void +tcp_v6_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, + const struct sock *sk, const struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); const struct in6_addr *saddr, *daddr; - struct tcp_sigpool hp; + struct md5_ctx ctx; if (sk) { /* valid for establish/request sockets */ saddr = &sk->sk_v6_rcv_saddr; @@ -764,30 +739,11 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, daddr = &ip6h->daddr; } - if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp)) - goto clear_hash_nostart; - - if (crypto_ahash_init(hp.req)) - goto clear_hash; - - if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, skb->len)) - goto clear_hash; - if (tcp_sigpool_hash_skb_data(&hp, skb, th->doff << 2)) - goto clear_hash; - if (tcp_md5_hash_key(&hp, key)) - goto clear_hash; - ahash_request_set_crypt(hp.req, NULL, md5_hash, 0); - if (crypto_ahash_final(hp.req)) - goto clear_hash; - - tcp_sigpool_end(&hp); - return 0; - -clear_hash: - tcp_sigpool_end(&hp); -clear_hash_nostart: - memset(md5_hash, 0, 16); - return 1; + md5_init(&ctx); + tcp_v6_md5_hash_headers(&ctx, daddr, saddr, th, skb->len); + tcp_md5_hash_skb_data(&ctx, skb, th->doff << 2); + tcp_md5_hash_key(&ctx, key); + md5_final(&ctx, md5_hash); } #endif @@ -840,7 +796,6 @@ struct request_sock_ops tcp6_request_sock_ops __read_mostly = { .send_ack = tcp_v6_reqsk_send_ack, .destructor = tcp_v6_reqsk_destructor, .send_reset = tcp_v6_send_reset, - .syn_ack_timeout = tcp_syn_ack_timeout, }; const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { @@ -1032,7 +987,6 @@ static void tcp_v6_send_reset(const struct sock *sk, 
struct sk_buff *skb, int oif = 0; #ifdef CONFIG_TCP_MD5SIG unsigned char newhash[16]; - int genhash; struct sock *sk1 = NULL; #endif @@ -1091,8 +1045,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb, goto out; key.type = TCP_KEY_MD5; - genhash = tcp_v6_md5_hash_skb(newhash, key.md5_key, NULL, skb); - if (genhash || memcmp(md5_hash_location, newhash, 16) != 0) + tcp_v6_md5_hash_skb(newhash, key.md5_key, NULL, skb); + if (memcmp(md5_hash_location, newhash, 16) != 0) goto out; } #endif @@ -1386,7 +1340,9 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * if (!newsk) return NULL; - inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk); + newinet = inet_sk(newsk); + newinet->pinet6 = tcp_inet6_sk(newsk); + newinet->ipv6_fl_list = NULL; newnp = tcp_inet6_sk(newsk); newtp = tcp_sk(newsk); @@ -1405,7 +1361,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * newnp->ipv6_mc_list = NULL; newnp->ipv6_ac_list = NULL; - newnp->ipv6_fl_list = NULL; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = inet_iif(skb); @@ -1453,10 +1408,12 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * newsk->sk_gso_type = SKB_GSO_TCPV6; inet6_sk_rx_dst_set(newsk, skb); - inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk); + newinet = inet_sk(newsk); + newinet->pinet6 = tcp_inet6_sk(newsk); + newinet->ipv6_fl_list = NULL; + newinet->inet_opt = NULL; newtp = tcp_sk(newsk); - newinet = inet_sk(newsk); newnp = tcp_inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); @@ -1469,10 +1426,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * First: no IPv4 options. */ - newinet->inet_opt = NULL; newnp->ipv6_mc_list = NULL; newnp->ipv6_ac_list = NULL; - newnp->ipv6_fl_list = NULL; /* Clone RX bits */ newnp->rxopt.all = np->rxopt.all; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 813a2ba75824..794c13674e8a 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1282,7 +1282,7 @@ static void udp_v6_flush_pending_frames(struct sock *sk) } } -static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr, +static int udpv6_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { if (addr_len < offsetofend(struct sockaddr, sa_family)) @@ -1303,7 +1303,8 @@ static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr, return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len); } -static int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +static int udpv6_connect(struct sock *sk, struct sockaddr_unsized *uaddr, + int addr_len) { int res; diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 6c717a7ef292..a4f1df92417d 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -553,16 +553,17 @@ static void __iucv_auto_name(struct iucv_sock *iucv) { char name[12]; - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); + scnprintf(name, sizeof(name), + "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); while (__iucv_get_sock_by_name(name)) { - sprintf(name, "%08x", - atomic_inc_return(&iucv_sk_list.autobind_name)); + scnprintf(name, sizeof(name), "%08x", + atomic_inc_return(&iucv_sk_list.autobind_name)); } memcpy(iucv->src_name, name, 8); } /* Bind an unbound socket */ -static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, +static int iucv_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len) { DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, 
addr); @@ -667,7 +668,7 @@ static int iucv_sock_autobind(struct sock *sk) return err; } -static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr) +static int afiucv_path_connect(struct socket *sock, struct sockaddr_unsized *addr) { DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr); struct sock *sk = sock->sk; @@ -713,7 +714,7 @@ done: } /* Connect an unconnected socket */ -static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, +static int iucv_sock_connect(struct socket *sock, struct sockaddr_unsized *addr, int alen, int flags) { DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr); diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 473a7847d80b..008be0abe3a5 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -95,7 +95,7 @@ struct device *iucv_alloc_device(const struct attribute_group **attrs, if (!dev) goto out_error; va_start(vargs, fmt); - vsnprintf(buf, sizeof(buf), fmt, vargs); + vscnprintf(buf, sizeof(buf), fmt, vargs); rc = dev_set_name(dev, "%s", buf); va_end(vargs); if (rc) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 369a2f2e459c..c4f4a57cd67c 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1503,7 +1503,7 @@ static int l2tp_tunnel_sock_create(struct net *net, memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6, sizeof(ip6_addr.l2tp_addr)); ip6_addr.l2tp_conn_id = tunnel_id; - err = kernel_bind(sock, (struct sockaddr *)&ip6_addr, + err = kernel_bind(sock, (struct sockaddr_unsized *)&ip6_addr, sizeof(ip6_addr)); if (err < 0) goto out; @@ -1513,7 +1513,7 @@ static int l2tp_tunnel_sock_create(struct net *net, sizeof(ip6_addr.l2tp_addr)); ip6_addr.l2tp_conn_id = peer_tunnel_id; err = kernel_connect(sock, - (struct sockaddr *)&ip6_addr, + (struct sockaddr_unsized *)&ip6_addr, sizeof(ip6_addr), 0); if (err < 0) goto out; @@ -1530,7 +1530,7 @@ static int l2tp_tunnel_sock_create(struct net *net, ip_addr.l2tp_family = AF_INET; ip_addr.l2tp_addr = cfg->local_ip; ip_addr.l2tp_conn_id = tunnel_id; - err = kernel_bind(sock, (struct sockaddr *)&ip_addr, + err = kernel_bind(sock, (struct sockaddr_unsized *)&ip_addr, sizeof(ip_addr)); if (err < 0) goto out; @@ -1538,7 +1538,7 @@ static int l2tp_tunnel_sock_create(struct net *net, ip_addr.l2tp_family = AF_INET; ip_addr.l2tp_addr = cfg->peer_ip; ip_addr.l2tp_conn_id = peer_tunnel_id; - err = kernel_connect(sock, (struct sockaddr *)&ip_addr, + err = kernel_connect(sock, (struct sockaddr_unsized *)&ip_addr, sizeof(ip_addr), 0); if (err < 0) goto out; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 29795d2839e8..cac1ff59cb83 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -267,7 +267,8 @@ static void l2tp_ip_destroy_sock(struct sock *sk) } } -static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) +static int l2tp_ip_bind(struct sock *sk, struct sockaddr_unsized *uaddr, + int addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr; @@ -328,7 +329,8 @@ out: return ret; } -static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +static int l2tp_ip_connect(struct sock *sk, struct sockaddr_unsized *uaddr, + int addr_len) { struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr; struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk)); diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index ea232f338dcb..05a396ba6a3e 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -280,7 +280,8 @@ static void l2tp_ip6_destroy_sock(struct sock *sk) } } -static 
int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) +static int l2tp_ip6_bind(struct sock *sk, struct sockaddr_unsized *uaddr, + int addr_len) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); @@ -383,7 +384,7 @@ out_unlock: return err; } -static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr, +static int l2tp_ip6_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 5e12e7ce17d8..ae4543d5597b 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -684,7 +684,7 @@ static struct l2tp_tunnel *pppol2tp_tunnel_get(struct net *net, /* connect() handler. Attach a PPPoX socket to a tunnel UDP socket */ -static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, +static int pppol2tp_connect(struct socket *sock, struct sockaddr_unsized *uservaddr, int sockaddr_len, int flags) { struct sock *sk = sock->sk; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 5958a80fe14c..59d593bb5d18 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -337,7 +337,7 @@ out: * otherwise all hell will break loose. * Returns: 0 upon success, negative otherwise. */ -static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) +static int llc_ui_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addrlen) { struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr; struct sock *sk = sock->sk; @@ -477,7 +477,7 @@ out: * This function will autobind if user did not previously call bind. * Returns: 0 upon success, negative otherwise. */ -static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr, +static int llc_ui_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int addrlen, int flags) { struct sock *sk = sock->sk; diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index e38f46ffebfa..7da909d78c68 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -9,7 +9,7 @@ * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2007-2010, Intel Corporation * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ /** @@ -206,7 +206,10 @@ u8 ieee80211_retrieve_addba_ext_data(struct sta_info *sta, if (elem_len <= 0) return 0; - elems = ieee802_11_parse_elems(elem_data, elem_len, true, NULL); + elems = ieee802_11_parse_elems(elem_data, elem_len, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION, + NULL); if (!elems || elems->parse_error || !elems->addba_ext_ie) goto free; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index c52b0456039d..b51c2c8584ae 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -63,12 +63,14 @@ static void ieee80211_set_mu_mimo_follow(struct ieee80211_sub_if_data *sdata, memcpy(sdata->vif.bss_conf.mu_group.position, params->vht_mumimo_groups + WLAN_MEMBERSHIP_LEN, WLAN_USER_POSITION_LEN); - ieee80211_link_info_change_notify(sdata, &sdata->deflink, - BSS_CHANGED_MU_GROUPS); + /* don't care about endianness - just check for 0 */ memcpy(&membership, params->vht_mumimo_groups, WLAN_MEMBERSHIP_LEN); mu_mimo_groups = membership != 0; + + /* Unset following if configured explicitly */ + eth_broadcast_addr(sdata->u.mntr.mu_follow_addr); } if (params->vht_mumimo_follow_addr) { @@ -76,16 +78,26 @@ static void ieee80211_set_mu_mimo_follow(struct ieee80211_sub_if_data *sdata, 
is_valid_ether_addr(params->vht_mumimo_follow_addr); ether_addr_copy(sdata->u.mntr.mu_follow_addr, params->vht_mumimo_follow_addr); + + /* Unset current membership until a management frame is RXed */ + memset(sdata->vif.bss_conf.mu_group.membership, 0, + WLAN_MEMBERSHIP_LEN); } sdata->vif.bss_conf.mu_mimo_owner = mu_mimo_groups || mu_mimo_follow; + + /* Notify only after setting mu_mimo_owner */ + if (sdata->vif.bss_conf.mu_mimo_owner && + sdata->flags & IEEE80211_SDATA_IN_DRIVER) + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_MU_GROUPS); } static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata, struct vif_params *params) { struct ieee80211_local *local = sdata->local; - struct ieee80211_sub_if_data *monitor_sdata; + struct ieee80211_sub_if_data *monitor_sdata = NULL; /* check flags first */ if (params->flags && ieee80211_sdata_running(sdata)) { @@ -103,23 +115,28 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata, return -EBUSY; } - /* also validate MU-MIMO change */ - if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) - monitor_sdata = sdata; - else - monitor_sdata = wiphy_dereference(local->hw.wiphy, - local->monitor_sdata); - - if (!monitor_sdata && + /* validate whether MU-MIMO can be configured */ + if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) && + !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR) && (params->vht_mumimo_groups || params->vht_mumimo_follow_addr)) return -EOPNOTSUPP; + /* Also update dependent monitor_sdata if required */ + if (test_bit(SDATA_STATE_RUNNING, &sdata->state) && + !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) + monitor_sdata = wiphy_dereference(local->hw.wiphy, + local->monitor_sdata); + /* apply all changes now - no failures allowed */ - if (monitor_sdata && - (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) || - ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))) - ieee80211_set_mu_mimo_follow(monitor_sdata, params); + if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) || + ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) { + /* This is copied in when the VIF is activated */ + ieee80211_set_mu_mimo_follow(sdata, params); + + if (monitor_sdata) + ieee80211_set_mu_mimo_follow(monitor_sdata, params); + } if (params->flags) { if (ieee80211_sdata_running(sdata)) { diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 7f8799fd673e..6aa305839f53 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -12,15 +12,131 @@ #include "driver-ops.h" #include "rate.h" +struct ieee80211_chanctx_user_iter { + struct ieee80211_chan_req *chanreq; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_link_data *link; + enum nl80211_iftype iftype; + bool reserved, radar_required, done; + enum { + CHANCTX_ITER_POS_ASSIGNED, + CHANCTX_ITER_POS_RESERVED, + CHANCTX_ITER_POS_DONE, + } per_link; +}; + +enum ieee80211_chanctx_iter_type { + CHANCTX_ITER_ALL, + CHANCTX_ITER_RESERVED, + CHANCTX_ITER_ASSIGNED, +}; + +static void ieee80211_chanctx_user_iter_next(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_chanctx_user_iter *iter, + enum ieee80211_chanctx_iter_type type, + bool start) +{ + lockdep_assert_wiphy(local->hw.wiphy); + + if (start) { + memset(iter, 0, sizeof(*iter)); + goto next_interface; + } + +next_link: + for (int link_id = iter->link ? 
iter->link->link_id : 0; + link_id < ARRAY_SIZE(iter->sdata->link); + link_id++) { + struct ieee80211_link_data *link; + + link = sdata_dereference(iter->sdata->link[link_id], + iter->sdata); + if (!link) + continue; + + switch (iter->per_link) { + case CHANCTX_ITER_POS_ASSIGNED: + iter->per_link = CHANCTX_ITER_POS_RESERVED; + if (type != CHANCTX_ITER_RESERVED && + rcu_access_pointer(link->conf->chanctx_conf) == &ctx->conf) { + iter->link = link; + iter->reserved = false; + iter->radar_required = link->radar_required; + iter->chanreq = &link->conf->chanreq; + return; + } + fallthrough; + case CHANCTX_ITER_POS_RESERVED: + iter->per_link = CHANCTX_ITER_POS_DONE; + if (type != CHANCTX_ITER_ASSIGNED && + link->reserved_chanctx == ctx) { + iter->link = link; + iter->reserved = true; + iter->radar_required = + link->reserved_radar_required; + + iter->chanreq = &link->reserved; + return; + } + fallthrough; + case CHANCTX_ITER_POS_DONE: + iter->per_link = CHANCTX_ITER_POS_ASSIGNED; + continue; + } + } + +next_interface: + /* next (or first) interface */ + iter->sdata = list_prepare_entry(iter->sdata, &local->interfaces, list); + list_for_each_entry_continue(iter->sdata, &local->interfaces, list) { + /* AP_VLAN has a chanctx pointer but follows AP */ + if (iter->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + continue; + + iter->link = NULL; + iter->per_link = CHANCTX_ITER_POS_ASSIGNED; + iter->iftype = iter->sdata->vif.type; + goto next_link; + } + + iter->done = true; +} + +#define for_each_chanctx_user_assigned(local, ctx, iter) \ + for (ieee80211_chanctx_user_iter_next(local, ctx, iter, \ + CHANCTX_ITER_ASSIGNED, \ + true); \ + !((iter)->done); \ + ieee80211_chanctx_user_iter_next(local, ctx, iter, \ + CHANCTX_ITER_ASSIGNED, \ + false)) + +#define for_each_chanctx_user_reserved(local, ctx, iter) \ + for (ieee80211_chanctx_user_iter_next(local, ctx, iter, \ + CHANCTX_ITER_RESERVED, \ + true); \ + !((iter)->done); \ + ieee80211_chanctx_user_iter_next(local, ctx, iter, \ + CHANCTX_ITER_RESERVED, \ + false)) + +#define for_each_chanctx_user_all(local, ctx, iter) \ + for (ieee80211_chanctx_user_iter_next(local, ctx, iter, \ + CHANCTX_ITER_ALL, \ + true); \ + !((iter)->done); \ + ieee80211_chanctx_user_iter_next(local, ctx, iter, \ + CHANCTX_ITER_ALL, \ + false)) + static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { - struct ieee80211_link_data *link; + struct ieee80211_chanctx_user_iter iter; int num = 0; - lockdep_assert_wiphy(local->hw.wiphy); - - list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list) + for_each_chanctx_user_assigned(local, ctx, &iter) num++; return num; @@ -29,12 +145,10 @@ static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local, static int ieee80211_chanctx_num_reserved(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { - struct ieee80211_link_data *link; + struct ieee80211_chanctx_user_iter iter; int num = 0; - lockdep_assert_wiphy(local->hw.wiphy); - - list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) + for_each_chanctx_user_reserved(local, ctx, &iter) num++; return num; @@ -43,8 +157,13 @@ static int ieee80211_chanctx_num_reserved(struct ieee80211_local *local, int ieee80211_chanctx_refcount(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { - return ieee80211_chanctx_num_assigned(local, ctx) + - ieee80211_chanctx_num_reserved(local, ctx); + struct ieee80211_chanctx_user_iter iter; + int num = 0; + + for_each_chanctx_user_all(local, ctx, &iter) + 
num++; + + return num; } static int ieee80211_num_chanctx(struct ieee80211_local *local, int radio_idx) @@ -143,15 +262,15 @@ ieee80211_chanctx_reserved_chanreq(struct ieee80211_local *local, const struct ieee80211_chan_req *req, struct ieee80211_chan_req *tmp) { - struct ieee80211_link_data *link; + struct ieee80211_chanctx_user_iter iter; lockdep_assert_wiphy(local->hw.wiphy); if (WARN_ON(!req)) return NULL; - list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) { - req = ieee80211_chanreq_compatible(&link->reserved, req, tmp); + for_each_chanctx_user_reserved(local, ctx, &iter) { + req = ieee80211_chanreq_compatible(iter.chanreq, req, tmp); if (!req) break; } @@ -165,18 +284,16 @@ ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local, const struct ieee80211_chan_req *compat, struct ieee80211_chan_req *tmp) { - struct ieee80211_link_data *link; const struct ieee80211_chan_req *comp_def = compat; + struct ieee80211_chanctx_user_iter iter; lockdep_assert_wiphy(local->hw.wiphy); - list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list) { - struct ieee80211_bss_conf *link_conf = link->conf; - - if (link->reserved_chanctx) + for_each_chanctx_user_assigned(local, ctx, &iter) { + if (iter.link->reserved_chanctx) continue; - comp_def = ieee80211_chanreq_compatible(&link_conf->chanreq, + comp_def = ieee80211_chanreq_compatible(iter.chanreq, comp_def, tmp); if (!comp_def) break; @@ -200,7 +317,7 @@ ieee80211_chanctx_can_reserve(struct ieee80211_local *local, if (!ieee80211_chanctx_non_reserved_chandef(local, ctx, req, &tmp)) return false; - if (!list_empty(&ctx->reserved_links) && + if (ieee80211_chanctx_num_reserved(local, ctx) != 0 && ieee80211_chanctx_reserved_chanreq(local, ctx, req, &tmp)) return true; @@ -389,10 +506,10 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, * channel context. */ static u32 -_ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, - struct ieee80211_chanctx *ctx, - struct ieee80211_link_data *rsvd_for, - bool check_reserved) +__ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for, + bool check_reserved) { enum nl80211_chan_width max_bw; struct cfg80211_chan_def min_def; @@ -497,13 +614,14 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local, * the max of min required widths of all the interfaces bound to this * channel context. 
*/ -void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, - struct ieee80211_chanctx *ctx, - struct ieee80211_link_data *rsvd_for, - bool check_reserved) +static void +_ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for, + bool check_reserved) { - u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, - check_reserved); + u32 changed = __ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, + check_reserved); if (!changed) return; @@ -517,6 +635,12 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, ieee80211_chan_bw_change(local, ctx, false, false); } +void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) +{ + _ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false); +} + static void _ieee80211_change_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, struct ieee80211_chanctx *old_ctx, @@ -551,7 +675,7 @@ static void _ieee80211_change_chanctx(struct ieee80211_local *local, ieee80211_chan_bw_change(local, old_ctx, false, true); if (ieee80211_chanreq_identical(&ctx_req, chanreq)) { - ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, false); + _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, false); return; } @@ -572,7 +696,8 @@ static void _ieee80211_change_chanctx(struct ieee80211_local *local, ctx->conf.ap = chanreq->ap; /* check if min chanctx also changed */ - changed |= _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, false); + changed |= __ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, + false); ieee80211_add_wbrf(local, &ctx->conf.def); @@ -633,8 +758,6 @@ ieee80211_find_chanctx(struct ieee80211_local *local, * context to actually be removed. 
*/ link->reserved_chanctx = ctx; - list_add(&link->reserved_chanctx_list, - &ctx->reserved_links); ieee80211_change_chanctx(local, ctx, ctx, compat); @@ -675,17 +798,13 @@ static bool ieee80211_chanctx_radar_required(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { - struct ieee80211_chanctx_conf *conf = &ctx->conf; - struct ieee80211_link_data *link; + struct ieee80211_chanctx_user_iter iter; lockdep_assert_wiphy(local->hw.wiphy); - for_each_sdata_link(local, link) { - if (rcu_access_pointer(link->conf->chanctx_conf) != conf) - continue; - if (!link->radar_required) - continue; - return true; + for_each_chanctx_user_assigned(local, ctx, &iter) { + if (iter.radar_required) + return true; } return false; @@ -705,8 +824,6 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local, if (!ctx) return NULL; - INIT_LIST_HEAD(&ctx->assigned_links); - INIT_LIST_HEAD(&ctx->reserved_links); ctx->conf.def = chanreq->oper; ctx->conf.ap = chanreq->ap; ctx->conf.rx_chains_static = 1; @@ -715,7 +832,7 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local, ctx->conf.radar_enabled = false; ctx->conf.radio_idx = radio_idx; ctx->radar_detected = false; - _ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false); + __ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false); return ctx; } @@ -804,27 +921,17 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, { struct ieee80211_chanctx_conf *conf = &ctx->conf; const struct ieee80211_chan_req *compat = NULL; - struct ieee80211_link_data *link; + struct ieee80211_chanctx_user_iter iter; struct ieee80211_chan_req tmp; struct sta_info *sta; lockdep_assert_wiphy(local->hw.wiphy); - for_each_sdata_link(local, link) { - struct ieee80211_bss_conf *link_conf; - - if (link->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) - continue; - - link_conf = link->conf; - - if (rcu_access_pointer(link_conf->chanctx_conf) != conf) - continue; - + for_each_chanctx_user_assigned(local, ctx, &iter) { if (!compat) - compat = &link_conf->chanreq; + compat = iter.chanreq; - compat = ieee80211_chanreq_compatible(&link_conf->chanreq, + compat = ieee80211_chanreq_compatible(iter.chanreq, compat, &tmp); if (WARN_ON_ONCE(!compat)) return; @@ -837,6 +944,7 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, list_for_each_entry(sta, &local->sta_list, list) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_chan_req tdls_chanreq = {}; + struct ieee80211_link_data *link; int tdls_link_id; if (!sta->uploaded || @@ -904,12 +1012,11 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link, drv_unassign_vif_chanctx(local, sdata, link->conf, curr_ctx); conf = NULL; - list_del(&link->assigned_chanctx_list); } if (new_ctx) { /* recalc considering the link we'll use it for now */ - ieee80211_recalc_chanctx_min_def(local, new_ctx, link, false); + _ieee80211_recalc_chanctx_min_def(local, new_ctx, link, false); ret = drv_assign_vif_chanctx(local, sdata, link->conf, new_ctx); if (assign_on_failure || !ret) { @@ -919,9 +1026,6 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link, /* succeeded, so commit it to the data structures */ conf = &new_ctx->conf; - if (!local->in_reconfig) - list_add(&link->assigned_chanctx_list, - &new_ctx->assigned_links); } } else { ret = 0; @@ -933,12 +1037,12 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link, ieee80211_recalc_chanctx_chantype(local, curr_ctx); ieee80211_recalc_smps_chanctx(local, curr_ctx); 
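The chanctx rework running through the hunks above drops the per-context assigned_links/reserved_links lists and instead recomputes the set of users on the fly: a small state struct plus a next() helper, wrapped in for_each macros, walks every interface and link under the wiphy mutex. A minimal, self-contained sketch of that start/advance/done shape (illustrative names, not the mac80211 ones):

#include <stdbool.h>

struct user_iter {
        int pos;    /* current slot, -1 before the first call */
        bool done;  /* set once the walk is exhausted */
};

static void user_iter_next(const bool *active, int n,
                           struct user_iter *iter, bool start)
{
        if (start) {
                iter->pos = -1;
                iter->done = false;
        }

        /* advance to the next slot that is actually a user */
        while (++iter->pos < n) {
                if (active[iter->pos])
                        return;
        }

        iter->done = true;
}

#define for_each_active_user(active, n, iter)                   \
        for (user_iter_next(active, n, iter, true);             \
             !(iter)->done;                                     \
             user_iter_next(active, n, iter, false))

/* With such an iterator, refcounting degenerates to counting
 * iterations, just as ieee80211_chanctx_refcount() now does. */
static int count_active_users(const bool *active, int n)
{
        struct user_iter iter;
        int num = 0;

        for_each_active_user(active, n, &iter)
                num++;
        return num;
}

The trade visible in the diff: links no longer carry list heads or need list_add()/list_del() bookkeeping, at the cost of re-walking the interface list on every query.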
ieee80211_recalc_radar_chanctx(local, curr_ctx); - ieee80211_recalc_chanctx_min_def(local, curr_ctx, NULL, false); + ieee80211_recalc_chanctx_min_def(local, curr_ctx); } if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) { ieee80211_recalc_txpower(link, false); - ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL, false); + ieee80211_recalc_chanctx_min_def(local, new_ctx); } if (conf) { @@ -971,21 +1075,21 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link, void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *chanctx) { + struct ieee80211_chanctx_user_iter iter; struct ieee80211_sub_if_data *sdata; u8 rx_chains_static, rx_chains_dynamic; - struct ieee80211_link_data *link; lockdep_assert_wiphy(local->hw.wiphy); rx_chains_static = 1; rx_chains_dynamic = 1; - for_each_sdata_link(local, link) { + for_each_chanctx_user_assigned(local, chanctx, &iter) { u8 needed_static, needed_dynamic; - switch (link->sdata->vif.type) { + switch (iter.iftype) { case NL80211_IFTYPE_STATION: - if (!link->sdata->u.mgd.associated) + if (!iter.sdata->u.mgd.associated) continue; break; case NL80211_IFTYPE_MONITOR: @@ -1001,26 +1105,23 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, continue; } - if (rcu_access_pointer(link->conf->chanctx_conf) != &chanctx->conf) - continue; - - if (link->sdata->vif.type == NL80211_IFTYPE_MONITOR) { + if (iter.iftype == NL80211_IFTYPE_MONITOR) { rx_chains_dynamic = rx_chains_static = local->rx_chains; break; } - switch (link->smps_mode) { + switch (iter.link->smps_mode) { default: WARN_ONCE(1, "Invalid SMPS mode %d\n", - link->smps_mode); + iter.link->smps_mode); fallthrough; case IEEE80211_SMPS_OFF: - needed_static = link->needed_rx_chains; - needed_dynamic = link->needed_rx_chains; + needed_static = iter.link->needed_rx_chains; + needed_dynamic = iter.link->needed_rx_chains; break; case IEEE80211_SMPS_DYNAMIC: needed_static = 1; - needed_dynamic = link->needed_rx_chains; + needed_dynamic = iter.link->needed_rx_chains; break; case IEEE80211_SMPS_STATIC: needed_static = 1; @@ -1108,7 +1209,6 @@ void ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link) if (WARN_ON(!ctx)) return; - list_del(&link->reserved_chanctx_list); link->reserved_chanctx = NULL; if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0) { @@ -1142,9 +1242,9 @@ ieee80211_replace_chanctx(struct ieee80211_local *local, struct wiphy *wiphy = local->hw.wiphy; const struct wiphy_radio *radio; - if (!curr_ctx || (curr_ctx->replace_state == - IEEE80211_CHANCTX_WILL_BE_REPLACED) || - !list_empty(&curr_ctx->reserved_links)) { + if (!curr_ctx || + curr_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED || + ieee80211_chanctx_num_reserved(local, curr_ctx) != 0) { /* * Another link already requested this context for a * reservation. Find another one hoping all links assigned @@ -1167,7 +1267,7 @@ ieee80211_replace_chanctx(struct ieee80211_local *local, IEEE80211_CHANCTX_REPLACE_NONE) continue; - if (!list_empty(&ctx->reserved_links)) + if (ieee80211_chanctx_num_reserved(local, ctx) != 0) continue; if (ctx->conf.radio_idx >= 0) { @@ -1185,9 +1285,9 @@ ieee80211_replace_chanctx(struct ieee80211_local *local, * If that's true then all available contexts already have reservations * and cannot be used. 
*/ - if (!curr_ctx || (curr_ctx->replace_state == - IEEE80211_CHANCTX_WILL_BE_REPLACED) || - !list_empty(&curr_ctx->reserved_links)) + if (!curr_ctx || + curr_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED || + ieee80211_chanctx_num_reserved(local, curr_ctx) != 0) return ERR_PTR(-EBUSY); new_ctx = ieee80211_alloc_chanctx(local, chanreq, mode, -1); @@ -1267,7 +1367,6 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link, return PTR_ERR(new_ctx); } - list_add(&link->reserved_chanctx_list, &new_ctx->reserved_links); link->reserved_chanctx = new_ctx; link->reserved = *chanreq; link->reserved_radar_required = radar_required; @@ -1381,7 +1480,6 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link) vif_chsw[0].new_ctx = &new_ctx->conf; vif_chsw[0].link_conf = link->conf; - list_del(&link->reserved_chanctx_list); link->reserved_chanctx = NULL; err = drv_switch_vif_chanctx(local, vif_chsw, 1, @@ -1394,7 +1492,6 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link) } link->radar_required = link->reserved_radar_required; - list_move(&link->assigned_chanctx_list, &new_ctx->assigned_links); rcu_assign_pointer(link_conf->chanctx_conf, &new_ctx->conf); if (sdata->vif.type == NL80211_IFTYPE_AP) @@ -1405,7 +1502,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link) if (ieee80211_chanctx_refcount(local, old_ctx) == 0) ieee80211_free_chanctx(local, old_ctx, false); - ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL, false); + ieee80211_recalc_chanctx_min_def(local, new_ctx); ieee80211_recalc_smps_chanctx(local, new_ctx); ieee80211_recalc_radar_chanctx(local, new_ctx); @@ -1451,7 +1548,6 @@ ieee80211_link_use_reserved_assign(struct ieee80211_link_data *link) ieee80211_change_chanctx(local, new_ctx, new_ctx, chanreq); - list_del(&link->reserved_chanctx_list); link->reserved_chanctx = NULL; err = ieee80211_assign_link_chanctx(link, new_ctx, false); @@ -1497,7 +1593,6 @@ static int ieee80211_chsw_switch_vifs(struct ieee80211_local *local, int n_vifs) { struct ieee80211_vif_chanctx_switch *vif_chsw; - struct ieee80211_link_data *link; struct ieee80211_chanctx *ctx, *old_ctx; int i, err; @@ -1509,6 +1604,8 @@ static int ieee80211_chsw_switch_vifs(struct ieee80211_local *local, i = 0; list_for_each_entry(ctx, &local->chanctx_list, list) { + struct ieee80211_chanctx_user_iter iter; + if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; @@ -1517,16 +1614,15 @@ static int ieee80211_chsw_switch_vifs(struct ieee80211_local *local, goto out; } - list_for_each_entry(link, &ctx->reserved_links, - reserved_chanctx_list) { - if (!ieee80211_link_has_in_place_reservation(link)) + for_each_chanctx_user_reserved(local, ctx, &iter) { + if (!ieee80211_link_has_in_place_reservation(iter.link)) continue; - old_ctx = ieee80211_link_get_chanctx(link); - vif_chsw[i].vif = &link->sdata->vif; + old_ctx = ieee80211_link_get_chanctx(iter.link); + vif_chsw[i].vif = &iter.sdata->vif; vif_chsw[i].old_ctx = &old_ctx->conf; vif_chsw[i].new_ctx = &ctx->conf; - vif_chsw[i].link_conf = link->conf; + vif_chsw[i].link_conf = iter.link->conf; i++; } @@ -1551,7 +1647,7 @@ static int ieee80211_chsw_switch_ctxs(struct ieee80211_local *local) if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; - if (!list_empty(&ctx->replace_ctx->assigned_links)) + if (ieee80211_chanctx_num_assigned(local, ctx) != 0) continue; ieee80211_del_chanctx(local, ctx->replace_ctx, false); @@ -1568,7 +1664,7 @@ err: if (ctx->replace_state != 
IEEE80211_CHANCTX_REPLACES_OTHER) continue; - if (!list_empty(&ctx->replace_ctx->assigned_links)) + if (ieee80211_chanctx_num_assigned(local, ctx) != 0) continue; ieee80211_del_chanctx(local, ctx, false); @@ -1603,7 +1699,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) */ list_for_each_entry(ctx, &local->chanctx_list, list) { - struct ieee80211_link_data *link; + struct ieee80211_chanctx_user_iter iter; if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; @@ -1619,12 +1715,11 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) n_reserved = 0; n_ready = 0; - list_for_each_entry(link, &ctx->replace_ctx->assigned_links, - assigned_chanctx_list) { + for_each_chanctx_user_assigned(local, ctx, &iter) { n_assigned++; - if (link->reserved_chanctx) { + if (iter.link->reserved_chanctx) { n_reserved++; - if (link->reserved_ready) + if (iter.link->reserved_ready) n_ready++; } } @@ -1641,13 +1736,12 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) } ctx->conf.radar_enabled = false; - list_for_each_entry(link, &ctx->reserved_links, - reserved_chanctx_list) { - if (ieee80211_link_has_in_place_reservation(link) && - !link->reserved_ready) + for_each_chanctx_user_reserved(local, ctx, &iter) { + if (ieee80211_link_has_in_place_reservation(iter.link) && + !iter.link->reserved_ready) return -EAGAIN; - old_ctx = ieee80211_link_get_chanctx(link); + old_ctx = ieee80211_link_get_chanctx(iter.link); if (old_ctx) { if (old_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) @@ -1658,7 +1752,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) n_vifs_ctxless++; } - if (link->reserved_radar_required) + if (iter.radar_required) ctx->conf.radar_enabled = true; } } @@ -1673,7 +1767,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) /* update station rate control and min width before switch */ list_for_each_entry(ctx, &local->chanctx_list, list) { - struct ieee80211_link_data *link; + struct ieee80211_chanctx_user_iter iter; if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; @@ -1683,17 +1777,16 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) goto err; } - list_for_each_entry(link, &ctx->reserved_links, - reserved_chanctx_list) { - if (!ieee80211_link_has_in_place_reservation(link)) + for_each_chanctx_user_reserved(local, ctx, &iter) { + if (!ieee80211_link_has_in_place_reservation(iter.link)) continue; ieee80211_chan_bw_change(local, - ieee80211_link_get_chanctx(link), + ieee80211_link_get_chanctx(iter.link), true, true); } - ieee80211_recalc_chanctx_min_def(local, ctx, NULL, true); + _ieee80211_recalc_chanctx_min_def(local, ctx, NULL, true); } /* @@ -1718,7 +1811,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) * context(s). 
*/ list_for_each_entry(ctx, &local->chanctx_list, list) { - struct ieee80211_link_data *link, *link_tmp; + struct ieee80211_chanctx_user_iter iter; if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; @@ -1728,9 +1821,9 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) goto err; } - list_for_each_entry(link, &ctx->reserved_links, - reserved_chanctx_list) { - struct ieee80211_sub_if_data *sdata = link->sdata; + for_each_chanctx_user_reserved(local, ctx, &iter) { + struct ieee80211_link_data *link = iter.link; + struct ieee80211_sub_if_data *sdata = iter.sdata; struct ieee80211_bss_conf *link_conf = link->conf; u64 changed = 0; @@ -1746,9 +1839,9 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) ieee80211_check_fast_xmit_iface(sdata); - link->radar_required = link->reserved_radar_required; + link->radar_required = iter.radar_required; - if (link_conf->chanreq.oper.width != link->reserved.oper.width) + if (link_conf->chanreq.oper.width != iter.chanreq->oper.width) changed = BSS_CHANGED_BANDWIDTH; ieee80211_link_update_chanreq(link, &link->reserved); @@ -1763,19 +1856,15 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) ieee80211_recalc_chanctx_chantype(local, ctx); ieee80211_recalc_smps_chanctx(local, ctx); ieee80211_recalc_radar_chanctx(local, ctx); - ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false); + ieee80211_recalc_chanctx_min_def(local, ctx); - list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links, - reserved_chanctx_list) { - if (ieee80211_link_get_chanctx(link) != ctx) + for_each_chanctx_user_reserved(local, ctx, &iter) { + if (ieee80211_link_get_chanctx(iter.link) != ctx) continue; - list_del(&link->reserved_chanctx_list); - list_move(&link->assigned_chanctx_list, - &ctx->assigned_links); - link->reserved_chanctx = NULL; + iter.link->reserved_chanctx = NULL; - ieee80211_link_chanctx_reservation_complete(link); + ieee80211_link_chanctx_reservation_complete(iter.link); ieee80211_chan_bw_change(local, ctx, false, false); } @@ -1786,12 +1875,10 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) * reservation for originally requested interface has already * succeeded at this point. 
*/ - list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links, - reserved_chanctx_list) { - if (WARN_ON(ieee80211_link_has_in_place_reservation(link))) - continue; + for_each_chanctx_user_reserved(local, ctx, &iter) { + struct ieee80211_link_data *link = iter.link; - if (WARN_ON(link->reserved_chanctx != ctx)) + if (WARN_ON(ieee80211_link_has_in_place_reservation(link))) continue; if (!link->reserved_ready) @@ -1834,15 +1921,14 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) err: list_for_each_entry(ctx, &local->chanctx_list, list) { - struct ieee80211_link_data *link, *link_tmp; + struct ieee80211_chanctx_user_iter iter; if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; - list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links, - reserved_chanctx_list) { - ieee80211_link_unreserve_chanctx(link); - ieee80211_link_chanctx_reservation_complete(link); + for_each_chanctx_user_reserved(local, ctx, &iter) { + ieee80211_link_unreserve_chanctx(iter.link); + ieee80211_link_chanctx_reservation_complete(iter.link); } } @@ -1949,7 +2035,6 @@ int _ieee80211_link_use_channel(struct ieee80211_link_data *link, /* remove reservation */ WARN_ON(link->reserved_chanctx != ctx); link->reserved_chanctx = NULL; - list_del(&link->reserved_chanctx_list); } if (ret) { @@ -2046,29 +2131,17 @@ ieee80211_chanctx_recheck(struct ieee80211_local *local, struct ieee80211_chan_req *tmp) { const struct ieee80211_chan_req *ret = req; - struct ieee80211_link_data *link; + struct ieee80211_chanctx_user_iter iter; lockdep_assert_wiphy(local->hw.wiphy); - for_each_sdata_link(local, link) { - if (link == skip_link) + for_each_chanctx_user_all(local, ctx, &iter) { + if (iter.link == skip_link) continue; - if (rcu_access_pointer(link->conf->chanctx_conf) == &ctx->conf) { - ret = ieee80211_chanreq_compatible(ret, - &link->conf->chanreq, - tmp); - if (!ret) - return NULL; - } - - if (link->reserved_chanctx == ctx) { - ret = ieee80211_chanreq_compatible(ret, - &link->reserved, - tmp); - if (!ret) - return NULL; - } + ret = ieee80211_chanreq_compatible(ret, iter.chanreq, tmp); + if (!ret) + return NULL; } *tmp = *ret; diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c index ba9fba165926..49753b73aba2 100644 --- a/net/mac80211/driver-ops.c +++ b/net/mac80211/driver-ops.c @@ -476,8 +476,12 @@ void drv_link_info_changed(struct ieee80211_local *local, if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE || sdata->vif.type == NL80211_IFTYPE_NAN || (sdata->vif.type == NL80211_IFTYPE_MONITOR && - !sdata->vif.bss_conf.mu_mimo_owner && - !(changed & BSS_CHANGED_TXPOWER)))) + changed & ~(BSS_CHANGED_TXPOWER | + BSS_CHANGED_MU_GROUPS)))) + return; + + if (WARN_ON_ONCE(changed & BSS_CHANGED_MU_GROUPS && + !sdata->vif.bss_conf.mu_mimo_owner)) return; if (!check_sdata_in_driver(sdata)) diff --git a/net/mac80211/he.c b/net/mac80211/he.c index 5792ef77e986..f7b05e59374c 100644 --- a/net/mac80211/he.c +++ b/net/mac80211/he.c @@ -3,7 +3,7 @@ * HE handling * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2019 - 2024 Intel Corporation + * Copyright(c) 2019-2025 Intel Corporation */ #include "ieee80211_i.h" @@ -313,7 +313,7 @@ bool ieee80211_prepare_rx_omi_bw(struct ieee80211_link_sta *pub_link_sta, ieee80211_link_sta_rc_update_omi(link, link_sta); } else { link_sta->rx_omi_bw_rx = bw; - ieee80211_recalc_chanctx_min_def(local, chanctx, NULL, false); + ieee80211_recalc_chanctx_min_def(local, chanctx); } link_sta->rx_omi_bw_staging = bw; @@ -359,7 +359,7 @@ 
void ieee80211_finalize_rx_omi_bw(struct ieee80211_link_sta *pub_link_sta) /* channel context in finalize only when narrowing bandwidth */ WARN_ON(link_sta->rx_omi_bw_rx < link_sta->rx_omi_bw_staging); link_sta->rx_omi_bw_rx = link_sta->rx_omi_bw_staging; - ieee80211_recalc_chanctx_min_def(local, chanctx, NULL, false); + ieee80211_recalc_chanctx_min_def(local, chanctx); } trace_api_return_void(local); diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 6e36b09fe97f..168f84a1353b 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -9,7 +9,7 @@ * Copyright 2009, Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2018-2024 Intel Corporation + * Copyright(c) 2018-2025 Intel Corporation */ #include <linux/delay.h> @@ -1554,6 +1554,7 @@ void ieee80211_rx_mgmt_probe_beacon(struct ieee80211_sub_if_data *sdata, { size_t baselen; struct ieee802_11_elems *elems; + u16 type; BUILD_BUG_ON(offsetof(typeof(mgmt->u.probe_resp), variable) != offsetof(typeof(mgmt->u.beacon), variable)); @@ -1566,8 +1567,9 @@ void ieee80211_rx_mgmt_probe_beacon(struct ieee80211_sub_if_data *sdata, if (baselen > len) return; + type = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_TYPE; elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable, - len - baselen, false, NULL); + len - baselen, type, NULL); if (elems) { ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, elems); @@ -1616,9 +1618,11 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, if (ies_len < 0) break; - elems = ieee802_11_parse_elems( - mgmt->u.action.u.chan_switch.variable, - ies_len, true, NULL); + elems = ieee802_11_parse_elems(mgmt->u.action.u.chan_switch.variable, + ies_len, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION, + NULL); if (elems && !elems->parse_error) ieee80211_rx_mgmt_spectrum_mgmt(sdata, mgmt, diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 878c3b14aeb8..9d9313eee59f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -916,9 +916,6 @@ struct ieee80211_chanctx { struct list_head list; struct rcu_head rcu_head; - struct list_head assigned_links; - struct list_head reserved_links; - enum ieee80211_chanctx_replace_state replace_state; struct ieee80211_chanctx *replace_ctx; @@ -1071,9 +1068,6 @@ struct ieee80211_link_data { struct ieee80211_sub_if_data *sdata; unsigned int link_id; - struct list_head assigned_chanctx_list; /* protected by wiphy mutex */ - struct list_head reserved_chanctx_list; /* protected by wiphy mutex */ - /* multicast keys only */ struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + @@ -1239,9 +1233,12 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p) for (struct ieee80211_sub_if_data *___sdata = NULL; \ !___sdata; \ ___sdata = (void *)~0 /* always stop */) \ + for (int ___link_id = ARRAY_SIZE(___sdata->link); \ + ___link_id; ___link_id = 0 /* always stop */) \ list_for_each_entry(___sdata, &(_local)->interfaces, list) \ - if (ieee80211_sdata_running(___sdata)) \ - for (int ___link_id = 0; \ + if (___link_id == ARRAY_SIZE(___sdata->link) && \ + ieee80211_sdata_running(___sdata)) \ + for (___link_id = 0; \ ___link_id < ARRAY_SIZE(___sdata->link); \ ___link_id++) \ if ((_link = wiphy_dereference((_local)->hw.wiphy, \ @@ -1255,9 +1252,12 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p) for (struct ieee80211_sub_if_data *___sdata = NULL; \ !___sdata; \ ___sdata 
= (void *)~0 /* always stop */) \ - list_for_each_entry_rcu(___sdata, &(_local)->interfaces, list) \ - if (ieee80211_sdata_running(___sdata)) \ - for (int ___link_id = 0; \ + for (int ___link_id = ARRAY_SIZE(___sdata->link); \ + ___link_id; ___link_id = 0 /* always stop */) \ + list_for_each_entry(___sdata, &(_local)->interfaces, list) \ + if (___link_id == ARRAY_SIZE(___sdata->link) && \ + ieee80211_sdata_running(___sdata)) \ + for (___link_id = 0; \ ___link_id < ARRAY_SIZE((___sdata)->link); \ ___link_id++) \ if ((_link = rcu_dereference((___sdata)->link[___link_id]))) @@ -2107,7 +2107,8 @@ void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, const int offset); int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up); void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata); -int ieee80211_add_virtual_monitor(struct ieee80211_local *local); +int ieee80211_add_virtual_monitor(struct ieee80211_local *local, + struct ieee80211_sub_if_data *creator_sdata); void ieee80211_del_virtual_monitor(struct ieee80211_local *local); bool __ieee80211_recalc_txpower(struct ieee80211_link_data *link); @@ -2422,7 +2423,8 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, * @mode: connection mode for parsing * @start: pointer to the elements * @len: length of the elements - * @action: %true if the elements came from an action frame + * @type: type of the frame the elements came from + * (action, probe response, beacon, etc.) * @filter: bitmap of element IDs to filter out while calculating * the element CRC * @crc: CRC starting value @@ -2440,7 +2442,7 @@ struct ieee80211_elems_parse_params { enum ieee80211_conn_mode mode; const u8 *start; size_t len; - bool action; + u8 type; u64 filter; u32 crc; struct cfg80211_bss *bss; @@ -2452,17 +2454,14 @@ struct ieee802_11_elems * ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params); static inline struct ieee802_11_elems * -ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, - u64 filter, u32 crc, - struct cfg80211_bss *bss) +ieee802_11_parse_elems(const u8 *start, size_t len, u8 type, + struct cfg80211_bss *bss) { struct ieee80211_elems_parse_params params = { .mode = IEEE80211_CONN_MODE_HIGHEST, .start = start, .len = len, - .action = action, - .filter = filter, - .crc = crc, + .type = type, .bss = bss, .link_id = -1, }; @@ -2470,13 +2469,6 @@ ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, return ieee802_11_parse_elems_full(&params); } -static inline struct ieee802_11_elems * -ieee802_11_parse_elems(const u8 *start, size_t len, bool action, - struct cfg80211_bss *bss) -{ - return ieee802_11_parse_elems_crc(start, len, action, 0, 0, bss); -} - extern const int ieee802_1d_to_ac[8]; static inline int ieee80211_ac_from_tid(int tid) @@ -2768,9 +2760,7 @@ int ieee80211_chanctx_refcount(struct ieee80211_local *local, void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *chanctx); void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, - struct ieee80211_chanctx *ctx, - struct ieee80211_link_data *rsvd_for, - bool check_reserved); + struct ieee80211_chanctx *ctx); bool ieee80211_is_radar_required(struct ieee80211_local *local, struct cfg80211_scan_request *req); bool ieee80211_is_radio_idx_in_scan_req(struct wiphy *wiphy, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 0ca55b9655a7..4f04d95c19d4 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -741,8 +741,9 @@ static void
ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do ieee80211_configure_filter(local); ieee80211_hw_config(local, -1, hw_reconf_flags); + /* Passing NULL means an interface is picked for configuration */ if (local->virt_monitors == local->open_count) - ieee80211_add_virtual_monitor(local); + ieee80211_add_virtual_monitor(local, NULL); } void ieee80211_stop_mbssid(struct ieee80211_sub_if_data *sdata) @@ -1176,7 +1177,8 @@ static void ieee80211_sdata_init(struct ieee80211_local *local, ieee80211_link_init(sdata, -1, &sdata->deflink, &sdata->vif.bss_conf); } -int ieee80211_add_virtual_monitor(struct ieee80211_local *local) +int ieee80211_add_virtual_monitor(struct ieee80211_local *local, + struct ieee80211_sub_if_data *creator_sdata) { struct ieee80211_sub_if_data *sdata; int ret; @@ -1184,10 +1186,14 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local) ASSERT_RTNL(); lockdep_assert_wiphy(local->hw.wiphy); - if (local->monitor_sdata || - ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) + if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) return 0; + /* Already have a monitor set up, configure it */ + sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata); + if (sdata) + goto configure_monitor; + sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); if (!sdata) return -ENOMEM; @@ -1240,6 +1246,32 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local) skb_queue_head_init(&sdata->status_queue); wiphy_work_init(&sdata->work, ieee80211_iface_work); +configure_monitor: + /* Copy in the MU-MIMO configuration if set */ + if (!creator_sdata) { + struct ieee80211_sub_if_data *other; + + list_for_each_entry(other, &local->mon_list, list) { + if (!other->vif.bss_conf.mu_mimo_owner) + continue; + + creator_sdata = other; + break; + } + } + + if (creator_sdata && creator_sdata->vif.bss_conf.mu_mimo_owner) { + sdata->vif.bss_conf.mu_mimo_owner = true; + memcpy(&sdata->vif.bss_conf.mu_group, + &creator_sdata->vif.bss_conf.mu_group, + sizeof(sdata->vif.bss_conf.mu_group)); + memcpy(&sdata->u.mntr.mu_follow_addr, + creator_sdata->u.mntr.mu_follow_addr, ETH_ALEN); + + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_MU_GROUPS); + } + return 0; } @@ -1396,11 +1428,13 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) if (res) goto err_stop; } else { - if (local->virt_monitors == 0 && local->open_count == 0) { - res = ieee80211_add_virtual_monitor(local); + /* add/configure if there is no non-monitor interface */ + if (local->virt_monitors == local->open_count) { + res = ieee80211_add_virtual_monitor(local, sdata); if (res) goto err_stop; } + local->virt_monitors++; /* must be before the call to ieee80211_configure_filter */ diff --git a/net/mac80211/link.c b/net/mac80211/link.c index 4a19b765ccb6..1e05845872af 100644 --- a/net/mac80211/link.c +++ b/net/mac80211/link.c @@ -23,9 +23,6 @@ static void ieee80211_update_apvlan_links(struct ieee80211_sub_if_data *sdata) list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { int link_id; - if (!vlan) - continue; - /* No support for 4addr with MLO yet */ if (vlan->wdev.use_4addr) return; @@ -119,8 +116,6 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, ieee80211_color_change_finalize_work); wiphy_delayed_work_init(&link->color_collision_detect_work, ieee80211_color_collision_detection_work); - INIT_LIST_HEAD(&link->assigned_chanctx_list); - INIT_LIST_HEAD(&link->reserved_chanctx_list); 
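The iface.c hunk above turns ieee80211_add_virtual_monitor() into an allocate-or-reuse helper: if monitor_sdata already exists it jumps straight to the shared configuration step via the configure_monitor label, and any MU-MIMO state owned by the creating interface (or another monitor) is copied in. A stripped-down sketch of that shape, with hypothetical names standing in for the mac80211 structures:

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct monitor {
        bool mu_mimo_owner;
        unsigned char mu_follow_addr[6];
};

static struct monitor *monitor_instance; /* stand-in for local->monitor_sdata */

static int add_or_configure_monitor(const struct monitor *creator)
{
        struct monitor *mon = monitor_instance;

        /* already have a monitor set up - just (re)configure it */
        if (mon)
                goto configure;

        mon = calloc(1, sizeof(*mon));
        if (!mon)
                return -ENOMEM;
        monitor_instance = mon;

configure:
        /* copy in the creator's MU-MIMO state if it owns one */
        if (creator && creator->mu_mimo_owner) {
                mon->mu_mimo_owner = true;
                memcpy(mon->mu_follow_addr, creator->mu_follow_addr,
                       sizeof(mon->mu_follow_addr));
        }
        return 0;
}

This is why ieee80211_do_open() can now call the function whenever only monitor interfaces are open: a second call is a cheap reconfiguration rather than a duplicate allocation.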
wiphy_delayed_work_init(&link->dfs_cac_timer_work, ieee80211_dfs_cac_timer_work); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index eefa6f7e899b..b05e313c7f17 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -356,8 +356,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE || sdata->vif.type == NL80211_IFTYPE_NAN || (sdata->vif.type == NL80211_IFTYPE_MONITOR && - !sdata->vif.bss_conf.mu_mimo_owner && - !(changed & BSS_CHANGED_TXPOWER)))) + changed & ~BSS_CHANGED_TXPOWER))) return; if (!check_sdata_in_driver(sdata)) diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index f37068a533f4..68901f1def0d 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008, 2009 open80211s Ltd. - * Copyright (C) 2018 - 2024 Intel Corporation + * Copyright (C) 2018 - 2025 Intel Corporation * Authors: Luis Carlos Cobo <luisca@cozybit.com> * Javier Cardona <javier@cozybit.com> */ @@ -1410,7 +1410,10 @@ ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata, if (baselen > len) return; - elems = ieee802_11_parse_elems(pos, len - baselen, false, NULL); + elems = ieee802_11_parse_elems(pos, len - baselen, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_REQ, + NULL); if (!elems) return; @@ -1455,11 +1458,11 @@ free: } static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, - u16 stype, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { + u16 type = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_TYPE; struct ieee80211_local *local = sdata->local; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee802_11_elems *elems; @@ -1469,7 +1472,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, enum nl80211_band band = rx_status->band; /* ignore ProbeResp to foreign address */ - if (stype == IEEE80211_STYPE_PROBE_RESP && + if (type == (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP) && !ether_addr_equal(mgmt->da, sdata->vif.addr)) return; @@ -1478,8 +1481,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, return; elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable, - len - baselen, - false, NULL); + len - baselen, type, NULL); if (!elems) return; @@ -1514,7 +1516,9 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, } if (ifmsh->sync_ops) - ifmsh->sync_ops->rx_bcn_presp(sdata, stype, mgmt, len, + ifmsh->sync_ops->rx_bcn_presp(sdata, + type & IEEE80211_FCTL_STYPE, + mgmt, len, elems->mesh_config, rx_status); free: kfree(elems); @@ -1622,7 +1626,10 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, pos = mgmt->u.action.u.chan_switch.variable; baselen = offsetof(struct ieee80211_mgmt, u.action.u.chan_switch.variable); - elems = ieee802_11_parse_elems(pos, len - baselen, true, NULL); + elems = ieee802_11_parse_elems(pos, len - baselen, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION, + NULL); if (!elems) return; @@ -1699,8 +1706,7 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, switch (stype) { case IEEE80211_STYPE_PROBE_RESP: case IEEE80211_STYPE_BEACON: - ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len, - rx_status); + ieee80211_mesh_rx_bcn_presp(sdata, mgmt, skb->len, rx_status); break; case IEEE80211_STYPE_PROBE_REQ: ieee80211_mesh_rx_probe_req(sdata, mgmt, skb->len); diff --git a/net/mac80211/mesh_hwmp.c 
b/net/mac80211/mesh_hwmp.c index 9101858525dd..a41b57bd11ff 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008, 2009 open80211s Ltd. - * Copyright (C) 2019, 2021-2023 Intel Corporation + * Copyright (C) 2019, 2021-2023, 2025 Intel Corporation * Author: Luis Carlos Cobo <luisca@cozybit.com> */ @@ -951,7 +951,10 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; elems = ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, - len - baselen, false, NULL); + len - baselen, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION, + NULL); if (!elems) return; diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index cb45a5d2009d..04c931cd2063 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008, 2009 open80211s Ltd. - * Copyright (C) 2019, 2021-2024 Intel Corporation + * Copyright (C) 2019, 2021-2025 Intel Corporation * Author: Luis Carlos Cobo <luisca@cozybit.com> */ #include <linux/gfp.h> @@ -1248,7 +1248,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, if (baselen > len) return; } - elems = ieee802_11_parse_elems(baseaddr, len - baselen, true, NULL); + elems = ieee802_11_parse_elems(baseaddr, len - baselen, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION, + NULL); if (elems) { mesh_process_plink_frame(sdata, mgmt, elems, rx_status); kfree(elems); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index f3138d158535..c705d3f45aff 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -276,11 +276,8 @@ ieee80211_determine_ap_chan(struct ieee80211_sub_if_data *sdata, return IEEE80211_CONN_MODE_VHT; } } else if (!vht_oper || !elems->vht_cap_elem) { - if (sband->band == NL80211_BAND_5GHZ) { - sdata_info(sdata, - "VHT information is missing, disabling VHT\n"); + if (sband->band == NL80211_BAND_5GHZ) return IEEE80211_CONN_MODE_HT; - } no_vht = true; } else if (sband->band == NL80211_BAND_2GHZ) { no_vht = true; @@ -1002,6 +999,9 @@ ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata, .from_ap = true, .start = ies->data, .len = ies->len, + .type = ies->from_beacon ? + IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON : + IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP, }; struct ieee802_11_elems *elems; struct ieee80211_supported_band *sband; @@ -2508,6 +2508,16 @@ static void ieee80211_csa_switch_work(struct wiphy *wiphy, link->u.mgd.csa.waiting_bcn = true; + /* + * The next beacon really should always be different, so this should + * have no effect whatsoever. However, with some APs (we observed this + * with an Asus AXE11000) the beacon after the CSA might be identical to + * the last beacon on the old channel - in this case we'd ignore it. + * Resetting the CRC will lead us to handle it better (albeit with a + * disconnect, but clearly the AP is broken).
+ */ + link->u.mgd.beacon_crc_valid = false; + /* apply new TPE restrictions immediately on the new channel */ if (link->u.mgd.csa.ap_chandef.chan->band == NL80211_BAND_6GHZ && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE) { @@ -5170,7 +5180,9 @@ static void ieee80211_epcs_teardown(struct ieee80211_sub_if_data *sdata) continue; } - elems = ieee802_11_parse_elems(ies->data, ies->len, false, + elems = ieee802_11_parse_elems(ies->data, ies->len, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_BEACON, NULL); if (!elems) { rcu_read_unlock(); @@ -5216,6 +5228,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, .len = elem_len, .link_id = link_id == assoc_data->assoc_link_id ? -1 : link_id, .from_ap = true, + .type = le16_to_cpu(mgmt->frame_control) & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE), }; bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ; bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ; @@ -6021,24 +6034,6 @@ ieee80211_determine_our_sta_mode_assoc(struct ieee80211_sub_if_data *sdata, conn->bw_limit, tmp.bw_limit); } -static enum ieee80211_ap_reg_power -ieee80211_ap_power_type(u8 control) -{ - switch (u8_get_bits(control, IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) { - case IEEE80211_6GHZ_CTRL_REG_LPI_AP: - case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP: - return IEEE80211_REG_LPI_AP; - case IEEE80211_6GHZ_CTRL_REG_SP_AP: - case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP: - case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP_OLD: - return IEEE80211_REG_SP_AP; - case IEEE80211_6GHZ_CTRL_REG_VLP_AP: - return IEEE80211_REG_VLP_AP; - default: - return IEEE80211_REG_UNSET_AP; - } -} - static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, int link_id, @@ -6081,7 +6076,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, he_6ghz_oper = ieee80211_he_6ghz_oper(elems->he_operation); if (he_6ghz_oper) link->conf->power_type = - ieee80211_ap_power_type(he_6ghz_oper->control); + cfg80211_6ghz_power_type(he_6ghz_oper->control, + cbss->channel->flags); else link_info(link, "HE 6 GHz operation missing (on %d MHz), expect issues\n", @@ -6349,6 +6345,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, .bss = NULL, .link_id = -1, .from_ap = true, + .type = le16_to_cpu(mgmt->frame_control) & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE), }; struct ieee802_11_elems *elems; int ac; @@ -6610,8 +6607,8 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_link_data *link, * Response frame shall be set to the broadcast address [..]" * So, on 6GHz band we should also accept broadcast responses.
*/ - channel = ieee80211_get_channel(sdata->local->hw.wiphy, - rx_status->freq); + channel = ieee80211_get_channel_khz(sdata->local->hw.wiphy, + ieee80211_rx_status_to_khz(rx_status)); if (!channel) return; @@ -7257,7 +7254,9 @@ ieee80211_mgd_check_cross_link_csa(struct ieee80211_sub_if_data *sdata, (prof->sta_info_len - 1), len - (prof->sta_info_len - 1), - false, NULL); + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_BEACON, + NULL); /* memory allocation failed - let's hope that's transient */ if (!prof_elems) @@ -7361,6 +7360,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, .mode = link->u.mgd.conn.mode, .link_id = -1, .from_ap = true, + .type = le16_to_cpu(mgmt->frame_control) & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE), }; lockdep_assert_wiphy(local->hw.wiphy); @@ -7963,7 +7963,10 @@ void ieee80211_process_neg_ttlm_req(struct ieee80211_sub_if_data *sdata, ies_len = len - offsetof(struct ieee80211_mgmt, u.action.u.ttlm_req.variable); elems = ieee802_11_parse_elems(mgmt->u.action.u.ttlm_req.variable, - ies_len, true, NULL); + ies_len, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION, + NULL); if (!elems) { ttlm_res = NEG_TTLM_RES_REJECT; goto out; @@ -8169,9 +8172,11 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, break; /* CSA IE cannot be overridden, no need for BSSID */ - elems = ieee802_11_parse_elems( - mgmt->u.action.u.chan_switch.variable, - ies_len, true, NULL); + elems = ieee802_11_parse_elems(mgmt->u.action.u.chan_switch.variable, + ies_len, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION, + NULL); if (elems && !elems->parse_error) { enum ieee80211_csa_source src = @@ -8198,9 +8203,11 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, * extended CSA IE can't be overridden, no need for * BSSID */ - elems = ieee802_11_parse_elems( - mgmt->u.action.u.ext_chan_switch.variable, - ies_len, true, NULL); + elems = ieee802_11_parse_elems(mgmt->u.action.u.ext_chan_switch.variable, + ies_len, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION, + NULL); if (elems && !elems->parse_error) { enum ieee80211_csa_source src; @@ -10978,7 +10985,10 @@ static void ieee80211_ml_epcs(struct ieee80211_sub_if_data *sdata, pos = scratch + sizeof(control); len -= sizeof(control); - link_elems = ieee802_11_parse_elems(pos, len, false, NULL); + link_elems = ieee802_11_parse_elems(pos, len, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION, + NULL); if (!link_elems) continue; @@ -11029,7 +11039,10 @@ void ieee80211_process_epcs_ena_resp(struct ieee80211_sub_if_data *sdata, u.action.u.epcs.variable) - IEEE80211_EPCS_ENA_RESP_BODY_LEN; - elems = ieee802_11_parse_elems(pos, ies_len, true, NULL); + elems = ieee802_11_parse_elems(pos, ies_len, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION, + NULL); if (!elems) return; diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c index c5e0f7f46004..bfc4ecb7a048 100644 --- a/net/mac80211/parse.c +++ b/net/mac80211/parse.c @@ -6,7 +6,7 @@ * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation * * element parsing for mac80211 */ @@ -286,6 +286,24 @@ _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params, bitmap_zero(seen_elems, 256); + switch (params->type) { + /* we don't need to parse assoc request, luckily (it's value 0) */ + case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ: + case 
IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ: + default: + WARN(1, "invalid frame type 0x%x for element parsing\n", + params->type); + break; + case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP: + case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP: + case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ: + case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP: + case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON: + case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION: + case IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON: + break; + } + for_each_element(elem, params->start, params->len) { const struct element *subelem; u8 elem_parse_failed; @@ -566,7 +584,8 @@ _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params, if (params->mode < IEEE80211_CONN_MODE_VHT) break; - if (!params->action) { + if (params->type != (IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION)) { elem_parse_failed = IEEE80211_PARSE_ERR_UNEXPECTED_ELEM; break; @@ -582,7 +601,8 @@ _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params, case WLAN_EID_CHANNEL_SWITCH_WRAPPER: if (params->mode < IEEE80211_CONN_MODE_VHT) break; - if (params->action) { + if (params->type == (IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION)) { elem_parse_failed = IEEE80211_PARSE_ERR_UNEXPECTED_ELEM; break; @@ -942,7 +962,7 @@ ieee80211_prep_mle_link_parse(struct ieee80211_elems_parse *elems_parse, sub->len = end - sub->start; sub->mode = params->mode; - sub->action = params->action; + sub->type = params->type; sub->from_ap = params->from_ap; sub->link_id = -1; @@ -1041,7 +1061,7 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params) sub.start = elems_parse->scratch_pos; sub.mode = params->mode; sub.len = nontx_len; - sub.action = params->action; + sub.type = params->type; sub.link_id = params->link_id; /* consume the space used for non-transmitted profile */ diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 5b4c3fe9970a..cc26c279984d 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -59,7 +59,8 @@ static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb, status->flag &= ~(RX_FLAG_RADIOTAP_TLV_AT_END | RX_FLAG_RADIOTAP_LSIG | RX_FLAG_RADIOTAP_HE_MU | - RX_FLAG_RADIOTAP_HE); + RX_FLAG_RADIOTAP_HE | + RX_FLAG_RADIOTAP_VHT); hdr = (void *)skb->data; fc = hdr->frame_control; @@ -151,8 +152,10 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, } if (status->encoding == RX_ENC_VHT) { + /* Included even if RX_FLAG_RADIOTAP_VHT is not set */ len = ALIGN(len, 2); len += 12; + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_vht) != 12); } if (local->hw.radiotap_timestamp.units_pos >= 0) { @@ -195,6 +198,9 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, * The position to look at depends on the existence (or non- * existence) of other elements, so take that into account... 
*/ + if (status->flag & RX_FLAG_RADIOTAP_VHT) + tlv_offset += + sizeof(struct ieee80211_radiotap_vht); if (status->flag & RX_FLAG_RADIOTAP_HE) tlv_offset += sizeof(struct ieee80211_radiotap_he); @@ -319,10 +325,17 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, u32 tlvs_len = 0; int mpdulen, chain; unsigned long chains = status->chains; + struct ieee80211_radiotap_vht vht = {}; struct ieee80211_radiotap_he he = {}; struct ieee80211_radiotap_he_mu he_mu = {}; struct ieee80211_radiotap_lsig lsig = {}; + if (status->flag & RX_FLAG_RADIOTAP_VHT) { + vht = *(struct ieee80211_radiotap_vht *)skb->data; + skb_pull(skb, sizeof(vht)); + WARN_ON_ONCE(status->encoding != RX_ENC_VHT); + } + if (status->flag & RX_FLAG_RADIOTAP_HE) { he = *(struct ieee80211_radiotap_he *)skb->data; skb_pull(skb, sizeof(he)); @@ -530,45 +543,61 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, } if (status->encoding == RX_ENC_VHT) { - u16 known = local->hw.radiotap_vht_details; + u16 fill = local->hw.radiotap_vht_details; - rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT)); - put_unaligned_le16(known, pos); - pos += 2; - /* flags */ - if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) - *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; + /* Leave driver filled fields alone */ + fill &= ~le16_to_cpu(vht.known); + vht.known |= cpu_to_le16(fill); + + if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_GI && + status->enc_flags & RX_ENC_FLAG_SHORT_GI) + vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; /* in VHT, STBC is binary */ - if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) - *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; - if (status->enc_flags & RX_ENC_FLAG_BF) - *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; + if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_STBC && + status->enc_flags & RX_ENC_FLAG_STBC_MASK) + vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; + if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED && + status->enc_flags & RX_ENC_FLAG_BF) + vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; - pos++; - /* bandwidth */ - switch (status->bw) { - case RATE_INFO_BW_80: - *pos++ = 4; - break; - case RATE_INFO_BW_160: - *pos++ = 11; - break; - case RATE_INFO_BW_40: - *pos++ = 1; - break; - default: - *pos++ = 0; + + if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) { + switch (status->bw) { + case RATE_INFO_BW_40: + vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_40; + break; + case RATE_INFO_BW_80: + vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_80; + break; + case RATE_INFO_BW_160: + vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_160; + break; + default: + vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_20; + break; + } } - /* MCS/NSS */ - *pos = (status->rate_idx << 4) | status->nss; - pos += 4; - /* coding field */ - if (status->enc_flags & RX_ENC_FLAG_LDPC) - *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; - pos++; - /* group ID */ - pos++; - /* partial_aid */ - pos += 2; + + /* + * If the driver filled in mcs_nss[0], then do not touch it. + * + * Otherwise, put some information about MCS/NSS into the + * user 0 field. Note that this is not technically correct for + * an MU frame as we might have decoded a different user.
+ */ + if (!vht.mcs_nss[0]) { + vht.mcs_nss[0] = (status->rate_idx << 4) | status->nss; + + /* coding field */ + if (status->enc_flags & RX_ENC_FLAG_LDPC) + vht.coding |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; + } + + /* ensure 2 byte alignment */ + while ((pos - (u8 *)rthdr) & 1) + pos++; + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT)); + memcpy(pos, &vht, sizeof(vht)); + pos += sizeof(vht); } if (local->hw.radiotap_timestamp.units_pos >= 0) { @@ -763,6 +792,51 @@ ieee80211_make_monitor_skb(struct ieee80211_local *local, return skb; } +static bool +ieee80211_validate_monitor_radio(struct ieee80211_sub_if_data *sdata, + struct ieee80211_local *local, + struct ieee80211_rx_status *status) +{ + struct wiphy *wiphy = local->hw.wiphy; + int i, freq, bw; + + if (!wiphy->n_radio) + return true; + + switch (status->bw) { + case RATE_INFO_BW_20: + bw = 20000; + break; + case RATE_INFO_BW_40: + bw = 40000; + break; + case RATE_INFO_BW_80: + bw = 80000; + break; + case RATE_INFO_BW_160: + bw = 160000; + break; + case RATE_INFO_BW_320: + bw = 320000; + break; + default: + return false; + } + + freq = MHZ_TO_KHZ(status->freq); + + for (i = 0; i < wiphy->n_radio; i++) { + if (!(sdata->wdev.radio_mask & BIT(i))) + continue; + + if (!ieee80211_radio_freq_range_valid(&wiphy->radio[i], freq, bw)) + continue; + + return true; + } + return false; +} + /* * This function copies a received frame to all monitor interfaces and * returns a cleaned-up SKB that no longer includes the FCS nor the @@ -789,6 +863,9 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, return NULL; } + if (status->flag & RX_FLAG_RADIOTAP_VHT) + rtap_space += sizeof(struct ieee80211_radiotap_vht); + if (status->flag & RX_FLAG_RADIOTAP_HE) rtap_space += sizeof(struct ieee80211_radiotap_he); @@ -855,6 +932,10 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, chandef->chan->center_freq != status->freq) continue; + if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR) && + !ieee80211_validate_monitor_radio(sdata, local, status)) + continue; + if (!prev_sdata) { prev_sdata = sdata; continue; } @@ -3521,8 +3602,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) switch (mgmt->u.action.category) { case WLAN_CATEGORY_HT: - /* reject HT action frames from stations not supporting HT */ - if (!rx->link_sta->pub->ht_cap.ht_supported) + /* reject HT action frames from stations supporting + * neither HT nor HE + */ + if (!rx->link_sta->pub->ht_cap.ht_supported && + !rx->link_sta->pub->he_cap.has_he) goto invalid; if (sdata->vif.type != NL80211_IFTYPE_STATION && @@ -4903,6 +4987,11 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, /* after this point, don't punt to the slowpath! 
*/ + if (fast_rx->uses_rss) + stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats); + else + stats = &rx->link_sta->rx_stats; + if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) && pskb_trim(skb, skb->len - fast_rx->icv_len)) goto drop; @@ -4937,6 +5026,8 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb); switch (res) { case RX_QUEUED: + stats->last_rx = jiffies; + stats->last_rate = sta_stats_encode_rate(status); return true; case RX_CONTINUE: break; @@ -4950,11 +5041,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, drop: dev_kfree_skb(skb); - if (fast_rx->uses_rss) - stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats); - else - stats = &rx->link_sta->rx_stats; - stats->dropped++; return true; } diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index bb9563f50e7b..5ef315ed3b0f 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -76,7 +76,11 @@ void ieee80211_inform_bss(struct wiphy *wiphy, if (!update_data) return; - elems = ieee802_11_parse_elems(ies->data, ies->len, false, NULL); + elems = ieee802_11_parse_elems(ies->data, ies->len, + update_data->beacon ? + IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON : + IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP, + NULL); if (!elems) return; diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index ba5fbacbeeda..dbbfe2d6842f 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -6,7 +6,7 @@ * Copyright 2014, Intel Corporation * Copyright 2014 Intel Mobile Communications GmbH * Copyright 2015 - 2016 Intel Deutschland GmbH - * Copyright (C) 2019, 2021-2024 Intel Corporation + * Copyright (C) 2019, 2021-2025 Intel Corporation */ #include <linux/ieee80211.h> @@ -1783,7 +1783,10 @@ ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata, } elems = ieee802_11_parse_elems(tf->u.chan_switch_resp.variable, - skb->len - baselen, false, NULL); + skb->len - baselen, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION, + NULL); if (!elems) { ret = -ENOMEM; goto out; @@ -1902,7 +1905,10 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, } elems = ieee802_11_parse_elems(tf->u.chan_switch_req.variable, - skb->len - baselen, false, NULL); + skb->len - baselen, + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION, + NULL); if (!elems) return -ENOMEM; diff --git a/net/mac80211/tests/elems.c b/net/mac80211/tests/elems.c index a53c55a879a8..1039794a0183 100644 --- a/net/mac80211/tests/elems.c +++ b/net/mac80211/tests/elems.c @@ -2,7 +2,7 @@ /* * KUnit tests for element parsing * - * Copyright (C) 2023-2024 Intel Corporation + * Copyright (C) 2023-2025 Intel Corporation */ #include <kunit/test.h> #include "../ieee80211_i.h" @@ -15,6 +15,8 @@ static void mle_defrag(struct kunit *test) .link_id = 12, .from_ap = true, .mode = IEEE80211_CONN_MODE_EHT, + /* type is not really relevant here */ + .type = IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON, }; struct ieee802_11_elems *parsed; struct sk_buff *skb; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index c9931537f9d2..0c46009a3d63 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2206,9 +2206,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) } } + /* Passing NULL means an interface is picked for configuration */ if (local->virt_monitors > 0 && local->virt_monitors == local->open_count) - ieee80211_add_virtual_monitor(local); + ieee80211_add_virtual_monitor(local, NULL); if (!suspended) return 0; @@ -2347,7 +2348,7 
@@ void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata, chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf); - ieee80211_recalc_chanctx_min_def(local, chanctx, NULL, false); + ieee80211_recalc_chanctx_min_def(local, chanctx); } } @@ -4016,23 +4017,23 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local, if (WARN_ON(ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)) return 0; - list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) - if (link->reserved_radar_required) - radar_detect |= BIT(link->reserved.oper.width); - - /* - * An in-place reservation context should not have any assigned vifs - * until it replaces the other context. - */ - WARN_ON(ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER && - !list_empty(&ctx->assigned_links)); + for_each_sdata_link(local, link) { + if (rcu_access_pointer(link->conf->chanctx_conf) == &ctx->conf) { + /* + * An in-place reservation context should not have any + * assigned links until it replaces the other context. + */ + WARN_ON(ctx->replace_state == + IEEE80211_CHANCTX_REPLACES_OTHER); - list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list) { - if (!link->radar_required) - continue; + if (link->radar_required) + radar_detect |= + BIT(link->conf->chanreq.oper.width); + } - radar_detect |= - BIT(link->conf->chanreq.oper.width); + if (link->reserved_chanctx == ctx && + link->reserved_radar_required) + radar_detect |= BIT(link->reserved.oper.width); } return radar_detect; diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c index b99ba14f39d2..209a963112e3 100644 --- a/net/mctp/af_mctp.c +++ b/net/mctp/af_mctp.c @@ -49,7 +49,7 @@ static bool mctp_sockaddr_ext_is_ok(const struct sockaddr_mctp_ext *addr) !addr->__smctp_pad0[2]; } -static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen) +static int mctp_bind(struct socket *sock, struct sockaddr_unsized *addr, int addrlen) { struct sock *sk = sock->sk; struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk); @@ -128,7 +128,7 @@ out_release: /* Used to set a specific peer prior to bind. Not used for outbound * connections (Tag Owner set) since MCTP is a datagram protocol. 
*/ -static int mctp_connect(struct socket *sock, struct sockaddr *addr, +static int mctp_connect(struct socket *sock, struct sockaddr_unsized *addr, int addrlen, int flags) { struct sock *sk = sock->sk; diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c index 69a3ccfc6310..be9149ac79dd 100644 --- a/net/mctp/test/route-test.c +++ b/net/mctp/test/route-test.c @@ -205,7 +205,7 @@ static void __mctp_route_test_init(struct kunit *test, addr.smctp_network = netid; addr.smctp_addr.s_addr = 8; addr.smctp_type = 0; - rc = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr)); + rc = kernel_bind(sock, (struct sockaddr_unsized *)&addr, sizeof(addr)); KUNIT_ASSERT_EQ(test, rc, 0); *devp = dev; diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c index 953d41902771..35f6be814567 100644 --- a/net/mctp/test/utils.c +++ b/net/mctp/test/utils.c @@ -279,7 +279,7 @@ void mctp_test_bind_run(struct kunit *test, addr.smctp_addr.s_addr = setup->peer_addr; /* connect() type must match bind() type */ addr.smctp_type = setup->bind_type; - rc = kernel_connect(*sock, (struct sockaddr *)&addr, + rc = kernel_connect(*sock, (struct sockaddr_unsized *)&addr, sizeof(addr), 0); KUNIT_EXPECT_EQ(test, rc, 0); } @@ -292,5 +292,6 @@ void mctp_test_bind_run(struct kunit *test, addr.smctp_type = setup->bind_type; *ret_bind_errno = - kernel_bind(*sock, (struct sockaddr *)&addr, sizeof(addr)); + kernel_bind(*sock, (struct sockaddr_unsized *)&addr, + sizeof(addr)); } diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 25c88cba5c48..580aac112dd2 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -75,16 +75,23 @@ static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt, struct nlmsghdr *nlh, struct net *net, u32 portid, unsigned int nlm_flags); -static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index) +static struct mpls_route *mpls_route_input(struct net *net, unsigned int index) { - struct mpls_route *rt = NULL; + struct mpls_route __rcu **platform_label; - if (index < net->mpls.platform_labels) { - struct mpls_route __rcu **platform_label = - rcu_dereference_rtnl(net->mpls.platform_label); - rt = rcu_dereference_rtnl(platform_label[index]); - } - return rt; + platform_label = mpls_dereference(net, net->mpls.platform_label); + return mpls_dereference(net, platform_label[index]); +} + +static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned int index) +{ + struct mpls_route __rcu **platform_label; + + if (index >= net->mpls.platform_labels) + return NULL; + + platform_label = rcu_dereference(net->mpls.platform_label); + return rcu_dereference(platform_label[index]); } bool mpls_output_possible(const struct net_device *dev) @@ -129,25 +136,26 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) } EXPORT_SYMBOL_GPL(mpls_pkt_too_big); -void mpls_stats_inc_outucastpkts(struct net_device *dev, +void mpls_stats_inc_outucastpkts(struct net *net, + struct net_device *dev, const struct sk_buff *skb) { struct mpls_dev *mdev; if (skb->protocol == htons(ETH_P_MPLS_UC)) { - mdev = mpls_dev_get(dev); + mdev = mpls_dev_rcu(dev); if (mdev) MPLS_INC_STATS_LEN(mdev, skb->len, tx_packets, tx_bytes); } else if (skb->protocol == htons(ETH_P_IP)) { - IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len); + IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len); #if IS_ENABLED(CONFIG_IPV6) } else if (skb->protocol == htons(ETH_P_IPV6)) { - struct inet6_dev *in6dev = __in6_dev_get(dev); + struct inet6_dev *in6dev = in6_dev_rcu(dev); if (in6dev) - 
IP6_UPD_PO_STATS(dev_net(dev), in6dev, + IP6_UPD_PO_STATS(net, in6dev, IPSTATS_MIB_OUT, skb->len); #endif } @@ -342,7 +350,7 @@ static bool mpls_egress(struct net *net, struct mpls_route *rt, static int mpls_forward(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { - struct net *net = dev_net(dev); + struct net *net = dev_net_rcu(dev); struct mpls_shim_hdr *hdr; const struct mpls_nh *nh; struct mpls_route *rt; @@ -357,7 +365,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, /* Careful this entire function runs inside of an rcu critical section */ - mdev = mpls_dev_get(dev); + mdev = mpls_dev_rcu(dev); if (!mdev) goto drop; @@ -434,7 +442,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, dec.ttl -= 1; if (unlikely(!new_header_size && dec.bos)) { /* Penultimate hop popping */ - if (!mpls_egress(dev_net(out_dev), rt, skb, dec)) + if (!mpls_egress(net, rt, skb, dec)) goto err; } else { bool bos; @@ -451,7 +459,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, } } - mpls_stats_inc_outucastpkts(out_dev, skb); + mpls_stats_inc_outucastpkts(net, out_dev, skb); /* If via wasn't specified then send out using device address */ if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC) @@ -466,7 +474,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, return 0; tx_err: - out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL; + out_mdev = out_dev ? mpls_dev_rcu(out_dev) : NULL; if (out_mdev) MPLS_INC_STATS(out_mdev, tx_errors); goto drop; @@ -530,10 +538,23 @@ static struct mpls_route *mpls_rt_alloc(u8 num_nh, u8 max_alen, u8 max_labels) return rt; } +static void mpls_rt_free_rcu(struct rcu_head *head) +{ + struct mpls_route *rt; + + rt = container_of(head, struct mpls_route, rt_rcu); + + change_nexthops(rt) { + netdev_put(nh->nh_dev, &nh->nh_dev_tracker); + } endfor_nexthops(rt); + + kfree(rt); +} + static void mpls_rt_free(struct mpls_route *rt) { if (rt) - kfree_rcu(rt, rt_rcu); + call_rcu(&rt->rt_rcu, mpls_rt_free_rcu); } static void mpls_notify_route(struct net *net, unsigned index, @@ -557,10 +578,8 @@ static void mpls_route_update(struct net *net, unsigned index, struct mpls_route __rcu **platform_label; struct mpls_route *rt; - ASSERT_RTNL(); - - platform_label = rtnl_dereference(net->mpls.platform_label); - rt = rtnl_dereference(platform_label[index]); + platform_label = mpls_dereference(net, net->mpls.platform_label); + rt = mpls_dereference(net, platform_label[index]); rcu_assign_pointer(platform_label[index], new); mpls_notify_route(net, index, rt, new, info); @@ -569,24 +588,23 @@ static void mpls_route_update(struct net *net, unsigned index, mpls_rt_free(rt); } -static unsigned find_free_label(struct net *net) +static unsigned int find_free_label(struct net *net) { - struct mpls_route __rcu **platform_label; - size_t platform_labels; - unsigned index; + unsigned int index; - platform_label = rtnl_dereference(net->mpls.platform_label); - platform_labels = net->mpls.platform_labels; - for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels; + for (index = MPLS_LABEL_FIRST_UNRESERVED; + index < net->mpls.platform_labels; index++) { - if (!rtnl_dereference(platform_label[index])) + if (!mpls_route_input(net, index)) return index; } + return LABEL_NOT_SPECIFIED; } #if IS_ENABLED(CONFIG_INET) static struct net_device *inet_fib_lookup_dev(struct net *net, + struct mpls_nh *nh, const void *addr) { struct net_device *dev; @@ -599,14 +617,14 @@ static struct 
net_device *inet_fib_lookup_dev(struct net *net, return ERR_CAST(rt); dev = rt->dst.dev; - dev_hold(dev); - + netdev_hold(dev, &nh->nh_dev_tracker, GFP_KERNEL); ip_rt_put(rt); return dev; } #else static struct net_device *inet_fib_lookup_dev(struct net *net, + struct mpls_nh *nh, const void *addr) { return ERR_PTR(-EAFNOSUPPORT); @@ -615,6 +633,7 @@ static struct net_device *inet_fib_lookup_dev(struct net *net, #if IS_ENABLED(CONFIG_IPV6) static struct net_device *inet6_fib_lookup_dev(struct net *net, + struct mpls_nh *nh, const void *addr) { struct net_device *dev; @@ -631,13 +650,14 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net, return ERR_CAST(dst); dev = dst->dev; - dev_hold(dev); + netdev_hold(dev, &nh->nh_dev_tracker, GFP_KERNEL); dst_release(dst); return dev; } #else static struct net_device *inet6_fib_lookup_dev(struct net *net, + struct mpls_nh *nh, const void *addr) { return ERR_PTR(-EAFNOSUPPORT); @@ -653,16 +673,17 @@ static struct net_device *find_outdev(struct net *net, if (!oif) { switch (nh->nh_via_table) { case NEIGH_ARP_TABLE: - dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh)); + dev = inet_fib_lookup_dev(net, nh, mpls_nh_via(rt, nh)); break; case NEIGH_ND_TABLE: - dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh)); + dev = inet6_fib_lookup_dev(net, nh, mpls_nh_via(rt, nh)); break; case NEIGH_LINK_TABLE: break; } } else { - dev = dev_get_by_index(net, oif); + dev = netdev_get_by_index(net, oif, + &nh->nh_dev_tracker, GFP_KERNEL); } if (!dev) @@ -671,8 +692,7 @@ static struct net_device *find_outdev(struct net *net, if (IS_ERR(dev)) return dev; - /* The caller is holding rtnl anyways, so release the dev reference */ - dev_put(dev); + nh->nh_dev = dev; return dev; } @@ -686,20 +706,17 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt, dev = find_outdev(net, rt, nh, oif); if (IS_ERR(dev)) { err = PTR_ERR(dev); - dev = NULL; goto errout; } /* Ensure this is a supported device */ err = -EINVAL; - if (!mpls_dev_get(dev)) - goto errout; + if (!mpls_dev_get(net, dev)) + goto errout_put; if ((nh->nh_via_table == NEIGH_LINK_TABLE) && (dev->addr_len != nh->nh_via_alen)) - goto errout; - - nh->nh_dev = dev; + goto errout_put; if (!(dev->flags & IFF_UP)) { nh->nh_flags |= RTNH_F_DEAD; @@ -713,6 +730,9 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt, return 0; +errout_put: + netdev_put(nh->nh_dev, &nh->nh_dev_tracker); + nh->nh_dev = NULL; errout: return err; } @@ -890,7 +910,8 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg, struct nlattr *nla_via, *nla_newdst; int remaining = cfg->rc_mp_len; int err = 0; - u8 nhs = 0; + + rt->rt_nhn = 0; change_nexthops(rt) { int attrlen; @@ -926,11 +947,9 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg, rt->rt_nhn_alive--; rtnh = rtnh_next(rtnh, &remaining); - nhs++; + rt->rt_nhn++; } endfor_nexthops(rt); - rt->rt_nhn = nhs; - return 0; errout: @@ -940,30 +959,28 @@ errout: static bool mpls_label_ok(struct net *net, unsigned int *index, struct netlink_ext_ack *extack) { - bool is_ok = true; - /* Reserved labels may not be set */ if (*index < MPLS_LABEL_FIRST_UNRESERVED) { NL_SET_ERR_MSG(extack, "Invalid label - must be MPLS_LABEL_FIRST_UNRESERVED or higher"); - is_ok = false; + return false; } /* The full 20 bit range may not be supported. 
*/ - if (is_ok && *index >= net->mpls.platform_labels) { + if (*index >= net->mpls.platform_labels) { NL_SET_ERR_MSG(extack, "Label >= configured maximum in platform_labels"); - is_ok = false; + return false; } *index = array_index_nospec(*index, net->mpls.platform_labels); - return is_ok; + + return true; } static int mpls_route_add(struct mpls_route_config *cfg, struct netlink_ext_ack *extack) { - struct mpls_route __rcu **platform_label; struct net *net = cfg->rc_nlinfo.nl_net; struct mpls_route *rt, *old; int err = -EINVAL; @@ -991,8 +1008,7 @@ static int mpls_route_add(struct mpls_route_config *cfg, } err = -EEXIST; - platform_label = rtnl_dereference(net->mpls.platform_label); - old = rtnl_dereference(platform_label[index]); + old = mpls_route_input(net, index); if ((cfg->rc_nlflags & NLM_F_EXCL) && old) goto errout; @@ -1103,7 +1119,7 @@ static int mpls_fill_stats_af(struct sk_buff *skb, struct mpls_dev *mdev; struct nlattr *nla; - mdev = mpls_dev_get(dev); + mdev = mpls_dev_rcu(dev); if (!mdev) return -ENODATA; @@ -1123,7 +1139,7 @@ static size_t mpls_get_stats_af_size(const struct net_device *dev) { struct mpls_dev *mdev; - mdev = mpls_dev_get(dev); + mdev = mpls_dev_rcu(dev); if (!mdev) return 0; @@ -1264,23 +1280,32 @@ static int mpls_netconf_get_devconf(struct sk_buff *in_skb, if (err < 0) goto errout; - err = -EINVAL; - if (!tb[NETCONFA_IFINDEX]) + if (!tb[NETCONFA_IFINDEX]) { + err = -EINVAL; goto errout; + } ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]); - dev = __dev_get_by_index(net, ifindex); - if (!dev) - goto errout; - - mdev = mpls_dev_get(dev); - if (!mdev) - goto errout; - err = -ENOBUFS; skb = nlmsg_new(mpls_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL); - if (!skb) + if (!skb) { + err = -ENOBUFS; goto errout; + } + + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, ifindex); + if (!dev) { + err = -EINVAL; + goto errout_unlock; + } + + mdev = mpls_dev_rcu(dev); + if (!mdev) { + err = -EINVAL; + goto errout_unlock; + } err = mpls_netconf_fill_devconf(skb, mdev, NETLINK_CB(in_skb).portid, @@ -1289,12 +1314,19 @@ static int mpls_netconf_get_devconf(struct sk_buff *in_skb, if (err < 0) { /* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */ WARN_ON(err == -EMSGSIZE); - kfree_skb(skb); - goto errout; + goto errout_unlock; } + err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); + + rcu_read_unlock(); errout: return err; + +errout_unlock: + rcu_read_unlock(); + kfree_skb(skb); + goto errout; } static int mpls_netconf_dump_devconf(struct sk_buff *skb, @@ -1326,7 +1358,7 @@ static int mpls_netconf_dump_devconf(struct sk_buff *skb, rcu_read_lock(); for_each_netdev_dump(net, dev, ctx->ifindex) { - mdev = mpls_dev_get(dev); + mdev = mpls_dev_rcu(dev); if (!mdev) continue; err = mpls_netconf_fill_devconf(skb, mdev, @@ -1438,8 +1470,6 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev) int err = -ENOMEM; int i; - ASSERT_RTNL(); - mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) return ERR_PTR(err); @@ -1481,16 +1511,15 @@ static void mpls_dev_destroy_rcu(struct rcu_head *head) static int mpls_ifdown(struct net_device *dev, int event) { - struct mpls_route __rcu **platform_label; struct net *net = dev_net(dev); - unsigned index; + unsigned int index; - platform_label = rtnl_dereference(net->mpls.platform_label); for (index = 0; index < net->mpls.platform_labels; index++) { - struct mpls_route *rt = rtnl_dereference(platform_label[index]); + struct mpls_route *rt; bool nh_del = false; u8 alive = 0; + rt = mpls_route_input(net, index); if (!rt) 
continue; @@ -1524,8 +1553,12 @@ static int mpls_ifdown(struct net_device *dev, int event) change_nexthops(rt) { unsigned int nh_flags = nh->nh_flags; - if (nh->nh_dev != dev) + if (nh->nh_dev != dev) { + if (nh_del) + netdev_hold(nh->nh_dev, &nh->nh_dev_tracker, + GFP_KERNEL); goto next; + } switch (event) { case NETDEV_DOWN: @@ -1557,15 +1590,14 @@ next: static void mpls_ifup(struct net_device *dev, unsigned int flags) { - struct mpls_route __rcu **platform_label; struct net *net = dev_net(dev); - unsigned index; + unsigned int index; u8 alive; - platform_label = rtnl_dereference(net->mpls.platform_label); for (index = 0; index < net->mpls.platform_labels; index++) { - struct mpls_route *rt = rtnl_dereference(platform_label[index]); + struct mpls_route *rt; + rt = mpls_route_input(net, index); if (!rt) continue; @@ -1592,28 +1624,33 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct net *net = dev_net(dev); struct mpls_dev *mdev; unsigned int flags; int err; + mutex_lock(&net->mpls.platform_mutex); + if (event == NETDEV_REGISTER) { mdev = mpls_add_dev(dev); - if (IS_ERR(mdev)) - return notifier_from_errno(PTR_ERR(mdev)); + if (IS_ERR(mdev)) { + err = PTR_ERR(mdev); + goto err; + } - return NOTIFY_OK; + goto out; } - mdev = mpls_dev_get(dev); + mdev = mpls_dev_get(net, dev); if (!mdev) - return NOTIFY_OK; + goto out; switch (event) { case NETDEV_DOWN: err = mpls_ifdown(dev, event); if (err) - return notifier_from_errno(err); + goto err; break; case NETDEV_UP: flags = netif_get_flags(dev); @@ -1629,14 +1666,15 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, } else { err = mpls_ifdown(dev, event); if (err) - return notifier_from_errno(err); + goto err; } break; case NETDEV_UNREGISTER: err = mpls_ifdown(dev, event); if (err) - return notifier_from_errno(err); - mdev = mpls_dev_get(dev); + goto err; + + mdev = mpls_dev_get(net, dev); if (mdev) { mpls_dev_sysctl_unregister(dev, mdev); RCU_INIT_POINTER(dev->mpls_ptr, NULL); @@ -1644,16 +1682,23 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, } break; case NETDEV_CHANGENAME: - mdev = mpls_dev_get(dev); + mdev = mpls_dev_get(net, dev); if (mdev) { mpls_dev_sysctl_unregister(dev, mdev); err = mpls_dev_sysctl_register(dev, mdev); if (err) - return notifier_from_errno(err); + goto err; } break; } + +out: + mutex_unlock(&net->mpls.platform_mutex); return NOTIFY_OK; + +err: + mutex_unlock(&net->mpls.platform_mutex); + return notifier_from_errno(err); } static struct notifier_block mpls_dev_notifier = { @@ -1928,6 +1973,7 @@ errout: static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { + struct net *net = sock_net(skb->sk); struct mpls_route_config *cfg; int err; @@ -1939,7 +1985,9 @@ static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, if (err < 0) goto out; + mutex_lock(&net->mpls.platform_mutex); err = mpls_route_del(cfg, extack); + mutex_unlock(&net->mpls.platform_mutex); out: kfree(cfg); @@ -1950,6 +1998,7 @@ out: static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { + struct net *net = sock_net(skb->sk); struct mpls_route_config *cfg; int err; @@ -1961,7 +2010,9 @@ static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, if (err < 0) goto out; + mutex_lock(&net->mpls.platform_mutex); err = mpls_route_add(cfg, extack); + 
mutex_unlock(&net->mpls.platform_mutex); out: kfree(cfg); @@ -2124,7 +2175,7 @@ static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh, if (i == RTA_OIF) { ifindex = nla_get_u32(tb[i]); - filter->dev = __dev_get_by_index(net, ifindex); + filter->dev = dev_get_by_index_rcu(net, ifindex); if (!filter->dev) return -ENODEV; filter->filter_set = 1; @@ -2162,20 +2213,19 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb) struct net *net = sock_net(skb->sk); struct mpls_route __rcu **platform_label; struct fib_dump_filter filter = { - .rtnl_held = true, + .rtnl_held = false, }; unsigned int flags = NLM_F_MULTI; size_t platform_labels; unsigned int index; + int err; - ASSERT_RTNL(); + rcu_read_lock(); if (cb->strict_check) { - int err; - err = mpls_valid_fib_dump_req(net, nlh, &filter, cb); if (err < 0) - return err; + goto err; /* for MPLS, there is only 1 table with fixed type and flags. * If either are set in the filter then return nothing. @@ -2183,14 +2233,14 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb) if ((filter.table_id && filter.table_id != RT_TABLE_MAIN) || (filter.rt_type && filter.rt_type != RTN_UNICAST) || filter.flags) - return skb->len; + goto unlock; } index = cb->args[0]; if (index < MPLS_LABEL_FIRST_UNRESERVED) index = MPLS_LABEL_FIRST_UNRESERVED; - platform_label = rtnl_dereference(net->mpls.platform_label); + platform_label = rcu_dereference(net->mpls.platform_label); platform_labels = net->mpls.platform_labels; if (filter.filter_set) @@ -2199,7 +2249,7 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb) for (; index < platform_labels; index++) { struct mpls_route *rt; - rt = rtnl_dereference(platform_label[index]); + rt = rcu_dereference(platform_label[index]); if (!rt) continue; @@ -2214,7 +2264,13 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb) } cb->args[0] = index; +unlock: + rcu_read_unlock(); return skb->len; + +err: + rcu_read_unlock(); + return err; } static inline size_t lfib_nlmsg_size(struct mpls_route *rt) @@ -2345,18 +2401,20 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, u32 portid = NETLINK_CB(in_skb).portid; u32 in_label = LABEL_NOT_SPECIFIED; struct nlattr *tb[RTA_MAX + 1]; + struct mpls_route *rt = NULL; u32 labels[MAX_NEW_LABELS]; struct mpls_shim_hdr *hdr; unsigned int hdr_size = 0; const struct mpls_nh *nh; struct net_device *dev; - struct mpls_route *rt; struct rtmsg *rtm, *r; struct nlmsghdr *nlh; struct sk_buff *skb; u8 n_labels; int err; + mutex_lock(&net->mpls.platform_mutex); + err = mpls_valid_getroute_req(in_skb, in_nlh, tb, extack); if (err < 0) goto errout; @@ -2378,7 +2436,8 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, } } - rt = mpls_route_input_rcu(net, in_label); + if (in_label < net->mpls.platform_labels) + rt = mpls_route_input(net, in_label); if (!rt) { err = -ENETUNREACH; goto errout; @@ -2399,7 +2458,8 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, goto errout_free; } - return rtnl_unicast(skb, net, portid); + err = rtnl_unicast(skb, net, portid); + goto errout; } if (tb[RTA_NEWDST]) { @@ -2491,12 +2551,14 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, err = rtnl_unicast(skb, net, portid); errout: + mutex_unlock(&net->mpls.platform_mutex); return err; nla_put_failure: nlmsg_cancel(skb, nlh); err = -EMSGSIZE; errout_free: + mutex_unlock(&net->mpls.platform_mutex); 
kfree_skb(skb); return err; } @@ -2519,10 +2581,13 @@ static int resize_platform_label_table(struct net *net, size_t limit) /* In case the predefined labels need to be populated */ if (limit > MPLS_LABEL_IPV4NULL) { struct net_device *lo = net->loopback_dev; + rt0 = mpls_rt_alloc(1, lo->addr_len, 0); if (IS_ERR(rt0)) goto nort0; + rt0->rt_nh->nh_dev = lo; + netdev_hold(lo, &rt0->rt_nh->nh_dev_tracker, GFP_KERNEL); rt0->rt_protocol = RTPROT_KERNEL; rt0->rt_payload_type = MPT_IPV4; rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT; @@ -2533,10 +2598,13 @@ static int resize_platform_label_table(struct net *net, size_t limit) } if (limit > MPLS_LABEL_IPV6NULL) { struct net_device *lo = net->loopback_dev; + rt2 = mpls_rt_alloc(1, lo->addr_len, 0); if (IS_ERR(rt2)) goto nort2; + rt2->rt_nh->nh_dev = lo; + netdev_hold(lo, &rt2->rt_nh->nh_dev_tracker, GFP_KERNEL); rt2->rt_protocol = RTPROT_KERNEL; rt2->rt_payload_type = MPT_IPV6; rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT; @@ -2546,9 +2614,10 @@ static int resize_platform_label_table(struct net *net, size_t limit) lo->addr_len); } - rtnl_lock(); + mutex_lock(&net->mpls.platform_mutex); + /* Remember the original table */ - old = rtnl_dereference(net->mpls.platform_label); + old = mpls_dereference(net, net->mpls.platform_label); old_limit = net->mpls.platform_labels; /* Free any labels beyond the new table */ @@ -2579,7 +2648,7 @@ static int resize_platform_label_table(struct net *net, size_t limit) net->mpls.platform_labels = limit; rcu_assign_pointer(net->mpls.platform_label, labels); - rtnl_unlock(); + mutex_unlock(&net->mpls.platform_mutex); mpls_rt_free(rt2); mpls_rt_free(rt0); @@ -2652,12 +2721,13 @@ static const struct ctl_table mpls_table[] = { }, }; -static int mpls_net_init(struct net *net) +static __net_init int mpls_net_init(struct net *net) { size_t table_size = ARRAY_SIZE(mpls_table); struct ctl_table *table; int i; + mutex_init(&net->mpls.platform_mutex); net->mpls.platform_labels = 0; net->mpls.platform_label = NULL; net->mpls.ip_ttl_propagate = 1; @@ -2683,7 +2753,7 @@ static int mpls_net_init(struct net *net) return 0; } -static void mpls_net_exit(struct net *net) +static __net_exit void mpls_net_exit(struct net *net) { struct mpls_route __rcu **platform_label; size_t platform_labels; @@ -2703,16 +2773,20 @@ static void mpls_net_exit(struct net *net) * As such no additional rcu synchronization is necessary when * freeing the platform_label table. 
*/ - rtnl_lock(); - platform_label = rtnl_dereference(net->mpls.platform_label); + mutex_lock(&net->mpls.platform_mutex); + + platform_label = mpls_dereference(net, net->mpls.platform_label); platform_labels = net->mpls.platform_labels; + for (index = 0; index < platform_labels; index++) { - struct mpls_route *rt = rtnl_dereference(platform_label[index]); - RCU_INIT_POINTER(platform_label[index], NULL); + struct mpls_route *rt; + + rt = mpls_dereference(net, platform_label[index]); mpls_notify_route(net, index, rt, NULL, NULL); mpls_rt_free(rt); } - rtnl_unlock(); + + mutex_unlock(&net->mpls.platform_mutex); kvfree(platform_label); } @@ -2729,12 +2803,15 @@ static struct rtnl_af_ops mpls_af_ops __read_mostly = { }; static const struct rtnl_msg_handler mpls_rtnl_msg_handlers[] __initdata_or_module = { - {THIS_MODULE, PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, 0}, - {THIS_MODULE, PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, 0}, - {THIS_MODULE, PF_MPLS, RTM_GETROUTE, mpls_getroute, mpls_dump_routes, 0}, + {THIS_MODULE, PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, + RTNL_FLAG_DOIT_UNLOCKED}, + {THIS_MODULE, PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, + RTNL_FLAG_DOIT_UNLOCKED}, + {THIS_MODULE, PF_MPLS, RTM_GETROUTE, mpls_getroute, mpls_dump_routes, + RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED}, {THIS_MODULE, PF_MPLS, RTM_GETNETCONF, mpls_netconf_get_devconf, mpls_netconf_dump_devconf, - RTNL_FLAG_DUMP_UNLOCKED}, + RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED}, }; static int __init mpls_init(void) diff --git a/net/mpls/internal.h b/net/mpls/internal.h index 83c629529b57..80cb5bbcd946 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -88,6 +88,7 @@ enum mpls_payload_type { struct mpls_nh { /* next hop label forwarding entry */ struct net_device *nh_dev; + netdevice_tracker nh_dev_tracker; /* nh_flags is accessed under RCU in the packet path; it is * modified handling netdev events with rtnl lock held @@ -184,9 +185,20 @@ static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr * return result; } -static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev) +#define mpls_dereference(net, p) \ + rcu_dereference_protected( \ + (p), \ + lockdep_is_held(&(net)->mpls.platform_mutex)) + +static inline struct mpls_dev *mpls_dev_rcu(const struct net_device *dev) +{ + return rcu_dereference(dev->mpls_ptr); +} + +static inline struct mpls_dev *mpls_dev_get(const struct net *net, + const struct net_device *dev) { - return rcu_dereference_rtnl(dev->mpls_ptr); + return mpls_dereference(net, dev->mpls_ptr); } int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels, @@ -196,7 +208,8 @@ int nla_get_labels(const struct nlattr *nla, u8 max_labels, u8 *labels, bool mpls_output_possible(const struct net_device *dev); unsigned int mpls_dev_mtu(const struct net_device *dev); bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu); -void mpls_stats_inc_outucastpkts(struct net_device *dev, +void mpls_stats_inc_outucastpkts(struct net *net, + struct net_device *dev, const struct sk_buff *skb); #endif /* MPLS_INTERNAL_H */ diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index 6e73da94af7f..1a1a0eb5b787 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c @@ -53,7 +53,7 @@ static int mpls_xmit(struct sk_buff *skb) /* Find the output device */ out_dev = dst->dev; - net = dev_net(out_dev); + net = dev_net_rcu(out_dev); if (!mpls_output_possible(out_dev) || !dst->lwtstate || skb_warn_if_lro(skb)) @@ 
-128,7 +128,7 @@ static int mpls_xmit(struct sk_buff *skb) bos = false; } - mpls_stats_inc_outucastpkts(out_dev, skb); + mpls_stats_inc_outucastpkts(net, out_dev, skb); if (rt) { if (rt->rt_gw_family == AF_INET6) @@ -153,7 +153,7 @@ static int mpls_xmit(struct sk_buff *skb) return LWTUNNEL_XMIT_DONE; drop: - out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL; + out_mdev = out_dev ? mpls_dev_rcu(out_dev) : NULL; if (out_mdev) MPLS_INC_STATS(out_mdev, tx_errors); kfree_skb(skb); diff --git a/net/mptcp/pm_kernel.c b/net/mptcp/pm_kernel.c index 2ae95476dba3..598f01a573c1 100644 --- a/net/mptcp/pm_kernel.c +++ b/net/mptcp/pm_kernel.c @@ -22,6 +22,7 @@ struct pm_nl_pernet { u8 endp_signal_max; u8 endp_subflow_max; u8 endp_laminar_max; + u8 endp_fullmesh_max; u8 limit_add_addr_accepted; u8 limit_extra_subflows; u8 next_id; @@ -70,6 +71,14 @@ u8 mptcp_pm_get_endp_laminar_max(const struct mptcp_sock *msk) } EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_laminar_max); +u8 mptcp_pm_get_endp_fullmesh_max(const struct mptcp_sock *msk) +{ + struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); + + return READ_ONCE(pernet->endp_fullmesh_max); +} +EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_fullmesh_max); + u8 mptcp_pm_get_limit_add_addr_accepted(const struct mptcp_sock *msk) { struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); @@ -600,12 +609,11 @@ fill_local_addresses_vec(struct mptcp_sock *msk, struct mptcp_addr_info *remote, struct mptcp_pm_local *locals) { bool c_flag_case = remote->id && mptcp_pm_add_addr_c_flag_case(msk); - int i; /* If there is at least one MPTCP endpoint with a fullmesh flag */ - i = fill_local_addresses_vec_fullmesh(msk, remote, locals, c_flag_case); - if (i) - return i; + if (mptcp_pm_get_endp_fullmesh_max(msk)) + return fill_local_addresses_vec_fullmesh(msk, remote, locals, + c_flag_case); /* If there is at least one MPTCP endpoint with a laminar flag */ if (mptcp_pm_get_endp_laminar_max(msk)) @@ -790,6 +798,10 @@ find_next: addr_max = pernet->endp_laminar_max; WRITE_ONCE(pernet->endp_laminar_max, addr_max + 1); } + if (entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) { + addr_max = pernet->endp_fullmesh_max; + WRITE_ONCE(pernet->endp_fullmesh_max, addr_max + 1); + } pernet->endpoints++; if (!entry->addr.port) @@ -855,10 +867,10 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk, addrlen = sizeof(struct sockaddr_in6); #endif if (ssk->sk_family == AF_INET) - err = inet_bind_sk(ssk, (struct sockaddr *)&addr, addrlen); + err = inet_bind_sk(ssk, (struct sockaddr_unsized *)&addr, addrlen); #if IS_ENABLED(CONFIG_MPTCP_IPV6) else if (ssk->sk_family == AF_INET6) - err = inet6_bind_sk(ssk, (struct sockaddr *)&addr, addrlen); + err = inet6_bind_sk(ssk, (struct sockaddr_unsized *)&addr, addrlen); #endif if (err) return err; @@ -1187,6 +1199,10 @@ int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info) addr_max = pernet->endp_laminar_max; WRITE_ONCE(pernet->endp_laminar_max, addr_max - 1); } + if (entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) { + addr_max = pernet->endp_fullmesh_max; + WRITE_ONCE(pernet->endp_fullmesh_max, addr_max - 1); + } pernet->endpoints--; list_del_rcu(&entry->list); @@ -1502,6 +1518,18 @@ int mptcp_pm_nl_set_flags(struct mptcp_pm_addr_entry *local, changed = (local->flags ^ entry->flags) & mask; entry->flags = (entry->flags & ~mask) | (local->flags & mask); *local = *entry; + + if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH) { + u8 addr_max = pernet->endp_fullmesh_max; + + if (entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) + addr_max++; + else + 
addr_max--; + + WRITE_ONCE(pernet->endp_fullmesh_max, addr_max); + } + spin_unlock_bh(&pernet->lock); mptcp_pm_nl_set_flags_all(net, local, changed); diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 2d6b8de35c44..4cd5df01446e 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1074,11 +1074,12 @@ static void mptcp_enter_memory_pressure(struct sock *sk) mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); - if (first) + if (first && !ssk->sk_bypass_prot_mem) { tcp_enter_memory_pressure(ssk); - sk_stream_moderate_sndbuf(ssk); + first = false; + } - first = false; + sk_stream_moderate_sndbuf(ssk); } __mptcp_sync_sndbuf(sk); } @@ -3745,7 +3746,8 @@ static int mptcp_ioctl(struct sock *sk, int cmd, int *karg) return 0; } -static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +static int mptcp_connect(struct sock *sk, struct sockaddr_unsized *uaddr, + int addr_len) { struct mptcp_subflow_context *subflow; struct mptcp_sock *msk = mptcp_sk(sk); @@ -3855,7 +3857,7 @@ static struct proto mptcp_prot = { .no_autobind = true, }; -static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +static int mptcp_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { struct mptcp_sock *msk = mptcp_sk(sock->sk); struct sock *ssk, *sk = sock->sk; diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 379a88e14e8d..9a3429175758 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -1183,6 +1183,7 @@ void __mptcp_pm_kernel_worker(struct mptcp_sock *msk); u8 mptcp_pm_get_endp_signal_max(const struct mptcp_sock *msk); u8 mptcp_pm_get_endp_subflow_max(const struct mptcp_sock *msk); u8 mptcp_pm_get_endp_laminar_max(const struct mptcp_sock *msk); +u8 mptcp_pm_get_endp_fullmesh_max(const struct mptcp_sock *msk); u8 mptcp_pm_get_limit_add_addr_accepted(const struct mptcp_sock *msk); u8 mptcp_pm_get_limit_extra_subflows(const struct mptcp_sock *msk); diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index a28a48385885..de90a2897d2d 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -982,6 +982,8 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info) mptcp_pm_get_endp_subflow_max(msk); info->mptcpi_endp_laminar_max = mptcp_pm_get_endp_laminar_max(msk); + info->mptcpi_endp_fullmesh_max = + mptcp_pm_get_endp_fullmesh_max(msk); } if (__mptcp_check_fallback(msk)) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index e8325890a322..30961b3d1702 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1660,7 +1660,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_pm_local *local, addrlen = sizeof(struct sockaddr_in6); #endif ssk->sk_bound_dev_if = local->ifindex; - err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen); + err = kernel_bind(sf, (struct sockaddr_unsized *)&addr, addrlen); if (err) { MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXBINDERR); pr_debug("msk=%p local=%d remote=%d bind error: %d\n", @@ -1680,7 +1680,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_pm_local *local, sock_hold(ssk); list_add_tail(&subflow->node, &msk->conn_list); - err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK); + err = kernel_connect(sf, (struct sockaddr_unsized *)&addr, addrlen, O_NONBLOCK); if (err && err != -EINPROGRESS) { MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNTXCONNECTERR); pr_debug("msk=%p local=%d remote=%d connect error: %d\n", diff --git 
a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 3402675bf521..5a0c6f42bd8f 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1435,7 +1435,7 @@ static int bind_mcastif_addr(struct socket *sock, struct net_device *dev) sin.sin_addr.s_addr = addr; sin.sin_port = 0; - return kernel_bind(sock, (struct sockaddr *)&sin, sizeof(sin)); + return kernel_bind(sock, (struct sockaddr_unsized *)&sin, sizeof(sin)); } static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen, @@ -1501,7 +1501,7 @@ static int make_send_sock(struct netns_ipvs *ipvs, int id, } get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id); - result = kernel_connect(sock, (struct sockaddr *)&mcast_addr, + result = kernel_connect(sock, (struct sockaddr_unsized *)&mcast_addr, salen, 0); if (result < 0) { pr_err("Error connecting to the multicast addr\n"); @@ -1542,7 +1542,7 @@ static int make_receive_sock(struct netns_ipvs *ipvs, int id, get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id); sock->sk->sk_bound_dev_if = dev->ifindex; - result = kernel_bind(sock, (struct sockaddr *)&mcast_addr, salen); + result = kernel_bind(sock, (struct sockaddr_unsized *)&mcast_addr, salen); if (result < 0) { pr_err("Error binding to the multicast addr\n"); goto error; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 344f88295976..0b95f226f211 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1668,7 +1668,7 @@ __nf_conntrack_alloc(struct net *net, /* We don't want any race condition at early drop stage */ ct_count = atomic_inc_return(&cnet->count); - if (nf_conntrack_max && unlikely(ct_count > nf_conntrack_max)) { + if (unlikely(ct_count > nf_conntrack_max)) { if (!early_drop(net, hash)) { if (!conntrack_gc_work.early_drop) conntrack_gc_work.early_drop = true; diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 708b79380f04..207b240b14e5 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -648,7 +648,7 @@ static struct ctl_table nf_ct_sysctl_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, + .extra1 = SYSCTL_ONE, .extra2 = SYSCTL_INT_MAX, }, [NF_SYSCTL_CT_COUNT] = { @@ -929,7 +929,7 @@ static struct ctl_table nf_ct_netfilter_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, + .extra1 = SYSCTL_ONE, .extra2 = SYSCTL_INT_MAX, }, }; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index eed434e0a970..f3de2f9bbebf 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5770,7 +5770,11 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *binding) { struct nft_set_binding *i; - struct nft_set_iter iter; + struct nft_set_iter iter = { + .genmask = nft_genmask_next(ctx->net), + .type = NFT_ITER_UPDATE, + .fn = nf_tables_bind_check_setelem, + }; if (!list_empty(&set->bindings) && nft_set_is_anonymous(set)) return -EBUSY; @@ -5785,13 +5789,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, goto bind; } - iter.genmask = nft_genmask_next(ctx->net); - iter.type = NFT_ITER_UPDATE; - iter.skip = 0; - iter.count = 0; - iter.err = 0; - iter.fn = nf_tables_bind_check_setelem; - set->ops->walk(ctx, set, &iter); if (!iter.err) iter.err = nft_set_catchall_bind_check(ctx, set); 
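[Editor's note] The nf_tables_bind_set() hunk above (and the matching nf_tables_dump_set() and nft_lookup_validate() hunks below) replaces field-by-field setup of the iterator with a designated initializer at the point of declaration. That is behavior-preserving because C99 guarantees that members not named in a designated initializer are zero-initialized, which covers the dropped iter.skip/iter.count/iter.err assignments. A standalone sketch of that guarantee (generic struct, not the kernel's):

#include <assert.h>

struct iter {
	unsigned int genmask;
	int type;
	int skip;
	int count;
	int err;
};

int main(void)
{
	/* only two members are named; C99 6.7.8 zeroes the rest */
	struct iter it = {
		.genmask = 0x3,
		.type = 1,
	};

	assert(it.skip == 0 && it.count == 0 && it.err == 0);
	return 0;
}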
@@ -6195,7 +6192,17 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) struct nftables_pernet *nft_net; struct nft_table *table; struct nft_set *set; - struct nft_set_dump_args args; + struct nft_set_dump_args args = { + .cb = cb, + .skb = skb, + .reset = dump_ctx->reset, + .iter = { + .genmask = nft_genmask_cur(net), + .type = NFT_ITER_READ, + .skip = cb->args[0], + .fn = nf_tables_dump_setelem, + }, + }; bool set_found = false; struct nlmsghdr *nlh; struct nlattr *nest; @@ -6246,15 +6253,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) if (nest == NULL) goto nla_put_failure; - args.cb = cb; - args.skb = skb; - args.reset = dump_ctx->reset; - args.iter.genmask = nft_genmask_cur(net); - args.iter.type = NFT_ITER_READ; - args.iter.skip = cb->args[0]; - args.iter.count = 0; - args.iter.err = 0; - args.iter.fn = nf_tables_dump_setelem; set->ops->walk(&dump_ctx->ctx, set, &args.iter); if (!args.iter.err && args.iter.count == cb->args[0]) diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index 58c5b14889c4..fc2d7c5d83c8 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c @@ -246,19 +246,16 @@ static int nft_lookup_validate(const struct nft_ctx *ctx, const struct nft_expr *expr) { const struct nft_lookup *priv = nft_expr_priv(expr); - struct nft_set_iter iter; + struct nft_set_iter iter = { + .genmask = nft_genmask_next(ctx->net), + .type = NFT_ITER_UPDATE, + .fn = nft_setelem_validate, + }; if (!(priv->set->flags & NFT_SET_MAP) || priv->set->dtype != NFT_DATA_VERDICT) return 0; - iter.genmask = nft_genmask_next(ctx->net); - iter.type = NFT_ITER_UPDATE; - iter.skip = 0; - iter.count = 0; - iter.err = 0; - iter.fn = nft_setelem_validate; - priv->set->ops->walk(ctx, priv->set, &iter); if (!iter.err) iter.err = nft_set_catchall_validate(ctx, priv->set); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2b46c0cd752a..8e5151f0c6e4 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -596,10 +596,8 @@ static void netlink_remove(struct sock *sk) table = &nl_table[sk->sk_protocol]; if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node, - netlink_rhashtable_params)) { - WARN_ON(refcount_read(&sk->sk_refcnt) == 1); + netlink_rhashtable_params)) __sock_put(sk); - } netlink_table_grab(); if (nlk_sk(sk)->subscriptions) { @@ -968,7 +966,7 @@ static void netlink_undo_bind(int group, long unsigned int groups, nlk->netlink_unbind(sock_net(sk), undo + 1); } -static int netlink_bind(struct socket *sock, struct sockaddr *addr, +static int netlink_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len) { struct sock *sk = sock->sk; @@ -1056,7 +1054,7 @@ unlock: return err; } -static int netlink_connect(struct socket *sock, struct sockaddr *addr, +static int netlink_connect(struct socket *sock, struct sockaddr_unsized *addr, int alen, int flags) { int err = 0; diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 3331669d8e33..5ed1a71ceec1 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -561,7 +561,7 @@ static int nr_release(struct socket *sock) return 0; } -static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +static int nr_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); @@ -632,8 +632,8 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) return 0; } -static int nr_connect(struct 
socket *sock, struct sockaddr *uaddr, - int addr_len, int flags) +static int nr_connect(struct socket *sock, struct sockaddr_unsized *uaddr, + int addr_len, int flags) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 57a2f97004e1..f1be1e84f665 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -56,7 +56,7 @@ static struct proto llcp_sock_proto = { .obj_size = sizeof(struct nfc_llcp_sock), }; -static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) +static int llcp_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, int alen) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); @@ -146,7 +146,7 @@ error: return ret; } -static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr, +static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, int alen) { struct sock *sk = sock->sk; @@ -648,7 +648,7 @@ out: return err; } -static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, +static int llcp_sock_connect(struct socket *sock, struct sockaddr_unsized *_addr, int len, int flags) { struct sock *sk = sock->sk; diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index 5125392bb68e..b049022399ae 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -73,7 +73,7 @@ static int rawsock_release(struct socket *sock) return 0; } -static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, +static int rawsock_connect(struct socket *sock, struct sockaddr_unsized *_addr, int len, int flags) { struct sock *sk = sock->sk; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 173e6edda08f..494d628d10a5 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3279,11 +3279,12 @@ out_unlock: * Bind a packet socket to a device */ -static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, +static int packet_bind_spkt(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { struct sock *sk = sock->sk; - char name[sizeof(uaddr->sa_data_min) + 1]; + struct sockaddr *sa = (struct sockaddr *)uaddr; + char name[sizeof(sa->sa_data) + 1]; /* * Check legality @@ -3294,13 +3295,13 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, /* uaddr->sa_data comes from the userspace, it's not guaranteed to be * zero-terminated. 
*/ - memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min)); - name[sizeof(uaddr->sa_data_min)] = 0; + memcpy(name, sa->sa_data, sizeof(sa->sa_data)); + name[sizeof(sa->sa_data)] = 0; return packet_do_bind(sk, name, 0, 0); } -static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +static int packet_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; struct sock *sk = sock->sk; @@ -3580,11 +3581,11 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, return -EOPNOTSUPP; uaddr->sa_family = AF_PACKET; - memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min)); + memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex)); if (dev) - strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min)); + strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); rcu_read_unlock(); return sizeof(*uaddr); diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 4db564d9d522..120e711ea78c 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -882,7 +882,8 @@ drop: return newsk; } -static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len) +static int pep_sock_connect(struct sock *sk, struct sockaddr_unsized *addr, + int len) { struct pep_sock *pn = pep_sk(sk); int err; diff --git a/net/phonet/socket.c b/net/phonet/socket.c index db2d552e9b32..4423d483c630 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -153,7 +153,7 @@ EXPORT_SYMBOL(pn_sock_unhash); static DEFINE_MUTEX(port_mutex); -static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len) +static int pn_socket_bind(struct socket *sock, struct sockaddr_unsized *addr, int len) { struct sock *sk = sock->sk; struct pn_sock *pn = pn_sk(sk); @@ -206,16 +206,16 @@ static int pn_socket_autobind(struct socket *sock) memset(&sa, 0, sizeof(sa)); sa.spn_family = AF_PHONET; - err = pn_socket_bind(sock, (struct sockaddr *)&sa, - sizeof(struct sockaddr_pn)); + err = pn_socket_bind(sock, (struct sockaddr_unsized *)&sa, + sizeof(struct sockaddr_pn)); if (err != -EINVAL) return err; BUG_ON(!pn_port(pn_sk(sock->sk)->sobject)); return 0; /* socket was already bound */ } -static int pn_socket_connect(struct socket *sock, struct sockaddr *addr, - int len, int flags) +static int pn_socket_connect(struct socket *sock, struct sockaddr_unsized *addr, + int len, int flags) { struct sock *sk = sock->sk; struct pn_sock *pn = pn_sk(sk); diff --git a/net/psp/psp-nl-gen.c b/net/psp/psp-nl-gen.c index 9fdd6f831803..73f8b06d66f0 100644 --- a/net/psp/psp-nl-gen.c +++ b/net/psp/psp-nl-gen.c @@ -47,6 +47,11 @@ static const struct nla_policy psp_tx_assoc_nl_policy[PSP_A_ASSOC_SOCK_FD + 1] = [PSP_A_ASSOC_SOCK_FD] = { .type = NLA_U32, }, }; +/* PSP_CMD_GET_STATS - do */ +static const struct nla_policy psp_get_stats_nl_policy[PSP_A_STATS_DEV_ID + 1] = { + [PSP_A_STATS_DEV_ID] = NLA_POLICY_MIN(NLA_U32, 1), +}; + /* Ops table for psp */ static const struct genl_split_ops psp_nl_ops[] = { { @@ -99,6 +104,20 @@ static const struct genl_split_ops psp_nl_ops[] = { .maxattr = PSP_A_ASSOC_SOCK_FD, .flags = GENL_CMD_CAP_DO, }, + { + .cmd = PSP_CMD_GET_STATS, + .pre_doit = psp_device_get_locked, + .doit = psp_nl_get_stats_doit, + .post_doit = psp_device_unlock, + .policy = psp_get_stats_nl_policy, + .maxattr = PSP_A_STATS_DEV_ID, + .flags = GENL_CMD_CAP_DO, + }, + { + .cmd = PSP_CMD_GET_STATS, + .dumpit = psp_nl_get_stats_dumpit, + .flags = 
GENL_CMD_CAP_DUMP, + }, }; static const struct genl_multicast_group psp_nl_mcgrps[] = { diff --git a/net/psp/psp-nl-gen.h b/net/psp/psp-nl-gen.h index 25268ed11fb5..5bc3b5d5a53e 100644 --- a/net/psp/psp-nl-gen.h +++ b/net/psp/psp-nl-gen.h @@ -28,6 +28,8 @@ int psp_nl_dev_set_doit(struct sk_buff *skb, struct genl_info *info); int psp_nl_key_rotate_doit(struct sk_buff *skb, struct genl_info *info); int psp_nl_rx_assoc_doit(struct sk_buff *skb, struct genl_info *info); int psp_nl_tx_assoc_doit(struct sk_buff *skb, struct genl_info *info); +int psp_nl_get_stats_doit(struct sk_buff *skb, struct genl_info *info); +int psp_nl_get_stats_dumpit(struct sk_buff *skb, struct netlink_callback *cb); enum { PSP_NLGRP_MGMT, diff --git a/net/psp/psp_main.c b/net/psp/psp_main.c index 481aaf0fc9fc..a8534124f626 100644 --- a/net/psp/psp_main.c +++ b/net/psp/psp_main.c @@ -60,7 +60,8 @@ psp_dev_create(struct net_device *netdev, !psd_ops->key_rotate || !psd_ops->rx_spi_alloc || !psd_ops->tx_key_add || - !psd_ops->tx_key_del)) + !psd_ops->tx_key_del || + !psd_ops->get_stats)) return ERR_PTR(-EINVAL); psd = kzalloc(sizeof(*psd), GFP_KERNEL); diff --git a/net/psp/psp_nl.c b/net/psp/psp_nl.c index 8aaca62744c3..6afd7707ec12 100644 --- a/net/psp/psp_nl.c +++ b/net/psp/psp_nl.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/ethtool.h> #include <linux/skbuff.h> #include <linux/xarray.h> #include <net/genetlink.h> @@ -262,6 +263,7 @@ int psp_nl_key_rotate_doit(struct sk_buff *skb, struct genl_info *info) psd->generation & ~PSP_GEN_VALID_MASK); psp_assocs_key_rotated(psd); + psd->stats.rotations++; nlmsg_end(ntf, (struct nlmsghdr *)ntf->data); genlmsg_multicast_netns(&psp_nl_family, dev_net(psd->main_netdev), ntf, @@ -503,3 +505,94 @@ err_free_msg: nlmsg_free(rsp); return err; } + +static int +psp_nl_stats_fill(struct psp_dev *psd, struct sk_buff *rsp, + const struct genl_info *info) +{ + unsigned int required_cnt = sizeof(struct psp_dev_stats) / sizeof(u64); + struct psp_dev_stats stats; + void *hdr; + int i; + + memset(&stats, 0xff, sizeof(stats)); + psd->ops->get_stats(psd, &stats); + + for (i = 0; i < required_cnt; i++) + if (WARN_ON_ONCE(stats.required[i] == ETHTOOL_STAT_NOT_SET)) + return -EOPNOTSUPP; + + hdr = genlmsg_iput(rsp, info); + if (!hdr) + return -EMSGSIZE; + + if (nla_put_u32(rsp, PSP_A_STATS_DEV_ID, psd->id) || + nla_put_uint(rsp, PSP_A_STATS_KEY_ROTATIONS, + psd->stats.rotations) || + nla_put_uint(rsp, PSP_A_STATS_STALE_EVENTS, psd->stats.stales) || + nla_put_uint(rsp, PSP_A_STATS_RX_PACKETS, stats.rx_packets) || + nla_put_uint(rsp, PSP_A_STATS_RX_BYTES, stats.rx_bytes) || + nla_put_uint(rsp, PSP_A_STATS_RX_AUTH_FAIL, stats.rx_auth_fail) || + nla_put_uint(rsp, PSP_A_STATS_RX_ERROR, stats.rx_error) || + nla_put_uint(rsp, PSP_A_STATS_RX_BAD, stats.rx_bad) || + nla_put_uint(rsp, PSP_A_STATS_TX_PACKETS, stats.tx_packets) || + nla_put_uint(rsp, PSP_A_STATS_TX_BYTES, stats.tx_bytes) || + nla_put_uint(rsp, PSP_A_STATS_TX_ERROR, stats.tx_error)) + goto err_cancel_msg; + + genlmsg_end(rsp, hdr); + return 0; + +err_cancel_msg: + genlmsg_cancel(rsp, hdr); + return -EMSGSIZE; +} + +int psp_nl_get_stats_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct psp_dev *psd = info->user_ptr[0]; + struct sk_buff *rsp; + int err; + + rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!rsp) + return -ENOMEM; + + err = psp_nl_stats_fill(psd, rsp, info); + if (err) + goto err_free_msg; + + return genlmsg_reply(rsp, info); + +err_free_msg: + nlmsg_free(rsp); + return err; +} + 
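[Editor's note] psp_nl_stats_fill() above leans on a poison-and-verify idiom: the stats struct is memset to 0xff before the driver callback runs, so every u64 counter starts out as ~0ULL, the value of ETHTOOL_STAT_NOT_SET. Any required counter the driver failed to fill is then caught by the WARN_ON_ONCE() loop. A self-contained sketch of the idiom (struct and names are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STAT_NOT_SET (~0ULL)	/* same sentinel linux/ethtool.h uses */

struct dev_stats {
	uint64_t required[2];	/* counters every driver must provide */
};

/* a buggy driver callback that forgets required[1] */
static void driver_get_stats(struct dev_stats *s)
{
	s->required[0] = 42;
}

int main(void)
{
	struct dev_stats s;
	size_t i, n = sizeof(s.required) / sizeof(s.required[0]);

	memset(&s, 0xff, sizeof(s));	/* poison: every u64 reads as ~0ULL */
	driver_get_stats(&s);

	for (i = 0; i < n; i++)
		if (s.required[i] == STAT_NOT_SET)
			printf("required counter %zu left unset\n", i);
	return 0;
}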
+static int +psp_nl_stats_get_dumpit_one(struct sk_buff *rsp, struct netlink_callback *cb, + struct psp_dev *psd) +{ + if (psp_dev_check_access(psd, sock_net(rsp->sk))) + return 0; + + return psp_nl_stats_fill(psd, rsp, genl_info_dump(cb)); +} + +int psp_nl_get_stats_dumpit(struct sk_buff *rsp, struct netlink_callback *cb) +{ + struct psp_dev *psd; + int err = 0; + + mutex_lock(&psp_devs_lock); + xa_for_each_start(&psp_devs, cb->args[0], psd, cb->args[0]) { + mutex_lock(&psd->lock); + err = psp_nl_stats_get_dumpit_one(rsp, cb, psd); + mutex_unlock(&psd->lock); + if (err) + break; + } + mutex_unlock(&psp_devs_lock); + + return err; +} diff --git a/net/psp/psp_sock.c b/net/psp/psp_sock.c index a931d825d1cc..f785672b7df6 100644 --- a/net/psp/psp_sock.c +++ b/net/psp/psp_sock.c @@ -253,8 +253,10 @@ void psp_assocs_key_rotated(struct psp_dev *psd) /* Mark the stale associations as invalid, they will no longer * be able to Rx any traffic. */ - list_for_each_entry_safe(pas, next, &psd->prev_assocs, assocs_list) + list_for_each_entry_safe(pas, next, &psd->prev_assocs, assocs_list) { pas->generation |= ~PSP_GEN_VALID_MASK; + psd->stats.stales++; + } list_splice_init(&psd->prev_assocs, &psd->stale_assocs); list_splice_init(&psd->active_assocs, &psd->prev_assocs); diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c index 00c51cf693f3..dab839f61ee9 100644 --- a/net/qrtr/af_qrtr.c +++ b/net/qrtr/af_qrtr.c @@ -824,7 +824,7 @@ static int qrtr_autobind(struct socket *sock) } /* Bind socket to specified sockaddr. */ -static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len) +static int qrtr_bind(struct socket *sock, struct sockaddr_unsized *saddr, int len) { DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr); struct qrtr_sock *ipc = qrtr_sk(sock->sk); @@ -1084,7 +1084,7 @@ out: return rc; } -static int qrtr_connect(struct socket *sock, struct sockaddr *saddr, +static int qrtr_connect(struct socket *sock, struct sockaddr_unsized *saddr, int len, int flags) { DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr); diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c index 3de9350cbf30..bfcc1a453f23 100644 --- a/net/qrtr/ns.c +++ b/net/qrtr/ns.c @@ -714,7 +714,7 @@ int qrtr_ns_init(void) sq.sq_port = QRTR_PORT_CTRL; qrtr_ns.local_node = sq.sq_node; - ret = kernel_bind(qrtr_ns.sock, (struct sockaddr *)&sq, sizeof(sq)); + ret = kernel_bind(qrtr_ns.sock, (struct sockaddr_unsized *)&sq, sizeof(sq)); if (ret < 0) { pr_err("failed to bind to socket\n"); goto err_wq; diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 4a7217fbeab6..b396c673dfaf 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -533,7 +533,7 @@ out: } -static int rds_connect(struct socket *sock, struct sockaddr *uaddr, +static int rds_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; diff --git a/net/rds/bind.c b/net/rds/bind.c index 97a29172a8ee..f800d920d969 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -160,7 +160,7 @@ void rds_remove_bound(struct rds_sock *rs) rs->rs_bound_addr = in6addr_any; } -int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +int rds_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { struct sock *sk = sock->sk; struct rds_sock *rs = rds_sk_to_rs(sk); diff --git a/net/rds/rds.h b/net/rds/rds.h index 5b1c072e2e7f..a029e5fcdea7 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -735,7 +735,7 @@ extern wait_queue_head_t rds_poll_waitq; /* bind.c */ -int rds_bind(struct socket *sock, struct sockaddr 
*uaddr, int addr_len); +int rds_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len); void rds_remove_bound(struct rds_sock *rs); struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port, __u32 scope_id); diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index a0046e99d6df..92891b0d224d 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c @@ -145,7 +145,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp) addrlen = sizeof(sin); } - ret = kernel_bind(sock, addr, addrlen); + ret = kernel_bind(sock, (struct sockaddr_unsized *)addr, addrlen); if (ret) { rdsdebug("bind failed with %d at address %pI6c\n", ret, &conn->c_laddr); @@ -173,7 +173,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp) * own the socket */ rds_tcp_set_callbacks(sock, cp); - ret = kernel_connect(sock, addr, addrlen, O_NONBLOCK); + ret = kernel_connect(sock, (struct sockaddr_unsized *)addr, addrlen, O_NONBLOCK); rdsdebug("connect to address %pI6c returned %d\n", &conn->c_faddr, ret); if (ret == -EINPROGRESS) diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 91e34af3fe5d..820d3e20de19 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -290,7 +290,7 @@ struct socket *rds_tcp_listen_init(struct net *net, bool isv6) addr_len = sizeof(*sin); } - ret = kernel_bind(sock, (struct sockaddr *)&ss, addr_len); + ret = kernel_bind(sock, (struct sockaddr_unsized *)&ss, addr_len); if (ret < 0) { rdsdebug("could not bind %s listener socket: %d\n", isv6 ? "IPv6" : "IPv4", ret); diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 543f9e8ebb69..fd67494f2815 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -693,7 +693,7 @@ static int rose_release(struct socket *sock) return 0; } -static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +static int rose_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); @@ -765,7 +765,8 @@ out_release: return err; } -static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) +static int rose_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len, + int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 36df0274d7b7..0c2c68c4b07e 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -127,7 +127,7 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx, /* * bind a local address to an RxRPC socket */ -static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len) +static int rxrpc_bind(struct socket *sock, struct sockaddr_unsized *saddr, int len) { struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr; struct rxrpc_local *local; @@ -481,7 +481,7 @@ EXPORT_SYMBOL(rxrpc_kernel_set_notifications); * - this just targets it at a specific destination; no actual connection * negotiation takes place */ -static int rxrpc_connect(struct socket *sock, struct sockaddr *addr, +static int rxrpc_connect(struct socket *sock, struct sockaddr_unsized *addr, int addr_len, int flags) { struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)addr; diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c index 2ea71e3831f7..98ea76fae70f 100644 --- a/net/rxrpc/rxperf.c +++ b/net/rxrpc/rxperf.c @@ -211,7 +211,7 @@ static int rxperf_open_socket(void) ret = rxrpc_sock_set_security_keyring(socket->sk, rxperf_sec_keyring); - ret = kernel_bind(socket, 
(struct sockaddr *)&srx, sizeof(srx)); + ret = kernel_bind(socket, (struct sockaddr_unsized *)&srx, sizeof(srx)); if (ret < 0) goto error_2; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 5f01f567c934..f27b583def78 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -29,31 +29,6 @@ static LIST_HEAD(mirred_list); static DEFINE_SPINLOCK(mirred_list_lock); -#define MIRRED_NEST_LIMIT 4 - -#ifndef CONFIG_PREEMPT_RT -static u8 tcf_mirred_nest_level_inc_return(void) -{ - return __this_cpu_inc_return(softnet_data.xmit.sched_mirred_nest); -} - -static void tcf_mirred_nest_level_dec(void) -{ - __this_cpu_dec(softnet_data.xmit.sched_mirred_nest); -} - -#else -static u8 tcf_mirred_nest_level_inc_return(void) -{ - return current->net_xmit.sched_mirred_nest++; -} - -static void tcf_mirred_nest_level_dec(void) -{ - current->net_xmit.sched_mirred_nest--; -} -#endif - static bool tcf_mirred_is_act_redirect(int action) { return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR; @@ -439,44 +414,53 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb, { struct tcf_mirred *m = to_mirred(a); int retval = READ_ONCE(m->tcf_action); - unsigned int nest_level; + struct netdev_xmit *xmit; bool m_mac_header_xmit; struct net_device *dev; - int m_eaction; + int i, m_eaction; u32 blockid; - nest_level = tcf_mirred_nest_level_inc_return(); - if (unlikely(nest_level > MIRRED_NEST_LIMIT)) { +#ifdef CONFIG_PREEMPT_RT + xmit = ¤t->net_xmit; +#else + xmit = this_cpu_ptr(&softnet_data.xmit); +#endif + if (unlikely(xmit->sched_mirred_nest >= MIRRED_NEST_LIMIT)) { net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n", netdev_name(skb->dev)); - retval = TC_ACT_SHOT; - goto dec_nest_level; + return TC_ACT_SHOT; } tcf_lastuse_update(&m->tcf_tm); tcf_action_update_bstats(&m->common, skb); blockid = READ_ONCE(m->tcfm_blockid); - if (blockid) { - retval = tcf_blockcast(skb, m, blockid, res, retval); - goto dec_nest_level; - } + if (blockid) + return tcf_blockcast(skb, m, blockid, res, retval); dev = rcu_dereference_bh(m->tcfm_dev); if (unlikely(!dev)) { pr_notice_once("tc mirred: target device is gone\n"); tcf_action_inc_overlimit_qstats(&m->common); - goto dec_nest_level; + return retval; } + for (i = 0; i < xmit->sched_mirred_nest; i++) { + if (xmit->sched_mirred_dev[i] != dev) + continue; + pr_notice_once("tc mirred: loop on device %s\n", + netdev_name(dev)); + tcf_action_inc_overlimit_qstats(&m->common); + return retval; + } + + xmit->sched_mirred_dev[xmit->sched_mirred_nest++] = dev; m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit); m_eaction = READ_ONCE(m->tcfm_eaction); retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction, retval); - -dec_nest_level: - tcf_mirred_nest_level_dec(); + xmit->sched_mirred_nest--; return retval; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index ecec0a1e1c1a..f751cd5eeac8 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1866,7 +1866,7 @@ int tcf_classify(struct sk_buff *skb, struct tc_skb_cb *cb = tc_skb_cb(skb); ext = tc_skb_ext_alloc(skb); - if (WARN_ON_ONCE(!ext)) { + if (!ext) { tcf_set_drop_reason(skb, SKB_DROP_REASON_NOMEM); return TC_ACT_SHOT; } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 7dee9748a56b..852e603c1755 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -669,7 +669,6 @@ struct Qdisc noop_qdisc = { .ops = &noop_qdisc_ops, .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), .dev_queue = &noop_netdev_queue, - .busylock = 
__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), .gso_skb = { .next = (struct sk_buff *)&noop_qdisc.gso_skb, .prev = (struct sk_buff *)&noop_qdisc.gso_skb, @@ -682,7 +681,6 @@ struct Qdisc noop_qdisc = { .qlen = 0, .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock), }, - .owner = -1, }; EXPORT_SYMBOL(noop_qdisc); @@ -974,10 +972,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, } } - spin_lock_init(&sch->busylock); - lockdep_set_class(&sch->busylock, - dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); - /* seqlock has the same scope of busylock, for NOLOCK qdisc */ spin_lock_init(&sch->seqlock); lockdep_set_class(&sch->seqlock, @@ -988,7 +982,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, sch->enqueue = ops->enqueue; sch->dequeue = ops->dequeue; sch->dev_queue = dev_queue; - sch->owner = -1; netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL); refcount_set(&sch->refcnt, 1); diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 568ff8797c39..069b7e45d8bd 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -777,54 +777,6 @@ static enum sctp_scope sctp_v6_scope(union sctp_addr *addr) return retval; } -/* Create and initialize a new sk for the socket to be returned by accept(). */ -static struct sock *sctp_v6_create_accept_sk(struct sock *sk, - struct sctp_association *asoc, - bool kern) -{ - struct sock *newsk; - struct ipv6_pinfo *newnp, *np = inet6_sk(sk); - struct sctp6_sock *newsctp6sk; - - newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, kern); - if (!newsk) - goto out; - - sock_init_data(NULL, newsk); - - sctp_copy_sock(newsk, sk, asoc); - sock_reset_flag(sk, SOCK_ZAPPED); - - newsctp6sk = (struct sctp6_sock *)newsk; - inet_sk(newsk)->pinet6 = &newsctp6sk->inet6; - - sctp_sk(newsk)->v4mapped = sctp_sk(sk)->v4mapped; - - newnp = inet6_sk(newsk); - - memcpy(newnp, np, sizeof(struct ipv6_pinfo)); - newnp->ipv6_mc_list = NULL; - newnp->ipv6_ac_list = NULL; - newnp->ipv6_fl_list = NULL; - - sctp_v6_copy_ip_options(sk, newsk); - - /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname() - * and getpeername(). - */ - sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk); - - newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr; - - if (newsk->sk_prot->init(newsk)) { - sk_common_release(newsk); - newsk = NULL; - } - -out: - return newsk; -} - /* Format a sockaddr for return to user space. This makes sure the return is * AF_INET or AF_INET6 depending on the SCTP_I_WANT_MAPPED_V4_ADDR option. */ @@ -1171,7 +1123,6 @@ static struct sctp_pf sctp_pf_inet6 = { .bind_verify = sctp_inet6_bind_verify, .send_verify = sctp_inet6_send_verify, .supported_addrs = sctp_inet6_supported_addrs, - .create_accept_sk = sctp_v6_create_accept_sk, .addr_to_user = sctp_v6_addr_to_user, .to_sk_saddr = sctp_v6_to_sk_saddr, .to_sk_daddr = sctp_v6_to_sk_daddr, diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 9dbc24af749b..2c3398f75d76 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -580,38 +580,6 @@ static int sctp_v4_is_ce(const struct sk_buff *skb) return INET_ECN_is_ce(ip_hdr(skb)->tos); } -/* Create and initialize a new sk for the socket returned by accept(). 
*/ -static struct sock *sctp_v4_create_accept_sk(struct sock *sk, - struct sctp_association *asoc, - bool kern) -{ - struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL, - sk->sk_prot, kern); - struct inet_sock *newinet; - - if (!newsk) - goto out; - - sock_init_data(NULL, newsk); - - sctp_copy_sock(newsk, sk, asoc); - sock_reset_flag(newsk, SOCK_ZAPPED); - - sctp_v4_copy_ip_options(sk, newsk); - - newinet = inet_sk(newsk); - - newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr; - - if (newsk->sk_prot->init(newsk)) { - sk_common_release(newsk); - newsk = NULL; - } - -out: - return newsk; -} - static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr) { /* No address mapping for V4 sockets */ @@ -1119,7 +1087,6 @@ static struct sctp_pf sctp_pf_inet = { .bind_verify = sctp_inet_bind_verify, .send_verify = sctp_inet_send_verify, .supported_addrs = sctp_inet_supported_addrs, - .create_accept_sk = sctp_v4_create_accept_sk, .addr_to_user = sctp_v4_addr_to_user, .to_sk_saddr = sctp_v4_to_sk_saddr, .to_sk_daddr = sctp_v4_to_sk_daddr, diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ed8293a34240..d808096f5ab1 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -306,7 +306,8 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, * sockaddr_in6 [RFC 2553]), * addr_len - the size of the address structure. */ -static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) +static int sctp_bind(struct sock *sk, struct sockaddr_unsized *addr, + int addr_len) { int retval = 0; @@ -1053,13 +1054,13 @@ static int sctp_setsockopt_bindx(struct sock *sk, struct sockaddr *addrs, } } -static int sctp_bind_add(struct sock *sk, struct sockaddr *addrs, - int addrlen) +static int sctp_bind_add(struct sock *sk, struct sockaddr_unsized *addrs, + int addrlen) { int err; lock_sock(sk); - err = sctp_setsockopt_bindx(sk, addrs, addrlen, SCTP_BINDX_ADD_ADDR); + err = sctp_setsockopt_bindx(sk, (struct sockaddr *)addrs, addrlen, SCTP_BINDX_ADD_ADDR); release_sock(sk); return err; } @@ -1553,8 +1554,6 @@ static void sctp_close(struct sock *sk, long timeout) spin_unlock_bh(&net->sctp.addr_wq_lock); sock_put(sk); - - SCTP_DBG_OBJCNT_DEC(sock); } /* Handle EPIPE error. */ @@ -4822,7 +4821,7 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr, return err; } -int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr, +int sctp_inet_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len, int flags) { if (addr_len < sizeof(uaddr->sa_family)) @@ -4831,7 +4830,7 @@ int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr, if (uaddr->sa_family == AF_UNSPEC) return -EOPNOTSUPP; - return sctp_connect(sock->sk, uaddr, addr_len, flags); + return sctp_connect(sock->sk, (struct sockaddr *)uaddr, addr_len, flags); } /* Only called when shutdown a listening SCTP socket. 
*/ @@ -4844,6 +4843,75 @@ static int sctp_disconnect(struct sock *sk, int flags) return 0; } +static struct sock *sctp_clone_sock(struct sock *sk, + struct sctp_association *asoc, + enum sctp_socket_type type) +{ + struct sock *newsk = sk_clone(sk, GFP_KERNEL, false); + struct inet_sock *newinet; + struct sctp_sock *newsp; + int err = -ENOMEM; + + if (!newsk) + return ERR_PTR(err); + + /* sk_clone() sets refcnt to 2 */ + sock_put(newsk); + + newinet = inet_sk(newsk); + newsp = sctp_sk(newsk); + + newsp->pf->to_sk_daddr(&asoc->peer.primary_addr, newsk); + newinet->inet_dport = htons(asoc->peer.port); + + newsp->pf->copy_ip_options(sk, newsk); + atomic_set(&newinet->inet_id, get_random_u16()); + + inet_set_bit(MC_LOOP, newsk); + newinet->mc_ttl = 1; + newinet->mc_index = 0; + newinet->mc_list = NULL; + +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) { + struct ipv6_pinfo *newnp = inet6_sk(newsk); + + newinet->pinet6 = &((struct sctp6_sock *)newsk)->inet6; + newinet->ipv6_fl_list = NULL; + + memcpy(newnp, inet6_sk(sk), sizeof(struct ipv6_pinfo)); + newnp->ipv6_mc_list = NULL; + newnp->ipv6_ac_list = NULL; + } +#endif + + newsp->do_auto_asconf = 0; + skb_queue_head_init(&newsp->pd_lobby); + + newsp->ep = sctp_endpoint_new(newsk, GFP_KERNEL); + if (!newsp->ep) + goto out_release; + + SCTP_DBG_OBJCNT_INC(sock); + sk_sockets_allocated_inc(newsk); + sock_prot_inuse_add(sock_net(sk), newsk->sk_prot, 1); + + err = sctp_sock_migrate(sk, newsk, asoc, type); + if (err) + goto out_release; + + /* Set newsk security attributes from original sk and connection + * security attribute from asoc. + */ + security_sctp_sk_clone(asoc, sk, newsk); + + return newsk; + +out_release: + sk_common_release(newsk); + return ERR_PTR(err); +} + /* 4.1.4 accept() - TCP Style Syntax * * Applications use accept() call to remove an established SCTP @@ -4853,18 +4921,13 @@ static int sctp_disconnect(struct sock *sk, int flags) */ static struct sock *sctp_accept(struct sock *sk, struct proto_accept_arg *arg) { - struct sctp_sock *sp; - struct sctp_endpoint *ep; - struct sock *newsk = NULL; struct sctp_association *asoc; - long timeo; + struct sock *newsk = NULL; int error = 0; + long timeo; lock_sock(sk); - sp = sctp_sk(sk); - ep = sp->ep; - if (!sctp_style(sk, TCP)) { error = -EOPNOTSUPP; goto out; @@ -4885,20 +4948,12 @@ static struct sock *sctp_accept(struct sock *sk, struct proto_accept_arg *arg) /* We treat the list of associations on the endpoint as the accept * queue and pick the first association on the list. */ - asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); + asoc = list_entry(sctp_sk(sk)->ep->asocs.next, + struct sctp_association, asocs); - newsk = sp->pf->create_accept_sk(sk, asoc, arg->kern); - if (!newsk) { - error = -ENOMEM; - goto out; - } - - /* Populate the fields of the newsk from the oldsk and migrate the - * asoc to the newsk. 
- */ - error = sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); - if (error) { - sk_common_release(newsk); + newsk = sctp_clone_sock(sk, asoc, SCTP_SOCKET_TCP); + if (IS_ERR(newsk)) { + error = PTR_ERR(newsk); newsk = NULL; } @@ -5109,9 +5164,12 @@ static void sctp_destroy_sock(struct sock *sk) sp->do_auto_asconf = 0; list_del(&sp->auto_asconf_list); } + sctp_endpoint_free(sp->ep); + sk_sockets_allocated_dec(sk); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); + SCTP_DBG_OBJCNT_DEC(sock); } static void sctp_destruct_sock(struct sock *sk) @@ -5615,11 +5673,11 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv /* Helper routine to branch off an association to a new socket. */ static int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, - struct socket **sockp) + struct socket **sockp) { struct sctp_association *asoc = sctp_id2assoc(sk, id); - struct sctp_sock *sp = sctp_sk(sk); struct socket *sock; + struct sock *newsk; int err = 0; /* Do not peel off from one netns to another one. */ @@ -5635,30 +5693,24 @@ static int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, if (!sctp_style(sk, UDP)) return -EINVAL; - /* Create a new socket. */ - err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); - if (err < 0) + err = sock_create_lite(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); + if (err) return err; - sctp_copy_sock(sock->sk, sk, asoc); - - /* Make peeled-off sockets more like 1-1 accepted sockets. - * Set the daddr and initialize id to something more random and also - * copy over any ip options. - */ - sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sock->sk); - sp->pf->copy_ip_options(sk, sock->sk); - - /* Populate the fields of the newsk from the oldsk and migrate the - * asoc to the newsk. 
- */ - err = sctp_sock_migrate(sk, sock->sk, asoc, - SCTP_SOCKET_UDP_HIGH_BANDWIDTH); - if (err) { + newsk = sctp_clone_sock(sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); + if (IS_ERR(newsk)) { sock_release(sock); - sock = NULL; + *sockp = NULL; + return PTR_ERR(newsk); } + lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); + __inet_accept(sk->sk_socket, sock, newsk); + release_sock(newsk); + + sock->ops = sk->sk_socket->ops; + __module_get(sock->ops->owner); + *sockp = sock; return err; @@ -9441,71 +9493,6 @@ done: sctp_skb_set_owner_r(skb, sk); } -void sctp_copy_sock(struct sock *newsk, struct sock *sk, - struct sctp_association *asoc) -{ - struct inet_sock *inet = inet_sk(sk); - struct inet_sock *newinet; - struct sctp_sock *sp = sctp_sk(sk); - - newsk->sk_type = sk->sk_type; - newsk->sk_bound_dev_if = sk->sk_bound_dev_if; - newsk->sk_flags = sk->sk_flags; - newsk->sk_tsflags = sk->sk_tsflags; - newsk->sk_no_check_tx = sk->sk_no_check_tx; - newsk->sk_no_check_rx = sk->sk_no_check_rx; - newsk->sk_reuse = sk->sk_reuse; - sctp_sk(newsk)->reuse = sp->reuse; - - newsk->sk_shutdown = sk->sk_shutdown; - newsk->sk_destruct = sk->sk_destruct; - newsk->sk_family = sk->sk_family; - newsk->sk_protocol = IPPROTO_SCTP; - newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; - newsk->sk_sndbuf = sk->sk_sndbuf; - newsk->sk_rcvbuf = sk->sk_rcvbuf; - newsk->sk_lingertime = sk->sk_lingertime; - newsk->sk_rcvtimeo = READ_ONCE(sk->sk_rcvtimeo); - newsk->sk_sndtimeo = READ_ONCE(sk->sk_sndtimeo); - newsk->sk_rxhash = sk->sk_rxhash; - - newinet = inet_sk(newsk); - - /* Initialize sk's sport, dport, rcv_saddr and daddr for - * getsockname() and getpeername() - */ - newinet->inet_sport = inet->inet_sport; - newinet->inet_saddr = inet->inet_saddr; - newinet->inet_rcv_saddr = inet->inet_rcv_saddr; - newinet->inet_dport = htons(asoc->peer.port); - newinet->pmtudisc = inet->pmtudisc; - atomic_set(&newinet->inet_id, get_random_u16()); - - newinet->uc_ttl = inet->uc_ttl; - inet_set_bit(MC_LOOP, newsk); - newinet->mc_ttl = 1; - newinet->mc_index = 0; - newinet->mc_list = NULL; - - if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) - net_enable_timestamp(); - - /* Set newsk security attributes from original sk and connection - * security attribute from asoc. - */ - security_sctp_sk_clone(asoc, sk, newsk); -} - -static inline void sctp_copy_descendant(struct sock *sk_to, - const struct sock *sk_from) -{ - size_t ancestor_size = sizeof(struct inet_sock); - - ancestor_size += sk_from->sk_prot->obj_size; - ancestor_size -= offsetof(struct sctp_sock, pd_lobby); - __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); -} - /* Populate the fields of the newsk from the oldsk and migrate the assoc * and its messages to the newsk. */ @@ -9522,14 +9509,6 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, struct sctp_bind_hashbucket *head; int err; - /* Migrate socket buffer sizes and all the socket level options to the - * new socket. - */ - newsk->sk_sndbuf = oldsk->sk_sndbuf; - newsk->sk_rcvbuf = oldsk->sk_rcvbuf; - /* Brute force copy old sctp opt. */ - sctp_copy_descendant(newsk, oldsk); - /* Restore the ep value that was overwritten with the above structure * copy. 
*/ diff --git a/net/sctp/stream.c b/net/sctp/stream.c index f205556c5b24..0615e4426341 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -54,7 +54,7 @@ static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt) static void sctp_stream_free_ext(struct sctp_stream *stream, __u16 sid) { - struct sctp_sched_ops *sched; + const struct sctp_sched_ops *sched; if (!SCTP_SO(stream, sid)->ext) return; @@ -130,7 +130,7 @@ out: int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, gfp_t gfp) { - struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); + const struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); int i, ret = 0; gfp |= __GFP_NOWARN; @@ -182,7 +182,7 @@ int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid) void sctp_stream_free(struct sctp_stream *stream) { - struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); + const struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); int i; sched->unsched_all(stream); @@ -207,7 +207,7 @@ void sctp_stream_clear(struct sctp_stream *stream) void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new) { - struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); + const struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); sched->unsched_all(stream); sctp_stream_outq_migrate(stream, new, new->outcnt); diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c index 54afbe4fb087..50f8b5240359 100644 --- a/net/sctp/stream_sched.c +++ b/net/sctp/stream_sched.c @@ -91,7 +91,7 @@ static void sctp_sched_fcfs_unsched_all(struct sctp_stream *stream) { } -static struct sctp_sched_ops sctp_sched_fcfs = { +static const struct sctp_sched_ops sctp_sched_fcfs = { .set = sctp_sched_fcfs_set, .get = sctp_sched_fcfs_get, .init = sctp_sched_fcfs_init, @@ -111,10 +111,10 @@ static void sctp_sched_ops_fcfs_init(void) /* API to other parts of the stack */ -static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1]; +static const struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1]; void sctp_sched_ops_register(enum sctp_sched_type sched, - struct sctp_sched_ops *sched_ops) + const struct sctp_sched_ops *sched_ops) { sctp_sched_ops[sched] = sched_ops; } @@ -130,7 +130,7 @@ void sctp_sched_ops_init(void) static void sctp_sched_free_sched(struct sctp_stream *stream) { - struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); + const struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); struct sctp_stream_out_ext *soute; int i; @@ -148,9 +148,9 @@ static void sctp_sched_free_sched(struct sctp_stream *stream) int sctp_sched_set_sched(struct sctp_association *asoc, enum sctp_sched_type sched) { - struct sctp_sched_ops *old = asoc->outqueue.sched; + const struct sctp_sched_ops *old = asoc->outqueue.sched; struct sctp_datamsg *msg = NULL; - struct sctp_sched_ops *n; + const struct sctp_sched_ops *n; struct sctp_chunk *ch; int i, ret = 0; @@ -263,14 +263,14 @@ void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch) int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp) { - struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); + const struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); struct sctp_stream_out_ext *ext = SCTP_SO(stream, sid)->ext; INIT_LIST_HEAD(&ext->outq); return sched->init_sid(stream, sid, gfp); } -struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream) +const struct sctp_sched_ops 
*sctp_sched_ops_from_stream(struct sctp_stream *stream) { struct sctp_association *asoc; diff --git a/net/sctp/stream_sched_fc.c b/net/sctp/stream_sched_fc.c index 4bd18a497a6d..776c6de46c22 100644 --- a/net/sctp/stream_sched_fc.c +++ b/net/sctp/stream_sched_fc.c @@ -188,7 +188,7 @@ static void sctp_sched_fc_unsched_all(struct sctp_stream *stream) list_del_init(&soute->fc_list); } -static struct sctp_sched_ops sctp_sched_fc = { +static const struct sctp_sched_ops sctp_sched_fc = { .set = sctp_sched_fc_set, .get = sctp_sched_fc_get, .init = sctp_sched_fc_init, @@ -206,7 +206,7 @@ void sctp_sched_ops_fc_init(void) sctp_sched_ops_register(SCTP_SS_FC, &sctp_sched_fc); } -static struct sctp_sched_ops sctp_sched_wfq = { +static const struct sctp_sched_ops sctp_sched_wfq = { .set = sctp_sched_wfq_set, .get = sctp_sched_wfq_get, .init = sctp_sched_fc_init, diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c index 4d4d9da331f4..fb6c55e5615d 100644 --- a/net/sctp/stream_sched_prio.c +++ b/net/sctp/stream_sched_prio.c @@ -300,7 +300,7 @@ static void sctp_sched_prio_unsched_all(struct sctp_stream *stream) sctp_sched_prio_unsched(soute); } -static struct sctp_sched_ops sctp_sched_prio = { +static const struct sctp_sched_ops sctp_sched_prio = { .set = sctp_sched_prio_set, .get = sctp_sched_prio_get, .init = sctp_sched_prio_init, diff --git a/net/sctp/stream_sched_rr.c b/net/sctp/stream_sched_rr.c index 1f235e7f643a..9157b653f196 100644 --- a/net/sctp/stream_sched_rr.c +++ b/net/sctp/stream_sched_rr.c @@ -171,7 +171,7 @@ static void sctp_sched_rr_unsched_all(struct sctp_stream *stream) sctp_sched_rr_unsched(stream, soute); } -static struct sctp_sched_ops sctp_sched_rr = { +static const struct sctp_sched_ops sctp_sched_rr = { .set = sctp_sched_rr_set, .get = sctp_sched_rr_get, .init = sctp_sched_rr_init, diff --git a/net/smc/Kconfig b/net/smc/Kconfig index 99ecd59d1f4b..325addf83cc6 100644 --- a/net/smc/Kconfig +++ b/net/smc/Kconfig @@ -19,3 +19,13 @@ config SMC_DIAG smcss. if unsure, say Y. + +config SMC_HS_CTRL_BPF + bool "Generic eBPF hook for SMC handshake flow" + depends on SMC && BPF_SYSCALL + default y + help + SMC_HS_CTRL_BPF enables support for registering a generic eBPF hook for the + SMC handshake flow, which offers much greater flexibility in modifying the + behavior of the SMC protocol stack than a purely kernel-based approach. Select + this option if you want to filter the handshake process via eBPF programs.
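[Editor's note] For orientation, a minimal BPF-side implementation of the smc_hs_ctrl struct_ops introduced by this series could look like the sketch below. Only the member names (name, syn_option, synack_option) are taken from the patch; the section names follow common libbpf struct_ops conventions, and returning 1 mirrors the kernel stubs later in this diff (assumed to mean "request SMC for this connection"). Since the kernel-side reg() handler rejects bpf_link-based attachment, a plain ".struct_ops" section is used:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("struct_ops/smc_syn")
int BPF_PROG(smc_syn, struct tcp_sock *tp)
{
	return 1;	/* assumed: non-zero requests the SMC TCP option on SYN */
}

SEC("struct_ops/smc_synack")
int BPF_PROG(smc_synack, const struct tcp_sock *tp,
	     struct inet_request_sock *ireq)
{
	return 1;	/* assumed: non-zero echoes the option on SYN-ACK */
}

/* reg() returns -EOPNOTSUPP for links, so no ".struct_ops.link" here */
SEC(".struct_ops")
struct smc_hs_ctrl sample_ctrl = {
	.name = "sample",
	.syn_option = (void *)smc_syn,
	.synack_option = (void *)smc_synack,
};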
\ No newline at end of file diff --git a/net/smc/Makefile b/net/smc/Makefile index 0e754cbc38f9..5368634c5dd6 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -6,3 +6,4 @@ smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o smc_stats.o smc-y += smc_tracepoint.o smc_inet.o smc-$(CONFIG_SYSCTL) += smc_sysctl.o +smc-$(CONFIG_SMC_HS_CTRL_BPF) += smc_hs_bpf.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 77b99e8ef35a..e388de8dca09 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -58,6 +58,7 @@ #include "smc_tracepoint.h" #include "smc_sysctl.h" #include "smc_inet.h" +#include "smc_hs_bpf.h" static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group * creation on server @@ -421,7 +422,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock, return sk; } -int smc_bind(struct socket *sock, struct sockaddr *uaddr, +int smc_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; @@ -1642,7 +1643,7 @@ out: release_sock(&smc->sk); } -int smc_connect(struct socket *sock, struct sockaddr *addr, +int smc_connect(struct socket *sock, struct sockaddr_unsized *addr, int alen, int flags) { struct sock *sk = sock->sk; @@ -1694,7 +1695,7 @@ int smc_connect(struct socket *sock, struct sockaddr *addr, rc = -EALREADY; goto out; } - rc = kernel_connect(smc->clcsock, addr, alen, flags); + rc = kernel_connect(smc->clcsock, (struct sockaddr_unsized *)addr, alen, flags); if (rc && rc != -EINPROGRESS) goto out; @@ -2140,7 +2141,7 @@ static void smc_check_ism_v2_match(struct smc_init_info *ini, } } -static void smc_find_ism_store_rc(u32 rc, struct smc_init_info *ini) +static void smc_init_info_store_rc(u32 rc, struct smc_init_info *ini) { if (!ini->rc) ini->rc = rc; @@ -2203,7 +2204,7 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc, mutex_unlock(&smcd_dev_list.mutex); if (!ini->ism_dev[0]) { - smc_find_ism_store_rc(SMC_CLC_DECL_NOSMCD2DEV, ini); + smc_init_info_store_rc(SMC_CLC_DECL_NOSMCD2DEV, ini); goto not_found; } @@ -2220,7 +2221,7 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc, ini->ism_selected = i; rc = smc_listen_ism_init(new_smc, ini); if (rc) { - smc_find_ism_store_rc(rc, ini); + smc_init_info_store_rc(rc, ini); /* try next active ISM device */ continue; } @@ -2260,7 +2261,7 @@ static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc, return; /* V1 ISM device found */ not_found: - smc_find_ism_store_rc(rc, ini); + smc_init_info_store_rc(rc, ini); ini->smcd_version &= ~SMC_V1; ini->ism_dev[0] = NULL; ini->is_smcd = false; @@ -2311,7 +2312,7 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc, ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce); rc = smc_find_rdma_device(new_smc, ini); if (rc) { - smc_find_ism_store_rc(rc, ini); + smc_init_info_store_rc(rc, ini); goto not_found; } if (!ini->smcrv2.uses_gateway) @@ -2328,7 +2329,7 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc, if (!rc) return; ini->smcr_version = smcr_version; - smc_find_ism_store_rc(rc, ini); + smc_init_info_store_rc(rc, ini); not_found: ini->smcr_version &= ~SMC_V2; @@ -2375,7 +2376,7 @@ static int smc_listen_find_device(struct smc_sock *new_smc, /* check for matching IP prefix and subnet length (V1) */ prfx_rc = smc_listen_prfx_check(new_smc, pclc); if (prfx_rc) - smc_find_ism_store_rc(prfx_rc, ini); + 
smc_init_info_store_rc(prfx_rc, ini); /* get vlan id from IP device */ if (smc_vlan_by_tcpsk(new_smc->clcsock, ini)) @@ -2402,7 +2403,7 @@ static int smc_listen_find_device(struct smc_sock *new_smc, int rc; rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini); - smc_find_ism_store_rc(rc, ini); + smc_init_info_store_rc(rc, ini); return (!rc) ? 0 : ini->rc; } return prfx_rc; @@ -3600,8 +3601,16 @@ static int __init smc_init(void) pr_err("%s: smc_inet_init fails with %d\n", __func__, rc); goto out_ulp; } + rc = bpf_smc_hs_ctrl_init(); + if (rc) { + pr_err("%s: bpf_smc_hs_ctrl_init fails with %d\n", __func__, + rc); + goto out_inet; + } static_branch_enable(&tcp_have_smc); return 0; +out_inet: + smc_inet_exit(); out_ulp: tcp_unregister_ulp(&smc_ulp_ops); out_ib: diff --git a/net/smc/smc.h b/net/smc/smc.h index 2c9084963739..9e6af72784ba 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -42,9 +42,9 @@ void smc_unhash_sk(struct sock *sk); void smc_release_cb(struct sock *sk); int smc_release(struct socket *sock); -int smc_bind(struct socket *sock, struct sockaddr *uaddr, +int smc_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len); -int smc_connect(struct socket *sock, struct sockaddr *addr, +int smc_connect(struct socket *sock, struct sockaddr_unsized *addr, int alen, int flags); int smc_accept(struct socket *sock, struct socket *new_sock, struct proto_accept_arg *arg); diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index be0c2da83d2b..e4eabc83719e 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -810,6 +810,8 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, lnk->clearing = 0; lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu; lnk->link_id = smcr_next_link_id(lgr); + lnk->max_send_wr = lgr->max_send_wr; + lnk->max_recv_wr = lgr->max_recv_wr; lnk->lgr = lgr; smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */ lnk->link_idx = link_idx; @@ -836,27 +838,39 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, rc = smc_llc_link_init(lnk); if (rc) goto out; - rc = smc_wr_alloc_link_mem(lnk); - if (rc) - goto clear_llc_lnk; rc = smc_ib_create_protection_domain(lnk); if (rc) - goto free_link_mem; - rc = smc_ib_create_queue_pair(lnk); - if (rc) - goto dealloc_pd; + goto clear_llc_lnk; + do { + rc = smc_ib_create_queue_pair(lnk); + if (rc) + goto dealloc_pd; + rc = smc_wr_alloc_link_mem(lnk); + if (!rc) + break; + else if (rc != -ENOMEM) /* give up */ + goto destroy_qp; + /* retry with smaller ... */ + lnk->max_send_wr /= 2; + lnk->max_recv_wr /= 2; + /* ... unless dropping below old SMC_WR_BUF_SIZE */ + if (lnk->max_send_wr < 16 || lnk->max_recv_wr < 48) + goto destroy_qp; + smc_ib_destroy_queue_pair(lnk); + } while (1); + rc = smc_wr_create_link(lnk); if (rc) - goto destroy_qp; + goto free_link_mem; lnk->state = SMC_LNK_ACTIVATING; return 0; +free_link_mem: + smc_wr_free_link_mem(lnk); destroy_qp: smc_ib_destroy_queue_pair(lnk); dealloc_pd: smc_ib_dealloc_protection_domain(lnk); -free_link_mem: - smc_wr_free_link_mem(lnk); clear_llc_lnk: smc_llc_link_clear(lnk, false); out: diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index a5a78cbff341..5c18f08a4c8a 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -34,6 +34,8 @@ * distributions may modify it to a value between * 16-255 as needed.
*/ +#define SMCR_MAX_SEND_WR_DEF 16 /* Default number of work requests per send queue */ +#define SMCR_MAX_RECV_WR_DEF 48 /* Default number of work requests per recv queue */ struct smc_lgr_list { /* list of link group definition */ struct list_head list; @@ -173,6 +175,8 @@ struct smc_link { struct completion llc_testlink_resp; /* wait for rx of testlink */ int llc_testlink_time; /* testlink interval */ atomic_t conn_cnt; /* connections on this link */ + u16 max_send_wr; + u16 max_recv_wr; }; /* For now we just allow one parallel link per link group. The SMC protocol @@ -366,6 +370,10 @@ struct smc_link_group { /* max conn can be assigned to lgr */ u8 max_links; /* max links can be added in lgr */ + u16 max_send_wr; + /* number of WR buffers on send */ + u16 max_recv_wr; + /* number of WR buffers on recv */ }; struct { /* SMC-D */ struct smcd_gid peer_gid; diff --git a/net/smc/smc_hs_bpf.c b/net/smc/smc_hs_bpf.c new file mode 100644 index 000000000000..063d23d85850 --- /dev/null +++ b/net/smc/smc_hs_bpf.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Generic hook for SMC handshake flow. + * + * Copyright IBM Corp. 2016 + * Copyright (c) 2025, Alibaba Inc. + * + * Author: D. Wythe <alibuda@linux.alibaba.com> + */ + +#include <linux/bpf_verifier.h> +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/rculist.h> + +#include "smc_hs_bpf.h" + +static DEFINE_SPINLOCK(smc_hs_ctrl_list_lock); +static LIST_HEAD(smc_hs_ctrl_list); + +static int smc_hs_ctrl_reg(struct smc_hs_ctrl *ctrl) +{ + int ret = 0; + + spin_lock(&smc_hs_ctrl_list_lock); + /* already exists or duplicate name */ + if (smc_hs_ctrl_find_by_name(ctrl->name)) + ret = -EEXIST; + else + list_add_tail_rcu(&ctrl->list, &smc_hs_ctrl_list); + spin_unlock(&smc_hs_ctrl_list_lock); + return ret; +} + +static void smc_hs_ctrl_unreg(struct smc_hs_ctrl *ctrl) +{ + spin_lock(&smc_hs_ctrl_list_lock); + list_del_rcu(&ctrl->list); + spin_unlock(&smc_hs_ctrl_list_lock); + + /* Ensure that all readers complete */ + synchronize_rcu(); +} + +struct smc_hs_ctrl *smc_hs_ctrl_find_by_name(const char *name) +{ + struct smc_hs_ctrl *ctrl; + + list_for_each_entry_rcu(ctrl, &smc_hs_ctrl_list, list) { + if (strcmp(ctrl->name, name) == 0) + return ctrl; + } + return NULL; +} + +static int __smc_bpf_stub_set_tcp_option(struct tcp_sock *tp) { return 1; } +static int __smc_bpf_stub_set_tcp_option_cond(const struct tcp_sock *tp, + struct inet_request_sock *ireq) +{ + return 1; +} + +static struct smc_hs_ctrl __smc_bpf_hs_ctrl = { + .syn_option = __smc_bpf_stub_set_tcp_option, + .synack_option = __smc_bpf_stub_set_tcp_option_cond, +}; + +static int smc_bpf_hs_ctrl_init(struct btf *btf) { return 0; } + +static int smc_bpf_hs_ctrl_reg(void *kdata, struct bpf_link *link) +{ + if (link) + return -EOPNOTSUPP; + + return smc_hs_ctrl_reg(kdata); +} + +static void smc_bpf_hs_ctrl_unreg(void *kdata, struct bpf_link *link) +{ + smc_hs_ctrl_unreg(kdata); +} + +static int smc_bpf_hs_ctrl_init_member(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata) +{ + const struct smc_hs_ctrl *u_ctrl; + struct smc_hs_ctrl *k_ctrl; + u32 moff; + + u_ctrl = (const struct smc_hs_ctrl *)udata; + k_ctrl = (struct smc_hs_ctrl *)kdata; + + moff = __btf_member_bit_offset(t, member) / 8; + switch (moff) { + case offsetof(struct smc_hs_ctrl, name): + if (bpf_obj_name_cpy(k_ctrl->name, u_ctrl->name, + sizeof(u_ctrl->name)) <= 0) + return -EINVAL; + return 1; + 
+static int __smc_bpf_stub_set_tcp_option(struct tcp_sock *tp) { return 1; } +static int __smc_bpf_stub_set_tcp_option_cond(const struct tcp_sock *tp, + struct inet_request_sock *ireq) +{ + return 1; +} + +static struct smc_hs_ctrl __smc_bpf_hs_ctrl = { + .syn_option = __smc_bpf_stub_set_tcp_option, + .synack_option = __smc_bpf_stub_set_tcp_option_cond, +}; + +static int smc_bpf_hs_ctrl_init(struct btf *btf) { return 0; } + +static int smc_bpf_hs_ctrl_reg(void *kdata, struct bpf_link *link) +{ + if (link) + return -EOPNOTSUPP; + + return smc_hs_ctrl_reg(kdata); +} + +static void smc_bpf_hs_ctrl_unreg(void *kdata, struct bpf_link *link) +{ + smc_hs_ctrl_unreg(kdata); +} + +static int smc_bpf_hs_ctrl_init_member(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata) +{ + const struct smc_hs_ctrl *u_ctrl; + struct smc_hs_ctrl *k_ctrl; + u32 moff; + + u_ctrl = (const struct smc_hs_ctrl *)udata; + k_ctrl = (struct smc_hs_ctrl *)kdata; + + moff = __btf_member_bit_offset(t, member) / 8; + switch (moff) { + case offsetof(struct smc_hs_ctrl, name): + if (bpf_obj_name_cpy(k_ctrl->name, u_ctrl->name, + sizeof(u_ctrl->name)) <= 0) + return -EINVAL; + return 1; + case offsetof(struct smc_hs_ctrl, flags): + if (u_ctrl->flags & ~SMC_HS_CTRL_ALL_FLAGS) + return -EINVAL; + k_ctrl->flags = u_ctrl->flags; + return 1; + default: + break; + } + + return 0; +} + +static const struct bpf_func_proto * +bpf_smc_hs_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + return bpf_base_func_proto(func_id, prog); +} + +static const struct bpf_verifier_ops smc_bpf_verifier_ops = { + .get_func_proto = bpf_smc_hs_func_proto, + .is_valid_access = bpf_tracing_btf_ctx_access, +}; + +static struct bpf_struct_ops bpf_smc_hs_ctrl_ops = { + .name = "smc_hs_ctrl", + .init = smc_bpf_hs_ctrl_init, + .reg = smc_bpf_hs_ctrl_reg, + .unreg = smc_bpf_hs_ctrl_unreg, + .cfi_stubs = &__smc_bpf_hs_ctrl, + .verifier_ops = &smc_bpf_verifier_ops, + .init_member = smc_bpf_hs_ctrl_init_member, + .owner = THIS_MODULE, +}; + +int bpf_smc_hs_ctrl_init(void) +{ + return register_bpf_struct_ops(&bpf_smc_hs_ctrl_ops, smc_hs_ctrl); +} diff --git a/net/smc/smc_hs_bpf.h b/net/smc/smc_hs_bpf.h new file mode 100644 index 000000000000..f5f1807c079e --- /dev/null +++ b/net/smc/smc_hs_bpf.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Generic hook for SMC handshake flow. + * + * Copyright IBM Corp. 2016 + * Copyright (c) 2025, Alibaba Inc. + * + * Author: D. Wythe <alibuda@linux.alibaba.com> + */ + +#ifndef __SMC_HS_CTRL +#define __SMC_HS_CTRL + +#include <net/smc.h> + +/* Find an hs_ctrl by the target name, which is required to be a C string. + * Return NULL if no such ctrl was found; otherwise return a valid ctrl. + * + * Note: The caller MUST invoke this under rcu_read_lock(). + */ +struct smc_hs_ctrl *smc_hs_ctrl_find_by_name(const char *name); + +#if IS_ENABLED(CONFIG_SMC_HS_CTRL_BPF) +int bpf_smc_hs_ctrl_init(void); +#else +static inline int bpf_smc_hs_ctrl_init(void) { return 0; } +#endif /* CONFIG_SMC_HS_CTRL_BPF */ + +#endif /* __SMC_HS_CTRL */ 
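With the struct_ops registered, the handshake policy can be supplied from a BPF program. A hedged sketch of such an implementation under the usual libbpf conventions (vmlinux.h workflow assumed; the program and map names are hypothetical, the member names follow the stubs above, and plain SEC(".struct_ops") is used because the .reg callback above rejects bpf_link attachment):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	char _license[] SEC("license") = "GPL";

	SEC("struct_ops/smc_syn")
	int BPF_PROG(smc_syn, struct tcp_sock *tp)
	{
		return 1;	/* always request SMC on active open */
	}

	SEC("struct_ops/smc_synack")
	int BPF_PROG(smc_synack, const struct tcp_sock *tp,
		     struct inet_request_sock *ireq)
	{
		return 1;	/* answer the peer's SMC option on passive open */
	}

	SEC(".struct_ops")
	struct smc_hs_ctrl sample = {
		.name		= "sample",
		.syn_option	= (void *)smc_syn,
		.synack_option	= (void *)smc_synack,
	};
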
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 0052f02756eb..1154907c5c05 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -669,11 +669,6 @@ int smc_ib_create_queue_pair(struct smc_link *lnk) .recv_cq = lnk->smcibdev->roce_cq_recv, .srq = NULL, .cap = { - /* include unsolicited rdma_writes as well, - * there are max. 2 RDMA_WRITE per 1 WR_SEND - */ - .max_send_wr = SMC_WR_BUF_CNT * 3, - .max_recv_wr = SMC_WR_BUF_CNT * 3, .max_send_sge = SMC_IB_MAX_SEND_SGE, .max_recv_sge = lnk->wr_rx_sge_cnt, .max_inline_data = 0, @@ -683,6 +678,11 @@ int smc_ib_create_queue_pair(struct smc_link *lnk) }; int rc; + /* include unsolicited rdma_writes as well, + * there are at most 2 RDMA_WRITEs per 1 WR_SEND + */ + qp_attr.cap.max_send_wr = 3 * lnk->lgr->max_send_wr; + qp_attr.cap.max_recv_wr = lnk->lgr->max_recv_wr; lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr); rc = PTR_ERR_OR_ZERO(lnk->roce_qp); if (IS_ERR(lnk->roce_qp)) diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index f865c58c3aa7..f5d5eb617526 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -2157,6 +2157,8 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc) init_waitqueue_head(&lgr->llc_msg_waiter); init_rwsem(&lgr->llc_conf_mutex); lgr->llc_testlink_time = READ_ONCE(net->smc.sysctl_smcr_testlink_time); + lgr->max_send_wr = (u16)(READ_ONCE(net->smc.sysctl_smcr_max_send_wr)); + lgr->max_recv_wr = (u16)(READ_ONCE(net->smc.sysctl_smcr_max_recv_wr)); } /* called after lgr was removed from lgr_list */ diff --git a/net/smc/smc_sysctl.c b/net/smc/smc_sysctl.c index 2fab6456f765..b1efed546243 100644 --- a/net/smc/smc_sysctl.c +++ b/net/smc/smc_sysctl.c @@ -12,12 +12,14 @@ #include <linux/init.h> #include <linux/sysctl.h> +#include <linux/bpf.h> #include <net/net_namespace.h> #include "smc.h" #include "smc_core.h" #include "smc_llc.h" #include "smc_sysctl.h" +#include "smc_hs_bpf.h" static int min_sndbuf = SMC_BUF_MIN_SIZE; static int min_rcvbuf = SMC_BUF_MIN_SIZE; @@ -29,6 +31,71 @@ static int links_per_lgr_min = SMC_LINKS_ADD_LNK_MIN; static int links_per_lgr_max = SMC_LINKS_ADD_LNK_MAX; static int conns_per_lgr_min = SMC_CONN_PER_LGR_MIN; static int conns_per_lgr_max = SMC_CONN_PER_LGR_MAX; +static unsigned int smcr_max_wr_min = 2; +static unsigned int smcr_max_wr_max = 2048; + +#if IS_ENABLED(CONFIG_SMC_HS_CTRL_BPF) +static int smc_net_replace_smc_hs_ctrl(struct net *net, const char *name) +{ + struct smc_hs_ctrl *ctrl = NULL; + + rcu_read_lock(); + /* a NULL or empty name asks to clear the current ctrl */ + if (name && name[0]) { + ctrl = smc_hs_ctrl_find_by_name(name); + if (!ctrl) { + rcu_read_unlock(); + return -EINVAL; + } + /* no change, just return */ + if (ctrl == rcu_dereference(net->smc.hs_ctrl)) { + rcu_read_unlock(); + return 0; + } + if (!bpf_try_module_get(ctrl, ctrl->owner)) { + rcu_read_unlock(); + return -EBUSY; + } + } + /* exchange the old ctrl with the new one atomically */ + ctrl = unrcu_pointer(xchg(&net->smc.hs_ctrl, RCU_INITIALIZER(ctrl))); + /* release old ctrl */ + if (ctrl) + bpf_module_put(ctrl, ctrl->owner); + + rcu_read_unlock(); + return 0; +} + +static int proc_smc_hs_ctrl(const struct ctl_table *ctl, int write, + void *buffer, size_t *lenp, loff_t *ppos) +{ + struct net *net = container_of(ctl->data, struct net, smc.hs_ctrl); + char val[SMC_HS_CTRL_NAME_MAX]; + const struct ctl_table tbl = { + .data = val, + .maxlen = SMC_HS_CTRL_NAME_MAX, + }; + struct smc_hs_ctrl *ctrl; + int ret; + + rcu_read_lock(); + ctrl = rcu_dereference(net->smc.hs_ctrl); + if (ctrl) + memcpy(val, ctrl->name, sizeof(ctrl->name)); + else + val[0] = '\0'; + rcu_read_unlock(); + + ret = proc_dostring(&tbl, write, buffer, lenp, ppos); + if (ret) + return ret; + + if (write) + ret = smc_net_replace_smc_hs_ctrl(net, val); + return ret; +} +#endif /* CONFIG_SMC_HS_CTRL_BPF */ static struct ctl_table smc_table[] = { { @@ -99,6 +166,33 @@ static struct ctl_table smc_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, + { + .procname = "smcr_max_send_wr", + .data = &init_net.smc.sysctl_smcr_max_send_wr, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &smcr_max_wr_min, + .extra2 = &smcr_max_wr_max, + }, + { + .procname = 
"smcr_max_recv_wr", + .data = &init_net.smc.sysctl_smcr_max_recv_wr, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &smcr_max_wr_min, + .extra2 = &smcr_max_wr_max, + }, +#if IS_ENABLED(CONFIG_SMC_HS_CTRL_BPF) + { + .procname = "hs_ctrl", + .data = &init_net.smc.hs_ctrl, + .mode = 0644, + .maxlen = SMC_HS_CTRL_NAME_MAX, + .proc_handler = proc_smc_hs_ctrl, + }, +#endif /* CONFIG_SMC_HS_CTRL_BPF */ }; int __net_init smc_sysctl_net_init(struct net *net) @@ -109,6 +203,16 @@ int __net_init smc_sysctl_net_init(struct net *net) table = smc_table; if (!net_eq(net, &init_net)) { int i; +#if IS_ENABLED(CONFIG_SMC_HS_CTRL_BPF) + struct smc_hs_ctrl *ctrl; + + rcu_read_lock(); + ctrl = rcu_dereference(init_net.smc.hs_ctrl); + if (ctrl && ctrl->flags & SMC_HS_CTRL_FLAG_INHERITABLE && + bpf_try_module_get(ctrl, ctrl->owner)) + rcu_assign_pointer(net->smc.hs_ctrl, ctrl); + rcu_read_unlock(); +#endif /* CONFIG_SMC_HS_CTRL_BPF */ table = kmemdup(table, sizeof(smc_table), GFP_KERNEL); if (!table) @@ -130,6 +234,8 @@ int __net_init smc_sysctl_net_init(struct net *net) WRITE_ONCE(net->smc.sysctl_rmem, net_smc_rmem_init); net->smc.sysctl_max_links_per_lgr = SMC_LINKS_PER_LGR_MAX_PREFER; net->smc.sysctl_max_conns_per_lgr = SMC_CONN_PER_LGR_PREFER; + net->smc.sysctl_smcr_max_send_wr = SMCR_MAX_SEND_WR_DEF; + net->smc.sysctl_smcr_max_recv_wr = SMCR_MAX_RECV_WR_DEF; /* disable handshake limitation by default */ net->smc.limit_smc_hs = 0; @@ -139,6 +245,9 @@ err_reg: if (!net_eq(net, &init_net)) kfree(table); err_alloc: +#if IS_ENABLED(CONFIG_SMC_HS_CTRL_BPF) + smc_net_replace_smc_hs_ctrl(net, NULL); +#endif /* CONFIG_SMC_HS_CTRL_BPF */ return -ENOMEM; } @@ -148,6 +257,10 @@ void __net_exit smc_sysctl_net_exit(struct net *net) table = net->smc.smc_hdr->ctl_table_arg; unregister_net_sysctl_table(net->smc.smc_hdr); +#if IS_ENABLED(CONFIG_SMC_HS_CTRL_BPF) + smc_net_replace_smc_hs_ctrl(net, NULL); +#endif /* CONFIG_SMC_HS_CTRL_BPF */ + if (!net_eq(net, &init_net)) kfree(table); } diff --git a/net/smc/smc_sysctl.h b/net/smc/smc_sysctl.h index eb2465ae1e15..8538915af7af 100644 --- a/net/smc/smc_sysctl.h +++ b/net/smc/smc_sysctl.h @@ -25,6 +25,8 @@ static inline int smc_sysctl_net_init(struct net *net) net->smc.sysctl_autocorking_size = SMC_AUTOCORKING_DEFAULT_SIZE; net->smc.sysctl_max_links_per_lgr = SMC_LINKS_PER_LGR_MAX_PREFER; net->smc.sysctl_max_conns_per_lgr = SMC_CONN_PER_LGR_PREFER; + net->smc.sysctl_smcr_max_send_wr = SMCR_MAX_SEND_WR_DEF; + net->smc.sysctl_smcr_max_recv_wr = SMCR_MAX_RECV_WR_DEF; return 0; } diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index b04a21b8c511..5feafa98ab1a 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -547,9 +547,9 @@ void smc_wr_remember_qp_attr(struct smc_link *lnk) IB_QP_DEST_QPN, &init_attr); - lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT, + lnk->wr_tx_cnt = min_t(size_t, lnk->max_send_wr, lnk->qp_attr.cap.max_send_wr); - lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3, + lnk->wr_rx_cnt = min_t(size_t, lnk->max_recv_wr, lnk->qp_attr.cap.max_recv_wr); } @@ -741,50 +741,51 @@ int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr) int smc_wr_alloc_link_mem(struct smc_link *link) { /* allocate link related memory */ - link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL); + link->wr_tx_bufs = kcalloc(link->max_send_wr, + SMC_WR_BUF_SIZE, GFP_KERNEL); if (!link->wr_tx_bufs) goto no_mem; - link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, link->wr_rx_buflen, + link->wr_rx_bufs = kcalloc(link->max_recv_wr, 
link->wr_rx_buflen, GFP_KERNEL); if (!link->wr_rx_bufs) goto no_mem_wr_tx_bufs; - link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]), - GFP_KERNEL); + link->wr_tx_ibs = kcalloc(link->max_send_wr, + sizeof(link->wr_tx_ibs[0]), GFP_KERNEL); if (!link->wr_tx_ibs) goto no_mem_wr_rx_bufs; - link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3, + link->wr_rx_ibs = kcalloc(link->max_recv_wr, sizeof(link->wr_rx_ibs[0]), GFP_KERNEL); if (!link->wr_rx_ibs) goto no_mem_wr_tx_ibs; - link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT, + link->wr_tx_rdmas = kcalloc(link->max_send_wr, sizeof(link->wr_tx_rdmas[0]), GFP_KERNEL); if (!link->wr_tx_rdmas) goto no_mem_wr_rx_ibs; - link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT, + link->wr_tx_rdma_sges = kcalloc(link->max_send_wr, sizeof(link->wr_tx_rdma_sges[0]), GFP_KERNEL); if (!link->wr_tx_rdma_sges) goto no_mem_wr_tx_rdmas; - link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), + link->wr_tx_sges = kcalloc(link->max_send_wr, sizeof(link->wr_tx_sges[0]), GFP_KERNEL); if (!link->wr_tx_sges) goto no_mem_wr_tx_rdma_sges; - link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, + link->wr_rx_sges = kcalloc(link->max_recv_wr, sizeof(link->wr_rx_sges[0]) * link->wr_rx_sge_cnt, GFP_KERNEL); if (!link->wr_rx_sges) goto no_mem_wr_tx_sges; - link->wr_tx_mask = bitmap_zalloc(SMC_WR_BUF_CNT, GFP_KERNEL); + link->wr_tx_mask = bitmap_zalloc(link->max_send_wr, GFP_KERNEL); if (!link->wr_tx_mask) goto no_mem_wr_rx_sges; - link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT, + link->wr_tx_pends = kcalloc(link->max_send_wr, sizeof(link->wr_tx_pends[0]), GFP_KERNEL); if (!link->wr_tx_pends) goto no_mem_wr_tx_mask; - link->wr_tx_compl = kcalloc(SMC_WR_BUF_CNT, + link->wr_tx_compl = kcalloc(link->max_send_wr, sizeof(link->wr_tx_compl[0]), GFP_KERNEL); if (!link->wr_tx_compl) @@ -905,7 +906,7 @@ int smc_wr_create_link(struct smc_link *lnk) goto dma_unmap; } smc_wr_init_sge(lnk); - bitmap_zero(lnk->wr_tx_mask, SMC_WR_BUF_CNT); + bitmap_zero(lnk->wr_tx_mask, lnk->max_send_wr); init_waitqueue_head(&lnk->wr_tx_wait); rc = percpu_ref_init(&lnk->wr_tx_refs, smcr_wr_tx_refs_free, 0, GFP_KERNEL); if (rc) diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h index f3008dda222a..aa4533af9122 100644 --- a/net/smc/smc_wr.h +++ b/net/smc/smc_wr.h @@ -19,8 +19,6 @@ #include "smc.h" #include "smc_core.h" -#define SMC_WR_BUF_CNT 16 /* # of ctrl buffers per link */ - #define SMC_WR_TX_WAIT_FREE_SLOT_TIME (10 * HZ) #define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */ diff --git a/net/socket.c b/net/socket.c index e8892b218708..101a7ed574e7 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1872,7 +1872,7 @@ int __sys_bind_socket(struct socket *sock, struct sockaddr_storage *address, addrlen); if (!err) err = READ_ONCE(sock->ops)->bind(sock, - (struct sockaddr *)address, + (struct sockaddr_unsized *)address, addrlen); return err; } @@ -2099,8 +2099,8 @@ int __sys_connect_file(struct file *file, struct sockaddr_storage *address, if (err) goto out; - err = READ_ONCE(sock->ops)->connect(sock, (struct sockaddr *)address, - addrlen, sock->file->f_flags | file_flags); + err = READ_ONCE(sock->ops)->connect(sock, (struct sockaddr_unsized *)address, + addrlen, sock->file->f_flags | file_flags); out: return err; } @@ -3583,13 +3583,13 @@ static long compat_sock_ioctl(struct file *file, unsigned int cmd, * Returns 0 or an error. 
*/ -int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) +int kernel_bind(struct socket *sock, struct sockaddr_unsized *addr, int addrlen) { struct sockaddr_storage address; memcpy(&address, addr, addrlen); - return READ_ONCE(sock->ops)->bind(sock, (struct sockaddr *)&address, + return READ_ONCE(sock->ops)->bind(sock, (struct sockaddr_unsized *)&address, addrlen); } EXPORT_SYMBOL(kernel_bind); @@ -3662,14 +3662,14 @@ EXPORT_SYMBOL(kernel_accept); * Returns 0 or an error code. */ -int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, +int kernel_connect(struct socket *sock, struct sockaddr_unsized *addr, int addrlen, int flags) { struct sockaddr_storage address; memcpy(&address, addr, addrlen); - return READ_ONCE(sock->ops)->connect(sock, (struct sockaddr *)&address, + return READ_ONCE(sock->ops)->connect(sock, (struct sockaddr_unsized *)&address, addrlen, flags); } EXPORT_SYMBOL(kernel_connect); 
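The two exported wrappers show the whole conversion pattern: the address is still copied into a full sockaddr_storage, only the cast changes. A hedged sketch of an in-kernel caller (hypothetical helper; IPv4 loopback chosen purely for illustration):

	static int connect_loopback(struct socket *sock, __be16 port)
	{
		struct sockaddr_in sin = {
			.sin_family		= AF_INET,
			.sin_port		= port,
			.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
		};

		/* kernel_connect() copies the address before calling
		 * ->connect(), so a stack-local sockaddr is fine here.
		 */
		return kernel_connect(sock, (struct sockaddr_unsized *)&sin,
				      sizeof(sin), 0);
	}
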
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index e659fea2da70..fe0e76fdd1f1 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -127,7 +127,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, } if (!strp->skb_nextp) { - /* We are going to append to the frags_list of head. + /* We are going to append to the frag_list of head. * Need to unshare the frag_list. */ err = skb_unclone(head, GFP_ATOMIC); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 8ca354ecfd02..58442ae1c2da 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1457,12 +1457,12 @@ static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen, switch (sap->sa_family) { case AF_INET: err = kernel_bind(sock, - (struct sockaddr *)&rpc_inaddr_loopback, + (struct sockaddr_unsized *)&rpc_inaddr_loopback, sizeof(rpc_inaddr_loopback)); break; case AF_INET6: err = kernel_bind(sock, - (struct sockaddr *)&rpc_in6addr_loopback, + (struct sockaddr_unsized *)&rpc_in6addr_loopback, sizeof(rpc_in6addr_loopback)); break; default: @@ -1474,7 +1474,7 @@ static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen, goto out_release; } - err = kernel_connect(sock, sap, salen, 0); + err = kernel_connect(sock, (struct sockaddr_unsized *)sap, salen, 0); if (err < 0) { dprintk("RPC: can't connect UDP socket (%d)\n", err); goto out_release; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 7b90abc5cf0e..16ff6c100821 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1557,7 +1557,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, ip6_sock_set_v6only(sock->sk); if (type == SOCK_STREAM) sock->sk->sk_reuse = SK_CAN_REUSE; /* allow address reuse */ - error = kernel_bind(sock, sin, len); + error = kernel_bind(sock, (struct sockaddr_unsized *)sin, len); if (error < 0) goto bummer; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 3aa987e7f072..2e1fe6013361 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1845,8 +1845,8 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock) memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen); do { rpc_set_port((struct sockaddr *)&myaddr, port); - err = kernel_bind(sock, (struct sockaddr *)&myaddr, - transport->xprt.addrlen); + err = kernel_bind(sock, (struct sockaddr_unsized *)&myaddr, + transport->xprt.addrlen); if (err == 0) { if (transport->xprt.reuseport) transport->srcport = port; @@ -2005,7 +2005,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt, xs_stream_start_connect(transport); - return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0); + return kernel_connect(sock, (struct sockaddr_unsized *)xs_addr(xprt), xprt->addrlen, 0); } /** @@ -2405,7 +2405,8 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) /* Tell the socket layer to start connecting... */ set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); - return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); + return kernel_connect(sock, (struct sockaddr_unsized *)xs_addr(xprt), + xprt->addrlen, O_NONBLOCK); } /** diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 1574a83384f8..817b07d95a91 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -710,7 +710,7 @@ int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen) return res; } -static int tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen) +static int tipc_bind(struct socket *sock, struct sockaddr_unsized *skaddr, int alen) { struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr; u32 atype = ua->addrtype; @@ -726,7 +726,7 @@ static int tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen) return -EACCES; } } - return tipc_sk_bind(sock, skaddr, alen); + return tipc_sk_bind(sock, (struct sockaddr *)skaddr, alen); } /** @@ -2565,7 +2565,7 @@ static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr) * * Return: 0 on success, errno otherwise */ -static int tipc_connect(struct socket *sock, struct sockaddr *dest, +static int tipc_connect(struct socket *sock, struct sockaddr_unsized *dest, int destlen, int flags) { struct sock *sk = sock->sk; @@ -3031,10 +3031,8 @@ static void tipc_sk_remove(struct tipc_sock *tsk) struct sock *sk = &tsk->sk; struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); - if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) { - WARN_ON(refcount_read(&sk->sk_refcnt) == 1); + if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) __sock_put(sk); - } } static const struct rhashtable_params tsk_rht_params = { diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 71734411ff4c..82ea407e520a 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -373,7 +373,8 @@ static int tls_do_allocation(struct sock *sk, if (!offload_ctx->open_record) { if (unlikely(!skb_page_frag_refill(prepend_size, pfrag, sk->sk_allocation))) { - READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk); + if (!sk->sk_bypass_prot_mem) + READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk); sk_stream_moderate_sndbuf(sk); return -ENOMEM; } @@ -461,7 +462,7 @@ static int tls_push_data(struct sock *sk, /* TLS_HEADER_SIZE is not counted as part of the TLS record, and * we need to leave room for an authentication tag. 
*/ - max_open_record_len = TLS_MAX_PAYLOAD_SIZE + + max_open_record_len = tls_ctx->tx_max_payload_len + prot->prepend_size; do { rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 39a2ab47fe72..56ce0bc8317b 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -541,6 +541,28 @@ static int do_tls_getsockopt_no_pad(struct sock *sk, char __user *optval, return 0; } +static int do_tls_getsockopt_tx_payload_len(struct sock *sk, char __user *optval, + int __user *optlen) +{ + struct tls_context *ctx = tls_get_ctx(sk); + u16 payload_len = ctx->tx_max_payload_len; + int len; + + if (get_user(len, optlen)) + return -EFAULT; + + if (len < sizeof(payload_len)) + return -EINVAL; + + if (put_user(sizeof(payload_len), optlen)) + return -EFAULT; + + if (copy_to_user(optval, &payload_len, sizeof(payload_len))) + return -EFAULT; + + return 0; +} + static int do_tls_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) { @@ -560,6 +582,9 @@ static int do_tls_getsockopt(struct sock *sk, int optname, case TLS_RX_EXPECT_NO_PAD: rc = do_tls_getsockopt_no_pad(sk, optval, optlen); break; + case TLS_TX_MAX_PAYLOAD_LEN: + rc = do_tls_getsockopt_tx_payload_len(sk, optval, optlen); + break; default: rc = -ENOPROTOOPT; break; @@ -809,6 +834,32 @@ static int do_tls_setsockopt_no_pad(struct sock *sk, sockptr_t optval, return rc; } +static int do_tls_setsockopt_tx_payload_len(struct sock *sk, sockptr_t optval, + unsigned int optlen) +{ + struct tls_context *ctx = tls_get_ctx(sk); + struct tls_sw_context_tx *sw_ctx = tls_sw_ctx_tx(ctx); + u16 value; + bool tls_13 = ctx->prot_info.version == TLS_1_3_VERSION; + + if (sw_ctx && sw_ctx->open_rec) + return -EBUSY; + + if (sockptr_is_null(optval) || optlen != sizeof(value)) + return -EINVAL; + + if (copy_from_sockptr(&value, optval, sizeof(value))) + return -EFAULT; + + if (value < TLS_MIN_RECORD_SIZE_LIM - (tls_13 ? 1 : 0) || + value > TLS_MAX_PAYLOAD_SIZE) + return -EINVAL; + + ctx->tx_max_payload_len = value; + + return 0; +} + 
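From userspace the new option rides on the regular kTLS setsockopt path: attach the tls ULP, install the key material, then cap the record size. A hedged usage sketch (error handling elided; TLS_TX_MAX_PAYLOAD_LEN is the uapi constant this series introduces, and for TLS 1.3 the lower bound is one byte less, matching the tls_13 adjustment above):

	#include <sys/socket.h>
	#include <netinet/tcp.h>
	#include <linux/tls.h>

	static int cap_tls_tx_records(int fd)
	{
		/* must lie in [TLS_MIN_RECORD_SIZE_LIM, TLS_MAX_PAYLOAD_SIZE] */
		__u16 max_payload = 4096;

		/* assumes the tls ULP is already attached:
		 * setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
		 * followed by TLS_TX/TLS_RX crypto_info setup
		 */
		return setsockopt(fd, SOL_TLS, TLS_TX_MAX_PAYLOAD_LEN,
				  &max_payload, sizeof(max_payload));
	}
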
static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen) { @@ -830,6 +881,11 @@ static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval, case TLS_RX_EXPECT_NO_PAD: rc = do_tls_setsockopt_no_pad(sk, optval, optlen); break; + case TLS_TX_MAX_PAYLOAD_LEN: + lock_sock(sk); + rc = do_tls_setsockopt_tx_payload_len(sk, optval, optlen); + release_sock(sk); + break; default: rc = -ENOPROTOOPT; break; @@ -1019,6 +1075,7 @@ static int tls_init(struct sock *sk) ctx->tx_conf = TLS_BASE; ctx->rx_conf = TLS_BASE; + ctx->tx_max_payload_len = TLS_MAX_PAYLOAD_SIZE; update_sk_prot(sk, ctx); out: write_unlock_bh(&sk->sk_callback_lock); @@ -1108,6 +1165,12 @@ static int tls_get_info(struct sock *sk, struct sk_buff *skb, bool net_admin) goto nla_failure; } + err = nla_put_u16(skb, TLS_INFO_TX_MAX_PAYLOAD_LEN, + ctx->tx_max_payload_len); + + if (err) + goto nla_failure; + rcu_read_unlock(); nla_nest_end(skb, start); return 0; @@ -1129,6 +1192,7 @@ static size_t tls_get_info_size(const struct sock *sk, bool net_admin) nla_total_size(sizeof(u16)) + /* TLS_INFO_TXCONF */ nla_total_size(0) + /* TLS_INFO_ZC_RO_TX */ nla_total_size(0) + /* TLS_INFO_RX_NO_PAD */ + nla_total_size(sizeof(u16)) + /* TLS_INFO_TX_MAX_PAYLOAD_LEN */ 0; return size; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index d17135369980..9937d4c810f2 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1079,7 +1079,7 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg, orig_size = msg_pl->sg.size; full_record = false; try_to_copy = msg_data_left(msg); - record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size; + record_room = tls_ctx->tx_max_payload_len - msg_pl->sg.size; if (try_to_copy >= record_room) { try_to_copy = record_room; full_record = true; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 768098dec231..3b44cadaed96 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -733,17 +733,6 @@ static void unix_release_sock(struct sock *sk, int embrion) /* ---- Socket is dead now and most probably destroyed ---- */ - /* - * Fixme: BSD difference: In BSD all sockets connected to us get - * ECONNRESET and we die on the spot. In Linux we behave - * like files and pipes do and wait for the last - * dereference. - * - * Can't we simply set sock->err? - * - * What the above comment does talk about? 
--ANK(980817) - */ - if (READ_ONCE(unix_tot_inflight)) unix_gc(); /* Garbage collect fds */ } @@ -854,8 +843,8 @@ out: } static int unix_release(struct socket *); -static int unix_bind(struct socket *, struct sockaddr *, int); -static int unix_stream_connect(struct socket *, struct sockaddr *, +static int unix_bind(struct socket *, struct sockaddr_unsized *, int); +static int unix_stream_connect(struct socket *, struct sockaddr_unsized *, int addr_len, int flags); static int unix_socketpair(struct socket *, struct socket *); static int unix_accept(struct socket *, struct socket *, struct proto_accept_arg *arg); @@ -877,7 +866,7 @@ static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t); static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int); static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor); static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor); -static int unix_dgram_connect(struct socket *, struct sockaddr *, +static int unix_dgram_connect(struct socket *, struct sockaddr_unsized *, int, int); static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t); static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t, @@ -1477,7 +1466,7 @@ out: return err; } -static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +static int unix_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; struct sock *sk = sock->sk; @@ -1523,7 +1512,7 @@ static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2) unix_state_unlock(sk2); } -static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, +static int unix_dgram_connect(struct socket *sock, struct sockaddr_unsized *addr, int alen, int flags) { struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; @@ -1642,7 +1631,7 @@ static long unix_wait_for_peer(struct sock *other, long timeo) return timeo; } -static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, +static int unix_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len, int flags) { struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 76763247a377..72bb6b7ed386 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -987,7 +987,7 @@ static int vsock_release(struct socket *sock) } static int -vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) +vsock_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len) { int err; struct sock *sk; @@ -1328,7 +1328,7 @@ out: } static int vsock_dgram_connect(struct socket *sock, - struct sockaddr *addr, int addr_len, int flags) + struct sockaddr_unsized *addr, int addr_len, int flags) { int err; struct sock *sk; @@ -1528,7 +1528,7 @@ static void vsock_connect_timeout(struct work_struct *work) sock_put(sk); } -static int vsock_connect(struct socket *sock, struct sockaddr *addr, +static int vsock_connect(struct socket *sock, struct sockaddr_unsized *addr, int addr_len, int flags) { int err; diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c index 223b9660a759..a986aa6fff9b 100644 --- a/net/vmw_vsock/vsock_addr.c +++ b/net/vmw_vsock/vsock_addr.c @@ -57,7 +57,7 @@ bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, } EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); -int vsock_addr_cast(const struct sockaddr *addr, +int vsock_addr_cast(const 
struct sockaddr_unsized *addr, size_t len, struct sockaddr_vm **out_addr) { if (len < sizeof(**out_addr)) diff --git a/net/wireless/core.c b/net/wireless/core.c index 54a34d8d356e..aa78dbaaf093 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -34,6 +34,9 @@ /* name for sysfs, %d is appended */ #define PHY_NAME "phy" +/* maximum length of radio debugfs directory name */ +#define RADIO_DEBUGFSDIR_MAX_LEN 8 + MODULE_AUTHOR("Johannes Berg"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("wireless configuration support"); @@ -1042,6 +1045,18 @@ int wiphy_register(struct wiphy *wiphy) /* add to debugfs */ rdev->wiphy.debugfsdir = debugfs_create_dir(wiphy_name(&rdev->wiphy), ieee80211_debugfs_dir); + if (wiphy->n_radio > 0) { + int idx; + char radio_name[RADIO_DEBUGFSDIR_MAX_LEN]; + + for (idx = 0; idx < wiphy->n_radio; idx++) { + scnprintf(radio_name, sizeof(radio_name), "radio%d", + idx); + wiphy->radio_cfg[idx].radio_debugfsdir = + debugfs_create_dir(radio_name, + rdev->wiphy.debugfsdir); + } + } cfg80211_debugfs_rdev_add(rdev); nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); @@ -1051,12 +1066,12 @@ int wiphy_register(struct wiphy *wiphy) wiphy_regulatory_register(wiphy); if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) { - struct regulatory_request request; - - request.wiphy_idx = get_wiphy_idx(wiphy); - request.initiator = NL80211_REGDOM_SET_BY_DRIVER; - request.alpha2[0] = '9'; - request.alpha2[1] = '9'; + struct regulatory_request request = { + .wiphy_idx = get_wiphy_idx(wiphy), + .initiator = NL80211_REGDOM_SET_BY_DRIVER, + .alpha2[0] = '9', + .alpha2[1] = '9', + }; nl80211_send_reg_change_event(&request); } diff --git a/net/wireless/core.h b/net/wireless/core.h index b6bd7f4d6385..82f343663e8f 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -550,7 +550,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, bool signal_valid, unsigned long ts); enum ieee80211_ap_reg_power -cfg80211_get_6ghz_power_type(const u8 *elems, size_t elems_len); +cfg80211_get_6ghz_power_type(const u8 *elems, size_t elems_len, + u32 client_flags); #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS #define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c index 40e49074e2ee..f9e7fff1ef25 100644 --- a/net/wireless/debugfs.c +++ b/net/wireless/debugfs.c @@ -29,6 +29,24 @@ static const struct file_operations name## _ops = { \ .llseek = generic_file_llseek, \ } +#define DEBUGFS_RADIO_READONLY_FILE(name, buflen, fmt, value...) 
\ +static ssize_t name## _read(struct file *file, char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + struct wiphy_radio_cfg *radio_cfg = file->private_data; \ + char buf[buflen]; \ + int res; \ + \ + res = scnprintf(buf, buflen, fmt "\n", ##value); \ + return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ +} \ + \ +static const struct file_operations name## _ops = { \ + .read = name## _read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d", wiphy->rts_threshold); DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d", @@ -38,6 +56,9 @@ DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d", DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d", wiphy->retry_long); +DEBUGFS_RADIO_READONLY_FILE(radio_rts_threshold, 20, "%d", + radio_cfg->rts_threshold); + static int ht_print_chan(struct ieee80211_channel *chan, char *buf, int buf_size, int offset) { @@ -100,15 +121,27 @@ static const struct file_operations ht40allow_map_ops = { #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0444, phyd, &rdev->wiphy, &name## _ops) +#define DEBUGFS_RADIO_ADD(name, radio_idx) \ + debugfs_create_file(#name, 0444, radiod, \ + &rdev->wiphy.radio_cfg[radio_idx], \ + &name## _ops) + void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) { struct dentry *phyd = rdev->wiphy.debugfsdir; + struct dentry *radiod; + u8 i; DEBUGFS_ADD(rts_threshold); DEBUGFS_ADD(fragmentation_threshold); DEBUGFS_ADD(short_retry_limit); DEBUGFS_ADD(long_retry_limit); DEBUGFS_ADD(ht40allow_map); + + for (i = 0; i < rdev->wiphy.n_radio; i++) { + radiod = rdev->wiphy.radio_cfg[i].radio_debugfsdir; + DEBUGFS_RADIO_ADD(radio_rts_threshold, i); + } } struct debugfs_read_work { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 03d07b54359a..29c92bc8291b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3544,6 +3544,9 @@ static int _nl80211_parse_chandef(struct cfg80211_registered_device *rdev, return -EINVAL; } + if (cfg80211_chandef_is_s1g(chandef)) + chandef->width = NL80211_CHAN_WIDTH_1; + if (attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { enum nl80211_channel_type chantype; @@ -6745,7 +6748,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) beacon_check.relax = true; beacon_check.reg_power = cfg80211_get_6ghz_power_type(params->beacon.tail, - params->beacon.tail_len); + params->beacon.tail_len, 0); if (!cfg80211_reg_check_beaconing(&rdev->wiphy, ¶ms->chandef, &beacon_check)) { err = -EINVAL; @@ -6924,7 +6927,7 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info) beacon_check.relax = true; beacon_check.reg_power = cfg80211_get_6ghz_power_type(params->beacon.tail, - params->beacon.tail_len); + params->beacon.tail_len, 0); if (!cfg80211_reg_check_beaconing(&rdev->wiphy, &wdev->links[link_id].ap.chandef, &beacon_check)) { diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 90a9187a6b13..7546647752fd 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -2212,7 +2212,8 @@ struct cfg80211_inform_single_bss_data { }; enum ieee80211_ap_reg_power -cfg80211_get_6ghz_power_type(const u8 *elems, size_t elems_len) +cfg80211_get_6ghz_power_type(const u8 *elems, size_t elems_len, + u32 client_flags) { const struct ieee80211_he_6ghz_oper *he_6ghz_oper; struct ieee80211_he_operation *he_oper; @@ -2230,26 +2231,13 @@ cfg80211_get_6ghz_power_type(const u8 *elems, size_t elems_len) if (!he_6ghz_oper) return IEEE80211_REG_UNSET_AP; - switch 
(u8_get_bits(he_6ghz_oper->control, - IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) { - case IEEE80211_6GHZ_CTRL_REG_LPI_AP: - case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP: - return IEEE80211_REG_LPI_AP; - case IEEE80211_6GHZ_CTRL_REG_SP_AP: - case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP: - case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP_OLD: - return IEEE80211_REG_SP_AP; - case IEEE80211_6GHZ_CTRL_REG_VLP_AP: - return IEEE80211_REG_VLP_AP; - default: - return IEEE80211_REG_UNSET_AP; - } + return cfg80211_6ghz_power_type(he_6ghz_oper->control, client_flags); } static bool cfg80211_6ghz_power_type_valid(const u8 *elems, size_t elems_len, const u32 flags) { - switch (cfg80211_get_6ghz_power_type(elems, elems_len)) { + switch (cfg80211_get_6ghz_power_type(elems, elems_len, flags)) { case IEEE80211_REG_LPI_AP: return true; case IEEE80211_REG_SP_AP: diff --git a/net/wireless/util.c b/net/wireless/util.c index 56724b33af04..97f40c6d1e9d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -2942,9 +2942,8 @@ cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type) } EXPORT_SYMBOL(cfg80211_get_iftype_ext_capa); -static bool -ieee80211_radio_freq_range_valid(const struct wiphy_radio *radio, - u32 freq, u32 width) +bool ieee80211_radio_freq_range_valid(const struct wiphy_radio *radio, + u32 freq, u32 width) { const struct wiphy_radio_freq_range *r; int i; @@ -2958,6 +2957,7 @@ ieee80211_radio_freq_range_valid(const struct wiphy_radio *radio, return false; } +EXPORT_SYMBOL(ieee80211_radio_freq_range_valid); bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio, const struct cfg80211_chan_def *chandef) diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 655d1e0ae25f..af8762b24039 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -670,7 +670,7 @@ out: return 0; } -static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +static int x25_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { struct sock *sk = sock->sk; struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; @@ -743,7 +743,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk) return rc; } -static int x25_connect(struct socket *sock, struct sockaddr *uaddr, +static int x25_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 7b0c68a70888..bcfd400e9cf8 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -548,12 +548,11 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags) static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool) { - unsigned long flags; int ret; - spin_lock_irqsave(&pool->cq_lock, flags); + spin_lock(&pool->cq_cached_prod_lock); ret = xskq_prod_reserve(pool->cq); - spin_unlock_irqrestore(&pool->cq_lock, flags); + spin_unlock(&pool->cq_cached_prod_lock); return ret; } @@ -566,7 +565,7 @@ static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool, unsigned long flags; u32 idx; - spin_lock_irqsave(&pool->cq_lock, flags); + spin_lock_irqsave(&pool->cq_prod_lock, flags); idx = xskq_get_prod(pool->cq); xskq_prod_write_addr(pool->cq, idx, @@ -583,16 +582,14 @@ static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool, } } xskq_prod_submit_n(pool->cq, descs_processed); - spin_unlock_irqrestore(&pool->cq_lock, flags); + spin_unlock_irqrestore(&pool->cq_prod_lock, flags); } static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n) { - unsigned long flags; - - spin_lock_irqsave(&pool->cq_lock, flags); + 
spin_lock(&pool->cq_cached_prod_lock); xskq_prod_cancel_n(pool->cq, n); - spin_unlock_irqrestore(&pool->cq_lock, flags); + spin_unlock(&pool->cq_cached_prod_lock); } static void xsk_inc_num_desc(struct sk_buff *skb) @@ -605,7 +602,8 @@ static u32 xsk_get_num_desc(struct sk_buff *skb) return XSKCB(skb)->num_descs; } -static void xsk_destruct_skb(struct sk_buff *skb) +INDIRECT_CALLABLE_SCOPE +void xsk_destruct_skb(struct sk_buff *skb) { struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta; @@ -1241,7 +1239,7 @@ static bool xsk_validate_queues(struct xdp_sock *xs) return xs->fq_tmp && xs->cq_tmp; } -static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) +static int xsk_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len) { struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr; struct sock *sk = sock->sk; diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index aa9788f20d0d..51526034c42a 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -12,26 +12,22 @@ void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) { - unsigned long flags; - if (!xs->tx) return; - spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); + spin_lock(&pool->xsk_tx_list_lock); list_add_rcu(&xs->tx_list, &pool->xsk_tx_list); - spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); + spin_unlock(&pool->xsk_tx_list_lock); } void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) { - unsigned long flags; - if (!xs->tx) return; - spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); + spin_lock(&pool->xsk_tx_list_lock); list_del_rcu(&xs->tx_list); - spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); + spin_unlock(&pool->xsk_tx_list_lock); } void xp_destroy(struct xsk_buff_pool *pool) @@ -94,7 +90,8 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs, INIT_LIST_HEAD(&pool->xskb_list); INIT_LIST_HEAD(&pool->xsk_tx_list); spin_lock_init(&pool->xsk_tx_list_lock); - spin_lock_init(&pool->cq_lock); + spin_lock_init(&pool->cq_prod_lock); + spin_lock_init(&pool->cq_cached_prod_lock); refcount_set(&pool->users, 1); pool->fq = xs->fq_tmp; @@ -158,10 +155,6 @@ static void xp_disable_drv_zc(struct xsk_buff_pool *pool) } } -#define NETDEV_XDP_ACT_ZC (NETDEV_XDP_ACT_BASIC | \ - NETDEV_XDP_ACT_REDIRECT | \ - NETDEV_XDP_ACT_XSK_ZEROCOPY) - int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev, u16 queue_id, u16 flags) { @@ -203,7 +196,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool, /* For copy-mode, we are done. */ return 0; - if ((netdev->xdp_features & NETDEV_XDP_ACT_ZC) != NETDEV_XDP_ACT_ZC) { + if ((netdev->xdp_features & NETDEV_XDP_ACT_XSK) != NETDEV_XDP_ACT_XSK) { err = -EOPNOTSUPP; goto err_unreg_pool; }
