Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_netlink.c              21
-rw-r--r--  net/bridge/br_stp_if.c               12
-rw-r--r--  net/core/rtnetlink.c                 32
-rw-r--r--  net/dccp/dccp.h                       4
-rw-r--r--  net/dccp/ipv4.c                       6
-rw-r--r--  net/dccp/ipv6.c                       9
-rw-r--r--  net/dccp/minisocks.c                 14
-rw-r--r--  net/dsa/slave.c                      26
-rw-r--r--  net/ipv4/Makefile                     1
-rw-r--r--  net/ipv4/devinet.c                    4
-rw-r--r--  net/ipv4/fib_frontend.c              14
-rw-r--r--  net/ipv4/inet_connection_sock.c      33
-rw-r--r--  net/ipv4/inet_hashtables.c           39
-rw-r--r--  net/ipv4/syncookies.c                 4
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c           14
-rw-r--r--  net/ipv4/tcp.c                        1
-rw-r--r--  net/ipv4/tcp_fastopen.c               4
-rw-r--r--  net/ipv4/tcp_input.c                180
-rw-r--r--  net/ipv4/tcp_ipv4.c                   6
-rw-r--r--  net/ipv4/tcp_minisocks.c             14
-rw-r--r--  net/ipv4/tcp_output.c                 6
-rw-r--r--  net/ipv4/tcp_recovery.c             109
-rw-r--r--  net/ipv6/addrconf.c                   3
-rw-r--r--  net/ipv6/ip6_offload.c               12
-rw-r--r--  net/ipv6/tcp_ipv6.c                   9
-rw-r--r--  net/mac80211/Makefile                 1
-rw-r--r--  net/mac80211/cfg.c                   45
-rw-r--r--  net/mac80211/cfg.h                    9
-rw-r--r--  net/mac80211/debugfs_sta.c            8
-rw-r--r--  net/mac80211/ethtool.c               29
-rw-r--r--  net/mac80211/event.c                 27
-rw-r--r--  net/mac80211/ibss.c                  24
-rw-r--r--  net/mac80211/ieee80211_i.h           13
-rw-r--r--  net/mac80211/iface.c                  4
-rw-r--r--  net/mac80211/main.c                  14
-rw-r--r--  net/mac80211/mesh_hwmp.c              2
-rw-r--r--  net/mac80211/mesh_plink.c             6
-rw-r--r--  net/mac80211/mlme.c                  85
-rw-r--r--  net/mac80211/ocb.c                    2
-rw-r--r--  net/mac80211/rx.c                    81
-rw-r--r--  net/mac80211/scan.c                  20
-rw-r--r--  net/mac80211/sta_info.c             101
-rw-r--r--  net/mac80211/sta_info.h             103
-rw-r--r--  net/mac80211/status.c                53
-rw-r--r--  net/mac80211/trace.h                  2
-rw-r--r--  net/mac80211/tx.c                    20
-rw-r--r--  net/mac80211/util.c                  12
-rw-r--r--  net/mac80211/wpa.c                    9
-rw-r--r--  net/mpls/af_mpls.c                  573
-rw-r--r--  net/mpls/internal.h                  52
-rw-r--r--  net/openvswitch/flow_netlink.c        6
-rw-r--r--  net/openvswitch/vport-geneve.c        2
-rw-r--r--  net/openvswitch/vport-gre.c           2
-rw-r--r--  net/openvswitch/vport-internal_dev.c  8
-rw-r--r--  net/openvswitch/vport-netdev.c       33
-rw-r--r--  net/openvswitch/vport-netdev.h        1
-rw-r--r--  net/openvswitch/vport-vxlan.c         2
-rw-r--r--  net/openvswitch/vport.c              30
-rw-r--r--  net/openvswitch/vport.h               7
-rw-r--r--  net/wireless/Kconfig                 10
-rw-r--r--  net/wireless/core.c                   5
-rw-r--r--  net/wireless/core.h                   1
-rw-r--r--  net/wireless/nl80211.c              288
-rw-r--r--  net/wireless/reg.c                  291
-rw-r--r--  net/wireless/scan.c                  61
-rw-r--r--  net/wireless/trace.h                 22
66 files changed, 1634 insertions, 1007 deletions
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 94b4de8c4646..40197ff8918a 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1214,29 +1214,10 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
return 0;
}
-static size_t br_get_link_af_size(const struct net_device *dev)
-{
- struct net_bridge_port *p;
- struct net_bridge *br;
- int num_vlans = 0;
-
- if (br_port_exists(dev)) {
- p = br_port_get_rtnl(dev);
- num_vlans = br_get_num_vlan_infos(nbp_vlan_group(p),
- RTEXT_FILTER_BRVLAN);
- } else if (dev->priv_flags & IFF_EBRIDGE) {
- br = netdev_priv(dev);
- num_vlans = br_get_num_vlan_infos(br_vlan_group(br),
- RTEXT_FILTER_BRVLAN);
- }
-
- /* Each VLAN is returned in bridge_vlan_info along with flags */
- return num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
-}
static struct rtnl_af_ops br_af_ops __read_mostly = {
.family = AF_BRIDGE,
- .get_link_af_size = br_get_link_af_size,
+ .get_link_af_size = br_get_link_af_size_filtered,
};
struct rtnl_link_ops br_link_ops __read_mostly = {
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 4ca449a16132..fa53d7a89f48 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -15,6 +15,7 @@
#include <linux/kmod.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
+#include <net/switchdev.h>
#include "br_private.h"
#include "br_private_stp.h"
@@ -35,11 +36,22 @@ static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
/* called under bridge lock */
void br_init_port(struct net_bridge_port *p)
{
+ struct switchdev_attr attr = {
+ .id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
+ .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP | SWITCHDEV_F_DEFER,
+ .u.ageing_time = p->br->ageing_time,
+ };
+ int err;
+
p->port_id = br_make_port_id(p->priority, p->port_no);
br_become_designated_port(p);
br_set_state(p, BR_STATE_BLOCKING);
p->topology_change_ack = 0;
p->config_pending = 0;
+
+ err = switchdev_port_attr_set(p->dev, &attr);
+ if (err)
+ netdev_err(p->dev, "failed to set HW ageing time\n");
}
/* called under bridge lock */
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 24775953fa68..504bd17b7456 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -497,7 +497,8 @@ void rtnl_af_unregister(struct rtnl_af_ops *ops)
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);
-static size_t rtnl_link_get_af_size(const struct net_device *dev)
+static size_t rtnl_link_get_af_size(const struct net_device *dev,
+ u32 ext_filter_mask)
{
struct rtnl_af_ops *af_ops;
size_t size;
@@ -509,7 +510,7 @@ static size_t rtnl_link_get_af_size(const struct net_device *dev)
if (af_ops->get_link_af_size) {
/* AF_* + nested data */
size += nla_total_size(sizeof(struct nlattr)) +
- af_ops->get_link_af_size(dev);
+ af_ops->get_link_af_size(dev, ext_filter_mask);
}
}
@@ -837,7 +838,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
/* IFLA_VF_STATS_BROADCAST */
nla_total_size(sizeof(__u64)) +
/* IFLA_VF_STATS_MULTICAST */
- nla_total_size(sizeof(__u64)));
+ nla_total_size(sizeof(__u64)) +
+ nla_total_size(sizeof(struct ifla_vf_trust)));
return size;
} else
return 0;
@@ -900,7 +902,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+ rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
- + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+ + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
+ nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
+ nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+ nla_total_size(1); /* IFLA_PROTO_DOWN */
@@ -1160,6 +1162,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
struct ifla_vf_link_state vf_linkstate;
struct ifla_vf_rss_query_en vf_rss_query_en;
struct ifla_vf_stats vf_stats;
+ struct ifla_vf_trust vf_trust;
/*
* Not all SR-IOV capable drivers support the
@@ -1169,6 +1172,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
*/
ivi.spoofchk = -1;
ivi.rss_query_en = -1;
+ ivi.trusted = -1;
memset(ivi.mac, 0, sizeof(ivi.mac));
/* The default value for VF link state is "auto"
* IFLA_VF_LINK_STATE_AUTO which equals zero
@@ -1182,7 +1186,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
vf_tx_rate.vf =
vf_spoofchk.vf =
vf_linkstate.vf =
- vf_rss_query_en.vf = ivi.vf;
+ vf_rss_query_en.vf =
+ vf_trust.vf = ivi.vf;
memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
vf_vlan.vlan = ivi.vlan;
@@ -1193,6 +1198,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
vf_spoofchk.setting = ivi.spoofchk;
vf_linkstate.link_state = ivi.linkstate;
vf_rss_query_en.setting = ivi.rss_query_en;
+ vf_trust.setting = ivi.trusted;
vf = nla_nest_start(skb, IFLA_VF_INFO);
if (!vf) {
nla_nest_cancel(skb, vfinfo);
@@ -1210,7 +1216,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
&vf_linkstate) ||
nla_put(skb, IFLA_VF_RSS_QUERY_EN,
sizeof(vf_rss_query_en),
- &vf_rss_query_en))
+ &vf_rss_query_en) ||
+ nla_put(skb, IFLA_VF_TRUST,
+ sizeof(vf_trust), &vf_trust))
goto nla_put_failure;
memset(&vf_stats, 0, sizeof(vf_stats));
if (dev->netdev_ops->ndo_get_vf_stats)
@@ -1347,6 +1355,7 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
[IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
[IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
[IFLA_VF_STATS] = { .type = NLA_NESTED },
+ [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
};
static const struct nla_policy ifla_vf_stats_policy[IFLA_VF_STATS_MAX + 1] = {
@@ -1586,6 +1595,16 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
return err;
}
+ if (tb[IFLA_VF_TRUST]) {
+ struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
+
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_trust)
+ err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
+ if (err < 0)
+ return err;
+ }
+
return err;
}
@@ -3443,4 +3462,3 @@ void __init rtnetlink_init(void)
rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
}
-
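[Editor's note] The IFLA_VF_TRUST attribute added above is pure plumbing: rtnetlink validates it and hands it to the driver's ndo_set_vf_trust callback, returning -EOPNOTSUPP when the driver provides none. A minimal driver-side sketch follows; the driver name, its private structures and the range check are hypothetical, and the callback prototype is inferred from the ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting) call in do_setvfinfo() above rather than quoted from a real driver.

/* Hypothetical driver hook for the new VF trust setting; prototype
 * inferred from the do_setvfinfo() call site above (dev, vf, setting).
 */
#include <linux/netdevice.h>

struct example_vf_cfg {
	bool trusted;
};

struct example_priv {
	int num_vfs;
	struct example_vf_cfg *vf;	/* one entry per VF */
};

static int example_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct example_priv *priv = netdev_priv(dev);

	if (vf < 0 || vf >= priv->num_vfs)
		return -EINVAL;

	priv->vf[vf].trusted = setting;
	/* push the new policy to firmware/hardware here */
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	/* ... other ops ... */
	.ndo_set_vf_trust = example_set_vf_trust,
};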
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 923f5a180134..b0e28d24e1a7 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -278,7 +278,9 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
struct sock *dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst);
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req);
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 59bc180b02d8..5684e14932bd 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -393,7 +393,9 @@ static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb)
struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst)
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req)
{
struct inet_request_sock *ireq;
struct inet_sock *newinet;
@@ -426,7 +428,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
if (__inet_inherit_port(sk, newsk) < 0)
goto put_and_exit;
- __inet_hash_nolisten(newsk, NULL);
+ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
return newsk;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index d9cc731f2619..ef4e48ce9143 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -380,7 +380,9 @@ drop:
static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst)
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req)
{
struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *newnp;
@@ -393,7 +395,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
/*
* v6 mapped
*/
- newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
+ newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
+ req_unhash, own_req);
if (newsk == NULL)
return NULL;
@@ -511,7 +514,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
dccp_done(newsk);
goto out;
}
- __inet_hash(newsk, NULL);
+ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
return newsk;
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index d10aace43672..1994f8af646b 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -143,6 +143,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
{
struct sock *child = NULL;
struct dccp_request_sock *dreq = dccp_rsk(req);
+ bool own_req;
/* Check for retransmitted REQUEST */
if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
@@ -182,14 +183,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
if (dccp_parse_options(sk, dreq, skb))
goto drop;
- child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
- if (child == NULL)
+ child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+ req, &own_req);
+ if (!child)
goto listen_overflow;
- inet_csk_reqsk_queue_drop(sk, req);
- inet_csk_reqsk_queue_add(sk, req, child);
-out:
- return child;
+ return inet_csk_complete_hashdance(sk, child, req, own_req);
+
listen_overflow:
dccp_pr_debug("listen_overflow!\n");
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
@@ -198,7 +198,7 @@ drop:
req->rsk_ops->send_reset(sk, skb);
inet_csk_reqsk_queue_drop(sk, req);
- goto out;
+ return NULL;
}
EXPORT_SYMBOL_GPL(dccp_check_req);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index b0b8da0f5af8..481754ee062a 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -378,31 +378,11 @@ static int dsa_slave_port_fdb_dump(struct net_device *dev,
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- unsigned char addr[ETH_ALEN] = { 0 };
- u16 vid = 0;
- int ret;
-
- if (!ds->drv->port_fdb_getnext)
- return -EOPNOTSUPP;
- for (;;) {
- bool is_static;
-
- ret = ds->drv->port_fdb_getnext(ds, p->port, addr, &vid,
- &is_static);
- if (ret < 0)
- break;
+ if (ds->drv->port_fdb_dump)
+ return ds->drv->port_fdb_dump(ds, p->port, fdb, cb);
- ether_addr_copy(fdb->addr, addr);
- fdb->vid = vid;
- fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
-
- ret = cb(&fdb->obj);
- if (ret < 0)
- break;
- }
-
- return ret == -ENOENT ? 0 : ret;
+ return -EOPNOTSUPP;
}
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 89aacb630a53..c29809f765dc 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -8,6 +8,7 @@ obj-y := route.o inetpeer.o protocol.o \
inet_timewait_sock.o inet_connection_sock.o \
tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \
+ tcp_recovery.o \
tcp_offload.o datagram.o raw.o udp.o udplite.o \
udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
fib_frontend.o fib_semantics.o fib_trie.o \
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 735008472844..cebd9d31e65a 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1644,7 +1644,8 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
}
-static size_t inet_get_link_af_size(const struct net_device *dev)
+static size_t inet_get_link_af_size(const struct net_device *dev,
+ u32 ext_filter_mask)
{
struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
@@ -2398,4 +2399,3 @@ void __init devinet_init(void)
rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
inet_netconf_dump_devconf, NULL);
}
-
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index d7c2bb0c4f65..e786873c89f2 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -867,9 +867,10 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
(prefix != addr || ifa->ifa_prefixlen < 32)) {
- fib_magic(RTM_NEWROUTE,
- dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
- prefix, ifa->ifa_prefixlen, prim);
+ if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
+ fib_magic(RTM_NEWROUTE,
+ dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
+ prefix, ifa->ifa_prefixlen, prim);
/* Add network specific broadcasts, when it takes a sense */
if (ifa->ifa_prefixlen < 31) {
@@ -914,9 +915,10 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
}
} else if (!ipv4_is_zeronet(any) &&
(any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) {
- fib_magic(RTM_DELROUTE,
- dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
- any, ifa->ifa_prefixlen, prim);
+ if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
+ fib_magic(RTM_DELROUTE,
+ dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
+ any, ifa->ifa_prefixlen, prim);
subnet = 1;
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 8430bc8ccd58..1feb15f23de8 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -523,15 +523,15 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
struct request_sock *req)
{
struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
- spinlock_t *lock;
- bool found;
+ bool found = false;
- lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
-
- spin_lock(lock);
- found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
- spin_unlock(lock);
+ if (sk_hashed(req_to_sk(req))) {
+ spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
+ spin_lock(lock);
+ found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
+ spin_unlock(lock);
+ }
if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
reqsk_put(req);
return found;
@@ -811,6 +811,25 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
+struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+ struct request_sock *req, bool own_req)
+{
+ if (own_req) {
+ inet_csk_reqsk_queue_drop(sk, req);
+ reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
+ inet_csk_reqsk_queue_add(sk, req, child);
+ /* Warning: caller must not call reqsk_put(req);
+ * child stole last reference on it.
+ */
+ return child;
+ }
+ /* Too bad, another child took ownership of the request, undo. */
+ bh_unlock_sock(child);
+ sock_put(child);
+ return NULL;
+}
+EXPORT_SYMBOL(inet_csk_complete_hashdance);
+
/*
* This routine closes sockets which have been at least partially
* opened, but not yet accepted.
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 958728a22001..ccc5980797fc 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -407,13 +407,13 @@ static u32 inet_sk_port_offset(const struct sock *sk)
/* insert a socket into ehash, and eventually remove another one
* (The another one can be a SYN_RECV or TIMEWAIT
*/
-int inet_ehash_insert(struct sock *sk, struct sock *osk)
+bool inet_ehash_insert(struct sock *sk, struct sock *osk)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct hlist_nulls_head *list;
struct inet_ehash_bucket *head;
spinlock_t *lock;
- int ret = 0;
+ bool ret = true;
WARN_ON_ONCE(!sk_unhashed(sk));
@@ -423,30 +423,41 @@ int inet_ehash_insert(struct sock *sk, struct sock *osk)
lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
spin_lock(lock);
- __sk_nulls_add_node_rcu(sk, list);
if (osk) {
- WARN_ON(sk->sk_hash != osk->sk_hash);
- sk_nulls_del_node_init_rcu(osk);
+ WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
+ ret = sk_nulls_del_node_init_rcu(osk);
}
+ if (ret)
+ __sk_nulls_add_node_rcu(sk, list);
spin_unlock(lock);
return ret;
}
-void __inet_hash_nolisten(struct sock *sk, struct sock *osk)
+bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
{
- inet_ehash_insert(sk, osk);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ bool ok = inet_ehash_insert(sk, osk);
+
+ if (ok) {
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ } else {
+ percpu_counter_inc(sk->sk_prot->orphan_count);
+ sk->sk_state = TCP_CLOSE;
+ sock_set_flag(sk, SOCK_DEAD);
+ inet_csk_destroy_sock(sk);
+ }
+ return ok;
}
-EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
+EXPORT_SYMBOL_GPL(inet_ehash_nolisten);
void __inet_hash(struct sock *sk, struct sock *osk)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct inet_listen_hashbucket *ilb;
- if (sk->sk_state != TCP_LISTEN)
- return __inet_hash_nolisten(sk, osk);
-
+ if (sk->sk_state != TCP_LISTEN) {
+ inet_ehash_nolisten(sk, osk);
+ return;
+ }
WARN_ON(!sk_unhashed(sk));
ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
@@ -567,7 +578,7 @@ ok:
inet_bind_hash(sk, tb, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->inet_sport = htons(port);
- __inet_hash_nolisten(sk, (struct sock *)tw);
+ inet_ehash_nolisten(sk, (struct sock *)tw);
}
if (tw)
inet_twsk_bind_unhash(tw, hinfo);
@@ -584,7 +595,7 @@ ok:
tb = inet_csk(sk)->icsk_bind_hash;
spin_lock_bh(&head->lock);
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
- __inet_hash_nolisten(sk, NULL);
+ inet_ehash_nolisten(sk, NULL);
spin_unlock_bh(&head->lock);
return 0;
} else {
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 4c0892badb8b..4cbe9f0a4281 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -221,8 +221,10 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct sock *child;
+ bool own_req;
- child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
+ child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
+ NULL, &own_req);
if (child) {
atomic_set(&req->rsk_refcnt, 1);
sock_rps_save_rxhash(child, skb);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 894da3a70aff..25300c5e283b 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -496,6 +496,13 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "tcp_recovery",
+ .data = &sysctl_tcp_recovery,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "tcp_reordering",
.data = &sysctl_tcp_reordering,
.maxlen = sizeof(int),
@@ -577,6 +584,13 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "tcp_min_rtt_wlen",
+ .data = &sysctl_tcp_min_rtt_wlen,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
.procname = "tcp_low_latency",
.data = &sysctl_tcp_low_latency,
.maxlen = sizeof(int),
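[Editor's note] Both new knobs become regular files under /proc/sys/net/ipv4/ (the paths follow from the .procname fields above and the usual net.ipv4 sysctl layout): tcp_recovery is a bitmask enabling the RACK-based loss detection added later in this diff, and tcp_min_rtt_wlen is the min-RTT filter window in seconds (default 300). A throwaway user-space reader, purely for illustration:

#include <stdio.h>

static int read_int_sysctl(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("tcp_recovery     = %d\n",
	       read_int_sysctl("/proc/sys/net/ipv4/tcp_recovery"));
	printf("tcp_min_rtt_wlen = %d s\n",
	       read_int_sysctl("/proc/sys/net/ipv4/tcp_min_rtt_wlen"));
	return 0;
}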
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ac1bdbb50352..0cfa7c0c1e80 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -388,6 +388,7 @@ void tcp_init_sock(struct sock *sk)
icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+ tp->rtt_min[0].rtt = ~0U;
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 93396bf7b475..55be6ac70cff 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -133,12 +133,14 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
struct sock *child;
u32 end_seq;
+ bool own_req;
req->num_retrans = 0;
req->num_timeout = 0;
req->sk = NULL;
- child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+ child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+ NULL, &own_req);
if (!child)
return NULL;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 944eaca69115..fdd88c3803a6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -95,6 +95,7 @@ int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly = 2;
+int sysctl_tcp_min_rtt_wlen __read_mostly = 300;
int sysctl_tcp_thin_dupack __read_mostly;
@@ -880,6 +881,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
if (metric > 0)
tcp_disable_early_retrans(tp);
+ tp->rack.reord = 1;
}
/* This must be called before lost_out is incremented */
@@ -905,8 +907,7 @@ static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
}
}
-static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
- struct sk_buff *skb)
+void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
{
tcp_verify_retransmit_hint(tp, skb);
@@ -1047,70 +1048,6 @@ static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
return !before(start_seq, end_seq - tp->max_window);
}
-/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
- * Event "B". Later note: FACK people cheated me again 8), we have to account
- * for reordering! Ugly, but should help.
- *
- * Search retransmitted skbs from write_queue that were sent when snd_nxt was
- * less than what is now known to be received by the other end (derived from
- * highest SACK block). Also calculate the lowest snd_nxt among the remaining
- * retransmitted skbs to avoid some costly processing per ACKs.
- */
-static void tcp_mark_lost_retrans(struct sock *sk, int *flag)
-{
- const struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb;
- int cnt = 0;
- u32 new_low_seq = tp->snd_nxt;
- u32 received_upto = tcp_highest_sack_seq(tp);
-
- if (!tcp_is_fack(tp) || !tp->retrans_out ||
- !after(received_upto, tp->lost_retrans_low) ||
- icsk->icsk_ca_state != TCP_CA_Recovery)
- return;
-
- tcp_for_write_queue(skb, sk) {
- u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
-
- if (skb == tcp_send_head(sk))
- break;
- if (cnt == tp->retrans_out)
- break;
- if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
- continue;
-
- if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
- continue;
-
- /* TODO: We would like to get rid of tcp_is_fack(tp) only
- * constraint here (see above) but figuring out that at
- * least tp->reordering SACK blocks reside between ack_seq
- * and received_upto is not easy task to do cheaply with
- * the available datastructures.
- *
- * Whether FACK should check here for tp->reordering segs
- * in-between one could argue for either way (it would be
- * rather simple to implement as we could count fack_count
- * during the walk and do tp->fackets_out - fack_count).
- */
- if (after(received_upto, ack_seq)) {
- TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
- tp->retrans_out -= tcp_skb_pcount(skb);
- *flag |= FLAG_LOST_RETRANS;
- tcp_skb_mark_lost_uncond_verify(tp, skb);
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
- } else {
- if (before(ack_seq, new_low_seq))
- new_low_seq = ack_seq;
- cnt += tcp_skb_pcount(skb);
- }
- }
-
- if (tp->retrans_out)
- tp->lost_retrans_low = new_low_seq;
-}
-
static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
struct tcp_sack_block_wire *sp, int num_sacks,
u32 prior_snd_una)
@@ -1236,6 +1173,8 @@ static u8 tcp_sacktag_one(struct sock *sk,
return sacked;
if (!(sacked & TCPCB_SACKED_ACKED)) {
+ tcp_rack_advance(tp, xmit_time, sacked);
+
if (sacked & TCPCB_SACKED_RETRANS) {
/* If the segment is not tagged as lost,
* we do not clear RETRANS, believing
@@ -1837,7 +1776,6 @@ advance_sp:
((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
- tcp_mark_lost_retrans(sk, &state->flag);
tcp_verify_left_out(tp);
out:
@@ -2314,14 +2252,29 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
tp->snd_cwnd_stamp = tcp_time_stamp;
}
+static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
+{
+ return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+ before(tp->rx_opt.rcv_tsecr, when);
+}
+
+/* skb is spurious retransmitted if the returned timestamp echo
+ * reply is prior to the skb transmission time
+ */
+static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
+ const struct sk_buff *skb)
+{
+ return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
+ tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb));
+}
+
/* Nothing was retransmitted or returned timestamp is less
* than timestamp of the first retransmission.
*/
static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
{
return !tp->retrans_stamp ||
- (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
- before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
+ tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
}
/* Undo procedures. */
@@ -2853,6 +2806,11 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
}
}
+ /* Use RACK to detect loss */
+ if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS &&
+ tcp_rack_mark_lost(sk))
+ flag |= FLAG_LOST_RETRANS;
+
/* E. Process state. */
switch (icsk->icsk_ca_state) {
case TCP_CA_Recovery:
@@ -2915,8 +2873,69 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
tcp_xmit_retransmit_queue(sk);
}
+/* Kathleen Nichols' algorithm for tracking the minimum value of
+ * a data stream over some fixed time interval. (E.g., the minimum
+ * RTT over the past five minutes.) It uses constant space and constant
+ * time per update yet almost always delivers the same minimum as an
+ * implementation that has to keep all the data in the window.
+ *
+ * The algorithm keeps track of the best, 2nd best & 3rd best min
+ * values, maintaining an invariant that the measurement time of the
+ * n'th best >= n-1'th best. It also makes sure that the three values
+ * are widely separated in the time window since that bounds the worse
+ * case error when that data is monotonically increasing over the window.
+ *
+ * Upon getting a new min, we can forget everything earlier because it
+ * has no value - the new min is <= everything else in the window by
+ * definition and it's the most recent. So we restart fresh on every new min
+ * and overwrites 2nd & 3rd choices. The same property holds for 2nd & 3rd
+ * best.
+ */
+static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
+{
+ const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
+ struct rtt_meas *m = tcp_sk(sk)->rtt_min;
+ struct rtt_meas rttm = { .rtt = (rtt_us ? : 1), .ts = now };
+ u32 elapsed;
+
+ /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
+ if (unlikely(rttm.rtt <= m[0].rtt))
+ m[0] = m[1] = m[2] = rttm;
+ else if (rttm.rtt <= m[1].rtt)
+ m[1] = m[2] = rttm;
+ else if (rttm.rtt <= m[2].rtt)
+ m[2] = rttm;
+
+ elapsed = now - m[0].ts;
+ if (unlikely(elapsed > wlen)) {
+ /* Passed entire window without a new min so make 2nd choice
+ * the new min & 3rd choice the new 2nd. So forth and so on.
+ */
+ m[0] = m[1];
+ m[1] = m[2];
+ m[2] = rttm;
+ if (now - m[0].ts > wlen) {
+ m[0] = m[1];
+ m[1] = rttm;
+ if (now - m[0].ts > wlen)
+ m[0] = rttm;
+ }
+ } else if (m[1].ts == m[0].ts && elapsed > wlen / 4) {
+ /* Passed a quarter of the window without a new min so
+ * take 2nd choice from the 2nd quarter of the window.
+ */
+ m[2] = m[1] = rttm;
+ } else if (m[2].ts == m[1].ts && elapsed > wlen / 2) {
+ /* Passed half the window without a new min so take the 3rd
+ * choice from the last half of the window.
+ */
+ m[2] = rttm;
+ }
+}
+
static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
- long seq_rtt_us, long sack_rtt_us)
+ long seq_rtt_us, long sack_rtt_us,
+ long ca_rtt_us)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -2925,9 +2944,6 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
* Karn's algorithm forbids taking RTT if some retransmitted data
* is acked (RFC6298).
*/
- if (flag & FLAG_RETRANS_DATA_ACKED)
- seq_rtt_us = -1L;
-
if (seq_rtt_us < 0)
seq_rtt_us = sack_rtt_us;
@@ -2939,11 +2955,16 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
*/
if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
flag & FLAG_ACKED)
- seq_rtt_us = jiffies_to_usecs(tcp_time_stamp - tp->rx_opt.rcv_tsecr);
-
+ seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp -
+ tp->rx_opt.rcv_tsecr);
if (seq_rtt_us < 0)
return false;
+ /* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is
+ * always taken together with ACK, SACK, or TS-opts. Any negative
+ * values will be skipped with the seq_rtt_us < 0 check above.
+ */
+ tcp_update_rtt_min(sk, ca_rtt_us);
tcp_rtt_estimator(sk, seq_rtt_us);
tcp_set_rto(sk);
@@ -2964,7 +2985,7 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack);
}
- tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L);
+ tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us);
}
@@ -3131,6 +3152,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (sacked & TCPCB_SACKED_ACKED)
tp->sacked_out -= acked_pcount;
+ else if (tcp_is_sack(tp) && !tcp_skb_spurious_retrans(tp, skb))
+ tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
if (sacked & TCPCB_LOST)
tp->lost_out -= acked_pcount;
@@ -3169,7 +3192,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
flag |= FLAG_SACK_RENEGING;
skb_mstamp_get(&now);
- if (likely(first_ackt.v64)) {
+ if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
}
@@ -3178,7 +3201,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt);
}
- rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us);
+ rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
+ ca_rtt_us);
if (flag & FLAG_ACKED) {
tcp_rearm_rto(sk);
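[Editor's note] The windowed min-RTT tracker added above (tcp_update_rtt_min(), following Kathleen Nichols' three-sample scheme) is easier to digest outside the kernel. The sketch below restates the same update rules in user-space C; the millisecond clock, window length and sample values are made up for illustration, and the kernel additionally seeds rtt_min[0].rtt with ~0U as shown in tcp.c and tcp_minisocks.c.

/* User-space restatement of tcp_update_rtt_min() above: track the best,
 * 2nd best and 3rd best minima so the windowed minimum can roll forward
 * in O(1) space and time per sample.
 */
#include <limits.h>
#include <stdio.h>

struct rtt_meas {
	unsigned int rtt;	/* measured RTT (usec), mirrors the kernel's u32 */
	unsigned int ts;	/* time the measurement was taken (msec here) */
};

static struct rtt_meas m[3] = {
	{ UINT_MAX, 0 }, { UINT_MAX, 0 }, { UINT_MAX, 0 }
};

static void update_rtt_min(unsigned int rtt_us, unsigned int now_ms,
			   unsigned int wlen_ms)
{
	struct rtt_meas rttm = { rtt_us ? rtt_us : 1, now_ms };
	unsigned int elapsed;

	/* New 1st/2nd/3rd choice: everything older and larger is superseded */
	if (rttm.rtt <= m[0].rtt)
		m[0] = m[1] = m[2] = rttm;
	else if (rttm.rtt <= m[1].rtt)
		m[1] = m[2] = rttm;
	else if (rttm.rtt <= m[2].rtt)
		m[2] = rttm;

	elapsed = now_ms - m[0].ts;
	if (elapsed > wlen_ms) {
		/* 1st choice aged out of the window: promote 2nd and 3rd */
		m[0] = m[1];
		m[1] = m[2];
		m[2] = rttm;
		if (now_ms - m[0].ts > wlen_ms) {
			m[0] = m[1];
			m[1] = rttm;
			if (now_ms - m[0].ts > wlen_ms)
				m[0] = rttm;
		}
	} else if (m[1].ts == m[0].ts && elapsed > wlen_ms / 4) {
		/* a quarter window without a new min: refresh 2nd choice */
		m[2] = m[1] = rttm;
	} else if (m[2].ts == m[1].ts && elapsed > wlen_ms / 2) {
		/* half the window without a new min: refresh 3rd choice */
		m[2] = rttm;
	}
}

int main(void)
{
	/* Feed a few (time, rtt) samples through a 10-second window. */
	unsigned int samples[][2] = {
		{ 0, 40000 }, { 3000, 35000 }, { 7000, 50000 },
		{ 12000, 60000 }, { 15000, 55000 }
	};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		update_rtt_min(samples[i][1], samples[i][0], 10000);
		printf("t=%5u ms  windowed min RTT = %u us\n",
		       samples[i][0], m[0].rtt);
	}
	return 0;
}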
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 30dd45c1f568..1c2648bbac4b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1247,7 +1247,9 @@ EXPORT_SYMBOL(tcp_v4_conn_request);
*/
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst)
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req)
{
struct inet_request_sock *ireq;
struct inet_sock *newinet;
@@ -1323,7 +1325,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
if (__inet_inherit_port(sk, newsk) < 0)
goto put_and_exit;
- __inet_hash_nolisten(newsk, NULL);
+ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
return newsk;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 41828bdc5d32..3575dd1e5b67 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -470,6 +470,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->srtt_us = 0;
newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+ newtp->rtt_min[0].rtt = ~0U;
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
newtp->packets_out = 0;
@@ -547,6 +548,8 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
tcp_ecn_openreq_child(newtp, req);
newtp->fastopen_rsk = NULL;
newtp->syn_data_acked = 0;
+ newtp->rack.mstamp.v64 = 0;
+ newtp->rack.advanced = 0;
newtp->saved_syn = req->saved_syn;
req->saved_syn = NULL;
@@ -577,6 +580,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th = tcp_hdr(skb);
__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
bool paws_reject = false;
+ bool own_req;
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr)>>2)) {
@@ -764,18 +768,14 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
* ESTABLISHED STATE. If it will be dropped after
* socket is created, wait for troubles.
*/
- child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+ child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+ req, &own_req);
if (!child)
goto listen_overflow;
sock_rps_save_rxhash(child, skb);
tcp_synack_rtt_meas(child, req);
- inet_csk_reqsk_queue_drop(sk, req);
- inet_csk_reqsk_queue_add(sk, req, child);
- /* Warning: caller must not call reqsk_put(req);
- * child stole last reference on it.
- */
- return child;
+ return inet_csk_complete_hashdance(sk, child, req, own_req);
listen_overflow:
if (!sysctl_tcp_abort_on_overflow) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 19adedb8c5cc..f6f7f9b4901b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2655,8 +2655,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
net_dbg_ratelimited("retrans_out leaked\n");
}
#endif
- if (!tp->retrans_out)
- tp->lost_retrans_low = tp->snd_nxt;
TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
tp->retrans_out += tcp_skb_pcount(skb);
@@ -2664,10 +2662,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if (!tp->retrans_stamp)
tp->retrans_stamp = tcp_skb_timestamp(skb);
- /* snd_nxt is stored to detect loss of retransmitted segment,
- * see tcp_input.c tcp_sacktag_write_queue().
- */
- TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
} else if (err != -EBUSY) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
}
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
new file mode 100644
index 000000000000..5353085fd0b2
--- /dev/null
+++ b/net/ipv4/tcp_recovery.c
@@ -0,0 +1,109 @@
+#include <linux/tcp.h>
+#include <net/tcp.h>
+
+int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS;
+
+/* Marks a packet lost, if some packet sent later has been (s)acked.
+ * The underlying idea is similar to the traditional dupthresh and FACK
+ * but they look at different metrics:
+ *
+ * dupthresh: 3 OOO packets delivered (packet count)
+ * FACK: sequence delta to highest sacked sequence (sequence space)
+ * RACK: sent time delta to the latest delivered packet (time domain)
+ *
+ * The advantage of RACK is it applies to both original and retransmitted
+ * packet and therefore is robust against tail losses. Another advantage
+ * is being more resilient to reordering by simply allowing some
+ * "settling delay", instead of tweaking the dupthresh.
+ *
+ * The current version is only used after recovery starts but can be
+ * easily extended to detect the first loss.
+ */
+int tcp_rack_mark_lost(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+ u32 reo_wnd, prior_retrans = tp->retrans_out;
+
+ if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
+ return 0;
+
+ /* Reset the advanced flag to avoid unnecessary queue scanning */
+ tp->rack.advanced = 0;
+
+ /* To be more reordering resilient, allow min_rtt/4 settling delay
+ * (lower-bounded to 1000uS). We use min_rtt instead of the smoothed
+ * RTT because reordering is often a path property and less related
+ * to queuing or delayed ACKs.
+ *
+ * TODO: measure and adapt to the observed reordering delay, and
+ * use a timer to retransmit like the delayed early retransmit.
+ */
+ reo_wnd = 1000;
+ if (tp->rack.reord && tcp_min_rtt(tp) != ~0U)
+ reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
+
+ tcp_for_write_queue(skb, sk) {
+ struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+
+ if (skb == tcp_send_head(sk))
+ break;
+
+ /* Skip ones already (s)acked */
+ if (!after(scb->end_seq, tp->snd_una) ||
+ scb->sacked & TCPCB_SACKED_ACKED)
+ continue;
+
+ if (skb_mstamp_after(&tp->rack.mstamp, &skb->skb_mstamp)) {
+
+ if (skb_mstamp_us_delta(&tp->rack.mstamp,
+ &skb->skb_mstamp) <= reo_wnd)
+ continue;
+
+ /* skb is lost if packet sent later is sacked */
+ tcp_skb_mark_lost_uncond_verify(tp, skb);
+ if (scb->sacked & TCPCB_SACKED_RETRANS) {
+ scb->sacked &= ~TCPCB_SACKED_RETRANS;
+ tp->retrans_out -= tcp_skb_pcount(skb);
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPLOSTRETRANSMIT);
+ }
+ } else if (!(scb->sacked & TCPCB_RETRANS)) {
+ /* Original data are sent sequentially so stop early
+ * b/c the rest are all sent after rack_sent
+ */
+ break;
+ }
+ }
+ return prior_retrans - tp->retrans_out;
+}
+
+/* Record the most recently (re)sent time among the (s)acked packets */
+void tcp_rack_advance(struct tcp_sock *tp,
+ const struct skb_mstamp *xmit_time, u8 sacked)
+{
+ if (tp->rack.mstamp.v64 &&
+ !skb_mstamp_after(xmit_time, &tp->rack.mstamp))
+ return;
+
+ if (sacked & TCPCB_RETRANS) {
+ struct skb_mstamp now;
+
+ /* If the sacked packet was retransmitted, it's ambiguous
+ * whether the retransmission or the original (or the prior
+ * retransmission) was sacked.
+ *
+ * If the original is lost, there is no ambiguity. Otherwise
+ * we assume the original can be delayed up to aRTT + min_rtt.
+ * the aRTT term is bounded by the fast recovery or timeout,
+ * so it's at least one RTT (i.e., retransmission is at least
+ * an RTT later).
+ */
+ skb_mstamp_get(&now);
+ if (skb_mstamp_us_delta(&now, xmit_time) < tcp_min_rtt(tp))
+ return;
+ }
+
+ tp->rack.mstamp = *xmit_time;
+ tp->rack.advanced = 1;
+}
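[Editor's note] Stripped of the write-queue walk and SACK bookkeeping, the rule in tcp_rack_mark_lost() above is a single time comparison: a packet that is still undelivered is declared lost once some packet sent more than reo_wnd later has been (s)acked. A toy user-space restatement with made-up packet records and timestamps:

/* Toy restatement of the RACK rule in tcp_rack_mark_lost() above. */
#include <stdbool.h>
#include <stdio.h>

struct pkt {
	unsigned int seq;
	unsigned long long sent_us;	/* transmit timestamp */
	bool sacked;			/* already delivered (acked/sacked) */
	bool lost;
};

static void rack_mark_lost(struct pkt *pkts, int n,
			   unsigned long long rack_sent_us,
			   unsigned long long reo_wnd_us)
{
	int i;

	for (i = 0; i < n; i++) {
		if (pkts[i].sacked)
			continue;	/* skip ones already (s)acked */
		/* lost if something sent more than reo_wnd later was delivered */
		if (rack_sent_us > pkts[i].sent_us &&
		    rack_sent_us - pkts[i].sent_us > reo_wnd_us)
			pkts[i].lost = true;
	}
}

int main(void)
{
	struct pkt pkts[] = {
		{ 1000, 100, false, false },	/* old and still unacked */
		{ 2000, 148, false, false },	/* sent just before the sacked one */
		{ 3000, 151, true,  false },	/* most recently delivered packet */
	};
	unsigned long long rack_sent = 151;	/* analogue of tp->rack.mstamp */
	int i;

	rack_mark_lost(pkts, 3, rack_sent, 10 /* reo_wnd settling delay */);
	for (i = 0; i < 3; i++)
		printf("seq %u: %s\n", pkts[i].seq,
		       pkts[i].lost ? "lost" : "still outstanding");
	return 0;
}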
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d135350495e8..d0c685cdc345 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4788,7 +4788,8 @@ nla_put_failure:
return -EMSGSIZE;
}
-static size_t inet6_get_link_af_size(const struct net_device *dev)
+static size_t inet6_get_link_af_size(const struct net_device *dev,
+ u32 ext_filter_mask)
{
if (!__in6_dev_get(dev))
return 0;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 08b62047c67f..eeca943f12dc 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -264,6 +264,9 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
int err = -ENOSYS;
+ if (skb->encapsulation)
+ skb_set_inner_network_header(skb, nhoff);
+
iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
rcu_read_lock();
@@ -280,6 +283,13 @@ out_unlock:
return err;
}
+static int sit_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ skb->encapsulation = 1;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_SIT;
+ return ipv6_gro_complete(skb, nhoff);
+}
+
static struct packet_offload ipv6_packet_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_IPV6),
.callbacks = {
@@ -292,6 +302,8 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
static const struct net_offload sit_offload = {
.callbacks = {
.gso_segment = ipv6_gso_segment,
+ .gro_receive = ipv6_gro_receive,
+ .gro_complete = sit_gro_complete,
},
};
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f495d189f5e0..714bc5ad096e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -965,7 +965,9 @@ drop:
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst)
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req)
{
struct inet_request_sock *ireq;
struct ipv6_pinfo *newnp;
@@ -984,7 +986,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
* v6 mapped
*/
- newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
+ newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
+ req_unhash, own_req);
if (!newsk)
return NULL;
@@ -1145,7 +1148,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
tcp_done(newsk);
goto out;
}
- __inet_hash(newsk, NULL);
+ *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
return newsk;
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 783e891b7525..f9137a8341f4 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -27,7 +27,6 @@ mac80211-y := \
key.o \
util.o \
wme.o \
- event.o \
chan.o \
trace.o mlme.o \
tdls.o \
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 68e551e263c6..713cdbf6fb3c 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -17,7 +17,6 @@
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
-#include "cfg.h"
#include "rate.h"
#include "mesh.h"
#include "wme.h"
@@ -469,45 +468,6 @@ void sta_set_rate_info_tx(struct sta_info *sta,
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
}
-void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
-{
- rinfo->flags = 0;
-
- if (sta->last_rx_rate_flag & RX_FLAG_HT) {
- rinfo->flags |= RATE_INFO_FLAGS_MCS;
- rinfo->mcs = sta->last_rx_rate_idx;
- } else if (sta->last_rx_rate_flag & RX_FLAG_VHT) {
- rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
- rinfo->nss = sta->last_rx_rate_vht_nss;
- rinfo->mcs = sta->last_rx_rate_idx;
- } else {
- struct ieee80211_supported_band *sband;
- int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
- u16 brate;
-
- sband = sta->local->hw.wiphy->bands[
- ieee80211_get_sdata_band(sta->sdata)];
- brate = sband->bitrates[sta->last_rx_rate_idx].bitrate;
- rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
- }
-
- if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
- rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
-
- if (sta->last_rx_rate_flag & RX_FLAG_5MHZ)
- rinfo->bw = RATE_INFO_BW_5;
- else if (sta->last_rx_rate_flag & RX_FLAG_10MHZ)
- rinfo->bw = RATE_INFO_BW_10;
- else if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
- rinfo->bw = RATE_INFO_BW_40;
- else if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_80MHZ)
- rinfo->bw = RATE_INFO_BW_80;
- else if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_160MHZ)
- rinfo->bw = RATE_INFO_BW_160;
- else
- rinfo->bw = RATE_INFO_BW_20;
-}
-
static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo)
{
@@ -1138,6 +1098,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
}
if (mask & BIT(NL80211_STA_FLAG_MFP)) {
+ sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP));
if (set & BIT(NL80211_STA_FLAG_MFP))
set_sta_flag(sta, WLAN_STA_MFP);
else
@@ -1427,7 +1388,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
ieee80211_recalc_ps_vif(sdata);
}
@@ -2462,7 +2423,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
ieee80211_recalc_ps_vif(sdata);
return 0;
diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
deleted file mode 100644
index 2d51f62dc76c..000000000000
--- a/net/mac80211/cfg.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * mac80211 configuration hooks for cfg80211
- */
-#ifndef __CFG_H
-#define __CFG_H
-
-extern const struct cfg80211_ops mac80211_config_ops;
-
-#endif /* __CFG_H */
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 06d52935036d..a39512f09f9e 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -50,7 +50,6 @@ static const struct file_operations sta_ ##name## _ops = { \
STA_OPS(name)
STA_FILE(aid, sta.aid, D);
-STA_FILE(last_ack_signal, last_ack_signal, D);
static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
@@ -366,11 +365,10 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD(agg_status);
DEBUGFS_ADD(ht_capa);
DEBUGFS_ADD(vht_capa);
- DEBUGFS_ADD(last_ack_signal);
- DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates);
- DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments);
- DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count);
+ DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates);
+ DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments);
+ DEBUGFS_ADD_COUNTER(tx_filtered, status_stats.filtered);
if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
debugfs_create_x32("driver_buffered_tids", 0400,
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index 188faab11c24..9cc986deda61 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -40,7 +40,7 @@ static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
"rx_duplicates", "rx_fragments", "rx_dropped",
"tx_packets", "tx_bytes",
"tx_filtered", "tx_retry_failed", "tx_retries",
- "beacon_loss", "sta_state", "txrate", "rxrate", "signal",
+ "sta_state", "txrate", "rxrate", "signal",
"channel", "noise", "ch_time", "ch_time_busy",
"ch_time_ext_busy", "ch_time_rx", "ch_time_tx"
};
@@ -77,20 +77,19 @@ static void ieee80211_get_stats(struct net_device *dev,
memset(data, 0, sizeof(u64) * STA_STATS_LEN);
-#define ADD_STA_STATS(sta) \
- do { \
- data[i++] += sta->rx_packets; \
- data[i++] += sta->rx_bytes; \
- data[i++] += sta->num_duplicates; \
- data[i++] += sta->rx_fragments; \
- data[i++] += sta->rx_dropped; \
- \
- data[i++] += sinfo.tx_packets; \
- data[i++] += sinfo.tx_bytes; \
- data[i++] += sta->tx_filtered_count; \
- data[i++] += sta->tx_retry_failed; \
- data[i++] += sta->tx_retry_count; \
- data[i++] += sta->beacon_loss_count; \
+#define ADD_STA_STATS(sta) \
+ do { \
+ data[i++] += sta->rx_stats.packets; \
+ data[i++] += sta->rx_stats.bytes; \
+ data[i++] += sta->rx_stats.num_duplicates; \
+ data[i++] += sta->rx_stats.fragments; \
+ data[i++] += sta->rx_stats.dropped; \
+ \
+ data[i++] += sinfo.tx_packets; \
+ data[i++] += sinfo.tx_bytes; \
+ data[i++] += sta->status_stats.filtered; \
+ data[i++] += sta->status_stats.retry_failed; \
+ data[i++] += sta->status_stats.retry_count; \
} while (0)
/* For Managed stations, find the single station based on BSSID
diff --git a/net/mac80211/event.c b/net/mac80211/event.c
deleted file mode 100644
index 01ae759518f6..000000000000
--- a/net/mac80211/event.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * mac80211 - events
- */
-#include <net/cfg80211.h>
-#include "ieee80211_i.h"
-
-/*
- * Indicate a failed Michael MIC to userspace. If the caller knows the TSC of
- * the frame that generated the MIC failure (i.e., if it was provided by the
- * driver or is still in the frame), it should provide that information.
- */
-void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
- struct ieee80211_hdr *hdr, const u8 *tsc,
- gfp_t gfp)
-{
- cfg80211_michael_mic_failure(sdata->dev, hdr->addr2,
- (hdr->addr1[0] & 0x01) ?
- NL80211_KEYTYPE_GROUP :
- NL80211_KEYTYPE_PAIRWISE,
- keyidx, tsc, gfp);
-}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 7f72bc9bae2e..2001555d49cb 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -229,7 +229,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
struct cfg80211_chan_def chandef;
struct ieee80211_channel *chan;
struct beacon_data *presp;
- enum nl80211_bss_scan_width scan_width;
+ struct cfg80211_inform_bss bss_meta = {};
bool have_higher_than_11mbit;
bool radar_required;
int err;
@@ -383,10 +383,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
mod_timer(&ifibss->timer,
round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
- scan_width = cfg80211_chandef_to_scan_width(&chandef);
- bss = cfg80211_inform_bss_width_frame(local->hw.wiphy, chan,
- scan_width, mgmt,
- presp->head_len, 0, GFP_KERNEL);
+ bss_meta.chan = chan;
+ bss_meta.scan_width = cfg80211_chandef_to_scan_width(&chandef);
+ bss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta, mgmt,
+ presp->head_len, GFP_KERNEL);
+
cfg80211_put_bss(local->hw.wiphy, bss);
netif_carrier_on(sdata->dev);
cfg80211_ibss_joined(sdata->dev, ifibss->bssid, chan, GFP_KERNEL);
@@ -646,7 +647,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
return NULL;
}
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
@@ -668,7 +669,8 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sta->sdata == sdata &&
- time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL,
+ time_after(sta->rx_stats.last_rx +
+ IEEE80211_IBSS_MERGE_INTERVAL,
jiffies)) {
active++;
break;
@@ -1234,7 +1236,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
if (!sta)
return;
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
@@ -1252,7 +1254,7 @@ static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata)
struct ieee80211_local *local = sdata->local;
struct sta_info *sta, *tmp;
unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT;
- unsigned long exp_rsn_time = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT;
+ unsigned long exp_rsn = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT;
mutex_lock(&local->sta_mtx);
@@ -1260,8 +1262,8 @@ static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata)
if (sdata != sta->sdata)
continue;
- if (time_after(jiffies, sta->last_rx + exp_time) ||
- (time_after(jiffies, sta->last_rx + exp_rsn_time) &&
+ if (time_after(jiffies, sta->rx_stats.last_rx + exp_time) ||
+ (time_after(jiffies, sta->rx_stats.last_rx + exp_rsn) &&
sta->sta_state != IEEE80211_STA_AUTHORIZED)) {
sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n",
sta->sta_state != IEEE80211_STA_AUTHORIZED ?
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f9605f13def9..62f2a97cd2a6 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -34,6 +34,8 @@
#include "sta_info.h"
#include "debug.h"
+extern const struct cfg80211_ops mac80211_config_ops;
+
struct ieee80211_local;
/* Maximum number of broadcast/multicast frames to buffer when some of the
@@ -501,6 +503,9 @@ struct ieee80211_if_managed {
*/
unsigned int count_beacon_signal;
+ /* Number of times beacon loss was invoked. */
+ unsigned int beacon_loss_count;
+
/*
* Last Beacon frame signal strength average (ave_beacon_signal / 16)
* that triggered a cqm event. 0 indicates that no event has been
@@ -1305,7 +1310,6 @@ struct ieee80211_local {
struct work_struct dynamic_ps_enable_work;
struct work_struct dynamic_ps_disable_work;
struct timer_list dynamic_ps_timer;
- struct notifier_block network_latency_notifier;
struct notifier_block ifa_notifier;
struct notifier_block ifa6_notifier;
@@ -1491,10 +1495,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
struct cfg80211_disassoc_request *req);
void ieee80211_send_pspoll(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
-void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
+void ieee80211_recalc_ps(struct ieee80211_local *local);
void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
-int ieee80211_max_network_latency(struct notifier_block *nb,
- unsigned long data, void *dummy);
int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
@@ -1766,9 +1768,6 @@ extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
int rate, int erp, int short_preamble,
int shift);
-void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
- struct ieee80211_hdr *hdr, const u8 *tsc,
- gfp_t gfp);
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
bool bss_notify);
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 42d7f0f65bd6..f848c75518a2 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -709,7 +709,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
if (hw_reconf_flags)
ieee80211_hw_config(local, hw_reconf_flags);
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
@@ -1016,7 +1016,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
drv_remove_interface(local, sdata);
}
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
if (cancel_scan)
flush_delayed_work(&local->scan_work);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 9b813a2f3a75..273c96de4910 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -20,7 +20,6 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
-#include <linux/pm_qos.h>
#include <linux/inetdevice.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
@@ -32,7 +31,6 @@
#include "mesh.h"
#include "wep.h"
#include "led.h"
-#include "cfg.h"
#include "debugfs.h"
void ieee80211_configure_filter(struct ieee80211_local *local)
@@ -1083,13 +1081,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
rtnl_unlock();
- local->network_latency_notifier.notifier_call =
- ieee80211_max_network_latency;
- result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
- &local->network_latency_notifier);
- if (result)
- goto fail_pm_qos;
-
#ifdef CONFIG_INET
local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
result = register_inetaddr_notifier(&local->ifa_notifier);
@@ -1114,10 +1105,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
#endif
#if defined(CONFIG_INET) || defined(CONFIG_IPV6)
fail_ifa:
- pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
- &local->network_latency_notifier);
#endif
- fail_pm_qos:
rtnl_lock();
rate_control_deinitialize(local);
ieee80211_remove_interfaces(local);
@@ -1143,8 +1131,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
tasklet_kill(&local->tx_pending_tasklet);
tasklet_kill(&local->tasklet);
- pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
- &local->network_latency_notifier);
#ifdef CONFIG_INET
unregister_inetaddr_notifier(&local->ifa_notifier);
#endif
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index d80e0a4c16cf..c6be0b4f4058 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -329,7 +329,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
if (sta->mesh->fail_avg >= 100)
return MAX_METRIC;
- sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
+ sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo);
rate = cfg80211_calculate_bitrate(&rinfo);
if (WARN_ON(!rate))
return MAX_METRIC;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index a360b24b7df8..c1f889270484 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -60,7 +60,9 @@ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
{
s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold;
return rssi_threshold == 0 ||
- (sta && (s8) -ewma_signal_read(&sta->avg_signal) > rssi_threshold);
+ (sta &&
+ (s8)-ewma_signal_read(&sta->rx_stats.avg_signal) >
+ rssi_threshold);
}
/**
@@ -390,7 +392,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
spin_lock_bh(&sta->mesh->plink_lock);
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
/* rates and capabilities don't change during peering */
if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 56ef9a8e151c..ded4b976bb48 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -20,7 +20,6 @@
#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -1476,7 +1475,7 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
}
/* need to hold RTNL or interface lock */
-void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
+void ieee80211_recalc_ps(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata, *found = NULL;
int count = 0;
@@ -1505,48 +1504,23 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
}
if (count == 1 && ieee80211_powersave_allowed(found)) {
+ u8 dtimper = found->u.mgd.dtim_period;
s32 beaconint_us;
- if (latency < 0)
- latency = pm_qos_request(PM_QOS_NETWORK_LATENCY);
-
beaconint_us = ieee80211_tu_to_usec(
found->vif.bss_conf.beacon_int);
timeout = local->dynamic_ps_forced_timeout;
- if (timeout < 0) {
- /*
- * Go to full PSM if the user configures a very low
- * latency requirement.
- * The 2000 second value is there for compatibility
- * until the PM_QOS_NETWORK_LATENCY is configured
- * with real values.
- */
- if (latency > (1900 * USEC_PER_MSEC) &&
- latency != (2000 * USEC_PER_SEC))
- timeout = 0;
- else
- timeout = 100;
- }
+ if (timeout < 0)
+ timeout = 100;
local->hw.conf.dynamic_ps_timeout = timeout;
- if (beaconint_us > latency) {
- local->ps_sdata = NULL;
- } else {
- int maxslp = 1;
- u8 dtimper = found->u.mgd.dtim_period;
-
- /* If the TIM IE is invalid, pretend the value is 1 */
- if (!dtimper)
- dtimper = 1;
- else if (dtimper > 1)
- maxslp = min_t(int, dtimper,
- latency / beaconint_us);
-
- local->hw.conf.max_sleep_period = maxslp;
- local->hw.conf.ps_dtim_period = dtimper;
- local->ps_sdata = found;
- }
+ /* If the TIM IE is invalid, pretend the value is 1 */
+ if (!dtimper)
+ dtimper = 1;
+
+ local->hw.conf.ps_dtim_period = dtimper;
+ local->ps_sdata = found;
} else {
local->ps_sdata = NULL;
}
@@ -1997,7 +1971,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
ieee80211_bss_info_change_notify(sdata, bss_info_changed);
mutex_lock(&local->iflist_mtx);
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
mutex_unlock(&local->iflist_mtx);
ieee80211_recalc_smps(sdata);
@@ -2165,7 +2139,7 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
__ieee80211_stop_poll(sdata);
mutex_lock(&local->iflist_mtx);
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
mutex_unlock(&local->iflist_mtx);
if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
@@ -2341,7 +2315,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
goto out;
mutex_lock(&sdata->local->iflist_mtx);
- ieee80211_recalc_ps(sdata->local, -1);
+ ieee80211_recalc_ps(sdata->local);
mutex_unlock(&sdata->local->iflist_mtx);
ifmgd->probe_send_count = 0;
@@ -2446,15 +2420,9 @@ static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
container_of(work, struct ieee80211_sub_if_data,
u.mgd.beacon_connection_loss_work);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct sta_info *sta;
- if (ifmgd->associated) {
- rcu_read_lock();
- sta = sta_info_get(sdata, ifmgd->bssid);
- if (sta)
- sta->beacon_loss_count++;
- rcu_read_unlock();
- }
+ if (ifmgd->associated)
+ ifmgd->beacon_loss_count++;
if (ifmgd->connection_loss) {
sdata_info(sdata, "Connection to AP %pM lost\n",
@@ -3044,8 +3012,12 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
rate_control_rate_init(sta);
- if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
+ if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
set_sta_flag(sta, WLAN_STA_MFP);
+ sta->sta.mfp = true;
+ } else {
+ sta->sta.mfp = false;
+ }
sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
@@ -3544,7 +3516,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ifmgd->have_beacon = true;
mutex_lock(&local->iflist_mtx);
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
mutex_unlock(&local->iflist_mtx);
ieee80211_recalc_ps_vif(sdata);
@@ -4148,21 +4120,6 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
rcu_read_unlock();
}
-int ieee80211_max_network_latency(struct notifier_block *nb,
- unsigned long data, void *dummy)
-{
- s32 latency_usec = (s32) data;
- struct ieee80211_local *local =
- container_of(nb, struct ieee80211_local,
- network_latency_notifier);
-
- mutex_lock(&local->iflist_mtx);
- ieee80211_recalc_ps(local, latency_usec);
- mutex_unlock(&local->iflist_mtx);
-
- return NOTIFY_OK;
-}
-
static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss)
{
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 573b81a1fb2d..0be0aadfc559 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -75,7 +75,7 @@ void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata,
if (!sta)
return;
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
/* Add only mandatory rates for now */
sband = local->hw.wiphy->bands[band];
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5bc0b88d9eb1..8bae5de0dc44 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1113,16 +1113,16 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
is_multicast_ether_addr(hdr->addr1))
return RX_CONTINUE;
- if (rx->sta) {
- if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
- rx->sta->last_seq_ctrl[rx->seqno_idx] ==
- hdr->seq_ctrl)) {
- I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
- rx->sta->num_duplicates++;
- return RX_DROP_UNUSABLE;
- } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
- rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
- }
+ if (!rx->sta)
+ return RX_CONTINUE;
+
+ if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
+ rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
+ I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
+ rx->sta->rx_stats.num_duplicates++;
+ return RX_DROP_UNUSABLE;
+ } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
+ rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
}
return RX_CONTINUE;
@@ -1396,51 +1396,56 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
NL80211_IFTYPE_ADHOC);
if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
if (ieee80211_is_data(hdr->frame_control) &&
!is_multicast_ether_addr(hdr->addr1)) {
- sta->last_rx_rate_idx = status->rate_idx;
- sta->last_rx_rate_flag = status->flag;
- sta->last_rx_rate_vht_flag = status->vht_flag;
- sta->last_rx_rate_vht_nss = status->vht_nss;
+ sta->rx_stats.last_rate_idx =
+ status->rate_idx;
+ sta->rx_stats.last_rate_flag =
+ status->flag;
+ sta->rx_stats.last_rate_vht_flag =
+ status->vht_flag;
+ sta->rx_stats.last_rate_vht_nss =
+ status->vht_nss;
}
}
} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
} else if (!is_multicast_ether_addr(hdr->addr1)) {
/*
* Mesh beacons will update last_rx if they are found to
* match the current local configuration when processed.
*/
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
if (ieee80211_is_data(hdr->frame_control)) {
- sta->last_rx_rate_idx = status->rate_idx;
- sta->last_rx_rate_flag = status->flag;
- sta->last_rx_rate_vht_flag = status->vht_flag;
- sta->last_rx_rate_vht_nss = status->vht_nss;
+ sta->rx_stats.last_rate_idx = status->rate_idx;
+ sta->rx_stats.last_rate_flag = status->flag;
+ sta->rx_stats.last_rate_vht_flag = status->vht_flag;
+ sta->rx_stats.last_rate_vht_nss = status->vht_nss;
}
}
if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
ieee80211_sta_rx_notify(rx->sdata, hdr);
- sta->rx_fragments++;
- sta->rx_bytes += rx->skb->len;
+ sta->rx_stats.fragments++;
+ sta->rx_stats.bytes += rx->skb->len;
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
- sta->last_signal = status->signal;
- ewma_signal_add(&sta->avg_signal, -status->signal);
+ sta->rx_stats.last_signal = status->signal;
+ ewma_signal_add(&sta->rx_stats.avg_signal, -status->signal);
}
if (status->chains) {
- sta->chains = status->chains;
+ sta->rx_stats.chains = status->chains;
for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
int signal = status->chain_signal[i];
if (!(status->chains & BIT(i)))
continue;
- sta->chain_signal_last[i] = signal;
- ewma_signal_add(&sta->chain_signal_avg[i], -signal);
+ sta->rx_stats.chain_signal_last[i] = signal;
+ ewma_signal_add(&sta->rx_stats.chain_signal_avg[i],
+ -signal);
}
}
@@ -1500,7 +1505,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
* Update counter and free packet here to avoid
* counting this as a dropped packed.
*/
- sta->rx_packets++;
+ sta->rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
}
@@ -1922,7 +1927,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
ieee80211_led_rx(rx->local);
out_no_led:
if (rx->sta)
- rx->sta->rx_packets++;
+ rx->sta->rx_stats.packets++;
return RX_CONTINUE;
}
@@ -2376,7 +2381,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
* for non-QoS-data frames. Here we know it's a data
* frame, so count MSDUs.
*/
- rx->sta->rx_msdu[rx->seqno_idx]++;
+ rx->sta->rx_stats.msdu[rx->seqno_idx]++;
}
/*
@@ -2413,7 +2418,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
schedule_work(&local->tdls_chsw_work);
if (rx->sta)
- rx->sta->rx_packets++;
+ rx->sta->rx_stats.packets++;
return RX_QUEUED;
}
@@ -2875,7 +2880,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
handled:
if (rx->sta)
- rx->sta->rx_packets++;
+ rx->sta->rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
@@ -2884,7 +2889,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
skb_queue_tail(&sdata->skb_queue, rx->skb);
ieee80211_queue_work(&local->hw, &sdata->work);
if (rx->sta)
- rx->sta->rx_packets++;
+ rx->sta->rx_stats.packets++;
return RX_QUEUED;
}
@@ -2911,7 +2916,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
rx->skb->data, rx->skb->len, 0)) {
if (rx->sta)
- rx->sta->rx_packets++;
+ rx->sta->rx_stats.packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
}
@@ -3030,7 +3035,7 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
skb_queue_tail(&sdata->skb_queue, rx->skb);
ieee80211_queue_work(&rx->local->hw, &sdata->work);
if (rx->sta)
- rx->sta->rx_packets++;
+ rx->sta->rx_stats.packets++;
return RX_QUEUED;
}
@@ -3112,7 +3117,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
case RX_DROP_MONITOR:
I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
if (rx->sta)
- rx->sta->rx_dropped++;
+ rx->sta->rx_stats.dropped++;
/* fall through */
case RX_CONTINUE: {
struct ieee80211_rate *rate = NULL;
@@ -3132,7 +3137,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
case RX_DROP_UNUSABLE:
I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
if (rx->sta)
- rx->sta->rx_dropped++;
+ rx->sta->rx_stats.dropped++;
dev_kfree_skb(rx->skb);
break;
case RX_QUEUED:
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 11d0901ebb7b..b64fd2b2d95a 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -16,7 +16,6 @@
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos.h>
#include <net/sch_generic.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -67,24 +66,23 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
int clen, srlen;
- enum nl80211_bss_scan_width scan_width;
- s32 signal = 0;
+ struct cfg80211_inform_bss bss_meta = {};
bool signal_valid;
if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
- signal = rx_status->signal * 100;
+ bss_meta.signal = rx_status->signal * 100;
else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC))
- signal = (rx_status->signal * 100) / local->hw.max_signal;
+ bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal;
- scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_20;
if (rx_status->flag & RX_FLAG_5MHZ)
- scan_width = NL80211_BSS_CHAN_WIDTH_5;
+ bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_5;
if (rx_status->flag & RX_FLAG_10MHZ)
- scan_width = NL80211_BSS_CHAN_WIDTH_10;
+ bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10;
- cbss = cfg80211_inform_bss_width_frame(local->hw.wiphy, channel,
- scan_width, mgmt, len, signal,
- GFP_ATOMIC);
+ bss_meta.chan = channel;
+ cbss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta,
+ mgmt, len, GFP_ATOMIC);
if (!cbss)
return NULL;
/* In case the signal is invalid update the status */
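Illustrative sketch (not part of the patch): cfg80211_inform_bss_frame_data() as used above takes channel, scan width and signal through a struct cfg80211_inform_bss rather than separate arguments. A minimal caller-side example, where wiphy, channel, mgmt, len and signal_mbm are assumed placeholders for what the caller already has:

	struct cfg80211_inform_bss data = {
		.chan = channel,                         /* receive channel */
		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
		.signal = signal_mbm,                    /* mBm when SIGNAL_DBM is set */
	};
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, GFP_ATOMIC);
	if (bss)
		cfg80211_put_bss(wiphy, bss);            /* drop the reference returned to us */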
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index c3644458e2ee..f91d1873218c 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -331,7 +331,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
memcpy(sta->sta.addr, addr, ETH_ALEN);
sta->local = local;
sta->sdata = sdata;
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
sta->sta_state = IEEE80211_STA_NONE;
@@ -339,9 +339,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->reserved_tid = IEEE80211_TID_UNRESERVED;
sta->last_connected = ktime_get_seconds();
- ewma_signal_init(&sta->avg_signal);
- for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
- ewma_signal_init(&sta->chain_signal_avg[i]);
+ ewma_signal_init(&sta->rx_stats.avg_signal);
+ for (i = 0; i < ARRAY_SIZE(sta->rx_stats.chain_signal_avg); i++)
+ ewma_signal_init(&sta->rx_stats.chain_signal_avg[i]);
if (local->ops->wake_tx_queue) {
void *txq_data;
@@ -1066,7 +1066,7 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
if (sdata != sta->sdata)
continue;
- if (time_after(jiffies, sta->last_rx + exp_time)) {
+ if (time_after(jiffies, sta->rx_stats.last_rx + exp_time)) {
sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
sta->sta.addr);
@@ -1806,6 +1806,45 @@ u8 sta_info_tx_streams(struct sta_info *sta)
>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
}
+static void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
+{
+ rinfo->flags = 0;
+
+ if (sta->rx_stats.last_rate_flag & RX_FLAG_HT) {
+ rinfo->flags |= RATE_INFO_FLAGS_MCS;
+ rinfo->mcs = sta->rx_stats.last_rate_idx;
+ } else if (sta->rx_stats.last_rate_flag & RX_FLAG_VHT) {
+ rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ rinfo->nss = sta->rx_stats.last_rate_vht_nss;
+ rinfo->mcs = sta->rx_stats.last_rate_idx;
+ } else {
+ struct ieee80211_supported_band *sband;
+ int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+ u16 brate;
+
+ sband = sta->local->hw.wiphy->bands[
+ ieee80211_get_sdata_band(sta->sdata)];
+ brate = sband->bitrates[sta->rx_stats.last_rate_idx].bitrate;
+ rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
+ }
+
+ if (sta->rx_stats.last_rate_flag & RX_FLAG_SHORT_GI)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ if (sta->rx_stats.last_rate_flag & RX_FLAG_5MHZ)
+ rinfo->bw = RATE_INFO_BW_5;
+ else if (sta->rx_stats.last_rate_flag & RX_FLAG_10MHZ)
+ rinfo->bw = RATE_INFO_BW_10;
+ else if (sta->rx_stats.last_rate_flag & RX_FLAG_40MHZ)
+ rinfo->bw = RATE_INFO_BW_40;
+ else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_80MHZ)
+ rinfo->bw = RATE_INFO_BW_80;
+ else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_160MHZ)
+ rinfo->bw = RATE_INFO_BW_160;
+ else
+ rinfo->bw = RATE_INFO_BW_20;
+}
+
void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -1832,50 +1871,54 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
BIT(NL80211_STA_INFO_STA_FLAGS) |
BIT(NL80211_STA_INFO_BSS_PARAM) |
BIT(NL80211_STA_INFO_CONNECTED_TIME) |
- BIT(NL80211_STA_INFO_RX_DROP_MISC) |
- BIT(NL80211_STA_INFO_BEACON_LOSS);
+ BIT(NL80211_STA_INFO_RX_DROP_MISC);
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count;
+ sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_LOSS);
+ }
sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
- sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
+ sinfo->inactive_time =
+ jiffies_to_msecs(jiffies - sta->rx_stats.last_rx);
if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) |
BIT(NL80211_STA_INFO_TX_BYTES)))) {
sinfo->tx_bytes = 0;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- sinfo->tx_bytes += sta->tx_bytes[ac];
+ sinfo->tx_bytes += sta->tx_stats.bytes[ac];
sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_PACKETS))) {
sinfo->tx_packets = 0;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- sinfo->tx_packets += sta->tx_packets[ac];
+ sinfo->tx_packets += sta->tx_stats.packets[ac];
sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
}
if (!(sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES64) |
BIT(NL80211_STA_INFO_RX_BYTES)))) {
- sinfo->rx_bytes = sta->rx_bytes;
+ sinfo->rx_bytes = sta->rx_stats.bytes;
sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_PACKETS))) {
- sinfo->rx_packets = sta->rx_packets;
+ sinfo->rx_packets = sta->rx_stats.packets;
sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_RETRIES))) {
- sinfo->tx_retries = sta->tx_retry_count;
+ sinfo->tx_retries = sta->status_stats.retry_count;
sinfo->filled |= BIT(NL80211_STA_INFO_TX_RETRIES);
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_FAILED))) {
- sinfo->tx_failed = sta->tx_retry_failed;
+ sinfo->tx_failed = sta->status_stats.retry_failed;
sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
}
- sinfo->rx_dropped_misc = sta->rx_dropped;
- sinfo->beacon_loss_count = sta->beacon_loss_count;
+ sinfo->rx_dropped_misc = sta->rx_stats.dropped;
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
!(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
@@ -1887,33 +1930,35 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) {
- sinfo->signal = (s8)sta->last_signal;
+ sinfo->signal = (s8)sta->rx_stats.last_signal;
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
sinfo->signal_avg =
- (s8) -ewma_signal_read(&sta->avg_signal);
+ -ewma_signal_read(&sta->rx_stats.avg_signal);
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
}
}
- if (sta->chains &&
+ if (sta->rx_stats.chains &&
!(sinfo->filled & (BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
- sinfo->chains = sta->chains;
+ sinfo->chains = sta->rx_stats.chains;
for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
- sinfo->chain_signal[i] = sta->chain_signal_last[i];
+ sinfo->chain_signal[i] =
+ sta->rx_stats.chain_signal_last[i];
sinfo->chain_signal_avg[i] =
- (s8) -ewma_signal_read(&sta->chain_signal_avg[i]);
+ -ewma_signal_read(&sta->rx_stats.chain_signal_avg[i]);
}
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE))) {
- sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate);
+ sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate,
+ &sinfo->txrate);
sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
}
@@ -1928,12 +1973,12 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
- tidstats->rx_msdu = sta->rx_msdu[i];
+ tidstats->rx_msdu = sta->rx_stats.msdu[i];
}
if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
- tidstats->tx_msdu = sta->tx_msdu[i];
+ tidstats->tx_msdu = sta->tx_stats.msdu[i];
}
if (!(tidstats->filled &
@@ -1941,7 +1986,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
tidstats->filled |=
BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
- tidstats->tx_msdu_retries = sta->tx_msdu_retries[i];
+ tidstats->tx_msdu_retries =
+ sta->status_stats.msdu_retries[i];
}
if (!(tidstats->filled &
@@ -1949,7 +1995,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
tidstats->filled |=
BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
- tidstats->tx_msdu_failed = sta->tx_msdu_failed[i];
+ tidstats->tx_msdu_failed =
+ sta->status_stats.msdu_failed[i];
}
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index d5ded8749ac4..2cafb21b422f 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -344,12 +344,6 @@ DECLARE_EWMA(signal, 1024, 8)
* @rate_ctrl_lock: spinlock used to protect rate control data
* (data inside the algorithm, so serializes calls there)
* @rate_ctrl_priv: rate control private per-STA pointer
- * @last_tx_rate: rate used for last transmit, to report to userspace as
- * "the" transmit rate
- * @last_rx_rate_idx: rx status rate index of the last data packet
- * @last_rx_rate_flag: rx status flag of the last data packet
- * @last_rx_rate_vht_flag: rx status vht flag of the last data packet
- * @last_rx_rate_vht_nss: rx status nss of last data packet
* @lock: used for locking all fields that require locking, see comments
* in the header file.
* @drv_deliver_wk: used for delivering frames after driver PS unblocking
@@ -364,23 +358,9 @@ DECLARE_EWMA(signal, 1024, 8)
* the station when it leaves powersave or polls for frames
* @driver_buffered_tids: bitmap of TIDs the driver has data buffered on
* @txq_buffered_tids: bitmap of TIDs that mac80211 has txq data buffered on
- * @rx_packets: Number of MSDUs received from this STA
- * @rx_bytes: Number of bytes received from this STA
- * @last_rx: time (in jiffies) when last frame was received from this STA
* @last_connected: time (in seconds) when a station got connected
- * @num_duplicates: number of duplicate frames received from this STA
- * @rx_fragments: number of received MPDUs
- * @rx_dropped: number of dropped MPDUs from this STA
- * @last_signal: signal of last received frame from this STA
- * @avg_signal: moving average of signal of received frames from this STA
- * @last_ack_signal: signal of last received Ack frame from this STA
* @last_seq_ctrl: last received seq/frag number from this STA (per TID
* plus one for non-QoS frames)
- * @tx_filtered_count: number of frames the hardware filtered for this STA
- * @tx_retry_failed: number of frames that failed retry
- * @tx_retry_count: total number of retries for frames to this STA
- * @tx_packets: number of RX/TX MSDUs
- * @tx_bytes: number of bytes transmitted to this STA
* @tid_seq: per-TID sequence numbers for sending to this STA
* @ampdu_mlme: A-MPDU state machine state
* @timer_to_tid: identity mapping to ID timers
@@ -388,32 +368,22 @@ DECLARE_EWMA(signal, 1024, 8)
* @debugfs: debug filesystem info
* @dead: set to true when sta is unlinked
* @uploaded: set to true when sta is uploaded to the driver
- * @lost_packets: number of consecutive lost packets
* @sta: station information we share with the driver
* @sta_state: duplicates information about station state (for debug)
* @beacon_loss_count: number of times beacon loss has triggered
* @rcu_head: RCU head used for freeing this station struct
* @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
* taken from HT/VHT capabilities or VHT operating mode notification
- * @chains: chains ever used for RX from this station
- * @chain_signal_last: last signal (per chain)
- * @chain_signal_avg: signal average (per chain)
* @known_smps_mode: the smps_mode the client thinks we are in. Relevant for
* AP only.
* @cipher_scheme: optional cipher scheme for this station
- * @last_tdls_pkt_time: holds the time in jiffies of last TDLS pkt ACKed
* @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED)
- * @tx_msdu: MSDUs transmitted to this station, using IEEE80211_NUM_TID
- * entry for non-QoS frames
- * @tx_msdu_retries: MSDU retries for transmissions to to this station,
- * using IEEE80211_NUM_TID entry for non-QoS frames
- * @tx_msdu_failed: MSDU failures for transmissions to to this station,
- * using IEEE80211_NUM_TID entry for non-QoS frames
- * @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID
- * entry for non-QoS frames
* @fast_tx: TX fastpath information
* @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to
* the BSS one.
+ * @tx_stats: TX statistics
+ * @rx_stats: RX statistics
+ * @status_stats: TX status statistics
*/
struct sta_info {
/* General information, mostly static */
@@ -457,42 +427,49 @@ struct sta_info {
unsigned long driver_buffered_tids;
unsigned long txq_buffered_tids;
- /* Updated from RX path only, no locking requirements */
- unsigned long rx_packets;
- u64 rx_bytes;
- unsigned long last_rx;
long last_connected;
- unsigned long num_duplicates;
- unsigned long rx_fragments;
- unsigned long rx_dropped;
- int last_signal;
- struct ewma_signal avg_signal;
- int last_ack_signal;
- u8 chains;
- s8 chain_signal_last[IEEE80211_MAX_CHAINS];
- struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
+ /* Updated from RX path only, no locking requirements */
+ struct {
+ unsigned long packets;
+ u64 bytes;
+ unsigned long last_rx;
+ unsigned long num_duplicates;
+ unsigned long fragments;
+ unsigned long dropped;
+ int last_signal;
+ struct ewma_signal avg_signal;
+ u8 chains;
+ s8 chain_signal_last[IEEE80211_MAX_CHAINS];
+ struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
+ int last_rate_idx;
+ u32 last_rate_flag;
+ u32 last_rate_vht_flag;
+ u8 last_rate_vht_nss;
+ u64 msdu[IEEE80211_NUM_TIDS + 1];
+ } rx_stats;
/* Plus 1 for non-QoS frames */
__le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
/* Updated from TX status path only, no locking requirements */
- unsigned long tx_filtered_count;
- unsigned long tx_retry_failed, tx_retry_count;
+ struct {
+ unsigned long filtered;
+ unsigned long retry_failed, retry_count;
+ unsigned int lost_packets;
+ unsigned long last_tdls_pkt_time;
+ u64 msdu_retries[IEEE80211_NUM_TIDS + 1];
+ u64 msdu_failed[IEEE80211_NUM_TIDS + 1];
+ } status_stats;
/* Updated from TX path only, no locking requirements */
- u64 tx_packets[IEEE80211_NUM_ACS];
- u64 tx_bytes[IEEE80211_NUM_ACS];
- struct ieee80211_tx_rate last_tx_rate;
- int last_rx_rate_idx;
- u32 last_rx_rate_flag;
- u32 last_rx_rate_vht_flag;
- u8 last_rx_rate_vht_nss;
+ struct {
+ u64 packets[IEEE80211_NUM_ACS];
+ u64 bytes[IEEE80211_NUM_ACS];
+ struct ieee80211_tx_rate last_rate;
+ u64 msdu[IEEE80211_NUM_TIDS + 1];
+ } tx_stats;
u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
- u64 tx_msdu[IEEE80211_NUM_TIDS + 1];
- u64 tx_msdu_retries[IEEE80211_NUM_TIDS + 1];
- u64 tx_msdu_failed[IEEE80211_NUM_TIDS + 1];
- u64 rx_msdu[IEEE80211_NUM_TIDS + 1];
/*
* Aggregation information, locked with lock.
@@ -509,15 +486,9 @@ struct sta_info {
enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
- unsigned int lost_packets;
- unsigned int beacon_loss_count;
-
enum ieee80211_smps_mode known_smps_mode;
const struct ieee80211_cipher_scheme *cipher_scheme;
- /* TDLS timeout data */
- unsigned long last_tdls_pkt_time;
-
u8 reserved_tid;
struct cfg80211_chan_def tdls_chandef;
@@ -688,8 +659,6 @@ static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata)
void sta_set_rate_info_tx(struct sta_info *sta,
const struct ieee80211_tx_rate *rate,
struct rate_info *rinfo);
-void sta_set_rate_info_rx(struct sta_info *sta,
- struct rate_info *rinfo);
void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo);
void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
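Sketch (hypothetical helper, not part of the patch): with the counters grouped into rx_stats/tx_stats/status_stats, readers total the per-AC TX counters exactly as sta_set_sinfo() does above, for example:

	static void sta_sum_tx(struct sta_info *sta, u64 *bytes, u64 *packets)
	{
		int ac;

		*bytes = 0;
		*packets = 0;
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
			*bytes += sta->tx_stats.bytes[ac];
			*packets += sta->tx_stats.packets[ac];
		}
	}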
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 9169ccc36534..5bad05e9af90 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -67,7 +67,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
IEEE80211_TX_INTFL_RETRANSMISSION;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
- sta->tx_filtered_count++;
+ sta->status_stats.filtered++;
/*
* Clear more-data bit on filtered frames, it might be set
@@ -183,7 +183,7 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
struct ieee80211_sub_if_data *sdata = sta->sdata;
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
if (ieee80211_is_data_qos(mgmt->frame_control)) {
struct ieee80211_hdr *hdr = (void *) skb->data;
@@ -557,8 +557,9 @@ static void ieee80211_lost_packet(struct sta_info *sta,
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
- sta->lost_packets++;
- if (!sta->sta.tdls && sta->lost_packets < STA_LOST_PKT_THRESHOLD)
+ sta->status_stats.lost_packets++;
+ if (!sta->sta.tdls &&
+ sta->status_stats.lost_packets < STA_LOST_PKT_THRESHOLD)
return;
/*
@@ -568,14 +569,15 @@ static void ieee80211_lost_packet(struct sta_info *sta,
* mechanism.
*/
if (sta->sta.tdls &&
- (sta->lost_packets < STA_LOST_TDLS_PKT_THRESHOLD ||
+ (sta->status_stats.lost_packets < STA_LOST_TDLS_PKT_THRESHOLD ||
time_before(jiffies,
- sta->last_tdls_pkt_time + STA_LOST_TDLS_PKT_TIME)))
+ sta->status_stats.last_tdls_pkt_time +
+ STA_LOST_TDLS_PKT_TIME)))
return;
cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
- sta->lost_packets, GFP_ATOMIC);
- sta->lost_packets = 0;
+ sta->status_stats.lost_packets, GFP_ATOMIC);
+ sta->status_stats.lost_packets = 0;
}
static int ieee80211_tx_get_rates(struct ieee80211_hw *hw,
@@ -636,18 +638,18 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
sta = container_of(pubsta, struct sta_info, sta);
if (!acked)
- sta->tx_retry_failed++;
- sta->tx_retry_count += retry_count;
+ sta->status_stats.retry_failed++;
+ sta->status_stats.retry_count += retry_count;
if (acked) {
- sta->last_rx = jiffies;
+ sta->rx_stats.last_rx = jiffies;
- if (sta->lost_packets)
- sta->lost_packets = 0;
+ if (sta->status_stats.lost_packets)
+ sta->status_stats.lost_packets = 0;
/* Track when last TDLS packet was ACKed */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
- sta->last_tdls_pkt_time = jiffies;
+ sta->status_stats.last_tdls_pkt_time = jiffies;
} else {
ieee80211_lost_packet(sta, info);
}
@@ -784,7 +786,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
(ieee80211_is_data(hdr->frame_control)) &&
(rates_idx != -1))
- sta->last_tx_rate = info->status.rates[rates_idx];
+ sta->tx_stats.last_rate =
+ info->status.rates[rates_idx];
if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
(ieee80211_is_data_qos(fc))) {
@@ -830,13 +833,15 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
} else {
if (!acked)
- sta->tx_retry_failed++;
- sta->tx_retry_count += retry_count;
+ sta->status_stats.retry_failed++;
+ sta->status_stats.retry_count += retry_count;
if (ieee80211_is_data_present(fc)) {
if (!acked)
- sta->tx_msdu_failed[tid]++;
- sta->tx_msdu_retries[tid] += retry_count;
+ sta->status_stats.msdu_failed[tid]++;
+
+ sta->status_stats.msdu_retries[tid] +=
+ retry_count;
}
}
@@ -854,19 +859,17 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
if (info->flags & IEEE80211_TX_STAT_ACK) {
- if (sta->lost_packets)
- sta->lost_packets = 0;
+ if (sta->status_stats.lost_packets)
+ sta->status_stats.lost_packets = 0;
/* Track when last TDLS packet was ACKed */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
- sta->last_tdls_pkt_time = jiffies;
+ sta->status_stats.last_tdls_pkt_time =
+ jiffies;
} else {
ieee80211_lost_packet(sta, info);
}
}
-
- if (acked)
- sta->last_ack_signal = info->status.ack_signal;
}
rcu_read_unlock();
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 314e3bd7fbdb..5cf8f4ea077f 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -325,7 +325,6 @@ TRACE_EVENT(drv_config,
__field(u32, flags)
__field(int, power_level)
__field(int, dynamic_ps_timeout)
- __field(int, max_sleep_period)
__field(u16, listen_interval)
__field(u8, long_frame_max_tx_count)
__field(u8, short_frame_max_tx_count)
@@ -339,7 +338,6 @@ TRACE_EVENT(drv_config,
__entry->flags = local->hw.conf.flags;
__entry->power_level = local->hw.conf.power_level;
__entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
- __entry->max_sleep_period = local->hw.conf.max_sleep_period;
__entry->listen_interval = local->hw.conf.listen_interval;
__entry->long_frame_max_tx_count =
local->hw.conf.long_frame_max_tx_count;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3478a83187e5..bdc224d5053a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -757,9 +757,9 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
if (txrc.reported_rate.idx < 0) {
txrc.reported_rate = tx->rate;
if (tx->sta && ieee80211_is_data(hdr->frame_control))
- tx->sta->last_tx_rate = txrc.reported_rate;
+ tx->sta->tx_stats.last_rate = txrc.reported_rate;
} else if (tx->sta)
- tx->sta->last_tx_rate = txrc.reported_rate;
+ tx->sta->tx_stats.last_rate = txrc.reported_rate;
if (ratetbl)
return TX_CONTINUE;
@@ -824,7 +824,7 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
tx->sdata->sequence_number += 0x10;
if (tx->sta)
- tx->sta->tx_msdu[IEEE80211_NUM_TIDS]++;
+ tx->sta->tx_stats.msdu[IEEE80211_NUM_TIDS]++;
return TX_CONTINUE;
}
@@ -840,7 +840,7 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
- tx->sta->tx_msdu[tid]++;
+ tx->sta->tx_stats.msdu[tid]++;
if (!tx->sta->sta.txq[0])
hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
@@ -994,10 +994,10 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
skb_queue_walk(&tx->skbs, skb) {
ac = skb_get_queue_mapping(skb);
- tx->sta->tx_bytes[ac] += skb->len;
+ tx->sta->tx_stats.bytes[ac] += skb->len;
}
if (ac >= 0)
- tx->sta->tx_packets[ac]++;
+ tx->sta->tx_stats.packets[ac]++;
return TX_CONTINUE;
}
@@ -2779,10 +2779,10 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
}
if (skb_shinfo(skb)->gso_size)
- sta->tx_msdu[tid] +=
+ sta->tx_stats.msdu[tid] +=
DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
else
- sta->tx_msdu[tid]++;
+ sta->tx_stats.msdu[tid]++;
info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
@@ -2813,8 +2813,8 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
/* statistics normally done by ieee80211_tx_h_stats (but that
* has to consider fragmentation, so is more complex)
*/
- sta->tx_bytes[skb_get_queue_mapping(skb)] += skb->len;
- sta->tx_packets[skb_get_queue_mapping(skb)]++;
+ sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
+ sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
if (fast_tx->pn_offs) {
u64 pn;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 60c4dbf92625..8274c86296f9 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1951,7 +1951,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
- ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_ps(local);
/*
* The sta might be in psm against the ap (e.g. because
@@ -2042,9 +2042,13 @@ int ieee80211_reconfig(struct ieee80211_local *local)
if (sched_scan_sdata && sched_scan_req)
/*
* Sched scan stopped, but we don't want to report it. Instead,
- * we're trying to reschedule.
+ * we're trying to reschedule. However, if more than one scan
+ * plan was set, we cannot reschedule since we don't know which
+ * scan plan was currently running (and some scan plans may have
+ * already finished).
*/
- if (__ieee80211_request_sched_scan_start(sched_scan_sdata,
+ if (sched_scan_req->n_scan_plans > 1 ||
+ __ieee80211_request_sched_scan_start(sched_scan_sdata,
sched_scan_req))
sched_scan_stopped = true;
mutex_unlock(&local->mtx);
@@ -3301,9 +3305,11 @@ void ieee80211_init_tx_queue(struct ieee80211_sub_if_data *sdata,
if (sta) {
txqi->txq.sta = &sta->sta;
sta->sta.txq[tid] = &txqi->txq;
+ txqi->txq.tid = tid;
txqi->txq.ac = ieee802_1d_to_ac[tid & 7];
} else {
sdata->vif.txq = &txqi->txq;
+ txqi->txq.tid = 0;
txqi->txq.ac = IEEE80211_AC_BE;
}
}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index feb547dc8643..d824c38971ed 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -174,9 +174,12 @@ mic_fail_no_key:
* a driver that supports HW encryption. Send up the key idx only if
* the key is set.
*/
- mac80211_ev_michael_mic_failure(rx->sdata,
- rx->key ? rx->key->conf.keyidx : -1,
- (void *) skb->data, NULL, GFP_ATOMIC);
+ cfg80211_michael_mic_failure(rx->sdata->dev, hdr->addr2,
+ is_multicast_ether_addr(hdr->addr1) ?
+ NL80211_KEYTYPE_GROUP :
+ NL80211_KEYTYPE_PAIRWISE,
+ rx->key ? rx->key->conf.keyidx : -1,
+ NULL, GFP_ATOMIC);
return RX_DROP_UNUSABLE;
}
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index bb185a28de98..cc972e30355b 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -19,36 +19,13 @@
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif
+#include <net/nexthop.h>
#include "internal.h"
-#define LABEL_NOT_SPECIFIED (1<<20)
-#define MAX_NEW_LABELS 2
-
-/* This maximum ha length copied from the definition of struct neighbour */
-#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
-
-enum mpls_payload_type {
- MPT_UNSPEC, /* IPv4 or IPv6 */
- MPT_IPV4 = 4,
- MPT_IPV6 = 6,
-
- /* Other types not implemented:
- * - Pseudo-wire with or without control word (RFC4385)
- * - GAL (RFC5586)
- */
-};
-
-struct mpls_route { /* next hop label forwarding entry */
- struct net_device __rcu *rt_dev;
- struct rcu_head rt_rcu;
- u32 rt_label[MAX_NEW_LABELS];
- u8 rt_protocol; /* routing protocol that set this entry */
- u8 rt_payload_type;
- u8 rt_labels;
- u8 rt_via_alen;
- u8 rt_via_table;
- u8 rt_via[0];
-};
+/* Maximum number of labels to look ahead at when selecting a path of
+ * a multipath route
+ */
+#define MAX_MP_SELECT_LABELS 4
static int zero = 0;
static int label_limit = (1 << 20) - 1;
@@ -80,10 +57,10 @@ bool mpls_output_possible(const struct net_device *dev)
}
EXPORT_SYMBOL_GPL(mpls_output_possible);
-static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
+static unsigned int mpls_nh_header_size(const struct mpls_nh *nh)
{
/* The size of the layer 2.5 labels to be added for this route */
- return rt->rt_labels * sizeof(struct mpls_shim_hdr);
+ return nh->nh_labels * sizeof(struct mpls_shim_hdr);
}
unsigned int mpls_dev_mtu(const struct net_device *dev)
@@ -105,6 +82,80 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
}
EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
+static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
+ struct sk_buff *skb, bool bos)
+{
+ struct mpls_entry_decoded dec;
+ struct mpls_shim_hdr *hdr;
+ bool eli_seen = false;
+ int label_index;
+ int nh_index = 0;
+ u32 hash = 0;
+
+ /* No need to look further into the packet if there's only
+ * one path
+ */
+ if (rt->rt_nhn == 1)
+ goto out;
+
+ for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
+ label_index++) {
+ if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
+ break;
+
+ /* Read and decode the current label */
+ hdr = mpls_hdr(skb) + label_index;
+ dec = mpls_entry_decode(hdr);
+
+ /* RFC6790 - reserved labels MUST NOT be used as keys
+ * for the load-balancing function
+ */
+ if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) {
+ hash = jhash_1word(dec.label, hash);
+
+ /* The entropy label follows the entropy label
+ * indicator, so this means that the entropy
+ * label was just added to the hash - no need to
+ * go any deeper either in the label stack or in the
+ * payload
+ */
+ if (eli_seen)
+ break;
+ } else if (dec.label == MPLS_LABEL_ENTROPY) {
+ eli_seen = true;
+ }
+
+ bos = dec.bos;
+ if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index +
+ sizeof(struct iphdr))) {
+ const struct iphdr *v4hdr;
+
+ v4hdr = (const struct iphdr *)(mpls_hdr(skb) +
+ label_index);
+ if (v4hdr->version == 4) {
+ hash = jhash_3words(ntohl(v4hdr->saddr),
+ ntohl(v4hdr->daddr),
+ v4hdr->protocol, hash);
+ } else if (v4hdr->version == 6 &&
+ pskb_may_pull(skb, sizeof(*hdr) * label_index +
+ sizeof(struct ipv6hdr))) {
+ const struct ipv6hdr *v6hdr;
+
+ v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) +
+ label_index);
+
+ hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
+ hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
+ hash = jhash_1word(v6hdr->nexthdr, hash);
+ }
+ }
+ }
+
+ nh_index = hash % rt->rt_nhn;
+out:
+ return &rt->rt_nh[nh_index];
+}
+
static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
struct mpls_entry_decoded dec)
{
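Illustrative example (not part of the patch): mpls_select_multipath() above hashes the non-reserved labels (and, at the bottom of stack, the IP header fields) and then picks a nexthop index by taking the hash modulo rt_nhn. A standalone toy program, with a made-up 32-bit mixer standing in for jhash_1word() and made-up label values, shows just that selection step:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the kernel's jhash_1word(); any reasonable 32-bit
	 * mixer illustrates the point equally well.
	 */
	static uint32_t toy_hash(uint32_t word, uint32_t seed)
	{
		uint32_t h = seed ^ (word * 2654435761u);

		h ^= h >> 16;
		return h * 2246822519u;
	}

	int main(void)
	{
		/* Made-up label stack; both values are above the reserved range */
		uint32_t labels[] = { 1000, 2000 };
		unsigned int n_paths = 3;	/* plays the role of rt->rt_nhn */
		uint32_t hash = 0;
		size_t i;

		for (i = 0; i < sizeof(labels) / sizeof(labels[0]); i++)
			hash = toy_hash(labels[i], hash);

		printf("selected nexthop index: %u\n",
		       (unsigned int)(hash % n_paths));
		return 0;
	}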
@@ -159,6 +210,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
struct net *net = dev_net(dev);
struct mpls_shim_hdr *hdr;
struct mpls_route *rt;
+ struct mpls_nh *nh;
struct mpls_entry_decoded dec;
struct net_device *out_dev;
struct mpls_dev *mdev;
@@ -196,8 +248,12 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
if (!rt)
goto drop;
+ nh = mpls_select_multipath(rt, skb, dec.bos);
+ if (!nh)
+ goto drop;
+
/* Find the output device */
- out_dev = rcu_dereference(rt->rt_dev);
+ out_dev = rcu_dereference(nh->nh_dev);
if (!mpls_output_possible(out_dev))
goto drop;
@@ -212,7 +268,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
dec.ttl -= 1;
/* Verify the destination can hold the packet */
- new_header_size = mpls_rt_header_size(rt);
+ new_header_size = mpls_nh_header_size(nh);
mtu = mpls_dev_mtu(out_dev);
if (mpls_pkt_too_big(skb, mtu - new_header_size))
goto drop;
@@ -240,13 +296,14 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
/* Push the new labels */
hdr = mpls_hdr(skb);
bos = dec.bos;
- for (i = rt->rt_labels - 1; i >= 0; i--) {
- hdr[i] = mpls_entry_encode(rt->rt_label[i], dec.ttl, 0, bos);
+ for (i = nh->nh_labels - 1; i >= 0; i--) {
+ hdr[i] = mpls_entry_encode(nh->nh_label[i],
+ dec.ttl, 0, bos);
bos = false;
}
}
- err = neigh_xmit(rt->rt_via_table, out_dev, rt->rt_via, skb);
+ err = neigh_xmit(nh->nh_via_table, out_dev, nh->nh_via, skb);
if (err)
net_dbg_ratelimited("%s: packet transmission failed: %d\n",
__func__, err);
@@ -270,24 +327,28 @@ static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
struct mpls_route_config {
u32 rc_protocol;
u32 rc_ifindex;
- u16 rc_via_table;
- u16 rc_via_alen;
+ u8 rc_via_table;
+ u8 rc_via_alen;
u8 rc_via[MAX_VIA_ALEN];
u32 rc_label;
- u32 rc_output_labels;
+ u8 rc_output_labels;
u32 rc_output_label[MAX_NEW_LABELS];
u32 rc_nlflags;
enum mpls_payload_type rc_payload_type;
struct nl_info rc_nlinfo;
+ struct rtnexthop *rc_mp;
+ int rc_mp_len;
};
-static struct mpls_route *mpls_rt_alloc(size_t alen)
+static struct mpls_route *mpls_rt_alloc(int num_nh)
{
struct mpls_route *rt;
- rt = kzalloc(sizeof(*rt) + alen, GFP_KERNEL);
+ rt = kzalloc(sizeof(*rt) + (num_nh * sizeof(struct mpls_nh)),
+ GFP_KERNEL);
if (rt)
- rt->rt_via_alen = alen;
+ rt->rt_nhn = num_nh;
+
return rt;
}
@@ -312,25 +373,22 @@ static void mpls_notify_route(struct net *net, unsigned index,
}
static void mpls_route_update(struct net *net, unsigned index,
- struct net_device *dev, struct mpls_route *new,
+ struct mpls_route *new,
const struct nl_info *info)
{
struct mpls_route __rcu **platform_label;
- struct mpls_route *rt, *old = NULL;
+ struct mpls_route *rt;
ASSERT_RTNL();
platform_label = rtnl_dereference(net->mpls.platform_label);
rt = rtnl_dereference(platform_label[index]);
- if (!dev || (rt && (rtnl_dereference(rt->rt_dev) == dev))) {
- rcu_assign_pointer(platform_label[index], new);
- old = rt;
- }
+ rcu_assign_pointer(platform_label[index], new);
- mpls_notify_route(net, index, old, new, info);
+ mpls_notify_route(net, index, rt, new, info);
/* If we removed a route free it now */
- mpls_rt_free(old);
+ mpls_rt_free(rt);
}
static unsigned find_free_label(struct net *net)
@@ -406,40 +464,199 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
#endif
static struct net_device *find_outdev(struct net *net,
- struct mpls_route_config *cfg)
+ struct mpls_nh *nh, int oif)
{
struct net_device *dev = NULL;
- if (!cfg->rc_ifindex) {
- switch (cfg->rc_via_table) {
+ if (!oif) {
+ switch (nh->nh_via_table) {
case NEIGH_ARP_TABLE:
- dev = inet_fib_lookup_dev(net, cfg->rc_via);
+ dev = inet_fib_lookup_dev(net, nh->nh_via);
break;
case NEIGH_ND_TABLE:
- dev = inet6_fib_lookup_dev(net, cfg->rc_via);
+ dev = inet6_fib_lookup_dev(net, nh->nh_via);
break;
case NEIGH_LINK_TABLE:
break;
}
} else {
- dev = dev_get_by_index(net, cfg->rc_ifindex);
+ dev = dev_get_by_index(net, oif);
}
if (!dev)
return ERR_PTR(-ENODEV);
+ /* The caller is holding rtnl anyway, so release the dev reference */
+ dev_put(dev);
+
return dev;
}
+static int mpls_nh_assign_dev(struct net *net, struct mpls_nh *nh, int oif)
+{
+ struct net_device *dev = NULL;
+ int err = -ENODEV;
+
+ dev = find_outdev(net, nh, oif);
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ dev = NULL;
+ goto errout;
+ }
+
+ /* Ensure this is a supported device */
+ err = -EINVAL;
+ if (!mpls_dev_get(dev))
+ goto errout;
+
+ RCU_INIT_POINTER(nh->nh_dev, dev);
+
+ return 0;
+
+errout:
+ return err;
+}
+
+static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
+ struct mpls_route *rt)
+{
+ struct net *net = cfg->rc_nlinfo.nl_net;
+ struct mpls_nh *nh = rt->rt_nh;
+ int err;
+ int i;
+
+ if (!nh)
+ return -ENOMEM;
+
+ err = -EINVAL;
+ /* Ensure only a supported number of labels are present */
+ if (cfg->rc_output_labels > MAX_NEW_LABELS)
+ goto errout;
+
+ nh->nh_labels = cfg->rc_output_labels;
+ for (i = 0; i < nh->nh_labels; i++)
+ nh->nh_label[i] = cfg->rc_output_label[i];
+
+ nh->nh_via_table = cfg->rc_via_table;
+ memcpy(nh->nh_via, cfg->rc_via, cfg->rc_via_alen);
+ nh->nh_via_alen = cfg->rc_via_alen;
+
+ err = mpls_nh_assign_dev(net, nh, cfg->rc_ifindex);
+ if (err)
+ goto errout;
+
+ return 0;
+
+errout:
+ return err;
+}
+
+static int mpls_nh_build(struct net *net, struct mpls_nh *nh,
+ int oif, struct nlattr *via, struct nlattr *newdst)
+{
+ int err = -ENOMEM;
+
+ if (!nh)
+ goto errout;
+
+ if (newdst) {
+ err = nla_get_labels(newdst, MAX_NEW_LABELS,
+ &nh->nh_labels, nh->nh_label);
+ if (err)
+ goto errout;
+ }
+
+ err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
+ nh->nh_via);
+ if (err)
+ goto errout;
+
+ err = mpls_nh_assign_dev(net, nh, oif);
+ if (err)
+ goto errout;
+
+ return 0;
+
+errout:
+ return err;
+}
+
+static int mpls_count_nexthops(struct rtnexthop *rtnh, int len)
+{
+ int nhs = 0;
+ int remaining = len;
+
+ while (rtnh_ok(rtnh, remaining)) {
+ nhs++;
+ rtnh = rtnh_next(rtnh, &remaining);
+ }
+
+ /* leftover implies invalid nexthop configuration, discard it */
+ return remaining > 0 ? 0 : nhs;
+}
+
+static int mpls_nh_build_multi(struct mpls_route_config *cfg,
+ struct mpls_route *rt)
+{
+ struct rtnexthop *rtnh = cfg->rc_mp;
+ struct nlattr *nla_via, *nla_newdst;
+ int remaining = cfg->rc_mp_len;
+ int nhs = 0;
+ int err = 0;
+
+ change_nexthops(rt) {
+ int attrlen;
+
+ nla_via = NULL;
+ nla_newdst = NULL;
+
+ err = -EINVAL;
+ if (!rtnh_ok(rtnh, remaining))
+ goto errout;
+
+ /* neither weighted multipath nor any flags
+ * are supported
+ */
+ if (rtnh->rtnh_hops || rtnh->rtnh_flags)
+ goto errout;
+
+ attrlen = rtnh_attrlen(rtnh);
+ if (attrlen > 0) {
+ struct nlattr *attrs = rtnh_attrs(rtnh);
+
+ nla_via = nla_find(attrs, attrlen, RTA_VIA);
+ nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
+ }
+
+ if (!nla_via)
+ goto errout;
+
+ err = mpls_nh_build(cfg->rc_nlinfo.nl_net, nh,
+ rtnh->rtnh_ifindex, nla_via,
+ nla_newdst);
+ if (err)
+ goto errout;
+
+ rtnh = rtnh_next(rtnh, &remaining);
+ nhs++;
+ } endfor_nexthops(rt);
+
+ rt->rt_nhn = nhs;
+
+ return 0;
+
+errout:
+ return err;
+}
+
static int mpls_route_add(struct mpls_route_config *cfg)
{
struct mpls_route __rcu **platform_label;
struct net *net = cfg->rc_nlinfo.nl_net;
- struct net_device *dev = NULL;
struct mpls_route *rt, *old;
- unsigned index;
- int i;
int err = -EINVAL;
+ unsigned index;
+ int nhs = 1; /* default to one nexthop */
index = cfg->rc_label;
@@ -457,27 +674,6 @@ static int mpls_route_add(struct mpls_route_config *cfg)
if (index >= net->mpls.platform_labels)
goto errout;
- /* Ensure only a supported number of labels are present */
- if (cfg->rc_output_labels > MAX_NEW_LABELS)
- goto errout;
-
- dev = find_outdev(net, cfg);
- if (IS_ERR(dev)) {
- err = PTR_ERR(dev);
- dev = NULL;
- goto errout;
- }
-
- /* Ensure this is a supported device */
- err = -EINVAL;
- if (!mpls_dev_get(dev))
- goto errout;
-
- err = -EINVAL;
- if ((cfg->rc_via_table == NEIGH_LINK_TABLE) &&
- (dev->addr_len != cfg->rc_via_alen))
- goto errout;
-
/* Append makes no sense with mpls */
err = -EOPNOTSUPP;
if (cfg->rc_nlflags & NLM_F_APPEND)
@@ -497,28 +693,35 @@ static int mpls_route_add(struct mpls_route_config *cfg)
if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
goto errout;
+ if (cfg->rc_mp) {
+ err = -EINVAL;
+ nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len);
+ if (nhs == 0)
+ goto errout;
+ }
+
err = -ENOMEM;
- rt = mpls_rt_alloc(cfg->rc_via_alen);
+ rt = mpls_rt_alloc(nhs);
if (!rt)
goto errout;
- rt->rt_labels = cfg->rc_output_labels;
- for (i = 0; i < rt->rt_labels; i++)
- rt->rt_label[i] = cfg->rc_output_label[i];
rt->rt_protocol = cfg->rc_protocol;
- RCU_INIT_POINTER(rt->rt_dev, dev);
rt->rt_payload_type = cfg->rc_payload_type;
- rt->rt_via_table = cfg->rc_via_table;
- memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen);
- mpls_route_update(net, index, NULL, rt, &cfg->rc_nlinfo);
+ if (cfg->rc_mp)
+ err = mpls_nh_build_multi(cfg, rt);
+ else
+ err = mpls_nh_build_from_cfg(cfg, rt);
+ if (err)
+ goto freert;
+
+ mpls_route_update(net, index, rt, &cfg->rc_nlinfo);
- dev_put(dev);
return 0;
+freert:
+ mpls_rt_free(rt);
errout:
- if (dev)
- dev_put(dev);
return err;
}
@@ -538,7 +741,7 @@ static int mpls_route_del(struct mpls_route_config *cfg)
if (index >= net->mpls.platform_labels)
goto errout;
- mpls_route_update(net, index, NULL, NULL, &cfg->rc_nlinfo);
+ mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
err = 0;
errout:
@@ -635,9 +838,11 @@ static void mpls_ifdown(struct net_device *dev)
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
if (!rt)
continue;
- if (rtnl_dereference(rt->rt_dev) != dev)
- continue;
- rt->rt_dev = NULL;
+ for_nexthops(rt) {
+ if (rtnl_dereference(nh->nh_dev) != dev)
+ continue;
+ nh->nh_dev = NULL;
+ } endfor_nexthops(rt);
}
mdev = mpls_dev_get(dev);
@@ -736,7 +941,7 @@ int nla_put_labels(struct sk_buff *skb, int attrtype,
EXPORT_SYMBOL_GPL(nla_put_labels);
int nla_get_labels(const struct nlattr *nla,
- u32 max_labels, u32 *labels, u32 label[])
+ u32 max_labels, u8 *labels, u32 label[])
{
unsigned len = nla_len(nla);
unsigned nla_labels;
@@ -781,6 +986,48 @@ int nla_get_labels(const struct nlattr *nla,
}
EXPORT_SYMBOL_GPL(nla_get_labels);
+int nla_get_via(const struct nlattr *nla, u8 *via_alen,
+ u8 *via_table, u8 via_addr[])
+{
+ struct rtvia *via = nla_data(nla);
+ int err = -EINVAL;
+ int alen;
+
+ if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
+ goto errout;
+ alen = nla_len(nla) -
+ offsetof(struct rtvia, rtvia_addr);
+ if (alen > MAX_VIA_ALEN)
+ goto errout;
+
+ /* Validate the address family */
+ switch (via->rtvia_family) {
+ case AF_PACKET:
+ *via_table = NEIGH_LINK_TABLE;
+ break;
+ case AF_INET:
+ *via_table = NEIGH_ARP_TABLE;
+ if (alen != 4)
+ goto errout;
+ break;
+ case AF_INET6:
+ *via_table = NEIGH_ND_TABLE;
+ if (alen != 16)
+ goto errout;
+ break;
+ default:
+ /* Unsupported address family */
+ goto errout;
+ }
+
+ memcpy(via_addr, via->rtvia_addr, alen);
+ *via_alen = alen;
+ err = 0;
+
+errout:
+ return err;
+}
+
static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
struct mpls_route_config *cfg)
{
@@ -844,7 +1091,7 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
break;
case RTA_DST:
{
- u32 label_count;
+ u8 label_count;
if (nla_get_labels(nla, 1, &label_count,
&cfg->rc_label))
goto errout;
@@ -857,35 +1104,15 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
}
case RTA_VIA:
{
- struct rtvia *via = nla_data(nla);
- if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
+ if (nla_get_via(nla, &cfg->rc_via_alen,
+ &cfg->rc_via_table, cfg->rc_via))
goto errout;
- cfg->rc_via_alen = nla_len(nla) -
- offsetof(struct rtvia, rtvia_addr);
- if (cfg->rc_via_alen > MAX_VIA_ALEN)
- goto errout;
-
- /* Validate the address family */
- switch(via->rtvia_family) {
- case AF_PACKET:
- cfg->rc_via_table = NEIGH_LINK_TABLE;
- break;
- case AF_INET:
- cfg->rc_via_table = NEIGH_ARP_TABLE;
- if (cfg->rc_via_alen != 4)
- goto errout;
- break;
- case AF_INET6:
- cfg->rc_via_table = NEIGH_ND_TABLE;
- if (cfg->rc_via_alen != 16)
- goto errout;
- break;
- default:
- /* Unsupported address family */
- goto errout;
- }
-
- memcpy(cfg->rc_via, via->rtvia_addr, cfg->rc_via_alen);
+ break;
+ }
+ case RTA_MULTIPATH:
+ {
+ cfg->rc_mp = nla_data(nla);
+ cfg->rc_mp_len = nla_len(nla);
break;
}
default:
@@ -946,16 +1173,52 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
rtm->rtm_type = RTN_UNICAST;
rtm->rtm_flags = 0;
- if (rt->rt_labels &&
- nla_put_labels(skb, RTA_NEWDST, rt->rt_labels, rt->rt_label))
- goto nla_put_failure;
- if (nla_put_via(skb, rt->rt_via_table, rt->rt_via, rt->rt_via_alen))
- goto nla_put_failure;
- dev = rtnl_dereference(rt->rt_dev);
- if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
- goto nla_put_failure;
if (nla_put_labels(skb, RTA_DST, 1, &label))
goto nla_put_failure;
+ if (rt->rt_nhn == 1) {
+ struct mpls_nh *nh = rt->rt_nh;
+
+ if (nh->nh_labels &&
+ nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
+ nh->nh_label))
+ goto nla_put_failure;
+ if (nla_put_via(skb, nh->nh_via_table, nh->nh_via,
+ nh->nh_via_alen))
+ goto nla_put_failure;
+ dev = rtnl_dereference(nh->nh_dev);
+ if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
+ goto nla_put_failure;
+ } else {
+ struct rtnexthop *rtnh;
+ struct nlattr *mp;
+
+ mp = nla_nest_start(skb, RTA_MULTIPATH);
+ if (!mp)
+ goto nla_put_failure;
+
+ for_nexthops(rt) {
+ rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
+ if (!rtnh)
+ goto nla_put_failure;
+
+ dev = rtnl_dereference(nh->nh_dev);
+ if (dev)
+ rtnh->rtnh_ifindex = dev->ifindex;
+ if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
+ nh->nh_labels,
+ nh->nh_label))
+ goto nla_put_failure;
+ if (nla_put_via(skb, nh->nh_via_table,
+ nh->nh_via,
+ nh->nh_via_alen))
+ goto nla_put_failure;
+
+ /* length of rtnetlink header + attributes */
+ rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
+ } endfor_nexthops(rt);
+
+ nla_nest_end(skb, mp);
+ }
nlmsg_end(skb, nlh);
return 0;
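For reference, the multipath dump above nests one rtnexthop per path inside a single RTA_MULTIPATH attribute; schematically (lengths omitted, RTA_NEWDST only present when the nexthop has output labels) the layout is roughly:

	RTA_MULTIPATH
	    rtnexthop { rtnh_ifindex, rtnh_len }
	        RTA_NEWDST   (optional outgoing label stack)
	        RTA_VIA      (next-hop address family + address)
	    rtnexthop { ... }
	        ...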
@@ -1000,12 +1263,30 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
{
size_t payload =
NLMSG_ALIGN(sizeof(struct rtmsg))
- + nla_total_size(2 + rt->rt_via_alen) /* RTA_VIA */
+ nla_total_size(4); /* RTA_DST */
- if (rt->rt_labels) /* RTA_NEWDST */
- payload += nla_total_size(rt->rt_labels * 4);
- if (rt->rt_dev) /* RTA_OIF */
- payload += nla_total_size(4);
+
+ if (rt->rt_nhn == 1) {
+ struct mpls_nh *nh = rt->rt_nh;
+
+ if (nh->nh_dev)
+ payload += nla_total_size(4); /* RTA_OIF */
+ payload += nla_total_size(2 + nh->nh_via_alen); /* RTA_VIA */
+ if (nh->nh_labels) /* RTA_NEWDST */
+ payload += nla_total_size(nh->nh_labels * 4);
+ } else {
+ /* each nexthop is packed in an attribute */
+ size_t nhsize = 0;
+
+ for_nexthops(rt) {
+ nhsize += nla_total_size(sizeof(struct rtnexthop));
+ nhsize += nla_total_size(2 + nh->nh_via_alen);
+ if (nh->nh_labels)
+ nhsize += nla_total_size(nh->nh_labels * 4);
+ } endfor_nexthops(rt);
+ /* nested attribute */
+ payload += nla_total_size(nhsize);
+ }
+
return payload;
}
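Worked example (assuming the usual netlink alignment rules, i.e. 4-byte attribute alignment and a 4-byte attribute header): for a single-nexthop route with an IPv4 via and two output labels, lfib_nlmsg_size() adds NLMSG_ALIGN(sizeof(struct rtmsg)) = 12, RTA_DST = nla_total_size(4) = 8, RTA_OIF = nla_total_size(4) = 8, RTA_VIA = nla_total_size(2 + 4) = 12 and RTA_NEWDST = nla_total_size(2 * 4) = 12, i.e. 52 bytes of payload before the netlink message header.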
@@ -1057,25 +1338,25 @@ static int resize_platform_label_table(struct net *net, size_t limit)
/* In case the predefined labels need to be populated */
if (limit > MPLS_LABEL_IPV4NULL) {
struct net_device *lo = net->loopback_dev;
- rt0 = mpls_rt_alloc(lo->addr_len);
+ rt0 = mpls_rt_alloc(1);
if (!rt0)
goto nort0;
- RCU_INIT_POINTER(rt0->rt_dev, lo);
+ RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
rt0->rt_protocol = RTPROT_KERNEL;
rt0->rt_payload_type = MPT_IPV4;
- rt0->rt_via_table = NEIGH_LINK_TABLE;
- memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
+ rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
+ memcpy(rt0->rt_nh->nh_via, lo->dev_addr, lo->addr_len);
}
if (limit > MPLS_LABEL_IPV6NULL) {
struct net_device *lo = net->loopback_dev;
- rt2 = mpls_rt_alloc(lo->addr_len);
+ rt2 = mpls_rt_alloc(1);
if (!rt2)
goto nort2;
- RCU_INIT_POINTER(rt2->rt_dev, lo);
+ RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
rt2->rt_protocol = RTPROT_KERNEL;
rt2->rt_payload_type = MPT_IPV6;
- rt2->rt_via_table = NEIGH_LINK_TABLE;
- memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len);
+ rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
+ memcpy(rt2->rt_nh->nh_via, lo->dev_addr, lo->addr_len);
}
rtnl_lock();
@@ -1085,7 +1366,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
/* Free any labels beyond the new table */
for (index = limit; index < old_limit; index++)
- mpls_route_update(net, index, NULL, NULL, NULL);
+ mpls_route_update(net, index, NULL, NULL);
/* Copy over the old labels */
cp_size = size;
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 2681a4ba6c37..d7757be39877 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -21,6 +21,54 @@ struct mpls_dev {
struct sk_buff;
+#define LABEL_NOT_SPECIFIED (1 << 20)
+#define MAX_NEW_LABELS 2
+
+/* This maximum hardware address length is copied from the definition of struct neighbour */
+#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
+
+enum mpls_payload_type {
+ MPT_UNSPEC, /* IPv4 or IPv6 */
+ MPT_IPV4 = 4,
+ MPT_IPV6 = 6,
+
+ /* Other types not implemented:
+ * - Pseudo-wire with or without control word (RFC4385)
+ * - GAL (RFC5586)
+ */
+};
+
+struct mpls_nh { /* next hop label forwarding entry */
+ struct net_device __rcu *nh_dev;
+ u32 nh_label[MAX_NEW_LABELS];
+ u8 nh_labels;
+ u8 nh_via_alen;
+ u8 nh_via_table;
+ u8 nh_via[MAX_VIA_ALEN];
+};
+
+struct mpls_route { /* next hop label forwarding entry */
+ struct rcu_head rt_rcu;
+ u8 rt_protocol;
+ u8 rt_payload_type;
+ int rt_nhn;
+ struct mpls_nh rt_nh[0];
+};
+
+#define for_nexthops(rt) { \
+ int nhsel; struct mpls_nh *nh; \
+ for (nhsel = 0, nh = (rt)->rt_nh; \
+ nhsel < (rt)->rt_nhn; \
+ nh++, nhsel++)
+
+#define change_nexthops(rt) { \
+ int nhsel; struct mpls_nh *nh; \
+ for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh); \
+ nhsel < (rt)->rt_nhn; \
+ nh++, nhsel++)
+
+#define endfor_nexthops(rt) }
+
static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
{
return (struct mpls_shim_hdr *)skb_network_header(skb);
@@ -52,8 +100,10 @@ static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *
int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels,
const u32 label[]);
-int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels,
+int nla_get_labels(const struct nlattr *nla, u32 max_labels, u8 *labels,
u32 label[]);
+int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
+ u8 via[]);
bool mpls_output_possible(const struct net_device *dev);
unsigned int mpls_dev_mtu(const struct net_device *dev);
bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
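
The for_nexthops()/endfor_nexthops() pair added above deliberately opens a brace in the first macro and closes it in the last one, so the loop cursor (nh, nhsel) stays scoped to the block between them. A toy, self-contained illustration of that pattern with stand-in types (this is not kernel code, just the macro shape):

#include <stdio.h>

struct nh { int ifindex; };
struct route { int nhn; struct nh nhs[4]; };

#define for_nexthops(rt) { \
        int nhsel; struct nh *nh; \
        for (nhsel = 0, nh = (rt)->nhs; nhsel < (rt)->nhn; nh++, nhsel++)
#define endfor_nexthops(rt) }

int main(void)
{
        struct route rt = { .nhn = 2, .nhs = { { 3 }, { 5 } } };

        for_nexthops(&rt) {
                printf("nexthop %d via ifindex %d\n", nhsel, nh->ifindex);
        } endfor_nexthops(&rt);
        return 0;
}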
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 6799c8d470c6..80e1f09397c0 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -548,11 +548,11 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
struct sw_flow_match *match, bool is_mask,
bool log)
{
+ bool ttl = false, ipv4 = false, ipv6 = false;
+ __be16 tun_flags = 0;
+ int opts_type = 0;
struct nlattr *a;
int rem;
- bool ttl = false;
- __be16 tun_flags = 0, ipv4 = false, ipv6 = false;
- int opts_type = 0;
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 2735e9c4a3b8..7a568ca8da54 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -128,7 +128,7 @@ static struct vport_ops ovs_geneve_vport_ops = {
.create = geneve_create,
.destroy = ovs_netdev_tunnel_destroy,
.get_options = geneve_get_options,
- .send = ovs_netdev_send,
+ .send = dev_queue_xmit,
.owner = THIS_MODULE,
.get_egress_tun_info = geneve_get_egress_tun_info,
};
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 4d24481669c9..cdb758ab01cf 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -94,7 +94,7 @@ static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
static struct vport_ops ovs_gre_vport_ops = {
.type = OVS_VPORT_TYPE_GRE,
.create = gre_create,
- .send = ovs_netdev_send,
+ .send = dev_queue_xmit,
.get_egress_tun_info = gre_get_egress_tun_info,
.destroy = ovs_netdev_tunnel_destroy,
.owner = THIS_MODULE,
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 388b8a6bf112..7f0a8bd08857 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -202,22 +202,21 @@ static void internal_dev_destroy(struct vport *vport)
rtnl_unlock();
}
-static void internal_dev_recv(struct vport *vport, struct sk_buff *skb)
+static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
{
- struct net_device *netdev = vport->dev;
+ struct net_device *netdev = skb->dev;
struct pcpu_sw_netstats *stats;
if (unlikely(!(netdev->flags & IFF_UP))) {
kfree_skb(skb);
netdev->stats.rx_dropped++;
- return;
+ return NETDEV_TX_OK;
}
skb_dst_drop(skb);
nf_reset(skb);
secpath_reset(skb);
- skb->dev = netdev;
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, netdev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
@@ -229,6 +228,7 @@ static void internal_dev_recv(struct vport *vport, struct sk_buff *skb)
u64_stats_update_end(&stats->syncp);
netif_rx(skb);
+ return NETDEV_TX_OK;
}
static struct vport_ops ovs_internal_vport_ops = {
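
With internal_dev_recv() now taking only an sk_buff and returning netdev_tx_t, it has the same shape as dev_queue_xmit(), which is why the netdev and tunnel vports elsewhere in this series can point ->send straight at dev_queue_xmit() with no wrapper. A toy userspace illustration of that function-pointer compatibility (stand-in types and hypothetical names, not the kernel definitions):

#include <stdio.h>

typedef int netdev_tx_t;          /* stand-in for the kernel typedef */
struct sk_buff { int len; };      /* stand-in */

static netdev_tx_t fake_dev_queue_xmit(struct sk_buff *skb)
{
        printf("xmit %d bytes via the stack\n", skb->len);
        return 0;
}

static netdev_tx_t fake_internal_dev_recv(struct sk_buff *skb)
{
        printf("loop %d bytes back into the datapath\n", skb->len);
        return 0;
}

struct vport_ops { netdev_tx_t (*send)(struct sk_buff *); };

int main(void)
{
        struct sk_buff skb = { .len = 64 };
        struct vport_ops netdev_ops   = { .send = fake_dev_queue_xmit };
        struct vport_ops internal_ops = { .send = fake_internal_dev_recv };

        netdev_ops.send(&skb);      /* same slot, two different backends */
        internal_ops.send(&skb);
        return 0;
}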
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index f7e8dcce7ada..b327368a3848 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -190,37 +190,6 @@ void ovs_netdev_tunnel_destroy(struct vport *vport)
}
EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
-static unsigned int packet_length(const struct sk_buff *skb)
-{
- unsigned int length = skb->len - ETH_HLEN;
-
- if (skb->protocol == htons(ETH_P_8021Q))
- length -= VLAN_HLEN;
-
- return length;
-}
-
-void ovs_netdev_send(struct vport *vport, struct sk_buff *skb)
-{
- int mtu = vport->dev->mtu;
-
- if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
- net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
- vport->dev->name,
- packet_length(skb), mtu);
- vport->dev->stats.tx_errors++;
- goto drop;
- }
-
- skb->dev = vport->dev;
- dev_queue_xmit(skb);
- return;
-
-drop:
- kfree_skb(skb);
-}
-EXPORT_SYMBOL_GPL(ovs_netdev_send);
-
/* Returns null if this device is not attached to a datapath. */
struct vport *ovs_netdev_get_vport(struct net_device *dev)
{
@@ -235,7 +204,7 @@ static struct vport_ops ovs_netdev_vport_ops = {
.type = OVS_VPORT_TYPE_NETDEV,
.create = netdev_create,
.destroy = netdev_destroy,
- .send = ovs_netdev_send,
+ .send = dev_queue_xmit,
};
int __init ovs_netdev_init(void)
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index bf22fcedbc69..19e29c12adcc 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -27,7 +27,6 @@
struct vport *ovs_netdev_get_vport(struct net_device *dev);
struct vport *ovs_netdev_link(struct vport *vport, const char *name);
-void ovs_netdev_send(struct vport *vport, struct sk_buff *skb);
void ovs_netdev_detach_dev(struct vport *);
int __init ovs_netdev_init(void);
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index fb3cdb85905d..6f700710d413 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -170,7 +170,7 @@ static struct vport_ops ovs_vxlan_netdev_vport_ops = {
.create = vxlan_create,
.destroy = ovs_netdev_tunnel_destroy,
.get_options = vxlan_get_options,
- .send = ovs_netdev_send,
+ .send = dev_queue_xmit,
.get_egress_tun_info = vxlan_get_egress_tun_info,
};
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 12a36ac21eda..ef19d0b77d13 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -537,3 +537,33 @@ int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
return vport->ops->get_egress_tun_info(vport, skb, upcall);
}
+
+static unsigned int packet_length(const struct sk_buff *skb)
+{
+ unsigned int length = skb->len - ETH_HLEN;
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+ length -= VLAN_HLEN;
+
+ return length;
+}
+
+void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
+{
+ int mtu = vport->dev->mtu;
+
+ if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
+ net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
+ vport->dev->name,
+ packet_length(skb), mtu);
+ vport->dev->stats.tx_errors++;
+ goto drop;
+ }
+
+ skb->dev = vport->dev;
+ vport->ops->send(skb);
+ return;
+
+drop:
+ kfree_skb(skb);
+}
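
ovs_vport_send() above now hosts the over-MTU drop that previously lived only in the netdev vport, so every vport type gets the same check before its ->send callback runs. A standalone sketch of that length check (userspace types and hypothetical frame sizes); note that the VLAN tag is not counted against the L3 MTU:

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN  14
#define VLAN_HLEN 4

static unsigned int packet_length(unsigned int skb_len, bool vlan_tagged)
{
        unsigned int length = skb_len - ETH_HLEN;

        if (vlan_tagged)
                length -= VLAN_HLEN;
        return length;
}

int main(void)
{
        unsigned int mtu = 1500;
        unsigned int frame = 1522;   /* assumed: max-size VLAN-tagged frame */

        if (packet_length(frame, true) > mtu)
                printf("drop: %u > %u\n", packet_length(frame, true), mtu);
        else
                printf("send: %u <= %u\n", packet_length(frame, true), mtu);
        return 0;
}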
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index a413f3ae6a7b..885607f28d56 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -153,7 +153,7 @@ struct vport_ops {
int (*set_options)(struct vport *, struct nlattr *);
int (*get_options)(const struct vport *, struct sk_buff *);
- void (*send)(struct vport *, struct sk_buff *);
+ netdev_tx_t (*send) (struct sk_buff *skb);
int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
struct dp_upcall_info *upcall);
@@ -234,9 +234,6 @@ static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
return rt;
}
-static inline void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
-{
- vport->ops->send(vport, skb);
-}
+void ovs_vport_send(struct vport *vport, struct sk_buff *skb);
#endif /* vport.h */
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 4f5543dd2524..da72ed32f143 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -174,6 +174,16 @@ config CFG80211_INTERNAL_REGDB
Most distributions have a CRDA package. So if unsure, say N.
+config CFG80211_CRDA_SUPPORT
+ bool "support CRDA" if CFG80211_INTERNAL_REGDB
+ default y
+ depends on CFG80211
+ help
+ You should enable this option unless you know for sure you have no
+	  need for it, for example when using internal regdb (above).
+
+ If unsure, say Y.
+
config CFG80211_WEXT
bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT
depends on CFG80211
diff --git a/net/wireless/core.c b/net/wireless/core.c
index f223026ddb03..b0915515640e 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -461,6 +461,9 @@ use_default_name:
rdev->wiphy.max_num_csa_counters = 1;
+ rdev->wiphy.max_sched_scan_plans = 1;
+ rdev->wiphy.max_sched_scan_plan_interval = U32_MAX;
+
return &rdev->wiphy;
}
EXPORT_SYMBOL(wiphy_new_nm);
@@ -636,7 +639,7 @@ int wiphy_register(struct wiphy *wiphy)
if (WARN_ON(!sband->n_channels))
return -EINVAL;
/*
- * on 60gHz band, there are no legacy rates, so
+ * on 60GHz band, there are no legacy rates, so
* n_bitrates is 0
*/
if (WARN_ON(band != IEEE80211_BAND_60GHZ &&
diff --git a/net/wireless/core.h b/net/wireless/core.h
index b9d5bc8c148d..a618b4b86fa4 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -137,6 +137,7 @@ struct cfg80211_internal_bss {
struct list_head list;
struct list_head hidden_list;
struct rb_node rbn;
+ u64 ts_boottime;
unsigned long ts;
unsigned long refcount;
atomic_t hold;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f05ba8b7af61..d693c9d031fc 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -479,6 +479,12 @@ nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 },
};
+static const struct nla_policy
+nl80211_plan_policy[NL80211_SCHED_SCAN_PLAN_MAX + 1] = {
+ [NL80211_SCHED_SCAN_PLAN_INTERVAL] = { .type = NLA_U32 },
+ [NL80211_SCHED_SCAN_PLAN_ITERATIONS] = { .type = NLA_U32 },
+};
+
static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct cfg80211_registered_device **rdev,
@@ -1304,7 +1310,13 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
rdev->wiphy.max_sched_scan_ie_len) ||
nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
- rdev->wiphy.max_match_sets))
+ rdev->wiphy.max_match_sets) ||
+ nla_put_u32(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS,
+ rdev->wiphy.max_sched_scan_plans) ||
+ nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL,
+ rdev->wiphy.max_sched_scan_plan_interval) ||
+ nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS,
+ rdev->wiphy.max_sched_scan_plan_iterations))
goto nla_put_failure;
if ((rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
@@ -4932,56 +4944,6 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
return err;
}
-static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
- [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 },
- [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 },
- [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 },
- [NL80211_ATTR_FREQ_RANGE_MAX_BW] = { .type = NLA_U32 },
- [NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN] = { .type = NLA_U32 },
- [NL80211_ATTR_POWER_RULE_MAX_EIRP] = { .type = NLA_U32 },
- [NL80211_ATTR_DFS_CAC_TIME] = { .type = NLA_U32 },
-};
-
-static int parse_reg_rule(struct nlattr *tb[],
- struct ieee80211_reg_rule *reg_rule)
-{
- struct ieee80211_freq_range *freq_range = &reg_rule->freq_range;
- struct ieee80211_power_rule *power_rule = &reg_rule->power_rule;
-
- if (!tb[NL80211_ATTR_REG_RULE_FLAGS])
- return -EINVAL;
- if (!tb[NL80211_ATTR_FREQ_RANGE_START])
- return -EINVAL;
- if (!tb[NL80211_ATTR_FREQ_RANGE_END])
- return -EINVAL;
- if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
- return -EINVAL;
- if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
- return -EINVAL;
-
- reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);
-
- freq_range->start_freq_khz =
- nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
- freq_range->end_freq_khz =
- nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
- freq_range->max_bandwidth_khz =
- nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
-
- power_rule->max_eirp =
- nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);
-
- if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
- power_rule->max_antenna_gain =
- nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);
-
- if (tb[NL80211_ATTR_DFS_CAC_TIME])
- reg_rule->dfs_cac_ms =
- nla_get_u32(tb[NL80211_ATTR_DFS_CAC_TIME]);
-
- return 0;
-}
-
static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
{
char *data = NULL;
@@ -5613,6 +5575,57 @@ out_err:
return err;
}
+#ifdef CONFIG_CFG80211_CRDA_SUPPORT
+static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
+ [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 },
+ [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 },
+ [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 },
+ [NL80211_ATTR_FREQ_RANGE_MAX_BW] = { .type = NLA_U32 },
+ [NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN] = { .type = NLA_U32 },
+ [NL80211_ATTR_POWER_RULE_MAX_EIRP] = { .type = NLA_U32 },
+ [NL80211_ATTR_DFS_CAC_TIME] = { .type = NLA_U32 },
+};
+
+static int parse_reg_rule(struct nlattr *tb[],
+ struct ieee80211_reg_rule *reg_rule)
+{
+ struct ieee80211_freq_range *freq_range = &reg_rule->freq_range;
+ struct ieee80211_power_rule *power_rule = &reg_rule->power_rule;
+
+ if (!tb[NL80211_ATTR_REG_RULE_FLAGS])
+ return -EINVAL;
+ if (!tb[NL80211_ATTR_FREQ_RANGE_START])
+ return -EINVAL;
+ if (!tb[NL80211_ATTR_FREQ_RANGE_END])
+ return -EINVAL;
+ if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
+ return -EINVAL;
+ if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
+ return -EINVAL;
+
+ reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);
+
+ freq_range->start_freq_khz =
+ nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
+ freq_range->end_freq_khz =
+ nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
+ freq_range->max_bandwidth_khz =
+ nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
+
+ power_rule->max_eirp =
+ nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);
+
+ if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
+ power_rule->max_antenna_gain =
+ nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);
+
+ if (tb[NL80211_ATTR_DFS_CAC_TIME])
+ reg_rule->dfs_cac_ms =
+ nla_get_u32(tb[NL80211_ATTR_DFS_CAC_TIME]);
+
+ return 0;
+}
+
static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1];
@@ -5689,6 +5702,7 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
kfree(rd);
return r;
}
+#endif /* CONFIG_CFG80211_CRDA_SUPPORT */
static int validate_scan_freqs(struct nlattr *freqs)
{
@@ -5974,14 +5988,100 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static int
+nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans,
+ struct cfg80211_sched_scan_request *request,
+ struct nlattr **attrs)
+{
+ int tmp, err, i = 0;
+ struct nlattr *attr;
+
+ if (!attrs[NL80211_ATTR_SCHED_SCAN_PLANS]) {
+ u32 interval;
+
+ /*
+ * If scan plans are not specified,
+ * %NL80211_ATTR_SCHED_SCAN_INTERVAL must be specified. In this
+ * case one scan plan will be set with the specified scan
+		 * interval and an infinite number of iterations.
+ */
+ if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
+ return -EINVAL;
+
+ interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
+ if (!interval)
+ return -EINVAL;
+
+ request->scan_plans[0].interval =
+ DIV_ROUND_UP(interval, MSEC_PER_SEC);
+ if (!request->scan_plans[0].interval)
+ return -EINVAL;
+
+ if (request->scan_plans[0].interval >
+ wiphy->max_sched_scan_plan_interval)
+ request->scan_plans[0].interval =
+ wiphy->max_sched_scan_plan_interval;
+
+ return 0;
+ }
+
+ nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_PLANS], tmp) {
+ struct nlattr *plan[NL80211_SCHED_SCAN_PLAN_MAX + 1];
+
+ if (WARN_ON(i >= n_plans))
+ return -EINVAL;
+
+ err = nla_parse(plan, NL80211_SCHED_SCAN_PLAN_MAX,
+ nla_data(attr), nla_len(attr),
+ nl80211_plan_policy);
+ if (err)
+ return err;
+
+ if (!plan[NL80211_SCHED_SCAN_PLAN_INTERVAL])
+ return -EINVAL;
+
+ request->scan_plans[i].interval =
+ nla_get_u32(plan[NL80211_SCHED_SCAN_PLAN_INTERVAL]);
+ if (!request->scan_plans[i].interval ||
+ request->scan_plans[i].interval >
+ wiphy->max_sched_scan_plan_interval)
+ return -EINVAL;
+
+ if (plan[NL80211_SCHED_SCAN_PLAN_ITERATIONS]) {
+ request->scan_plans[i].iterations =
+ nla_get_u32(plan[NL80211_SCHED_SCAN_PLAN_ITERATIONS]);
+ if (!request->scan_plans[i].iterations ||
+ (request->scan_plans[i].iterations >
+ wiphy->max_sched_scan_plan_iterations))
+ return -EINVAL;
+ } else if (i < n_plans - 1) {
+ /*
+ * All scan plans but the last one must specify
+ * a finite number of iterations
+ */
+ return -EINVAL;
+ }
+
+ i++;
+ }
+
+ /*
+ * The last scan plan must not specify the number of
+	 * iterations; it is supposed to run infinitely
+ */
+ if (request->scan_plans[n_plans - 1].iterations)
+ return -EINVAL;
+
+ return 0;
+}
+
static struct cfg80211_sched_scan_request *
nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
struct nlattr **attrs)
{
struct cfg80211_sched_scan_request *request;
struct nlattr *attr;
- int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i;
- u32 interval;
+ int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i, n_plans = 0;
enum ieee80211_band band;
size_t ie_len;
struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
@@ -5990,13 +6090,6 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
if (!is_valid_ie_attr(attrs[NL80211_ATTR_IE]))
return ERR_PTR(-EINVAL);
- if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
- return ERR_PTR(-EINVAL);
-
- interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
- if (interval == 0)
- return ERR_PTR(-EINVAL);
-
if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
n_channels = validate_scan_freqs(
attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
@@ -6060,9 +6153,37 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
if (ie_len > wiphy->max_sched_scan_ie_len)
return ERR_PTR(-EINVAL);
+ if (attrs[NL80211_ATTR_SCHED_SCAN_PLANS]) {
+ /*
+ * NL80211_ATTR_SCHED_SCAN_INTERVAL must not be specified since
+ * each scan plan already specifies its own interval
+ */
+ if (attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
+ return ERR_PTR(-EINVAL);
+
+ nla_for_each_nested(attr,
+ attrs[NL80211_ATTR_SCHED_SCAN_PLANS], tmp)
+ n_plans++;
+ } else {
+ /*
+ * The scan interval attribute is kept for backward
+ * compatibility. If no scan plans are specified and sched scan
+ * interval is specified, one scan plan will be set with this
+		 * scan interval and an infinite number of iterations.
+ */
+ if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
+ return ERR_PTR(-EINVAL);
+
+ n_plans = 1;
+ }
+
+ if (!n_plans || n_plans > wiphy->max_sched_scan_plans)
+ return ERR_PTR(-EINVAL);
+
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ sizeof(*request->match_sets) * n_match_sets
+ + sizeof(*request->scan_plans) * n_plans
+ sizeof(*request->channels) * n_channels
+ ie_len, GFP_KERNEL);
if (!request)
@@ -6090,6 +6211,18 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
}
request->n_match_sets = n_match_sets;
+ if (n_match_sets)
+ request->scan_plans = (void *)(request->match_sets +
+ n_match_sets);
+ else if (request->ie)
+ request->scan_plans = (void *)(request->ie + ie_len);
+ else if (n_ssids)
+ request->scan_plans = (void *)(request->ssids + n_ssids);
+ else
+ request->scan_plans = (void *)(request->channels + n_channels);
+
+ request->n_scan_plans = n_plans;
+
i = 0;
if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
/* user specified, bail out if channel not found */
@@ -6252,7 +6385,10 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
request->delay =
nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
- request->interval = interval;
+ err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs);
+ if (err)
+ goto out_free;
+
request->scan_start = jiffies;
return request;
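
nl80211_parse_sched_scan_plans() above enforces three rules: every plan needs a non-zero interval no larger than the wiphy limit, every plan except the last needs a finite (and bounded) iteration count, and the last plan must leave iterations at 0 so it runs until the scan is stopped; the legacy NL80211_ATTR_SCHED_SCAN_INTERVAL path instead converts milliseconds to seconds with DIV_ROUND_UP and clamps to the limit. A self-contained sketch of those plan rules, with hypothetical limits and plan values:

#include <stdbool.h>
#include <stdio.h>

struct scan_plan { unsigned int interval; unsigned int iterations; };

static bool plans_valid(const struct scan_plan *p, int n,
                        unsigned int max_interval, unsigned int max_iter)
{
        for (int i = 0; i < n; i++) {
                if (!p[i].interval || p[i].interval > max_interval)
                        return false;                    /* bad interval */
                if (i < n - 1 &&
                    (!p[i].iterations || p[i].iterations > max_iter))
                        return false;                    /* must be finite */
        }
        return n > 0 && p[n - 1].iterations == 0;        /* last runs forever */
}

int main(void)
{
        struct scan_plan plans[] = {
                { .interval = 10, .iterations = 5 },  /* 5 fast scans */
                { .interval = 60, .iterations = 0 },  /* then slow, forever */
        };

        printf("valid: %d\n", plans_valid(plans, 2, 3600, 100));
        return 0;
}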
@@ -6605,6 +6741,11 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
jiffies_to_msecs(jiffies - intbss->ts)))
goto nla_put_failure;
+ if (intbss->ts_boottime &&
+ nla_put_u64(msg, NL80211_BSS_LAST_SEEN_BOOTTIME,
+ intbss->ts_boottime))
+ goto nla_put_failure;
+
switch (rdev->wiphy.signal_type) {
case CFG80211_SIGNAL_TYPE_MBM:
if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
@@ -8845,7 +8986,7 @@ static int nl80211_send_wowlan_tcp(struct sk_buff *msg,
static int nl80211_send_wowlan_nd(struct sk_buff *msg,
struct cfg80211_sched_scan_request *req)
{
- struct nlattr *nd, *freqs, *matches, *match;
+ struct nlattr *nd, *freqs, *matches, *match, *scan_plans, *scan_plan;
int i;
if (!req)
@@ -8855,7 +8996,9 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg,
if (!nd)
return -ENOBUFS;
- if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL, req->interval))
+ if (req->n_scan_plans == 1 &&
+ nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL,
+ req->scan_plans[0].interval * 1000))
return -ENOBUFS;
if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
@@ -8882,6 +9025,23 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg,
nla_nest_end(msg, matches);
}
+ scan_plans = nla_nest_start(msg, NL80211_ATTR_SCHED_SCAN_PLANS);
+ if (!scan_plans)
+ return -ENOBUFS;
+
+ for (i = 0; i < req->n_scan_plans; i++) {
+ scan_plan = nla_nest_start(msg, i + 1);
+ if (!scan_plan ||
+ nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL,
+ req->scan_plans[i].interval) ||
+ (req->scan_plans[i].iterations &&
+ nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_ITERATIONS,
+ req->scan_plans[i].iterations)))
+ return -ENOBUFS;
+ nla_nest_end(msg, scan_plan);
+ }
+ nla_nest_end(msg, scan_plans);
+
nla_nest_end(msg, nd);
return 0;
@@ -10737,6 +10897,7 @@ static const struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_RTNL,
/* can be retrieved by unprivileged users */
},
+#ifdef CONFIG_CFG80211_CRDA_SUPPORT
{
.cmd = NL80211_CMD_SET_REG,
.doit = nl80211_set_reg,
@@ -10744,6 +10905,7 @@ static const struct genl_ops nl80211_ops[] = {
.flags = GENL_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_RTNL,
},
+#endif
{
.cmd = NL80211_CMD_REQ_SET_REG,
.doit = nl80211_req_set_reg,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 7258246b7458..2e8d6f39ed56 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -135,10 +135,7 @@ static spinlock_t reg_indoor_lock;
/* Used to track the userspace process controlling the indoor setting */
static u32 reg_is_indoor_portid;
-/* Max number of consecutive attempts to communicate with CRDA */
-#define REG_MAX_CRDA_TIMEOUTS 10
-
-static u32 reg_crda_timeouts;
+static void restore_regulatory_settings(bool reset_user);
static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
{
@@ -226,9 +223,6 @@ static DECLARE_DELAYED_WORK(reg_check_chans, reg_check_chans_work);
static void reg_todo(struct work_struct *work);
static DECLARE_WORK(reg_work, reg_todo);
-static void reg_timeout_work(struct work_struct *work);
-static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
-
/* We keep a static world regulatory domain in case of the absence of CRDA */
static const struct ieee80211_regdomain world_regdom = {
.n_reg_rules = 8,
@@ -262,7 +256,7 @@ static const struct ieee80211_regdomain world_regdom = {
REG_RULE(5745-10, 5825+10, 80, 6, 20,
NL80211_RRF_NO_IR),
- /* IEEE 802.11ad (60gHz), channels 1..3 */
+ /* IEEE 802.11ad (60GHz), channels 1..3 */
REG_RULE(56160+2160*1-1080, 56160+2160*3+1080, 2160, 0, 0, 0),
}
};
@@ -279,6 +273,9 @@ MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
static void reg_free_request(struct regulatory_request *request)
{
+ if (request == &core_request_world)
+ return;
+
if (request != get_last_request())
kfree(request);
}
@@ -453,68 +450,70 @@ reg_copy_regd(const struct ieee80211_regdomain *src_regd)
}
#ifdef CONFIG_CFG80211_INTERNAL_REGDB
-struct reg_regdb_search_request {
- char alpha2[2];
+struct reg_regdb_apply_request {
struct list_head list;
+ const struct ieee80211_regdomain *regdom;
};
-static LIST_HEAD(reg_regdb_search_list);
-static DEFINE_MUTEX(reg_regdb_search_mutex);
+static LIST_HEAD(reg_regdb_apply_list);
+static DEFINE_MUTEX(reg_regdb_apply_mutex);
-static void reg_regdb_search(struct work_struct *work)
+static void reg_regdb_apply(struct work_struct *work)
{
- struct reg_regdb_search_request *request;
- const struct ieee80211_regdomain *curdom, *regdom = NULL;
- int i;
+ struct reg_regdb_apply_request *request;
rtnl_lock();
- mutex_lock(&reg_regdb_search_mutex);
- while (!list_empty(&reg_regdb_search_list)) {
- request = list_first_entry(&reg_regdb_search_list,
- struct reg_regdb_search_request,
+ mutex_lock(&reg_regdb_apply_mutex);
+ while (!list_empty(&reg_regdb_apply_list)) {
+ request = list_first_entry(&reg_regdb_apply_list,
+ struct reg_regdb_apply_request,
list);
list_del(&request->list);
- for (i = 0; i < reg_regdb_size; i++) {
- curdom = reg_regdb[i];
-
- if (alpha2_equal(request->alpha2, curdom->alpha2)) {
- regdom = reg_copy_regd(curdom);
- break;
- }
- }
-
+ set_regdom(request->regdom, REGD_SOURCE_INTERNAL_DB);
kfree(request);
}
- mutex_unlock(&reg_regdb_search_mutex);
-
- if (!IS_ERR_OR_NULL(regdom))
- set_regdom(regdom, REGD_SOURCE_INTERNAL_DB);
+ mutex_unlock(&reg_regdb_apply_mutex);
rtnl_unlock();
}
-static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
+static DECLARE_WORK(reg_regdb_work, reg_regdb_apply);
-static void reg_regdb_query(const char *alpha2)
+static int reg_query_builtin(const char *alpha2)
{
- struct reg_regdb_search_request *request;
+ const struct ieee80211_regdomain *regdom = NULL;
+ struct reg_regdb_apply_request *request;
+ unsigned int i;
- if (!alpha2)
- return;
+ for (i = 0; i < reg_regdb_size; i++) {
+ if (alpha2_equal(alpha2, reg_regdb[i]->alpha2)) {
+ regdom = reg_regdb[i];
+ break;
+ }
+ }
+
+ if (!regdom)
+ return -ENODATA;
- request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL);
+ request = kzalloc(sizeof(struct reg_regdb_apply_request), GFP_KERNEL);
if (!request)
- return;
+ return -ENOMEM;
- memcpy(request->alpha2, alpha2, 2);
+ request->regdom = reg_copy_regd(regdom);
+ if (IS_ERR_OR_NULL(request->regdom)) {
+ kfree(request);
+ return -ENOMEM;
+ }
- mutex_lock(&reg_regdb_search_mutex);
- list_add_tail(&request->list, &reg_regdb_search_list);
- mutex_unlock(&reg_regdb_search_mutex);
+ mutex_lock(&reg_regdb_apply_mutex);
+ list_add_tail(&request->list, &reg_regdb_apply_list);
+ mutex_unlock(&reg_regdb_apply_mutex);
schedule_work(&reg_regdb_work);
+
+ return 0;
}
/* Feel free to add any other sanity checks here */
@@ -525,9 +524,45 @@ static void reg_regdb_size_check(void)
}
#else
static inline void reg_regdb_size_check(void) {}
-static inline void reg_regdb_query(const char *alpha2) {}
+static inline int reg_query_builtin(const char *alpha2)
+{
+ return -ENODATA;
+}
#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+#ifdef CONFIG_CFG80211_CRDA_SUPPORT
+/* Max number of consecutive attempts to communicate with CRDA */
+#define REG_MAX_CRDA_TIMEOUTS 10
+
+static u32 reg_crda_timeouts;
+
+static void crda_timeout_work(struct work_struct *work);
+static DECLARE_DELAYED_WORK(crda_timeout, crda_timeout_work);
+
+static void crda_timeout_work(struct work_struct *work)
+{
+ REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
+ rtnl_lock();
+ reg_crda_timeouts++;
+ restore_regulatory_settings(true);
+ rtnl_unlock();
+}
+
+static void cancel_crda_timeout(void)
+{
+ cancel_delayed_work(&crda_timeout);
+}
+
+static void cancel_crda_timeout_sync(void)
+{
+ cancel_delayed_work_sync(&crda_timeout);
+}
+
+static void reset_crda_timeouts(void)
+{
+ reg_crda_timeouts = 0;
+}
+
/*
* This lets us keep regulatory code which is updated on a regulatory
* basis in userspace.
@@ -536,13 +571,11 @@ static int call_crda(const char *alpha2)
{
char country[12];
char *env[] = { country, NULL };
+ int ret;
snprintf(country, sizeof(country), "COUNTRY=%c%c",
alpha2[0], alpha2[1]);
- /* query internal regulatory database (if it exists) */
- reg_regdb_query(alpha2);
-
if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) {
pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n");
return -EINVAL;
@@ -554,18 +587,34 @@ static int call_crda(const char *alpha2)
else
pr_debug("Calling CRDA to update world regulatory domain\n");
- return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
+ ret = kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
+ if (ret)
+ return ret;
+
+ queue_delayed_work(system_power_efficient_wq,
+ &crda_timeout, msecs_to_jiffies(3142));
+ return 0;
+}
+#else
+static inline void cancel_crda_timeout(void) {}
+static inline void cancel_crda_timeout_sync(void) {}
+static inline void reset_crda_timeouts(void) {}
+static inline int call_crda(const char *alpha2)
+{
+ return -ENODATA;
}
+#endif /* CONFIG_CFG80211_CRDA_SUPPORT */
-static enum reg_request_treatment
-reg_call_crda(struct regulatory_request *request)
+static bool reg_query_database(struct regulatory_request *request)
{
- if (call_crda(request->alpha2))
- return REG_REQ_IGNORE;
+ /* query internal regulatory database (if it exists) */
+ if (reg_query_builtin(request->alpha2) == 0)
+ return true;
- queue_delayed_work(system_power_efficient_wq,
- &reg_timeout, msecs_to_jiffies(3142));
- return REG_REQ_OK;
+ if (call_crda(request->alpha2) == 0)
+ return true;
+
+ return false;
}
bool reg_is_valid_request(const char *alpha2)
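
reg_query_database() above replaces reg_call_crda(): a hint is first matched against the built-in regdb (when CONFIG_CFG80211_INTERNAL_REGDB is set) and only falls back to the CRDA uevent when that misses; if both paths fail, the caller treats the hint as REG_REQ_IGNORE. A minimal userspace sketch of that fallback order (stub backends, assumed 0/-errno return convention):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int query_builtin(const char *alpha2) { (void)alpha2; return -ENODATA; }
static int call_crda(const char *alpha2)     { (void)alpha2; return 0; }

static bool query_database(const char *alpha2)
{
        if (query_builtin(alpha2) == 0)
                return true;    /* regdomain applied from the built-in table */
        if (call_crda(alpha2) == 0)
                return true;    /* wait for CRDA to reply (or time out) */
        return false;           /* neither backend available: ignore the hint */
}

int main(void)
{
        printf("hint accepted: %d\n", query_database("US"));
        return 0;
}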
@@ -1081,11 +1130,11 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
}
EXPORT_SYMBOL(reg_initiator_name);
-#ifdef CONFIG_CFG80211_REG_DEBUG
static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
struct ieee80211_channel *chan,
const struct ieee80211_reg_rule *reg_rule)
{
+#ifdef CONFIG_CFG80211_REG_DEBUG
const struct ieee80211_power_rule *power_rule;
const struct ieee80211_freq_range *freq_range;
char max_antenna_gain[32], bw[32];
@@ -1096,7 +1145,7 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
if (!power_rule->max_antenna_gain)
snprintf(max_antenna_gain, sizeof(max_antenna_gain), "N/A");
else
- snprintf(max_antenna_gain, sizeof(max_antenna_gain), "%d",
+ snprintf(max_antenna_gain, sizeof(max_antenna_gain), "%d mBi",
power_rule->max_antenna_gain);
if (reg_rule->flags & NL80211_RRF_AUTO_BW)
@@ -1110,19 +1159,12 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
REG_DBG_PRINT("Updating information on frequency %d MHz with regulatory rule:\n",
chan->center_freq);
- REG_DBG_PRINT("%d KHz - %d KHz @ %s), (%s mBi, %d mBm)\n",
+ REG_DBG_PRINT("(%d KHz - %d KHz @ %s), (%s, %d mBm)\n",
freq_range->start_freq_khz, freq_range->end_freq_khz,
bw, max_antenna_gain,
power_rule->max_eirp);
-}
-#else
-static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
- struct ieee80211_channel *chan,
- const struct ieee80211_reg_rule *reg_rule)
-{
- return;
-}
#endif
+}
/*
* Note that right now we assume the desired channel bandwidth
@@ -1311,7 +1353,8 @@ static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy)
return !(wiphy->features & NL80211_FEATURE_CELL_BASE_REG_HINTS);
}
#else
-static int reg_ignore_cell_hint(struct regulatory_request *pending_request)
+static enum reg_request_treatment
+reg_ignore_cell_hint(struct regulatory_request *pending_request)
{
return REG_REQ_IGNORE;
}
@@ -1846,7 +1889,7 @@ static void reg_set_request_processed(void)
need_more_processing = true;
spin_unlock(&reg_requests_lock);
- cancel_delayed_work(&reg_timeout);
+ cancel_crda_timeout();
if (need_more_processing)
schedule_work(&reg_work);
@@ -1858,19 +1901,18 @@ static void reg_set_request_processed(void)
*
* The wireless subsystem can use this function to process
* a regulatory request issued by the regulatory core.
- *
- * Returns one of the different reg request treatment values.
*/
static enum reg_request_treatment
reg_process_hint_core(struct regulatory_request *core_request)
{
+ if (reg_query_database(core_request)) {
+ core_request->intersect = false;
+ core_request->processed = false;
+ reg_update_last_request(core_request);
+ return REG_REQ_OK;
+ }
- core_request->intersect = false;
- core_request->processed = false;
-
- reg_update_last_request(core_request);
-
- return reg_call_crda(core_request);
+ return REG_REQ_IGNORE;
}
static enum reg_request_treatment
@@ -1915,8 +1957,6 @@ __reg_process_hint_user(struct regulatory_request *user_request)
*
* The wireless subsystem can use this function to process
* a regulatory request initiated by userspace.
- *
- * Returns one of the different reg request treatment values.
*/
static enum reg_request_treatment
reg_process_hint_user(struct regulatory_request *user_request)
@@ -1925,20 +1965,20 @@ reg_process_hint_user(struct regulatory_request *user_request)
treatment = __reg_process_hint_user(user_request);
if (treatment == REG_REQ_IGNORE ||
- treatment == REG_REQ_ALREADY_SET) {
- reg_free_request(user_request);
- return treatment;
- }
+ treatment == REG_REQ_ALREADY_SET)
+ return REG_REQ_IGNORE;
user_request->intersect = treatment == REG_REQ_INTERSECT;
user_request->processed = false;
- reg_update_last_request(user_request);
-
- user_alpha2[0] = user_request->alpha2[0];
- user_alpha2[1] = user_request->alpha2[1];
+ if (reg_query_database(user_request)) {
+ reg_update_last_request(user_request);
+ user_alpha2[0] = user_request->alpha2[0];
+ user_alpha2[1] = user_request->alpha2[1];
+ return REG_REQ_OK;
+ }
- return reg_call_crda(user_request);
+ return REG_REQ_IGNORE;
}
static enum reg_request_treatment
@@ -1986,16 +2026,12 @@ reg_process_hint_driver(struct wiphy *wiphy,
case REG_REQ_OK:
break;
case REG_REQ_IGNORE:
- reg_free_request(driver_request);
- return treatment;
+ return REG_REQ_IGNORE;
case REG_REQ_INTERSECT:
- /* fall through */
case REG_REQ_ALREADY_SET:
regd = reg_copy_regd(get_cfg80211_regdom());
- if (IS_ERR(regd)) {
- reg_free_request(driver_request);
+ if (IS_ERR(regd))
return REG_REQ_IGNORE;
- }
tmp = get_wiphy_regdom(wiphy);
rcu_assign_pointer(wiphy->regd, regd);
@@ -2006,8 +2042,6 @@ reg_process_hint_driver(struct wiphy *wiphy,
driver_request->intersect = treatment == REG_REQ_INTERSECT;
driver_request->processed = false;
- reg_update_last_request(driver_request);
-
/*
* Since CRDA will not be called in this case as we already
* have applied the requested regulatory domain before we just
@@ -2015,11 +2049,17 @@ reg_process_hint_driver(struct wiphy *wiphy,
*/
if (treatment == REG_REQ_ALREADY_SET) {
nl80211_send_reg_change_event(driver_request);
+ reg_update_last_request(driver_request);
reg_set_request_processed();
- return treatment;
+ return REG_REQ_ALREADY_SET;
}
- return reg_call_crda(driver_request);
+ if (reg_query_database(driver_request)) {
+ reg_update_last_request(driver_request);
+ return REG_REQ_OK;
+ }
+
+ return REG_REQ_IGNORE;
}
static enum reg_request_treatment
@@ -2085,12 +2125,11 @@ reg_process_hint_country_ie(struct wiphy *wiphy,
case REG_REQ_OK:
break;
case REG_REQ_IGNORE:
- /* fall through */
+ return REG_REQ_IGNORE;
case REG_REQ_ALREADY_SET:
reg_free_request(country_ie_request);
- return treatment;
+ return REG_REQ_ALREADY_SET;
case REG_REQ_INTERSECT:
- reg_free_request(country_ie_request);
/*
* This doesn't happen yet, not sure we
* ever want to support it for this case.
@@ -2102,9 +2141,12 @@ reg_process_hint_country_ie(struct wiphy *wiphy,
country_ie_request->intersect = false;
country_ie_request->processed = false;
- reg_update_last_request(country_ie_request);
+ if (reg_query_database(country_ie_request)) {
+ reg_update_last_request(country_ie_request);
+ return REG_REQ_OK;
+ }
- return reg_call_crda(country_ie_request);
+ return REG_REQ_IGNORE;
}
/* This processes *all* regulatory hints */
@@ -2118,11 +2160,11 @@ static void reg_process_hint(struct regulatory_request *reg_request)
switch (reg_request->initiator) {
case NL80211_REGDOM_SET_BY_CORE:
- reg_process_hint_core(reg_request);
- return;
+ treatment = reg_process_hint_core(reg_request);
+ break;
case NL80211_REGDOM_SET_BY_USER:
- reg_process_hint_user(reg_request);
- return;
+ treatment = reg_process_hint_user(reg_request);
+ break;
case NL80211_REGDOM_SET_BY_DRIVER:
if (!wiphy)
goto out_free;
@@ -2138,6 +2180,12 @@ static void reg_process_hint(struct regulatory_request *reg_request)
goto out_free;
}
+ if (treatment == REG_REQ_IGNORE)
+ goto out_free;
+
+ WARN(treatment != REG_REQ_OK && treatment != REG_REQ_ALREADY_SET,
+ "unexpected treatment value %d\n", treatment);
+
/* This is required so that the orig_* parameters are saved.
* NOTE: treatment must be set for any case that reaches here!
*/
@@ -2345,7 +2393,7 @@ int regulatory_hint_user(const char *alpha2,
request->user_reg_hint_type = user_reg_hint_type;
/* Allow calling CRDA again */
- reg_crda_timeouts = 0;
+ reset_crda_timeouts();
queue_regulatory_request(request);
@@ -2417,7 +2465,7 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
request->initiator = NL80211_REGDOM_SET_BY_DRIVER;
/* Allow calling CRDA again */
- reg_crda_timeouts = 0;
+ reset_crda_timeouts();
queue_regulatory_request(request);
@@ -2473,7 +2521,7 @@ void regulatory_hint_country_ie(struct wiphy *wiphy, enum ieee80211_band band,
request->country_ie_env = env;
/* Allow calling CRDA again */
- reg_crda_timeouts = 0;
+ reset_crda_timeouts();
queue_regulatory_request(request);
request = NULL;
@@ -2874,11 +2922,8 @@ static int reg_set_rd_driver(const struct ieee80211_regdomain *rd,
}
request_wiphy = wiphy_idx_to_wiphy(driver_request->wiphy_idx);
- if (!request_wiphy) {
- queue_delayed_work(system_power_efficient_wq,
- &reg_timeout, 0);
+ if (!request_wiphy)
return -ENODEV;
- }
if (!driver_request->intersect) {
if (request_wiphy->regd)
@@ -2935,11 +2980,8 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd,
}
request_wiphy = wiphy_idx_to_wiphy(country_ie_request->wiphy_idx);
- if (!request_wiphy) {
- queue_delayed_work(system_power_efficient_wq,
- &reg_timeout, 0);
+ if (!request_wiphy)
return -ENODEV;
- }
if (country_ie_request->intersect)
return -EINVAL;
@@ -2966,7 +3008,7 @@ int set_regdom(const struct ieee80211_regdomain *rd,
}
if (regd_src == REGD_SOURCE_CRDA)
- reg_crda_timeouts = 0;
+ reset_crda_timeouts();
lr = get_last_request();
@@ -3123,15 +3165,6 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy)
lr->country_ie_env = ENVIRON_ANY;
}
-static void reg_timeout_work(struct work_struct *work)
-{
- REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
- rtnl_lock();
- reg_crda_timeouts++;
- restore_regulatory_settings(true);
- rtnl_unlock();
-}
-
/*
* See http://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii, for
* UNII band definitions
@@ -3217,7 +3250,7 @@ void regulatory_exit(void)
struct reg_beacon *reg_beacon, *btmp;
cancel_work_sync(&reg_work);
- cancel_delayed_work_sync(&reg_timeout);
+ cancel_crda_timeout_sync();
cancel_delayed_work_sync(&reg_check_chans);
/* Lock to suppress warnings */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 3a50aa2553bf..14d5369eb778 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -266,8 +266,7 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
spin_lock_bh(&rdev->bss_lock);
__cfg80211_bss_expire(rdev, request->scan_start);
spin_unlock_bh(&rdev->bss_lock);
- request->scan_start =
- jiffies + msecs_to_jiffies(request->interval);
+ request->scan_start = jiffies;
}
nl80211_send_sched_scan_results(rdev, request->dev);
}
@@ -839,6 +838,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
found->pub.signal = tmp->pub.signal;
found->pub.capability = tmp->pub.capability;
found->ts = tmp->ts;
+ found->ts_boottime = tmp->ts_boottime;
} else {
struct cfg80211_internal_bss *new;
struct cfg80211_internal_bss *hidden;
@@ -938,14 +938,13 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
}
/* Returned bss is reference counted and must be cleaned up appropriately. */
-struct cfg80211_bss*
-cfg80211_inform_bss_width(struct wiphy *wiphy,
- struct ieee80211_channel *rx_channel,
- enum nl80211_bss_scan_width scan_width,
- enum cfg80211_bss_frame_type ftype,
- const u8 *bssid, u64 tsf, u16 capability,
- u16 beacon_interval, const u8 *ie, size_t ielen,
- s32 signal, gfp_t gfp)
+struct cfg80211_bss *
+cfg80211_inform_bss_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ enum cfg80211_bss_frame_type ftype,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ gfp_t gfp)
{
struct cfg80211_bss_ies *ies;
struct ieee80211_channel *channel;
@@ -957,19 +956,21 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
return NULL;
if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
- (signal < 0 || signal > 100)))
+ (data->signal < 0 || data->signal > 100)))
return NULL;
- channel = cfg80211_get_bss_channel(wiphy, ie, ielen, rx_channel);
+ channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
if (!channel)
return NULL;
memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
tmp.pub.channel = channel;
- tmp.pub.scan_width = scan_width;
- tmp.pub.signal = signal;
+ tmp.pub.scan_width = data->scan_width;
+ tmp.pub.signal = data->signal;
tmp.pub.beacon_interval = beacon_interval;
tmp.pub.capability = capability;
+ tmp.ts_boottime = data->boottime_ns;
+
/*
* If we do not know here whether the IEs are from a Beacon or Probe
* Response frame, we need to pick one of the options and only use it
@@ -999,7 +1000,7 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
}
rcu_assign_pointer(tmp.pub.ies, ies);
- signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
+ signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
wiphy->max_adj_channel_rssi_comp;
res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
if (!res)
@@ -1019,15 +1020,15 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
-EXPORT_SYMBOL(cfg80211_inform_bss_width);
+EXPORT_SYMBOL(cfg80211_inform_bss_data);
-/* Returned bss is reference counted and must be cleaned up appropriately. */
+/* cfg80211_inform_bss_width_frame helper */
struct cfg80211_bss *
-cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
- struct ieee80211_channel *rx_channel,
- enum nl80211_bss_scan_width scan_width,
- struct ieee80211_mgmt *mgmt, size_t len,
- s32 signal, gfp_t gfp)
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp)
+
{
struct cfg80211_internal_bss tmp = {}, *res;
struct cfg80211_bss_ies *ies;
@@ -1040,8 +1041,7 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
offsetof(struct ieee80211_mgmt, u.beacon.variable));
- trace_cfg80211_inform_bss_width_frame(wiphy, rx_channel, scan_width, mgmt,
- len, signal);
+ trace_cfg80211_inform_bss_frame(wiphy, data, mgmt, len);
if (WARN_ON(!mgmt))
return NULL;
@@ -1050,14 +1050,14 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
return NULL;
if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
- (signal < 0 || signal > 100)))
+ (data->signal < 0 || data->signal > 100)))
return NULL;
if (WARN_ON(len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable)))
return NULL;
channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
- ielen, rx_channel);
+ ielen, data->chan);
if (!channel)
return NULL;
@@ -1077,12 +1077,13 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
tmp.pub.channel = channel;
- tmp.pub.scan_width = scan_width;
- tmp.pub.signal = signal;
+ tmp.pub.scan_width = data->scan_width;
+ tmp.pub.signal = data->signal;
tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
+ tmp.ts_boottime = data->boottime_ns;
- signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
+ signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
wiphy->max_adj_channel_rssi_comp;
res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
if (!res)
@@ -1102,7 +1103,7 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
-EXPORT_SYMBOL(cfg80211_inform_bss_width_frame);
+EXPORT_SYMBOL(cfg80211_inform_bss_frame_data);
void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
{
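
The cfg80211_inform_bss_width*() entry points are replaced above by *_data() variants that take a struct cfg80211_inform_bss descriptor, which is how the new boottime timestamp reaches ts_boottime. A hedged, driver-side fragment (kernel build context assumed; the surrounding function and variable names are hypothetical) showing the intended call pattern:

#include <net/cfg80211.h>

/* Hypothetical rx-path helper: report one received beacon/probe response. */
static void example_report_frame(struct wiphy *wiphy,
                                 struct ieee80211_channel *chan,
                                 struct ieee80211_mgmt *mgmt, size_t len,
                                 s32 signal_mbm)
{
        struct cfg80211_inform_bss data = {
                .chan = chan,
                .scan_width = NL80211_BSS_CHAN_WIDTH_20,
                .signal = signal_mbm,
                .boottime_ns = ktime_get_boot_ns(), /* ends up in ts_boottime */
        };
        struct cfg80211_bss *bss;

        bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len,
                                             GFP_ATOMIC);
        if (bss)
                cfg80211_put_bss(wiphy, bss); /* drop the returned reference */
}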
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index a808279a432a..0c392d36781b 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2670,30 +2670,30 @@ TRACE_EVENT(cfg80211_get_bss,
__entry->privacy)
);
-TRACE_EVENT(cfg80211_inform_bss_width_frame,
- TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
- enum nl80211_bss_scan_width scan_width,
- struct ieee80211_mgmt *mgmt, size_t len,
- s32 signal),
- TP_ARGS(wiphy, channel, scan_width, mgmt, len, signal),
+TRACE_EVENT(cfg80211_inform_bss_frame,
+ TP_PROTO(struct wiphy *wiphy, struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len),
+ TP_ARGS(wiphy, data, mgmt, len),
TP_STRUCT__entry(
WIPHY_ENTRY
CHAN_ENTRY
__field(enum nl80211_bss_scan_width, scan_width)
__dynamic_array(u8, mgmt, len)
__field(s32, signal)
+ __field(u64, ts_boottime)
),
TP_fast_assign(
WIPHY_ASSIGN;
- CHAN_ASSIGN(channel);
- __entry->scan_width = scan_width;
+ CHAN_ASSIGN(data->chan);
+ __entry->scan_width = data->scan_width;
if (mgmt)
memcpy(__get_dynamic_array(mgmt), mgmt, len);
- __entry->signal = signal;
+ __entry->signal = data->signal;
+ __entry->ts_boottime = data->boottime_ns;
),
- TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d",
+ TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d, tsb:%llu",
WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width,
- __entry->signal)
+ __entry->signal, (unsigned long long)__entry->ts_boottime)
);
DECLARE_EVENT_CLASS(cfg80211_bss_evt,