Diffstat (limited to 'net')
-rw-r--r--  net/atm/clip.c | 17
-rw-r--r--  net/batman-adv/bat_algo.h (renamed from net/batman-adv/bat_ogm.h) | 20
-rw-r--r--  net/batman-adv/bat_debugfs.c | 22
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 245
-rw-r--r--  net/batman-adv/bat_sysfs.c | 9
-rw-r--r--  net/batman-adv/hard-interface.c | 24
-rw-r--r--  net/batman-adv/icmp_socket.c | 6
-rw-r--r--  net/batman-adv/main.c | 107
-rw-r--r--  net/batman-adv/main.h | 15
-rw-r--r--  net/batman-adv/originator.c | 1
-rw-r--r--  net/batman-adv/packet.h | 38
-rw-r--r--  net/batman-adv/routing.c | 24
-rw-r--r--  net/batman-adv/send.c | 7
-rw-r--r--  net/batman-adv/soft-interface.c | 23
-rw-r--r--  net/batman-adv/translation-table.c | 67
-rw-r--r--  net/batman-adv/types.h | 21
-rw-r--r--  net/batman-adv/unicast.c | 16
-rw-r--r--  net/batman-adv/vis.c | 14
-rw-r--r--  net/bridge/br_device.c | 5
-rw-r--r--  net/caif/caif_dev.c | 2
-rw-r--r--  net/caif/caif_socket.c | 113
-rw-r--r--  net/caif/chnl_net.c | 15
-rw-r--r--  net/core/datagram.c | 26
-rw-r--r--  net/core/dev.c | 94
-rw-r--r--  net/core/ethtool.c | 2
-rw-r--r--  net/core/neighbour.c | 90
-rw-r--r--  net/core/netpoll.c | 71
-rw-r--r--  net/core/rtnetlink.c | 9
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  net/core/sock.c | 21
-rw-r--r--  net/decnet/dn_neigh.c | 24
-rw-r--r--  net/decnet/dn_route.c | 3
-rw-r--r--  net/ethernet/eth.c | 2
-rw-r--r--  net/ieee802154/6lowpan.c | 16
-rw-r--r--  net/ipv4/af_inet.c | 1
-rw-r--r--  net/ipv4/inet_diag.c | 18
-rw-r--r--  net/ipv4/ip_gre.c | 21
-rw-r--r--  net/ipv4/ip_sockglue.c | 37
-rw-r--r--  net/ipv4/ipip.c | 3
-rw-r--r--  net/ipv4/ping.c | 3
-rw-r--r--  net/ipv4/proc.c | 1
-rw-r--r--  net/ipv4/raw.c | 3
-rw-r--r--  net/ipv4/route.c | 7
-rw-r--r--  net/ipv4/tcp_ipv4.c | 297
-rw-r--r--  net/ipv4/tcp_minisocks.c | 12
-rw-r--r--  net/ipv4/tcp_output.c | 4
-rw-r--r--  net/ipv4/udp.c | 7
-rw-r--r--  net/ipv6/af_inet6.c | 1
-rw-r--r--  net/ipv6/anycast.c | 29
-rw-r--r--  net/ipv6/datagram.c | 2
-rw-r--r--  net/ipv6/icmp.c | 4
-rw-r--r--  net/ipv6/ip6_fib.c | 19
-rw-r--r--  net/ipv6/ip6_output.c | 10
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 38
-rw-r--r--  net/ipv6/ndisc.c | 30
-rw-r--r--  net/ipv6/raw.c | 2
-rw-r--r--  net/ipv6/reassembly.c | 7
-rw-r--r--  net/ipv6/route.c | 15
-rw-r--r--  net/ipv6/sit.c | 20
-rw-r--r--  net/ipv6/tcp_ipv6.c | 231
-rw-r--r--  net/ipv6/udp.c | 7
-rw-r--r--  net/ipv6/xfrm6_output.c | 2
-rw-r--r--  net/iucv/af_iucv.c | 202
-rw-r--r--  net/l2tp/l2tp_eth.c | 2
-rw-r--r--  net/mac80211/Makefile | 4
-rw-r--r--  net/mac80211/cfg.c | 52
-rw-r--r--  net/mac80211/chan.c | 28
-rw-r--r--  net/mac80211/debugfs.c | 6
-rw-r--r--  net/mac80211/debugfs_netdev.c | 35
-rw-r--r--  net/mac80211/debugfs_sta.c | 5
-rw-r--r--  net/mac80211/driver-ops.h | 33
-rw-r--r--  net/mac80211/driver-trace.h | 32
-rw-r--r--  net/mac80211/ibss.c | 98
-rw-r--r--  net/mac80211/ieee80211_i.h | 82
-rw-r--r--  net/mac80211/iface.c | 20
-rw-r--r--  net/mac80211/key.c | 38
-rw-r--r--  net/mac80211/main.c | 19
-rw-r--r--  net/mac80211/mesh_hwmp.c | 2
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 6
-rw-r--r--  net/mac80211/mesh_plink.c | 6
-rw-r--r--  net/mac80211/mlme.c | 1389
-rw-r--r--  net/mac80211/pm.c | 11
-rw-r--r--  net/mac80211/rate.c | 151
-rw-r--r--  net/mac80211/rate.h | 4
-rw-r--r--  net/mac80211/rx.c | 33
-rw-r--r--  net/mac80211/sta_info.c | 328
-rw-r--r--  net/mac80211/sta_info.h | 55
-rw-r--r--  net/mac80211/status.c | 6
-rw-r--r--  net/mac80211/tx.c | 5
-rw-r--r--  net/mac80211/util.c | 32
-rw-r--r--  net/mac80211/work.c | 814
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 53
-rw-r--r--  net/netfilter/nfnetlink_acct.c | 6
-rw-r--r--  net/netlink/af_netlink.c | 30
-rw-r--r--  net/netlink/genetlink.c | 40
-rw-r--r--  net/nfc/core.c | 5
-rw-r--r--  net/nfc/nci/core.c | 118
-rw-r--r--  net/nfc/nci/data.c | 4
-rw-r--r--  net/nfc/nci/ntf.c | 337
-rw-r--r--  net/nfc/nci/rsp.c | 28
-rw-r--r--  net/nfc/netlink.c | 6
-rw-r--r--  net/nfc/rawsock.c | 12
-rw-r--r--  net/openvswitch/vport-internal_dev.c | 3
-rw-r--r--  net/packet/af_packet.c | 32
-rw-r--r--  net/sched/Kconfig | 26
-rw-r--r--  net/sched/Makefile | 1
-rw-r--r--  net/sched/sch_plug.c | 233
-rw-r--r--  net/tipc/bcast.c | 324
-rw-r--r--  net/tipc/bcast.h | 2
-rw-r--r--  net/tipc/link.c | 148
-rw-r--r--  net/tipc/name_distr.c | 4
-rw-r--r--  net/tipc/node.c | 14
-rw-r--r--  net/tipc/node.h | 18
-rw-r--r--  net/tipc/port.c | 5
-rw-r--r--  net/unix/af_unix.c | 52
-rw-r--r--  net/unix/diag.c | 10
-rw-r--r--  net/wireless/core.h | 14
-rw-r--r--  net/wireless/mesh.c | 1
-rw-r--r--  net/wireless/mlme.c | 322
-rw-r--r--  net/wireless/nl80211.c | 112
-rw-r--r--  net/wireless/reg.c | 19
-rw-r--r--  net/wireless/scan.c | 12
-rw-r--r--  net/wireless/sme.c | 41
-rw-r--r--  net/xfrm/xfrm_user.c | 9
125 files changed, 4154 insertions(+), 3399 deletions(-)
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 127fe70a1baa..5de42ea309bc 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -330,6 +330,8 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
struct atmarp_entry *entry;
struct neighbour *n;
struct atm_vcc *vcc;
+ struct rtable *rt;
+ __be32 *daddr;
int old;
unsigned long flags;
@@ -340,7 +342,12 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
- n = dst_get_neighbour_noref(dst);
+ rt = (struct rtable *) dst;
+ if (rt->rt_gateway)
+ daddr = &rt->rt_gateway;
+ else
+ daddr = &ip_hdr(skb)->daddr;
+ n = dst_neigh_lookup(dst, daddr);
if (!n) {
pr_err("NO NEIGHBOUR !\n");
dev_kfree_skb(skb);
@@ -360,7 +367,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
}
- return NETDEV_TX_OK;
+ goto out_release_neigh;
}
pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
@@ -379,14 +386,14 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
if (old) {
pr_warning("XOFF->XOFF transition\n");
- return NETDEV_TX_OK;
+ goto out_release_neigh;
}
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
vcc->send(vcc, skb);
if (atm_may_send(vcc, 0)) {
entry->vccs->xoff = 0;
- return NETDEV_TX_OK;
+ goto out_release_neigh;
}
spin_lock_irqsave(&clip_priv->xoff_lock, flags);
netif_stop_queue(dev); /* XOFF -> throttle immediately */
@@ -398,6 +405,8 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
of the brief netif_stop_queue. If this isn't true or if it
changes, use netif_wake_queue instead. */
spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
+out_release_neigh:
+ neigh_release(n);
return NETDEV_TX_OK;
}
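
The clip.c change above boils down to one pattern: each transmit now takes its own
neighbour reference via dst_neigh_lookup() and releases it on every exit path. A
minimal sketch of that pattern, assuming <linux/netdevice.h>, <net/dst.h>,
<net/route.h> and <net/ip.h>; the function and device below are hypothetical and
not taken from the patch:

/* sketch only: illustrates the dst_neigh_lookup()/neigh_release() pairing */
static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct neighbour *n;
	__be32 *daddr;

	/* prefer the gateway address when the route has one */
	if (rt->rt_gateway)
		daddr = &rt->rt_gateway;
	else
		daddr = &ip_hdr(skb)->daddr;

	n = dst_neigh_lookup(dst, daddr);	/* takes a reference */
	if (!n) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* ... hand the skb to the hardware using "n" ... */

	neigh_release(n);	/* balance the lookup on every return path */
	return NETDEV_TX_OK;
}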
diff --git a/net/batman-adv/bat_ogm.h b/net/batman-adv/bat_algo.h
index 69329c107e28..755379fd367d 100644
--- a/net/batman-adv/bat_ogm.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,7 +1,7 @@
/*
- * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2011 B.A.T.M.A.N. contributors:
*
- * Marek Lindner, Simon Wunderlich
+ * Marek Lindner
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -19,17 +19,9 @@
*
*/
-#ifndef _NET_BATMAN_ADV_OGM_H_
-#define _NET_BATMAN_ADV_OGM_H_
+#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_
+#define _NET_BATMAN_ADV_BAT_ALGO_H_
-#include "main.h"
+int bat_iv_init(void);
-void bat_ogm_init(struct hard_iface *hard_iface);
-void bat_ogm_init_primary(struct hard_iface *hard_iface);
-void bat_ogm_update_mac(struct hard_iface *hard_iface);
-void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes);
-void bat_ogm_emit(struct forw_packet *forw_packet);
-void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
- int packet_len, struct hard_iface *if_incoming);
-
-#endif /* _NET_BATMAN_ADV_OGM_H_ */
+#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index d0af9bf69e46..a7a393eeb935 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -221,6 +221,11 @@ static void debug_log_cleanup(struct bat_priv *bat_priv)
}
#endif
+static int bat_algorithms_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, bat_algo_seq_print_text, NULL);
+}
+
static int originators_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
@@ -274,6 +279,7 @@ struct bat_debuginfo bat_debuginfo_##_name = { \
} \
};
+static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open);
static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open);
@@ -293,9 +299,25 @@ static struct bat_debuginfo *mesh_debuginfos[] = {
void debugfs_init(void)
{
+ struct bat_debuginfo *bat_debug;
+ struct dentry *file;
+
bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL);
if (bat_debugfs == ERR_PTR(-ENODEV))
bat_debugfs = NULL;
+
+ if (!bat_debugfs)
+ goto out;
+
+ bat_debug = &bat_debuginfo_routing_algos;
+ file = debugfs_create_file(bat_debug->attr.name,
+ S_IFREG | bat_debug->attr.mode,
+ bat_debugfs, NULL, &bat_debug->fops);
+ if (!file)
+ pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
+
+out:
+ return;
}
void debugfs_destroy(void)
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 3512e251545b..1c483a5dd808 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -20,7 +20,6 @@
*/
#include "main.h"
-#include "bat_ogm.h"
#include "translation-table.h"
#include "ring_buffer.h"
#include "originator.h"
@@ -29,8 +28,9 @@
#include "gateway_client.h"
#include "hard-interface.h"
#include "send.h"
+#include "bat_algo.h"
-void bat_ogm_init(struct hard_iface *hard_iface)
+static void bat_iv_ogm_init(struct hard_iface *hard_iface)
{
struct batman_ogm_packet *batman_ogm_packet;
@@ -38,25 +38,25 @@ void bat_ogm_init(struct hard_iface *hard_iface)
hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
- batman_ogm_packet->packet_type = BAT_OGM;
- batman_ogm_packet->version = COMPAT_VERSION;
+ batman_ogm_packet->header.packet_type = BAT_OGM;
+ batman_ogm_packet->header.version = COMPAT_VERSION;
+ batman_ogm_packet->header.ttl = 2;
batman_ogm_packet->flags = NO_FLAGS;
- batman_ogm_packet->ttl = 2;
batman_ogm_packet->tq = TQ_MAX_VALUE;
batman_ogm_packet->tt_num_changes = 0;
batman_ogm_packet->ttvn = 0;
}
-void bat_ogm_init_primary(struct hard_iface *hard_iface)
+static void bat_iv_ogm_init_primary(struct hard_iface *hard_iface)
{
struct batman_ogm_packet *batman_ogm_packet;
batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
- batman_ogm_packet->ttl = TTL;
+ batman_ogm_packet->header.ttl = TTL;
}
-void bat_ogm_update_mac(struct hard_iface *hard_iface)
+static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface)
{
struct batman_ogm_packet *batman_ogm_packet;
@@ -68,7 +68,7 @@ void bat_ogm_update_mac(struct hard_iface *hard_iface)
}
/* when do we schedule our own ogm to be sent */
-static unsigned long bat_ogm_emit_send_time(const struct bat_priv *bat_priv)
+static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv)
{
return jiffies + msecs_to_jiffies(
atomic_read(&bat_priv->orig_interval) -
@@ -76,7 +76,7 @@ static unsigned long bat_ogm_emit_send_time(const struct bat_priv *bat_priv)
}
/* when do we schedule a ogm packet to be sent */
-static unsigned long bat_ogm_fwd_send_time(void)
+static unsigned long bat_iv_ogm_fwd_send_time(void)
{
return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}
@@ -89,8 +89,8 @@ static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
}
/* is there another aggregated packet here? */
-static int bat_ogm_aggr_packet(int buff_pos, int packet_len,
- int tt_num_changes)
+static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+ int tt_num_changes)
{
int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes);
@@ -99,8 +99,8 @@ static int bat_ogm_aggr_packet(int buff_pos, int packet_len,
}
/* send a batman ogm to a given interface */
-static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
- struct hard_iface *hard_iface)
+static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
+ struct hard_iface *hard_iface)
{
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
char *fwd_str;
@@ -117,8 +117,8 @@ static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
/* adjust all flags and log packets */
- while (bat_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
- batman_ogm_packet->tt_num_changes)) {
+ while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
+ batman_ogm_packet->tt_num_changes)) {
/* we might have aggregated direct link packets with an
* ordinary base packet */
@@ -137,7 +137,7 @@ static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
fwd_str, (packet_num > 0 ? "aggregated " : ""),
batman_ogm_packet->orig,
ntohl(batman_ogm_packet->seqno),
- batman_ogm_packet->tq, batman_ogm_packet->ttl,
+ batman_ogm_packet->tq, batman_ogm_packet->header.ttl,
(batman_ogm_packet->flags & DIRECTLINK ?
"on" : "off"),
batman_ogm_packet->ttvn, hard_iface->net_dev->name,
@@ -157,7 +157,7 @@ static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
}
/* send a batman ogm packet */
-void bat_ogm_emit(struct forw_packet *forw_packet)
+static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
{
struct hard_iface *hard_iface;
struct net_device *soft_iface;
@@ -188,7 +188,7 @@ void bat_ogm_emit(struct forw_packet *forw_packet)
/* multihomed peer assumed */
/* non-primary OGMs are only broadcasted on their interface */
- if ((directlink && (batman_ogm_packet->ttl == 1)) ||
+ if ((directlink && (batman_ogm_packet->header.ttl == 1)) ||
(forw_packet->own && (forw_packet->if_incoming != primary_if))) {
/* FIXME: what about aggregated packets ? */
@@ -198,7 +198,7 @@ void bat_ogm_emit(struct forw_packet *forw_packet)
(forw_packet->own ? "Sending own" : "Forwarding"),
batman_ogm_packet->orig,
ntohl(batman_ogm_packet->seqno),
- batman_ogm_packet->ttl,
+ batman_ogm_packet->header.ttl,
forw_packet->if_incoming->net_dev->name,
forw_packet->if_incoming->net_dev->dev_addr);
@@ -216,7 +216,7 @@ void bat_ogm_emit(struct forw_packet *forw_packet)
if (hard_iface->soft_iface != soft_iface)
continue;
- bat_ogm_send_to_if(forw_packet, hard_iface);
+ bat_iv_ogm_send_to_if(forw_packet, hard_iface);
}
rcu_read_unlock();
@@ -226,13 +226,13 @@ out:
}
/* return true if new_packet can be aggregated with forw_packet */
-static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
+static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
*new_batman_ogm_packet,
- struct bat_priv *bat_priv,
- int packet_len, unsigned long send_time,
- bool directlink,
- const struct hard_iface *if_incoming,
- const struct forw_packet *forw_packet)
+ struct bat_priv *bat_priv,
+ int packet_len, unsigned long send_time,
+ bool directlink,
+ const struct hard_iface *if_incoming,
+ const struct forw_packet *forw_packet)
{
struct batman_ogm_packet *batman_ogm_packet;
int aggregated_bytes = forw_packet->packet_len + packet_len;
@@ -272,7 +272,7 @@ static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
* are flooded through the net */
if ((!directlink) &&
(!(batman_ogm_packet->flags & DIRECTLINK)) &&
- (batman_ogm_packet->ttl != 1) &&
+ (batman_ogm_packet->header.ttl != 1) &&
/* own packets originating non-primary
* interfaces leave only that interface */
@@ -285,7 +285,7 @@ static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
/* if the incoming packet is sent via this one
* interface only - we still can aggregate */
if ((directlink) &&
- (new_batman_ogm_packet->ttl == 1) &&
+ (new_batman_ogm_packet->header.ttl == 1) &&
(forw_packet->if_incoming == if_incoming) &&
/* packets from direct neighbors or
@@ -306,11 +306,11 @@ out:
}
/* create a new aggregated packet and add this packet to it */
-static void bat_ogm_aggregate_new(const unsigned char *packet_buff,
- int packet_len, unsigned long send_time,
- bool direct_link,
- struct hard_iface *if_incoming,
- int own_packet)
+static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
+ int packet_len, unsigned long send_time,
+ bool direct_link,
+ struct hard_iface *if_incoming,
+ int own_packet)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct forw_packet *forw_packet_aggr;
@@ -385,9 +385,9 @@ out:
}
/* aggregate a new packet into the existing ogm packet */
-static void bat_ogm_aggregate(struct forw_packet *forw_packet_aggr,
- const unsigned char *packet_buff,
- int packet_len, bool direct_link)
+static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
+ const unsigned char *packet_buff,
+ int packet_len, bool direct_link)
{
unsigned char *skb_buff;
@@ -402,10 +402,10 @@ static void bat_ogm_aggregate(struct forw_packet *forw_packet_aggr,
(1 << forw_packet_aggr->num_packets);
}
-static void bat_ogm_queue_add(struct bat_priv *bat_priv,
- unsigned char *packet_buff,
- int packet_len, struct hard_iface *if_incoming,
- int own_packet, unsigned long send_time)
+static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
+ unsigned char *packet_buff,
+ int packet_len, struct hard_iface *if_incoming,
+ int own_packet, unsigned long send_time)
{
/**
* _aggr -> pointer to the packet we want to aggregate with
@@ -425,11 +425,11 @@ static void bat_ogm_queue_add(struct bat_priv *bat_priv,
if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
hlist_for_each_entry(forw_packet_pos, tmp_node,
&bat_priv->forw_bat_list, list) {
- if (bat_ogm_can_aggregate(batman_ogm_packet,
- bat_priv, packet_len,
- send_time, direct_link,
- if_incoming,
- forw_packet_pos)) {
+ if (bat_iv_ogm_can_aggregate(batman_ogm_packet,
+ bat_priv, packet_len,
+ send_time, direct_link,
+ if_incoming,
+ forw_packet_pos)) {
forw_packet_aggr = forw_packet_pos;
break;
}
@@ -451,27 +451,27 @@ static void bat_ogm_queue_add(struct bat_priv *bat_priv,
(atomic_read(&bat_priv->aggregated_ogms)))
send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
- bat_ogm_aggregate_new(packet_buff, packet_len,
- send_time, direct_link,
- if_incoming, own_packet);
+ bat_iv_ogm_aggregate_new(packet_buff, packet_len,
+ send_time, direct_link,
+ if_incoming, own_packet);
} else {
- bat_ogm_aggregate(forw_packet_aggr, packet_buff, packet_len,
- direct_link);
+ bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
+ packet_len, direct_link);
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
}
-static void bat_ogm_forward(struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- struct batman_ogm_packet *batman_ogm_packet,
- int directlink, struct hard_iface *if_incoming)
+static void bat_iv_ogm_forward(struct orig_node *orig_node,
+ const struct ethhdr *ethhdr,
+ struct batman_ogm_packet *batman_ogm_packet,
+ int directlink, struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct neigh_node *router;
uint8_t in_tq, in_ttl, tq_avg = 0;
uint8_t tt_num_changes;
- if (batman_ogm_packet->ttl <= 1) {
+ if (batman_ogm_packet->header.ttl <= 1) {
bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
return;
}
@@ -479,10 +479,10 @@ static void bat_ogm_forward(struct orig_node *orig_node,
router = orig_node_get_router(orig_node);
in_tq = batman_ogm_packet->tq;
- in_ttl = batman_ogm_packet->ttl;
+ in_ttl = batman_ogm_packet->header.ttl;
tt_num_changes = batman_ogm_packet->tt_num_changes;
- batman_ogm_packet->ttl--;
+ batman_ogm_packet->header.ttl--;
memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
@@ -494,7 +494,8 @@ static void bat_ogm_forward(struct orig_node *orig_node,
batman_ogm_packet->tq = router->tq_avg;
if (router->last_ttl)
- batman_ogm_packet->ttl = router->last_ttl - 1;
+ batman_ogm_packet->header.ttl =
+ router->last_ttl - 1;
}
tq_avg = router->tq_avg;
@@ -510,7 +511,7 @@ static void bat_ogm_forward(struct orig_node *orig_node,
"Forwarding packet: tq_orig: %i, tq_avg: %i, "
"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
- batman_ogm_packet->ttl);
+ batman_ogm_packet->header.ttl);
batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
@@ -522,12 +523,13 @@ static void bat_ogm_forward(struct orig_node *orig_node,
else
batman_ogm_packet->flags &= ~DIRECTLINK;
- bat_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
- BATMAN_OGM_LEN + tt_len(tt_num_changes),
- if_incoming, 0, bat_ogm_fwd_send_time());
+ bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
+ BATMAN_OGM_LEN + tt_len(tt_num_changes),
+ if_incoming, 0, bat_iv_ogm_fwd_send_time());
}
-void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
+static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
+ int tt_num_changes)
{
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batman_ogm_packet *batman_ogm_packet;
@@ -564,21 +566,22 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
atomic_inc(&hard_iface->seqno);
slide_own_bcast_window(hard_iface);
- bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
- hard_iface->packet_len, hard_iface, 1,
- bat_ogm_emit_send_time(bat_priv));
+ bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
+ hard_iface->packet_len, hard_iface, 1,
+ bat_iv_ogm_emit_send_time(bat_priv));
if (primary_if)
hardif_free_ref(primary_if);
}
-static void bat_ogm_orig_update(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- const struct batman_ogm_packet
+static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
+ const struct ethhdr *ethhdr,
+ const struct batman_ogm_packet
*batman_ogm_packet,
- struct hard_iface *if_incoming,
- const unsigned char *tt_buff, int is_duplicate)
+ struct hard_iface *if_incoming,
+ const unsigned char *tt_buff,
+ int is_duplicate)
{
struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
struct neigh_node *router = NULL;
@@ -642,8 +645,8 @@ static void bat_ogm_orig_update(struct bat_priv *bat_priv,
spin_unlock_bh(&neigh_node->tq_lock);
if (!is_duplicate) {
- orig_node->last_ttl = batman_ogm_packet->ttl;
- neigh_node->last_ttl = batman_ogm_packet->ttl;
+ orig_node->last_ttl = batman_ogm_packet->header.ttl;
+ neigh_node->last_ttl = batman_ogm_packet->header.ttl;
}
bonding_candidate_add(orig_node, neigh_node);
@@ -683,7 +686,7 @@ update_tt:
/* I have to check for transtable changes only if the OGM has been
* sent through a primary interface */
if (((batman_ogm_packet->orig != ethhdr->h_source) &&
- (batman_ogm_packet->ttl > 2)) ||
+ (batman_ogm_packet->header.ttl > 2)) ||
(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
tt_update_orig(bat_priv, orig_node, tt_buff,
batman_ogm_packet->tt_num_changes,
@@ -713,10 +716,10 @@ out:
neigh_node_free_ref(router);
}
-static int bat_ogm_calc_tq(struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- struct batman_ogm_packet *batman_ogm_packet,
- struct hard_iface *if_incoming)
+static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
+ struct orig_node *orig_neigh_node,
+ struct batman_ogm_packet *batman_ogm_packet,
+ struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
@@ -825,10 +828,10 @@ out:
* -1 the packet is old and has been received while the seqno window
* was protected. Caller should drop it.
*/
-static int bat_ogm_update_seqnos(const struct ethhdr *ethhdr,
- const struct batman_ogm_packet
+static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
+ const struct batman_ogm_packet
*batman_ogm_packet,
- const struct hard_iface *if_incoming)
+ const struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct orig_node *orig_node;
@@ -890,10 +893,10 @@ out:
return ret;
}
-static void bat_ogm_process(const struct ethhdr *ethhdr,
- struct batman_ogm_packet *batman_ogm_packet,
- const unsigned char *tt_buff,
- struct hard_iface *if_incoming)
+static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
+ struct batman_ogm_packet *batman_ogm_packet,
+ const unsigned char *tt_buff,
+ struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct hard_iface *hard_iface;
@@ -918,7 +921,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
* packet in an aggregation. Here we expect that the padding
* is always zero (or not 0x01)
*/
- if (batman_ogm_packet->packet_type != BAT_OGM)
+ if (batman_ogm_packet->header.packet_type != BAT_OGM)
return;
/* could be changed by schedule_own_packet() */
@@ -938,8 +941,8 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
- batman_ogm_packet->ttl, batman_ogm_packet->version,
- has_directlink_flag);
+ batman_ogm_packet->header.ttl,
+ batman_ogm_packet->header.version, has_directlink_flag);
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
@@ -966,10 +969,10 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
}
rcu_read_unlock();
- if (batman_ogm_packet->version != COMPAT_VERSION) {
+ if (batman_ogm_packet->header.version != COMPAT_VERSION) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: incompatible batman version (%i)\n",
- batman_ogm_packet->version);
+ batman_ogm_packet->header.version);
return;
}
@@ -1031,8 +1034,8 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
if (!orig_node)
return;
- is_duplicate = bat_ogm_update_seqnos(ethhdr, batman_ogm_packet,
- if_incoming);
+ is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet,
+ if_incoming);
if (is_duplicate == -1) {
bat_dbg(DBG_BATMAN, bat_priv,
@@ -1081,8 +1084,8 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
goto out_neigh;
}
- is_bidirectional = bat_ogm_calc_tq(orig_node, orig_neigh_node,
- batman_ogm_packet, if_incoming);
+ is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node,
+ batman_ogm_packet, if_incoming);
bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);
@@ -1091,17 +1094,17 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
if (is_bidirectional &&
(!is_duplicate ||
((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
- (orig_node->last_ttl - 3 <= batman_ogm_packet->ttl))))
- bat_ogm_orig_update(bat_priv, orig_node, ethhdr,
- batman_ogm_packet, if_incoming,
- tt_buff, is_duplicate);
+ (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl))))
+ bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
+ batman_ogm_packet, if_incoming,
+ tt_buff, is_duplicate);
/* is single hop (direct) neighbor */
if (is_single_hop_neigh) {
/* mark direct link on incoming interface */
- bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
- 1, if_incoming);
+ bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
+ 1, if_incoming);
bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
"rebroadcast neighbor packet with direct link flag\n");
@@ -1123,7 +1126,8 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
bat_dbg(DBG_BATMAN, bat_priv,
"Forwarding packet: rebroadcast originator packet\n");
- bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 0, if_incoming);
+ bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
+ 0, if_incoming);
out_neigh:
if ((orig_neigh_node) && (!is_single_hop_neigh))
@@ -1139,13 +1143,17 @@ out:
orig_node_free_ref(orig_node);
}
-void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
- int packet_len, struct hard_iface *if_incoming)
+static void bat_iv_ogm_receive(struct hard_iface *if_incoming,
+ struct sk_buff *skb)
{
struct batman_ogm_packet *batman_ogm_packet;
- int buff_pos = 0;
- unsigned char *tt_buff;
+ struct ethhdr *ethhdr;
+ int buff_pos = 0, packet_len;
+ unsigned char *tt_buff, *packet_buff;
+ packet_len = skb_headlen(skb);
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ packet_buff = skb->data;
batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
/* unpack the aggregated packets and process them one by one */
@@ -1157,14 +1165,29 @@ void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN;
- bat_ogm_process(ethhdr, batman_ogm_packet,
- tt_buff, if_incoming);
+ bat_iv_ogm_process(ethhdr, batman_ogm_packet,
+ tt_buff, if_incoming);
buff_pos += BATMAN_OGM_LEN +
tt_len(batman_ogm_packet->tt_num_changes);
batman_ogm_packet = (struct batman_ogm_packet *)
(packet_buff + buff_pos);
- } while (bat_ogm_aggr_packet(buff_pos, packet_len,
- batman_ogm_packet->tt_num_changes));
+ } while (bat_iv_ogm_aggr_packet(buff_pos, packet_len,
+ batman_ogm_packet->tt_num_changes));
+}
+
+static struct bat_algo_ops batman_iv __read_mostly = {
+ .name = "BATMAN IV",
+ .bat_ogm_init = bat_iv_ogm_init,
+ .bat_ogm_init_primary = bat_iv_ogm_init_primary,
+ .bat_ogm_update_mac = bat_iv_ogm_update_mac,
+ .bat_ogm_schedule = bat_iv_ogm_schedule,
+ .bat_ogm_emit = bat_iv_ogm_emit,
+ .bat_ogm_receive = bat_iv_ogm_receive,
+};
+
+int __init bat_iv_init(void)
+{
+ return bat_algo_register(&batman_iv);
}
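
The batman_iv ops table and bat_iv_init() above are the first user of the routing
algorithm API added in main.c and types.h further down. As a rough sketch of what a
second algorithm would have to supply — all names here are invented for illustration,
only BATMAN IV is registered by this patch — every op must be filled in, otherwise
bat_algo_register() rejects the entry:

static void bat_dummy_ogm_init(struct hard_iface *hard_iface) { /* ... */ }
static void bat_dummy_ogm_init_primary(struct hard_iface *hard_iface) { /* ... */ }
static void bat_dummy_ogm_update_mac(struct hard_iface *hard_iface) { /* ... */ }
static void bat_dummy_ogm_schedule(struct hard_iface *hard_iface,
				   int tt_num_changes) { /* ... */ }
static void bat_dummy_ogm_emit(struct forw_packet *forw_packet) { /* ... */ }
static void bat_dummy_ogm_receive(struct hard_iface *if_incoming,
				  struct sk_buff *skb) { /* ... */ }

static struct bat_algo_ops batman_dummy __read_mostly = {
	.name			= "BATMAN DUMMY",
	.bat_ogm_init		= bat_dummy_ogm_init,
	.bat_ogm_init_primary	= bat_dummy_ogm_init_primary,
	.bat_ogm_update_mac	= bat_dummy_ogm_update_mac,
	.bat_ogm_schedule	= bat_dummy_ogm_schedule,
	.bat_ogm_emit		= bat_dummy_ogm_emit,
	.bat_ogm_receive	= bat_dummy_ogm_receive,
};

int __init bat_dummy_init(void)
{
	return bat_algo_register(&batman_dummy);	/* 0 on success, -1 otherwise */
}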
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index c25492f7d665..480ae0a5ba43 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -272,6 +272,13 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
return count;
}
+static ssize_t show_bat_algo(struct kobject *kobj, struct attribute *attr,
+ char *buff)
+{
+ struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
+ return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
+}
+
static void post_gw_deselect(struct net_device *net_dev)
{
struct bat_priv *bat_priv = netdev_priv(net_dev);
@@ -382,6 +389,7 @@ BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
+static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL);
static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
@@ -399,6 +407,7 @@ static struct bat_attribute *mesh_attrs[] = {
&bat_attr_fragmentation,
&bat_attr_ap_isolation,
&bat_attr_vis_mode,
+ &bat_attr_routing_algo,
&bat_attr_gw_mode,
&bat_attr_orig_interval,
&bat_attr_hop_penalty,
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 7704df468e0b..ff5ba406b1cf 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -28,7 +28,6 @@
#include "bat_sysfs.h"
#include "originator.h"
#include "hash.h"
-#include "bat_ogm.h"
#include <linux/if_arp.h>
@@ -147,7 +146,7 @@ static void primary_if_select(struct bat_priv *bat_priv,
if (!new_hard_iface)
return;
- bat_ogm_init_primary(new_hard_iface);
+ bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface);
primary_if_update_addr(bat_priv);
}
@@ -233,7 +232,7 @@ static void hardif_activate_interface(struct hard_iface *hard_iface)
bat_priv = netdev_priv(hard_iface->soft_iface);
- bat_ogm_update_mac(hard_iface);
+ bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
hard_iface->if_status = IF_TO_BE_ACTIVATED;
/**
@@ -281,6 +280,14 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
if (!atomic_inc_not_zero(&hard_iface->refcount))
goto out;
+ /* hard-interface is part of a bridge */
+ if (hard_iface->net_dev->priv_flags & IFF_BRIDGE_PORT)
+ pr_err("You are about to enable batman-adv on '%s' which "
+ "already is part of a bridge. Unless you know exactly "
+ "what you are doing this is probably wrong and won't "
+ "work the way you think it would.\n",
+ hard_iface->net_dev->name);
+
soft_iface = dev_get_by_name(&init_net, iface_name);
if (!soft_iface) {
@@ -307,7 +314,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
hard_iface->soft_iface = soft_iface;
bat_priv = netdev_priv(hard_iface->soft_iface);
- bat_ogm_init(hard_iface);
+ bat_priv->bat_algo_ops->bat_ogm_init(hard_iface);
if (!hard_iface->packet_buff) {
bat_err(hard_iface->soft_iface, "Can't add interface packet "
@@ -527,9 +534,10 @@ static int hard_if_event(struct notifier_block *this,
goto hardif_put;
check_known_mac_addr(hard_iface->net_dev);
- bat_ogm_update_mac(hard_iface);
bat_priv = netdev_priv(hard_iface->soft_iface);
+ bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
+
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if)
goto hardif_put;
@@ -590,17 +598,17 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
- if (batman_ogm_packet->version != COMPAT_VERSION) {
+ if (batman_ogm_packet->header.version != COMPAT_VERSION) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: incompatible batman version (%i)\n",
- batman_ogm_packet->version);
+ batman_ogm_packet->header.version);
goto err_free;
}
/* all receive handlers return whether they received or reused
* the supplied skb. if not, we have to free the skb. */
- switch (batman_ogm_packet->packet_type) {
+ switch (batman_ogm_packet->header.packet_type) {
/* batman originator packet */
case BAT_OGM:
ret = recv_bat_ogm_packet(skb, hard_iface);
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index d9c1e7bb7fbf..5d69e103faa1 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -191,7 +191,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
goto free_skb;
}
- if (icmp_packet->packet_type != BAT_ICMP) {
+ if (icmp_packet->header.packet_type != BAT_ICMP) {
bat_dbg(DBG_BATMAN, bat_priv,
"Error - can't send packet from char device: "
"got bogus packet type (expected: BAT_ICMP)\n");
@@ -209,9 +209,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
icmp_packet->uid = socket_client->index;
- if (icmp_packet->version != COMPAT_VERSION) {
+ if (icmp_packet->header.version != COMPAT_VERSION) {
icmp_packet->msg_type = PARAMETER_PROBLEM;
- icmp_packet->version = COMPAT_VERSION;
+ icmp_packet->header.version = COMPAT_VERSION;
bat_socket_add_packet(socket_client, icmp_packet, packet_len);
goto free_skb;
}
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index fb87bdc2ce9b..8ae497b26bd8 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -32,11 +32,14 @@
#include "gateway_client.h"
#include "vis.h"
#include "hash.h"
+#include "bat_algo.h"
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
* list traversals just rcu-locked */
struct list_head hardif_list;
+char bat_routing_algo[20] = "BATMAN IV";
+static struct hlist_head bat_algo_list;
unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -45,6 +48,9 @@ struct workqueue_struct *bat_event_workqueue;
static int __init batman_init(void)
{
INIT_LIST_HEAD(&hardif_list);
+ INIT_HLIST_HEAD(&bat_algo_list);
+
+ bat_iv_init();
/* the name should not be longer than 10 chars - see
* http://lwn.net/Articles/23634/ */
@@ -170,9 +176,110 @@ int is_my_mac(const uint8_t *addr)
}
rcu_read_unlock();
return 0;
+}
+
+static struct bat_algo_ops *bat_algo_get(char *name)
+{
+ struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
+ struct hlist_node *node;
+
+ hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) {
+ if (strcmp(bat_algo_ops_tmp->name, name) != 0)
+ continue;
+
+ bat_algo_ops = bat_algo_ops_tmp;
+ break;
+ }
+
+ return bat_algo_ops;
+}
+
+int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
+{
+ struct bat_algo_ops *bat_algo_ops_tmp;
+ int ret = -1;
+
+ bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
+ if (bat_algo_ops_tmp) {
+ pr_info("Trying to register already registered routing "
+ "algorithm: %s\n", bat_algo_ops->name);
+ goto out;
+ }
+
+ /* all algorithms must implement all ops (for now) */
+ if (!bat_algo_ops->bat_ogm_init ||
+ !bat_algo_ops->bat_ogm_init_primary ||
+ !bat_algo_ops->bat_ogm_update_mac ||
+ !bat_algo_ops->bat_ogm_schedule ||
+ !bat_algo_ops->bat_ogm_emit ||
+ !bat_algo_ops->bat_ogm_receive) {
+ pr_info("Routing algo '%s' does not implement required ops\n",
+ bat_algo_ops->name);
+ goto out;
+ }
+
+ INIT_HLIST_NODE(&bat_algo_ops->list);
+ hlist_add_head(&bat_algo_ops->list, &bat_algo_list);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int bat_algo_select(struct bat_priv *bat_priv, char *name)
+{
+ struct bat_algo_ops *bat_algo_ops;
+ int ret = -1;
+
+ bat_algo_ops = bat_algo_get(name);
+ if (!bat_algo_ops)
+ goto out;
+
+ bat_priv->bat_algo_ops = bat_algo_ops;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int bat_algo_seq_print_text(struct seq_file *seq, void *offset)
+{
+ struct bat_algo_ops *bat_algo_ops;
+ struct hlist_node *node;
+
+ seq_printf(seq, "Available routing algorithms:\n");
+
+ hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) {
+ seq_printf(seq, "%s\n", bat_algo_ops->name);
+ }
+
+ return 0;
+}
+
+static int param_set_ra(const char *val, const struct kernel_param *kp)
+{
+ struct bat_algo_ops *bat_algo_ops;
+ bat_algo_ops = bat_algo_get((char *)val);
+ if (!bat_algo_ops) {
+ pr_err("Routing algorithm '%s' is not supported\n", val);
+ return -EINVAL;
+ }
+
+ return param_set_copystring(val, kp);
}
+static const struct kernel_param_ops param_ops_ra = {
+ .set = param_set_ra,
+ .get = param_get_string,
+};
+
+static struct kparam_string __param_string_ra = {
+ .maxlen = sizeof(bat_routing_algo),
+ .string = bat_routing_algo,
+};
+
+module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644);
module_init(batman_init);
module_exit(batman_exit);
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 86354e06eb48..586afafb1b67 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -147,6 +147,7 @@ enum dbg_level {
#include <linux/seq_file.h>
#include "types.h"
+extern char bat_routing_algo[];
extern struct list_head hardif_list;
extern unsigned char broadcast_addr[];
@@ -157,6 +158,9 @@ void mesh_free(struct net_device *soft_iface);
void inc_module_count(void);
void dec_module_count(void);
int is_my_mac(const uint8_t *addr);
+int bat_algo_register(struct bat_algo_ops *bat_algo_ops);
+int bat_algo_select(struct bat_priv *bat_priv, char *name);
+int bat_algo_seq_print_text(struct seq_file *seq, void *offset);
#ifdef CONFIG_BATMAN_ADV_DEBUG
int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3);
@@ -202,6 +206,17 @@ static inline int compare_eth(const void *data1, const void *data2)
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}
+/**
+ * has_timed_out - compares current time (jiffies) and timestamp + timeout
+ * @timestamp: base value to compare with (in jiffies)
+ * @timeout: added to base value before comparing (in milliseconds)
+ *
+ * Returns true if current time is after timestamp + timeout
+ */
+static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout)
+{
+ return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
+}
#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 0bc2045a2f2e..847ff7e98a61 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -219,6 +219,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
/* extra reference for return */
atomic_set(&orig_node->refcount, 2);
+ orig_node->tt_initialised = false;
orig_node->tt_poss_change = false;
orig_node->bat_priv = bat_priv;
memcpy(orig_node->orig, addr, ETH_ALEN);
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 4d9e54c57a36..88c717b9344d 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -90,10 +90,14 @@ enum tt_client_flags {
TT_CLIENT_PENDING = 1 << 10
};
-struct batman_ogm_packet {
+struct batman_header {
uint8_t packet_type;
uint8_t version; /* batman version field */
uint8_t ttl;
+} __packed;
+
+struct batman_ogm_packet {
+ struct batman_header header;
uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
uint32_t seqno;
uint8_t orig[6];
@@ -108,9 +112,7 @@ struct batman_ogm_packet {
#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet)
struct icmp_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
+ struct batman_header header;
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[6];
uint8_t orig[6];
@@ -124,9 +126,7 @@ struct icmp_packet {
/* icmp_packet_rr must start with all fields from imcp_packet
* as this is assumed by code that handles ICMP packets */
struct icmp_packet_rr {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
+ struct batman_header header;
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[6];
uint8_t orig[6];
@@ -137,17 +137,13 @@ struct icmp_packet_rr {
} __packed;
struct unicast_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
+ struct batman_header header;
uint8_t ttvn; /* destination translation table version number */
uint8_t dest[6];
} __packed;
struct unicast_frag_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
+ struct batman_header header;
uint8_t ttvn; /* destination translation table version number */
uint8_t dest[6];
uint8_t flags;
@@ -157,18 +153,14 @@ struct unicast_frag_packet {
} __packed;
struct bcast_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
+ struct batman_header header;
uint8_t reserved;
uint32_t seqno;
uint8_t orig[6];
} __packed;
struct vis_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl; /* TTL */
+ struct batman_header header;
uint8_t vis_type; /* which type of vis-participant sent this? */
uint32_t seqno; /* sequence number */
uint8_t entries; /* number of entries behind this struct */
@@ -179,9 +171,7 @@ struct vis_packet {
} __packed;
struct tt_query_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
+ struct batman_header header;
/* the flag field is a combination of:
* - TT_REQUEST or TT_RESPONSE
* - TT_FULL_TABLE */
@@ -202,9 +192,7 @@ struct tt_query_packet {
} __packed;
struct roam_adv_packet {
- uint8_t packet_type;
- uint8_t version;
- uint8_t ttl;
+ struct batman_header header;
uint8_t reserved;
uint8_t dst[ETH_ALEN];
uint8_t src[ETH_ALEN];
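
Because every on-wire packet type now starts with struct batman_header, code that only
cares about the common fields can check them uniformly before it knows the concrete
packet type. A small sketch under that assumption; the helper below is illustrative
and not part of the patch:

static bool example_header_ok(struct sk_buff *skb)
{
	struct batman_header *hdr;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		return false;

	hdr = (struct batman_header *)skb->data;

	/* same checks whether this turns out to be an OGM, ICMP, unicast,
	 * broadcast, vis, TT query or roaming advertisement */
	return hdr->version == COMPAT_VERSION && hdr->ttl >= 2;
}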
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 773e606f9702..b72d7f3b3c6a 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -29,7 +29,6 @@
#include "originator.h"
#include "vis.h"
#include "unicast.h"
-#include "bat_ogm.h"
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
@@ -248,6 +247,7 @@ int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
{
+ struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct ethhdr *ethhdr;
/* drop packet if it has not necessary minimum size */
@@ -272,9 +272,7 @@ int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
if (skb_linearize(skb) < 0)
return NET_RX_DROP;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- bat_ogm_receive(ethhdr, skb->data, skb_headlen(skb), hard_iface);
+ bat_priv->bat_algo_ops->bat_ogm_receive(hard_iface, skb);
kfree_skb(skb);
return NET_RX_SUCCESS;
@@ -320,7 +318,7 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = ECHO_REPLY;
- icmp_packet->ttl = TTL;
+ icmp_packet->header.ttl = TTL;
send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
@@ -376,7 +374,7 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = TTL_EXCEEDED;
- icmp_packet->ttl = TTL;
+ icmp_packet->header.ttl = TTL;
send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
@@ -441,7 +439,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
return recv_my_icmp_packet(bat_priv, skb, hdr_size);
/* TTL exceeded */
- if (icmp_packet->ttl < 2)
+ if (icmp_packet->header.ttl < 2)
return recv_icmp_ttl_exceeded(bat_priv, skb);
/* get routing information */
@@ -460,7 +458,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
icmp_packet = (struct icmp_packet_rr *)skb->data;
/* decrement ttl */
- icmp_packet->ttl--;
+ icmp_packet->header.ttl--;
/* route it */
send_skb_packet(skb, router->if_incoming, router->addr);
@@ -815,7 +813,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
unicast_packet = (struct unicast_packet *)skb->data;
/* TTL exceeded */
- if (unicast_packet->ttl < 2) {
+ if (unicast_packet->header.ttl < 2) {
pr_debug("Warning - can't forward unicast packet from %pM to "
"%pM: ttl exceeded\n", ethhdr->h_source,
unicast_packet->dest);
@@ -840,7 +838,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
unicast_packet = (struct unicast_packet *)skb->data;
- if (unicast_packet->packet_type == BAT_UNICAST &&
+ if (unicast_packet->header.packet_type == BAT_UNICAST &&
atomic_read(&bat_priv->fragmentation) &&
skb->len > neigh_node->if_incoming->net_dev->mtu) {
ret = frag_send_skb(skb, bat_priv,
@@ -848,7 +846,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
goto out;
}
- if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
+ if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG &&
frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {
ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
@@ -867,7 +865,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
}
/* decrement ttl */
- unicast_packet->ttl--;
+ unicast_packet->header.ttl--;
/* route it */
send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
@@ -1041,7 +1039,7 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
if (is_my_mac(bcast_packet->orig))
goto out;
- if (bcast_packet->ttl < 2)
+ if (bcast_packet->header.ttl < 2)
goto out;
orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 8a684eb738ad..019337e3eafb 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -28,7 +28,6 @@
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"
-#include "bat_ogm.h"
static void send_outstanding_bcast_packet(struct work_struct *work);
@@ -168,7 +167,7 @@ void schedule_bat_ogm(struct hard_iface *hard_iface)
if (primary_if)
hardif_free_ref(primary_if);
- bat_ogm_schedule(hard_iface, tt_num_changes);
+ bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
}
static void forw_packet_free(struct forw_packet *forw_packet)
@@ -234,7 +233,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
/* as we have a copy now, it is safe to decrease the TTL */
bcast_packet = (struct bcast_packet *)newskb->data;
- bcast_packet->ttl--;
+ bcast_packet->header.ttl--;
skb_reset_mac_header(newskb);
@@ -318,7 +317,7 @@ void send_outstanding_bat_ogm_packet(struct work_struct *work)
if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
goto out;
- bat_ogm_emit(forw_packet);
+ bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);
/**
* we have to have at least one packet in the queue
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 987c75a775f9..2d8bd2ad6ba3 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -457,10 +457,10 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
batman_ogm_packet = (struct batman_ogm_packet *)
(skb->data + ETH_HLEN);
- if (batman_ogm_packet->version != COMPAT_VERSION)
+ if (batman_ogm_packet->header.version != COMPAT_VERSION)
goto out;
- if (batman_ogm_packet->packet_type != BAT_OGM)
+ if (batman_ogm_packet->header.packet_type != BAT_OGM)
goto out;
if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
@@ -541,6 +541,7 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
}
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
@@ -632,11 +633,11 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
goto dropped;
bcast_packet = (struct bcast_packet *)skb->data;
- bcast_packet->version = COMPAT_VERSION;
- bcast_packet->ttl = TTL;
+ bcast_packet->header.version = COMPAT_VERSION;
+ bcast_packet->header.ttl = TTL;
/* batman packet type: broadcast */
- bcast_packet->packet_type = BAT_BCAST;
+ bcast_packet->header.packet_type = BAT_BCAST;
/* hw address of first interface is the orig mac because only
* this mac is known throughout the mesh */
@@ -725,8 +726,8 @@ void interface_rx(struct net_device *soft_iface,
skb_push(skb, hdr_size);
unicast_packet = (struct unicast_packet *)skb->data;
- if ((unicast_packet->packet_type != BAT_UNICAST) &&
- (unicast_packet->packet_type != BAT_UNICAST_FRAG))
+ if ((unicast_packet->header.packet_type != BAT_UNICAST) &&
+ (unicast_packet->header.packet_type != BAT_UNICAST_FRAG))
goto dropped;
skb_reset_mac_header(skb);
@@ -783,7 +784,6 @@ static const struct net_device_ops bat_netdev_ops = {
static void interface_setup(struct net_device *dev)
{
struct bat_priv *priv = netdev_priv(dev);
- char dev_addr[ETH_ALEN];
ether_setup(dev);
@@ -800,8 +800,7 @@ static void interface_setup(struct net_device *dev)
dev->hard_header_len = BAT_HEADER_LEN;
/* generate random address */
- random_ether_addr(dev_addr);
- memcpy(dev->dev_addr, dev_addr, ETH_ALEN);
+ eth_hw_addr_random(dev);
SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
@@ -855,6 +854,10 @@ struct net_device *softif_create(const char *name)
bat_priv->primary_if = NULL;
bat_priv->num_ifaces = 0;
+ ret = bat_algo_select(bat_priv, bat_routing_algo);
+ if (ret < 0)
+ goto unreg_soft_iface;
+
ret = sysfs_add_meshif(soft_iface);
if (ret < 0)
goto unreg_soft_iface;
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index ab8dea8b0b2e..a84e80409f9b 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -108,14 +108,6 @@ static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
}
-static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
-{
- unsigned long deadline;
- deadline = starting_time + msecs_to_jiffies(timeout);
-
- return time_after(jiffies, deadline);
-}
-
static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
if (atomic_dec_and_test(&tt_local_entry->common.refcount))
@@ -420,8 +412,8 @@ static void tt_local_purge(struct bat_priv *bat_priv)
if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
continue;
- if (!is_out_of_time(tt_local_entry->last_seen,
- TT_LOCAL_TIMEOUT * 1000))
+ if (!has_timed_out(tt_local_entry->last_seen,
+ TT_LOCAL_TIMEOUT * 1000))
continue;
tt_local_set_pending(bat_priv, tt_local_entry,
@@ -733,6 +725,7 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
spin_unlock_bh(list_lock);
}
atomic_set(&orig_node->tt_size, 0);
+ orig_node->tt_initialised = false;
}
static void tt_global_roam_purge(struct bat_priv *bat_priv)
@@ -757,8 +750,8 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
common);
if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
continue;
- if (!is_out_of_time(tt_global_entry->roam_at,
- TT_CLIENT_ROAM_TIMEOUT * 1000))
+ if (!has_timed_out(tt_global_entry->roam_at,
+ TT_CLIENT_ROAM_TIMEOUT * 1000))
continue;
bat_dbg(DBG_TT, bat_priv, "Deleting global "
@@ -977,8 +970,8 @@ static void tt_req_purge(struct bat_priv *bat_priv)
spin_lock_bh(&bat_priv->tt_req_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
- if (is_out_of_time(node->issued_at,
- TT_REQUEST_TIMEOUT * 1000)) {
+ if (has_timed_out(node->issued_at,
+ TT_REQUEST_TIMEOUT * 1000)) {
list_del(&node->list);
kfree(node);
}
@@ -996,8 +989,8 @@ static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
spin_lock_bh(&bat_priv->tt_req_list_lock);
list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
if (compare_eth(tt_req_node_tmp, orig_node) &&
- !is_out_of_time(tt_req_node_tmp->issued_at,
- TT_REQUEST_TIMEOUT * 1000))
+ !has_timed_out(tt_req_node_tmp->issued_at,
+ TT_REQUEST_TIMEOUT * 1000))
goto unlock;
}
@@ -1134,11 +1127,11 @@ static int send_tt_request(struct bat_priv *bat_priv,
tt_request = (struct tt_query_packet *)skb_put(skb,
sizeof(struct tt_query_packet));
- tt_request->packet_type = BAT_TT_QUERY;
- tt_request->version = COMPAT_VERSION;
+ tt_request->header.packet_type = BAT_TT_QUERY;
+ tt_request->header.version = COMPAT_VERSION;
memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
- tt_request->ttl = TTL;
+ tt_request->header.ttl = TTL;
tt_request->ttvn = ttvn;
tt_request->tt_data = tt_crc;
tt_request->flags = TT_REQUEST;
@@ -1264,9 +1257,9 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
tt_response = (struct tt_query_packet *)skb->data;
}
- tt_response->packet_type = BAT_TT_QUERY;
- tt_response->version = COMPAT_VERSION;
- tt_response->ttl = TTL;
+ tt_response->header.packet_type = BAT_TT_QUERY;
+ tt_response->header.version = COMPAT_VERSION;
+ tt_response->header.ttl = TTL;
memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
tt_response->flags = TT_RESPONSE;
@@ -1381,9 +1374,9 @@ static bool send_my_tt_response(struct bat_priv *bat_priv,
tt_response = (struct tt_query_packet *)skb->data;
}
- tt_response->packet_type = BAT_TT_QUERY;
- tt_response->version = COMPAT_VERSION;
- tt_response->ttl = TTL;
+ tt_response->header.packet_type = BAT_TT_QUERY;
+ tt_response->header.version = COMPAT_VERSION;
+ tt_response->header.ttl = TTL;
memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
tt_response->flags = TT_RESPONSE;
@@ -1450,6 +1443,7 @@ static void _tt_update_changes(struct bat_priv *bat_priv,
*/
return;
}
+ orig_node->tt_initialised = true;
}
static void tt_fill_gtable(struct bat_priv *bat_priv,
@@ -1589,8 +1583,8 @@ static void tt_roam_purge(struct bat_priv *bat_priv)
spin_lock_bh(&bat_priv->tt_roam_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
- if (!is_out_of_time(node->first_time,
- ROAMING_MAX_TIME * 1000))
+ if (!has_timed_out(node->first_time,
+ ROAMING_MAX_TIME * 1000))
continue;
list_del(&node->list);
@@ -1617,8 +1611,8 @@ static bool tt_check_roam_count(struct bat_priv *bat_priv,
if (!compare_eth(tt_roam_node->addr, client))
continue;
- if (is_out_of_time(tt_roam_node->first_time,
- ROAMING_MAX_TIME * 1000))
+ if (has_timed_out(tt_roam_node->first_time,
+ ROAMING_MAX_TIME * 1000))
continue;
if (!atomic_dec_not_zero(&tt_roam_node->counter))
@@ -1669,9 +1663,9 @@ void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
sizeof(struct roam_adv_packet));
- roam_adv_packet->packet_type = BAT_ROAM_ADV;
- roam_adv_packet->version = COMPAT_VERSION;
- roam_adv_packet->ttl = TTL;
+ roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
+ roam_adv_packet->header.version = COMPAT_VERSION;
+ roam_adv_packet->header.ttl = TTL;
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
@@ -1854,8 +1848,10 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
bool full_table = true;
- /* the ttvn increased by one -> we can apply the attached changes */
- if (ttvn - orig_ttvn == 1) {
+ /* orig table not initialised AND first diff is in the OGM OR the ttvn
+ * increased by one -> we can apply the attached changes */
+ if ((!orig_node->tt_initialised && ttvn == 1) ||
+ ttvn - orig_ttvn == 1) {
/* the OGM could not contain the changes due to their size or
* because they have already been sent TT_OGM_APPEND_MAX times.
* In this case send a tt request */
@@ -1889,7 +1885,8 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
} else {
/* if we missed more than one change or our tables are not
* in sync anymore -> request fresh tt data */
- if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
+ if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
+ orig_node->tt_crc != tt_crc) {
request_table:
bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
"Need to retrieve the correct information "
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index e9eb043719ac..650ce5fb1192 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -81,6 +81,7 @@ struct orig_node {
int16_t tt_buff_len;
spinlock_t tt_buff_lock; /* protects tt_buff */
atomic_t tt_size;
+ bool tt_initialised;
/* The tt_poss_change flag is used to detect an ongoing roaming phase.
* If true, then I sent a Roaming_adv to this orig_node and I have to
* inspect every packet directed to it to check whether it is still
@@ -205,6 +206,7 @@ struct bat_priv {
atomic_t gw_reselect;
struct hard_iface __rcu *primary_if; /* rcu protected pointer */
struct vis_info *my_vis_info;
+ struct bat_algo_ops *bat_algo_ops;
};
struct socket_client {
@@ -343,4 +345,23 @@ struct softif_neigh {
struct rcu_head rcu;
};
+struct bat_algo_ops {
+ struct hlist_node list;
+ char *name;
+ /* init OGM when hard-interface is enabled */
+ void (*bat_ogm_init)(struct hard_iface *hard_iface);
+ /* init primary OGM when primary interface is selected */
+ void (*bat_ogm_init_primary)(struct hard_iface *hard_iface);
+ /* init mac addresses of the OGM belonging to this hard-interface */
+ void (*bat_ogm_update_mac)(struct hard_iface *hard_iface);
+ /* prepare a new outgoing OGM for the send queue */
+ void (*bat_ogm_schedule)(struct hard_iface *hard_iface,
+ int tt_num_changes);
+ /* send scheduled OGM */
+ void (*bat_ogm_emit)(struct forw_packet *forw_packet);
+ /* receive incoming OGM */
+ void (*bat_ogm_receive)(struct hard_iface *if_incoming,
+ struct sk_buff *skb);
+};
+
#endif /* _NET_BATMAN_ADV_TYPES_H_ */
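The bat_algo_ops structure added above turns the OGM path into a pluggable routing-algorithm interface. As a rough, non-authoritative sketch of how a backend would hook into it, the stub below fills the ops table with callbacks matching the signatures declared in types.h; the registration helper bat_algo_register() and the batman-adv headers are assumptions here, not part of the hunks shown.

/* Illustrative only: a minimal algorithm backend for the new ops table.
 * bat_algo_register() is assumed to exist elsewhere in this series. */
static void my_ogm_init(struct hard_iface *hard_iface) { }
static void my_ogm_init_primary(struct hard_iface *hard_iface) { }
static void my_ogm_update_mac(struct hard_iface *hard_iface) { }
static void my_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes) { }
static void my_ogm_emit(struct forw_packet *forw_packet) { }
static void my_ogm_receive(struct hard_iface *if_incoming, struct sk_buff *skb) { }

static struct bat_algo_ops my_bat_algo_ops = {
	.name			= "MY_ALGO",
	.bat_ogm_init		= my_ogm_init,
	.bat_ogm_init_primary	= my_ogm_init_primary,
	.bat_ogm_update_mac	= my_ogm_update_mac,
	.bat_ogm_schedule	= my_ogm_schedule,
	.bat_ogm_emit		= my_ogm_emit,
	.bat_ogm_receive	= my_ogm_receive,
};

static int my_algo_init(void)
{
	return bat_algo_register(&my_bat_algo_ops);
}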
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 07d1c1da89dd..6f3c65952f53 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -67,7 +67,7 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
memmove(skb->data + uni_diff, skb->data, hdr_len);
unicast_packet = (struct unicast_packet *) skb_pull(skb, uni_diff);
- unicast_packet->packet_type = BAT_UNICAST;
+ unicast_packet->header.packet_type = BAT_UNICAST;
return skb;
@@ -251,9 +251,9 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
- frag1->ttl--;
- frag1->version = COMPAT_VERSION;
- frag1->packet_type = BAT_UNICAST_FRAG;
+ frag1->header.ttl--;
+ frag1->header.version = COMPAT_VERSION;
+ frag1->header.packet_type = BAT_UNICAST_FRAG;
memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(frag2, frag1, sizeof(*frag2));
@@ -320,11 +320,11 @@ find_router:
unicast_packet = (struct unicast_packet *)skb->data;
- unicast_packet->version = COMPAT_VERSION;
+ unicast_packet->header.version = COMPAT_VERSION;
/* batman packet type: unicast */
- unicast_packet->packet_type = BAT_UNICAST;
+ unicast_packet->header.packet_type = BAT_UNICAST;
/* set unicast ttl */
- unicast_packet->ttl = TTL;
+ unicast_packet->header.ttl = TTL;
/* copy the destination for faster routing */
memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
/* set the destination tt version number */
@@ -335,7 +335,7 @@ find_router:
data_len + sizeof(*unicast_packet) >
neigh_node->if_incoming->net_dev->mtu) {
/* send frag skb decreases ttl */
- unicast_packet->ttl++;
+ unicast_packet->header.ttl++;
ret = frag_send_skb(skb, bat_priv,
neigh_node->if_incoming, neigh_node->addr);
goto out;
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index cc3b9f2f3b5d..ac7e66100590 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -617,7 +617,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
packet->vis_type = atomic_read(&bat_priv->vis_mode);
memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
- packet->ttl = TTL;
+ packet->header.ttl = TTL;
packet->seqno = htonl(ntohl(packet->seqno) + 1);
packet->entries = 0;
skb_trim(info->skb_packet, sizeof(*packet));
@@ -818,19 +818,19 @@ static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
goto out;
packet = (struct vis_packet *)info->skb_packet->data;
- if (packet->ttl < 2) {
+ if (packet->header.ttl < 2) {
pr_debug("Error - can't send vis packet: ttl exceeded\n");
goto out;
}
memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
- packet->ttl--;
+ packet->header.ttl--;
if (is_broadcast_ether_addr(packet->target_orig))
broadcast_vis_packet(bat_priv, info);
else
unicast_vis_packet(bat_priv, info);
- packet->ttl++; /* restore TTL */
+ packet->header.ttl++; /* restore TTL */
out:
if (primary_if)
@@ -910,9 +910,9 @@ int vis_init(struct bat_priv *bat_priv)
INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
kref_init(&bat_priv->my_vis_info->refcount);
bat_priv->my_vis_info->bat_priv = bat_priv;
- packet->version = COMPAT_VERSION;
- packet->packet_type = BAT_VIS;
- packet->ttl = TTL;
+ packet->header.version = COMPAT_VERSION;
+ packet->header.packet_type = BAT_VIS;
+ packet->header.ttl = TTL;
packet->seqno = 0;
packet->entries = 0;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 71773b014e0c..ba829de84423 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -167,10 +167,11 @@ static int br_set_mac_address(struct net_device *dev, void *p)
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
+ return -EADDRNOTAVAIL;
spin_lock_bh(&br->lock);
if (compare_ether_addr(dev->dev_addr, addr->sa_data)) {
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
br_fdb_change_mac_address(br, addr->sa_data);
br_stp_change_bridge_id(br, addr->sa_data);
@@ -334,7 +335,7 @@ void br_dev_setup(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
ether_setup(dev);
dev->netdev_ops = &br_netdev_ops;
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 82c57069415f..aa6f716524fd 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -162,7 +162,6 @@ void caif_flow_cb(struct sk_buff *skb)
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
int err, high = 0, qlen = 0;
- struct caif_dev_common *caifdev;
struct caif_device_entry *caifd =
container_of(layer, struct caif_device_entry, layer);
struct sk_buff *skb;
@@ -174,7 +173,6 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
skb->dev = caifd->netdev;
skb_reset_network_header(skb);
skb->protocol = htons(ETH_P_CAIF);
- caifdev = netdev_priv(caifd->netdev);
/* Check if we need to handle xoff */
if (likely(caifd->netdev->tx_queue_len == 0))
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index a97d97a3a512..5016fa57b623 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -43,34 +43,9 @@ enum caif_states {
#define TX_FLOW_ON_BIT 1
#define RX_FLOW_ON_BIT 2
-static struct dentry *debugfsdir;
-
-#ifdef CONFIG_DEBUG_FS
-struct debug_fs_counter {
- atomic_t caif_nr_socks;
- atomic_t caif_sock_create;
- atomic_t num_connect_req;
- atomic_t num_connect_resp;
- atomic_t num_connect_fail_resp;
- atomic_t num_disconnect;
- atomic_t num_remote_shutdown_ind;
- atomic_t num_tx_flow_off_ind;
- atomic_t num_tx_flow_on_ind;
- atomic_t num_rx_flow_off;
- atomic_t num_rx_flow_on;
-};
-static struct debug_fs_counter cnt;
-#define dbfs_atomic_inc(v) atomic_inc_return(v)
-#define dbfs_atomic_dec(v) atomic_dec_return(v)
-#else
-#define dbfs_atomic_inc(v) 0
-#define dbfs_atomic_dec(v) 0
-#endif
-
struct caifsock {
struct sock sk; /* must be first member */
struct cflayer layer;
- char name[CAIF_LAYER_NAME_SZ]; /* Used for debugging */
u32 flow_state;
struct caif_connect_request conn_req;
struct mutex readlock;
@@ -161,7 +136,6 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
atomic_read(&cf_sk->sk.sk_rmem_alloc),
sk_rcvbuf_lowwater(cf_sk));
set_rx_flow_off(cf_sk);
- dbfs_atomic_inc(&cnt.num_rx_flow_off);
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
@@ -172,7 +146,6 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
set_rx_flow_off(cf_sk);
if (net_ratelimit())
pr_debug("sending flow OFF due to rmem_schedule\n");
- dbfs_atomic_inc(&cnt.num_rx_flow_off);
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
skb->dev = NULL;
@@ -233,14 +206,12 @@ static void caif_ctrl_cb(struct cflayer *layr,
switch (flow) {
case CAIF_CTRLCMD_FLOW_ON_IND:
/* OK from modem to start sending again */
- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
set_tx_flow_on(cf_sk);
cf_sk->sk.sk_state_change(&cf_sk->sk);
break;
case CAIF_CTRLCMD_FLOW_OFF_IND:
/* Modem asks us to shut up */
- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
set_tx_flow_off(cf_sk);
cf_sk->sk.sk_state_change(&cf_sk->sk);
break;
@@ -249,7 +220,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
/* We're now connected */
caif_client_register_refcnt(&cf_sk->layer,
cfsk_hold, cfsk_put);
- dbfs_atomic_inc(&cnt.num_connect_resp);
cf_sk->sk.sk_state = CAIF_CONNECTED;
set_tx_flow_on(cf_sk);
cf_sk->sk.sk_state_change(&cf_sk->sk);
@@ -263,7 +233,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
case CAIF_CTRLCMD_INIT_FAIL_RSP:
/* Connect request failed */
- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
cf_sk->sk.sk_err = ECONNREFUSED;
cf_sk->sk.sk_state = CAIF_DISCONNECTED;
cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
@@ -277,7 +246,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
/* Modem has closed this connection, or device is down. */
- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
cf_sk->sk.sk_err = ECONNRESET;
set_rx_flow_on(cf_sk);
@@ -297,7 +265,6 @@ static void caif_check_flow_release(struct sock *sk)
return;
if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
- dbfs_atomic_inc(&cnt.num_rx_flow_on);
set_rx_flow_on(cf_sk);
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
}
@@ -856,7 +823,6 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
/*ifindex = id of the interface.*/
cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
- dbfs_atomic_inc(&cnt.num_connect_req);
cf_sk->layer.receive = caif_sktrecv_cb;
err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
@@ -945,8 +911,6 @@ static int caif_release(struct socket *sock)
spin_unlock_bh(&sk->sk_receive_queue.lock);
sock->sk = NULL;
- dbfs_atomic_inc(&cnt.num_disconnect);
-
WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
if (cf_sk->debugfs_socket_dir != NULL)
debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
@@ -1054,14 +1018,12 @@ static void caif_sock_destructor(struct sock *sk)
return;
}
sk_stream_kill_queues(&cf_sk->sk);
- dbfs_atomic_dec(&cnt.caif_nr_socks);
caif_free_client(&cf_sk->layer);
}
static int caif_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
- int num;
struct sock *sk = NULL;
struct caifsock *cf_sk = NULL;
static struct proto prot = {.name = "PF_CAIF",
@@ -1122,34 +1084,6 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
cf_sk->conn_req.protocol = protocol;
- /* Increase the number of sockets created. */
- dbfs_atomic_inc(&cnt.caif_nr_socks);
- num = dbfs_atomic_inc(&cnt.caif_sock_create);
-#ifdef CONFIG_DEBUG_FS
- if (!IS_ERR(debugfsdir)) {
-
- /* Fill in some information concerning the misc socket. */
- snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d", num);
-
- cf_sk->debugfs_socket_dir =
- debugfs_create_dir(cf_sk->name, debugfsdir);
-
- debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
- cf_sk->debugfs_socket_dir,
- (u32 *) &cf_sk->sk.sk_state);
- debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
- cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
- debugfs_create_u32("sk_rmem_alloc", S_IRUSR | S_IWUSR,
- cf_sk->debugfs_socket_dir,
- (u32 *) &cf_sk->sk.sk_rmem_alloc);
- debugfs_create_u32("sk_wmem_alloc", S_IRUSR | S_IWUSR,
- cf_sk->debugfs_socket_dir,
- (u32 *) &cf_sk->sk.sk_wmem_alloc);
- debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
- cf_sk->debugfs_socket_dir,
- (u32 *) &cf_sk->layer.id);
- }
-#endif
release_sock(&cf_sk->sk);
return 0;
}
@@ -1161,7 +1095,7 @@ static struct net_proto_family caif_family_ops = {
.owner = THIS_MODULE,
};
-static int af_caif_init(void)
+static int __init caif_sktinit_module(void)
{
int err = sock_register(&caif_family_ops);
if (!err)
@@ -1169,54 +1103,9 @@ static int af_caif_init(void)
return 0;
}
-static int __init caif_sktinit_module(void)
-{
-#ifdef CONFIG_DEBUG_FS
- debugfsdir = debugfs_create_dir("caif_sk", NULL);
- if (!IS_ERR(debugfsdir)) {
- debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
- debugfsdir,
- (u32 *) &cnt.caif_nr_socks);
- debugfs_create_u32("num_create", S_IRUSR | S_IWUSR,
- debugfsdir,
- (u32 *) &cnt.caif_sock_create);
- debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR,
- debugfsdir,
- (u32 *) &cnt.num_connect_req);
- debugfs_create_u32("num_connect_resp", S_IRUSR | S_IWUSR,
- debugfsdir,
- (u32 *) &cnt.num_connect_resp);
- debugfs_create_u32("num_connect_fail_resp", S_IRUSR | S_IWUSR,
- debugfsdir,
- (u32 *) &cnt.num_connect_fail_resp);
- debugfs_create_u32("num_disconnect", S_IRUSR | S_IWUSR,
- debugfsdir,
- (u32 *) &cnt.num_disconnect);
- debugfs_create_u32("num_remote_shutdown_ind",
- S_IRUSR | S_IWUSR, debugfsdir,
- (u32 *) &cnt.num_remote_shutdown_ind);
- debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
- debugfsdir,
- (u32 *) &cnt.num_tx_flow_off_ind);
- debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
- debugfsdir,
- (u32 *) &cnt.num_tx_flow_on_ind);
- debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
- debugfsdir,
- (u32 *) &cnt.num_rx_flow_off);
- debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
- debugfsdir,
- (u32 *) &cnt.num_rx_flow_on);
- }
-#endif
- return af_caif_init();
-}
-
static void __exit caif_sktexit_module(void)
{
sock_unregister(PF_CAIF);
- if (debugfsdir != NULL)
- debugfs_remove_recursive(debugfsdir);
}
module_init(caif_sktinit_module);
module_exit(caif_sktexit_module);
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 865690948bbc..e866234d0e5a 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -72,14 +72,12 @@ static void robust_list_del(struct list_head *delete_node)
static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
struct sk_buff *skb;
- struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
+ struct chnl_net *priv;
int pktlen;
- int err = 0;
const u8 *ip_version;
u8 buf;
priv = container_of(layr, struct chnl_net, chnl);
-
if (!priv)
return -EINVAL;
@@ -95,8 +93,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
/* check the version of IP */
ip_version = skb_header_pointer(skb, 0, 1, &buf);
- if (!ip_version)
- return -EINVAL;
+
switch (*ip_version >> 4) {
case 4:
skb->protocol = htons(ETH_P_IP);
@@ -105,6 +102,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
skb->protocol = htons(ETH_P_IPV6);
break;
default:
+ priv->netdev->stats.rx_errors++;
return -EINVAL;
}
@@ -123,7 +121,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
priv->netdev->stats.rx_packets++;
priv->netdev->stats.rx_bytes += pktlen;
- return err;
+ return 0;
}
static int delete_device(struct chnl_net *dev)
@@ -221,11 +219,13 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->len > priv->netdev->mtu) {
pr_warn("Size of skb exceeded MTU\n");
+ dev->stats.tx_errors++;
return -ENOSPC;
}
if (!priv->flowenabled) {
pr_debug("dropping packets flow off\n");
+ dev->stats.tx_dropped++;
return NETDEV_TX_BUSY;
}
@@ -240,8 +240,7 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Send the packet down the stack. */
result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
if (result) {
- if (result == -EAGAIN)
- result = NETDEV_TX_BUSY;
+ dev->stats.tx_dropped++;
return result;
}
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 68bbf9f65cb0..d3cf12f62c8f 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -132,6 +132,8 @@ out_noerr:
* __skb_recv_datagram - Receive a datagram skbuff
* @sk: socket
* @flags: MSG_ flags
+ * @off: an offset in bytes to peek skb from. Returns an offset
+ * within an skb where data actually starts
* @peeked: returns non-zero if this packet has been seen before
* @err: error code returned
*
@@ -158,7 +160,7 @@ out_noerr:
* the standard around please.
*/
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
- int *peeked, int *err)
+ int *peeked, int *off, int *err)
{
struct sk_buff *skb;
long timeo;
@@ -180,21 +182,25 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
* However, this function was correct in any case. 8)
*/
unsigned long cpu_flags;
+ struct sk_buff_head *queue = &sk->sk_receive_queue;
- spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
- skb = skb_peek(&sk->sk_receive_queue);
- if (skb) {
+ spin_lock_irqsave(&queue->lock, cpu_flags);
+ skb_queue_walk(queue, skb) {
*peeked = skb->peeked;
if (flags & MSG_PEEK) {
+ if (*off >= skb->len) {
+ *off -= skb->len;
+ continue;
+ }
skb->peeked = 1;
atomic_inc(&skb->users);
} else
- __skb_unlink(skb, &sk->sk_receive_queue);
- }
- spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
+ __skb_unlink(skb, queue);
- if (skb)
+ spin_unlock_irqrestore(&queue->lock, cpu_flags);
return skb;
+ }
+ spin_unlock_irqrestore(&queue->lock, cpu_flags);
/* User doesn't want to wait */
error = -EAGAIN;
@@ -214,10 +220,10 @@ EXPORT_SYMBOL(__skb_recv_datagram);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
int noblock, int *err)
{
- int peeked;
+ int peeked, off = 0;
return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
- &peeked, err);
+ &peeked, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ca32f6b3105..763a0eda7158 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -446,7 +446,7 @@ void __dev_remove_pack(struct packet_type *pt)
}
}
- printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
+ pr_warn("dev_remove_pack: %p not found\n", pt);
out:
spin_unlock(&ptype_lock);
}
@@ -1039,8 +1039,7 @@ rollback:
memcpy(dev->name, oldname, IFNAMSIZ);
goto rollback;
} else {
- printk(KERN_ERR
- "%s: name change rollback failed: %d.\n",
+ pr_err("%s: name change rollback failed: %d\n",
dev->name, ret);
}
}
@@ -1139,9 +1138,8 @@ void dev_load(struct net *net, const char *name)
no_module = request_module("netdev-%s", name);
if (no_module && capable(CAP_SYS_MODULE)) {
if (!request_module("%s", name))
- pr_err("Loading kernel module for a network device "
-"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
-"instead\n", name);
+ pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+ name);
}
}
EXPORT_SYMBOL(dev_load);
@@ -1655,10 +1653,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
if (skb_network_header(skb2) < skb2->data ||
skb2->network_header > skb2->tail) {
if (net_ratelimit())
- printk(KERN_CRIT "protocol %04x is "
- "buggy, dev %s\n",
- ntohs(skb2->protocol),
- dev->name);
+ pr_crit("protocol %04x is buggy, dev %s\n",
+ ntohs(skb2->protocol),
+ dev->name);
skb_reset_network_header(skb2);
}
@@ -1691,9 +1688,7 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
/* If TC0 is invalidated disable TC mapping */
if (tc->offset + tc->count > txq) {
- pr_warning("Number of in use tx queues changed "
- "invalidating tc mappings. Priority "
- "traffic classification disabled!\n");
+ pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
dev->num_tc = 0;
return;
}
@@ -1704,11 +1699,8 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
tc = &dev->tc_to_txq[q];
if (tc->offset + tc->count > txq) {
- pr_warning("Number of in use tx queues "
- "changed. Priority %i to tc "
- "mapping %i is no longer valid "
- "setting map to 0\n",
- i, q);
+ pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
+ i, q);
netdev_set_prio_tc_map(dev, i, 0);
}
}
@@ -2014,8 +2006,7 @@ EXPORT_SYMBOL(skb_gso_segment);
void netdev_rx_csum_fault(struct net_device *dev)
{
if (net_ratelimit()) {
- printk(KERN_ERR "%s: hw csum failure.\n",
- dev ? dev->name : "<unknown>");
+ pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
dump_stack();
}
}
@@ -2332,9 +2323,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
if (unlikely(queue_index >= dev->real_num_tx_queues)) {
if (net_ratelimit()) {
- pr_warning("%s selects TX queue %d, but "
- "real number of TX queues is %d\n",
- dev->name, queue_index, dev->real_num_tx_queues);
+ pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
+ dev->name, queue_index,
+ dev->real_num_tx_queues);
}
return 0;
}
@@ -2578,16 +2569,16 @@ int dev_queue_xmit(struct sk_buff *skb)
}
HARD_TX_UNLOCK(dev, txq);
if (net_ratelimit())
- printk(KERN_CRIT "Virtual device %s asks to "
- "queue packet!\n", dev->name);
+ pr_crit("Virtual device %s asks to queue packet!\n",
+ dev->name);
} else {
/* Recursion is detected! It is possible,
* unfortunately
*/
recursion_alert:
if (net_ratelimit())
- printk(KERN_CRIT "Dead loop on virtual device "
- "%s, fix it urgently!\n", dev->name);
+ pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
+ dev->name);
}
}
@@ -3069,8 +3060,8 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
if (unlikely(MAX_RED_LOOP < ttl++)) {
if (net_ratelimit())
- pr_warning( "Redir loop detected Dropping packet (%d->%d)\n",
- skb->skb_iif, dev->ifindex);
+ pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
+ skb->skb_iif, dev->ifindex);
return TC_ACT_SHOT;
}
@@ -4497,16 +4488,15 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
dev->flags &= ~IFF_PROMISC;
else {
dev->promiscuity -= inc;
- printk(KERN_WARNING "%s: promiscuity touches roof, "
- "set promiscuity failed, promiscuity feature "
- "of device might be broken.\n", dev->name);
+ pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
+ dev->name);
return -EOVERFLOW;
}
}
if (dev->flags != old_flags) {
- printk(KERN_INFO "device %s %s promiscuous mode\n",
- dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
- "left");
+ pr_info("device %s %s promiscuous mode\n",
+ dev->name,
+ dev->flags & IFF_PROMISC ? "entered" : "left");
if (audit_enabled) {
current_uid_gid(&uid, &gid);
audit_log(current->audit_context, GFP_ATOMIC,
@@ -4579,9 +4569,8 @@ int dev_set_allmulti(struct net_device *dev, int inc)
dev->flags &= ~IFF_ALLMULTI;
else {
dev->allmulti -= inc;
- printk(KERN_WARNING "%s: allmulti touches roof, "
- "set allmulti failed, allmulti feature of "
- "device might be broken.\n", dev->name);
+ pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
+ dev->name);
return -EOVERFLOW;
}
}
@@ -5238,8 +5227,8 @@ static void rollback_registered_many(struct list_head *head)
* devices and proceed with the remaining.
*/
if (dev->reg_state == NETREG_UNINITIALIZED) {
- pr_debug("unregister_netdevice: device %s/%p never "
- "was registered\n", dev->name, dev);
+ pr_debug("unregister_netdevice: device %s/%p never was registered\n",
+ dev->name, dev);
WARN_ON(1);
list_del(&dev->unreg_list);
@@ -5471,7 +5460,7 @@ static int netif_alloc_rx_queues(struct net_device *dev)
rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
if (!rx) {
- pr_err("netdev: Unable to allocate %u rx queues.\n", count);
+ pr_err("netdev: Unable to allocate %u rx queues\n", count);
return -ENOMEM;
}
dev->_rx = rx;
@@ -5505,8 +5494,7 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
if (!tx) {
- pr_err("netdev: Unable to allocate %u tx queues.\n",
- count);
+ pr_err("netdev: Unable to allocate %u tx queues\n", count);
return -ENOMEM;
}
dev->_tx = tx;
@@ -5765,10 +5753,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
refcnt = netdev_refcnt_read(dev);
if (time_after(jiffies, warning_time + 10 * HZ)) {
- printk(KERN_EMERG "unregister_netdevice: "
- "waiting for %s to become free. Usage "
- "count = %d\n",
- dev->name, refcnt);
+ pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
+ dev->name, refcnt);
warning_time = jiffies;
}
}
@@ -5819,7 +5805,7 @@ void netdev_run_todo(void)
list_del(&dev->todo_list);
if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
- printk(KERN_ERR "network todo '%s' but state %d\n",
+ pr_err("network todo '%s' but state %d\n",
dev->name, dev->reg_state);
dump_stack();
continue;
@@ -5935,15 +5921,13 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
BUG_ON(strlen(name) >= sizeof(dev->name));
if (txqs < 1) {
- pr_err("alloc_netdev: Unable to allocate device "
- "with zero queues.\n");
+ pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
return NULL;
}
#ifdef CONFIG_RPS
if (rxqs < 1) {
- pr_err("alloc_netdev: Unable to allocate device "
- "with zero RX queues.\n");
+ pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
return NULL;
}
#endif
@@ -5959,7 +5943,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
p = kzalloc(alloc_size, GFP_KERNEL);
if (!p) {
- printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
+ pr_err("alloc_netdev: Unable to allocate device\n");
return NULL;
}
@@ -6492,8 +6476,8 @@ static void __net_exit default_device_exit(struct net *net)
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
- printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
- __func__, dev->name, err);
+ pr_emerg("%s: failed to move %s to init_net: %d\n",
+ __func__, dev->name, err);
BUG();
}
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 3f79db1b612a..6d6d7d25caaa 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -73,6 +73,8 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_RXCSUM_BIT] = "rx-checksum",
[NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy",
[NETIF_F_LOOPBACK_BIT] = "loopback",
+ [NETIF_F_RXFCS_BIT] = "rx-fcs",
+ [NETIF_F_RXALL_BIT] = "rx-all",
};
static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 2a83914b0277..0a68045782d1 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2167,6 +2167,35 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
+ u32 pid, u32 seq, int type, unsigned int flags,
+ struct neigh_table *tbl)
+{
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = tbl->family;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = pn->flags | NTF_PROXY;
+ ndm->ndm_type = NDA_DST;
+ ndm->ndm_ifindex = pn->dev->ifindex;
+ ndm->ndm_state = NUD_NONE;
+
+ NLA_PUT(skb, NDA_DST, tbl->key_len, pn->key);
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
static void neigh_update_notify(struct neighbour *neigh)
{
call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
@@ -2216,23 +2245,78 @@ out:
return rc;
}
+static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct pneigh_entry *n;
+ struct net *net = sock_net(skb->sk);
+ int rc, h, s_h = cb->args[3];
+ int idx, s_idx = idx = cb->args[4];
+
+ read_lock_bh(&tbl->lock);
+
+ for (h = 0; h <= PNEIGH_HASHMASK; h++) {
+ if (h < s_h)
+ continue;
+ if (h > s_h)
+ s_idx = 0;
+ for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
+ if (dev_net(n->dev) != net)
+ continue;
+ if (idx < s_idx)
+ goto next;
+ if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH,
+ NLM_F_MULTI, tbl) <= 0) {
+ read_unlock_bh(&tbl->lock);
+ rc = -1;
+ goto out;
+ }
+ next:
+ idx++;
+ }
+ }
+
+ read_unlock_bh(&tbl->lock);
+ rc = skb->len;
+out:
+ cb->args[3] = h;
+ cb->args[4] = idx;
+ return rc;
+
+}
+
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
struct neigh_table *tbl;
int t, family, s_t;
+ int proxy = 0;
+ int err = 0;
read_lock(&neigh_tbl_lock);
family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
+
+ /* check whether a full ndmsg structure is present; the family member
+ * sits at the same offset in both structures
+ */
+ if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
+ ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
+ proxy = 1;
+
s_t = cb->args[0];
- for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
+ for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
+ tbl = tbl->next, t++) {
if (t < s_t || (family && tbl->family != family))
continue;
if (t > s_t)
memset(&cb->args[1], 0, sizeof(cb->args) -
sizeof(cb->args[0]));
- if (neigh_dump_table(tbl, skb, cb) < 0)
- break;
+ if (proxy)
+ err = pneigh_dump_table(tbl, skb, cb);
+ else
+ err = neigh_dump_table(tbl, skb, cb);
}
read_unlock(&neigh_tbl_lock);
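With the neighbour.c changes above, an RTM_GETNEIGH dump whose payload is a full ndmsg with ndm_flags set to NTF_PROXY is routed to pneigh_dump_table() and returns proxy entries instead of the regular cache. A minimal userspace sketch of such a request over a raw rtnetlink socket might look like this (error handling trimmed, reply attribute parsing omitted):

/* Sketch: ask the kernel to dump proxy neighbour entries (NTF_PROXY). */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct ndmsg ndm;
	} req;
	char buf[16384];
	ssize_t len;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
	req.nlh.nlmsg_type = RTM_GETNEIGH;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.ndm.ndm_family = AF_INET;
	req.ndm.ndm_flags = NTF_PROXY;	/* this is what selects pneigh_dump_table() */

	send(fd, &req, req.nlh.nlmsg_len, 0);

	/* the reply carries RTM_NEWNEIGH messages with NDA_DST attributes
	 * describing the proxy entries, terminated by NLMSG_DONE */
	len = recv(fd, buf, sizeof(buf), 0);
	printf("received %zd bytes of neighbour dump data\n", len);
	close(fd);
	return 0;
}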
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index ddefc513b44a..3d84fb9d8873 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -9,6 +9,8 @@
* Copyright (C) 2002 Red Hat, Inc.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -45,9 +47,11 @@ static atomic_t trapped;
#define NETPOLL_RX_ENABLED 1
#define NETPOLL_RX_DROP 2
-#define MAX_SKB_SIZE \
- (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
- sizeof(struct iphdr) + sizeof(struct ethhdr))
+#define MAX_SKB_SIZE \
+ (sizeof(struct ethhdr) + \
+ sizeof(struct iphdr) + \
+ sizeof(struct udphdr) + \
+ MAX_UDP_CHUNK)
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
@@ -55,6 +59,13 @@ static void arp_reply(struct sk_buff *skb);
static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
+#define np_info(np, fmt, ...) \
+ pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_err(np, fmt, ...) \
+ pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_notice(np, fmt, ...) \
+ pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
+
static void queue_process(struct work_struct *work)
{
struct netpoll_info *npinfo =
@@ -627,18 +638,12 @@ out:
void netpoll_print_options(struct netpoll *np)
{
- printk(KERN_INFO "%s: local port %d\n",
- np->name, np->local_port);
- printk(KERN_INFO "%s: local IP %pI4\n",
- np->name, &np->local_ip);
- printk(KERN_INFO "%s: interface '%s'\n",
- np->name, np->dev_name);
- printk(KERN_INFO "%s: remote port %d\n",
- np->name, np->remote_port);
- printk(KERN_INFO "%s: remote IP %pI4\n",
- np->name, &np->remote_ip);
- printk(KERN_INFO "%s: remote ethernet address %pM\n",
- np->name, np->remote_mac);
+ np_info(np, "local port %d\n", np->local_port);
+ np_info(np, "local IP %pI4\n", &np->local_ip);
+ np_info(np, "interface '%s'\n", np->dev_name);
+ np_info(np, "remote port %d\n", np->remote_port);
+ np_info(np, "remote IP %pI4\n", &np->remote_ip);
+ np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
@@ -680,8 +685,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
goto parse_failed;
*delim = 0;
if (*cur == ' ' || *cur == '\t')
- printk(KERN_INFO "%s: warning: whitespace"
- "is not allowed\n", np->name);
+ np_info(np, "warning: whitespace is not allowed\n");
np->remote_port = simple_strtol(cur, NULL, 10);
cur = delim;
}
@@ -705,8 +709,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
return 0;
parse_failed:
- printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
- np->name, cur);
+ np_info(np, "couldn't parse config at '%s'!\n", cur);
return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
@@ -721,8 +724,8 @@ int __netpoll_setup(struct netpoll *np)
if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
!ndev->netdev_ops->ndo_poll_controller) {
- printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
- np->name, np->dev_name);
+ np_err(np, "%s doesn't support polling, aborting\n",
+ np->dev_name);
err = -ENOTSUPP;
goto out;
}
@@ -785,14 +788,12 @@ int netpoll_setup(struct netpoll *np)
if (np->dev_name)
ndev = dev_get_by_name(&init_net, np->dev_name);
if (!ndev) {
- printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
- np->name, np->dev_name);
+ np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
return -ENODEV;
}
if (ndev->master) {
- printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
- np->name, np->dev_name);
+ np_err(np, "%s is a slave device, aborting\n", np->dev_name);
err = -EBUSY;
goto put;
}
@@ -800,16 +801,14 @@ int netpoll_setup(struct netpoll *np)
if (!netif_running(ndev)) {
unsigned long atmost, atleast;
- printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
- np->name, np->dev_name);
+ np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
rtnl_lock();
err = dev_open(ndev);
rtnl_unlock();
if (err) {
- printk(KERN_ERR "%s: failed to open %s\n",
- np->name, ndev->name);
+ np_err(np, "failed to open %s\n", ndev->name);
goto put;
}
@@ -817,9 +816,7 @@ int netpoll_setup(struct netpoll *np)
atmost = jiffies + carrier_timeout * HZ;
while (!netif_carrier_ok(ndev)) {
if (time_after(jiffies, atmost)) {
- printk(KERN_NOTICE
- "%s: timeout waiting for carrier\n",
- np->name);
+ np_notice(np, "timeout waiting for carrier\n");
break;
}
msleep(1);
@@ -831,9 +828,7 @@ int netpoll_setup(struct netpoll *np)
*/
if (time_before(jiffies, atleast)) {
- printk(KERN_NOTICE "%s: carrier detect appears"
- " untrustworthy, waiting 4 seconds\n",
- np->name);
+ np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
msleep(4000);
}
}
@@ -844,15 +839,15 @@ int netpoll_setup(struct netpoll *np)
if (!in_dev || !in_dev->ifa_list) {
rcu_read_unlock();
- printk(KERN_ERR "%s: no IP address for %s, aborting\n",
- np->name, np->dev_name);
+ np_err(np, "no IP address for %s, aborting\n",
+ np->dev_name);
err = -EDESTADDRREQ;
goto put;
}
np->local_ip = in_dev->ifa_list->ifa_local;
rcu_read_unlock();
- printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
+ np_info(np, "local IP %pI4\n", &np->local_ip);
}
np->dev = ndev;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 606a6e8f3671..5cf39cd7da85 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2017,8 +2017,13 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
__rtnl_unlock();
rtnl = net->rtnl;
- err = netlink_dump_start(rtnl, skb, nlh, dumpit,
- NULL, min_dump_alloc);
+ {
+ struct netlink_dump_control c = {
+ .dump = dumpit,
+ .min_dump_alloc = min_dump_alloc,
+ };
+ err = netlink_dump_start(rtnl, skb, nlh, &c);
+ }
rtnl_lock();
return err;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da0c97f2fab4..6eb656acdfe5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -592,6 +592,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->rxhash = old->rxhash;
new->ooo_okay = old->ooo_okay;
new->l4_rxhash = old->l4_rxhash;
+ new->no_fcs = old->no_fcs;
#ifdef CONFIG_XFRM
new->sp = secpath_get(old->sp);
#endif
@@ -2906,7 +2907,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
nskb->prev = p;
nskb->data_len += p->len;
- nskb->truesize += p->len;
+ nskb->truesize += p->truesize;
nskb->len += p->len;
*head = nskb;
@@ -2916,6 +2917,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
p = nskb;
merge:
+ p->truesize += skb->truesize - len;
if (offset > headlen) {
unsigned int eat = offset - headlen;
diff --git a/net/core/sock.c b/net/core/sock.c
index 02f8dfe320b7..216719cb5c7f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -793,6 +793,17 @@ set_rcvbuf:
sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
break;
+ case SO_PEEK_OFF:
+ if (sock->ops->set_peek_off)
+ sock->ops->set_peek_off(sk, val);
+ else
+ ret = -EOPNOTSUPP;
+ break;
+
+ case SO_NOFCS:
+ sock_valbool_flag(sk, SOCK_NOFCS, valbool);
+ break;
+
default:
ret = -ENOPROTOOPT;
break;
@@ -1018,6 +1029,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
break;
+ case SO_PEEK_OFF:
+ if (!sock->ops->set_peek_off)
+ return -EOPNOTSUPP;
+
+ v.val = sk->sk_peek_off;
+ break;
+ case SO_NOFCS:
+ v.val = !!sock_flag(sk, SOCK_NOFCS);
+ break;
default:
return -ENOPROTOOPT;
}
@@ -2092,6 +2112,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_sndmsg_page = NULL;
sk->sk_sndmsg_off = 0;
+ sk->sk_peek_off = -1;
sk->sk_peer_pid = NULL;
sk->sk_peer_cred = NULL;
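Together with the __skb_recv_datagram() change in datagram.c, the sock.c hunks above add SO_PEEK_OFF: once a non-negative peek offset is set, MSG_PEEK reads start at that offset and it advances with each peek, so successive peeks walk forward through the queued data instead of rereading the head. A hedged userspace sketch follows; the numeric fallback for SO_PEEK_OFF mirrors the asm-generic value, and AF_UNIX datagram support is assumed since the per-protocol hookup is not part of the hunks shown here.

/* Sketch: walk queued datagram data with MSG_PEEK and SO_PEEK_OFF.
 * The option only succeeds on sockets whose protocol implements
 * set_peek_off(); otherwise setsockopt() fails with EOPNOTSUPP, as the
 * sock.c hunk above shows. */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_PEEK_OFF
#define SO_PEEK_OFF 42	/* asm-generic value; an assumption for older headers */
#endif

int main(void)
{
	int sv[2], off = 0;
	char buf[16];
	ssize_t n;

	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
	send(sv[0], "first", 5, 0);
	send(sv[0], "second", 6, 0);

	/* start peeking from offset 0; the offset advances after each peek */
	if (setsockopt(sv[1], SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off)) < 0)
		perror("SO_PEEK_OFF");

	while ((n = recv(sv[1], buf, sizeof(buf), MSG_PEEK | MSG_DONTWAIT)) > 0)
		printf("peeked %zd bytes: %.*s\n", n, (int)n, buf);

	/* a normal read still consumes from the head of the queue */
	n = recv(sv[1], buf, sizeof(buf), MSG_DONTWAIT);
	printf("consumed %zd bytes\n", n);
	return 0;
}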
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index befe426491ba..ee7013f24fca 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -205,17 +205,23 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
struct neighbour *neigh = dst_get_neighbour_noref(dst);
struct net_device *dev = neigh->dev;
char mac_addr[ETH_ALEN];
+ unsigned int seq;
+ int err;
dn_dn2eth(mac_addr, rt->rt_local_src);
- if (dev_hard_header(skb, dev, ntohs(skb->protocol), neigh->ha,
- mac_addr, skb->len) >= 0)
- return dev_queue_xmit(skb);
-
- if (net_ratelimit())
- printk(KERN_DEBUG "dn_neigh_output_packet: oops, can't send packet\n");
-
- kfree_skb(skb);
- return -EINVAL;
+ do {
+ seq = read_seqbegin(&neigh->ha_lock);
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ neigh->ha, mac_addr, skb->len);
+ } while (read_seqretry(&neigh->ha_lock, seq));
+
+ if (err >= 0)
+ err = dev_queue_xmit(skb);
+ else {
+ kfree_skb(skb);
+ err = -EINVAL;
+ }
+ return err;
}
static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index f31ce72dca65..80a3de4906d3 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -724,11 +724,10 @@ static int dn_output(struct sk_buff *skb)
struct dn_route *rt = (struct dn_route *)dst;
struct net_device *dev = dst->dev;
struct dn_skb_cb *cb = DN_SKB_CB(skb);
- struct neighbour *neigh;
int err = -EINVAL;
- if ((neigh = dst_get_neighbour_noref(dst)) == NULL)
+ if (dst_get_neighbour_noref(dst) == NULL)
goto error;
skb->dev = dev;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index a2468363978e..a93af86b8474 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -288,6 +288,8 @@ int eth_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ /* if the device is marked NET_ADDR_RANDOM, reset that flag */
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
EXPORT_SYMBOL(eth_mac_addr);
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index e4ecc1eef98c..368515885368 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -55,6 +55,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
@@ -924,19 +925,6 @@ drop:
return -EINVAL;
}
-static int lowpan_set_address(struct net_device *dev, void *p)
-{
- struct sockaddr *sa = p;
-
- if (netif_running(dev))
- return -EBUSY;
-
- /* TODO: validate addr */
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
-
- return 0;
-}
-
static int lowpan_get_mac_header_length(struct sk_buff *skb)
{
/*
@@ -1062,7 +1050,7 @@ static struct header_ops lowpan_header_ops = {
static const struct net_device_ops lowpan_netdev_ops = {
.ndo_start_xmit = lowpan_xmit,
- .ndo_set_mac_address = lowpan_set_address,
+ .ndo_set_mac_address = eth_mac_addr,
};
static void lowpan_setup(struct net_device *dev)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f7b5670744f0..e588a34e85c2 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -381,6 +381,7 @@ lookup_protocol:
inet->mc_all = 1;
inet->mc_index = 0;
inet->mc_list = NULL;
+ inet->rcv_tos = 0;
sk_refcnt_debug_inc(sk);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index fcf281819cd4..8d25a1c557eb 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -960,9 +960,12 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
return -EINVAL;
}
-
- return netlink_dump_start(sock_diag_nlsk, skb, nlh,
- inet_diag_dump_compat, NULL, 0);
+ {
+ struct netlink_dump_control c = {
+ .dump = inet_diag_dump_compat,
+ };
+ return netlink_dump_start(sock_diag_nlsk, skb, nlh, &c);
+ }
}
return inet_diag_get_exact_compat(skb, nlh);
@@ -985,9 +988,12 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
return -EINVAL;
}
-
- return netlink_dump_start(sock_diag_nlsk, skb, h,
- inet_diag_dump, NULL, 0);
+ {
+ struct netlink_dump_control c = {
+ .dump = inet_diag_dump,
+ };
+ return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
+ }
}
return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h));
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 38673d2860e2..6ef66af12291 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -730,15 +730,16 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if (skb->protocol == htons(ETH_P_IP)) {
rt = skb_rtable(skb);
- if ((dst = rt->rt_gateway) == 0)
- goto tx_error_icmp;
+ dst = rt->rt_gateway;
}
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) {
- struct neighbour *neigh = dst_get_neighbour_noref(skb_dst(skb));
const struct in6_addr *addr6;
+ struct neighbour *neigh;
+ bool do_tx_error_icmp;
int addr_type;
+ neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
if (neigh == NULL)
goto tx_error;
@@ -751,9 +752,14 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
}
if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
+ do_tx_error_icmp = true;
+ else {
+ do_tx_error_icmp = false;
+ dst = addr6->s6_addr32[3];
+ }
+ neigh_release(neigh);
+ if (do_tx_error_icmp)
goto tx_error_icmp;
-
- dst = addr6->s6_addr32[3];
}
#endif
else
@@ -914,9 +920,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
__IPTUNNEL_XMIT(tstats, &dev->stats);
return NETDEV_TX_OK;
+#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
dst_link_failure(skb);
-
+#endif
tx_error:
dev->stats.tx_errors++;
dev_kfree_skb(skb);
@@ -1529,7 +1536,7 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nla
return -EEXIST;
if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
mtu = ipgre_tunnel_bind_dev(dev);
if (!tb[IFLA_MTU])
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 8aa87c19fa00..ca50d9f9f8c1 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -469,6 +469,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
(1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
(1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
(1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
+ optname == IP_UNICAST_IF ||
optname == IP_MULTICAST_TTL ||
optname == IP_MULTICAST_ALL ||
optname == IP_MULTICAST_LOOP ||
@@ -628,6 +629,35 @@ static int do_ip_setsockopt(struct sock *sk, int level,
goto e_inval;
inet->mc_loop = !!val;
break;
+ case IP_UNICAST_IF:
+ {
+ struct net_device *dev = NULL;
+ int ifindex;
+
+ if (optlen != sizeof(int))
+ goto e_inval;
+
+ ifindex = (__force int)ntohl((__force __be32)val);
+ if (ifindex == 0) {
+ inet->uc_index = 0;
+ err = 0;
+ break;
+ }
+
+ dev = dev_get_by_index(sock_net(sk), ifindex);
+ err = -EADDRNOTAVAIL;
+ if (!dev)
+ break;
+ dev_put(dev);
+
+ err = -EINVAL;
+ if (sk->sk_bound_dev_if)
+ break;
+
+ inet->uc_index = ifindex;
+ err = 0;
+ break;
+ }
case IP_MULTICAST_IF:
{
struct ip_mreqn mreq;
@@ -1178,6 +1208,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_MULTICAST_LOOP:
val = inet->mc_loop;
break;
+ case IP_UNICAST_IF:
+ val = (__force int)htonl((__u32) inet->uc_index);
+ break;
case IP_MULTICAST_IF:
{
struct in_addr addr;
@@ -1256,6 +1289,10 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
int hlim = inet->mc_ttl;
put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
}
+ if (inet->cmsg_flags & IP_CMSG_TOS) {
+ int tos = inet->rcv_tos;
+ put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
+ }
len -= msg.msg_controllen;
return put_user(len, optlen);
}
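The IP_UNICAST_IF hunk above lets an unbound socket pin unicast routing to one interface: the value travels as a 32-bit interface index in network byte order (hence the ntohl() in do_ip_setsockopt()), zero clears it, and the setsockopt is refused with EINVAL if the socket is already bound to a device. The ping.c and raw.c hunks below fall back to uc_index when no other output interface was chosen. A small userspace sketch; the numeric fallback for IP_UNICAST_IF and the interface name are assumptions:

/* Sketch: select an egress interface for unicast traffic with IP_UNICAST_IF.
 * The option takes the interface index as a 32-bit value in network byte
 * order; passing 0 clears the setting again. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IP_UNICAST_IF
#define IP_UNICAST_IF 50	/* mainline header value; treat as an assumption on older libcs */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	unsigned int ifindex = if_nametoindex("eth0");	/* "eth0" is just an example name */
	uint32_t val = htonl(ifindex);

	if (setsockopt(fd, IPPROTO_IP, IP_UNICAST_IF, &val, sizeof(val)) < 0)
		perror("IP_UNICAST_IF");

	/* subsequent sends on this socket prefer that interface for unicast
	 * routing; the call is rejected if the socket is already bound to a
	 * device, as the hunk above enforces */
	close(fd);
	return 0;
}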
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 22a199315309..f84ebff5cdb0 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -454,8 +454,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_fifo_errors++;
goto tx_error;
}
- if ((dst = rt->rt_gateway) == 0)
- goto tx_error_icmp;
+ dst = rt->rt_gateway;
}
rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index b072386cee21..4398a45a9600 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -556,7 +556,8 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
- }
+ } else if (!ipc.oif)
+ ipc.oif = inet->uc_index;
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 6afc807ee2ad..02d61079f08b 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -256,6 +256,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
+ SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 3ccda5ae8a27..ab466305b629 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -563,7 +563,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
- }
+ } else if (!ipc.oif)
+ ipc.oif = inet->uc_index;
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index bcacf54e5418..0489cedc1671 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1117,12 +1117,17 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const vo
static const __be32 inaddr_any = 0;
struct net_device *dev = dst->dev;
const __be32 *pkey = daddr;
+ const struct rtable *rt;
struct neighbour *n;
+ rt = (const struct rtable *) dst;
+
if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
pkey = &inaddr_any;
+ else if (rt->rt_gateway)
+ pkey = (const __be32 *) &rt->rt_gateway;
- n = __ipv4_neigh_lookup(&arp_tbl, dev, *(__force u32 *)pkey);
+ n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
if (n)
return n;
return neigh_create(&arp_tbl, pkey, dev);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 94d683a61cba..94abee8cf563 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -90,16 +90,8 @@ EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
-static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
- __be32 addr);
-static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
+static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
__be32 daddr, __be32 saddr, const struct tcphdr *th);
-#else
-static inline
-struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
-{
- return NULL;
-}
#endif
struct inet_hashinfo tcp_hashinfo;
@@ -601,6 +593,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *key;
+ const __u8 *hash_location = NULL;
+ unsigned char newhash[16];
+ int genhash;
+ struct sock *sk1 = NULL;
#endif
struct net *net;
@@ -631,7 +627,36 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
arg.iov[0].iov_len = sizeof(rep.th);
#ifdef CONFIG_TCP_MD5SIG
- key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
+ hash_location = tcp_parse_md5sig_option(th);
+ if (!sk && hash_location) {
+ /*
+ * The active side is gone. Try to find the listening socket via the
+ * source port, then look up the md5 key through that socket.
+ * This does not loosen security: the incoming packet is checked
+ * against the md5 hash computed with the key we find, and no RST is
+ * generated if the hash doesn't match.
+ */
+ sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
+ &tcp_hashinfo, ip_hdr(skb)->daddr,
+ ntohs(th->source), inet_iif(skb));
+ /* don't send an RST if we can't find a key */
+ if (!sk1)
+ return;
+ rcu_read_lock();
+ key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
+ &ip_hdr(skb)->saddr, AF_INET);
+ if (!key)
+ goto release_sk1;
+
+ genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
+ if (genhash || memcmp(hash_location, newhash, 16) != 0)
+ goto release_sk1;
+ } else {
+ key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
+ &ip_hdr(skb)->saddr,
+ AF_INET) : NULL;
+ }
+
if (key) {
rep.opt[0] = htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
@@ -664,6 +689,14 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+
+#ifdef CONFIG_TCP_MD5SIG
+release_sk1:
+ if (sk1) {
+ rcu_read_unlock();
+ sock_put(sk1);
+ }
+#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
@@ -764,7 +797,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
req->ts_recent,
0,
- tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
+ tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
+ AF_INET),
inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
ip_hdr(skb)->tos);
}
@@ -881,153 +915,137 @@ static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
*/
/* Find the Key structure for an address. */
-static struct tcp_md5sig_key *
- tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
+struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family)
{
struct tcp_sock *tp = tcp_sk(sk);
- int i;
-
- if (!tp->md5sig_info || !tp->md5sig_info->entries4)
+ struct tcp_md5sig_key *key;
+ struct hlist_node *pos;
+ unsigned int size = sizeof(struct in_addr);
+ struct tcp_md5sig_info *md5sig;
+
+ /* caller either holds rcu_read_lock() or socket lock */
+ md5sig = rcu_dereference_check(tp->md5sig_info,
+ sock_owned_by_user(sk));
+ if (!md5sig)
return NULL;
- for (i = 0; i < tp->md5sig_info->entries4; i++) {
- if (tp->md5sig_info->keys4[i].addr == addr)
- return &tp->md5sig_info->keys4[i].base;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (family == AF_INET6)
+ size = sizeof(struct in6_addr);
+#endif
+ hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
+ if (key->family != family)
+ continue;
+ if (!memcmp(&key->addr, addr, size))
+ return key;
}
return NULL;
}
+EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
struct sock *addr_sk)
{
- return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
+ union tcp_md5_addr *addr;
+
+ addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
+ return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
struct request_sock *req)
{
- return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
+ union tcp_md5_addr *addr;
+
+ addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
+ return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
-int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
- u8 *newkey, u8 newkeylen)
+int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
+ int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
/* Add Key to the list */
struct tcp_md5sig_key *key;
struct tcp_sock *tp = tcp_sk(sk);
- struct tcp4_md5sig_key *keys;
+ struct tcp_md5sig_info *md5sig;
- key = tcp_v4_md5_do_lookup(sk, addr);
+ key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
if (key) {
/* Pre-existing entry - just update that one. */
- kfree(key->key);
- key->key = newkey;
+ memcpy(key->key, newkey, newkeylen);
key->keylen = newkeylen;
- } else {
- struct tcp_md5sig_info *md5sig;
-
- if (!tp->md5sig_info) {
- tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
- GFP_ATOMIC);
- if (!tp->md5sig_info) {
- kfree(newkey);
- return -ENOMEM;
- }
- sk_nocaps_add(sk, NETIF_F_GSO_MASK);
- }
+ return 0;
+ }
- md5sig = tp->md5sig_info;
- if (md5sig->entries4 == 0 &&
- tcp_alloc_md5sig_pool(sk) == NULL) {
- kfree(newkey);
+ md5sig = rcu_dereference_protected(tp->md5sig_info,
+ sock_owned_by_user(sk));
+ if (!md5sig) {
+ md5sig = kmalloc(sizeof(*md5sig), gfp);
+ if (!md5sig)
return -ENOMEM;
- }
-
- if (md5sig->alloced4 == md5sig->entries4) {
- keys = kmalloc((sizeof(*keys) *
- (md5sig->entries4 + 1)), GFP_ATOMIC);
- if (!keys) {
- kfree(newkey);
- if (md5sig->entries4 == 0)
- tcp_free_md5sig_pool();
- return -ENOMEM;
- }
- if (md5sig->entries4)
- memcpy(keys, md5sig->keys4,
- sizeof(*keys) * md5sig->entries4);
+ sk_nocaps_add(sk, NETIF_F_GSO_MASK);
+ INIT_HLIST_HEAD(&md5sig->head);
+ rcu_assign_pointer(tp->md5sig_info, md5sig);
+ }
- /* Free old key list, and reference new one */
- kfree(md5sig->keys4);
- md5sig->keys4 = keys;
- md5sig->alloced4++;
- }
- md5sig->entries4++;
- md5sig->keys4[md5sig->entries4 - 1].addr = addr;
- md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
- md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
+ key = sock_kmalloc(sk, sizeof(*key), gfp);
+ if (!key)
+ return -ENOMEM;
+ if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
+ sock_kfree_s(sk, key, sizeof(*key));
+ return -ENOMEM;
}
- return 0;
-}
-EXPORT_SYMBOL(tcp_v4_md5_do_add);
-static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
- u8 *newkey, u8 newkeylen)
-{
- return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
- newkey, newkeylen);
+ memcpy(key->key, newkey, newkeylen);
+ key->keylen = newkeylen;
+ key->family = family;
+ memcpy(&key->addr, addr,
+ (family == AF_INET6) ? sizeof(struct in6_addr) :
+ sizeof(struct in_addr));
+ hlist_add_head_rcu(&key->node, &md5sig->head);
+ return 0;
}
+EXPORT_SYMBOL(tcp_md5_do_add);
-int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
+int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
struct tcp_sock *tp = tcp_sk(sk);
- int i;
-
- for (i = 0; i < tp->md5sig_info->entries4; i++) {
- if (tp->md5sig_info->keys4[i].addr == addr) {
- /* Free the key */
- kfree(tp->md5sig_info->keys4[i].base.key);
- tp->md5sig_info->entries4--;
-
- if (tp->md5sig_info->entries4 == 0) {
- kfree(tp->md5sig_info->keys4);
- tp->md5sig_info->keys4 = NULL;
- tp->md5sig_info->alloced4 = 0;
- tcp_free_md5sig_pool();
- } else if (tp->md5sig_info->entries4 != i) {
- /* Need to do some manipulation */
- memmove(&tp->md5sig_info->keys4[i],
- &tp->md5sig_info->keys4[i+1],
- (tp->md5sig_info->entries4 - i) *
- sizeof(struct tcp4_md5sig_key));
- }
- return 0;
- }
- }
- return -ENOENT;
+ struct tcp_md5sig_key *key;
+ struct tcp_md5sig_info *md5sig;
+
+ key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
+ if (!key)
+ return -ENOENT;
+ hlist_del_rcu(&key->node);
+ atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
+ kfree_rcu(key, rcu);
+ md5sig = rcu_dereference_protected(tp->md5sig_info,
+ sock_owned_by_user(sk));
+ if (hlist_empty(&md5sig->head))
+ tcp_free_md5sig_pool();
+ return 0;
}
-EXPORT_SYMBOL(tcp_v4_md5_do_del);
+EXPORT_SYMBOL(tcp_md5_do_del);
-static void tcp_v4_clear_md5_list(struct sock *sk)
+void tcp_clear_md5_list(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_md5sig_key *key;
+ struct hlist_node *pos, *n;
+ struct tcp_md5sig_info *md5sig;
- /* Free each key, then the set of key keys,
- * the crypto element, and then decrement our
- * hold on the last resort crypto.
- */
- if (tp->md5sig_info->entries4) {
- int i;
- for (i = 0; i < tp->md5sig_info->entries4; i++)
- kfree(tp->md5sig_info->keys4[i].base.key);
- tp->md5sig_info->entries4 = 0;
+ md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
+
+ if (!hlist_empty(&md5sig->head))
tcp_free_md5sig_pool();
- }
- if (tp->md5sig_info->keys4) {
- kfree(tp->md5sig_info->keys4);
- tp->md5sig_info->keys4 = NULL;
- tp->md5sig_info->alloced4 = 0;
+ hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
+ hlist_del_rcu(&key->node);
+ atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
+ kfree_rcu(key, rcu);
}
}
@@ -1036,7 +1054,6 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
{
struct tcp_md5sig cmd;
struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
- u8 *newkey;
if (optlen < sizeof(cmd))
return -EINVAL;
@@ -1047,32 +1064,16 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
if (sin->sin_family != AF_INET)
return -EINVAL;
- if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
- if (!tcp_sk(sk)->md5sig_info)
- return -ENOENT;
- return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
- }
+ if (!cmd.tcpm_key || !cmd.tcpm_keylen)
+ return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
+ AF_INET);
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
return -EINVAL;
- if (!tcp_sk(sk)->md5sig_info) {
- struct tcp_sock *tp = tcp_sk(sk);
- struct tcp_md5sig_info *p;
-
- p = kzalloc(sizeof(*p), sk->sk_allocation);
- if (!p)
- return -EINVAL;
-
- tp->md5sig_info = p;
- sk_nocaps_add(sk, NETIF_F_GSO_MASK);
- }
-
- newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
- if (!newkey)
- return -ENOMEM;
- return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
- newkey, cmd.tcpm_keylen);
+ return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
+ AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
+ GFP_KERNEL);
}
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
@@ -1098,7 +1099,7 @@ static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
-static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
+static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
__be32 daddr, __be32 saddr, const struct tcphdr *th)
{
struct tcp_md5sig_pool *hp;
@@ -1198,7 +1199,8 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
int genhash;
unsigned char newhash[16];
- hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
+ hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
+ AF_INET);
hash_location = tcp_parse_md5sig_option(th);
/* We've parsed the options - do we have a hash? */
@@ -1461,6 +1463,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
ireq->opt = NULL;
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
+ newinet->rcv_tos = ip_hdr(skb)->tos;
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (inet_opt)
inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
@@ -1486,7 +1489,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
- key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
+ key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
+ AF_INET);
if (key != NULL) {
/*
* We're using one, so create a matching key
@@ -1494,10 +1498,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
* memory, then we end up not copying the key
* across. Shucks.
*/
- char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
- if (newkey != NULL)
- tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
- newkey, key->keylen);
+ tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
+ AF_INET, key->key, key->keylen, GFP_ATOMIC);
sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
}
#endif
@@ -1858,7 +1860,6 @@ EXPORT_SYMBOL(ipv4_specific);
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
.md5_lookup = tcp_v4_md5_lookup,
.calc_md5_hash = tcp_v4_md5_hash_skb,
- .md5_add = tcp_v4_md5_add_func,
.md5_parse = tcp_v4_parse_md5_keys,
};
#endif
@@ -1947,8 +1948,8 @@ void tcp_v4_destroy_sock(struct sock *sk)
#ifdef CONFIG_TCP_MD5SIG
/* Clean up the MD5 key list, if any */
if (tp->md5sig_info) {
- tcp_v4_clear_md5_list(sk);
- kfree(tp->md5sig_info);
+ tcp_clear_md5_list(sk);
+ kfree_rcu(tp->md5sig_info, rcu);
tp->md5sig_info = NULL;
}
#endif
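
The tcp_ipv4.c changes above replace the old resizable keys4[]/keys6[] arrays with a single RCU-protected hlist of tcp_md5sig_key entries keyed by address and family. A minimal lookup sketch, assuming the structure layout introduced by this patch (a head list in tcp_md5sig_info; addr, family and node members in tcp_md5sig_key), could look like this:

	/* Sketch only: walk the RCU hlist and match on family + address.
	 * Error handling and v4-mapped address details are omitted. */
	static struct tcp_md5sig_key *md5_lookup_sketch(struct sock *sk,
							const union tcp_md5_addr *addr,
							int family)
	{
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_key *key;
		struct hlist_node *pos;
		struct tcp_md5sig_info *md5sig;
		unsigned int size = (family == AF_INET6) ?
				    sizeof(struct in6_addr) : sizeof(struct in_addr);

		md5sig = rcu_dereference_check(tp->md5sig_info,
					       sock_owned_by_user(sk));
		if (!md5sig)
			return NULL;
		hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
			if (key->family == family &&
			    !memcmp(&key->addr, addr, size))
				return key;
		}
		return NULL;
	}
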
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 550e755747e0..3cabafb5cdd1 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -359,13 +359,11 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
*/
do {
struct tcp_md5sig_key *key;
- memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
- tcptw->tw_md5_keylen = 0;
+ tcptw->tw_md5_key = NULL;
key = tp->af_specific->md5_lookup(sk, sk);
if (key != NULL) {
- memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
- tcptw->tw_md5_keylen = key->keylen;
- if (tcp_alloc_md5sig_pool(sk) == NULL)
+ tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
+ if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL)
BUG();
}
} while (0);
@@ -405,8 +403,10 @@ void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
struct tcp_timewait_sock *twsk = tcp_twsk(sk);
- if (twsk->tw_md5_keylen)
+ if (twsk->tw_md5_key) {
tcp_free_md5sig_pool();
+ kfree_rcu(twsk->tw_md5_key, rcu);
+ }
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
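
With this change the timewait socket keeps its own kmemdup()'ed copy of the key and releases it with kfree_rcu(), so lockless readers that found the key under rcu_read_lock() remain safe across the free. The general pattern, assuming the object embeds a struct rcu_head named rcu as in this series (struct foo below is purely illustrative), is:

	/* Generic kfree_rcu() pattern: the object embeds a struct rcu_head
	 * and is actually freed only after the current RCU grace period. */
	struct foo {
		int data;
		struct rcu_head rcu;
	};

	static void drop_foo(struct foo *f)
	{
		kfree_rcu(f, rcu);	/* readers under rcu_read_lock() may still use f */
	}
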
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4ff3b6dc74fc..364784a91939 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2306,8 +2306,10 @@ begin_fwd:
if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
continue;
- if (tcp_retransmit_skb(sk, skb))
+ if (tcp_retransmit_skb(sk, skb)) {
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
return;
+ }
NET_INC_STATS_BH(sock_net(sk), mib_idx);
if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5d075b5f70fc..7c41ab84e72e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -917,7 +917,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (!saddr)
saddr = inet->mc_addr;
connected = 0;
- }
+ } else if (!ipc.oif)
+ ipc.oif = inet->uc_index;
if (connected)
rt = (struct rtable *)sk_dst_check(sk, 0);
@@ -1166,7 +1167,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
unsigned int ulen, copied;
- int peeked;
+ int peeked, off = 0;
int err;
int is_udplite = IS_UDPLITE(sk);
bool slow;
@@ -1182,7 +1183,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
- &peeked, &err);
+ &peeked, &off, &err);
if (!skb)
goto out;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 273f48d1df2e..5605f9dca87e 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -214,6 +214,7 @@ lookup_protocol:
inet->mc_ttl = 1;
inet->mc_index = 0;
inet->mc_list = NULL;
+ inet->rcv_tos = 0;
if (ipv4_config.no_pmtu_disc)
inet->pmtudisc = IP_PMTUDISC_DONT;
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 59402b4637f9..db00d27ffb16 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -211,35 +211,6 @@ void ipv6_sock_ac_close(struct sock *sk)
rcu_read_unlock();
}
-#if 0
-/* The function is not used, which is funny. Apparently, author
- * supposed to use it to filter out datagrams inside udp/raw but forgot.
- *
- * It is OK, anycasts are not special comparing to delivery to unicasts.
- */
-
-int inet6_ac_check(struct sock *sk, struct in6_addr *addr, int ifindex)
-{
- struct ipv6_ac_socklist *pac;
- struct ipv6_pinfo *np = inet6_sk(sk);
- int found;
-
- found = 0;
- read_lock(&ipv6_sk_ac_lock);
- for (pac=np->ipv6_ac_list; pac; pac=pac->acl_next) {
- if (ifindex && pac->acl_ifindex != ifindex)
- continue;
- found = ipv6_addr_equal(&pac->acl_addr, addr);
- if (found)
- break;
- }
- read_unlock(&ipv6_sk_ac_lock);
-
- return found;
-}
-
-#endif
-
static void aca_put(struct ifacaddr6 *ac)
{
if (atomic_dec_and_test(&ac->aca_refcnt)) {
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 251e7cd75e89..76832c8dc89d 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -485,7 +485,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
}
if (np->rxopt.bits.rxtclass) {
- int tclass = (ntohl(*(__be32 *)ipv6_hdr(skb)) >> 20) & 0xff;
+ int tclass = ipv6_tclass(ipv6_hdr(skb));
put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 01d46bff63c3..af88934e4d79 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -468,6 +468,8 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
+ else if (!fl6.flowi6_oif)
+ fl6.flowi6_oif = np->ucast_oif;
dst = icmpv6_route_lookup(net, skb, sk, &fl6);
if (IS_ERR(dst))
@@ -553,6 +555,8 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
+ else if (!fl6.flowi6_oif)
+ fl6.flowi6_oif = np->ucast_oif;
err = ip6_dst_lookup(sk, &dst, &fl6);
if (err)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index b82bcde53f7a..5b27fbcae346 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1552,11 +1552,20 @@ static int fib6_age(struct rt6_info *rt, void *arg)
time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) {
RT6_TRACE("aging clone %p\n", rt);
return -1;
- } else if ((rt->rt6i_flags & RTF_GATEWAY) &&
- (!(dst_get_neighbour_noref_raw(&rt->dst)->flags & NTF_ROUTER))) {
- RT6_TRACE("purging route %p via non-router but gateway\n",
- rt);
- return -1;
+ } else if (rt->rt6i_flags & RTF_GATEWAY) {
+ struct neighbour *neigh;
+ __u8 neigh_flags = 0;
+
+ neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
+ if (neigh) {
+ neigh_flags = neigh->flags;
+ neigh_release(neigh);
+ }
+			if (!(neigh_flags & NTF_ROUTER)) {
+ RT6_TRACE("purging route %p via non-router but gateway\n",
+ rt);
+ return -1;
+ }
}
gc_args.more++;
}
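
This hunk shows the pattern the rest of the series converges on: instead of peeking at the dst's neighbour pointer without a reference, callers do a dst_neigh_lookup(), copy out what they need, and release the reference immediately. Condensed into a helper (a sketch using only the calls visible above):

	/* Sketch: take a reference, snapshot the flags, drop the reference. */
	static bool gateway_is_router(struct rt6_info *rt)
	{
		struct neighbour *neigh;
		__u8 flags = 0;

		neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
		if (neigh) {
			flags = neigh->flags;
			neigh_release(neigh);
		}
		return flags & NTF_ROUTER;
	}
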
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d97e07183ce9..7a98fc2a5d97 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -388,7 +388,6 @@ int ip6_forward(struct sk_buff *skb)
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct inet6_skb_parm *opt = IP6CB(skb);
struct net *net = dev_net(dst->dev);
- struct neighbour *n;
u32 mtu;
if (net->ipv6.devconf_all->forwarding == 0)
@@ -463,8 +462,7 @@ int ip6_forward(struct sk_buff *skb)
send redirects to source routed frames.
We don't send redirects to frames decapsulated from IPsec.
*/
- n = dst_get_neighbour_noref(dst);
- if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
+ if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
struct in6_addr *target = NULL;
struct rt6_info *rt;
@@ -474,8 +472,8 @@ int ip6_forward(struct sk_buff *skb)
*/
rt = (struct rt6_info *) dst;
- if ((rt->rt6i_flags & RTF_GATEWAY))
- target = (struct in6_addr*)&n->primary_key;
+ if (rt->rt6i_flags & RTF_GATEWAY)
+ target = &rt->rt6i_gateway;
else
target = &hdr->daddr;
@@ -486,7 +484,7 @@ int ip6_forward(struct sk_buff *skb)
and by source (inside ndisc_send_redirect)
*/
if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
- ndisc_send_redirect(skb, n, target);
+ ndisc_send_redirect(skb, target);
} else {
int addrtype = ipv6_addr_type(&hdr->saddr);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 18a2719003c3..63dd1f89ed7d 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -516,6 +516,36 @@ done:
retv = 0;
break;
+ case IPV6_UNICAST_IF:
+ {
+ struct net_device *dev = NULL;
+ int ifindex;
+
+ if (optlen != sizeof(int))
+ goto e_inval;
+
+ ifindex = (__force int)ntohl((__force __be32)val);
+ if (ifindex == 0) {
+ np->ucast_oif = 0;
+ retv = 0;
+ break;
+ }
+
+ dev = dev_get_by_index(net, ifindex);
+ retv = -EADDRNOTAVAIL;
+ if (!dev)
+ break;
+ dev_put(dev);
+
+ retv = -EINVAL;
+ if (sk->sk_bound_dev_if)
+ break;
+
+ np->ucast_oif = ifindex;
+ retv = 0;
+ break;
+ }
+
case IPV6_MULTICAST_IF:
if (sk->sk_type == SOCK_STREAM)
break;
@@ -987,6 +1017,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
int hlim = np->mcast_hops;
put_cmsg(&msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim);
}
+ if (np->rxopt.bits.rxtclass) {
+ int tclass = np->rcv_tclass;
+ put_cmsg(&msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
+ }
if (np->rxopt.bits.rxoinfo) {
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
@@ -1160,6 +1194,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
val = np->mcast_oif;
break;
+ case IPV6_UNICAST_IF:
+ val = (__force int)htonl((__u32) np->ucast_oif);
+ break;
+
case IPV6_MTU_DISCOVER:
val = np->pmtudisc;
break;
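
IPV6_UNICAST_IF mirrors IPV6_MULTICAST_IF for unicast traffic: it takes an interface index in network byte order, is rejected while the socket is bound to a device, and is used as the default flowi6_oif in the icmp, raw and udp paths below. A hypothetical user-space caller (everything outside the option name and byte order is illustrative) might look like:

	/* Hypothetical sketch: pin unicast sends of an IPv6 socket to one
	 * interface using the new IPV6_UNICAST_IF option. */
	#include <arpa/inet.h>
	#include <net/if.h>
	#include <sys/socket.h>

	static int pin_unicast_if(int fd, const char *ifname)
	{
		unsigned int ifindex = if_nametoindex(ifname);
		int val;

		if (!ifindex)
			return -1;
		val = htonl(ifindex);	/* option value is carried in network byte order */
		return setsockopt(fd, IPPROTO_IPV6, IPV6_UNICAST_IF,
				  &val, sizeof(val));
	}
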
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c964958ac470..3dcdb81ec3e8 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1223,11 +1223,17 @@ static void ndisc_router_discovery(struct sk_buff *skb)
rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
- if (rt)
- neigh = dst_get_neighbour_noref(&rt->dst);
-
+ if (rt) {
+ neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
+ if (!neigh) {
+ ND_PRINTK0(KERN_ERR
+ "ICMPv6 RA: %s() got default router without neighbour.\n",
+ __func__);
+ dst_release(&rt->dst);
+ return;
+ }
+ }
if (rt && lifetime == 0) {
- neigh_clone(neigh);
ip6_del_rt(rt);
rt = NULL;
}
@@ -1244,7 +1250,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
return;
}
- neigh = dst_get_neighbour_noref(&rt->dst);
+ neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
if (neigh == NULL) {
ND_PRINTK0(KERN_ERR
"ICMPv6 RA: %s() got default router without neighbour.\n",
@@ -1411,7 +1417,7 @@ skip_routeinfo:
out:
if (rt)
dst_release(&rt->dst);
- else if (neigh)
+ if (neigh)
neigh_release(neigh);
}
@@ -1506,8 +1512,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
}
}
-void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
- const struct in6_addr *target)
+void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
{
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
@@ -1566,6 +1571,13 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
goto release;
if (dev->addr_len) {
+ struct neighbour *neigh = dst_neigh_lookup(skb_dst(skb), target);
+ if (!neigh) {
+ ND_PRINTK2(KERN_WARNING
+ "ICMPv6 Redirect: no neigh for target address\n");
+ goto release;
+ }
+
read_lock_bh(&neigh->lock);
if (neigh->nud_state & NUD_VALID) {
memcpy(ha_buf, neigh->ha, dev->addr_len);
@@ -1574,6 +1586,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
len += ndisc_opt_addr_space(dev);
} else
read_unlock_bh(&neigh->lock);
+
+ neigh_release(neigh);
}
rd_len = min_t(unsigned int,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index d02f7e4dd611..5bddea778840 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -856,6 +856,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
+ else if (!fl6.flowi6_oif)
+ fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index b69fae76a6f1..9447bd69873a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -336,12 +336,11 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
}
found:
- /* RFC5722, Section 4:
- * When reassembling an IPv6 datagram, if
+ /* RFC5722, Section 4, amended by Errata ID : 3089
+ * When reassembling an IPv6 datagram, if
* one or more its constituent fragments is determined to be an
* overlapping fragment, the entire datagram (and any constituent
- * fragments, including those not yet received) MUST be silently
- * discarded.
+ * fragments) MUST be silently discarded.
*/
/* Check for overlap with preceding fragment. */
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8c2e3ab58f2a..92be12bb8d23 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -121,9 +121,22 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
return p;
}
+static inline const void *choose_neigh_daddr(struct rt6_info *rt, const void *daddr)
+{
+ struct in6_addr *p = &rt->rt6i_gateway;
+
+ if (!ipv6_addr_any(p))
+ return (const void *) p;
+ return daddr;
+}
+
static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
- struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
+ struct rt6_info *rt = (struct rt6_info *) dst;
+ struct neighbour *n;
+
+ daddr = choose_neigh_daddr(rt, daddr);
+ n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
if (n)
return n;
return neigh_create(&nd_tbl, daddr, dst->dev);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 133768e52912..c4ffd1743528 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -680,9 +680,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
/* ISATAP (RFC4214) - must come before 6to4 */
if (dev->priv_flags & IFF_ISATAP) {
struct neighbour *neigh = NULL;
+ bool do_tx_error = false;
if (skb_dst(skb))
- neigh = dst_get_neighbour_noref(skb_dst(skb));
+ neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
if (neigh == NULL) {
if (net_ratelimit())
@@ -697,6 +698,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
ipv6_addr_is_isatap(addr6))
dst = addr6->s6_addr32[3];
else
+ do_tx_error = true;
+
+ neigh_release(neigh);
+ if (do_tx_error)
goto tx_error;
}
@@ -705,9 +710,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
if (!dst) {
struct neighbour *neigh = NULL;
+ bool do_tx_error = false;
if (skb_dst(skb))
- neigh = dst_get_neighbour_noref(skb_dst(skb));
+ neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
if (neigh == NULL) {
if (net_ratelimit())
@@ -723,10 +729,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
addr_type = ipv6_addr_type(addr6);
}
- if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
- goto tx_error_icmp;
+ if ((addr_type & IPV6_ADDR_COMPATv4) != 0)
+ dst = addr6->s6_addr32[3];
+ else
+ do_tx_error = true;
- dst = addr6->s6_addr32[3];
+ neigh_release(neigh);
+ if (do_tx_error)
+ goto tx_error;
}
rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3edd05ae4388..12c6ece67f39 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -540,19 +540,7 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
const struct in6_addr *addr)
{
- struct tcp_sock *tp = tcp_sk(sk);
- int i;
-
- BUG_ON(tp == NULL);
-
- if (!tp->md5sig_info || !tp->md5sig_info->entries6)
- return NULL;
-
- for (i = 0; i < tp->md5sig_info->entries6; i++) {
- if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
- return &tp->md5sig_info->keys6[i].base;
- }
- return NULL;
+ return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
@@ -567,136 +555,11 @@ static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
-static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
- char *newkey, u8 newkeylen)
-{
- /* Add key to the list */
- struct tcp_md5sig_key *key;
- struct tcp_sock *tp = tcp_sk(sk);
- struct tcp6_md5sig_key *keys;
-
- key = tcp_v6_md5_do_lookup(sk, peer);
- if (key) {
- /* modify existing entry - just update that one */
- kfree(key->key);
- key->key = newkey;
- key->keylen = newkeylen;
- } else {
- /* reallocate new list if current one is full. */
- if (!tp->md5sig_info) {
- tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
- if (!tp->md5sig_info) {
- kfree(newkey);
- return -ENOMEM;
- }
- sk_nocaps_add(sk, NETIF_F_GSO_MASK);
- }
- if (tp->md5sig_info->entries6 == 0 &&
- tcp_alloc_md5sig_pool(sk) == NULL) {
- kfree(newkey);
- return -ENOMEM;
- }
- if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
- keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
- (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
-
- if (!keys) {
- kfree(newkey);
- if (tp->md5sig_info->entries6 == 0)
- tcp_free_md5sig_pool();
- return -ENOMEM;
- }
-
- if (tp->md5sig_info->entries6)
- memmove(keys, tp->md5sig_info->keys6,
- (sizeof (tp->md5sig_info->keys6[0]) *
- tp->md5sig_info->entries6));
-
- kfree(tp->md5sig_info->keys6);
- tp->md5sig_info->keys6 = keys;
- tp->md5sig_info->alloced6++;
- }
-
- tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer;
- tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
- tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
-
- tp->md5sig_info->entries6++;
- }
- return 0;
-}
-
-static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
- u8 *newkey, __u8 newkeylen)
-{
- return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
- newkey, newkeylen);
-}
-
-static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- int i;
-
- for (i = 0; i < tp->md5sig_info->entries6; i++) {
- if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
- /* Free the key */
- kfree(tp->md5sig_info->keys6[i].base.key);
- tp->md5sig_info->entries6--;
-
- if (tp->md5sig_info->entries6 == 0) {
- kfree(tp->md5sig_info->keys6);
- tp->md5sig_info->keys6 = NULL;
- tp->md5sig_info->alloced6 = 0;
- tcp_free_md5sig_pool();
- } else {
- /* shrink the database */
- if (tp->md5sig_info->entries6 != i)
- memmove(&tp->md5sig_info->keys6[i],
- &tp->md5sig_info->keys6[i+1],
- (tp->md5sig_info->entries6 - i)
- * sizeof (tp->md5sig_info->keys6[0]));
- }
- return 0;
- }
- }
- return -ENOENT;
-}
-
-static void tcp_v6_clear_md5_list (struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- int i;
-
- if (tp->md5sig_info->entries6) {
- for (i = 0; i < tp->md5sig_info->entries6; i++)
- kfree(tp->md5sig_info->keys6[i].base.key);
- tp->md5sig_info->entries6 = 0;
- tcp_free_md5sig_pool();
- }
-
- kfree(tp->md5sig_info->keys6);
- tp->md5sig_info->keys6 = NULL;
- tp->md5sig_info->alloced6 = 0;
-
- if (tp->md5sig_info->entries4) {
- for (i = 0; i < tp->md5sig_info->entries4; i++)
- kfree(tp->md5sig_info->keys4[i].base.key);
- tp->md5sig_info->entries4 = 0;
- tcp_free_md5sig_pool();
- }
-
- kfree(tp->md5sig_info->keys4);
- tp->md5sig_info->keys4 = NULL;
- tp->md5sig_info->alloced4 = 0;
-}
-
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
int optlen)
{
struct tcp_md5sig cmd;
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
- u8 *newkey;
if (optlen < sizeof(cmd))
return -EINVAL;
@@ -708,36 +571,22 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
return -EINVAL;
if (!cmd.tcpm_keylen) {
- if (!tcp_sk(sk)->md5sig_info)
- return -ENOENT;
if (ipv6_addr_v4mapped(&sin6->sin6_addr))
- return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
- return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
+ return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
+ AF_INET);
+ return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
+ AF_INET6);
}
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
return -EINVAL;
- if (!tcp_sk(sk)->md5sig_info) {
- struct tcp_sock *tp = tcp_sk(sk);
- struct tcp_md5sig_info *p;
-
- p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
+ if (ipv6_addr_v4mapped(&sin6->sin6_addr))
+ return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
+ AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
- tp->md5sig_info = p;
- sk_nocaps_add(sk, NETIF_F_GSO_MASK);
- }
-
- newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
- if (!newkey)
- return -ENOMEM;
- if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
- return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
- newkey, cmd.tcpm_keylen);
- }
- return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
+ return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
+ AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
@@ -1074,6 +923,13 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
const struct tcphdr *th = tcp_hdr(skb);
u32 seq = 0, ack_seq = 0;
struct tcp_md5sig_key *key = NULL;
+#ifdef CONFIG_TCP_MD5SIG
+ const __u8 *hash_location = NULL;
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ unsigned char newhash[16];
+ int genhash;
+ struct sock *sk1 = NULL;
+#endif
if (th->rst)
return;
@@ -1082,8 +938,32 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
return;
#ifdef CONFIG_TCP_MD5SIG
- if (sk)
- key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr);
+ hash_location = tcp_parse_md5sig_option(th);
+ if (!sk && hash_location) {
+ /*
+		 * The active side is lost. Try to find the listening socket
+		 * through the source port, then find the md5 key through the
+		 * listening socket. We are not loosening security here:
+		 * the incoming packet is checked against the md5 hash of the
+		 * found key, and no RST is generated if the hash doesn't match.
+ */
+ sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
+ &tcp_hashinfo, &ipv6h->daddr,
+ ntohs(th->source), inet6_iif(skb));
+ if (!sk1)
+ return;
+
+ rcu_read_lock();
+ key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
+ if (!key)
+ goto release_sk1;
+
+ genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
+ if (genhash || memcmp(hash_location, newhash, 16) != 0)
+ goto release_sk1;
+ } else {
+ key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
+ }
#endif
if (th->ack)
@@ -1093,6 +973,14 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
(th->doff << 2);
tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
+
+#ifdef CONFIG_TCP_MD5SIG
+release_sk1:
+ if (sk1) {
+ rcu_read_unlock();
+ sock_put(sk1);
+ }
+#endif
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
@@ -1394,6 +1282,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
+ newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
@@ -1472,6 +1361,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
+ newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
/* Clone native IPv6 options from listening socket (if any)
@@ -1510,10 +1400,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
* memory, then we end up not copying the key
* across. Shucks.
*/
- char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
- if (newkey != NULL)
- tcp_v6_md5_do_add(newsk, &newnp->daddr,
- newkey, key->keylen);
+ tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
+ AF_INET6, key->key, key->keylen, GFP_ATOMIC);
}
#endif
@@ -1676,6 +1564,8 @@ ipv6_pktoptions:
np->mcast_oif = inet6_iif(opt_skb);
if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
+ if (np->rxopt.bits.rxtclass)
+ np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
if (ipv6_opt_accepted(sk, opt_skb)) {
skb_set_owner_r(opt_skb, sk);
opt_skb = xchg(&np->pktoptions, opt_skb);
@@ -1898,7 +1788,6 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
.md5_lookup = tcp_v6_md5_lookup,
.calc_md5_hash = tcp_v6_md5_hash_skb,
- .md5_add = tcp_v6_md5_add_func,
.md5_parse = tcp_v6_parse_md5_keys,
};
#endif
@@ -1930,7 +1819,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
.md5_lookup = tcp_v4_md5_lookup,
.calc_md5_hash = tcp_v4_md5_hash_skb,
- .md5_add = tcp_v6_md5_add_func,
.md5_parse = tcp_v6_parse_md5_keys,
};
#endif
@@ -2004,11 +1892,6 @@ static int tcp_v6_init_sock(struct sock *sk)
static void tcp_v6_destroy_sock(struct sock *sk)
{
-#ifdef CONFIG_TCP_MD5SIG
- /* Clean up the MD5 key list */
- if (tcp_sk(sk)->md5sig_info)
- tcp_v6_clear_md5_list(sk);
-#endif
tcp_v4_destroy_sock(sk);
inet6_destroy_sock(sk);
}
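
The RST path above now authenticates MD5-signed segments even when no socket matched: it finds the listener via the segment's destination address and source port, fetches that listener's key for the peer, and only answers if the incoming digest verifies. A condensed sketch of that check, reusing the helpers named in the hunk (the wrapper function itself is illustrative):

	/* Sketch: return the key to sign the RST with, or NULL if the segment
	 * cannot be authenticated. The caller releases *sk1 with sock_put(). */
	static struct tcp_md5sig_key *rst_md5_key(struct sk_buff *skb,
						  const struct tcphdr *th,
						  const __u8 *hash_location,
						  struct sock **sk1)
	{
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
		struct tcp_md5sig_key *key;
		unsigned char newhash[16];

		*sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, &ipv6h->daddr,
					     ntohs(th->source), inet6_iif(skb));
		if (!*sk1)
			return NULL;
		key = tcp_v6_md5_do_lookup(*sk1, &ipv6h->saddr);
		if (!key)
			return NULL;
		if (tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb) ||
		    memcmp(hash_location, newhash, 16))
			return NULL;	/* digest mismatch: stay silent */
		return key;
	}
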
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4f96b5c63685..37b0699e95e5 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -342,7 +342,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
unsigned int ulen, copied;
- int peeked;
+ int peeked, off = 0;
int err;
int is_udplite = IS_UDPLITE(sk);
int is_udp4;
@@ -359,7 +359,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
- &peeked, &err);
+ &peeked, &off, &err);
if (!skb)
goto out;
@@ -1130,7 +1130,8 @@ do_udp_sendmsg:
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
fl6.flowi6_oif = np->mcast_oif;
connected = 0;
- }
+ } else if (!fl6.flowi6_oif)
+ fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 4eeff89c1aaa..8755a3079d0f 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -146,7 +146,7 @@ static int __xfrm6_output(struct sk_buff *skb)
return -EMSGSIZE;
}
- if ((x && x->props.mode == XFRM_MODE_TUNNEL) &&
+ if (x->props.mode == XFRM_MODE_TUNNEL &&
((skb->len > mtu && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb)))) {
return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index d5c5b8fd1d01..98d1f0ba7fe9 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -90,6 +90,7 @@ do { \
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
+static void iucv_sever_path(struct sock *, int);
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
@@ -130,17 +131,6 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
memcpy(&dst[8], src, 8);
}
-static void iucv_skb_queue_purge(struct sk_buff_head *list)
-{
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(list)) != NULL) {
- if (skb->dev)
- dev_put(skb->dev);
- kfree_skb(skb);
- }
-}
-
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
@@ -175,17 +165,13 @@ static int afiucv_pm_freeze(struct device *dev)
read_lock(&iucv_sk_list.lock);
sk_for_each(sk, node, &iucv_sk_list.head) {
iucv = iucv_sk(sk);
- iucv_skb_queue_purge(&iucv->send_skb_q);
+ skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
switch (sk->sk_state) {
case IUCV_DISCONN:
case IUCV_CLOSING:
case IUCV_CONNECTED:
- if (iucv->path) {
- err = pr_iucv->path_sever(iucv->path, NULL);
- iucv_path_free(iucv->path);
- iucv->path = NULL;
- }
+ iucv_sever_path(sk, 0);
break;
case IUCV_OPEN:
case IUCV_BOUND:
@@ -194,6 +180,8 @@ static int afiucv_pm_freeze(struct device *dev)
default:
break;
}
+ skb_queue_purge(&iucv->send_skb_q);
+ skb_queue_purge(&iucv->backlog_skb_q);
}
read_unlock(&iucv_sk_list.lock);
return err;
@@ -338,7 +326,6 @@ static void iucv_sock_wake_msglim(struct sock *sk)
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
struct sk_buff *skb, u8 flags)
{
- struct net *net = sock_net(sock);
struct iucv_sock *iucv = iucv_sk(sock);
struct af_iucv_trans_hdr *phs_hdr;
struct sk_buff *nskb;
@@ -375,10 +362,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
if (imsg)
memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
- skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
+ skb->dev = iucv->hs_dev;
if (!skb->dev)
return -ENODEV;
- if (!(skb->dev->flags & IFF_UP))
+ if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
return -ENETDOWN;
if (skb->len > skb->dev->mtu) {
if (sock->sk_type == SOCK_SEQPACKET)
@@ -393,15 +380,14 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
return -ENOMEM;
skb_queue_tail(&iucv->send_skb_q, nskb);
err = dev_queue_xmit(skb);
- if (err) {
+ if (net_xmit_eval(err)) {
skb_unlink(nskb, &iucv->send_skb_q);
- dev_put(nskb->dev);
kfree_skb(nskb);
} else {
atomic_sub(confirm_recv, &iucv->msg_recv);
WARN_ON(atomic_read(&iucv->msg_recv) < 0);
}
- return err;
+ return net_xmit_eval(err);
}
static struct sock *__iucv_get_sock_by_name(char *nm)
@@ -447,13 +433,33 @@ static void iucv_sock_kill(struct sock *sk)
sock_put(sk);
}
+/* Terminate an IUCV path */
+static void iucv_sever_path(struct sock *sk, int with_user_data)
+{
+ unsigned char user_data[16];
+ struct iucv_sock *iucv = iucv_sk(sk);
+ struct iucv_path *path = iucv->path;
+
+ if (iucv->path) {
+ iucv->path = NULL;
+ if (with_user_data) {
+ low_nmcpy(user_data, iucv->src_name);
+ high_nmcpy(user_data, iucv->dst_name);
+ ASCEBC(user_data, sizeof(user_data));
+ pr_iucv->path_sever(path, user_data);
+ } else
+ pr_iucv->path_sever(path, NULL);
+ iucv_path_free(path);
+ }
+}
+
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
- unsigned char user_data[16];
struct iucv_sock *iucv = iucv_sk(sk);
unsigned long timeo;
- int err, blen;
+ int err = 0;
+ int blen;
struct sk_buff *skb;
lock_sock(sk);
@@ -480,7 +486,7 @@ static void iucv_sock_close(struct sock *sk)
sk->sk_state = IUCV_CLOSING;
sk->sk_state_change(sk);
- if (!skb_queue_empty(&iucv->send_skb_q)) {
+ if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
timeo = sk->sk_lingertime;
else
@@ -494,25 +500,20 @@ static void iucv_sock_close(struct sock *sk)
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
- if (iucv->path) {
- low_nmcpy(user_data, iucv->src_name);
- high_nmcpy(user_data, iucv->dst_name);
- ASCEBC(user_data, sizeof(user_data));
- pr_iucv->path_sever(iucv->path, user_data);
- iucv_path_free(iucv->path);
- iucv->path = NULL;
- }
-
sk->sk_err = ECONNRESET;
sk->sk_state_change(sk);
- iucv_skb_queue_purge(&iucv->send_skb_q);
+ skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
- break;
- default:
- /* nothing to do here */
- break;
+ default: /* fall through */
+ iucv_sever_path(sk, 1);
+ }
+
+ if (iucv->hs_dev) {
+ dev_put(iucv->hs_dev);
+ iucv->hs_dev = NULL;
+ sk->sk_bound_dev_if = 0;
}
/* mark socket for deletion by iucv_sock_kill() */
@@ -706,7 +707,6 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
goto done_unlock;
/* Bind the socket */
-
if (pr_iucv)
if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
goto vm_bind; /* VM IUCV transport */
@@ -720,6 +720,8 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
memcpy(iucv->src_name, sa->siucv_name, 8);
memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
sk->sk_bound_dev_if = dev->ifindex;
+ iucv->hs_dev = dev;
+ dev_hold(dev);
sk->sk_state = IUCV_BOUND;
iucv->transport = AF_IUCV_TRANS_HIPER;
if (!iucv->msglimit)
@@ -894,11 +896,8 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
err = -ECONNREFUSED;
- if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
- pr_iucv->path_sever(iucv->path, NULL);
- iucv_path_free(iucv->path);
- iucv->path = NULL;
- }
+ if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
+ iucv_sever_path(sk, 0);
done:
release_sock(sk);
@@ -1124,8 +1123,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
noblock, &err);
else
skb = sock_alloc_send_skb(sk, len, noblock, &err);
- if (!skb)
+ if (!skb) {
+ err = -ENOMEM;
goto out;
+ }
if (iucv->transport == AF_IUCV_TRANS_HIPER)
skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
@@ -1148,6 +1149,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
/* increment and save iucv message tag for msg_completion cbk */
txmsg.tag = iucv->send_tag++;
memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
atomic_inc(&iucv->msg_sent);
err = afiucv_hs_send(&txmsg, sk, skb, 0);
@@ -1202,8 +1204,6 @@ release:
return len;
fail:
- if (skb->dev)
- dev_put(skb->dev);
kfree_skb(skb);
out:
release_sock(sk);
@@ -1396,7 +1396,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
}
kfree_skb(skb);
- atomic_inc(&iucv->msg_recv);
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+ atomic_inc(&iucv->msg_recv);
+ if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
+ WARN_ON(1);
+ iucv_sock_close(sk);
+ return -EFAULT;
+ }
+ }
/* Queue backlog skbs */
spin_lock_bh(&iucv->message_q.lock);
@@ -1486,7 +1493,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
if (sk->sk_state == IUCV_DISCONN)
mask |= POLLIN;
- if (sock_writeable(sk))
+ if (sock_writeable(sk) && iucv_below_msglim(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -1565,13 +1572,6 @@ static int iucv_sock_release(struct socket *sock)
iucv_sock_close(sk);
- /* Unregister with IUCV base support */
- if (iucv_sk(sk)->path) {
- pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
- iucv_path_free(iucv_sk(sk)->path);
- iucv_sk(sk)->path = NULL;
- }
-
sock_orphan(sk);
iucv_sock_kill(sk);
return err;
@@ -1633,7 +1633,8 @@ static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
- int val, len;
+ unsigned int val;
+ int len;
if (level != SOL_IUCV)
return -ENOPROTOOPT;
@@ -1656,6 +1657,13 @@ static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
: iucv->msglimit; /* default */
release_sock(sk);
break;
+ case SO_MSGSIZE:
+ if (sk->sk_state == IUCV_OPEN)
+ return -EBADFD;
+ val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
+ sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
+ 0x7fffffff;
+ break;
default:
return -ENOPROTOOPT;
}
@@ -1750,8 +1758,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
path->msglim = iucv->msglimit;
err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
if (err) {
- err = pr_iucv->path_sever(path, user_data);
- iucv_path_free(path);
+ iucv_sever_path(nsk, 1);
iucv_sock_kill(nsk);
goto fail;
}
@@ -1828,6 +1835,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
struct sk_buff *list_skb = list->next;
unsigned long flags;
+ bh_lock_sock(sk);
if (!skb_queue_empty(list)) {
spin_lock_irqsave(&list->lock, flags);
@@ -1849,7 +1857,6 @@ static void iucv_callback_txdone(struct iucv_path *path,
iucv_sock_wake_msglim(sk);
}
}
- BUG_ON(!this);
if (sk->sk_state == IUCV_CLOSING) {
if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
@@ -1857,6 +1864,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
sk->sk_state_change(sk);
}
}
+ bh_unlock_sock(sk);
}
@@ -1864,9 +1872,15 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
struct sock *sk = path->private;
+ if (sk->sk_state == IUCV_CLOSED)
+ return;
+
+ bh_lock_sock(sk);
+ iucv_sever_path(sk, 1);
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
+ bh_unlock_sock(sk);
}
/* called if the other communication side shuts down its RECV direction;
@@ -1954,6 +1968,8 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
memcpy(niucv->src_name, iucv->src_name, 8);
memcpy(niucv->src_user_id, iucv->src_user_id, 8);
nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
+ niucv->hs_dev = iucv->hs_dev;
+ dev_hold(niucv->hs_dev);
afiucv_swap_src_dest(skb);
trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
trans_hdr->window = niucv->msglimit;
@@ -2022,12 +2038,15 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
struct iucv_sock *iucv = iucv_sk(sk);
/* other end of connection closed */
- if (iucv) {
- bh_lock_sock(sk);
+ if (!iucv)
+ goto out;
+ bh_lock_sock(sk);
+ if (sk->sk_state == IUCV_CONNECTED) {
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
- bh_unlock_sock(sk);
}
+ bh_unlock_sock(sk);
+out:
kfree_skb(skb);
return NET_RX_SUCCESS;
}
@@ -2172,11 +2191,11 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
break;
case (AF_IUCV_FLAG_WIN):
err = afiucv_hs_callback_win(sk, skb);
- if (skb->len > sizeof(struct af_iucv_trans_hdr))
- err = afiucv_hs_callback_rx(sk, skb);
- else
- kfree(skb);
- break;
+ if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
+ kfree_skb(skb);
+ break;
+ }
+ /* fall through */
case 0:
/* plain data frame */
memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
@@ -2202,65 +2221,64 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
struct iucv_sock *iucv = NULL;
struct sk_buff_head *list;
struct sk_buff *list_skb;
- struct sk_buff *this = NULL;
+ struct sk_buff *nskb;
unsigned long flags;
struct hlist_node *node;
- read_lock(&iucv_sk_list.lock);
+ read_lock_irqsave(&iucv_sk_list.lock, flags);
sk_for_each(sk, node, &iucv_sk_list.head)
if (sk == isk) {
iucv = iucv_sk(sk);
break;
}
- read_unlock(&iucv_sk_list.lock);
+ read_unlock_irqrestore(&iucv_sk_list.lock, flags);
- if (!iucv)
+ if (!iucv || sock_flag(sk, SOCK_ZAPPED))
return;
- bh_lock_sock(sk);
list = &iucv->send_skb_q;
- list_skb = list->next;
+ spin_lock_irqsave(&list->lock, flags);
if (skb_queue_empty(list))
goto out_unlock;
-
- spin_lock_irqsave(&list->lock, flags);
+ list_skb = list->next;
+ nskb = list_skb->next;
while (list_skb != (struct sk_buff *)list) {
if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
- this = list_skb;
switch (n) {
case TX_NOTIFY_OK:
- __skb_unlink(this, list);
+ __skb_unlink(list_skb, list);
+ kfree_skb(list_skb);
iucv_sock_wake_msglim(sk);
- dev_put(this->dev);
- kfree_skb(this);
break;
case TX_NOTIFY_PENDING:
atomic_inc(&iucv->pendings);
break;
case TX_NOTIFY_DELAYED_OK:
- __skb_unlink(this, list);
+ __skb_unlink(list_skb, list);
atomic_dec(&iucv->pendings);
if (atomic_read(&iucv->pendings) <= 0)
iucv_sock_wake_msglim(sk);
- dev_put(this->dev);
- kfree_skb(this);
+ kfree_skb(list_skb);
break;
case TX_NOTIFY_UNREACHABLE:
case TX_NOTIFY_DELAYED_UNREACHABLE:
case TX_NOTIFY_TPQFULL: /* not yet used */
case TX_NOTIFY_GENERALERROR:
case TX_NOTIFY_DELAYED_GENERALERROR:
- __skb_unlink(this, list);
- dev_put(this->dev);
- kfree_skb(this);
- sk->sk_state = IUCV_DISCONN;
- sk->sk_state_change(sk);
+ __skb_unlink(list_skb, list);
+ kfree_skb(list_skb);
+ if (sk->sk_state == IUCV_CONNECTED) {
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
break;
}
break;
}
- list_skb = list_skb->next;
+ list_skb = nskb;
+ nskb = nskb->next;
}
+out_unlock:
spin_unlock_irqrestore(&list->lock, flags);
if (sk->sk_state == IUCV_CLOSING) {
@@ -2270,8 +2288,6 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
}
}
-out_unlock:
- bh_unlock_sock(sk);
}
static const struct proto_ops iucv_sock_ops = {
.family = PF_IUCV,
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index d2726a74597d..63fe5f353f04 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -64,7 +64,7 @@ static int l2tp_eth_dev_init(struct net_device *dev)
struct l2tp_eth *priv = netdev_priv(dev);
priv->dev = dev;
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
memset(&dev->broadcast[0], 0xff, 6);
return 0;
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index d540c3b160f3..1be7a454aa77 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -9,7 +9,7 @@ mac80211-y := \
scan.o offchannel.o \
ht.o agg-tx.o agg-rx.o \
ibss.o \
- mlme.o work.o \
+ work.o \
iface.o \
rate.o \
michael.o \
@@ -25,7 +25,7 @@ mac80211-y := \
wme.o \
event.o \
chan.o \
- driver-trace.o
+ driver-trace.o mlme.o
mac80211-$(CONFIG_MAC80211_LEDS) += led.o
mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 296620d6ca0c..c3de921c8cfd 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -776,12 +776,10 @@ static int sta_apply_parameters(struct ieee80211_local *local,
if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED) &&
!test_sta_flag(sta, WLAN_STA_AUTH)) {
- ret = sta_info_move_state_checked(sta,
- IEEE80211_STA_AUTH);
+ ret = sta_info_move_state(sta, IEEE80211_STA_AUTH);
if (ret)
return ret;
- ret = sta_info_move_state_checked(sta,
- IEEE80211_STA_ASSOC);
+ ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
if (ret)
return ret;
}
@@ -789,11 +787,9 @@ static int sta_apply_parameters(struct ieee80211_local *local,
if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
- ret = sta_info_move_state_checked(sta,
- IEEE80211_STA_AUTHORIZED);
+ ret = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
else if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
- ret = sta_info_move_state_checked(sta,
- IEEE80211_STA_ASSOC);
+ ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
if (ret)
return ret;
}
@@ -805,12 +801,10 @@ static int sta_apply_parameters(struct ieee80211_local *local,
if (!(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) &&
test_sta_flag(sta, WLAN_STA_AUTH)) {
- ret = sta_info_move_state_checked(sta,
- IEEE80211_STA_AUTH);
+ ret = sta_info_move_state(sta, IEEE80211_STA_AUTH);
if (ret)
return ret;
- ret = sta_info_move_state_checked(sta,
- IEEE80211_STA_NONE);
+ ret = sta_info_move_state(sta, IEEE80211_STA_NONE);
if (ret)
return ret;
}
@@ -944,8 +938,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
if (!sta)
return -ENOMEM;
- sta_info_move_state(sta, IEEE80211_STA_AUTH);
- sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+ sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
+ sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
err = sta_apply_parameters(local, sta, params);
if (err) {
@@ -1001,6 +995,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
struct ieee80211_local *local = wiphy_priv(wiphy);
struct sta_info *sta;
struct ieee80211_sub_if_data *vlansdata;
+ int err;
mutex_lock(&local->sta_mtx);
@@ -1040,7 +1035,11 @@ static int ieee80211_change_station(struct wiphy *wiphy,
ieee80211_send_layer2_update(sta);
}
- sta_apply_parameters(local, sta, params);
+ err = sta_apply_parameters(local, sta, params);
+ if (err) {
+ mutex_unlock(&local->sta_mtx);
+ return err;
+ }
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates)
rate_control_rate_init(sta);
@@ -1341,6 +1340,8 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
conf->dot11MeshHWMPRannInterval =
nconf->dot11MeshHWMPRannInterval;
}
+ if (_chg_mesh_attr(NL80211_MESHCONF_FORWARDING, mask))
+ conf->dot11MeshForwarding = nconf->dot11MeshForwarding;
return 0;
}
@@ -1868,7 +1869,6 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
s32 rssi_thold, u32 rssi_hyst)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_vif *vif = &sdata->vif;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
@@ -1879,14 +1879,9 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
bss_conf->cqm_rssi_thold = rssi_thold;
bss_conf->cqm_rssi_hyst = rssi_hyst;
- if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
- if (sdata->vif.type != NL80211_IFTYPE_STATION)
- return -EOPNOTSUPP;
- return 0;
- }
-
/* tell the driver upon association, unless already associated */
- if (sdata->u.mgd.associated)
+ if (sdata->u.mgd.associated &&
+ sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM);
return 0;
@@ -1907,8 +1902,11 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
return ret;
}
- for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
+ memcpy(sdata->rc_rateidx_mcs_mask[i], mask->control[i].mcs,
+ sizeof(mask->control[i].mcs));
+ }
return 0;
}
@@ -2030,7 +2028,7 @@ ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
if (wk->offchan_tx.wait && !wk->offchan_tx.status)
cfg80211_mgmt_tx_status(wk->sdata->dev,
(unsigned long) wk->offchan_tx.frame,
- wk->ie, wk->ie_len, false, GFP_KERNEL);
+ wk->data, wk->data_len, false, GFP_KERNEL);
return WORK_DONE_DESTROY;
}
@@ -2181,8 +2179,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
wk->done = ieee80211_offchan_tx_done;
wk->offchan_tx.frame = skb;
wk->offchan_tx.wait = wait;
- wk->ie_len = len;
- memcpy(wk->ie, buf, len);
+ wk->data_len = len;
+ memcpy(wk->data, buf, len);
ieee80211_add_work(wk);
return 0;
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 889c3e93e0f4..d1f7abddb182 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -20,23 +20,29 @@ __ieee80211_get_channel_mode(struct ieee80211_local *local,
if (!ieee80211_sdata_running(sdata))
continue;
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_MONITOR:
continue;
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- !sdata->u.mgd.associated)
- continue;
-
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ case NL80211_IFTYPE_STATION:
+ if (!sdata->u.mgd.associated)
+ continue;
+ break;
+ case NL80211_IFTYPE_ADHOC:
if (!sdata->u.ibss.ssid_len)
continue;
if (!sdata->u.ibss.fixed_channel)
return CHAN_MODE_HOPPING;
- }
-
- if (sdata->vif.type == NL80211_IFTYPE_AP &&
- !sdata->u.ap.beacon)
+ break;
+ case NL80211_IFTYPE_AP_VLAN:
+ /* will also have _AP interface */
continue;
+ case NL80211_IFTYPE_AP:
+ if (!sdata->u.ap.beacon)
+ continue;
+ break;
+ default:
+ break;
+ }
return CHAN_MODE_FIXED;
}
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 90baea53e7c5..483e96ed95c1 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -247,8 +247,6 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
sf += snprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
- if (local->hw.flags & IEEE80211_HW_BEACON_FILTER)
- sf += snprintf(buf + sf, mxln - sf, "BEACON_FILTER\n");
if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS)
sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
@@ -259,14 +257,14 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
sf += snprintf(buf + sf, mxln - sf, "REPORTS_TX_ACK_STATUS\n");
if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
sf += snprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
- if (local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)
- sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_CQM_RSSI\n");
if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)
sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
+ if (local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)
+ sf += snprintf(buf + sf, mxln - sf, "SCAN_WHILE_IDLE\n");
rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
kfree(buf);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 176c08ffb13c..510ed1dab3c7 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -87,6 +87,21 @@ static ssize_t ieee80211_if_fmt_##name( \
#define IEEE80211_IF_FMT_SIZE(name, field) \
IEEE80211_IF_FMT(name, field, "%zd\n")
+#define IEEE80211_IF_FMT_HEXARRAY(name, field) \
+static ssize_t ieee80211_if_fmt_##name( \
+ const struct ieee80211_sub_if_data *sdata, \
+ char *buf, int buflen) \
+{ \
+ char *p = buf; \
+ int i; \
+ for (i = 0; i < sizeof(sdata->field); i++) { \
+ p += scnprintf(p, buflen + buf - p, "%.2x ", \
+ sdata->field[i]); \
+ } \
+ p += scnprintf(p, buflen + buf - p, "\n"); \
+ return p - buf; \
+}
+
#define IEEE80211_IF_FMT_ATOMIC(name, field) \
static ssize_t ieee80211_if_fmt_##name( \
const struct ieee80211_sub_if_data *sdata, \
@@ -148,6 +163,11 @@ IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
HEX);
IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
HEX);
+IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz,
+ rc_rateidx_mcs_mask[IEEE80211_BAND_2GHZ], HEXARRAY);
+IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz,
+ rc_rateidx_mcs_mask[IEEE80211_BAND_5GHZ], HEXARRAY);
+
IEEE80211_IF_FILE(flags, flags, HEX);
IEEE80211_IF_FILE(state, state, LHEX);
IEEE80211_IF_FILE(channel_type, vif.bss_conf.channel_type, DEC);
@@ -422,6 +442,7 @@ IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol,
u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
+IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
#endif
@@ -441,6 +462,8 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
+ DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
DEBUGFS_ADD(bssid);
DEBUGFS_ADD(aid);
@@ -458,6 +481,8 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
+ DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
DEBUGFS_ADD(num_sta_authorized);
DEBUGFS_ADD(num_sta_ps);
@@ -468,6 +493,12 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
static void add_ibss_files(struct ieee80211_sub_if_data *sdata)
{
+ DEBUGFS_ADD(channel_type);
+ DEBUGFS_ADD(rc_rateidx_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mask_5ghz);
+ DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
+
DEBUGFS_ADD_MODE(tsf, 0600);
}
@@ -479,6 +510,8 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
+ DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
DEBUGFS_ADD(peer);
}
@@ -491,6 +524,8 @@ static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
+ DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
}
static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index d86217d56bd7..6d45804d09bc 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -63,14 +63,15 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
int res = scnprintf(buf, sizeof(buf),
- "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
TEST(PS_DRIVER), TEST(AUTHORIZED),
TEST(SHORT_PREAMBLE),
TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
- TEST(TDLS_PEER_AUTH), TEST(RATE_CONTROL));
+ TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT),
+ TEST(INSERTED), TEST(RATE_CONTROL));
#undef TEST
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index e8960ae39861..70dfb6415c20 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -253,6 +253,7 @@ static inline int drv_set_key(struct ieee80211_local *local,
might_sleep();
+ sdata = get_bss_sdata(sdata);
check_sdata_in_driver(sdata);
trace_drv_set_key(local, cmd, sdata, sta, key);
@@ -272,6 +273,7 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
if (sta)
ista = &sta->sta;
+ sdata = get_bss_sdata(sdata);
check_sdata_in_driver(sdata);
trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
@@ -476,6 +478,37 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
trace_drv_return_void(local);
}
+static inline __must_check
+int drv_sta_state(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ int ret = 0;
+
+ might_sleep();
+
+ sdata = get_bss_sdata(sdata);
+ check_sdata_in_driver(sdata);
+
+ trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
+ if (local->ops->sta_state) {
+ ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
+ old_state, new_state);
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = drv_sta_add(local, sdata, &sta->sta);
+ if (ret == 0)
+ sta->uploaded = true;
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ drv_sta_remove(local, sdata, &sta->sta);
+ }
+ trace_drv_return_int(local, ret);
+ return ret;
+}
+
static inline int drv_conf_tx(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata, u16 queue,
const struct ieee80211_tx_queue_params *params)
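
drv_sta_state() gives drivers one callback for every station state transition; drivers that do not implement it keep working because the AUTH->ASSOC and ASSOC->AUTH transitions are mapped onto the existing sta_add/sta_remove calls above. A hedged sketch of a driver opting into the new callback (driver name and helpers are made up; the prototype matches ops->sta_state as invoked above):

	/* Hypothetical driver: act only on the transitions it cares about. */
	static int foo_sta_state(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 enum ieee80211_sta_state old_state,
				 enum ieee80211_sta_state new_state)
	{
		if (old_state == IEEE80211_STA_AUTH &&
		    new_state == IEEE80211_STA_ASSOC)
			return foo_add_station(hw->priv, vif, sta);	/* hypothetical */

		if (old_state == IEEE80211_STA_ASSOC &&
		    new_state == IEEE80211_STA_AUTH)
			foo_remove_station(hw->priv, vif, sta);		/* hypothetical */

		return 0;
	}
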
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 6e9df8fd8fb8..384e2f08c187 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -635,6 +635,38 @@ TRACE_EVENT(drv_sta_notify,
)
);
+TRACE_EVENT(drv_sta_state,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state),
+
+ TP_ARGS(local, sdata, sta, old_state, new_state),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ __field(u32, old_state)
+ __field(u32, new_state)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_ASSIGN;
+ __entry->old_state = old_state;
+ __entry->new_state = new_state;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " state: %d->%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG,
+ __entry->old_state, __entry->new_state
+ )
+);
+
TRACE_EVENT(drv_sta_add,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a4643969a13b..8361da4b36ab 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -36,31 +36,6 @@
#define IEEE80211_IBSS_MAX_STA_ENTRIES 128
-static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgmt *mgmt,
- size_t len)
-{
- u16 auth_alg, auth_transaction;
-
- lockdep_assert_held(&sdata->u.ibss.mtx);
-
- if (len < 24 + 6)
- return;
-
- auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
- auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
-
- /*
- * IEEE 802.11 standard does not require authentication in IBSS
- * networks and most implementations do not seem to use it.
- * However, try to reply to authentication attempts if someone
- * has actually implemented this.
- */
- if (auth_alg == WLAN_AUTH_OPEN && auth_transaction == 1)
- ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, NULL, 0,
- sdata->u.ibss.bssid, NULL, 0, 0);
-}
-
static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
const u8 *bssid, const int beacon_int,
struct ieee80211_channel *chan,
@@ -276,7 +251,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
cbss->tsf);
}
-static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta)
+static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
+ bool auth)
__acquires(RCU)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -290,22 +266,34 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta)
addr, sdata->name);
#endif
- sta_info_move_state(sta, IEEE80211_STA_AUTH);
- sta_info_move_state(sta, IEEE80211_STA_ASSOC);
- sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
+ sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
+ sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
+	/* Authorize the station only if the network is not RSN protected.
+	 * If it is, wait for userspace to authorize it. */
+ if (!sta->sdata->u.ibss.control_port)
+ sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
rate_control_rate_init(sta);
/* If it fails, maybe we raced another insertion? */
if (sta_info_insert_rcu(sta))
return sta_info_get(sdata, addr);
+ if (auth) {
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
+ printk(KERN_DEBUG "TX Auth SA=%pM DA=%pM BSSID=%pM"
+ "(auth_transaction=1)\n", sdata->vif.addr,
+ sdata->u.ibss.bssid, addr);
+#endif
+ ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0,
+ addr, sdata->u.ibss.bssid, NULL, 0, 0);
+ }
return sta;
}
static struct sta_info *
ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
const u8 *bssid, const u8 *addr,
- u32 supp_rates)
+ u32 supp_rates, bool auth)
__acquires(RCU)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
@@ -347,7 +335,42 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
sta->sta.supp_rates[band] = supp_rates |
ieee80211_mandatory_rates(local, band);
- return ieee80211_ibss_finish_sta(sta);
+ return ieee80211_ibss_finish_sta(sta, auth);
+}
+
+static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt,
+ size_t len)
+{
+ u16 auth_alg, auth_transaction;
+
+ lockdep_assert_held(&sdata->u.ibss.mtx);
+
+ if (len < 24 + 6)
+ return;
+
+ auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
+ auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
+
+ if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
+ return;
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
+ printk(KERN_DEBUG "%s: RX Auth SA=%pM DA=%pM BSSID=%pM."
+ "(auth_transaction=%d)\n",
+ sdata->name, mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+#endif
+ sta_info_destroy_addr(sdata, mgmt->sa);
+ ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
+ rcu_read_unlock();
+
+ /*
+ * IEEE 802.11 standard does not require authentication in IBSS
+ * networks and most implementations do not seem to use it.
+ * However, try to reply to authentication attempts if someone
+ * has actually implemented this.
+ */
+ ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, NULL, 0,
+ mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0);
}
static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -412,7 +435,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
} else {
rcu_read_unlock();
sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid,
- mgmt->sa, supp_rates);
+ mgmt->sa, supp_rates, true);
}
}
@@ -540,7 +563,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
ieee80211_sta_join_ibss(sdata, bss);
supp_rates = ieee80211_sta_get_rates(local, elems, band);
ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
- supp_rates);
+ supp_rates, true);
rcu_read_unlock();
}
@@ -643,8 +666,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
"IBSS networks with same SSID (merge)\n", sdata->name);
ieee80211_request_internal_scan(sdata,
- ifibss->ssid, ifibss->ssid_len,
- ifibss->fixed_channel ? ifibss->channel : NULL);
+ ifibss->ssid, ifibss->ssid_len, NULL);
}
static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -855,9 +877,6 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
size_t baselen;
struct ieee802_11_elems elems;
- if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
- return; /* ignore ProbeResp to foreign address */
-
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
if (baselen > len)
return;
@@ -945,7 +964,7 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
list_del(&sta->list);
spin_unlock_bh(&ifibss->incomplete_lock);
- ieee80211_ibss_finish_sta(sta);
+ ieee80211_ibss_finish_sta(sta, true);
rcu_read_unlock();
spin_lock_bh(&ifibss->incomplete_lock);
}
@@ -1059,6 +1078,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
sdata->u.ibss.fixed_bssid = false;
sdata->u.ibss.privacy = params->privacy;
+ sdata->u.ibss.control_port = params->control_port;
sdata->u.ibss.basic_rates = params->basic_rates;
memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate,
sizeof(params->mcast_rate));
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 2f0642d9e154..74594f012cd3 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -280,10 +280,6 @@ struct mesh_preq_queue {
enum ieee80211_work_type {
IEEE80211_WORK_ABORT,
- IEEE80211_WORK_DIRECT_PROBE,
- IEEE80211_WORK_AUTH,
- IEEE80211_WORK_ASSOC_BEACON_WAIT,
- IEEE80211_WORK_ASSOC,
IEEE80211_WORK_REMAIN_ON_CHANNEL,
IEEE80211_WORK_OFFCHANNEL_TX,
};
@@ -316,36 +312,10 @@ struct ieee80211_work {
unsigned long timeout;
enum ieee80211_work_type type;
- u8 filter_ta[ETH_ALEN];
-
bool started;
union {
struct {
- int tries;
- u16 algorithm, transaction;
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_len;
- u8 key[WLAN_KEY_LEN_WEP104];
- u8 key_len, key_idx;
- bool privacy;
- bool synced;
- } probe_auth;
- struct {
- struct cfg80211_bss *bss;
- const u8 *supp_rates;
- const u8 *ht_information_ie;
- enum ieee80211_smps_mode smps;
- int tries;
- u16 capability;
- u8 prev_bssid[ETH_ALEN];
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_len;
- u8 supp_rates_len;
- bool wmm_used, use_11n, uapsd_used;
- bool synced;
- } assoc;
- struct {
u32 duration;
} remain;
struct {
@@ -355,9 +325,8 @@ struct ieee80211_work {
} offchan_tx;
};
- int ie_len;
- /* must be last */
- u8 ie[0];
+ size_t data_len;
+ u8 data[];
};
/* flags used in struct ieee80211_if_managed.flags */
@@ -373,6 +342,43 @@ enum ieee80211_sta_flags {
IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9),
};
+struct ieee80211_mgd_auth_data {
+ struct cfg80211_bss *bss;
+ unsigned long timeout;
+ int tries;
+ u16 algorithm, expected_transaction;
+
+ u8 key[WLAN_KEY_LEN_WEP104];
+ u8 key_len, key_idx;
+ bool synced;
+ bool done;
+
+ size_t ie_len;
+ u8 ie[];
+};
+
+struct ieee80211_mgd_assoc_data {
+ struct cfg80211_bss *bss;
+ const u8 *supp_rates;
+ const u8 *ht_information_ie;
+
+ unsigned long timeout;
+ int tries;
+
+ u16 capability;
+ u8 prev_bssid[ETH_ALEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ u8 supp_rates_len;
+ bool wmm_used, uapsd_used;
+ bool have_beacon;
+ bool sent_assoc;
+ bool synced;
+
+ size_t ie_len;
+ u8 ie[];
+};
+
struct ieee80211_if_managed {
struct timer_list timer;
struct timer_list conn_mon_timer;
@@ -389,6 +395,8 @@ struct ieee80211_if_managed {
struct mutex mtx;
struct cfg80211_bss *associated;
+ struct ieee80211_mgd_auth_data *auth_data;
+ struct ieee80211_mgd_assoc_data *assoc_data;
u8 bssid[ETH_ALEN];
@@ -470,6 +478,8 @@ struct ieee80211_if_ibss {
bool fixed_channel;
bool privacy;
+ bool control_port;
+
u8 bssid[ETH_ALEN];
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 ssid_len, ie_len;
@@ -646,6 +656,7 @@ struct ieee80211_sub_if_data {
/* bitmap of allowed (non-MCS) rate indexes for rate control */
u32 rc_rateidx_mask[IEEE80211_NUM_BANDS];
+ u8 rc_rateidx_mcs_mask[IEEE80211_NUM_BANDS][IEEE80211_HT_MCS_MASK_LEN];
union {
struct ieee80211_if_ap ap;
@@ -769,7 +780,6 @@ struct ieee80211_local {
struct list_head work_list;
struct timer_list work_timer;
struct work_struct work_work;
- struct sk_buff_head work_skb_queue;
/*
* private workqueue to mac80211. mac80211 makes this accessible
@@ -1396,7 +1406,7 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg,
u8 *extra, size_t extra_len, const u8 *bssid,
- const u8 *key, u8 key_len, u8 key_idx);
+ const u8 *da, const u8 *key, u8 key_len, u8 key_idx);
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
const u8 *ie, size_t ie_len,
enum ieee80211_band band, u32 rate_mask,
@@ -1436,8 +1446,6 @@ void ieee80211_work_init(struct ieee80211_local *local);
void ieee80211_add_work(struct ieee80211_work *wk);
void free_work(struct ieee80211_work *wk);
void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata);
-ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb);
int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 01a21c2f6ab3..6b3cd65d1e07 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -318,9 +318,9 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
goto err_del_interface;
}
- sta_info_move_state(sta, IEEE80211_STA_AUTH);
- sta_info_move_state(sta, IEEE80211_STA_ASSOC);
- sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
+ sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
+ sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
+ sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
res = sta_info_insert(sta);
if (res) {
@@ -1181,6 +1181,13 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sband = local->hw.wiphy->bands[i];
sdata->rc_rateidx_mask[i] =
sband ? (1 << sband->n_bitrates) - 1 : 0;
+ if (sband)
+ memcpy(sdata->rc_rateidx_mcs_mask[i],
+ sband->ht_cap.mcs.rx_mask,
+ sizeof(sdata->rc_rateidx_mcs_mask[i]));
+ else
+ memset(sdata->rc_rateidx_mcs_mask[i], 0,
+ sizeof(sdata->rc_rateidx_mcs_mask[i]));
}
/* setup type-dependent data */
@@ -1303,7 +1310,9 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
/* do not count disabled managed interfaces */
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- !sdata->u.mgd.associated) {
+ !sdata->u.mgd.associated &&
+ !sdata->u.mgd.auth_data &&
+ !sdata->u.mgd.assoc_data) {
sdata->vif.bss_conf.idle = true;
continue;
}
@@ -1323,7 +1332,8 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
wk->sdata->vif.bss_conf.idle = false;
}
- if (local->scan_sdata) {
+ if (local->scan_sdata &&
+ !(local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)) {
scanning = true;
local->scan_sdata->vif.bss_conf.idle = false;
}
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 87a89741432d..e8616b3ff636 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -54,14 +54,6 @@ static void assert_key_lock(struct ieee80211_local *local)
lockdep_assert_held(&local->key_mtx);
}
-static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
-{
- if (key->sta)
- return &key->sta->sta;
-
- return NULL;
-}
-
static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
{
/*
@@ -95,7 +87,7 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
{
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_sta *sta;
+ struct sta_info *sta;
int ret;
might_sleep();
@@ -105,7 +97,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
assert_key_lock(key->local);
- sta = get_sta_for_key(key);
+ sta = key->sta;
/*
* If this is a per-STA GTK, check if it
@@ -115,6 +107,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
!(key->local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK))
goto out_unsupported;
+ if (sta && !sta->uploaded)
+ goto out_unsupported;
+
sdata = key->sdata;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
/*
@@ -123,12 +118,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
*/
if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
goto out_unsupported;
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
}
- ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
+ ret = drv_set_key(key->local, SET_KEY, sdata,
+ sta ? &sta->sta : NULL, &key->conf);
if (!ret) {
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
@@ -147,7 +140,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
if (ret != -ENOSPC && ret != -EOPNOTSUPP)
wiphy_err(key->local->hw.wiphy,
"failed to set key (%d, %pM) to hardware (%d)\n",
- key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
+ key->conf.keyidx,
+ sta ? sta->sta.addr : bcast_addr, ret);
out_unsupported:
switch (key->conf.cipher) {
@@ -166,7 +160,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
{
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_sta *sta;
+ struct sta_info *sta;
int ret;
might_sleep();
@@ -179,7 +173,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
return;
- sta = get_sta_for_key(key);
+ sta = key->sta;
sdata = key->sdata;
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
@@ -187,18 +181,14 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
(key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
increment_tailroom_need_count(sdata);
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
-
ret = drv_set_key(key->local, DISABLE_KEY, sdata,
- sta, &key->conf);
+ sta ? &sta->sta : NULL, &key->conf);
if (ret)
wiphy_err(key->local->hw.wiphy,
"failed to remove key (%d, %pM) from hardware (%d)\n",
- key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
+ key->conf.keyidx,
+ sta ? sta->sta.addr : bcast_addr, ret);
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index b142bd4c2390..2306d7514fff 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -155,7 +155,8 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
power = chan->max_power;
else
power = local->power_constr_level ?
- (chan->max_power - local->power_constr_level) :
+ min(chan->max_power,
+ (chan->max_reg_power - local->power_constr_level)) :
chan->max_power;
if (local->user_power_level >= 0)
@@ -198,15 +199,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
return;
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- /*
- * While not associated, claim a BSSID of all-zeroes
- * so that drivers don't do any weird things with the
- * BSSID at that time.
- */
- if (sdata->vif.bss_conf.assoc)
- sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
- else
- sdata->vif.bss_conf.bssid = zero;
+ sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
else if (sdata->vif.type == NL80211_IFTYPE_AP)
@@ -534,6 +527,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
int priv_size, i;
struct wiphy *wiphy;
+ if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
+ return NULL;
+
/* Ensure 32-byte alignment of our private data and hw private data.
* We use the wiphy priv data for both our ieee80211_local and for
* the driver's private data
@@ -701,6 +697,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
)
return -EINVAL;
+ if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan)
+ return -EINVAL;
+
if (hw->max_report_rates == 0)
hw->max_report_rates = hw->max_rates;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 54df1b2bafd2..c27dec904963 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -575,7 +575,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
ifmsh->mshstats.dropped_frames_ttl++;
}
- if (forward) {
+ if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
u32 preq_id;
u8 hopcount, flags;
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index edf167e3b8f3..dc51669e67d8 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -336,7 +336,7 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
}
-static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
+static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
struct ieee80211_sub_if_data *sdata)
{
struct mesh_path *mpath;
@@ -371,12 +371,12 @@ static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
*/
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
- return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
+ return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
- return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
+ return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index a17251730b9e..8806e5ef8ffe 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -96,9 +96,9 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
if (!sta)
return NULL;
- sta_info_move_state(sta, IEEE80211_STA_AUTH);
- sta_info_move_state(sta, IEEE80211_STA_ASSOC);
- sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
+ sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
+ sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
+ sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
set_sta_flag(sta, WLAN_STA_WME);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 295be92f7c77..52133dab9297 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -30,6 +30,12 @@
#include "rate.h"
#include "led.h"
+#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
+#define IEEE80211_AUTH_MAX_TRIES 3
+#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
+#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
+#define IEEE80211_ASSOC_MAX_TRIES 3
+
static int max_nullfunc_tries = 2;
module_param(max_nullfunc_tries, int, 0644);
MODULE_PARM_DESC(max_nullfunc_tries,
@@ -97,6 +103,15 @@ enum rx_mgmt_action {
/* caller must call cfg80211_send_disassoc() */
RX_MGMT_CFG80211_DISASSOC,
+
+ /* caller must call cfg80211_send_rx_auth() */
+ RX_MGMT_CFG80211_RX_AUTH,
+
+ /* caller must call cfg80211_send_rx_assoc() */
+ RX_MGMT_CFG80211_RX_ASSOC,
+
+ /* caller must call cfg80211_send_assoc_timeout() */
+ RX_MGMT_CFG80211_ASSOC_TIMEOUT,
};
/* utils */
@@ -115,8 +130,7 @@ static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd)
* has happened -- the work that runs from this timer will
* do that.
*/
-static void run_again(struct ieee80211_if_managed *ifmgd,
- unsigned long timeout)
+static void run_again(struct ieee80211_if_managed *ifmgd, unsigned long timeout)
{
ASSERT_MGD_MTX(ifmgd);
@@ -127,7 +141,7 @@ static void run_again(struct ieee80211_if_managed *ifmgd,
void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
{
- if (sdata->local->hw.flags & IEEE80211_HW_BEACON_FILTER)
+ if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
return;
mod_timer(&sdata->u.mgd.bcn_mon_timer,
@@ -284,6 +298,319 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
/* frame sending functions */
+static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
+ struct ieee80211_supported_band *sband,
+ u32 *rates)
+{
+ int i, j, count;
+ *rates = 0;
+ count = 0;
+ for (i = 0; i < supp_rates_len; i++) {
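+		/* IE rates are in 500 kb/s units with the basic-rate flag in
+		 * bit 7; bitrates[].bitrate is in 100 kb/s units, hence the
+		 * mask and the *5 (e.g. 0x96 -> 22 * 5 = 110 = 11 Mb/s). */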
+ int rate = (supp_rates[i] & 0x7F) * 5;
+
+ for (j = 0; j < sband->n_bitrates; j++)
+ if (sband->bitrates[j].bitrate == rate) {
+ *rates |= BIT(j);
+ count++;
+ break;
+ }
+ }
+
+ return count;
+}
+
+static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, const u8 *ht_info_ie,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_channel *channel,
+ enum ieee80211_smps_mode smps)
+{
+ struct ieee80211_ht_info *ht_info;
+ u8 *pos;
+ u32 flags = channel->flags;
+ u16 cap;
+ struct ieee80211_sta_ht_cap ht_cap;
+
+ BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
+
+ if (!sband->ht_cap.ht_supported)
+ return;
+
+ if (!ht_info_ie)
+ return;
+
+ if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
+ return;
+
+ memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
+ ieee80211_apply_htcap_overrides(sdata, &ht_cap);
+
+ ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
+
+ /* determine capability flags */
+ cap = ht_cap.cap;
+
+ switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
+ cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ cap &= ~IEEE80211_HT_CAP_SGI_40;
+ }
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
+ cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ cap &= ~IEEE80211_HT_CAP_SGI_40;
+ }
+ break;
+ }
+
+ /* set SM PS mode properly */
+ cap &= ~IEEE80211_HT_CAP_SM_PS;
+ switch (smps) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_NUM_MODES:
+ WARN_ON(1);
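+		/* fall through */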
+ case IEEE80211_SMPS_OFF:
+ cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ cap |= WLAN_HT_CAP_SM_PS_STATIC <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
+ break;
+ }
+
+ /* reserve and fill IE */
+ pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
+}
+
+static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
+ struct sk_buff *skb;
+ struct ieee80211_mgmt *mgmt;
+ u8 *pos, qos_info;
+ size_t offset = 0, noffset;
+ int i, count, rates_len, supp_rates_len;
+ u16 capab;
+ struct ieee80211_supported_band *sband;
+ u32 rates = 0;
+ struct ieee80211_bss *bss = (void *)assoc_data->bss->priv;
+
+ lockdep_assert_held(&ifmgd->mtx);
+
+ sband = local->hw.wiphy->bands[local->oper_channel->band];
+
+ if (assoc_data->supp_rates_len) {
+ /*
+ * Get all rates supported by the device and the AP as
+ * some APs don't like getting a superset of their rates
+ * in the association request (e.g. D-Link DAP 1353 in
+ * b-only mode)...
+ */
+ rates_len = ieee80211_compatible_rates(assoc_data->supp_rates,
+ assoc_data->supp_rates_len,
+ sband, &rates);
+ } else {
+ /*
+		 * If the AP did not provide any supported rates information
+		 * before association, send information element(s) with all
+		 * rates that we support.
+ */
+ rates = ~0;
+ rates_len = sband->n_bitrates;
+ }
+
+ skb = alloc_skb(local->hw.extra_tx_headroom +
+ sizeof(*mgmt) + /* bit too much but doesn't matter */
+ 2 + assoc_data->ssid_len + /* SSID */
+ 4 + rates_len + /* (extended) rates */
+ 4 + /* power capability */
+ 2 + 2 * sband->n_channels + /* supported channels */
+ 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
+ assoc_data->ie_len + /* extra IEs */
+ 9, /* WMM */
+ GFP_KERNEL);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ capab = WLAN_CAPABILITY_ESS;
+
+ if (sband->band == IEEE80211_BAND_2GHZ) {
+ if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
+ capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
+ if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
+ capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
+ }
+
+ if (assoc_data->capability & WLAN_CAPABILITY_PRIVACY)
+ capab |= WLAN_CAPABILITY_PRIVACY;
+
+ if ((assoc_data->capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
+ (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
+ capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
+
+ mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
+ memset(mgmt, 0, 24);
+ memcpy(mgmt->da, assoc_data->bss->bssid, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, assoc_data->bss->bssid, ETH_ALEN);
+
+ if (!is_zero_ether_addr(assoc_data->prev_bssid)) {
+ skb_put(skb, 10);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_REASSOC_REQ);
+ mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
+ mgmt->u.reassoc_req.listen_interval =
+ cpu_to_le16(local->hw.conf.listen_interval);
+ memcpy(mgmt->u.reassoc_req.current_ap, assoc_data->prev_bssid,
+ ETH_ALEN);
+ } else {
+ skb_put(skb, 4);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ASSOC_REQ);
+ mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
+ mgmt->u.assoc_req.listen_interval =
+ cpu_to_le16(local->hw.conf.listen_interval);
+ }
+
+ /* SSID */
+ pos = skb_put(skb, 2 + assoc_data->ssid_len);
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = assoc_data->ssid_len;
+ memcpy(pos, assoc_data->ssid, assoc_data->ssid_len);
+
+ /* add all rates which were marked to be used above */
+ supp_rates_len = rates_len;
+ if (supp_rates_len > 8)
+ supp_rates_len = 8;
+
+ pos = skb_put(skb, supp_rates_len + 2);
+ *pos++ = WLAN_EID_SUPP_RATES;
+ *pos++ = supp_rates_len;
+
+ count = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (BIT(i) & rates) {
+ int rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ if (++count == 8)
+ break;
+ }
+ }
+
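+	/* anything beyond the first eight rates goes into the Extended
+	 * Supported Rates element below */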
+ if (rates_len > count) {
+ pos = skb_put(skb, rates_len - count + 2);
+ *pos++ = WLAN_EID_EXT_SUPP_RATES;
+ *pos++ = rates_len - count;
+
+ for (i++; i < sband->n_bitrates; i++) {
+ if (BIT(i) & rates) {
+ int rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ }
+ }
+ }
+
+ if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
+ /* 1. power capabilities */
+ pos = skb_put(skb, 4);
+ *pos++ = WLAN_EID_PWR_CAPABILITY;
+ *pos++ = 2;
+ *pos++ = 0; /* min tx power */
+ *pos++ = local->oper_channel->max_power; /* max tx power */
+
+ /* 2. supported channels */
+ /* TODO: get this in reg domain format */
+ pos = skb_put(skb, 2 * sband->n_channels + 2);
+ *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
+ *pos++ = 2 * sband->n_channels;
+ for (i = 0; i < sband->n_channels; i++) {
+ *pos++ = ieee80211_frequency_to_channel(
+ sband->channels[i].center_freq);
+			*pos++ = 1; /* one channel in the subband */
+ }
+ }
+
+ /* if present, add any custom IEs that go before HT */
+ if (assoc_data->ie_len && assoc_data->ie) {
+ static const u8 before_ht[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_EXT_SUPP_RATES,
+ WLAN_EID_PWR_CAPABILITY,
+ WLAN_EID_SUPPORTED_CHANNELS,
+ WLAN_EID_RSN,
+ WLAN_EID_QOS_CAPA,
+ WLAN_EID_RRM_ENABLED_CAPABILITIES,
+ WLAN_EID_MOBILITY_DOMAIN,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ };
+ noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len,
+ before_ht, ARRAY_SIZE(before_ht),
+ offset);
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, assoc_data->ie + offset, noffset - offset);
+ offset = noffset;
+ }
+
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N) &&
+ bss->wmm_used && local->hw.queues >= 4)
+ ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_information_ie,
+ sband, local->oper_channel, ifmgd->ap_smps);
+
+ /* if present, add any custom non-vendor IEs that go after HT */
+ if (assoc_data->ie_len && assoc_data->ie) {
+ noffset = ieee80211_ie_split_vendor(assoc_data->ie,
+ assoc_data->ie_len,
+ offset);
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, assoc_data->ie + offset, noffset - offset);
+ offset = noffset;
+ }
+
+ if (assoc_data->wmm_used && local->hw.queues >= 4) {
+ if (assoc_data->uapsd_used) {
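+			/* QoS info field: bits 0-3 carry the per-AC U-APSD
+			 * flags, bits 5-6 the maximum service period length */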
+ qos_info = local->uapsd_queues;
+ qos_info |= (local->uapsd_max_sp_len <<
+ IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
+ } else {
+ qos_info = 0;
+ }
+
+ pos = skb_put(skb, 9);
+ *pos++ = WLAN_EID_VENDOR_SPECIFIC;
+ *pos++ = 7; /* len */
+ *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
+ *pos++ = 0x50;
+ *pos++ = 0xf2;
+ *pos++ = 2; /* WME */
+ *pos++ = 0; /* WME info */
+ *pos++ = 1; /* WME ver */
+ *pos++ = qos_info;
+ }
+
+ /* add any remaining custom (i.e. vendor specific here) IEs */
+ if (assoc_data->ie_len && assoc_data->ie) {
+ noffset = assoc_data->ie_len;
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, assoc_data->ie + offset, noffset - offset);
+ }
+
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ ieee80211_tx_skb(sdata, skb);
+}
+
static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
const u8 *bssid, u16 stype, u16 reason,
void *cookie, bool send_frame)
@@ -547,7 +874,7 @@ static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
if (pwr_constr_elem_len != 1)
return;
- if ((*pwr_constr_elem <= conf->channel->max_power) &&
+ if ((*pwr_constr_elem <= conf->channel->max_reg_power) &&
(*pwr_constr_elem != sdata->local->power_constr_level)) {
sdata->local->power_constr_level = *pwr_constr_elem;
ieee80211_hw_config(sdata->local, 0);
@@ -1043,7 +1370,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
bss_info_changed |= BSS_CHANGED_BSSID;
/* Tell the driver to monitor connection quality (if supported) */
- if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) &&
+ if (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI &&
bss_conf->cqm_rssi_thold)
bss_info_changed |= BSS_CHANGED_CQM;
@@ -1423,6 +1750,135 @@ void ieee80211_connection_loss(struct ieee80211_vif *vif)
EXPORT_SYMBOL(ieee80211_connection_loss);
+static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
+ bool assoc)
+{
+ struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
+
+ lockdep_assert_held(&sdata->u.mgd.mtx);
+
+ if (auth_data->synced)
+ drv_finish_tx_sync(sdata->local, sdata,
+ auth_data->bss->bssid,
+ IEEE80211_TX_SYNC_AUTH);
+
+ if (!assoc) {
+ sta_info_destroy_addr(sdata, auth_data->bss->bssid);
+
+ memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
+ }
+
+ cfg80211_put_bss(auth_data->bss);
+ kfree(auth_data);
+ sdata->u.mgd.auth_data = NULL;
+}
+
+static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
+ u8 *pos;
+ struct ieee802_11_elems elems;
+
+ pos = mgmt->u.auth.variable;
+ ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
+ if (!elems.challenge)
+ return;
+ auth_data->expected_transaction = 4;
+ ieee80211_send_auth(sdata, 3, auth_data->algorithm,
+ elems.challenge - 2, elems.challenge_len + 2,
+ auth_data->bss->bssid, auth_data->bss->bssid,
+ auth_data->key, auth_data->key_len,
+ auth_data->key_idx);
+}
+
+static enum rx_mgmt_action __must_check
+ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ u8 bssid[ETH_ALEN];
+ u16 auth_alg, auth_transaction, status_code;
+ struct sta_info *sta;
+
+ lockdep_assert_held(&ifmgd->mtx);
+
+ if (len < 24 + 6)
+ return RX_MGMT_NONE;
+
+ if (!ifmgd->auth_data || ifmgd->auth_data->done)
+ return RX_MGMT_NONE;
+
+ memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
+
+ if (memcmp(bssid, mgmt->bssid, ETH_ALEN))
+ return RX_MGMT_NONE;
+
+ auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
+ auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
+ status_code = le16_to_cpu(mgmt->u.auth.status_code);
+
+ if (auth_alg != ifmgd->auth_data->algorithm ||
+ auth_transaction != ifmgd->auth_data->expected_transaction)
+ return RX_MGMT_NONE;
+
+ if (status_code != WLAN_STATUS_SUCCESS) {
+ printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
+ sdata->name, mgmt->sa, status_code);
+ goto out;
+ }
+
+ switch (ifmgd->auth_data->algorithm) {
+ case WLAN_AUTH_OPEN:
+ case WLAN_AUTH_LEAP:
+ case WLAN_AUTH_FT:
+ break;
+ case WLAN_AUTH_SHARED_KEY:
+ if (ifmgd->auth_data->expected_transaction != 4) {
+ ieee80211_auth_challenge(sdata, mgmt, len);
+ /* need another frame */
+ return RX_MGMT_NONE;
+ }
+ break;
+ default:
+ WARN_ONCE(1, "invalid auth alg %d",
+ ifmgd->auth_data->algorithm);
+ return RX_MGMT_NONE;
+ }
+
+ printk(KERN_DEBUG "%s: authenticated\n", sdata->name);
+ out:
+ if (ifmgd->auth_data->synced)
+ drv_finish_tx_sync(sdata->local, sdata, bssid,
+ IEEE80211_TX_SYNC_AUTH);
+ ifmgd->auth_data->synced = false;
+ ifmgd->auth_data->done = true;
+ ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
+ run_again(ifmgd, ifmgd->auth_data->timeout);
+
+ /* move station state to auth */
+ mutex_lock(&sdata->local->sta_mtx);
+ sta = sta_info_get(sdata, bssid);
+ if (!sta) {
+ WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid);
+ goto out_err;
+ }
+ if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
+ printk(KERN_DEBUG "%s: failed moving %pM to auth\n",
+ sdata->name, bssid);
+ goto out_err;
+ }
+ mutex_unlock(&sdata->local->sta_mtx);
+
+ return RX_MGMT_CFG80211_RX_AUTH;
+ out_err:
+ mutex_unlock(&sdata->local->sta_mtx);
+ /* ignore frame -- wait for timeout */
+ return RX_MGMT_NONE;
+}
+
+
static enum rx_mgmt_action __must_check
ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
@@ -1431,10 +1887,14 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
const u8 *bssid = NULL;
u16 reason_code;
+ lockdep_assert_held(&ifmgd->mtx);
+
if (len < 24 + 2)
return RX_MGMT_NONE;
- ASSERT_MGD_MTX(ifmgd);
+ if (!ifmgd->associated ||
+ memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN))
+ return RX_MGMT_NONE;
bssid = ifmgd->associated->bssid;
@@ -1459,15 +1919,13 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u16 reason_code;
- if (len < 24 + 2)
- return RX_MGMT_NONE;
-
- ASSERT_MGD_MTX(ifmgd);
+ lockdep_assert_held(&ifmgd->mtx);
- if (WARN_ON(!ifmgd->associated))
+ if (len < 24 + 2)
return RX_MGMT_NONE;
- if (WARN_ON(memcmp(ifmgd->associated->bssid, mgmt->sa, ETH_ALEN)))
+ if (!ifmgd->associated ||
+ memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN))
return RX_MGMT_NONE;
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@ -1524,15 +1982,37 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
}
}
-static bool ieee80211_assoc_success(struct ieee80211_work *wk,
+static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
+ bool assoc)
+{
+ struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
+
+ lockdep_assert_held(&sdata->u.mgd.mtx);
+
+ if (assoc_data->synced)
+ drv_finish_tx_sync(sdata->local, sdata,
+ assoc_data->bss->bssid,
+ IEEE80211_TX_SYNC_ASSOC);
+
+ if (!assoc) {
+ sta_info_destroy_addr(sdata, assoc_data->bss->bssid);
+
+ memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
+ }
+
+ kfree(assoc_data);
+ sdata->u.mgd.assoc_data = NULL;
+}
+
+static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_bss *cbss,
struct ieee80211_mgmt *mgmt, size_t len)
{
- struct ieee80211_sub_if_data *sdata = wk->sdata;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
struct sta_info *sta;
- struct cfg80211_bss *cbss = wk->assoc.bss;
u8 *pos;
u32 rates, basic_rates;
u16 capab_info, aid;
@@ -1581,20 +2061,15 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
* station info was already allocated and inserted before
* the association and should be available to us
*/
- sta = sta_info_get_rx(sdata, cbss->bssid);
+ sta = sta_info_get(sdata, cbss->bssid);
if (WARN_ON(!sta)) {
mutex_unlock(&sdata->local->sta_mtx);
return false;
}
- sta_info_move_state(sta, IEEE80211_STA_AUTH);
- sta_info_move_state(sta, IEEE80211_STA_ASSOC);
- if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
- sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
-
rates = 0;
basic_rates = 0;
- sband = local->hw.wiphy->bands[wk->chan->band];
+ sband = local->hw.wiphy->bands[local->oper_channel->band];
ieee80211_get_rates(sband, elems.supp_rates, elems.supp_rates_len,
&rates, &basic_rates, &have_higher_than_11mbit,
@@ -1615,11 +2090,11 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
basic_rates = BIT(min_rate_index);
}
- sta->sta.supp_rates[wk->chan->band] = rates;
+ sta->sta.supp_rates[local->oper_channel->band] = rates;
sdata->vif.bss_conf.basic_rates = basic_rates;
/* cf. IEEE 802.11 9.2.12 */
- if (wk->chan->band == IEEE80211_BAND_2GHZ &&
+ if (local->oper_channel->band == IEEE80211_BAND_2GHZ &&
have_higher_than_11mbit)
sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
else
@@ -1639,15 +2114,22 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
if (elems.wmm_param)
set_sta_flag(sta, WLAN_STA_WME);
- /* sta_info_reinsert will also unlock the mutex lock */
- err = sta_info_reinsert(sta);
- sta = NULL;
+ err = sta_info_move_state(sta, IEEE80211_STA_AUTH);
+ if (!err)
+ err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+ if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
+ err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
if (err) {
- printk(KERN_DEBUG "%s: failed to insert STA entry for"
- " the AP (error %d)\n", sdata->name, err);
+ printk(KERN_DEBUG
+ "%s: failed to move station %pM to desired state\n",
+ sdata->name, sta->sta.addr);
+ WARN_ON(__sta_info_destroy(sta));
+ mutex_unlock(&sdata->local->sta_mtx);
return false;
}
+ mutex_unlock(&sdata->local->sta_mtx);
+
/*
* Always handle WMM once after association regardless
* of the first value the AP uses. Setting -1 here has
@@ -1662,8 +2144,6 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
else
ieee80211_set_wmm_default(sdata);
- local->oper_channel = wk->chan;
-
if (elems.ht_info_elem && elems.wmm_param &&
(sdata->local->hw.queues >= 4) &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
@@ -1694,7 +2174,82 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
return true;
}
+static enum rx_mgmt_action __must_check
+ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct cfg80211_bss **bss)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
+ u16 capab_info, status_code, aid;
+ struct ieee802_11_elems elems;
+ u8 *pos;
+ bool reassoc;
+
+ lockdep_assert_held(&ifmgd->mtx);
+
+ if (!assoc_data)
+ return RX_MGMT_NONE;
+ if (memcmp(assoc_data->bss->bssid, mgmt->bssid, ETH_ALEN))
+ return RX_MGMT_NONE;
+
+ /*
+ * AssocResp and ReassocResp have identical structure, so process both
+ * of them in this function.
+ */
+
+ if (len < 24 + 6)
+ return RX_MGMT_NONE;
+
+ reassoc = ieee80211_is_reassoc_req(mgmt->frame_control);
+ capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
+ status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
+ aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
+
+ printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
+ "status=%d aid=%d)\n",
+ sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
+ capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
+
+ pos = mgmt->u.assoc_resp.variable;
+ ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
+
+ if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
+ elems.timeout_int && elems.timeout_int_len == 5 &&
+ elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
+ u32 tu, ms;
+ tu = get_unaligned_le32(elems.timeout_int + 1);
+ ms = tu * 1024 / 1000;
+ printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
+ "comeback duration %u TU (%u ms)\n",
+ sdata->name, mgmt->sa, tu, ms);
+ assoc_data->timeout = jiffies + msecs_to_jiffies(ms);
+ if (ms > IEEE80211_ASSOC_TIMEOUT)
+ run_again(ifmgd, assoc_data->timeout);
+ return RX_MGMT_NONE;
+ }
+
+ *bss = assoc_data->bss;
+
+ if (status_code != WLAN_STATUS_SUCCESS) {
+ printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
+ sdata->name, mgmt->sa, status_code);
+ ieee80211_destroy_assoc_data(sdata, false);
+ } else {
+ printk(KERN_DEBUG "%s: associated\n", sdata->name);
+
+ ieee80211_destroy_assoc_data(sdata, true);
+ if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
+ /* oops -- internal error -- send timeout for now */
+ sta_info_destroy_addr(sdata, mgmt->bssid);
+ cfg80211_put_bss(*bss);
+ return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
+ }
+ }
+
+ return RX_MGMT_CFG80211_RX_ASSOC;
+}
static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
size_t len,
@@ -1708,7 +2263,9 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *channel;
bool need_ps = false;
- if (sdata->u.mgd.associated) {
+ if (sdata->u.mgd.associated &&
+ memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
+ ETH_ALEN) == 0) {
bss = (void *)sdata->u.mgd.associated->priv;
/* not previously set so we may need to recalc */
need_ps = !bss->dtim_period;
@@ -1778,6 +2335,15 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
if (ifmgd->associated &&
memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0)
ieee80211_reset_ap_probe(sdata);
+
+ if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
+ memcmp(mgmt->bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN) == 0) {
+ /* got probe response, continue with auth */
+ printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
+ ifmgd->auth_data->tries = 0;
+ ifmgd->auth_data->timeout = jiffies;
+ run_again(ifmgd, ifmgd->auth_data->timeout);
+ }
}
/*
@@ -1817,7 +2383,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
u32 ncrc;
u8 *bssid;
- ASSERT_MGD_MTX(ifmgd);
+ lockdep_assert_held(&ifmgd->mtx);
/* Process beacon from the current BSS */
baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
@@ -1827,21 +2393,25 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (rx_status->freq != local->hw.conf.channel->center_freq)
return;
- /*
- * We might have received a number of frames, among them a
- * disassoc frame and a beacon...
- */
- if (!ifmgd->associated)
- return;
+ if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
+ memcmp(mgmt->bssid, ifmgd->assoc_data->bss->bssid, ETH_ALEN) == 0) {
+ ieee802_11_parse_elems(mgmt->u.beacon.variable,
+ len - baselen, &elems);
- bssid = ifmgd->associated->bssid;
+ ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
+ false);
+ ifmgd->assoc_data->have_beacon = true;
+ ifmgd->assoc_data->sent_assoc = false;
+ /* continue assoc process */
+ ifmgd->assoc_data->timeout = jiffies;
+ run_again(ifmgd, ifmgd->assoc_data->timeout);
+ return;
+ }
- /*
- * And in theory even frames from a different AP we were just
- * associated to a split-second ago!
- */
- if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0)
+ if (!ifmgd->associated ||
+ memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN))
return;
+ bssid = ifmgd->associated->bssid;
/* Track average RSSI from the Beacon frames of the current AP */
ifmgd->last_beacon_signal = rx_status->signal;
@@ -1882,7 +2452,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (bss_conf->cqm_rssi_thold &&
ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
- !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
+ !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) {
int sig = ifmgd->ave_beacon_signal / 16;
int last_event = ifmgd->last_cqm_event_signal;
int thold = bss_conf->cqm_rssi_thold;
@@ -2025,6 +2595,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_rx_status *rx_status;
struct ieee80211_mgmt *mgmt;
+ struct cfg80211_bss *bss = NULL;
enum rx_mgmt_action rma = RX_MGMT_NONE;
u16 fc;
@@ -2034,92 +2605,59 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
mutex_lock(&ifmgd->mtx);
- if (ifmgd->associated &&
- memcmp(ifmgd->associated->bssid, mgmt->bssid, ETH_ALEN) == 0) {
- switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_BEACON:
- ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
- rx_status);
- break;
- case IEEE80211_STYPE_PROBE_RESP:
- ieee80211_rx_mgmt_probe_resp(sdata, skb);
- break;
- case IEEE80211_STYPE_DEAUTH:
- rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
- break;
- case IEEE80211_STYPE_DISASSOC:
- rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
- break;
- case IEEE80211_STYPE_ACTION:
- switch (mgmt->u.action.category) {
- case WLAN_CATEGORY_SPECTRUM_MGMT:
- ieee80211_sta_process_chanswitch(sdata,
- &mgmt->u.action.u.chan_switch.sw_elem,
- (void *)ifmgd->associated->priv,
- rx_status->mactime);
- break;
- }
- }
- mutex_unlock(&ifmgd->mtx);
-
- switch (rma) {
- case RX_MGMT_NONE:
- /* no action */
- break;
- case RX_MGMT_CFG80211_DEAUTH:
- cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
- break;
- case RX_MGMT_CFG80211_DISASSOC:
- cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
+ switch (fc & IEEE80211_FCTL_STYPE) {
+ case IEEE80211_STYPE_BEACON:
+ ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status);
+ break;
+ case IEEE80211_STYPE_PROBE_RESP:
+ ieee80211_rx_mgmt_probe_resp(sdata, skb);
+ break;
+ case IEEE80211_STYPE_AUTH:
+ rma = ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len);
+ break;
+ case IEEE80211_STYPE_DEAUTH:
+ rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
+ break;
+ case IEEE80211_STYPE_DISASSOC:
+ rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
+ break;
+ case IEEE80211_STYPE_ASSOC_RESP:
+ case IEEE80211_STYPE_REASSOC_RESP:
+ rma = ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len, &bss);
+ break;
+ case IEEE80211_STYPE_ACTION:
+ switch (mgmt->u.action.category) {
+ case WLAN_CATEGORY_SPECTRUM_MGMT:
+ ieee80211_sta_process_chanswitch(sdata,
+ &mgmt->u.action.u.chan_switch.sw_elem,
+ (void *)ifmgd->associated->priv,
+ rx_status->mactime);
break;
- default:
- WARN(1, "unexpected: %d", rma);
}
- return;
}
-
mutex_unlock(&ifmgd->mtx);
- if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
- (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) {
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_work *wk;
-
- mutex_lock(&local->mtx);
- list_for_each_entry(wk, &local->work_list, list) {
- if (wk->sdata != sdata)
- continue;
-
- if (wk->type != IEEE80211_WORK_ASSOC &&
- wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
- continue;
-
- if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
- continue;
- if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN))
- continue;
-
- /*
- * Printing the message only here means we can't
- * spuriously print it, but it also means that it
- * won't be printed when the frame comes in before
- * we even tried to associate or in similar cases.
- *
- * Ultimately, I suspect cfg80211 should print the
- * messages instead.
- */
- printk(KERN_DEBUG
- "%s: deauthenticated from %pM (Reason: %u)\n",
- sdata->name, mgmt->bssid,
- le16_to_cpu(mgmt->u.deauth.reason_code));
-
- list_del_rcu(&wk->list);
- free_work(wk);
- break;
- }
- mutex_unlock(&local->mtx);
-
+ switch (rma) {
+ case RX_MGMT_NONE:
+ /* no action */
+ break;
+ case RX_MGMT_CFG80211_DEAUTH:
cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+ break;
+ case RX_MGMT_CFG80211_DISASSOC:
+ cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
+ break;
+ case RX_MGMT_CFG80211_RX_AUTH:
+ cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, skb->len);
+ break;
+ case RX_MGMT_CFG80211_RX_ASSOC:
+ cfg80211_send_rx_assoc(sdata->dev, bss, (u8 *)mgmt, skb->len);
+ break;
+ case RX_MGMT_CFG80211_ASSOC_TIMEOUT:
+ cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid);
+ break;
+ default:
+ WARN(1, "unexpected: %d", rma);
}
}
@@ -2164,14 +2702,160 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
mutex_lock(&ifmgd->mtx);
}
+static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data;
+
+ lockdep_assert_held(&ifmgd->mtx);
+
+ if (WARN_ON_ONCE(!auth_data))
+ return -EINVAL;
+
+ if (!auth_data->synced) {
+ int ret = drv_tx_sync(local, sdata, auth_data->bss->bssid,
+ IEEE80211_TX_SYNC_AUTH);
+ if (ret)
+ return ret;
+ }
+ auth_data->synced = true;
+
+ auth_data->tries++;
+
+ if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
+ printk(KERN_DEBUG "%s: authentication with %pM timed out\n",
+ sdata->name, auth_data->bss->bssid);
+
+ /*
+		 * Most likely the AP is not within range, so remove the
+		 * bss struct for that AP.
+ */
+ cfg80211_unlink_bss(local->hw.wiphy, auth_data->bss);
+
+ return -ETIMEDOUT;
+ }
+
+ if (auth_data->bss->proberesp_ies) {
+ printk(KERN_DEBUG "%s: send auth to %pM (try %d/%d)\n",
+ sdata->name, auth_data->bss->bssid, auth_data->tries,
+ IEEE80211_AUTH_MAX_TRIES);
+
+ auth_data->expected_transaction = 2;
+ ieee80211_send_auth(sdata, 1, auth_data->algorithm,
+ auth_data->ie, auth_data->ie_len,
+ auth_data->bss->bssid,
+ auth_data->bss->bssid, NULL, 0, 0);
+ } else {
+ const u8 *ssidie;
+
+ printk(KERN_DEBUG "%s: direct probe to %pM (try %d/%i)\n",
+ sdata->name, auth_data->bss->bssid, auth_data->tries,
+ IEEE80211_AUTH_MAX_TRIES);
+
+ ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID);
+ if (!ssidie)
+ return -EINVAL;
+ /*
+		 * The direct probe is sent to the broadcast address, as some
+		 * APs will not answer a unicast probe while unassociated.
+ */
+ ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
+ NULL, 0, (u32) -1, true, false);
+ }
+
+ auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
+ run_again(ifmgd, auth_data->timeout);
+
+ return 0;
+}
+
+static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
+ struct ieee80211_local *local = sdata->local;
+
+ lockdep_assert_held(&sdata->u.mgd.mtx);
+
+ if (!assoc_data->synced) {
+ int ret = drv_tx_sync(local, sdata, assoc_data->bss->bssid,
+ IEEE80211_TX_SYNC_ASSOC);
+ if (ret)
+ return ret;
+ }
+ assoc_data->synced = true;
+
+ assoc_data->tries++;
+ if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) {
+ printk(KERN_DEBUG "%s: association with %pM timed out\n",
+ sdata->name, assoc_data->bss->bssid);
+
+ /*
+		 * Most likely the AP is not within range, so remove the
+		 * bss struct for that AP.
+ */
+ cfg80211_unlink_bss(local->hw.wiphy, assoc_data->bss);
+
+ return -ETIMEDOUT;
+ }
+
+ printk(KERN_DEBUG "%s: associate with %pM (try %d/%d)\n",
+ sdata->name, assoc_data->bss->bssid, assoc_data->tries,
+ IEEE80211_ASSOC_MAX_TRIES);
+ ieee80211_send_assoc(sdata);
+
+ assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
+ run_again(&sdata->u.mgd, assoc_data->timeout);
+
+ return 0;
+}
+
void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- /* then process the rest of the work */
mutex_lock(&ifmgd->mtx);
+ if (ifmgd->auth_data &&
+ time_after(jiffies, ifmgd->auth_data->timeout)) {
+ if (ifmgd->auth_data->done) {
+ /*
+			 * Authentication finished, but userspace never asked
+			 * us to associate, so just discard the auth data.
+ */
+ ieee80211_destroy_auth_data(sdata, false);
+ } else if (ieee80211_probe_auth(sdata)) {
+ u8 bssid[ETH_ALEN];
+
+ memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
+
+ ieee80211_destroy_auth_data(sdata, false);
+
+ mutex_unlock(&ifmgd->mtx);
+ cfg80211_send_auth_timeout(sdata->dev, bssid);
+ mutex_lock(&ifmgd->mtx);
+ }
+ } else if (ifmgd->auth_data)
+ run_again(ifmgd, ifmgd->auth_data->timeout);
+
+ if (ifmgd->assoc_data &&
+ time_after(jiffies, ifmgd->assoc_data->timeout)) {
+ if (!ifmgd->assoc_data->have_beacon ||
+ ieee80211_do_assoc(sdata)) {
+ u8 bssid[ETH_ALEN];
+
+ memcpy(bssid, ifmgd->assoc_data->bss->bssid, ETH_ALEN);
+
+ ieee80211_destroy_assoc_data(sdata, false);
+
+ mutex_unlock(&ifmgd->mtx);
+ cfg80211_send_assoc_timeout(sdata->dev, bssid);
+ mutex_lock(&ifmgd->mtx);
+ }
+ } else if (ifmgd->assoc_data)
+ run_again(ifmgd, ifmgd->assoc_data->timeout);
+
if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
IEEE80211_STA_CONNECTION_POLL) &&
ifmgd->associated) {
@@ -2247,6 +2931,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
}
mutex_unlock(&ifmgd->mtx);
+
+ mutex_lock(&local->mtx);
+ ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
}
static void ieee80211_sta_bcn_mon_timer(unsigned long data)
@@ -2419,53 +3107,24 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
}
/* config hooks */
-static enum work_done_result
-ieee80211_probe_auth_done(struct ieee80211_work *wk,
- struct sk_buff *skb)
-{
- struct ieee80211_local *local = wk->sdata->local;
-
- if (!skb) {
- cfg80211_send_auth_timeout(wk->sdata->dev, wk->filter_ta);
- goto destroy;
- }
-
- if (wk->type == IEEE80211_WORK_AUTH) {
- cfg80211_send_rx_auth(wk->sdata->dev, skb->data, skb->len);
- goto destroy;
- }
-
- mutex_lock(&wk->sdata->u.mgd.mtx);
- ieee80211_rx_mgmt_probe_resp(wk->sdata, skb);
- mutex_unlock(&wk->sdata->u.mgd.mtx);
-
- wk->type = IEEE80211_WORK_AUTH;
- wk->probe_auth.tries = 0;
- return WORK_DONE_REQUEUE;
- destroy:
- if (wk->probe_auth.synced)
- drv_finish_tx_sync(local, wk->sdata, wk->filter_ta,
- IEEE80211_TX_SYNC_AUTH);
-
- return WORK_DONE_DESTROY;
-}
-
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct cfg80211_auth_request *req)
{
- const u8 *ssid;
- struct ieee80211_work *wk;
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_mgd_auth_data *auth_data;
+ struct sta_info *sta;
u16 auth_alg;
+ int err;
- if (req->local_state_change)
- return 0; /* no need to update mac80211 state */
+ /* prepare auth data structure */
switch (req->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
auth_alg = WLAN_AUTH_OPEN;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
- if (IS_ERR(sdata->local->wep_tx_tfm))
+ if (IS_ERR(local->wep_tx_tfm))
return -EOPNOTSUPP;
auth_alg = WLAN_AUTH_SHARED_KEY;
break;
@@ -2479,171 +3138,142 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
return -EOPNOTSUPP;
}
- wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL);
- if (!wk)
+ auth_data = kzalloc(sizeof(*auth_data) + req->ie_len, GFP_KERNEL);
+ if (!auth_data)
return -ENOMEM;
- memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
+ auth_data->bss = req->bss;
if (req->ie && req->ie_len) {
- memcpy(wk->ie, req->ie, req->ie_len);
- wk->ie_len = req->ie_len;
+ memcpy(auth_data->ie, req->ie, req->ie_len);
+ auth_data->ie_len = req->ie_len;
}
if (req->key && req->key_len) {
- wk->probe_auth.key_len = req->key_len;
- wk->probe_auth.key_idx = req->key_idx;
- memcpy(wk->probe_auth.key, req->key, req->key_len);
+ auth_data->key_len = req->key_len;
+ auth_data->key_idx = req->key_idx;
+ memcpy(auth_data->key, req->key, req->key_len);
}
- ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
- memcpy(wk->probe_auth.ssid, ssid + 2, ssid[1]);
- wk->probe_auth.ssid_len = ssid[1];
+ auth_data->algorithm = auth_alg;
- wk->probe_auth.algorithm = auth_alg;
- wk->probe_auth.privacy = req->bss->capability & WLAN_CAPABILITY_PRIVACY;
-
- /* if we already have a probe, don't probe again */
- if (req->bss->proberesp_ies)
- wk->type = IEEE80211_WORK_AUTH;
- else
- wk->type = IEEE80211_WORK_DIRECT_PROBE;
- wk->chan = req->bss->channel;
- wk->chan_type = NL80211_CHAN_NO_HT;
- wk->sdata = sdata;
- wk->done = ieee80211_probe_auth_done;
+ /* try to authenticate/probe */
- ieee80211_add_work(wk);
- return 0;
-}
-
-/* create and insert a dummy station entry */
-static int ieee80211_pre_assoc(struct ieee80211_sub_if_data *sdata,
- u8 *bssid) {
- struct sta_info *sta;
- int err;
-
- sta = sta_info_alloc(sdata, bssid, GFP_KERNEL);
- if (!sta)
- return -ENOMEM;
-
- sta->dummy = true;
+ mutex_lock(&ifmgd->mtx);
- err = sta_info_insert(sta);
- sta = NULL;
- if (err) {
- printk(KERN_DEBUG "%s: failed to insert Dummy STA entry for"
- " the AP (error %d)\n", sdata->name, err);
- return err;
+ if ((ifmgd->auth_data && !ifmgd->auth_data->done) ||
+ ifmgd->assoc_data) {
+ err = -EBUSY;
+ goto err_free;
}
- return 0;
-}
+ if (ifmgd->auth_data)
+ ieee80211_destroy_auth_data(sdata, false);
-static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
- struct sk_buff *skb)
-{
- struct ieee80211_local *local = wk->sdata->local;
- struct ieee80211_mgmt *mgmt;
- struct ieee80211_rx_status *rx_status;
- struct ieee802_11_elems elems;
- struct cfg80211_bss *cbss = wk->assoc.bss;
- u16 status;
+ /* prep auth_data so we don't go into idle on disassoc */
+ ifmgd->auth_data = auth_data;
- if (!skb) {
- sta_info_destroy_addr(wk->sdata, cbss->bssid);
- cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta);
- goto destroy;
- }
+ if (ifmgd->associated)
+ ieee80211_set_disassoc(sdata, true, false);
- if (wk->type == IEEE80211_WORK_ASSOC_BEACON_WAIT) {
- mutex_lock(&wk->sdata->u.mgd.mtx);
- rx_status = (void *) skb->cb;
- ieee802_11_parse_elems(skb->data + 24 + 12, skb->len - 24 - 12, &elems);
- ieee80211_rx_bss_info(wk->sdata, (void *)skb->data, skb->len, rx_status,
- &elems, true);
- mutex_unlock(&wk->sdata->u.mgd.mtx);
+ printk(KERN_DEBUG "%s: authenticate with %pM\n",
+ sdata->name, req->bss->bssid);
- wk->type = IEEE80211_WORK_ASSOC;
- /* not really done yet */
- return WORK_DONE_REQUEUE;
- }
+ mutex_lock(&local->mtx);
+ ieee80211_recalc_idle(sdata->local);
+ mutex_unlock(&local->mtx);
- mgmt = (void *)skb->data;
- status = le16_to_cpu(mgmt->u.assoc_resp.status_code);
+ /* switch to the right channel */
+ local->oper_channel = req->bss->channel;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- if (status == WLAN_STATUS_SUCCESS) {
- if (wk->assoc.synced)
- drv_finish_tx_sync(local, wk->sdata, wk->filter_ta,
- IEEE80211_TX_SYNC_ASSOC);
+ /* set BSSID */
+ memcpy(ifmgd->bssid, req->bss->bssid, ETH_ALEN);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
- mutex_lock(&wk->sdata->u.mgd.mtx);
- if (!ieee80211_assoc_success(wk, mgmt, skb->len)) {
- mutex_unlock(&wk->sdata->u.mgd.mtx);
- /* oops -- internal error -- send timeout for now */
- sta_info_destroy_addr(wk->sdata, cbss->bssid);
- cfg80211_send_assoc_timeout(wk->sdata->dev,
- wk->filter_ta);
- return WORK_DONE_DESTROY;
- }
+ /* add station entry */
+ sta = sta_info_alloc(sdata, req->bss->bssid, GFP_KERNEL);
+ if (!sta) {
+ err = -ENOMEM;
+ goto err_clear;
+ }
- mutex_unlock(&wk->sdata->u.mgd.mtx);
- } else {
- /* assoc failed - destroy the dummy station entry */
- sta_info_destroy_addr(wk->sdata, cbss->bssid);
+ err = sta_info_insert(sta);
+ if (err) {
+ printk(KERN_DEBUG
+ "%s: failed to insert STA entry for the AP %pM (error %d)\n",
+ sdata->name, req->bss->bssid, err);
+ goto err_clear;
}
- cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
- destroy:
- if (wk->assoc.synced)
- drv_finish_tx_sync(local, wk->sdata, wk->filter_ta,
- IEEE80211_TX_SYNC_ASSOC);
+ err = ieee80211_probe_auth(sdata);
+ if (err) {
+ if (auth_data->synced)
+ drv_finish_tx_sync(local, sdata, req->bss->bssid,
+ IEEE80211_TX_SYNC_AUTH);
+ sta_info_destroy_addr(sdata, req->bss->bssid);
+ goto err_clear;
+ }
+
+ /* hold our own reference */
+ cfg80211_ref_bss(auth_data->bss);
+ err = 0;
+ goto out_unlock;
+
+ err_clear:
+ ifmgd->auth_data = NULL;
+ err_free:
+ kfree(auth_data);
+ out_unlock:
+ mutex_unlock(&ifmgd->mtx);
- return WORK_DONE_DESTROY;
+ return err;
}
int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct cfg80211_assoc_request *req)
{
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_bss *bss = (void *)req->bss->priv;
- struct ieee80211_work *wk;
- const u8 *ssid;
+ struct ieee80211_mgd_assoc_data *assoc_data;
+ struct sta_info *sta;
+ const u8 *ssidie;
int i, err;
+ ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
+ if (!ssidie)
+ return -EINVAL;
+
+ assoc_data = kzalloc(sizeof(*assoc_data) + req->ie_len, GFP_KERNEL);
+ if (!assoc_data)
+ return -ENOMEM;
+
mutex_lock(&ifmgd->mtx);
- if (ifmgd->associated) {
- if (!req->prev_bssid ||
- memcmp(req->prev_bssid, ifmgd->associated->bssid,
- ETH_ALEN)) {
- /*
- * We are already associated and the request was not a
- * reassociation request from the current BSS, so
- * reject it.
- */
- mutex_unlock(&ifmgd->mtx);
- return -EALREADY;
- }
- /* Trying to reassociate - clear previous association state */
+ if (ifmgd->associated)
ieee80211_set_disassoc(sdata, true, false);
+
+ if (ifmgd->auth_data && !ifmgd->auth_data->done) {
+ err = -EBUSY;
+ goto err_free;
}
- mutex_unlock(&ifmgd->mtx);
- wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL);
- if (!wk)
- return -ENOMEM;
+ if (ifmgd->assoc_data) {
+ err = -EBUSY;
+ goto err_free;
+ }
- /*
- * create a dummy station info entry in order
- * to start accepting incoming EAPOL packets from the station
- */
- err = ieee80211_pre_assoc(sdata, req->bss->bssid);
- if (err) {
- kfree(wk);
- return err;
+ if (ifmgd->auth_data) {
+ bool match;
+
+ /* keep sta info, bssid if matching */
+ match = memcmp(ifmgd->bssid, req->bss->bssid, ETH_ALEN) == 0;
+ ieee80211_destroy_auth_data(sdata, match);
}
+ /* prepare assoc data */
+
ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
@@ -2655,7 +3285,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
-
if (req->flags & ASSOC_REQ_DISABLE_HT)
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
@@ -2664,16 +3293,12 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
sizeof(ifmgd->ht_capa_mask));
if (req->ie && req->ie_len) {
- memcpy(wk->ie, req->ie, req->ie_len);
- wk->ie_len = req->ie_len;
- } else
- wk->ie_len = 0;
-
- wk->assoc.bss = req->bss;
+ memcpy(assoc_data->ie, req->ie, req->ie_len);
+ assoc_data->ie_len = req->ie_len;
+ }
- memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
+ assoc_data->bss = req->bss;
- /* new association always uses requested smps mode */
if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
if (ifmgd->powersave)
ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC;
@@ -2682,7 +3307,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
} else
ifmgd->ap_smps = ifmgd->req_smps;
- wk->assoc.smps = ifmgd->ap_smps;
/*
* IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
* We still associate in non-HT mode (11a/b/g) if any one of these
@@ -2690,39 +3314,27 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
* We can set this to true for non-11n hardware, that'll be checked
* separately along with the peer capabilities.
*/
- wk->assoc.use_11n = !(ifmgd->flags & IEEE80211_STA_DISABLE_11N);
- wk->assoc.capability = req->bss->capability;
- wk->assoc.wmm_used = bss->wmm_used;
- wk->assoc.supp_rates = bss->supp_rates;
- wk->assoc.supp_rates_len = bss->supp_rates_len;
- wk->assoc.ht_information_ie =
+ assoc_data->capability = req->bss->capability;
+ assoc_data->wmm_used = bss->wmm_used;
+ assoc_data->supp_rates = bss->supp_rates;
+ assoc_data->supp_rates_len = bss->supp_rates_len;
+ assoc_data->ht_information_ie =
ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION);
if (bss->wmm_used && bss->uapsd_supported &&
(sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
- wk->assoc.uapsd_used = true;
+ assoc_data->uapsd_used = true;
ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
} else {
- wk->assoc.uapsd_used = false;
+ assoc_data->uapsd_used = false;
ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED;
}
- ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
- memcpy(wk->assoc.ssid, ssid + 2, ssid[1]);
- wk->assoc.ssid_len = ssid[1];
+ memcpy(assoc_data->ssid, ssidie + 2, ssidie[1]);
+ assoc_data->ssid_len = ssidie[1];
if (req->prev_bssid)
- memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN);
-
- wk->chan = req->bss->channel;
- wk->chan_type = NL80211_CHAN_NO_HT;
- wk->sdata = sdata;
- wk->done = ieee80211_assoc_done;
- if (!bss->dtim_period &&
- sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
- wk->type = IEEE80211_WORK_ASSOC_BEACON_WAIT;
- else
- wk->type = IEEE80211_WORK_ASSOC;
+ memcpy(assoc_data->prev_bssid, req->prev_bssid, ETH_ALEN);
if (req->use_mfp) {
ifmgd->mfp = IEEE80211_MFP_REQUIRED;
@@ -2740,89 +3352,100 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
sdata->control_port_protocol = req->crypto.control_port_ethertype;
sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt;
- ieee80211_add_work(wk);
- return 0;
+ /* kick off associate process */
+
+ ifmgd->assoc_data = assoc_data;
+
+ mutex_lock(&local->mtx);
+ ieee80211_recalc_idle(sdata->local);
+ mutex_unlock(&local->mtx);
+
+ /* switch to the right channel */
+ local->oper_channel = req->bss->channel;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+
+ rcu_read_lock();
+ sta = sta_info_get(sdata, req->bss->bssid);
+ rcu_read_unlock();
+
+ if (!sta) {
+ /* set BSSID */
+ memcpy(ifmgd->bssid, req->bss->bssid, ETH_ALEN);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
+
+ sta = sta_info_alloc(sdata, req->bss->bssid, GFP_KERNEL);
+ if (!sta) {
+ err = -ENOMEM;
+ goto err_clear;
+ }
+
+ sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
+
+ err = sta_info_insert(sta);
+ sta = NULL;
+ if (err) {
+ printk(KERN_DEBUG
+ "%s: failed to insert STA entry for the AP (error %d)\n",
+ sdata->name, err);
+ goto err_clear;
+ }
+ } else
+ WARN_ON_ONCE(memcmp(ifmgd->bssid, req->bss->bssid, ETH_ALEN));
+
+ if (!bss->dtim_period &&
+ sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
+ /*
+ * Wait up to one beacon interval ...
+ * should this be more if we miss one?
+ */
+ printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
+ sdata->name, ifmgd->bssid);
+ assoc_data->timeout = jiffies +
+ TU_TO_EXP_TIME(req->bss->beacon_interval);
+ } else {
+ assoc_data->have_beacon = true;
+ assoc_data->sent_assoc = false;
+ assoc_data->timeout = jiffies;
+ }
+ run_again(ifmgd, assoc_data->timeout);
+
+ err = 0;
+ goto out;
+ err_clear:
+ ifmgd->assoc_data = NULL;
+ err_free:
+ kfree(assoc_data);
+ out:
+ mutex_unlock(&ifmgd->mtx);
+
+ return err;
}
int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
struct cfg80211_deauth_request *req,
void *cookie)
{
- struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- u8 bssid[ETH_ALEN];
bool assoc_bss = false;
mutex_lock(&ifmgd->mtx);
- memcpy(bssid, req->bss->bssid, ETH_ALEN);
- if (ifmgd->associated == req->bss) {
+ if (ifmgd->associated &&
+ memcmp(ifmgd->associated->bssid, req->bssid, ETH_ALEN) == 0) {
ieee80211_set_disassoc(sdata, false, true);
- mutex_unlock(&ifmgd->mtx);
assoc_bss = true;
- } else {
- bool not_auth_yet = false;
- struct ieee80211_work *tmp, *wk = NULL;
-
+ } else if (ifmgd->auth_data) {
+ ieee80211_destroy_auth_data(sdata, false);
mutex_unlock(&ifmgd->mtx);
-
- mutex_lock(&local->mtx);
- list_for_each_entry(tmp, &local->work_list, list) {
- if (tmp->sdata != sdata)
- continue;
-
- if (tmp->type != IEEE80211_WORK_DIRECT_PROBE &&
- tmp->type != IEEE80211_WORK_AUTH &&
- tmp->type != IEEE80211_WORK_ASSOC &&
- tmp->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
- continue;
-
- if (memcmp(req->bss->bssid, tmp->filter_ta, ETH_ALEN))
- continue;
-
- not_auth_yet = tmp->type == IEEE80211_WORK_DIRECT_PROBE;
- list_del_rcu(&tmp->list);
- synchronize_rcu();
- wk = tmp;
- break;
- }
- mutex_unlock(&local->mtx);
-
- if (wk && wk->type == IEEE80211_WORK_ASSOC) {
- /* clean up dummy sta & TX sync */
- sta_info_destroy_addr(wk->sdata, wk->filter_ta);
- if (wk->assoc.synced)
- drv_finish_tx_sync(local, wk->sdata,
- wk->filter_ta,
- IEEE80211_TX_SYNC_ASSOC);
- } else if (wk && wk->type == IEEE80211_WORK_AUTH) {
- if (wk->probe_auth.synced)
- drv_finish_tx_sync(local, wk->sdata,
- wk->filter_ta,
- IEEE80211_TX_SYNC_AUTH);
- }
- kfree(wk);
-
- /*
- * If somebody requests authentication and we haven't
- * sent out an auth frame yet there's no need to send
- * out a deauth frame either. If the state was PROBE,
- * then this is the case. If it's AUTH we have sent a
- * frame, and if it's IDLE we have completed the auth
- * process already.
- */
- if (not_auth_yet) {
- __cfg80211_auth_canceled(sdata->dev, bssid);
- return 0;
- }
+ return 0;
}
+ mutex_unlock(&ifmgd->mtx);
printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n",
- sdata->name, bssid, req->reason_code);
+ sdata->name, req->bssid, req->reason_code);
- ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH,
- req->reason_code, cookie,
- !req->local_state_change);
+ ieee80211_send_deauth_disassoc(sdata, req->bssid, IEEE80211_STYPE_DEAUTH,
+ req->reason_code, cookie, true);
if (assoc_bss)
sta_info_flush(sdata->local, sdata);
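
The mlme.c hunks above move authentication and association off the work queue and into ifmgd->auth_data / ifmgd->assoc_data, whose jiffies deadlines are polled from ieee80211_sta_work(). Below is a minimal userspace sketch of that deadline/retry pattern; it is not part of the patch, and the tick type, retry limit and helper names are illustrative stand-ins only.

#include <stdbool.h>
#include <stdio.h>

#define AUTH_MAX_TRIES     3    /* give up after this many transmissions */
#define AUTH_TIMEOUT_TICKS 5    /* deadline pushed forward on each retry */

struct auth_attempt {
        unsigned long timeout;  /* absolute deadline, in ticks */
        int tries;
        bool done;
};

/* stand-ins for ieee80211_send_auth() / cfg80211_send_auth_timeout() */
static void send_auth_frame(void)     { puts("tx auth frame"); }
static void report_auth_timeout(void) { puts("auth timed out"); }

/* returns false once the attempt has been abandoned */
static bool poll_auth(struct auth_attempt *auth, unsigned long now)
{
        if (auth->done || now < auth->timeout)
                return true;            /* nothing to do yet */

        if (++auth->tries > AUTH_MAX_TRIES) {
                report_auth_timeout();
                return false;           /* caller frees the attempt */
        }

        send_auth_frame();
        auth->timeout = now + AUTH_TIMEOUT_TICKS; /* run_again() analogue */
        return true;
}

int main(void)
{
        struct auth_attempt auth = { .timeout = 0, .tries = 0, .done = false };
        unsigned long now;

        for (now = 0; now < 40; now++)
                if (!poll_auth(&auth, now))
                        break;
        return 0;
}
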
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 596efaf50e09..2b53a5348ace 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -98,13 +98,12 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
if (sta->uploaded) {
- sdata = sta->sdata;
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
+ enum ieee80211_sta_state state;
- drv_sta_remove(local, sdata, &sta->sta);
+ state = sta->sta_state;
+ for (; state > IEEE80211_STA_NOTEXIST; state--)
+ WARN_ON(drv_sta_state(local, sdata, sta,
+ state, state - 1));
}
mesh_plink_quiesce(sta);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index ad64f4d5271a..111fba38be82 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -159,7 +159,6 @@ static struct rate_control_ref *rate_control_alloc(const char *name,
ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL);
if (!ref)
goto fail_ref;
- kref_init(&ref->kref);
ref->local = local;
ref->ops = ieee80211_rate_control_ops_get(name);
if (!ref->ops)
@@ -184,11 +183,8 @@ fail_ref:
return NULL;
}
-static void rate_control_release(struct kref *kref)
+static void rate_control_free(struct rate_control_ref *ctrl_ref)
{
- struct rate_control_ref *ctrl_ref;
-
- ctrl_ref = container_of(kref, struct rate_control_ref, kref);
ctrl_ref->ops->free(ctrl_ref->priv);
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -293,8 +289,8 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
}
EXPORT_SYMBOL(rate_control_send_low);
-static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
- int n_bitrates, u32 mask)
+static bool rate_idx_match_legacy_mask(struct ieee80211_tx_rate *rate,
+ int n_bitrates, u32 mask)
{
int j;
@@ -303,7 +299,7 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
if (mask & (1 << j)) {
/* Okay, found a suitable rate. Use it. */
rate->idx = j;
- return;
+ return true;
}
}
@@ -312,6 +308,112 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
if (mask & (1 << j)) {
/* Okay, found a suitable rate. Use it. */
rate->idx = j;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
+ u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int i, j;
+ int ridx, rbit;
+
+ ridx = rate->idx / 8;
+ rbit = rate->idx % 8;
+
+ /* sanity check */
+ if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN)
+ return false;
+
+ /* See whether the selected rate or anything below it is allowed. */
+ for (i = ridx; i >= 0; i--) {
+ for (j = rbit; j >= 0; j--)
+ if (mcs_mask[i] & BIT(j)) {
+ rate->idx = i * 8 + j;
+ return true;
+ }
+ rbit = 7;
+ }
+
+ /* Try to find a higher rate that would be allowed */
+ ridx = (rate->idx + 1) / 8;
+ rbit = (rate->idx + 1) % 8;
+
+ for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
+ for (j = rbit; j < 8; j++)
+ if (mcs_mask[i] & BIT(j)) {
+ rate->idx = i * 8 + j;
+ return true;
+ }
+ rbit = 0;
+ }
+ return false;
+}
+
+
+
+static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
+ struct ieee80211_tx_rate_control *txrc,
+ u32 mask,
+ u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ struct ieee80211_tx_rate alt_rate;
+
+ /* handle HT rates */
+ if (rate->flags & IEEE80211_TX_RC_MCS) {
+ if (rate_idx_match_mcs_mask(rate, mcs_mask))
+ return;
+
+ /* also try the legacy rates. */
+ alt_rate.idx = 0;
+ /* keep protection flags */
+ alt_rate.flags = rate->flags &
+ (IEEE80211_TX_RC_USE_RTS_CTS |
+ IEEE80211_TX_RC_USE_CTS_PROTECT |
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+ alt_rate.count = rate->count;
+ if (rate_idx_match_legacy_mask(&alt_rate,
+ txrc->sband->n_bitrates,
+ mask)) {
+ *rate = alt_rate;
+ return;
+ }
+ } else {
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ __le16 fc;
+
+ /* handle legacy rates */
+ if (rate_idx_match_legacy_mask(rate, txrc->sband->n_bitrates,
+ mask))
+ return;
+
+ /* if HT BSS, and we handle a data frame, also try HT rates */
+ if (txrc->bss_conf->channel_type == NL80211_CHAN_NO_HT)
+ return;
+
+ fc = hdr->frame_control;
+ if (!ieee80211_is_data(fc))
+ return;
+
+ alt_rate.idx = 0;
+ /* keep protection flags */
+ alt_rate.flags = rate->flags &
+ (IEEE80211_TX_RC_USE_RTS_CTS |
+ IEEE80211_TX_RC_USE_CTS_PROTECT |
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+ alt_rate.count = rate->count;
+
+ alt_rate.flags |= IEEE80211_TX_RC_MCS;
+
+ if ((txrc->bss_conf->channel_type == NL80211_CHAN_HT40MINUS) ||
+ (txrc->bss_conf->channel_type == NL80211_CHAN_HT40PLUS))
+ alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+ if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) {
+ *rate = alt_rate;
return;
}
}
@@ -335,6 +437,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
int i;
u32 mask;
+ u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
if (sta && test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) {
ista = &sta->sta;
@@ -358,10 +461,14 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
* the common case.
*/
mask = sdata->rc_rateidx_mask[info->band];
+ memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[info->band],
+ sizeof(mcs_mask));
if (mask != (1 << txrc->sband->n_bitrates) - 1) {
if (sta) {
/* Filter out rates that the STA does not support */
mask &= sta->sta.supp_rates[info->band];
+ for (i = 0; i < sizeof(mcs_mask); i++)
+ mcs_mask[i] &= sta->sta.ht_cap.mcs.rx_mask[i];
}
/*
* Make sure the rate index selected for each TX rate is
@@ -372,32 +479,18 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
/* Skip invalid rates */
if (info->control.rates[i].idx < 0)
break;
- /* Rate masking supports only legacy rates for now */
- if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS)
- continue;
- rate_idx_match_mask(&info->control.rates[i],
- txrc->sband->n_bitrates, mask);
+ rate_idx_match_mask(&info->control.rates[i], txrc,
+ mask, mcs_mask);
}
}
BUG_ON(info->control.rates[0].idx < 0);
}
-struct rate_control_ref *rate_control_get(struct rate_control_ref *ref)
-{
- kref_get(&ref->kref);
- return ref;
-}
-
-void rate_control_put(struct rate_control_ref *ref)
-{
- kref_put(&ref->kref, rate_control_release);
-}
-
int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
const char *name)
{
- struct rate_control_ref *ref, *old;
+ struct rate_control_ref *ref;
ASSERT_RTNL();
@@ -417,12 +510,8 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
return -ENOENT;
}
- old = local->rate_ctrl;
+ WARN_ON(local->rate_ctrl);
local->rate_ctrl = ref;
- if (old) {
- rate_control_put(old);
- sta_info_flush(local, NULL);
- }
wiphy_debug(local->hw.wiphy, "Selected rate control algorithm '%s'\n",
ref->ops->name);
@@ -440,6 +529,6 @@ void rate_control_deinitialize(struct ieee80211_local *local)
return;
local->rate_ctrl = NULL;
- rate_control_put(ref);
+ rate_control_free(ref);
}
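
The new rate_idx_match_mcs_mask() above prefers the requested MCS index or the nearest allowed index below it, and only falls forward to a higher allowed index when nothing lower matches. The following standalone sketch shows that search flattened to a single index space; the mask length and demo values are arbitrary and not taken from the patch.

#include <stdbool.h>
#include <stdio.h>

#define MASK_LEN 10                     /* one mask byte per 8 MCS indexes */

static bool match_mcs_mask(int *idx, const unsigned char mask[MASK_LEN])
{
        int start = *idx, i;

        if (start < 0 || start >= MASK_LEN * 8)
                return false;

        /* the requested index, or the closest allowed index below it */
        for (i = start; i >= 0; i--)
                if (mask[i / 8] & (1u << (i % 8))) {
                        *idx = i;
                        return true;
                }

        /* nothing at or below: take the closest allowed index above it */
        for (i = start + 1; i < MASK_LEN * 8; i++)
                if (mask[i / 8] & (1u << (i % 8))) {
                        *idx = i;
                        return true;
                }

        return false;
}

int main(void)
{
        unsigned char mask[MASK_LEN] = { 0x00, 0xf0 }; /* allow MCS 12..15 only */
        int idx = 9;

        if (match_mcs_mask(&idx, mask))
                printf("MCS 9 mapped to MCS %d\n", idx); /* prints MCS 12 */
        return 0;
}
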
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 80cfc006dd74..fbb1efdc4d04 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -14,7 +14,6 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
-#include <linux/kref.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "sta_info.h"
@@ -23,14 +22,11 @@ struct rate_control_ref {
struct ieee80211_local *local;
struct rate_control_ops *ops;
void *priv;
- struct kref kref;
};
void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_tx_rate_control *txrc);
-struct rate_control_ref *rate_control_get(struct rate_control_ref *ref);
-void rate_control_put(struct rate_control_ref *ref);
static inline void rate_control_tx_status(struct ieee80211_local *local,
struct ieee80211_supported_band *sband,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5a5e504a8ffb..3ab85c02ef04 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -859,7 +859,12 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
- if (rx->sta && rx->sta->dummy &&
+ /*
+ * accept port control frames from the AP even when it's not
+ * yet marked ASSOC to prevent a race where we don't set the
+ * assoc bit quickly enough before it sends the first frame
+ */
+ if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
ieee80211_is_data_present(hdr->frame_control)) {
u16 ethertype;
u8 *payload;
@@ -1145,19 +1150,15 @@ static void ap_sta_ps_start(struct sta_info *sta)
static void ap_sta_ps_end(struct sta_info *sta)
{
- struct ieee80211_sub_if_data *sdata = sta->sdata;
-
- atomic_dec(&sdata->bss->num_sta_ps);
-
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
- sdata->name, sta->sta.addr, sta->sta.aid);
+ sta->sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
- sdata->name, sta->sta.addr, sta->sta.aid);
+ sta->sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
return;
}
@@ -2180,9 +2181,6 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
ieee80211_is_beacon(mgmt->frame_control) &&
!(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
- struct ieee80211_rx_status *status;
-
- status = IEEE80211_SKB_RXCB(rx->skb);
cfg80211_report_obss_beacon(rx->local->hw.wiphy,
rx->skb->data, rx->skb->len,
status->freq, GFP_ATOMIC);
@@ -2486,14 +2484,9 @@ static ieee80211_rx_result debug_noinline
ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
- ieee80211_rx_result rxs;
struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
__le16 stype;
- rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
- if (rxs != RX_CONTINUE)
- return rxs;
-
stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
if (!ieee80211_vif_is_mesh(&sdata->vif) &&
@@ -2502,10 +2495,13 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
return RX_DROP_MONITOR;
switch (stype) {
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
case cpu_to_le16(IEEE80211_STYPE_BEACON):
case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
/* process for all: mesh, mlme, ibss */
break;
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
if (is_multicast_ether_addr(mgmt->da) &&
@@ -2517,7 +2513,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
return RX_DROP_MONITOR;
break;
case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
/* process only for ibss */
if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
return RX_DROP_MONITOR;
@@ -2956,7 +2951,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
if (ieee80211_is_data(fc)) {
prev_sta = NULL;
- for_each_sta_info_rx(local, hdr->addr2, sta, tmp) {
+ for_each_sta_info(local, hdr->addr2, sta, tmp) {
if (!prev_sta) {
prev_sta = sta;
continue;
@@ -3000,7 +2995,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
continue;
}
- rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
+ rx.sta = sta_info_get_bss(prev, hdr->addr2);
rx.sdata = prev;
ieee80211_prepare_and_rx_handle(&rx, skb, false);
@@ -3008,7 +3003,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
}
if (prev) {
- rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
+ rx.sta = sta_info_get_bss(prev, hdr->addr2);
rx.sdata = prev;
if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
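
The rx.c change above lets a station-mode interface accept data frames from the AP before the ASSOC flag is set, but only when they carry the configured control port protocol (EAPOL by default). A sketch of that ethertype check on the frame payload follows, assuming the usual RFC 1042 LLC/SNAP encapsulation; the helper name and the explicit LLC header comparison are additions for illustration, not the patch's code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_PAE 0x888E        /* EAPOL / port access entity */

static bool is_control_port_frame(const uint8_t *payload, size_t len,
                                  uint16_t control_port_protocol)
{
        /* LLC (3 bytes) + SNAP OUI (3 bytes) + ethertype (2 bytes) */
        if (len < 8)
                return false;
        if (payload[0] != 0xaa || payload[1] != 0xaa || payload[2] != 0x03)
                return false;

        return ((payload[6] << 8) | payload[7]) == control_port_protocol;
}

int main(void)
{
        /* RFC 1042 header followed by the EAPOL ethertype */
        uint8_t eapol[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x88, 0x8e };

        printf("%s\n", is_control_port_frame(eapol, sizeof(eapol), ETH_P_PAE) ?
               "accept before ASSOC" : "drop");
        return 0;
}
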
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index ff11f6bf8266..4034ee616022 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -100,25 +100,6 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
lockdep_is_held(&local->sta_mtx));
while (sta) {
- if (sta->sdata == sdata && !sta->dummy &&
- memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
- break;
- sta = rcu_dereference_check(sta->hnext,
- lockdep_is_held(&local->sta_mtx));
- }
- return sta;
-}
-
-/* get a station info entry even if it is a dummy station*/
-struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
- const u8 *addr)
-{
- struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
-
- sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
- lockdep_is_held(&local->sta_mtx));
- while (sta) {
if (sta->sdata == sdata &&
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
@@ -143,30 +124,6 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
while (sta) {
if ((sta->sdata == sdata ||
(sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
- !sta->dummy &&
- memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
- break;
- sta = rcu_dereference_check(sta->hnext,
- lockdep_is_held(&local->sta_mtx));
- }
- return sta;
-}
-
-/*
- * Get sta info either from the specified interface
- * or from one of its vlans (including dummy stations)
- */
-struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
- const u8 *addr)
-{
- struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
-
- sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
- lockdep_is_held(&local->sta_mtx));
- while (sta) {
- if ((sta->sdata == sdata ||
- (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
sta = rcu_dereference_check(sta->hnext,
@@ -208,10 +165,8 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
*/
void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
{
- if (sta->rate_ctrl) {
+ if (sta->rate_ctrl)
rate_control_free_sta(sta);
- rate_control_put(sta->rate_ctrl);
- }
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
wiphy_debug(local->hw.wiphy, "Destroyed STA %pM\n", sta->sta.addr);
@@ -264,13 +219,11 @@ static int sta_prepare_rate_control(struct ieee80211_local *local,
if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
return 0;
- sta->rate_ctrl = rate_control_get(local->rate_ctrl);
+ sta->rate_ctrl = local->rate_ctrl;
sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
&sta->sta, gfp);
- if (!sta->rate_ctrl_priv) {
- rate_control_put(sta->rate_ctrl);
+ if (!sta->rate_ctrl_priv)
return -ENOMEM;
- }
return 0;
}
@@ -297,6 +250,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->sdata = sdata;
sta->last_rx = jiffies;
+ sta->sta_state = IEEE80211_STA_NONE;
+
do_posix_clock_monotonic_gettime(&uptime);
sta->last_connected = uptime.tv_sec;
ewma_init(&sta->avg_signal, 1024, 8);
@@ -353,6 +308,43 @@ static int sta_info_insert_check(struct sta_info *sta)
return 0;
}
+static int sta_info_insert_drv_state(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta)
+{
+ enum ieee80211_sta_state state;
+ int err = 0;
+
+ for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) {
+ err = drv_sta_state(local, sdata, sta, state, state + 1);
+ if (err)
+ break;
+ }
+
+ if (!err) {
+ /*
+ * Drivers using legacy sta_add/sta_remove callbacks only
+ * get uploaded set to true after sta_add is called.
+ */
+ if (!local->ops->sta_add)
+ sta->uploaded = true;
+ return 0;
+ }
+
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ printk(KERN_DEBUG
+ "%s: failed to move IBSS STA %pM to state %d (%d) - keeping it anyway.\n",
+ sdata->name, sta->sta.addr, state + 1, err);
+ err = 0;
+ }
+
+ /* unwind on error */
+ for (; state > IEEE80211_STA_NOTEXIST; state--)
+ WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1));
+
+ return err;
+}
+
/*
* should be called with sta_mtx locked
* this function replaces the mutex lock
@@ -362,70 +354,43 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct sta_info *exist_sta;
- bool dummy_reinsert = false;
+ struct station_info sinfo;
int err = 0;
lockdep_assert_held(&local->sta_mtx);
- /*
- * check if STA exists already.
- * only accept a scenario of a second call to sta_info_insert_finish
- * with a dummy station entry that was inserted earlier
- * in that case - assume that the dummy station flag should
- * be removed.
- */
- exist_sta = sta_info_get_bss_rx(sdata, sta->sta.addr);
- if (exist_sta) {
- if (exist_sta == sta && sta->dummy) {
- dummy_reinsert = true;
- } else {
- err = -EEXIST;
- goto out_err;
- }
+ /* check if STA exists already */
+ if (sta_info_get_bss(sdata, sta->sta.addr)) {
+ err = -EEXIST;
+ goto out_err;
}
- if (!sta->dummy || dummy_reinsert) {
- /* notify driver */
- err = drv_sta_add(local, sdata, &sta->sta);
- if (err) {
- if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
- goto out_err;
- printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
- "driver (%d) - keeping it anyway.\n",
- sdata->name, sta->sta.addr, err);
- } else
- sta->uploaded = true;
- }
+ /* notify driver */
+ err = sta_info_insert_drv_state(local, sdata, sta);
+ if (err)
+ goto out_err;
- if (!dummy_reinsert) {
- local->num_sta++;
- local->sta_generation++;
- smp_mb();
+ local->num_sta++;
+ local->sta_generation++;
+ smp_mb();
- /* make the station visible */
- sta_info_hash_add(local, sta);
+ /* make the station visible */
+ sta_info_hash_add(local, sta);
- list_add(&sta->list, &local->sta_list);
- } else {
- sta->dummy = false;
- }
+ list_add(&sta->list, &local->sta_list);
- if (!sta->dummy) {
- struct station_info sinfo;
+ set_sta_flag(sta, WLAN_STA_INSERTED);
- ieee80211_sta_debugfs_add(sta);
- rate_control_add_sta_debugfs(sta);
+ ieee80211_sta_debugfs_add(sta);
+ rate_control_add_sta_debugfs(sta);
- memset(&sinfo, 0, sizeof(sinfo));
- sinfo.filled = 0;
- sinfo.generation = local->sta_generation;
- cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
- }
+ memset(&sinfo, 0, sizeof(sinfo));
+ sinfo.filled = 0;
+ sinfo.generation = local->sta_generation;
+ cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Inserted %sSTA %pM\n",
- sta->dummy ? "dummy " : "", sta->sta.addr);
+ wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
/* move reference to rcu-protected */
@@ -477,25 +442,6 @@ int sta_info_insert(struct sta_info *sta)
return err;
}
-/* Caller must hold sta->local->sta_mtx */
-int sta_info_reinsert(struct sta_info *sta)
-{
- struct ieee80211_local *local = sta->local;
- int err = 0;
-
- err = sta_info_insert_check(sta);
- if (err) {
- mutex_unlock(&local->sta_mtx);
- return err;
- }
-
- might_sleep();
-
- err = sta_info_insert_finish(sta);
- rcu_read_unlock();
- return err;
-}
-
static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
{
/*
@@ -711,7 +657,7 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
return have_buffered;
}
-static int __must_check __sta_info_destroy(struct sta_info *sta)
+int __must_check __sta_info_destroy(struct sta_info *sta)
{
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
@@ -726,6 +672,8 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
local = sta->local;
sdata = sta->sdata;
+ lockdep_assert_held(&local->sta_mtx);
+
/*
* Before removing the station from the driver and
* rate control, it might still start new aggregation
@@ -750,33 +698,24 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
sta->dead = true;
- if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
- test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
- BUG_ON(!sdata->bss);
-
- clear_sta_flag(sta, WLAN_STA_PS_STA);
- clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
-
- atomic_dec(&sdata->bss->num_sta_ps);
- sta_info_recalc_tim(sta);
- }
-
local->num_sta--;
local->sta_generation++;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
- while (sta->sta_state > IEEE80211_STA_NONE)
- sta_info_move_state(sta, sta->sta_state - 1);
+ while (sta->sta_state > IEEE80211_STA_NONE) {
+ ret = sta_info_move_state(sta, sta->sta_state - 1);
+ if (ret) {
+ WARN_ON_ONCE(1);
+ break;
+ }
+ }
if (sta->uploaded) {
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
- drv_sta_remove(local, sdata, &sta->sta);
- sdata = sta->sdata;
+ ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE,
+ IEEE80211_STA_NOTEXIST);
+ WARN_ON_ONCE(ret != 0);
}
/*
@@ -787,6 +726,15 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
*/
synchronize_rcu();
+ if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+ BUG_ON(!sdata->bss);
+
+ clear_sta_flag(sta, WLAN_STA_PS_STA);
+
+ atomic_dec(&sdata->bss->num_sta_ps);
+ sta_info_recalc_tim(sta);
+ }
+
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
__skb_queue_purge(&sta->ps_tx_buf[ac]);
@@ -815,35 +763,20 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
}
#endif
- /* There could be some memory leaks because of ampdu tx pending queue
- * not being freed before destroying the station info.
- *
- * Make sure that such queues are purged before freeing the station
- * info.
- * TODO: We have to somehow postpone the full destruction
- * until the aggregation stop completes. Refer
- * http://thread.gmane.org/gmane.linux.kernel.wireless.general/81936
+ /*
+ * Destroy aggregation state here. It would be nice to wait for the
+ * driver to finish aggregation stop and then clean up, but for now
+ * drivers have to handle aggregation stop being requested, followed
+ * directly by station destruction.
*/
-
- mutex_lock(&sta->ampdu_mlme.mtx);
-
for (i = 0; i < STA_TID_NUM; i++) {
- tid_tx = rcu_dereference_protected_tid_tx(sta, i);
+ tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
if (!tid_tx)
continue;
- if (skb_queue_len(&tid_tx->pending)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- wiphy_debug(local->hw.wiphy, "TX A-MPDU purging %d "
- "packets for tid=%d\n",
- skb_queue_len(&tid_tx->pending), i);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
- __skb_queue_purge(&tid_tx->pending);
- }
- kfree_rcu(tid_tx, rcu_head);
+ __skb_queue_purge(&tid_tx->pending);
+ kfree(tid_tx);
}
- mutex_unlock(&sta->ampdu_mlme.mtx);
-
sta_info_free(local, sta);
return 0;
@@ -855,7 +788,7 @@ int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
int ret;
mutex_lock(&sdata->local->sta_mtx);
- sta = sta_info_get_rx(sdata, addr);
+ sta = sta_info_get(sdata, addr);
ret = __sta_info_destroy(sta);
mutex_unlock(&sdata->local->sta_mtx);
@@ -869,7 +802,7 @@ int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
int ret;
mutex_lock(&sdata->local->sta_mtx);
- sta = sta_info_get_bss_rx(sdata, addr);
+ sta = sta_info_get_bss(sdata, addr);
ret = __sta_info_destroy(sta);
mutex_unlock(&sdata->local->sta_mtx);
@@ -1009,9 +942,11 @@ EXPORT_SYMBOL(ieee80211_find_sta);
static void clear_sta_ps_flags(void *_sta)
{
struct sta_info *sta = _sta;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
- clear_sta_flag(sta, WLAN_STA_PS_STA);
+ if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA))
+ atomic_dec(&sdata->bss->num_sta_ps);
}
/* powersave support code */
@@ -1410,28 +1345,68 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
}
EXPORT_SYMBOL(ieee80211_sta_set_buffered);
-int sta_info_move_state_checked(struct sta_info *sta,
- enum ieee80211_sta_state new_state)
+int sta_info_move_state(struct sta_info *sta,
+ enum ieee80211_sta_state new_state)
{
might_sleep();
if (sta->sta_state == new_state)
return 0;
+ /* check allowed transitions first */
+
+ switch (new_state) {
+ case IEEE80211_STA_NONE:
+ if (sta->sta_state != IEEE80211_STA_AUTH)
+ return -EINVAL;
+ break;
+ case IEEE80211_STA_AUTH:
+ if (sta->sta_state != IEEE80211_STA_NONE &&
+ sta->sta_state != IEEE80211_STA_ASSOC)
+ return -EINVAL;
+ break;
+ case IEEE80211_STA_ASSOC:
+ if (sta->sta_state != IEEE80211_STA_AUTH &&
+ sta->sta_state != IEEE80211_STA_AUTHORIZED)
+ return -EINVAL;
+ break;
+ case IEEE80211_STA_AUTHORIZED:
+ if (sta->sta_state != IEEE80211_STA_ASSOC)
+ return -EINVAL;
+ break;
+ default:
+ WARN(1, "invalid state %d", new_state);
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ printk(KERN_DEBUG "%s: moving STA %pM to state %d\n",
+ sta->sdata->name, sta->sta.addr, new_state);
+#endif
+
+ /*
+ * notify the driver before the actual changes so it can
+ * fail the transition
+ */
+ if (test_sta_flag(sta, WLAN_STA_INSERTED)) {
+ int err = drv_sta_state(sta->local, sta->sdata, sta,
+ sta->sta_state, new_state);
+ if (err)
+ return err;
+ }
+
+ /* reflect the change in all state variables */
+
switch (new_state) {
case IEEE80211_STA_NONE:
if (sta->sta_state == IEEE80211_STA_AUTH)
clear_bit(WLAN_STA_AUTH, &sta->_flags);
- else
- return -EINVAL;
break;
case IEEE80211_STA_AUTH:
if (sta->sta_state == IEEE80211_STA_NONE)
set_bit(WLAN_STA_AUTH, &sta->_flags);
else if (sta->sta_state == IEEE80211_STA_ASSOC)
clear_bit(WLAN_STA_ASSOC, &sta->_flags);
- else
- return -EINVAL;
break;
case IEEE80211_STA_ASSOC:
if (sta->sta_state == IEEE80211_STA_AUTH) {
@@ -1440,24 +1415,19 @@ int sta_info_move_state_checked(struct sta_info *sta,
if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
atomic_dec(&sta->sdata->u.ap.num_sta_authorized);
clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
- } else
- return -EINVAL;
+ }
break;
case IEEE80211_STA_AUTHORIZED:
if (sta->sta_state == IEEE80211_STA_ASSOC) {
if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
atomic_inc(&sta->sdata->u.ap.num_sta_authorized);
set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
- } else
- return -EINVAL;
+ }
break;
default:
- WARN(1, "invalid state %d", new_state);
- return -EINVAL;
+ break;
}
- printk(KERN_DEBUG "%s: moving STA %pM to state %d\n",
- sta->sdata->name, sta->sta.addr, new_state);
sta->sta_state = new_state;
return 0;
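
sta_info_insert_drv_state() above walks the driver through the station states one step at a time and, if the driver refuses a step, unwinds the steps already applied. A userspace sketch of that replay/unwind loop follows; the state names and the failing fake callback (a stand-in for drv_sta_state()) are illustrative only.

#include <stdio.h>

enum sta_state {
        STA_NOTEXIST,
        STA_NONE,
        STA_AUTH,
        STA_ASSOC,
        STA_AUTHORIZED,
};

/* stand-in for drv_sta_state(); refuses the ASSOC -> AUTHORIZED step */
static int drv_set_state(enum sta_state old_state, enum sta_state new_state)
{
        printf("driver: %d -> %d\n", old_state, new_state);
        return new_state == STA_AUTHORIZED ? -1 : 0;
}

static int replay_states(enum sta_state target)
{
        enum sta_state state;
        int err = 0;

        for (state = STA_NOTEXIST; state < target; state++) {
                err = drv_set_state(state, state + 1);
                if (err)
                        break;
        }
        if (!err)
                return 0;

        /* unwind on error: step back down to NOTEXIST */
        for (; state > STA_NOTEXIST; state--)
                drv_set_state(state, state - 1);

        return err;
}

int main(void)
{
        return replay_states(STA_AUTHORIZED) ? 1 : 0;
}
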
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index bfed851d0d36..23a97c9dc042 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -52,6 +52,7 @@
* @WLAN_STA_SP: Station is in a service period, so don't try to
* reply to other uAPSD trigger frames or PS-Poll.
* @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame.
+ * @WLAN_STA_INSERTED: This station is inserted into the hash table.
* @WLAN_STA_RATE_CONTROL: rate control was initialized for this station.
*/
enum ieee80211_sta_info_flags {
@@ -72,17 +73,10 @@ enum ieee80211_sta_info_flags {
WLAN_STA_UAPSD,
WLAN_STA_SP,
WLAN_STA_4ADDR_EVENT,
+ WLAN_STA_INSERTED,
WLAN_STA_RATE_CONTROL,
};
-enum ieee80211_sta_state {
- /* NOTE: These need to be ordered correctly! */
- IEEE80211_STA_NONE,
- IEEE80211_STA_AUTH,
- IEEE80211_STA_ASSOC,
- IEEE80211_STA_AUTHORIZED,
-};
-
#define STA_TID_NUM 16
#define ADDBA_RESP_INTERVAL HZ
#define HT_AGG_MAX_RETRIES 15
@@ -273,8 +267,6 @@ struct sta_ampdu_mlme {
* @dead: set to true when sta is unlinked
* @uploaded: set to true when sta is uploaded to the driver
* @lost_packets: number of consecutive lost packets
- * @dummy: indicate a dummy station created for receiving
- * EAP frames before association
* @sta: station information we share with the driver
* @sta_state: duplicates information about station state (for debug)
* @beacon_loss_count: number of times beacon loss has triggered
@@ -372,9 +364,6 @@ struct sta_info {
unsigned int lost_packets;
unsigned int beacon_loss_count;
- /* should be right in front of sta to be in the same cache line */
- bool dummy;
-
/* keep last! */
struct ieee80211_sta sta;
};
@@ -429,13 +418,17 @@ static inline int test_and_set_sta_flag(struct sta_info *sta,
return test_and_set_bit(flag, &sta->_flags);
}
-int sta_info_move_state_checked(struct sta_info *sta,
- enum ieee80211_sta_state new_state);
+int sta_info_move_state(struct sta_info *sta,
+ enum ieee80211_sta_state new_state);
-static inline void sta_info_move_state(struct sta_info *sta,
- enum ieee80211_sta_state new_state)
+static inline void sta_info_pre_move_state(struct sta_info *sta,
+ enum ieee80211_sta_state new_state)
{
- int ret = sta_info_move_state_checked(sta, new_state);
+ int ret;
+
+ WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED));
+
+ ret = sta_info_move_state(sta, new_state);
WARN_ON_ONCE(ret);
}
@@ -472,15 +465,9 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
-struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
- const u8 *addr);
-
struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
-struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
- const u8 *addr);
-
static inline
void for_each_sta_info_type_check(struct ieee80211_local *local,
const u8 *addr,
@@ -489,23 +476,7 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
{
}
-#define for_each_sta_info(local, _addr, _sta, nxt) \
- for ( /* initialise loop */ \
- _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
- nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
- /* typecheck */ \
- for_each_sta_info_type_check(local, (_addr), _sta, nxt),\
- /* continue condition */ \
- _sta; \
- /* advance loop */ \
- _sta = nxt, \
- nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
- ) \
- /* run code only if address matches and it's not a dummy sta */ \
- if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0 && \
- !_sta->dummy)
-
-#define for_each_sta_info_rx(local, _addr, _sta, nxt) \
+#define for_each_sta_info(local, _addr, _sta, nxt) \
for ( /* initialise loop */ \
_sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
@@ -544,8 +515,8 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta);
*/
int sta_info_insert(struct sta_info *sta);
int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
-int sta_info_reinsert(struct sta_info *sta);
+int __must_check __sta_info_destroy(struct sta_info *sta);
int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 30c265c98f73..d67f0b967f8a 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -350,7 +350,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
bool send_to_cooked;
bool acked;
struct ieee80211_bar *bar;
- u16 tid;
int rtap_len;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
@@ -412,7 +411,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
}
if (!acked && ieee80211_is_back_req(fc)) {
- u16 control;
+ u16 tid, control;
/*
* BAR failed, store the last SSN and retry sending
@@ -516,7 +515,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (ieee80211_is_nullfunc(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control)) {
- bool acked = info->flags & IEEE80211_TX_STAT_ACK;
+ acked = info->flags & IEEE80211_TX_STAT_ACK;
+
cfg80211_probe_status(skb->dev, hdr->addr1,
cookie, acked, GFP_ATOMIC);
} else {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e05667cd5e76..1be0ca2b5936 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -635,6 +635,9 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
txrc.max_rate_idx = -1;
else
txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
+ memcpy(txrc.rate_idx_mcs_mask,
+ tx->sdata->rc_rateidx_mcs_mask[tx->channel->band],
+ sizeof(txrc.rate_idx_mcs_mask));
txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
@@ -2431,6 +2434,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
txrc.max_rate_idx = -1;
else
txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
+ memcpy(txrc.rate_idx_mcs_mask, sdata->rc_rateidx_mcs_mask[band],
+ sizeof(txrc.rate_idx_mcs_mask));
txrc.bss = true;
rate_control_get_rate(sdata, NULL, &txrc);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 9919892575f4..264397aee811 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -862,8 +862,8 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg,
- u8 *extra, size_t extra_len, const u8 *bssid,
- const u8 *key, u8 key_len, u8 key_idx)
+ u8 *extra, size_t extra_len, const u8 *da,
+ const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
@@ -881,7 +881,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
memset(mgmt, 0, 24 + 6);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_AUTH);
- memcpy(mgmt->da, bssid, ETH_ALEN);
+ memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, bssid, ETH_ALEN);
mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
@@ -1185,13 +1185,12 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
if (sta->uploaded) {
- sdata = sta->sdata;
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
+ enum ieee80211_sta_state state;
- WARN_ON(drv_sta_add(local, sdata, &sta->sta));
+ for (state = IEEE80211_STA_NOTEXIST;
+ state < sta->sta_state - 1; state++)
+ WARN_ON(drv_sta_state(local, sta->sdata, sta,
+ state, state + 1));
}
}
mutex_unlock(&local->sta_mtx);
@@ -1272,6 +1271,21 @@ int ieee80211_reconfig(struct ieee80211_local *local)
ieee80211_recalc_ps(local, -1);
/*
+ * The sta might be in psm against the ap (e.g. because
+ * this was the state before a hw restart), so we
+ * explicitly send a null packet in order to make sure
+ * it'll sync against the ap (and get out of psm).
+ */
+ if (!(local->hw.conf.flags & IEEE80211_CONF_PS)) {
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ continue;
+
+ ieee80211_send_nullfunc(local, sdata, 0);
+ }
+ }
+
+ /*
* Clear the WLAN_STA_BLOCK_BA flag so new aggregation
* sessions can be established after a resume.
*
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index c6dd01a05291..c6e230efa049 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -27,16 +27,9 @@
#include "rate.h"
#include "driver-ops.h"
-#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
-#define IEEE80211_AUTH_MAX_TRIES 3
-#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
-#define IEEE80211_ASSOC_MAX_TRIES 3
-
enum work_action {
- WORK_ACT_MISMATCH,
WORK_ACT_NONE,
WORK_ACT_TIMEOUT,
- WORK_ACT_DONE,
};
@@ -71,464 +64,6 @@ void free_work(struct ieee80211_work *wk)
kfree_rcu(wk, rcu_head);
}
-static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
- struct ieee80211_supported_band *sband,
- u32 *rates)
-{
- int i, j, count;
- *rates = 0;
- count = 0;
- for (i = 0; i < supp_rates_len; i++) {
- int rate = (supp_rates[i] & 0x7F) * 5;
-
- for (j = 0; j < sband->n_bitrates; j++)
- if (sband->bitrates[j].bitrate == rate) {
- *rates |= BIT(j);
- count++;
- break;
- }
- }
-
- return count;
-}
-
-/* frame sending functions */
-
-static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, const u8 *ht_info_ie,
- struct ieee80211_supported_band *sband,
- struct ieee80211_channel *channel,
- enum ieee80211_smps_mode smps)
-{
- struct ieee80211_ht_info *ht_info;
- u8 *pos;
- u32 flags = channel->flags;
- u16 cap;
- struct ieee80211_sta_ht_cap ht_cap;
-
- BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
-
- if (!sband->ht_cap.ht_supported)
- return;
-
- if (!ht_info_ie)
- return;
-
- if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
- return;
-
- memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
- ieee80211_apply_htcap_overrides(sdata, &ht_cap);
-
- ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
-
- /* determine capability flags */
- cap = ht_cap.cap;
-
- switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
- cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- cap &= ~IEEE80211_HT_CAP_SGI_40;
- }
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
- cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- cap &= ~IEEE80211_HT_CAP_SGI_40;
- }
- break;
- }
-
- /* set SM PS mode properly */
- cap &= ~IEEE80211_HT_CAP_SM_PS;
- switch (smps) {
- case IEEE80211_SMPS_AUTOMATIC:
- case IEEE80211_SMPS_NUM_MODES:
- WARN_ON(1);
- case IEEE80211_SMPS_OFF:
- cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
- IEEE80211_HT_CAP_SM_PS_SHIFT;
- break;
- case IEEE80211_SMPS_STATIC:
- cap |= WLAN_HT_CAP_SM_PS_STATIC <<
- IEEE80211_HT_CAP_SM_PS_SHIFT;
- break;
- case IEEE80211_SMPS_DYNAMIC:
- cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
- IEEE80211_HT_CAP_SM_PS_SHIFT;
- break;
- }
-
- /* reserve and fill IE */
- pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
- ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
-}
-
-static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_work *wk)
-{
- struct ieee80211_local *local = sdata->local;
- struct sk_buff *skb;
- struct ieee80211_mgmt *mgmt;
- u8 *pos, qos_info;
- size_t offset = 0, noffset;
- int i, count, rates_len, supp_rates_len;
- u16 capab;
- struct ieee80211_supported_band *sband;
- u32 rates = 0;
-
- sband = local->hw.wiphy->bands[wk->chan->band];
-
- if (wk->assoc.supp_rates_len) {
- /*
- * Get all rates supported by the device and the AP as
- * some APs don't like getting a superset of their rates
- * in the association request (e.g. D-Link DAP 1353 in
- * b-only mode)...
- */
- rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
- wk->assoc.supp_rates_len,
- sband, &rates);
- } else {
- /*
- * In case AP not provide any supported rates information
- * before association, we send information element(s) with
- * all rates that we support.
- */
- rates = ~0;
- rates_len = sband->n_bitrates;
- }
-
- skb = alloc_skb(local->hw.extra_tx_headroom +
- sizeof(*mgmt) + /* bit too much but doesn't matter */
- 2 + wk->assoc.ssid_len + /* SSID */
- 4 + rates_len + /* (extended) rates */
- 4 + /* power capability */
- 2 + 2 * sband->n_channels + /* supported channels */
- 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
- wk->ie_len + /* extra IEs */
- 9, /* WMM */
- GFP_KERNEL);
- if (!skb)
- return;
-
- skb_reserve(skb, local->hw.extra_tx_headroom);
-
- capab = WLAN_CAPABILITY_ESS;
-
- if (sband->band == IEEE80211_BAND_2GHZ) {
- if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
- capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
- if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
- capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
- }
-
- if (wk->assoc.capability & WLAN_CAPABILITY_PRIVACY)
- capab |= WLAN_CAPABILITY_PRIVACY;
-
- if ((wk->assoc.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
- (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
- capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
-
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
- memset(mgmt, 0, 24);
- memcpy(mgmt->da, wk->filter_ta, ETH_ALEN);
- memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
- memcpy(mgmt->bssid, wk->filter_ta, ETH_ALEN);
-
- if (!is_zero_ether_addr(wk->assoc.prev_bssid)) {
- skb_put(skb, 10);
- mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_REASSOC_REQ);
- mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
- mgmt->u.reassoc_req.listen_interval =
- cpu_to_le16(local->hw.conf.listen_interval);
- memcpy(mgmt->u.reassoc_req.current_ap, wk->assoc.prev_bssid,
- ETH_ALEN);
- } else {
- skb_put(skb, 4);
- mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_ASSOC_REQ);
- mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
- mgmt->u.assoc_req.listen_interval =
- cpu_to_le16(local->hw.conf.listen_interval);
- }
-
- /* SSID */
- pos = skb_put(skb, 2 + wk->assoc.ssid_len);
- *pos++ = WLAN_EID_SSID;
- *pos++ = wk->assoc.ssid_len;
- memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len);
-
- /* add all rates which were marked to be used above */
- supp_rates_len = rates_len;
- if (supp_rates_len > 8)
- supp_rates_len = 8;
-
- pos = skb_put(skb, supp_rates_len + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = supp_rates_len;
-
- count = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- if (++count == 8)
- break;
- }
- }
-
- if (rates_len > count) {
- pos = skb_put(skb, rates_len - count + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = rates_len - count;
-
- for (i++; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- }
- }
- }
-
- if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
- /* 1. power capabilities */
- pos = skb_put(skb, 4);
- *pos++ = WLAN_EID_PWR_CAPABILITY;
- *pos++ = 2;
- *pos++ = 0; /* min tx power */
- *pos++ = wk->chan->max_power; /* max tx power */
-
- /* 2. supported channels */
- /* TODO: get this in reg domain format */
- pos = skb_put(skb, 2 * sband->n_channels + 2);
- *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
- *pos++ = 2 * sband->n_channels;
- for (i = 0; i < sband->n_channels; i++) {
- *pos++ = ieee80211_frequency_to_channel(
- sband->channels[i].center_freq);
- *pos++ = 1; /* one channel in the subband*/
- }
- }
-
- /* if present, add any custom IEs that go before HT */
- if (wk->ie_len && wk->ie) {
- static const u8 before_ht[] = {
- WLAN_EID_SSID,
- WLAN_EID_SUPP_RATES,
- WLAN_EID_EXT_SUPP_RATES,
- WLAN_EID_PWR_CAPABILITY,
- WLAN_EID_SUPPORTED_CHANNELS,
- WLAN_EID_RSN,
- WLAN_EID_QOS_CAPA,
- WLAN_EID_RRM_ENABLED_CAPABILITIES,
- WLAN_EID_MOBILITY_DOMAIN,
- WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
- };
- noffset = ieee80211_ie_split(wk->ie, wk->ie_len,
- before_ht, ARRAY_SIZE(before_ht),
- offset);
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, wk->ie + offset, noffset - offset);
- offset = noffset;
- }
-
- if (wk->assoc.use_11n && wk->assoc.wmm_used &&
- local->hw.queues >= 4)
- ieee80211_add_ht_ie(sdata, skb, wk->assoc.ht_information_ie,
- sband, wk->chan, wk->assoc.smps);
-
- /* if present, add any custom non-vendor IEs that go after HT */
- if (wk->ie_len && wk->ie) {
- noffset = ieee80211_ie_split_vendor(wk->ie, wk->ie_len,
- offset);
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, wk->ie + offset, noffset - offset);
- offset = noffset;
- }
-
- if (wk->assoc.wmm_used && local->hw.queues >= 4) {
- if (wk->assoc.uapsd_used) {
- qos_info = local->uapsd_queues;
- qos_info |= (local->uapsd_max_sp_len <<
- IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
- } else {
- qos_info = 0;
- }
-
- pos = skb_put(skb, 9);
- *pos++ = WLAN_EID_VENDOR_SPECIFIC;
- *pos++ = 7; /* len */
- *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
- *pos++ = 0x50;
- *pos++ = 0xf2;
- *pos++ = 2; /* WME */
- *pos++ = 0; /* WME info */
- *pos++ = 1; /* WME ver */
- *pos++ = qos_info;
- }
-
- /* add any remaining custom (i.e. vendor specific here) IEs */
- if (wk->ie_len && wk->ie) {
- noffset = wk->ie_len;
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, wk->ie + offset, noffset - offset);
- }
-
- IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- ieee80211_tx_skb(sdata, skb);
-}
-
-static void ieee80211_remove_auth_bss(struct ieee80211_local *local,
- struct ieee80211_work *wk)
-{
- struct cfg80211_bss *cbss;
- u16 capa_val = WLAN_CAPABILITY_ESS;
-
- if (wk->probe_auth.privacy)
- capa_val |= WLAN_CAPABILITY_PRIVACY;
-
- cbss = cfg80211_get_bss(local->hw.wiphy, wk->chan, wk->filter_ta,
- wk->probe_auth.ssid, wk->probe_auth.ssid_len,
- WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
- capa_val);
- if (!cbss)
- return;
-
- cfg80211_unlink_bss(local->hw.wiphy, cbss);
- cfg80211_put_bss(cbss);
-}
-
-static enum work_action __must_check
-ieee80211_direct_probe(struct ieee80211_work *wk)
-{
- struct ieee80211_sub_if_data *sdata = wk->sdata;
- struct ieee80211_local *local = sdata->local;
-
- if (!wk->probe_auth.synced) {
- int ret = drv_tx_sync(local, sdata, wk->filter_ta,
- IEEE80211_TX_SYNC_AUTH);
- if (ret)
- return WORK_ACT_TIMEOUT;
- }
- wk->probe_auth.synced = true;
-
- wk->probe_auth.tries++;
- if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
- printk(KERN_DEBUG "%s: direct probe to %pM timed out\n",
- sdata->name, wk->filter_ta);
-
- /*
- * Most likely AP is not in the range so remove the
- * bss struct for that AP.
- */
- ieee80211_remove_auth_bss(local, wk);
-
- return WORK_ACT_TIMEOUT;
- }
-
- printk(KERN_DEBUG "%s: direct probe to %pM (try %d/%i)\n",
- sdata->name, wk->filter_ta, wk->probe_auth.tries,
- IEEE80211_AUTH_MAX_TRIES);
-
- /*
- * Direct probe is sent to broadcast address as some APs
- * will not answer to direct packet in unassociated state.
- */
- ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
- wk->probe_auth.ssid_len, NULL, 0,
- (u32) -1, true, false);
-
- wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
- run_again(local, wk->timeout);
-
- return WORK_ACT_NONE;
-}
-
-
-static enum work_action __must_check
-ieee80211_authenticate(struct ieee80211_work *wk)
-{
- struct ieee80211_sub_if_data *sdata = wk->sdata;
- struct ieee80211_local *local = sdata->local;
-
- if (!wk->probe_auth.synced) {
- int ret = drv_tx_sync(local, sdata, wk->filter_ta,
- IEEE80211_TX_SYNC_AUTH);
- if (ret)
- return WORK_ACT_TIMEOUT;
- }
- wk->probe_auth.synced = true;
-
- wk->probe_auth.tries++;
- if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
- printk(KERN_DEBUG "%s: authentication with %pM"
- " timed out\n", sdata->name, wk->filter_ta);
-
- /*
- * Most likely AP is not in the range so remove the
- * bss struct for that AP.
- */
- ieee80211_remove_auth_bss(local, wk);
-
- return WORK_ACT_TIMEOUT;
- }
-
- printk(KERN_DEBUG "%s: authenticate with %pM (try %d)\n",
- sdata->name, wk->filter_ta, wk->probe_auth.tries);
-
- ieee80211_send_auth(sdata, 1, wk->probe_auth.algorithm, wk->ie,
- wk->ie_len, wk->filter_ta, NULL, 0, 0);
- wk->probe_auth.transaction = 2;
-
- wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
- run_again(local, wk->timeout);
-
- return WORK_ACT_NONE;
-}
-
-static enum work_action __must_check
-ieee80211_associate(struct ieee80211_work *wk)
-{
- struct ieee80211_sub_if_data *sdata = wk->sdata;
- struct ieee80211_local *local = sdata->local;
-
- if (!wk->assoc.synced) {
- int ret = drv_tx_sync(local, sdata, wk->filter_ta,
- IEEE80211_TX_SYNC_ASSOC);
- if (ret)
- return WORK_ACT_TIMEOUT;
- }
- wk->assoc.synced = true;
-
- wk->assoc.tries++;
- if (wk->assoc.tries > IEEE80211_ASSOC_MAX_TRIES) {
- printk(KERN_DEBUG "%s: association with %pM"
- " timed out\n",
- sdata->name, wk->filter_ta);
-
- /*
- * Most likely AP is not in the range so remove the
- * bss struct for that AP.
- */
- if (wk->assoc.bss)
- cfg80211_unlink_bss(local->hw.wiphy, wk->assoc.bss);
-
- return WORK_ACT_TIMEOUT;
- }
-
- printk(KERN_DEBUG "%s: associate with %pM (try %d)\n",
- sdata->name, wk->filter_ta, wk->assoc.tries);
- ieee80211_send_assoc(sdata, wk);
-
- wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
- run_again(local, wk->timeout);
-
- return WORK_ACT_NONE;
-}
-
static enum work_action __must_check
ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
{
@@ -568,300 +103,6 @@ ieee80211_offchannel_tx(struct ieee80211_work *wk)
return WORK_ACT_TIMEOUT;
}
-static enum work_action __must_check
-ieee80211_assoc_beacon_wait(struct ieee80211_work *wk)
-{
- if (wk->started)
- return WORK_ACT_TIMEOUT;
-
- /*
- * Wait up to one beacon interval ...
- * should this be more if we miss one?
- */
- printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
- wk->sdata->name, wk->filter_ta);
- wk->timeout = TU_TO_EXP_TIME(wk->assoc.bss->beacon_interval);
- return WORK_ACT_NONE;
-}
-
-static void ieee80211_auth_challenge(struct ieee80211_work *wk,
- struct ieee80211_mgmt *mgmt,
- size_t len)
-{
- struct ieee80211_sub_if_data *sdata = wk->sdata;
- u8 *pos;
- struct ieee802_11_elems elems;
-
- pos = mgmt->u.auth.variable;
- ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
- if (!elems.challenge)
- return;
- ieee80211_send_auth(sdata, 3, wk->probe_auth.algorithm,
- elems.challenge - 2, elems.challenge_len + 2,
- wk->filter_ta, wk->probe_auth.key,
- wk->probe_auth.key_len, wk->probe_auth.key_idx);
- wk->probe_auth.transaction = 4;
-}
-
-static enum work_action __must_check
-ieee80211_rx_mgmt_auth(struct ieee80211_work *wk,
- struct ieee80211_mgmt *mgmt, size_t len)
-{
- u16 auth_alg, auth_transaction, status_code;
-
- if (wk->type != IEEE80211_WORK_AUTH)
- return WORK_ACT_MISMATCH;
-
- if (len < 24 + 6)
- return WORK_ACT_NONE;
-
- auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
- auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
- status_code = le16_to_cpu(mgmt->u.auth.status_code);
-
- if (auth_alg != wk->probe_auth.algorithm ||
- auth_transaction != wk->probe_auth.transaction)
- return WORK_ACT_NONE;
-
- if (status_code != WLAN_STATUS_SUCCESS) {
- printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
- wk->sdata->name, mgmt->sa, status_code);
- return WORK_ACT_DONE;
- }
-
- switch (wk->probe_auth.algorithm) {
- case WLAN_AUTH_OPEN:
- case WLAN_AUTH_LEAP:
- case WLAN_AUTH_FT:
- break;
- case WLAN_AUTH_SHARED_KEY:
- if (wk->probe_auth.transaction != 4) {
- ieee80211_auth_challenge(wk, mgmt, len);
- /* need another frame */
- return WORK_ACT_NONE;
- }
- break;
- default:
- WARN_ON(1);
- return WORK_ACT_NONE;
- }
-
- printk(KERN_DEBUG "%s: authenticated\n", wk->sdata->name);
- return WORK_ACT_DONE;
-}
-
-static enum work_action __must_check
-ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk,
- struct ieee80211_mgmt *mgmt, size_t len,
- bool reassoc)
-{
- struct ieee80211_sub_if_data *sdata = wk->sdata;
- struct ieee80211_local *local = sdata->local;
- u16 capab_info, status_code, aid;
- struct ieee802_11_elems elems;
- u8 *pos;
-
- if (wk->type != IEEE80211_WORK_ASSOC)
- return WORK_ACT_MISMATCH;
-
- /*
- * AssocResp and ReassocResp have identical structure, so process both
- * of them in this function.
- */
-
- if (len < 24 + 6)
- return WORK_ACT_NONE;
-
- capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
- status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
- aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
-
- printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
- "status=%d aid=%d)\n",
- sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
- capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
-
- pos = mgmt->u.assoc_resp.variable;
- ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
-
- if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
- elems.timeout_int && elems.timeout_int_len == 5 &&
- elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
- u32 tu, ms;
- tu = get_unaligned_le32(elems.timeout_int + 1);
- ms = tu * 1024 / 1000;
- printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
- "comeback duration %u TU (%u ms)\n",
- sdata->name, mgmt->sa, tu, ms);
- wk->timeout = jiffies + msecs_to_jiffies(ms);
- if (ms > IEEE80211_ASSOC_TIMEOUT)
- run_again(local, wk->timeout);
- return WORK_ACT_NONE;
- }
-
- if (status_code != WLAN_STATUS_SUCCESS)
- printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
- sdata->name, mgmt->sa, status_code);
- else
- printk(KERN_DEBUG "%s: associated\n", sdata->name);
-
- return WORK_ACT_DONE;
-}
-
-static enum work_action __must_check
-ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk,
- struct ieee80211_mgmt *mgmt, size_t len,
- struct ieee80211_rx_status *rx_status)
-{
- struct ieee80211_sub_if_data *sdata = wk->sdata;
- struct ieee80211_local *local = sdata->local;
- size_t baselen;
-
- ASSERT_WORK_MTX(local);
-
- if (wk->type != IEEE80211_WORK_DIRECT_PROBE)
- return WORK_ACT_MISMATCH;
-
- if (len < 24 + 12)
- return WORK_ACT_NONE;
-
- baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
- if (baselen > len)
- return WORK_ACT_NONE;
-
- printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
- return WORK_ACT_DONE;
-}
-
-static enum work_action __must_check
-ieee80211_rx_mgmt_beacon(struct ieee80211_work *wk,
- struct ieee80211_mgmt *mgmt, size_t len)
-{
- struct ieee80211_sub_if_data *sdata = wk->sdata;
- struct ieee80211_local *local = sdata->local;
-
- ASSERT_WORK_MTX(local);
-
- if (wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
- return WORK_ACT_MISMATCH;
-
- if (len < 24 + 12)
- return WORK_ACT_NONE;
-
- printk(KERN_DEBUG "%s: beacon received\n", sdata->name);
- return WORK_ACT_DONE;
-}
-
-static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
- struct sk_buff *skb)
-{
- struct ieee80211_rx_status *rx_status;
- struct ieee80211_mgmt *mgmt;
- struct ieee80211_work *wk;
- enum work_action rma = WORK_ACT_NONE;
- u16 fc;
-
- rx_status = (struct ieee80211_rx_status *) skb->cb;
- mgmt = (struct ieee80211_mgmt *) skb->data;
- fc = le16_to_cpu(mgmt->frame_control);
-
- mutex_lock(&local->mtx);
-
- list_for_each_entry(wk, &local->work_list, list) {
- const u8 *bssid = NULL;
-
- switch (wk->type) {
- case IEEE80211_WORK_DIRECT_PROBE:
- case IEEE80211_WORK_AUTH:
- case IEEE80211_WORK_ASSOC:
- case IEEE80211_WORK_ASSOC_BEACON_WAIT:
- bssid = wk->filter_ta;
- break;
- default:
- continue;
- }
-
- /*
- * Before queuing, we already verified mgmt->sa,
- * so this is needed just for matching.
- */
- if (compare_ether_addr(bssid, mgmt->bssid))
- continue;
-
- switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_BEACON:
- rma = ieee80211_rx_mgmt_beacon(wk, mgmt, skb->len);
- break;
- case IEEE80211_STYPE_PROBE_RESP:
- rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len,
- rx_status);
- break;
- case IEEE80211_STYPE_AUTH:
- rma = ieee80211_rx_mgmt_auth(wk, mgmt, skb->len);
- break;
- case IEEE80211_STYPE_ASSOC_RESP:
- rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
- skb->len, false);
- break;
- case IEEE80211_STYPE_REASSOC_RESP:
- rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
- skb->len, true);
- break;
- default:
- WARN_ON(1);
- rma = WORK_ACT_NONE;
- }
-
- /*
- * We've either received an unexpected frame, or we have
- * multiple work items and need to match the frame to the
- * right one.
- */
- if (rma == WORK_ACT_MISMATCH)
- continue;
-
- /*
- * We've processed this frame for that work, so it can't
- * belong to another work struct.
- * NB: this is also required for correctness for 'rma'!
- */
- break;
- }
-
- switch (rma) {
- case WORK_ACT_MISMATCH:
- /* ignore this unmatched frame */
- break;
- case WORK_ACT_NONE:
- break;
- case WORK_ACT_DONE:
- list_del_rcu(&wk->list);
- break;
- default:
- WARN(1, "unexpected: %d", rma);
- }
-
- mutex_unlock(&local->mtx);
-
- if (rma != WORK_ACT_DONE)
- goto out;
-
- switch (wk->done(wk, skb)) {
- case WORK_DONE_DESTROY:
- free_work(wk);
- break;
- case WORK_DONE_REQUEUE:
- synchronize_rcu();
- wk->started = false; /* restart */
- mutex_lock(&local->mtx);
- list_add_tail(&wk->list, &local->work_list);
- mutex_unlock(&local->mtx);
- }
-
- out:
- kfree_skb(skb);
-}
-
static void ieee80211_work_timer(unsigned long data)
{
struct ieee80211_local *local = (void *) data;
@@ -876,7 +117,6 @@ static void ieee80211_work_work(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, work_work);
- struct sk_buff *skb;
struct ieee80211_work *wk, *tmp;
LIST_HEAD(free_work);
enum work_action rma;
@@ -892,10 +132,6 @@ static void ieee80211_work_work(struct work_struct *work)
if (WARN(local->suspended, "work scheduled while going to suspend\n"))
return;
- /* first process frames to avoid timing out while a frame is pending */
- while ((skb = skb_dequeue(&local->work_skb_queue)))
- ieee80211_work_rx_queued_mgmt(local, skb);
-
mutex_lock(&local->mtx);
ieee80211_recalc_idle(local);
@@ -946,24 +182,12 @@ static void ieee80211_work_work(struct work_struct *work)
case IEEE80211_WORK_ABORT:
rma = WORK_ACT_TIMEOUT;
break;
- case IEEE80211_WORK_DIRECT_PROBE:
- rma = ieee80211_direct_probe(wk);
- break;
- case IEEE80211_WORK_AUTH:
- rma = ieee80211_authenticate(wk);
- break;
- case IEEE80211_WORK_ASSOC:
- rma = ieee80211_associate(wk);
- break;
case IEEE80211_WORK_REMAIN_ON_CHANNEL:
rma = ieee80211_remain_on_channel_timeout(wk);
break;
case IEEE80211_WORK_OFFCHANNEL_TX:
rma = ieee80211_offchannel_tx(wk);
break;
- case IEEE80211_WORK_ASSOC_BEACON_WAIT:
- rma = ieee80211_assoc_beacon_wait(wk);
- break;
}
wk->started = started;
@@ -1051,7 +275,6 @@ void ieee80211_work_init(struct ieee80211_local *local)
setup_timer(&local->work_timer, ieee80211_work_timer,
(unsigned long)local);
INIT_WORK(&local->work_work, ieee80211_work_work);
- skb_queue_head_init(&local->work_skb_queue);
}
void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
@@ -1085,43 +308,6 @@ void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
mutex_unlock(&local->mtx);
}
-ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_mgmt *mgmt;
- struct ieee80211_work *wk;
- u16 fc;
-
- if (skb->len < 24)
- return RX_DROP_MONITOR;
-
- mgmt = (struct ieee80211_mgmt *) skb->data;
- fc = le16_to_cpu(mgmt->frame_control);
-
- list_for_each_entry_rcu(wk, &local->work_list, list) {
- if (sdata != wk->sdata)
- continue;
- if (compare_ether_addr(wk->filter_ta, mgmt->sa))
- continue;
- if (compare_ether_addr(wk->filter_ta, mgmt->bssid))
- continue;
-
- switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_AUTH:
- case IEEE80211_STYPE_PROBE_RESP:
- case IEEE80211_STYPE_ASSOC_RESP:
- case IEEE80211_STYPE_REASSOC_RESP:
- case IEEE80211_STYPE_BEACON:
- skb_queue_tail(&local->work_skb_queue, skb);
- ieee80211_queue_work(&local->hw, &local->work_work);
- return RX_QUEUED;
- }
- }
-
- return RX_CONTINUE;
-}
-
static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
struct sk_buff *skb)
{
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 32dbf0fa89db..e7f90e7082b4 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1162,9 +1162,13 @@ ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
if (unlikely(protocol_failed(attr)))
return -IPSET_ERR_PROTOCOL;
- return netlink_dump_start(ctnl, skb, nlh,
- ip_set_dump_start,
- ip_set_dump_done, 0);
+ {
+ struct netlink_dump_control c = {
+ .dump = ip_set_dump_start,
+ .done = ip_set_dump_done,
+ };
+ return netlink_dump_start(ctnl, skb, nlh, &c);
+ }
}
/* Add, del and test */
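The hunk above converts the first caller from the old six-argument netlink_dump_start() to the new struct netlink_dump_control interface, so that further fields can be added without touching every caller again. A minimal sketch of the new calling convention, using hypothetical foo_* names rather than any subsystem touched by this patch:

#include <linux/netlink.h>
#include <net/sock.h>

static int foo_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	return 0;	/* 0 = dump finished; return skb->len while more data remains */
}

static int foo_done(struct netlink_callback *cb)
{
	return 0;	/* optional cleanup hook; .done may also be left NULL */
}

static int foo_get(struct sock *nlsk, struct sk_buff *skb,
		   const struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = foo_dump,
			.done = foo_done,
			/* .data and .min_dump_alloc default to NULL/0 */
		};
		return netlink_dump_start(nlsk, skb, nlh, &c);
	}
	return -EINVAL;
}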
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 30c9d4ca0218..04fb409623d2 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -691,9 +691,18 @@ static int ctnetlink_done(struct netlink_callback *cb)
{
if (cb->args[1])
nf_ct_put((struct nf_conn *)cb->args[1]);
+ if (cb->data)
+ kfree(cb->data);
return 0;
}
+struct ctnetlink_dump_filter {
+ struct {
+ u_int32_t val;
+ u_int32_t mask;
+ } mark;
+};
+
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -703,7 +712,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
struct hlist_nulls_node *n;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u_int8_t l3proto = nfmsg->nfgen_family;
-
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ const struct ctnetlink_dump_filter *filter = cb->data;
+#endif
spin_lock_bh(&nf_conntrack_lock);
last = (struct nf_conn *)cb->args[1];
for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
@@ -723,6 +734,12 @@ restart:
continue;
cb->args[1] = 0;
}
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ if (filter && !((ct->mark & filter->mark.mask) ==
+ filter->mark.val)) {
+ continue;
+ }
+#endif
if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(
@@ -894,6 +911,7 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
[CTA_NAT_DST] = { .type = NLA_NESTED },
[CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
[CTA_ZONE] = { .type = NLA_U16 },
+ [CTA_MARK_MASK] = { .type = NLA_U32 },
};
static int
@@ -977,9 +995,28 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
u16 zone;
int err;
- if (nlh->nlmsg_flags & NLM_F_DUMP)
- return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
- ctnetlink_done, 0);
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = ctnetlink_dump_table,
+ .done = ctnetlink_done,
+ };
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
+ struct ctnetlink_dump_filter *filter;
+
+ filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
+ GFP_ATOMIC);
+ if (filter == NULL)
+ return -ENOMEM;
+
+ filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
+ filter->mark.mask =
+ ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
+ c.data = filter;
+ }
+#endif
+ return netlink_dump_start(ctnl, skb, nlh, &c);
+ }
err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
if (err < 0)
@@ -1836,9 +1873,11 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
int err;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
- return netlink_dump_start(ctnl, skb, nlh,
- ctnetlink_exp_dump_table,
- ctnetlink_exp_done, 0);
+ struct netlink_dump_control c = {
+ .dump = ctnetlink_exp_dump_table,
+ .done = ctnetlink_exp_done,
+ };
+ return netlink_dump_start(ctnl, skb, nlh, &c);
}
err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
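The ctnetlink hunks above also thread an optional mark/mask filter through the dump: it is allocated in the request handler, handed over via control->data, read back as cb->data in the dump callback, and freed in the done callback. A minimal sketch of that lifecycle with hypothetical foo_* names:

#include <linux/netlink.h>
#include <linux/slab.h>

struct foo_dump_filter {		/* hypothetical per-dump state */
	u32 val;
	u32 mask;
};

static int foo_filtered_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct foo_dump_filter *filter = cb->data;

	if (filter) {
		/* skip entries whose mark does not satisfy
		 * (mark & filter->mask) == filter->val, as in
		 * ctnetlink_dump_table() above
		 */
	}
	return 0;			/* dump finished */
}

static int foo_filtered_done(struct netlink_callback *cb)
{
	kfree(cb->data);		/* kfree(NULL) is a no-op */
	return 0;
}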
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 11ba013e47f6..3eb348bfc4fb 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -171,8 +171,10 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
char *acct_name;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
- return netlink_dump_start(nfnl, skb, nlh, nfnl_acct_dump,
- NULL, 0);
+ struct netlink_dump_control c = {
+ .dump = nfnl_acct_dump,
+ };
+ return netlink_dump_start(nfnl, skb, nlh, &c);
}
if (!tb[NFACCT_NAME])
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 629b06182f3f..32bb75324e76 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1645,6 +1645,24 @@ static void netlink_destroy_callback(struct netlink_callback *cb)
kfree(cb);
}
+struct nlmsghdr *
+__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
+{
+ struct nlmsghdr *nlh;
+ int size = NLMSG_LENGTH(len);
+
+ nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
+ nlh->nlmsg_type = type;
+ nlh->nlmsg_len = size;
+ nlh->nlmsg_flags = flags;
+ nlh->nlmsg_pid = pid;
+ nlh->nlmsg_seq = seq;
+ if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
+ memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
+ return nlh;
+}
+EXPORT_SYMBOL(__nlmsg_put);
+
/*
* It looks a bit ugly.
* It would be better to create kernel thread.
@@ -1718,10 +1736,7 @@ errout_skb:
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
- int (*dump)(struct sk_buff *skb,
- struct netlink_callback *),
- int (*done)(struct netlink_callback *),
- u16 min_dump_alloc)
+ struct netlink_dump_control *control)
{
struct netlink_callback *cb;
struct sock *sk;
@@ -1732,10 +1747,11 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
if (cb == NULL)
return -ENOBUFS;
- cb->dump = dump;
- cb->done = done;
+ cb->dump = control->dump;
+ cb->done = control->done;
cb->nlh = nlh;
- cb->min_dump_alloc = min_dump_alloc;
+ cb->data = control->data;
+ cb->min_dump_alloc = control->min_dump_alloc;
atomic_inc(&skb->users);
cb->skb = skb;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index c29d2568c9e0..9f40441d7a7d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -498,6 +498,37 @@ int genl_unregister_family(struct genl_family *family)
}
EXPORT_SYMBOL(genl_unregister_family);
+/**
+ * genlmsg_put - Add generic netlink header to netlink message
+ * @skb: socket buffer holding the message
+ * @pid: netlink pid the message is addressed to
+ * @seq: sequence number (usually the one of the sender)
+ * @family: generic netlink family
+ * @flags: netlink message flags
+ * @cmd: generic netlink command
+ *
+ * Returns pointer to user specific header
+ */
+void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
+ struct genl_family *family, int flags, u8 cmd)
+{
+ struct nlmsghdr *nlh;
+ struct genlmsghdr *hdr;
+
+ nlh = nlmsg_put(skb, pid, seq, family->id, GENL_HDRLEN +
+ family->hdrsize, flags);
+ if (nlh == NULL)
+ return NULL;
+
+ hdr = nlmsg_data(nlh);
+ hdr->cmd = cmd;
+ hdr->version = family->version;
+ hdr->reserved = 0;
+
+ return (char *) hdr + GENL_HDRLEN;
+}
+EXPORT_SYMBOL(genlmsg_put);
+
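With genlmsg_put() now an out-of-line exported helper, a typical caller builds a reply as sketched below; the foo family, command and attribute names are hypothetical and are not part of this patch:

#include <net/genetlink.h>

enum { FOO_ATTR_UNSPEC, FOO_ATTR_VALUE, __FOO_ATTR_MAX };	/* hypothetical */
#define FOO_CMD_GET	1					/* hypothetical */

static struct genl_family foo_genl_family = {
	.id		= GENL_ID_GENERATE,
	.name		= "foo_example",
	.version	= 1,
	.maxattr	= __FOO_ATTR_MAX - 1,
};

static int foo_fill_reply(struct sk_buff *msg, u32 pid, u32 seq, u32 value)
{
	void *hdr;

	hdr = genlmsg_put(msg, pid, seq, &foo_genl_family, 0, FOO_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(msg, FOO_ATTR_VALUE, value))
		goto nla_put_failure;

	return genlmsg_end(msg, hdr);

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}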
static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct genl_ops *ops;
@@ -532,8 +563,13 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EOPNOTSUPP;
genl_unlock();
- err = netlink_dump_start(net->genl_sock, skb, nlh,
- ops->dumpit, ops->done, 0);
+ {
+ struct netlink_dump_control c = {
+ .dump = ops->dumpit,
+ .done = ops->done,
+ };
+ err = netlink_dump_start(net->genl_sock, skb, nlh, &c);
+ }
genl_lock();
return err;
}
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 3ddf6e698df0..6089aca67b14 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -431,15 +431,10 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb);
int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
int n_targets)
{
- int i;
-
pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);
dev->polling = false;
- for (i = 0; i < n_targets; i++)
- targets[i].idx = dev->target_idx++;
-
spin_lock_bh(&dev->targets_lock);
dev->targets_generation++;
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 7650139a1a05..a47e90c7d9d1 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -216,6 +216,39 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
&cmd);
}
+struct nci_rf_discover_select_param {
+ __u8 rf_discovery_id;
+ __u8 rf_protocol;
+};
+
+static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_rf_discover_select_param *param =
+ (struct nci_rf_discover_select_param *)opt;
+ struct nci_rf_discover_select_cmd cmd;
+
+ cmd.rf_discovery_id = param->rf_discovery_id;
+ cmd.rf_protocol = param->rf_protocol;
+
+ switch (cmd.rf_protocol) {
+ case NCI_RF_PROTOCOL_ISO_DEP:
+ cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP;
+ break;
+
+ case NCI_RF_PROTOCOL_NFC_DEP:
+ cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP;
+ break;
+
+ default:
+ cmd.rf_interface = NCI_RF_INTERFACE_FRAME;
+ break;
+ }
+
+ nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
+ sizeof(struct nci_rf_discover_select_cmd),
+ &cmd);
+}
+
static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
struct nci_rf_deactivate_cmd cmd;
@@ -264,6 +297,8 @@ static int nci_open_device(struct nci_dev *ndev)
if (!rc) {
set_bit(NCI_UP, &ndev->flags);
+ nci_clear_target_list(ndev);
+ atomic_set(&ndev->state, NCI_IDLE);
} else {
/* Init failed, cleanup */
skb_queue_purge(&ndev->cmd_q);
@@ -286,6 +321,7 @@ static int nci_close_device(struct nci_dev *ndev)
if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
del_timer_sync(&ndev->cmd_timer);
+ del_timer_sync(&ndev->data_timer);
mutex_unlock(&ndev->req_lock);
return 0;
}
@@ -331,6 +367,15 @@ static void nci_cmd_timer(unsigned long arg)
queue_work(ndev->cmd_wq, &ndev->cmd_work);
}
+/* NCI data exchange timer function */
+static void nci_data_timer(unsigned long arg)
+{
+ struct nci_dev *ndev = (void *) arg;
+
+ set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
+ queue_work(ndev->rx_wq, &ndev->rx_work);
+}
+
static int nci_dev_up(struct nfc_dev *nfc_dev)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
@@ -350,7 +395,8 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
- if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
+ if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
+ (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
pr_err("unable to start poll, since poll is already active\n");
return -EBUSY;
}
@@ -360,8 +406,9 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
return -EBUSY;
}
- if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
- pr_debug("target is active, implicitly deactivate...\n");
+ if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) ||
+ (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
+ pr_debug("target active or w4 select, implicitly deactivate\n");
rc = nci_request(ndev, nci_rf_deactivate_req, 0,
msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
@@ -382,7 +429,8 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
+ if ((atomic_read(&ndev->state) != NCI_DISCOVERY) &&
+ (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
pr_err("unable to stop poll, since poll is not active\n");
return;
}
@@ -395,10 +443,15 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
__u32 protocol)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+ struct nci_rf_discover_select_param param;
+ struct nfc_target *target = NULL;
+ int i;
+ int rc = 0;
pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);
- if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
+ (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
pr_err("there is no available target to activate\n");
return -EINVAL;
}
@@ -408,16 +461,47 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
return -EBUSY;
}
- if (!(ndev->target_available_prots & (1 << protocol))) {
+ for (i = 0; i < ndev->n_targets; i++) {
+ if (ndev->targets[i].idx == target_idx) {
+ target = &ndev->targets[i];
+ break;
+ }
+ }
+
+ if (!target) {
+ pr_err("unable to find the selected target\n");
+ return -EINVAL;
+ }
+
+ if (!(target->supported_protocols & (1 << protocol))) {
pr_err("target does not support the requested protocol 0x%x\n",
protocol);
return -EINVAL;
}
- ndev->target_active_prot = protocol;
- ndev->target_available_prots = 0;
+ if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
+ param.rf_discovery_id = target->idx;
- return 0;
+ if (protocol == NFC_PROTO_JEWEL)
+ param.rf_protocol = NCI_RF_PROTOCOL_T1T;
+ else if (protocol == NFC_PROTO_MIFARE)
+ param.rf_protocol = NCI_RF_PROTOCOL_T2T;
+ else if (protocol == NFC_PROTO_FELICA)
+ param.rf_protocol = NCI_RF_PROTOCOL_T3T;
+ else if (protocol == NFC_PROTO_ISO14443)
+ param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
+ else
+ param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
+
+ rc = nci_request(ndev, nci_rf_discover_select_req,
+ (unsigned long)&param,
+ msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
+ }
+
+ if (!rc)
+ ndev->target_active_prot = protocol;
+
+ return rc;
}
static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
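nci_activate_target() above maps the NFC protocol requested by the user to an NCI RF protocol before issuing RF_DISCOVER_SELECT_CMD. The same mapping written as a stand-alone helper, purely as an illustrative refactoring sketch (the patch keeps the if/else chain inline):

#include <linux/nfc.h>
#include <net/nfc/nci.h>

static __u8 foo_nfc_proto_to_rf_protocol(__u32 protocol)
{
	switch (protocol) {
	case NFC_PROTO_JEWEL:
		return NCI_RF_PROTOCOL_T1T;
	case NFC_PROTO_MIFARE:
		return NCI_RF_PROTOCOL_T2T;
	case NFC_PROTO_FELICA:
		return NCI_RF_PROTOCOL_T3T;
	case NFC_PROTO_ISO14443:
		return NCI_RF_PROTOCOL_ISO_DEP;
	default:
		return NCI_RF_PROTOCOL_NFC_DEP;	/* NFC-DEP for everything else */
	}
}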
@@ -433,7 +517,7 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
ndev->target_active_prot = 0;
- if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
nci_request(ndev, nci_rf_deactivate_req, 0,
msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}
@@ -585,6 +669,8 @@ int nci_register_device(struct nci_dev *ndev)
setup_timer(&ndev->cmd_timer, nci_cmd_timer,
(unsigned long) ndev);
+ setup_timer(&ndev->data_timer, nci_data_timer,
+ (unsigned long) ndev);
mutex_init(&ndev->req_lock);
@@ -722,6 +808,9 @@ static void nci_tx_work(struct work_struct *work)
nci_plen(skb->data));
nci_send_frame(skb);
+
+ mod_timer(&ndev->data_timer,
+ jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
}
}
@@ -753,6 +842,15 @@ static void nci_rx_work(struct work_struct *work)
break;
}
}
+
+ /* check if a data exchange timeout has occurred */
+ if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
+ /* complete the data exchange transaction, if exists */
+ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
+ nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);
+
+ clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
+ }
}
/* ----- NCI TX CMD worker thread ----- */
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index e5756b30e602..7880ae924d5e 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -44,6 +44,10 @@ void nci_data_exchange_complete(struct nci_dev *ndev,
pr_debug("len %d, err %d\n", skb ? skb->len : 0, err);
+ /* data exchange is complete, stop the data timer */
+ del_timer_sync(&ndev->data_timer);
+ clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
+
if (cb) {
ndev->data_exchange_cb = NULL;
ndev->data_exchange_cb_context = 0;
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index b16a8dc2afbe..03e7b4626a3e 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -71,6 +71,20 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
queue_work(ndev->tx_wq, &ndev->tx_work);
}
+static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ __u8 status = skb->data[0];
+
+ pr_debug("status 0x%x\n", status);
+
+ if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
+ /* Activation failed, so complete the request
+ (the state remains the same) */
+ nci_req_complete(ndev, status);
+ }
+}
+
static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
@@ -86,12 +100,9 @@ static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
}
static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
- struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
+ struct rf_tech_specific_params_nfca_poll *nfca_poll,
+ __u8 *data)
{
- struct rf_tech_specific_params_nfca_poll *nfca_poll;
-
- nfca_poll = &ntf->rf_tech_specific_params.nfca_poll;
-
nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
data += 2;
@@ -115,15 +126,213 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
return data;
}
+static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev,
+ struct rf_tech_specific_params_nfcb_poll *nfcb_poll,
+ __u8 *data)
+{
+ nfcb_poll->sensb_res_len = *data++;
+
+ pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len);
+
+ memcpy(nfcb_poll->sensb_res, data, nfcb_poll->sensb_res_len);
+ data += nfcb_poll->sensb_res_len;
+
+ return data;
+}
+
+static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
+ struct rf_tech_specific_params_nfcf_poll *nfcf_poll,
+ __u8 *data)
+{
+ nfcf_poll->bit_rate = *data++;
+ nfcf_poll->sensf_res_len = *data++;
+
+ pr_debug("bit_rate %d, sensf_res_len %d\n",
+ nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
+
+ memcpy(nfcf_poll->sensf_res, data, nfcf_poll->sensf_res_len);
+ data += nfcf_poll->sensf_res_len;
+
+ return data;
+}
+
+static int nci_add_new_protocol(struct nci_dev *ndev,
+ struct nfc_target *target,
+ __u8 rf_protocol,
+ __u8 rf_tech_and_mode,
+ void *params)
+{
+ struct rf_tech_specific_params_nfca_poll *nfca_poll;
+ struct rf_tech_specific_params_nfcb_poll *nfcb_poll;
+ struct rf_tech_specific_params_nfcf_poll *nfcf_poll;
+ __u32 protocol;
+
+ if (rf_protocol == NCI_RF_PROTOCOL_T2T)
+ protocol = NFC_PROTO_MIFARE_MASK;
+ else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP)
+ protocol = NFC_PROTO_ISO14443_MASK;
+ else if (rf_protocol == NCI_RF_PROTOCOL_T3T)
+ protocol = NFC_PROTO_FELICA_MASK;
+ else
+ protocol = 0;
+
+ if (!(protocol & ndev->poll_prots)) {
+ pr_err("the target found does not have the desired protocol\n");
+ return -EPROTO;
+ }
+
+ if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE) {
+ nfca_poll = (struct rf_tech_specific_params_nfca_poll *)params;
+
+ target->sens_res = nfca_poll->sens_res;
+ target->sel_res = nfca_poll->sel_res;
+ target->nfcid1_len = nfca_poll->nfcid1_len;
+ if (target->nfcid1_len > 0) {
+ memcpy(target->nfcid1, nfca_poll->nfcid1,
+ target->nfcid1_len);
+ }
+ } else if (rf_tech_and_mode == NCI_NFC_B_PASSIVE_POLL_MODE) {
+ nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params;
+
+ target->sensb_res_len = nfcb_poll->sensb_res_len;
+ if (target->sensb_res_len > 0) {
+ memcpy(target->sensb_res, nfcb_poll->sensb_res,
+ target->sensb_res_len);
+ }
+ } else if (rf_tech_and_mode == NCI_NFC_F_PASSIVE_POLL_MODE) {
+ nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params;
+
+ target->sensf_res_len = nfcf_poll->sensf_res_len;
+ if (target->sensf_res_len > 0) {
+ memcpy(target->sensf_res, nfcf_poll->sensf_res,
+ target->sensf_res_len);
+ }
+ } else {
+ pr_err("unsupported rf_tech_and_mode 0x%x\n", rf_tech_and_mode);
+ return -EPROTO;
+ }
+
+ target->supported_protocols |= protocol;
+
+ pr_debug("protocol 0x%x\n", protocol);
+
+ return 0;
+}
+
+static void nci_add_new_target(struct nci_dev *ndev,
+ struct nci_rf_discover_ntf *ntf)
+{
+ struct nfc_target *target;
+ int i, rc;
+
+ for (i = 0; i < ndev->n_targets; i++) {
+ target = &ndev->targets[i];
+ if (target->idx == ntf->rf_discovery_id) {
+ /* This target already exists, add the new protocol */
+ nci_add_new_protocol(ndev, target, ntf->rf_protocol,
+ ntf->rf_tech_and_mode,
+ &ntf->rf_tech_specific_params);
+ return;
+ }
+ }
+
+ /* This is a new target, check if we've enough room */
+ if (ndev->n_targets == NCI_MAX_DISCOVERED_TARGETS) {
+ pr_debug("not enough room, ignoring new target...\n");
+ return;
+ }
+
+ target = &ndev->targets[ndev->n_targets];
+
+ rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol,
+ ntf->rf_tech_and_mode,
+ &ntf->rf_tech_specific_params);
+ if (!rc) {
+ target->idx = ntf->rf_discovery_id;
+ ndev->n_targets++;
+
+ pr_debug("target_idx %d, n_targets %d\n", target->idx,
+ ndev->n_targets);
+ }
+}
+
+void nci_clear_target_list(struct nci_dev *ndev)
+{
+ memset(ndev->targets, 0,
+ (sizeof(struct nfc_target)*NCI_MAX_DISCOVERED_TARGETS));
+
+ ndev->n_targets = 0;
+}
+
+static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ struct nci_rf_discover_ntf ntf;
+ __u8 *data = skb->data;
+ bool add_target = true;
+
+ ntf.rf_discovery_id = *data++;
+ ntf.rf_protocol = *data++;
+ ntf.rf_tech_and_mode = *data++;
+ ntf.rf_tech_specific_params_len = *data++;
+
+ pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id);
+ pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol);
+ pr_debug("rf_tech_and_mode 0x%x\n", ntf.rf_tech_and_mode);
+ pr_debug("rf_tech_specific_params_len %d\n",
+ ntf.rf_tech_specific_params_len);
+
+ if (ntf.rf_tech_specific_params_len > 0) {
+ switch (ntf.rf_tech_and_mode) {
+ case NCI_NFC_A_PASSIVE_POLL_MODE:
+ data = nci_extract_rf_params_nfca_passive_poll(ndev,
+ &(ntf.rf_tech_specific_params.nfca_poll), data);
+ break;
+
+ case NCI_NFC_B_PASSIVE_POLL_MODE:
+ data = nci_extract_rf_params_nfcb_passive_poll(ndev,
+ &(ntf.rf_tech_specific_params.nfcb_poll), data);
+ break;
+
+ case NCI_NFC_F_PASSIVE_POLL_MODE:
+ data = nci_extract_rf_params_nfcf_passive_poll(ndev,
+ &(ntf.rf_tech_specific_params.nfcf_poll), data);
+ break;
+
+ default:
+ pr_err("unsupported rf_tech_and_mode 0x%x\n",
+ ntf.rf_tech_and_mode);
+ data += ntf.rf_tech_specific_params_len;
+ add_target = false;
+ }
+ }
+
+ ntf.ntf_type = *data++;
+ pr_debug("ntf_type %d\n", ntf.ntf_type);
+
+ if (add_target)
+ nci_add_new_target(ndev, &ntf);
+
+ if (ntf.ntf_type == NCI_DISCOVER_NTF_TYPE_MORE) {
+ atomic_set(&ndev->state, NCI_W4_ALL_DISCOVERIES);
+ } else {
+ atomic_set(&ndev->state, NCI_W4_HOST_SELECT);
+ nfc_targets_found(ndev->nfc_dev, ndev->targets,
+ ndev->n_targets);
+ }
+}
+
static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
{
struct activation_params_nfca_poll_iso_dep *nfca_poll;
+ struct activation_params_nfcb_poll_iso_dep *nfcb_poll;
switch (ntf->activation_rf_tech_and_mode) {
case NCI_NFC_A_PASSIVE_POLL_MODE:
nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
nfca_poll->rats_res_len = *data++;
+ pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
if (nfca_poll->rats_res_len > 0) {
memcpy(nfca_poll->rats_res,
data,
@@ -131,52 +340,47 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
}
break;
+ case NCI_NFC_B_PASSIVE_POLL_MODE:
+ nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep;
+ nfcb_poll->attrib_res_len = *data++;
+ pr_debug("attrib_res_len %d\n",
+ nfcb_poll->attrib_res_len);
+ if (nfcb_poll->attrib_res_len > 0) {
+ memcpy(nfcb_poll->attrib_res,
+ data,
+ nfcb_poll->attrib_res_len);
+ }
+ break;
+
default:
pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
ntf->activation_rf_tech_and_mode);
- return -EPROTO;
+ return NCI_STATUS_RF_PROTOCOL_ERROR;
}
- return 0;
+ return NCI_STATUS_OK;
}
-static void nci_target_found(struct nci_dev *ndev,
- struct nci_rf_intf_activated_ntf *ntf)
+static void nci_target_auto_activated(struct nci_dev *ndev,
+ struct nci_rf_intf_activated_ntf *ntf)
{
- struct nfc_target nfc_tgt;
+ struct nfc_target *target;
+ int rc;
- if (ntf->rf_protocol == NCI_RF_PROTOCOL_T2T) /* T2T MifareUL */
- nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK;
- else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) /* 4A */
- nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK;
- else
- nfc_tgt.supported_protocols = 0;
-
- nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res;
- nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res;
- nfc_tgt.nfcid1_len = ntf->rf_tech_specific_params.nfca_poll.nfcid1_len;
- if (nfc_tgt.nfcid1_len > 0) {
- memcpy(nfc_tgt.nfcid1,
- ntf->rf_tech_specific_params.nfca_poll.nfcid1,
- nfc_tgt.nfcid1_len);
- }
+ target = &ndev->targets[ndev->n_targets];
- if (!(nfc_tgt.supported_protocols & ndev->poll_prots)) {
- pr_debug("the target found does not have the desired protocol\n");
+ rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol,
+ ntf->activation_rf_tech_and_mode,
+ &ntf->rf_tech_specific_params);
+ if (rc)
return;
- }
- pr_debug("new target found, supported_protocols 0x%x\n",
- nfc_tgt.supported_protocols);
+ target->idx = ntf->rf_discovery_id;
+ ndev->n_targets++;
- ndev->target_available_prots = nfc_tgt.supported_protocols;
- ndev->max_data_pkt_payload_size = ntf->max_data_pkt_payload_size;
- ndev->initial_num_credits = ntf->initial_num_credits;
+ pr_debug("target_idx %d, n_targets %d\n", target->idx, ndev->n_targets);
- /* set the available credits to initial value */
- atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
-
- nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1);
+ nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets);
}
static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
@@ -184,10 +388,7 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
{
struct nci_rf_intf_activated_ntf ntf;
__u8 *data = skb->data;
- int err = 0;
-
- clear_bit(NCI_DISCOVERY, &ndev->flags);
- set_bit(NCI_POLL_ACTIVE, &ndev->flags);
+ int err = NCI_STATUS_OK;
ntf.rf_discovery_id = *data++;
ntf.rf_interface = *data++;
@@ -212,13 +413,24 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
switch (ntf.activation_rf_tech_and_mode) {
case NCI_NFC_A_PASSIVE_POLL_MODE:
data = nci_extract_rf_params_nfca_passive_poll(ndev,
- &ntf, data);
+ &(ntf.rf_tech_specific_params.nfca_poll), data);
+ break;
+
+ case NCI_NFC_B_PASSIVE_POLL_MODE:
+ data = nci_extract_rf_params_nfcb_passive_poll(ndev,
+ &(ntf.rf_tech_specific_params.nfcb_poll), data);
+ break;
+
+ case NCI_NFC_F_PASSIVE_POLL_MODE:
+ data = nci_extract_rf_params_nfcf_passive_poll(ndev,
+ &(ntf.rf_tech_specific_params.nfcf_poll), data);
break;
default:
pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
ntf.activation_rf_tech_and_mode);
- return;
+ err = NCI_STATUS_RF_PROTOCOL_ERROR;
+ goto exit;
}
}
@@ -250,12 +462,30 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
default:
pr_err("unsupported rf_interface 0x%x\n",
ntf.rf_interface);
- return;
+ err = NCI_STATUS_RF_PROTOCOL_ERROR;
+ break;
}
}
- if (!err)
- nci_target_found(ndev, &ntf);
+exit:
+ if (err == NCI_STATUS_OK) {
+ ndev->max_data_pkt_payload_size = ntf.max_data_pkt_payload_size;
+ ndev->initial_num_credits = ntf.initial_num_credits;
+
+ /* set the available credits to initial value */
+ atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
+ }
+
+ if (atomic_read(&ndev->state) == NCI_DISCOVERY) {
+ /* A single target was found and activated automatically */
+ atomic_set(&ndev->state, NCI_POLL_ACTIVE);
+ if (err == NCI_STATUS_OK)
+ nci_target_auto_activated(ndev, &ntf);
+ } else { /* ndev->state == NCI_W4_HOST_SELECT */
+ /* A selected target was activated, so complete the request */
+ atomic_set(&ndev->state, NCI_POLL_ACTIVE);
+ nci_req_complete(ndev, err);
+ }
}
static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
@@ -265,9 +495,6 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
- clear_bit(NCI_POLL_ACTIVE, &ndev->flags);
- ndev->target_active_prot = 0;
-
/* drop tx data queue */
skb_queue_purge(&ndev->tx_q);
@@ -280,6 +507,10 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
/* complete the data exchange transaction, if exists */
if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
nci_data_exchange_complete(ndev, NULL, -EIO);
+
+ nci_clear_target_list(ndev);
+ atomic_set(&ndev->state, NCI_IDLE);
+ nci_req_complete(ndev, NCI_STATUS_OK);
}
void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
@@ -300,10 +531,18 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_core_conn_credits_ntf_packet(ndev, skb);
break;
+ case NCI_OP_CORE_GENERIC_ERROR_NTF:
+ nci_core_generic_error_ntf_packet(ndev, skb);
+ break;
+
case NCI_OP_CORE_INTF_ERROR_NTF:
nci_core_conn_intf_error_ntf_packet(ndev, skb);
break;
+ case NCI_OP_RF_DISCOVER_NTF:
+ nci_rf_discover_ntf_packet(ndev, skb);
+ break;
+
case NCI_OP_RF_INTF_ACTIVATED_NTF:
nci_rf_intf_activated_ntf_packet(ndev, skb);
break;
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index 2840ae2f3615..aa63b1e99188 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -137,11 +137,23 @@ static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
pr_debug("status 0x%x\n", status);
if (status == NCI_STATUS_OK)
- set_bit(NCI_DISCOVERY, &ndev->flags);
+ atomic_set(&ndev->state, NCI_DISCOVERY);
nci_req_complete(ndev, status);
}
+static void nci_rf_disc_select_rsp_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ __u8 status = skb->data[0];
+
+ pr_debug("status 0x%x\n", status);
+
+ /* Complete the request on intf_activated_ntf or generic_error_ntf */
+ if (status != NCI_STATUS_OK)
+ nci_req_complete(ndev, status);
+}
+
static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
@@ -149,9 +161,13 @@ static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
pr_debug("status 0x%x\n", status);
- clear_bit(NCI_DISCOVERY, &ndev->flags);
-
- nci_req_complete(ndev, status);
+ /* If target was active, complete the request only in deactivate_ntf */
+ if ((status != NCI_STATUS_OK) ||
+ (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
+ nci_clear_target_list(ndev);
+ atomic_set(&ndev->state, NCI_IDLE);
+ nci_req_complete(ndev, status);
+ }
}
void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
@@ -187,6 +203,10 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_rf_disc_rsp_packet(ndev, skb);
break;
+ case NCI_OP_RF_DISCOVER_SELECT_RSP:
+ nci_rf_disc_select_rsp_packet(ndev, skb);
+ break;
+
case NCI_OP_RF_DEACTIVATE_RSP:
nci_rf_deactivate_rsp_packet(ndev, skb);
break;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 6989dfa28ee2..07f0348aabf5 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -70,6 +70,12 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
if (target->nfcid1_len > 0)
NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
target->nfcid1);
+ if (target->sensb_res_len > 0)
+ NLA_PUT(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
+ target->sensb_res);
+ if (target->sensf_res_len > 0)
+ NLA_PUT(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
+ target->sensf_res);
return genlmsg_end(msg, hdr);
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 2e2f8c6a61fe..5325439b0c60 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -92,18 +92,6 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
goto error;
}
- if (addr->target_idx > dev->target_idx - 1 ||
- addr->target_idx < dev->target_idx - dev->n_targets) {
- rc = -EINVAL;
- goto error;
- }
-
- if (addr->target_idx > dev->target_idx - 1 ||
- addr->target_idx < dev->target_idx - dev->n_targets) {
- rc = -EINVAL;
- goto error;
- }
-
rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
if (rc)
goto put_dev;
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 322b8d206693..b6b1d7daa3cb 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -66,6 +66,7 @@ static int internal_dev_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
return 0;
}
@@ -145,7 +146,7 @@ static void do_setup(struct net_device *netdev)
netdev->vlan_features = netdev->features;
netdev->features |= NETIF_F_HW_VLAN_TX;
netdev->hw_features = netdev->features & ~NETIF_F_LLTX;
- random_ether_addr(netdev->dev_addr);
+ eth_hw_addr_random(netdev);
}
static struct vport *internal_dev_create(const struct vport_parms *parms)
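The openvswitch hunk above replaces the manual random_ether_addr() call with eth_hw_addr_random(), which both fills dev_addr with a random, locally administered unicast address and records NET_ADDR_RANDOM in addr_assign_type. A hypothetical driver setup sketch showing the intended usage:

#include <linux/etherdevice.h>

static void foo_setup(struct net_device *netdev)
{
	ether_setup(netdev);
	/* random MAC plus addr_assign_type bookkeeping in one call */
	eth_hw_addr_random(netdev);
}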
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2dbb32b988c4..ae2d484416dd 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1459,6 +1459,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
struct net_device *dev;
__be16 proto = 0;
int err;
+ int extra_len = 0;
/*
* Get and verify the address.
@@ -1493,8 +1494,16 @@ retry:
* raw protocol and you must do your own fragmentation at this level.
*/
+ if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
+ if (!netif_supports_nofcs(dev)) {
+ err = -EPROTONOSUPPORT;
+ goto out_unlock;
+ }
+ extra_len = 4; /* We're doing our own CRC */
+ }
+
err = -EMSGSIZE;
- if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
+ if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
goto out_unlock;
if (!skb) {
@@ -1526,7 +1535,7 @@ retry:
goto retry;
}
- if (len > (dev->mtu + dev->hard_header_len)) {
+ if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
/* Earlier code assumed this would be a VLAN pkt,
* double-check this now that we have the actual
* packet in hand.
@@ -1548,6 +1557,9 @@ retry:
if (err < 0)
goto out_unlock;
+ if (unlikely(extra_len == 4))
+ skb->no_fcs = 1;
+
dev_queue_xmit(skb);
rcu_read_unlock();
return len;
@@ -2209,6 +2221,7 @@ static int packet_snd(struct socket *sock,
struct packet_sock *po = pkt_sk(sk);
unsigned short gso_type = 0;
int hlen, tlen;
+ int extra_len = 0;
/*
* Get and verify the address.
@@ -2288,8 +2301,16 @@ static int packet_snd(struct socket *sock,
}
}
+ if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
+ if (!netif_supports_nofcs(dev)) {
+ err = -EPROTONOSUPPORT;
+ goto out_unlock;
+ }
+ extra_len = 4; /* We're doing our own CRC */
+ }
+
err = -EMSGSIZE;
- if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
+ if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
goto out_unlock;
err = -ENOBUFS;
@@ -2315,7 +2336,7 @@ static int packet_snd(struct socket *sock,
if (err < 0)
goto out_free;
- if (!gso_type && (len > dev->mtu + reserve)) {
+ if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
/* Earlier code assumed this would be a VLAN pkt,
* double-check this now that we have the actual
* packet in hand.
@@ -2353,6 +2374,9 @@ static int packet_snd(struct socket *sock,
len += vnet_hdr_len;
}
+ if (unlikely(extra_len == 4))
+ skb->no_fcs = 1;
+
/*
* Now send it
*/
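The af_packet hunks above honour a per-socket SOCK_NOFCS flag: when it is set, the size checks allow four extra bytes and skb->no_fcs is set so the device does not append a hardware FCS. A hedged userspace sketch of how such a socket might be opened; the SO_NOFCS option name and value are assumptions taken from the companion uapi change, which is not shown in this hunk:

#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>

#ifndef SO_NOFCS
#define SO_NOFCS 43	/* assumed value; not defined by this hunk */
#endif

int open_nofcs_packet_socket(void)
{
	int one = 1;
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0)
		return -1;

	/* frames sent on this socket must carry their own 4-byte CRC;
	 * transmission fails with EPROTONOSUPPORT if the device cannot
	 * skip FCS generation
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_NOFCS, &one, sizeof(one)) < 0)
		perror("setsockopt(SO_NOFCS)");

	return fd;
}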
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 2590e91b3289..75b58f81d53d 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -260,6 +260,32 @@ config NET_SCH_INGRESS
To compile this code as a module, choose M here: the
module will be called sch_ingress.
+config NET_SCH_PLUG
+ tristate "Plug network traffic until release (PLUG)"
+ ---help---
+
+ This queuing discipline allows userspace to plug/unplug a network
+ output queue via the netlink interface. When it receives an enqueue
+ command it inserts a plug into the outbound queue, so that subsequent
+ packets are buffered until a dequeue command arrives over netlink;
+ the plug is then removed and normal packet flow resumes.
+
+ This module also provides generic "network output buffering"
+ functionality (a.k.a. output commit): when a dequeue command arrives,
+ only packets up to the first plug are released for delivery. The
+ Remus HA project uses this module to enable speculative execution of
+ virtual machines, by allowing the generated network output to be
+ rolled back if needed.
+
+ For more information, please refer to http://wiki.xensource.com/xenwiki/Remus
+
+ Say Y here if you are using this kernel for Xen dom0 and
+ want to protect Xen guests with Remus.
+
+ To compile this code as a module, choose M here: the
+ module will be called sch_plug.
+
comment "Classification"
config NET_CLS
diff --git a/net/sched/Makefile b/net/sched/Makefile
index dc5889c0a15a..8cdf4e2b51d3 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o
obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
+obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o
obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
new file mode 100644
index 000000000000..89f8fcf73f18
--- /dev/null
+++ b/net/sched/sch_plug.c
@@ -0,0 +1,233 @@
+/*
+ * sch_plug.c Queue traffic until an explicit release command
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * There are two ways to use this qdisc:
+ * 1. A simple "instantaneous" plug/unplug operation, by issuing an alternating
+ * sequence of TCQ_PLUG_BUFFER & TCQ_PLUG_RELEASE_INDEFINITE commands.
+ *
+ * 2. For network output buffering (a.k.a. output commit) functionality.
+ * Output commit property is commonly used by applications using checkpoint
+ * based fault-tolerance to ensure that the checkpoint from which a system
+ * is being restored is consistent w.r.t outside world.
+ *
+ * Consider for e.g. Remus - a Virtual Machine checkpointing system,
+ * wherein a VM is checkpointed, say every 50ms. The checkpoint is replicated
+ * asynchronously to the backup host, while the VM continues executing the
+ * next epoch speculatively.
+ *
+ * The following is a typical sequence of output buffer operations:
+ * 1. At epoch i, start_buffer(i)
+ * 2. At end of epoch i (i.e. after 50ms):
+ * 2.1 Stop VM and take checkpoint(i).
+ * 2.2 start_buffer(i+1) and resume the VM.
+ * 3. While speculatively executing epoch(i+1), asynchronously replicate
+ * checkpoint(i) to backup host.
+ * 4. When checkpoint_ack(i) is received from backup, release_buffer(i)
+ * Thus, this Qdisc would receive the following sequence of commands:
+ * TCQ_PLUG_BUFFER (epoch i)
+ * .. TCQ_PLUG_BUFFER (epoch i+1)
+ * ....TCQ_PLUG_RELEASE_ONE (epoch i)
+ * ......TCQ_PLUG_BUFFER (epoch i+2)
+ * ........
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+
+/*
+ * State of the queue, when used for network output buffering:
+ *
+ * plug(i+1) plug(i) head
+ * ------------------+--------------------+---------------->
+ * | |
+ * | |
+ * pkts_current_epoch| pkts_last_epoch |pkts_to_release
+ * ----------------->|<--------+--------->|+--------------->
+ * v v
+ *
+ */
+
+struct plug_sched_data {
+ /* If true, the dequeue function releases all packets
+ * from head to end of the queue. The queue turns into
+ * a pass-through queue for newly arriving packets.
+ */
+ bool unplug_indefinite;
+
+ /* Queue Limit in bytes */
+ u32 limit;
+
+ /* Number of packets (output) from the current speculatively
+ * executing epoch.
+ */
+ u32 pkts_current_epoch;
+
+ /* Number of packets corresponding to the recently finished
+ * epoch. These will be released when we receive a
+ * TCQ_PLUG_RELEASE_ONE command. This command is typically
+ * issued after committing a checkpoint at the target.
+ */
+ u32 pkts_last_epoch;
+
+ /*
+ * Number of packets from the head of the queue, that can
+ * be released (committed checkpoint).
+ */
+ u32 pkts_to_release;
+};
+
+static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct plug_sched_data *q = qdisc_priv(sch);
+
+ if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
+ if (!q->unplug_indefinite)
+ q->pkts_current_epoch++;
+ return qdisc_enqueue_tail(skb, sch);
+ }
+
+ return qdisc_reshape_fail(skb, sch);
+}
+
+static struct sk_buff *plug_dequeue(struct Qdisc *sch)
+{
+ struct plug_sched_data *q = qdisc_priv(sch);
+
+ if (qdisc_is_throttled(sch))
+ return NULL;
+
+ if (!q->unplug_indefinite) {
+ if (!q->pkts_to_release) {
+ /* No more packets to dequeue. Block the queue
+ * and wait for the next release command.
+ */
+ qdisc_throttled(sch);
+ return NULL;
+ }
+ q->pkts_to_release--;
+ }
+
+ return qdisc_dequeue_head(sch);
+}
+
+static int plug_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct plug_sched_data *q = qdisc_priv(sch);
+
+ q->pkts_current_epoch = 0;
+ q->pkts_last_epoch = 0;
+ q->pkts_to_release = 0;
+ q->unplug_indefinite = false;
+
+ if (opt == NULL) {
+ /* We will set a default limit of 100 pkts (~150kB)
+ * in case tx_queue_len is not available. The
+ * default value is completely arbitrary.
+ */
+ u32 pkt_limit = qdisc_dev(sch)->tx_queue_len ? : 100;
+ q->limit = pkt_limit * psched_mtu(qdisc_dev(sch));
+ } else {
+ struct tc_plug_qopt *ctl = nla_data(opt);
+
+ if (nla_len(opt) < sizeof(*ctl))
+ return -EINVAL;
+
+ q->limit = ctl->limit;
+ }
+
+ qdisc_throttled(sch);
+ return 0;
+}
+
+/* Receives 4 types of messages:
+ * TCQ_PLUG_BUFFER: Insert a plug into the queue and
+ * buffer any incoming packets
+ * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
+ * to beginning of the next plug.
+ * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
+ * Stop buffering packets until the next TCQ_PLUG_BUFFER
+ * command is received (just act as a pass-thru queue).
+ * TCQ_PLUG_LIMIT: Increase/decrease queue size
+ */
+static int plug_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct plug_sched_data *q = qdisc_priv(sch);
+ struct tc_plug_qopt *msg;
+
+ if (opt == NULL)
+ return -EINVAL;
+
+ msg = nla_data(opt);
+ if (nla_len(opt) < sizeof(*msg))
+ return -EINVAL;
+
+ switch (msg->action) {
+ case TCQ_PLUG_BUFFER:
+ /* Save size of the current buffer */
+ q->pkts_last_epoch = q->pkts_current_epoch;
+ q->pkts_current_epoch = 0;
+ if (q->unplug_indefinite)
+ qdisc_throttled(sch);
+ q->unplug_indefinite = false;
+ break;
+ case TCQ_PLUG_RELEASE_ONE:
+ /* Add packets from the last complete buffer to the
+ * packets to be released set.
+ */
+ q->pkts_to_release += q->pkts_last_epoch;
+ q->pkts_last_epoch = 0;
+ qdisc_unthrottled(sch);
+ netif_schedule_queue(sch->dev_queue);
+ break;
+ case TCQ_PLUG_RELEASE_INDEFINITE:
+ q->unplug_indefinite = true;
+ q->pkts_to_release = 0;
+ q->pkts_last_epoch = 0;
+ q->pkts_current_epoch = 0;
+ qdisc_unthrottled(sch);
+ netif_schedule_queue(sch->dev_queue);
+ break;
+ case TCQ_PLUG_LIMIT:
+ /* Limit is supplied in bytes */
+ q->limit = msg->limit;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
+ .id = "plug",
+ .priv_size = sizeof(struct plug_sched_data),
+ .enqueue = plug_enqueue,
+ .dequeue = plug_dequeue,
+ .peek = qdisc_peek_head,
+ .init = plug_init,
+ .change = plug_change,
+ .owner = THIS_MODULE,
+};
+
+static int __init plug_module_init(void)
+{
+ return register_qdisc(&plug_qdisc_ops);
+}
+
+static void __exit plug_module_exit(void)
+{
+ unregister_qdisc(&plug_qdisc_ops);
+}
+module_init(plug_module_init)
+module_exit(plug_module_exit)
+MODULE_LICENSE("GPL");
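For context, a minimal user-space sketch of driving the plug qdisc added above:
the TCQ_PLUG_* commands are consumed by plug_change(), i.e. they arrive as the
TCA_OPTIONS payload of an RTM_NEWQDISC change request for a "plug" qdisc. The
two-field layout of struct tc_plug_qopt (action, limit) and its home in
<linux/pkt_sched.h> are assumptions inferred from the fields plug_change() and
plug_init() read; the rtnetlink transport itself is only outlined, not shown.

	/* Hypothetical helpers: fill a tc_plug_qopt for two of the commands.
	 * Field names follow what plug_change()/plug_init() dereference.
	 */
	#include <linux/pkt_sched.h>
	#include <string.h>

	static void fill_release_one(struct tc_plug_qopt *opt)
	{
		memset(opt, 0, sizeof(*opt));
		opt->action = TCQ_PLUG_RELEASE_ONE;	/* unplug the committed epoch */
	}

	static void fill_set_limit(struct tc_plug_qopt *opt, __u32 bytes)
	{
		memset(opt, 0, sizeof(*opt));
		opt->action = TCQ_PLUG_LIMIT;
		opt->limit = bytes;			/* queue limit in bytes */
	}

	/* Either structure would then be attached as the TCA_OPTIONS attribute
	 * of an RTM_NEWQDISC (change) request for the interface's "plug" qdisc,
	 * e.g. via libnl or a hand-rolled rtnetlink socket.
	 */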
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 8eb87b11d100..41ecf313073c 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -157,39 +157,14 @@ u32 tipc_bclink_get_last_sent(void)
return bcl->fsm_msg_cnt;
}
-/**
- * bclink_set_gap - set gap according to contents of current deferred pkt queue
- *
- * Called with 'node' locked, bc_lock unlocked
- */
-
-static void bclink_set_gap(struct tipc_node *n_ptr)
-{
- struct sk_buff *buf = n_ptr->bclink.deferred_head;
-
- n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
- mod(n_ptr->bclink.last_in);
- if (unlikely(buf != NULL))
- n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
-}
-
-/**
- * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
- *
- * This mechanism endeavours to prevent all nodes in network from trying
- * to ACK or NACK at the same time.
- *
- * Note: TIPC uses a different trigger to distribute ACKs than it does to
- * distribute NACKs, but tries to use the same spacing (divide by 16).
- */
-
-static int bclink_ack_allowed(u32 n)
+static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
- return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
+ node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
+ seqno : node->bclink.last_sent;
}
-/**
+/*
* tipc_bclink_retransmit_to - get most recent node to request retransmission
*
* Called with bc_lock locked
@@ -300,140 +275,94 @@ exit:
spin_unlock_bh(&bc_lock);
}
-/**
- * bclink_send_ack - unicast an ACK msg
+/*
+ * tipc_bclink_update_link_state - update broadcast link state
*
* tipc_net_lock and node lock set
*/
-static void bclink_send_ack(struct tipc_node *n_ptr)
+void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
{
- struct tipc_link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
+ struct sk_buff *buf;
- if (l_ptr != NULL)
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
-}
+ /* Ignore "stale" link state info */
-/**
- * bclink_send_nack- broadcast a NACK msg
- *
- * tipc_net_lock and node lock set
- */
+ if (less_eq(last_sent, n_ptr->bclink.last_in))
+ return;
-static void bclink_send_nack(struct tipc_node *n_ptr)
-{
- struct sk_buff *buf;
- struct tipc_msg *msg;
+ /* Update link synchronization state; quit if in sync */
+
+ bclink_update_last_sent(n_ptr, last_sent);
+
+ if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
+ return;
+
+ /* Update out-of-sync state; quit if loss is still unconfirmed */
+
+ if ((++n_ptr->bclink.oos_state) == 1) {
+ if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
+ return;
+ n_ptr->bclink.oos_state++;
+ }
- if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
+ /* Don't NACK if one has been recently sent (or seen) */
+
+ if (n_ptr->bclink.oos_state & 0x1)
return;
+ /* Send NACK */
+
buf = tipc_buf_acquire(INT_H_SIZE);
if (buf) {
- msg = buf_msg(buf);
+ struct tipc_msg *msg = buf_msg(buf);
+
tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
- INT_H_SIZE, n_ptr->addr);
+ INT_H_SIZE, n_ptr->addr);
msg_set_non_seq(msg, 1);
msg_set_mc_netid(msg, tipc_net_id);
- msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
- msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
- msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
- msg_set_bcast_tag(msg, tipc_own_tag);
+ msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
+ msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
+ msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
+ ? buf_seqno(n_ptr->bclink.deferred_head) - 1
+ : n_ptr->bclink.last_sent);
+ spin_lock_bh(&bc_lock);
tipc_bearer_send(&bcbearer->bearer, buf, NULL);
bcl->stats.sent_nacks++;
+ spin_unlock_bh(&bc_lock);
buf_discard(buf);
- /*
- * Ensure we doesn't send another NACK msg to the node
- * until 16 more deferred messages arrive from it
- * (i.e. helps prevent all nodes from NACK'ing at same time)
- */
-
- n_ptr->bclink.nack_sync = tipc_own_tag;
+ n_ptr->bclink.oos_state++;
}
}
-/**
- * tipc_bclink_check_gap - send a NACK if a sequence gap exists
+/*
+ * bclink_peek_nack - monitor retransmission requests sent by other nodes
*
- * tipc_net_lock and node lock set
- */
-
-void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
-{
- if (!n_ptr->bclink.supported ||
- less_eq(last_sent, mod(n_ptr->bclink.last_in)))
- return;
-
- bclink_set_gap(n_ptr);
- if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
- n_ptr->bclink.gap_to = last_sent;
- bclink_send_nack(n_ptr);
-}
-
-/**
- * tipc_bclink_peek_nack - process a NACK msg meant for another node
+ * Delay any upcoming NACK by this node if another node has already
+ * requested the first message this node is going to ask for.
*
* Only tipc_net_lock set.
*/
-static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
+static void bclink_peek_nack(struct tipc_msg *msg)
{
- struct tipc_node *n_ptr = tipc_node_find(dest);
- u32 my_after, my_to;
+ struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
- if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
+ if (unlikely(!n_ptr))
return;
+
tipc_node_lock(n_ptr);
- /*
- * Modify gap to suppress unnecessary NACKs from this node
- */
- my_after = n_ptr->bclink.gap_after;
- my_to = n_ptr->bclink.gap_to;
-
- if (less_eq(gap_after, my_after)) {
- if (less(my_after, gap_to) && less(gap_to, my_to))
- n_ptr->bclink.gap_after = gap_to;
- else if (less_eq(my_to, gap_to))
- n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
- } else if (less_eq(gap_after, my_to)) {
- if (less_eq(my_to, gap_to))
- n_ptr->bclink.gap_to = gap_after;
- } else {
- /*
- * Expand gap if missing bufs not in deferred queue:
- */
- struct sk_buff *buf = n_ptr->bclink.deferred_head;
- u32 prev = n_ptr->bclink.gap_to;
- for (; buf; buf = buf->next) {
- u32 seqno = buf_seqno(buf);
+ if (n_ptr->bclink.supported &&
+ (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
+ (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
+ n_ptr->bclink.oos_state = 2;
- if (mod(seqno - prev) != 1) {
- buf = NULL;
- break;
- }
- if (seqno == gap_after)
- break;
- prev = seqno;
- }
- if (buf == NULL)
- n_ptr->bclink.gap_to = gap_after;
- }
- /*
- * Some nodes may send a complementary NACK now:
- */
- if (bclink_ack_allowed(sender_tag + 1)) {
- if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
- bclink_send_nack(n_ptr);
- bclink_set_gap(n_ptr);
- }
- }
tipc_node_unlock(n_ptr);
}
-/**
+/*
* tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
*/
@@ -460,7 +389,33 @@ exit:
return res;
}
-/**
+/*
+ * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
+ *
+ * Called with both sending node's lock and bc_lock taken.
+ */
+
+static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
+{
+ bclink_update_last_sent(node, seqno);
+ node->bclink.last_in = seqno;
+ node->bclink.oos_state = 0;
+ bcl->stats.recv_info++;
+
+ /*
+ * Unicast an ACK periodically, ensuring that
+ * all nodes in the cluster don't ACK at the same time
+ */
+
+ if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
+ tipc_link_send_proto_msg(
+ node->active_links[node->addr & 1],
+ STATE_MSG, 0, 0, 0, 0, 0);
+ bcl->stats.sent_acks++;
+ }
+}
+
+/*
* tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
*
* tipc_net_lock is read_locked, no other locks set
@@ -472,7 +427,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
struct tipc_node *node;
u32 next_in;
u32 seqno;
- struct sk_buff *deferred;
+ int deferred;
/* Screen out unwanted broadcast messages */
@@ -487,6 +442,8 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
if (unlikely(!node->bclink.supported))
goto unlock;
+ /* Handle broadcast protocol message */
+
if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
if (msg_type(msg) != STATE_MSG)
goto unlock;
@@ -501,85 +458,114 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
spin_unlock_bh(&bc_lock);
} else {
tipc_node_unlock(node);
- tipc_bclink_peek_nack(msg_destnode(msg),
- msg_bcast_tag(msg),
- msg_bcgap_after(msg),
- msg_bcgap_to(msg));
+ bclink_peek_nack(msg);
}
goto exit;
}
/* Handle in-sequence broadcast message */
-receive:
- next_in = mod(node->bclink.last_in + 1);
seqno = msg_seqno(msg);
+ next_in = mod(node->bclink.last_in + 1);
if (likely(seqno == next_in)) {
- bcl->stats.recv_info++;
- node->bclink.last_in++;
- bclink_set_gap(node);
- if (unlikely(bclink_ack_allowed(seqno))) {
- bclink_send_ack(node);
- bcl->stats.sent_acks++;
- }
+receive:
+ /* Deliver message to destination */
+
if (likely(msg_isdata(msg))) {
+ spin_lock_bh(&bc_lock);
+ bclink_accept_pkt(node, seqno);
+ spin_unlock_bh(&bc_lock);
tipc_node_unlock(node);
if (likely(msg_mcast(msg)))
tipc_port_recv_mcast(buf, NULL);
else
buf_discard(buf);
} else if (msg_user(msg) == MSG_BUNDLER) {
+ spin_lock_bh(&bc_lock);
+ bclink_accept_pkt(node, seqno);
bcl->stats.recv_bundles++;
bcl->stats.recv_bundled += msg_msgcnt(msg);
+ spin_unlock_bh(&bc_lock);
tipc_node_unlock(node);
tipc_link_recv_bundle(buf);
} else if (msg_user(msg) == MSG_FRAGMENTER) {
+ int ret = tipc_link_recv_fragment(&node->bclink.defragm,
+ &buf, &msg);
+ if (ret < 0)
+ goto unlock;
+ spin_lock_bh(&bc_lock);
+ bclink_accept_pkt(node, seqno);
bcl->stats.recv_fragments++;
- if (tipc_link_recv_fragment(&node->bclink.defragm,
- &buf, &msg))
+ if (ret > 0)
bcl->stats.recv_fragmented++;
+ spin_unlock_bh(&bc_lock);
tipc_node_unlock(node);
tipc_net_route_msg(buf);
} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
+ spin_lock_bh(&bc_lock);
+ bclink_accept_pkt(node, seqno);
+ spin_unlock_bh(&bc_lock);
tipc_node_unlock(node);
tipc_named_recv(buf);
} else {
+ spin_lock_bh(&bc_lock);
+ bclink_accept_pkt(node, seqno);
+ spin_unlock_bh(&bc_lock);
tipc_node_unlock(node);
buf_discard(buf);
}
buf = NULL;
+
+ /* Determine new synchronization state */
+
tipc_node_lock(node);
- deferred = node->bclink.deferred_head;
- if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
- buf = deferred;
- msg = buf_msg(buf);
- node->bclink.deferred_head = deferred->next;
- goto receive;
- }
- } else if (less(next_in, seqno)) {
- u32 gap_after = node->bclink.gap_after;
- u32 gap_to = node->bclink.gap_to;
-
- if (tipc_link_defer_pkt(&node->bclink.deferred_head,
- &node->bclink.deferred_tail,
- buf)) {
- node->bclink.nack_sync++;
- bcl->stats.deferred_recv++;
- if (seqno == mod(gap_after + 1))
- node->bclink.gap_after = seqno;
- else if (less(gap_after, seqno) && less(seqno, gap_to))
- node->bclink.gap_to = seqno;
+ if (unlikely(!tipc_node_is_up(node)))
+ goto unlock;
+
+ if (node->bclink.last_in == node->bclink.last_sent)
+ goto unlock;
+
+ if (!node->bclink.deferred_head) {
+ node->bclink.oos_state = 1;
+ goto unlock;
}
+
+ msg = buf_msg(node->bclink.deferred_head);
+ seqno = msg_seqno(msg);
+ next_in = mod(next_in + 1);
+ if (seqno != next_in)
+ goto unlock;
+
+ /* Take in-sequence message from deferred queue & deliver it */
+
+ buf = node->bclink.deferred_head;
+ node->bclink.deferred_head = buf->next;
+ node->bclink.deferred_size--;
+ goto receive;
+ }
+
+ /* Handle out-of-sequence broadcast message */
+
+ if (less(next_in, seqno)) {
+ deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
+ &node->bclink.deferred_tail,
+ buf);
+ node->bclink.deferred_size += deferred;
+ bclink_update_last_sent(node, seqno);
buf = NULL;
- if (bclink_ack_allowed(node->bclink.nack_sync)) {
- if (gap_to != gap_after)
- bclink_send_nack(node);
- bclink_set_gap(node);
- }
- } else {
+ } else
+ deferred = 0;
+
+ spin_lock_bh(&bc_lock);
+
+ if (deferred)
+ bcl->stats.deferred_recv++;
+ else
bcl->stats.duplicates++;
- }
+
+ spin_unlock_bh(&bc_lock);
+
unlock:
tipc_node_unlock(node);
exit:
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index b009666c60b0..5571394098f9 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -96,7 +96,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf);
void tipc_bclink_recv_pkt(struct sk_buff *buf);
u32 tipc_bclink_get_last_sent(void);
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr);
-void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 seqno);
+void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
int tipc_bclink_reset_stats(void);
int tipc_bclink_set_queue_limits(u32 limit);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index ac1832a66f8a..d8b0a22367b6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1501,13 +1501,13 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
tipc_node_lock(n_ptr);
tipc_addr_string_fill(addr_string, n_ptr->addr);
- info("Multicast link info for %s\n", addr_string);
+ info("Broadcast link info for %s\n", addr_string);
+ info("Supportable: %d, ", n_ptr->bclink.supportable);
info("Supported: %d, ", n_ptr->bclink.supported);
info("Acked: %u\n", n_ptr->bclink.acked);
info("Last in: %u, ", n_ptr->bclink.last_in);
- info("Gap after: %u, ", n_ptr->bclink.gap_after);
- info("Gap to: %u\n", n_ptr->bclink.gap_to);
- info("Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
+ info("Oos state: %u, ", n_ptr->bclink.oos_state);
+ info("Last sent: %u\n", n_ptr->bclink.last_sent);
tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
@@ -1736,7 +1736,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
/* Release acked messages */
- if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
+ if (n_ptr->bclink.supported)
tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
crs = l_ptr->first_out;
@@ -1774,6 +1774,7 @@ protocol_check:
head = link_insert_deferred_queue(l_ptr,
head);
if (likely(msg_is_dest(msg, tipc_own_addr))) {
+ int ret;
deliver:
if (likely(msg_isdata(msg))) {
tipc_node_unlock(n_ptr);
@@ -1798,11 +1799,15 @@ deliver:
continue;
case MSG_FRAGMENTER:
l_ptr->stats.recv_fragments++;
- if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
- &buf, &msg)) {
+ ret = tipc_link_recv_fragment(
+ &l_ptr->defragm_buf,
+ &buf, &msg);
+ if (ret == 1) {
l_ptr->stats.recv_fragmented++;
goto deliver;
}
+ if (ret == -1)
+ l_ptr->next_in_no--;
break;
case CHANGEOVER_PROTOCOL:
type = msg_type(msg);
@@ -1853,17 +1858,16 @@ cont:
}
/*
- * link_defer_buf(): Sort a received out-of-sequence packet
- * into the deferred reception queue.
- * Returns the increase of the queue length,i.e. 0 or 1
+ * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
+ *
+ * Returns increase in queue length (i.e. 0 or 1)
*/
-u32 tipc_link_defer_pkt(struct sk_buff **head,
- struct sk_buff **tail,
+u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
struct sk_buff *buf)
{
- struct sk_buff *prev = NULL;
- struct sk_buff *crs = *head;
+ struct sk_buff *queue_buf;
+ struct sk_buff **prev;
u32 seq_no = buf_seqno(buf);
buf->next = NULL;
@@ -1881,31 +1885,30 @@ u32 tipc_link_defer_pkt(struct sk_buff **head,
return 1;
}
- /* Scan through queue and sort it in */
- do {
- struct tipc_msg *msg = buf_msg(crs);
+ /* Locate insertion point in queue, then insert; discard if duplicate */
+ prev = head;
+ queue_buf = *head;
+ for (;;) {
+ u32 curr_seqno = buf_seqno(queue_buf);
- if (less(seq_no, msg_seqno(msg))) {
- buf->next = crs;
- if (prev)
- prev->next = buf;
- else
- *head = buf;
- return 1;
+ if (seq_no == curr_seqno) {
+ buf_discard(buf);
+ return 0;
}
- if (seq_no == msg_seqno(msg))
+
+ if (less(seq_no, curr_seqno))
break;
- prev = crs;
- crs = crs->next;
- } while (crs);
- /* Message is a duplicate of an existing message */
+ prev = &queue_buf->next;
+ queue_buf = queue_buf->next;
+ }
- buf_discard(buf);
- return 0;
+ buf->next = queue_buf;
+ *prev = buf;
+ return 1;
}
-/**
+/*
* link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
*/
@@ -1956,6 +1959,13 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
u32 msg_size = sizeof(l_ptr->proto_msg);
int r_flag;
+ /* Discard any previous message that was deferred due to congestion */
+
+ if (l_ptr->proto_msg_queue) {
+ buf_discard(l_ptr->proto_msg_queue);
+ l_ptr->proto_msg_queue = NULL;
+ }
+
if (link_blocked(l_ptr))
return;
@@ -1964,9 +1974,11 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
return;
+ /* Create protocol message with "out-of-sequence" sequence number */
+
msg_set_type(msg, msg_typ);
msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
- msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
+ msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
if (msg_typ == STATE_MSG) {
@@ -2020,44 +2032,36 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
msg_set_redundant_link(msg, r_flag);
msg_set_linkprio(msg, l_ptr->priority);
-
- /* Ensure sequence number will not fit : */
+ msg_set_size(msg, msg_size);
msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
- /* Congestion? */
-
- if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
- if (!l_ptr->proto_msg_queue) {
- l_ptr->proto_msg_queue =
- tipc_buf_acquire(sizeof(l_ptr->proto_msg));
- }
- buf = l_ptr->proto_msg_queue;
- if (!buf)
- return;
- skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
- return;
- }
-
- /* Message can be sent */
-
buf = tipc_buf_acquire(msg_size);
if (!buf)
return;
skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
- msg_set_size(buf_msg(buf), msg_size);
- if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
- l_ptr->unacked_window = 0;
- buf_discard(buf);
+ /* Defer message if bearer is already congested */
+
+ if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
+ l_ptr->proto_msg_queue = buf;
return;
}
- /* New congestion */
- tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
- l_ptr->proto_msg_queue = buf;
- l_ptr->stats.bearer_congs++;
+ /* Defer message if attempting to send results in bearer congestion */
+
+ if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+ tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
+ l_ptr->proto_msg_queue = buf;
+ l_ptr->stats.bearer_congs++;
+ return;
+ }
+
+ /* Discard message if it was sent successfully */
+
+ l_ptr->unacked_window = 0;
+ buf_discard(buf);
}
/*
@@ -2105,6 +2109,8 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
l_ptr->owner->block_setup = WAIT_NODE_DOWN;
}
+ link_state_event(l_ptr, RESET_MSG);
+
/* fall thru' */
case ACTIVATE_MSG:
/* Update link settings according other endpoint's values */
@@ -2127,16 +2133,22 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
} else {
l_ptr->max_pkt = l_ptr->max_pkt_target;
}
- l_ptr->owner->bclink.supported = (max_pkt_info != 0);
+ l_ptr->owner->bclink.supportable = (max_pkt_info != 0);
- link_state_event(l_ptr, msg_type(msg));
+ /* Synchronize broadcast link info, if not done previously */
+
+ if (!tipc_node_is_up(l_ptr->owner)) {
+ l_ptr->owner->bclink.last_sent =
+ l_ptr->owner->bclink.last_in =
+ msg_last_bcast(msg);
+ l_ptr->owner->bclink.oos_state = 0;
+ }
l_ptr->peer_session = msg_session(msg);
l_ptr->peer_bearer_id = msg_bearer_id(msg);
- /* Synchronize broadcast sequence numbers */
- if (!tipc_node_redundant_links(l_ptr->owner))
- l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
+ if (msg_type(msg) == ACTIVATE_MSG)
+ link_state_event(l_ptr, ACTIVATE_MSG);
break;
case STATE_MSG:
@@ -2177,7 +2189,9 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
/* Protocol message before retransmits, reduce loss risk */
- tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
+ if (l_ptr->owner->bclink.supported)
+ tipc_bclink_update_link_state(l_ptr->owner,
+ msg_last_bcast(msg));
if (rec_gap || (msg_probe(msg))) {
tipc_link_send_proto_msg(l_ptr, STATE_MSG,
@@ -2623,7 +2637,9 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
set_fragm_size(pbuf, fragm_sz);
set_expected_frags(pbuf, exp_fragm_cnt - 1);
} else {
- warn("Link unable to reassemble fragmented message\n");
+ dbg("Link unable to reassemble fragmented message\n");
+ buf_discard(fbuf);
+ return -1;
}
buf_discard(fbuf);
return 0;
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 98ebb37f1808..acecfda82f37 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -239,9 +239,6 @@ exit:
*
* Invoked for each publication issued by a newly failed node.
* Removes publication structure from name table & deletes it.
- * In rare cases the link may have come back up again when this
- * function is called, and we have two items representing the same
- * publication. Nudge this item's key to distinguish it from the other.
*/
static void named_purge_publ(struct publication *publ)
@@ -249,7 +246,6 @@ static void named_purge_publ(struct publication *publ)
struct publication *p;
write_lock_bh(&tipc_nametbl_lock);
- publ->key += 1222345;
p = tipc_nametbl_remove_publ(publ->type, publ->lower,
publ->node, publ->ref, publ->key);
if (p)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 6b226faad89f..7bc45e135fb4 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -49,9 +49,8 @@ LIST_HEAD(tipc_node_list);
static u32 tipc_num_nodes;
static atomic_t tipc_num_links = ATOMIC_INIT(0);
-u32 tipc_own_tag;
-/**
+/*
* tipc_node_find - locate specified node object, if it exists
*/
@@ -306,10 +305,9 @@ static void node_established_contact(struct tipc_node *n_ptr)
 /* Synchronize broadcast acks */
n_ptr->bclink.acked = tipc_bclink_get_last_sent();
- if (n_ptr->bclink.supported) {
+ if (n_ptr->bclink.supportable) {
tipc_bclink_add_node(n_ptr->addr);
- if (n_ptr->addr < tipc_own_addr)
- tipc_own_tag++;
+ n_ptr->bclink.supported = 1;
}
}
@@ -338,12 +336,12 @@ static void node_lost_contact(struct tipc_node *n_ptr)
/* Flush broadcast link info associated with lost node */
if (n_ptr->bclink.supported) {
- n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
while (n_ptr->bclink.deferred_head) {
struct sk_buff *buf = n_ptr->bclink.deferred_head;
n_ptr->bclink.deferred_head = buf->next;
buf_discard(buf);
}
+ n_ptr->bclink.deferred_size = 0;
if (n_ptr->bclink.defragm) {
buf_discard(n_ptr->bclink.defragm);
@@ -352,8 +350,6 @@ static void node_lost_contact(struct tipc_node *n_ptr)
tipc_bclink_remove_node(n_ptr->addr);
tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
- if (n_ptr->addr < tipc_own_addr)
- tipc_own_tag--;
n_ptr->bclink.supported = 0;
}
@@ -449,7 +445,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
read_lock_bh(&tipc_net_lock);
- /* Get space for all unicast links + multicast link */
+ /* Get space for all unicast links + broadcast link */
payload_size = TLV_SPACE(sizeof(link_info)) *
(atomic_read(&tipc_num_links) + 1);
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 0b1c5f8b6996..e1b78a2199c2 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -62,12 +62,13 @@
* @link_cnt: number of links to node
* @permit_changeover: non-zero if node has redundant links to this system
* @bclink: broadcast-related info
+ * @supportable: non-zero if node supports TIPC b'cast link capability
* @supported: non-zero if node supports TIPC b'cast capability
* @acked: sequence # of last outbound b'cast message acknowledged by node
* @last_in: sequence # of last in-sequence b'cast message received from node
- * @gap_after: sequence # of last message not requiring a NAK request
- * @gap_to: sequence # of last message requiring a NAK request
- * @nack_sync: counter that determines when NAK requests should be sent
+ * @last_sent: sequence # of last b'cast message sent by node
+ * @oos_state: state tracker for handling OOS b'cast messages
+ * @deferred_size: number of OOS b'cast messages in deferred queue
* @deferred_head: oldest OOS b'cast message received from node
* @deferred_tail: newest OOS b'cast message received from node
* @defragm: list of partially reassembled b'cast message fragments from node
@@ -86,12 +87,13 @@ struct tipc_node {
int block_setup;
int permit_changeover;
struct {
- int supported;
+ u8 supportable;
+ u8 supported;
u32 acked;
u32 last_in;
- u32 gap_after;
- u32 gap_to;
- u32 nack_sync;
+ u32 last_sent;
+ u32 oos_state;
+ u32 deferred_size;
struct sk_buff *deferred_head;
struct sk_buff *deferred_tail;
struct sk_buff *defragm;
@@ -112,8 +114,6 @@ static inline unsigned int tipc_hashfn(u32 addr)
return addr & (NODE_HTABLE_SIZE - 1);
}
-extern u32 tipc_own_tag;
-
struct tipc_node *tipc_node_find(u32 addr);
struct tipc_node *tipc_node_create(u32 addr);
void tipc_node_delete(struct tipc_node *n_ptr);
diff --git a/net/tipc/port.c b/net/tipc/port.c
index d91efc69e6f9..ba3268b8da42 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -400,15 +400,16 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
/* send self-abort message when rejecting on a connected port */
if (msg_connected(msg)) {
- struct sk_buff *abuf = NULL;
struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg));
if (p_ptr) {
+ struct sk_buff *abuf = NULL;
+
if (p_ptr->connected)
abuf = port_build_self_abort_msg(p_ptr, err);
tipc_port_unlock(p_ptr);
+ tipc_net_route_msg(abuf);
}
- tipc_net_route_msg(abuf);
}
/* send returned message & dispose of rejected message */
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 85d3bb7490aa..8ee85aa79fa7 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -530,6 +530,16 @@ static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
struct msghdr *, size_t, int);
+static void unix_set_peek_off(struct sock *sk, int val)
+{
+ struct unix_sock *u = unix_sk(sk);
+
+ mutex_lock(&u->readlock);
+ sk->sk_peek_off = val;
+ mutex_unlock(&u->readlock);
+}
+
+
static const struct proto_ops unix_stream_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
@@ -549,6 +559,7 @@ static const struct proto_ops unix_stream_ops = {
.recvmsg = unix_stream_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
+ .set_peek_off = unix_set_peek_off,
};
static const struct proto_ops unix_dgram_ops = {
@@ -570,6 +581,7 @@ static const struct proto_ops unix_dgram_ops = {
.recvmsg = unix_dgram_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
+ .set_peek_off = unix_set_peek_off,
};
static const struct proto_ops unix_seqpacket_ops = {
@@ -591,6 +603,7 @@ static const struct proto_ops unix_seqpacket_ops = {
.recvmsg = unix_seqpacket_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
+ .set_peek_off = unix_set_peek_off,
};
static struct proto unix_proto = {
@@ -1756,6 +1769,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
int noblock = flags & MSG_DONTWAIT;
struct sk_buff *skb;
int err;
+ int peeked, skip;
err = -EOPNOTSUPP;
if (flags&MSG_OOB)
@@ -1769,7 +1783,9 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
}
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skip = sk_peek_offset(sk, flags);
+
+ skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
if (!skb) {
unix_state_lock(sk);
/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
@@ -1786,12 +1802,12 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
if (msg->msg_name)
unix_copy_addr(msg, skb->sk);
- if (size > skb->len)
- size = skb->len;
- else if (size < skb->len)
+ if (size > skb->len - skip)
+ size = skb->len - skip;
+ else if (size < skb->len - skip)
msg->msg_flags |= MSG_TRUNC;
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
+ err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
if (err)
goto out_free;
@@ -1808,6 +1824,8 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!(flags & MSG_PEEK)) {
if (UNIXCB(skb).fp)
unix_detach_fds(siocb->scm, skb);
+
+ sk_peek_offset_bwd(sk, skb->len);
} else {
/* It is questionable: on PEEK we could:
- do not return fds - good, but too simple 8)
@@ -1821,10 +1839,13 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
clearly however!
*/
+
+ sk_peek_offset_fwd(sk, size);
+
if (UNIXCB(skb).fp)
siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
}
- err = size;
+ err = (flags & MSG_TRUNC) ? skb->len - skip : size;
scm_recv(sock, msg, siocb->scm, flags);
@@ -1884,6 +1905,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
int target;
int err = 0;
long timeo;
+ int skip;
err = -EINVAL;
if (sk->sk_state != TCP_ESTABLISHED)
@@ -1913,12 +1935,15 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
}
+ skip = sk_peek_offset(sk, flags);
+
do {
int chunk;
struct sk_buff *skb;
unix_state_lock(sk);
skb = skb_peek(&sk->sk_receive_queue);
+again:
if (skb == NULL) {
unix_sk(sk)->recursion_level = 0;
if (copied >= target)
@@ -1953,6 +1978,13 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
unix_state_unlock(sk);
break;
}
+
+ if (skip >= skb->len) {
+ skip -= skb->len;
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ goto again;
+ }
+
unix_state_unlock(sk);
if (check_creds) {
@@ -1972,8 +2004,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
sunaddr = NULL;
}
- chunk = min_t(unsigned int, skb->len, size);
- if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+ chunk = min_t(unsigned int, skb->len - skip, size);
+ if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) {
if (copied == 0)
copied = -EFAULT;
break;
@@ -1985,6 +2017,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!(flags & MSG_PEEK)) {
skb_pull(skb, chunk);
+ sk_peek_offset_bwd(sk, chunk);
+
if (UNIXCB(skb).fp)
unix_detach_fds(siocb->scm, skb);
@@ -2002,6 +2036,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if (UNIXCB(skb).fp)
siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+ sk_peek_offset_fwd(sk, chunk);
+
break;
}
} while (size);
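As a reference for the peek-offset plumbing above: with unix_set_peek_off()
wired into .set_peek_off, a user-space reader can enable SO_PEEK_OFF and walk
queued data with MSG_PEEK without consuming it. A minimal sketch, assuming the
installed libc headers expose SO_PEEK_OFF and that a negative value disables
the offset again:

	#include <sys/types.h>
	#include <sys/socket.h>

	/* Sketch: peek through an AF_UNIX socket's receive queue without
	 * consuming it. With SO_PEEK_OFF enabled, each MSG_PEEK read starts
	 * where the previous one stopped (sk_peek_off advances on peek and
	 * is rewound when data is actually consumed, per the recvmsg changes
	 * above).
	 */
	static void peek_walk(int fd)
	{
		char buf[256];
		int off = 0;
		ssize_t n;

		if (setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off)) < 0)
			return;		/* kernel or headers lack SO_PEEK_OFF */

		/* Each call sees the bytes following the previous peek. */
		while ((n = recv(fd, buf, sizeof(buf), MSG_PEEK | MSG_DONTWAIT)) > 0)
			;		/* inspect buf[0..n) here */

		off = -1;		/* negative offset disables peek-with-offset */
		setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
	}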
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 6b7697fd911b..4195555aea65 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -301,10 +301,12 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
if (nlmsg_len(h) < hdrlen)
return -EINVAL;
- if (h->nlmsg_flags & NLM_F_DUMP)
- return netlink_dump_start(sock_diag_nlsk, skb, h,
- unix_diag_dump, NULL, 0);
- else
+ if (h->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = unix_diag_dump,
+ };
+ return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
+ } else
return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
}
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 43ad9c81efcf..3ac2dd00d714 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -144,11 +144,6 @@ static inline struct cfg80211_internal_bss *bss_from_pub(struct cfg80211_bss *pu
return container_of(pub, struct cfg80211_internal_bss, pub);
}
-static inline void cfg80211_ref_bss(struct cfg80211_internal_bss *bss)
-{
- kref_get(&bss->ref);
-}
-
static inline void cfg80211_hold_bss(struct cfg80211_internal_bss *bss)
{
atomic_inc(&bss->hold);
@@ -325,15 +320,13 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
const u8 *bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx,
- bool local_state_change);
+ const u8 *key, int key_len, int key_idx);
int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
struct net_device *dev, struct ieee80211_channel *chan,
enum nl80211_auth_type auth_type, const u8 *bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx,
- bool local_state_change);
+ const u8 *key, int key_len, int key_idx);
int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct ieee80211_channel *chan,
@@ -421,7 +414,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
size_t ie_len, u16 reason, bool from_ap);
void cfg80211_sme_scan_done(struct net_device *dev);
void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len);
-void cfg80211_sme_disassoc(struct net_device *dev, int idx);
+void cfg80211_sme_disassoc(struct net_device *dev,
+ struct cfg80211_internal_bss *bss);
void __cfg80211_scan_done(struct work_struct *wk);
void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak);
void __cfg80211_sched_scan_results(struct work_struct *wk);
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 8c550df13037..9d3e3b6bfcf4 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -55,6 +55,7 @@ const struct mesh_config default_mesh_config = {
.min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT,
.dot11MeshHWMPRannInterval = MESH_RANN_INTERVAL,
.dot11MeshGateAnnouncementProtocol = false,
+ .dot11MeshForwarding = true,
};
const struct mesh_setup default_mesh_setup = {
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 438dfc105b4a..d553d365e751 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -20,40 +20,18 @@ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
- u8 *bssid = mgmt->bssid;
- int i;
- u16 status = le16_to_cpu(mgmt->u.auth.status_code);
- bool done = false;
wdev_lock(wdev);
- for (i = 0; i < MAX_AUTH_BSSES; i++) {
- if (wdev->authtry_bsses[i] &&
- memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid,
- ETH_ALEN) == 0) {
- if (status == WLAN_STATUS_SUCCESS) {
- wdev->auth_bsses[i] = wdev->authtry_bsses[i];
- } else {
- cfg80211_unhold_bss(wdev->authtry_bsses[i]);
- cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
- }
- wdev->authtry_bsses[i] = NULL;
- done = true;
- break;
- }
- }
-
- if (done) {
- nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
- cfg80211_sme_rx_auth(dev, buf, len);
- }
+ nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
+ cfg80211_sme_rx_auth(dev, buf, len);
wdev_unlock(wdev);
}
EXPORT_SYMBOL(cfg80211_send_rx_auth);
-void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
+void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
+ const u8 *buf, size_t len)
{
u16 status_code;
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -61,8 +39,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
u8 *ie = mgmt->u.assoc_resp.variable;
- int i, ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
- struct cfg80211_internal_bss *bss = NULL;
+ int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
wdev_lock(wdev);
@@ -75,43 +52,20 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
* frame instead of reassoc.
*/
if (status_code != WLAN_STATUS_SUCCESS && wdev->conn &&
- cfg80211_sme_failed_reassoc(wdev))
+ cfg80211_sme_failed_reassoc(wdev)) {
+ cfg80211_put_bss(bss);
goto out;
+ }
nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL);
- if (status_code == WLAN_STATUS_SUCCESS) {
- for (i = 0; i < MAX_AUTH_BSSES; i++) {
- if (!wdev->auth_bsses[i])
- continue;
- if (memcmp(wdev->auth_bsses[i]->pub.bssid, mgmt->bssid,
- ETH_ALEN) == 0) {
- bss = wdev->auth_bsses[i];
- wdev->auth_bsses[i] = NULL;
- /* additional reference to drop hold */
- cfg80211_ref_bss(bss);
- break;
- }
- }
-
- /*
- * We might be coming here because the driver reported
- * a successful association at the same time as the
- * user requested a deauth. In that case, we will have
- * removed the BSS from the auth_bsses list due to the
- * deauth request when the assoc response makes it. If
- * the two code paths acquire the lock the other way
- * around, that's just the standard situation of a
- * deauth being requested while connected.
- */
- if (!bss)
- goto out;
- } else if (wdev->conn) {
+ if (status_code != WLAN_STATUS_SUCCESS && wdev->conn) {
cfg80211_sme_failed_assoc(wdev);
/*
* do not call connect_result() now because the
* sme will schedule work that does it later.
*/
+ cfg80211_put_bss(bss);
goto out;
}
@@ -124,17 +78,10 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
wdev->sme_state = CFG80211_SME_CONNECTING;
}
- /* this consumes one bss reference (unless bss is NULL) */
+ /* this consumes the bss reference */
__cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
status_code,
- status_code == WLAN_STATUS_SUCCESS,
- bss ? &bss->pub : NULL);
- /* drop hold now, and also reference acquired above */
- if (bss) {
- cfg80211_unhold_bss(bss);
- cfg80211_put_bss(&bss->pub);
- }
-
+ status_code == WLAN_STATUS_SUCCESS, bss);
out:
wdev_unlock(wdev);
}
@@ -148,8 +95,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
const u8 *bssid = mgmt->bssid;
- int i;
- bool found = false, was_current = false;
+ bool was_current = false;
ASSERT_WDEV_LOCK(wdev);
@@ -158,32 +104,9 @@ void __cfg80211_send_deauth(struct net_device *dev,
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(&wdev->current_bss->pub);
wdev->current_bss = NULL;
- found = true;
was_current = true;
- } else for (i = 0; i < MAX_AUTH_BSSES; i++) {
- if (wdev->auth_bsses[i] &&
- memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
- cfg80211_unhold_bss(wdev->auth_bsses[i]);
- cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
- wdev->auth_bsses[i] = NULL;
- found = true;
- break;
- }
- if (wdev->authtry_bsses[i] &&
- memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid,
- ETH_ALEN) == 0 &&
- memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) == 0) {
- cfg80211_unhold_bss(wdev->authtry_bsses[i]);
- cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
- wdev->authtry_bsses[i] = NULL;
- found = true;
- break;
- }
}
- if (!found)
- return;
-
nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
if (wdev->sme_state == CFG80211_SME_CONNECTED && was_current) {
@@ -220,10 +143,8 @@ void __cfg80211_send_disassoc(struct net_device *dev,
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
const u8 *bssid = mgmt->bssid;
- int i;
u16 reason_code;
bool from_ap;
- bool done = false;
ASSERT_WDEV_LOCK(wdev);
@@ -234,16 +155,10 @@ void __cfg80211_send_disassoc(struct net_device *dev,
if (wdev->current_bss &&
memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
- for (i = 0; i < MAX_AUTH_BSSES; i++) {
- if (wdev->authtry_bsses[i] || wdev->auth_bsses[i])
- continue;
- wdev->auth_bsses[i] = wdev->current_bss;
- wdev->current_bss = NULL;
- done = true;
- cfg80211_sme_disassoc(dev, i);
- break;
- }
- WARN_ON(!done);
+ cfg80211_sme_disassoc(dev, wdev->current_bss);
+ cfg80211_unhold_bss(wdev->current_bss);
+ cfg80211_put_bss(&wdev->current_bss->pub);
+ wdev->current_bss = NULL;
} else
WARN_ON(1);
@@ -287,34 +202,6 @@ void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
}
EXPORT_SYMBOL(cfg80211_send_unprot_disassoc);
-static void __cfg80211_auth_remove(struct wireless_dev *wdev, const u8 *addr)
-{
- int i;
- bool done = false;
-
- ASSERT_WDEV_LOCK(wdev);
-
- for (i = 0; addr && i < MAX_AUTH_BSSES; i++) {
- if (wdev->authtry_bsses[i] &&
- memcmp(wdev->authtry_bsses[i]->pub.bssid,
- addr, ETH_ALEN) == 0) {
- cfg80211_unhold_bss(wdev->authtry_bsses[i]);
- cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
- wdev->authtry_bsses[i] = NULL;
- done = true;
- break;
- }
- }
-
- WARN_ON(!done);
-}
-
-void __cfg80211_auth_canceled(struct net_device *dev, const u8 *addr)
-{
- __cfg80211_auth_remove(dev->ieee80211_ptr, addr);
-}
-EXPORT_SYMBOL(__cfg80211_auth_canceled);
-
void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -329,8 +216,6 @@ void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
WLAN_STATUS_UNSPECIFIED_FAILURE,
false, NULL);
- __cfg80211_auth_remove(wdev, addr);
-
wdev_unlock(wdev);
}
EXPORT_SYMBOL(cfg80211_send_auth_timeout);
@@ -340,8 +225,6 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr)
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- int i;
- bool done = false;
wdev_lock(wdev);
@@ -351,20 +234,6 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr)
WLAN_STATUS_UNSPECIFIED_FAILURE,
false, NULL);
- for (i = 0; addr && i < MAX_AUTH_BSSES; i++) {
- if (wdev->auth_bsses[i] &&
- memcmp(wdev->auth_bsses[i]->pub.bssid,
- addr, ETH_ALEN) == 0) {
- cfg80211_unhold_bss(wdev->auth_bsses[i]);
- cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
- wdev->auth_bsses[i] = NULL;
- done = true;
- break;
- }
- }
-
- WARN_ON(!done);
-
wdev_unlock(wdev);
}
EXPORT_SYMBOL(cfg80211_send_assoc_timeout);
@@ -403,13 +272,11 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
const u8 *bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx,
- bool local_state_change)
+ const u8 *key, int key_len, int key_idx)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_auth_request req;
- struct cfg80211_internal_bss *bss;
- int i, err, slot = -1, nfree = 0;
+ int err;
ASSERT_WDEV_LOCK(wdev);
@@ -421,20 +288,8 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
memcmp(bssid, wdev->current_bss->pub.bssid, ETH_ALEN) == 0)
return -EALREADY;
- for (i = 0; i < MAX_AUTH_BSSES; i++) {
- if (wdev->authtry_bsses[i] &&
- memcmp(bssid, wdev->authtry_bsses[i]->pub.bssid,
- ETH_ALEN) == 0)
- return -EALREADY;
- if (wdev->auth_bsses[i] &&
- memcmp(bssid, wdev->auth_bsses[i]->pub.bssid,
- ETH_ALEN) == 0)
- return -EALREADY;
- }
-
memset(&req, 0, sizeof(req));
- req.local_state_change = local_state_change;
req.ie = ie;
req.ie_len = ie_len;
req.auth_type = auth_type;
@@ -446,39 +301,9 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
if (!req.bss)
return -ENOENT;
- bss = bss_from_pub(req.bss);
-
- for (i = 0; i < MAX_AUTH_BSSES; i++) {
- if (!wdev->auth_bsses[i] && !wdev->authtry_bsses[i]) {
- slot = i;
- nfree++;
- }
- }
-
- /* we need one free slot for disassoc and one for this auth */
- if (nfree < 2) {
- err = -ENOSPC;
- goto out;
- }
-
- if (local_state_change)
- wdev->auth_bsses[slot] = bss;
- else
- wdev->authtry_bsses[slot] = bss;
- cfg80211_hold_bss(bss);
-
err = rdev->ops->auth(&rdev->wiphy, dev, &req);
- if (err) {
- if (local_state_change)
- wdev->auth_bsses[slot] = NULL;
- else
- wdev->authtry_bsses[slot] = NULL;
- cfg80211_unhold_bss(bss);
- }
- out:
- if (err)
- cfg80211_put_bss(req.bss);
+ cfg80211_put_bss(req.bss);
return err;
}
@@ -487,15 +312,14 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
enum nl80211_auth_type auth_type, const u8 *bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
- const u8 *key, int key_len, int key_idx,
- bool local_state_change)
+ const u8 *key, int key_len, int key_idx)
{
int err;
wdev_lock(dev->ieee80211_ptr);
err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
ssid, ssid_len, ie, ie_len,
- key, key_len, key_idx, local_state_change);
+ key, key_len, key_idx);
wdev_unlock(dev->ieee80211_ptr);
return err;
@@ -530,8 +354,7 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_assoc_request req;
- struct cfg80211_internal_bss *bss;
- int i, err, slot = -1;
+ int err;
bool was_connected = false;
ASSERT_WDEV_LOCK(wdev);
@@ -573,26 +396,14 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
return -ENOENT;
}
- bss = bss_from_pub(req.bss);
-
- for (i = 0; i < MAX_AUTH_BSSES; i++) {
- if (bss == wdev->auth_bsses[i]) {
- slot = i;
- break;
- }
- }
+ err = rdev->ops->assoc(&rdev->wiphy, dev, &req);
- if (slot < 0) {
- err = -ENOTCONN;
- goto out;
+ if (err) {
+ if (was_connected)
+ wdev->sme_state = CFG80211_SME_CONNECTED;
+ cfg80211_put_bss(req.bss);
}
- err = rdev->ops->assoc(&rdev->wiphy, dev, &req);
- out:
- if (err && was_connected)
- wdev->sme_state = CFG80211_SME_CONNECTED;
- /* still a reference in wdev->auth_bsses[slot] */
- cfg80211_put_bss(req.bss);
return err;
}
@@ -624,34 +435,25 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
bool local_state_change)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct cfg80211_deauth_request req;
- int i;
+ struct cfg80211_deauth_request req = {
+ .bssid = bssid,
+ .reason_code = reason,
+ .ie = ie,
+ .ie_len = ie_len,
+ };
ASSERT_WDEV_LOCK(wdev);
- memset(&req, 0, sizeof(req));
- req.reason_code = reason;
- req.local_state_change = local_state_change;
- req.ie = ie;
- req.ie_len = ie_len;
- if (wdev->current_bss &&
- memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
- req.bss = &wdev->current_bss->pub;
- } else for (i = 0; i < MAX_AUTH_BSSES; i++) {
- if (wdev->auth_bsses[i] &&
- memcmp(bssid, wdev->auth_bsses[i]->pub.bssid, ETH_ALEN) == 0) {
- req.bss = &wdev->auth_bsses[i]->pub;
- break;
- }
- if (wdev->authtry_bsses[i] &&
- memcmp(bssid, wdev->authtry_bsses[i]->pub.bssid, ETH_ALEN) == 0) {
- req.bss = &wdev->authtry_bsses[i]->pub;
- break;
+ if (local_state_change) {
+ if (wdev->current_bss &&
+ memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
+ cfg80211_unhold_bss(wdev->current_bss);
+ cfg80211_put_bss(&wdev->current_bss->pub);
+ wdev->current_bss = NULL;
}
- }
- if (!req.bss)
- return -ENOTCONN;
+ return 0;
+ }
return rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev);
}
@@ -722,7 +524,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_deauth_request req;
- int i;
+ u8 bssid[ETH_ALEN];
ASSERT_WDEV_LOCK(wdev);
@@ -734,35 +536,17 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
req.ie = NULL;
req.ie_len = 0;
- if (wdev->current_bss) {
- req.bss = &wdev->current_bss->pub;
- rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev);
- if (wdev->current_bss) {
- cfg80211_unhold_bss(wdev->current_bss);
- cfg80211_put_bss(&wdev->current_bss->pub);
- wdev->current_bss = NULL;
- }
- }
+ if (!wdev->current_bss)
+ return;
- for (i = 0; i < MAX_AUTH_BSSES; i++) {
- if (wdev->auth_bsses[i]) {
- req.bss = &wdev->auth_bsses[i]->pub;
- rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev);
- if (wdev->auth_bsses[i]) {
- cfg80211_unhold_bss(wdev->auth_bsses[i]);
- cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
- wdev->auth_bsses[i] = NULL;
- }
- }
- if (wdev->authtry_bsses[i]) {
- req.bss = &wdev->authtry_bsses[i]->pub;
- rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev);
- if (wdev->authtry_bsses[i]) {
- cfg80211_unhold_bss(wdev->authtry_bsses[i]);
- cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
- wdev->authtry_bsses[i] = NULL;
- }
- }
+ memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
+ req.bssid = bssid;
+ rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev);
+
+ if (wdev->current_bss) {
+ cfg80211_unhold_bss(wdev->current_bss);
+ cfg80211_put_bss(&wdev->current_bss->pub);
+ wdev->current_bss = NULL;
}
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index afeea32e04ad..fe2747653564 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -427,10 +427,9 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
if (tb[NL80211_KEY_DEFAULT_TYPES]) {
struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES];
- int err = nla_parse_nested(kdt,
- NUM_NL80211_KEY_DEFAULT_TYPES - 1,
- tb[NL80211_KEY_DEFAULT_TYPES],
- nl80211_key_default_policy);
+ err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1,
+ tb[NL80211_KEY_DEFAULT_TYPES],
+ nl80211_key_default_policy);
if (err)
return err;
@@ -2655,13 +2654,6 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_STATION:
- /* disallow things sta doesn't support */
- if (params.plink_action)
- return -EINVAL;
- if (params.ht_capa)
- return -EINVAL;
- if (params.listen_interval >= 0)
- return -EINVAL;
/*
* Don't allow userspace to change the TDLS_PEER flag,
* but silently ignore attempts to change it since we
@@ -2669,7 +2661,15 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
* to change the flag.
*/
params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
-
+ /* fall through */
+ case NL80211_IFTYPE_ADHOC:
+ /* disallow things sta doesn't support */
+ if (params.plink_action)
+ return -EINVAL;
+ if (params.ht_capa)
+ return -EINVAL;
+ if (params.listen_interval >= 0)
+ return -EINVAL;
/* reject any changes other than AUTHORIZED */
if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
return -EINVAL;
@@ -3259,6 +3259,8 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
cur_params.dot11MeshHWMPRannInterval);
NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
cur_params.dot11MeshGateAnnouncementProtocol);
+ NLA_PUT_U8(msg, NL80211_MESHCONF_FORWARDING,
+ cur_params.dot11MeshForwarding);
nla_nest_end(msg, pinfoattr);
genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
@@ -3290,6 +3292,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
[NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 },
[NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
[NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
+ [NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 },
};
static const struct nla_policy
@@ -3379,6 +3382,8 @@ do {\
dot11MeshGateAnnouncementProtocol, mask,
NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding,
+ mask, NL80211_MESHCONF_FORWARDING, nla_get_u8);
if (mask_out)
*mask_out = mask;
@@ -4079,7 +4084,6 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
struct cfg80211_bss *res = &intbss->pub;
void *hdr;
struct nlattr *bss;
- int i;
ASSERT_WDEV_LOCK(wdev);
@@ -4132,13 +4136,6 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
if (intbss == wdev->current_bss)
NLA_PUT_U32(msg, NL80211_BSS_STATUS,
NL80211_BSS_STATUS_ASSOCIATED);
- else for (i = 0; i < MAX_AUTH_BSSES; i++) {
- if (intbss != wdev->auth_bsses[i])
- continue;
- NLA_PUT_U32(msg, NL80211_BSS_STATUS,
- NL80211_BSS_STATUS_AUTHENTICATED);
- break;
- }
break;
case NL80211_IFTYPE_ADHOC:
if (intbss == wdev->current_bss)
@@ -4406,10 +4403,16 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
+ /*
+ * Since we no longer track auth state, ignore
+ * requests to only change local state.
+ */
+ if (local_state_change)
+ return 0;
+
return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
ssid, ssid_len, ie, ie_len,
- key.p.key, key.p.key_len, key.idx,
- local_state_change);
+ key.p.key, key.p.key_len, key.idx);
}
static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
@@ -4781,7 +4784,6 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
struct ieee80211_supported_band *sband =
wiphy->bands[ibss.channel->band];
- int err;
err = ieee80211_get_ratemask(sband, rates, n_rates,
&ibss.basic_rates);
@@ -4801,6 +4803,9 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(connkeys);
}
+ ibss.control_port =
+ nla_get_flag(info->attrs[NL80211_ATTR_CONTROL_PORT]);
+
err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
if (err)
kfree(connkeys);
@@ -5390,9 +5395,39 @@ static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
return mask;
}
+static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
+ u8 *rates, u8 rates_len,
+ u8 mcs[IEEE80211_HT_MCS_MASK_LEN])
+{
+ u8 i;
+
+ memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN);
+
+ for (i = 0; i < rates_len; i++) {
+ int ridx, rbit;
+
+ ridx = rates[i] / 8;
+ rbit = BIT(rates[i] % 8);
+
+ /* check validity */
+ if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN))
+ return false;
+
+ /* check availability */
+ if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
+ mcs[ridx] |= rbit;
+ else
+ return false;
+ }
+
+ return true;
+}
+
static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
[NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
.len = NL80211_MAX_SUPP_RATES },
+ [NL80211_TXRATE_MCS] = { .type = NLA_BINARY,
+ .len = NL80211_MAX_SUPP_HT_RATES },
};
static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
@@ -5418,12 +5453,20 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
sband = rdev->wiphy.bands[i];
mask.control[i].legacy =
sband ? (1 << sband->n_bitrates) - 1 : 0;
+ if (sband)
+ memcpy(mask.control[i].mcs,
+ sband->ht_cap.mcs.rx_mask,
+ sizeof(mask.control[i].mcs));
+ else
+ memset(mask.control[i].mcs, 0,
+ sizeof(mask.control[i].mcs));
}
/*
* The nested attribute uses enum nl80211_band as the index. This maps
* directly to the enum ieee80211_band values used in cfg80211.
*/
+ BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem)
{
enum ieee80211_band band = nla_type(tx_rates);
@@ -5439,7 +5482,28 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
sband,
nla_data(tb[NL80211_TXRATE_LEGACY]),
nla_len(tb[NL80211_TXRATE_LEGACY]));
- if (mask.control[band].legacy == 0)
+ }
+ if (tb[NL80211_TXRATE_MCS]) {
+ if (!ht_rateset_to_mask(
+ sband,
+ nla_data(tb[NL80211_TXRATE_MCS]),
+ nla_len(tb[NL80211_TXRATE_MCS]),
+ mask.control[band].mcs))
+ return -EINVAL;
+ }
+
+ if (mask.control[band].legacy == 0) {
+ /* don't allow empty legacy rates if HT
+ * is not even supported. */
+ if (!rdev->wiphy.bands[band]->ht_cap.ht_supported)
+ return -EINVAL;
+
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+ if (mask.control[band].mcs[i])
+ break;
+
+ /* legacy and mcs rates may not be both empty */
+ if (i == IEEE80211_HT_MCS_MASK_LEN)
return -EINVAL;
}
}
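A worked example of the MCS mask construction in ht_rateset_to_mask() above
(values chosen purely for illustration): a requested MCS index of 12 maps to
ridx = 12 / 8 = 1 and rbit = BIT(12 % 8) = 0x10, so the request is accepted
only if sband->ht_cap.mcs.rx_mask[1] has bit 0x10 set, in which case
mask.control[band].mcs[1] |= 0x10. Any index at or beyond
IEEE80211_HT_MCS_MASK_LEN * 8 fails the validity check, and the whole
NL80211_TXRATE_MCS attribute is then rejected with -EINVAL.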
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f65feaad155f..e9a0ac83b84c 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -882,23 +882,8 @@ static void handle_channel(struct wiphy *wiphy,
chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
chan->max_antenna_gain = min(chan->orig_mag,
(int) MBI_TO_DBI(power_rule->max_antenna_gain));
- if (chan->orig_mpwr) {
- /*
- * Devices that have their own custom regulatory domain
- * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
- * passed country IE power settings.
- */
- if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
- wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
- wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
- chan->max_power =
- MBM_TO_DBM(power_rule->max_eirp);
- } else {
- chan->max_power = min(chan->orig_mpwr,
- (int) MBM_TO_DBM(power_rule->max_eirp));
- }
- } else
- chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
+ chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
+ chan->max_power = min(chan->max_power, chan->max_reg_power);
}
static void handle_band(struct wiphy *wiphy,
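For reference on the units in the handle_channel() change above: regulatory
power rules carry EIRP in mBm while chan->max_power is in dBm, and
MBM_TO_DBM() is assumed to be the usual divide-by-100 conversion. A worked
example with an illustrative rule value:

	max_eirp            = 2000 mBm   ->  MBM_TO_DBM(2000) = 20 dBm
	chan->max_reg_power = 20 dBm
	chan->max_power     = min(previous max_power, 20 dBm)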
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 31119e32e092..afde7e5f0010 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -861,6 +861,18 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
}
EXPORT_SYMBOL(cfg80211_inform_bss_frame);
+void cfg80211_ref_bss(struct cfg80211_bss *pub)
+{
+ struct cfg80211_internal_bss *bss;
+
+ if (!pub)
+ return;
+
+ bss = container_of(pub, struct cfg80211_internal_bss, pub);
+ kref_get(&bss->ref);
+}
+EXPORT_SYMBOL(cfg80211_ref_bss);
+
void cfg80211_put_bss(struct cfg80211_bss *pub)
{
struct cfg80211_internal_bss *bss;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 7b9ecaed96be..f7e937ff8978 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -179,7 +179,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
params->ssid, params->ssid_len,
NULL, 0,
params->key, params->key_len,
- params->key_idx, false);
+ params->key_idx);
case CFG80211_CONN_ASSOCIATE_NEXT:
BUG_ON(!rdev->ops->assoc);
wdev->conn->state = CFG80211_CONN_ASSOCIATING;
@@ -477,6 +477,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
kfree(wdev->connect_keys);
wdev->connect_keys = NULL;
wdev->ssid_len = 0;
+ cfg80211_put_bss(bss);
return;
}
@@ -701,31 +702,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
wdev->ssid_len = 0;
if (wdev->conn) {
- const u8 *bssid;
- int ret;
-
kfree(wdev->conn->ie);
wdev->conn->ie = NULL;
kfree(wdev->conn);
wdev->conn = NULL;
-
- /*
- * If this disconnect was due to a disassoc, we
- * we might still have an auth BSS around. For
- * the userspace SME that's currently expected,
- * but for the kernel SME (nl80211 CONNECT or
- * wireless extensions) we want to clear up all
- * state.
- */
- for (i = 0; i < MAX_AUTH_BSSES; i++) {
- if (!wdev->auth_bsses[i])
- continue;
- bssid = wdev->auth_bsses[i]->pub.bssid;
- ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
- WLAN_REASON_DEAUTH_LEAVING,
- false);
- WARN(ret, "deauth failed: %d\n", ret);
- }
}
nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
@@ -1012,7 +992,8 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
return err;
}
-void cfg80211_sme_disassoc(struct net_device *dev, int idx)
+void cfg80211_sme_disassoc(struct net_device *dev,
+ struct cfg80211_internal_bss *bss)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1031,16 +1012,8 @@ void cfg80211_sme_disassoc(struct net_device *dev, int idx)
* want it any more so deauthenticate too.
*/
- if (!wdev->auth_bsses[idx])
- return;
+ memcpy(bssid, bss->pub.bssid, ETH_ALEN);
- memcpy(bssid, wdev->auth_bsses[idx]->pub.bssid, ETH_ALEN);
- if (__cfg80211_mlme_deauth(rdev, dev, bssid,
- NULL, 0, WLAN_REASON_DEAUTH_LEAVING,
- false)) {
- /* whatever -- assume gone anyway */
- cfg80211_unhold_bss(wdev->auth_bsses[idx]);
- cfg80211_put_bss(&wdev->auth_bsses[idx]->pub);
- wdev->auth_bsses[idx] = NULL;
- }
+ __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
+ WLAN_REASON_DEAUTH_LEAVING, false);
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 66b84fbf2746..7128dde0fe1a 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2299,8 +2299,13 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (link->dump == NULL)
return -EINVAL;
- return netlink_dump_start(net->xfrm.nlsk, skb, nlh,
- link->dump, link->done, 0);
+ {
+ struct netlink_dump_control c = {
+ .dump = link->dump,
+ .done = link->done,
+ };
+ return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
+ }
}
err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,