Diffstat (limited to 'net')
-rw-r--r--  net/802/tr.c                 |   2
-rw-r--r--  net/8021q/vlan_core.c        |  10
-rw-r--r--  net/8021q/vlan_dev.c         |   3
-rw-r--r--  net/core/dev.c               |  72
-rw-r--r--  net/core/net-sysfs.c         |   4
-rw-r--r--  net/core/net_namespace.c     |  89
-rw-r--r--  net/core/sock.c              |   2
-rw-r--r--  net/ipv4/cipso_ipv4.c        |   9
-rw-r--r--  net/ipv4/icmp.c              |   2
-rw-r--r--  net/ipv4/tcp_input.c         |   9
-rw-r--r--  net/ipv4/tcp_ipv4.c          |   2
-rw-r--r--  net/ipv4/tcp_output.c        |   1
-rw-r--r--  net/ipv4/tcp_scalable.c      |   2
-rw-r--r--  net/ipv6/addrconf.c          |  53
-rw-r--r--  net/ipv6/af_inet6.c          |  24
-rw-r--r--  net/ipv6/inet6_hashtables.c  |   4
-rw-r--r--  net/netlink/af_netlink.c     |  10
-rw-r--r--  net/sched/act_police.c       |  13
-rw-r--r--  net/sched/sch_drr.c          |   6
-rw-r--r--  net/sctp/protocol.c          |  16
-rw-r--r--  net/sctp/sm_sideeffect.c     |  54
-rw-r--r--  net/sctp/sm_statefuns.c      |  16
-rw-r--r--  net/wireless/reg.c           |   3
-rw-r--r--  net/xfrm/xfrm_state.c        |  90
24 files changed, 293 insertions(+), 203 deletions(-)
diff --git a/net/802/tr.c b/net/802/tr.c
index 158150fee462..f47ae289d83b 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -668,3 +668,5 @@ module_init(rif_init);
EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);
+
+MODULE_LICENSE("GPL");
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e9db889d6222..2886d2fb9ab5 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -1,12 +1,16 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
+#include <linux/netpoll.h>
#include "vlan.h"
/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
u16 vlan_tci, int polling)
{
+ if (netpoll_rx(skb))
+ return NET_RX_DROP;
+
if (skb_bond_should_drop(skb))
goto drop;
@@ -100,6 +104,9 @@ int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
{
int err = NET_RX_SUCCESS;
+ if (netpoll_receive_skb(skb))
+ return NET_RX_DROP;
+
switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
case -1:
return netif_receive_skb(skb);
@@ -126,6 +133,9 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
if (!skb)
goto out;
+ if (netpoll_receive_skb(skb))
+ goto out;
+
err = NET_RX_SUCCESS;
switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4a19acd3a32b..1b34135cf990 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -553,7 +553,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
int err = 0;
if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
- err = ops->ndo_neigh_setup(dev, pa);
+ err = ops->ndo_neigh_setup(real_dev, pa);
return err;
}
@@ -639,6 +639,7 @@ static int vlan_dev_init(struct net_device *dev)
dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
dev->netdev_ops = &vlan_netdev_ops;
}
+ netdev_resync_ops(dev);
if (is_vlan_dev(real_dev))
subclass = 1;
diff --git a/net/core/dev.c b/net/core/dev.c
index a17e00662363..2565f6d1d661 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2267,12 +2267,6 @@ int netif_receive_skb(struct sk_buff *skb)
rcu_read_lock();
- /* Don't receive packets in an exiting network namespace */
- if (!net_alive(dev_net(skb->dev))) {
- kfree_skb(skb);
- goto out;
- }
-
#ifdef CONFIG_NET_CLS_ACT
if (skb->tc_verd & TC_NCLS) {
skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -2488,6 +2482,9 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
+ if (netpoll_receive_skb(skb))
+ return NET_RX_DROP;
+
switch (__napi_gro_receive(napi, skb)) {
case -1:
return netif_receive_skb(skb);
@@ -2558,6 +2555,9 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
if (!skb)
goto out;
+ if (netpoll_receive_skb(skb))
+ goto out;
+
err = NET_RX_SUCCESS;
switch (__napi_gro_receive(napi, skb)) {
@@ -2588,9 +2588,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
local_irq_disable();
skb = __skb_dequeue(&queue->input_pkt_queue);
if (!skb) {
- __napi_complete(napi);
local_irq_enable();
- break;
+ napi_complete(napi);
+ goto out;
}
local_irq_enable();
@@ -2599,6 +2599,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
napi_gro_flush(napi);
+out:
return work;
}
@@ -4282,6 +4283,39 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
}
EXPORT_SYMBOL(netdev_fix_features);
+/* Some devices need to (re-)set their netdev_ops inside
+ * ->init() or similar. If that happens, we have to setup
+ * the compat pointers again.
+ */
+void netdev_resync_ops(struct net_device *dev)
+{
+#ifdef CONFIG_COMPAT_NET_DEV_OPS
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ dev->init = ops->ndo_init;
+ dev->uninit = ops->ndo_uninit;
+ dev->open = ops->ndo_open;
+ dev->change_rx_flags = ops->ndo_change_rx_flags;
+ dev->set_rx_mode = ops->ndo_set_rx_mode;
+ dev->set_multicast_list = ops->ndo_set_multicast_list;
+ dev->set_mac_address = ops->ndo_set_mac_address;
+ dev->validate_addr = ops->ndo_validate_addr;
+ dev->do_ioctl = ops->ndo_do_ioctl;
+ dev->set_config = ops->ndo_set_config;
+ dev->change_mtu = ops->ndo_change_mtu;
+ dev->neigh_setup = ops->ndo_neigh_setup;
+ dev->tx_timeout = ops->ndo_tx_timeout;
+ dev->get_stats = ops->ndo_get_stats;
+ dev->vlan_rx_register = ops->ndo_vlan_rx_register;
+ dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
+ dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ops->ndo_poll_controller;
+#endif
+#endif
+}
+EXPORT_SYMBOL(netdev_resync_ops);
+
/**
* register_netdevice - register a network device
* @dev: device to register
@@ -4326,27 +4360,7 @@ int register_netdevice(struct net_device *dev)
* This is temporary until all network devices are converted.
*/
if (dev->netdev_ops) {
- const struct net_device_ops *ops = dev->netdev_ops;
-
- dev->init = ops->ndo_init;
- dev->uninit = ops->ndo_uninit;
- dev->open = ops->ndo_open;
- dev->change_rx_flags = ops->ndo_change_rx_flags;
- dev->set_rx_mode = ops->ndo_set_rx_mode;
- dev->set_multicast_list = ops->ndo_set_multicast_list;
- dev->set_mac_address = ops->ndo_set_mac_address;
- dev->validate_addr = ops->ndo_validate_addr;
- dev->do_ioctl = ops->ndo_do_ioctl;
- dev->set_config = ops->ndo_set_config;
- dev->change_mtu = ops->ndo_change_mtu;
- dev->tx_timeout = ops->ndo_tx_timeout;
- dev->get_stats = ops->ndo_get_stats;
- dev->vlan_rx_register = ops->ndo_vlan_rx_register;
- dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
- dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = ops->ndo_poll_controller;
-#endif
+ netdev_resync_ops(dev);
} else {
char drivername[64];
pr_info("%s (%s): not using net_device_ops yet\n",
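
The new netdev_resync_ops() helper factors the compat-pointer setup out of register_netdevice() so that drivers which install or replace their netdev_ops from ->init() (as vlan_dev_init() now does) can re-run it. A minimal user-space sketch of that "copy the ops table back into legacy per-object fields" pattern follows; the demo_* names are purely illustrative, not kernel symbols.

#include <stdio.h>

struct demo_ops {
	int (*open)(void);
	int (*stop)(void);
};

struct demo_dev {
	const struct demo_ops *ops;	/* new-style ops table */
	int (*open)(void);		/* legacy compat pointers */
	int (*stop)(void);
};

/* Mirrors the shape of netdev_resync_ops(): copy the ops table back
 * into the legacy fields so both views stay consistent. */
static void demo_resync_ops(struct demo_dev *dev)
{
	dev->open = dev->ops->open;
	dev->stop = dev->ops->stop;
}

static int demo_open(void) { puts("open"); return 0; }
static int demo_stop(void) { puts("stop"); return 0; }

static const struct demo_ops demo_netdev_ops = {
	.open = demo_open,
	.stop = demo_stop,
};

int main(void)
{
	struct demo_dev dev = { .ops = &demo_netdev_ops };

	demo_resync_ops(&dev);	/* must be repeated if dev.ops is swapped later */
	return dev.open() || dev.stop();
}
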
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 6ac29a46e23e..484f58750eba 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -77,7 +77,9 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
if (endp == buf)
goto err;
- rtnl_lock();
+ if (!rtnl_trylock())
+ return -ERESTARTSYS;
+
if (dev_isalive(net)) {
if ((ret = (*set)(net, new)) == 0)
ret = len;
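
netdev_store() now uses rtnl_trylock() and backs out with -ERESTARTSYS instead of blocking on the RTNL, so a writer that races with device teardown does not sleep on a lock held by the task it is blocking. A rough user-space analogue of that "try the lock, return a retryable error" shape, with a pthread mutex standing in for the RTNL and -EAGAIN standing in for -ERESTARTSYS:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_rtnl = PTHREAD_MUTEX_INITIALIZER;

/* Same control flow as the patched netdev_store(): do not block on the
 * lock, report a retryable error on contention instead. */
static int demo_store(long new_value)
{
	if (pthread_mutex_trylock(&demo_rtnl) != 0)
		return -EAGAIN;

	printf("stored %ld under the lock\n", new_value);
	pthread_mutex_unlock(&demo_rtnl);
	return 0;
}

int main(void)
{
	return demo_store(42) < 0;
}
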
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 55151faaf90c..e3bebd36f053 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -32,24 +32,14 @@ static __net_init int setup_net(struct net *net)
{
/* Must be called with net_mutex held */
struct pernet_operations *ops;
- int error;
- struct net_generic *ng;
+ int error = 0;
atomic_set(&net->count, 1);
+
#ifdef NETNS_REFCNT_DEBUG
atomic_set(&net->use_count, 0);
#endif
- error = -ENOMEM;
- ng = kzalloc(sizeof(struct net_generic) +
- INITIAL_NET_GEN_PTRS * sizeof(void *), GFP_KERNEL);
- if (ng == NULL)
- goto out;
-
- ng->len = INITIAL_NET_GEN_PTRS;
- rcu_assign_pointer(net->gen, ng);
-
- error = 0;
list_for_each_entry(ops, &pernet_list, list) {
if (ops->init) {
error = ops->init(net);
@@ -70,24 +60,50 @@ out_undo:
}
rcu_barrier();
- kfree(ng);
goto out;
}
+static struct net_generic *net_alloc_generic(void)
+{
+ struct net_generic *ng;
+ size_t generic_size = sizeof(struct net_generic) +
+ INITIAL_NET_GEN_PTRS * sizeof(void *);
+
+ ng = kzalloc(generic_size, GFP_KERNEL);
+ if (ng)
+ ng->len = INITIAL_NET_GEN_PTRS;
+
+ return ng;
+}
+
#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;
static struct net *net_alloc(void)
{
- return kmem_cache_zalloc(net_cachep, GFP_KERNEL);
+ struct net *net = NULL;
+ struct net_generic *ng;
+
+ ng = net_alloc_generic();
+ if (!ng)
+ goto out;
+
+ net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
+ if (!net)
+ goto out_free;
+
+ rcu_assign_pointer(net->gen, ng);
+out:
+ return net;
+
+out_free:
+ kfree(ng);
+ goto out;
}
static void net_free(struct net *net)
{
- if (!net)
- return;
-
#ifdef NETNS_REFCNT_DEBUG
if (unlikely(atomic_read(&net->use_count) != 0)) {
printk(KERN_EMERG "network namespace not free! Usage: %d\n",
@@ -112,27 +128,28 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
err = -ENOMEM;
new_net = net_alloc();
if (!new_net)
- goto out;
+ goto out_err;
mutex_lock(&net_mutex);
err = setup_net(new_net);
- if (err)
- goto out_unlock;
-
- rtnl_lock();
- list_add_tail(&new_net->list, &net_namespace_list);
- rtnl_unlock();
-
-
-out_unlock:
+ if (!err) {
+ rtnl_lock();
+ list_add_tail(&new_net->list, &net_namespace_list);
+ rtnl_unlock();
+ }
mutex_unlock(&net_mutex);
+
+ if (err)
+ goto out_free;
out:
put_net(old_net);
- if (err) {
- net_free(new_net);
- new_net = ERR_PTR(err);
- }
return new_net;
+
+out_free:
+ net_free(new_net);
+out_err:
+ new_net = ERR_PTR(err);
+ goto out;
}
static void cleanup_net(struct work_struct *work)
@@ -140,9 +157,6 @@ static void cleanup_net(struct work_struct *work)
struct pernet_operations *ops;
struct net *net;
- /* Be very certain incoming network packets will not find us */
- rcu_barrier();
-
net = container_of(work, struct net, work);
mutex_lock(&net_mutex);
@@ -188,6 +202,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
static int __init net_ns_init(void)
{
+ struct net_generic *ng;
int err;
printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net));
@@ -202,6 +217,12 @@ static int __init net_ns_init(void)
panic("Could not create netns workq");
#endif
+ ng = net_alloc_generic();
+ if (!ng)
+ panic("Could not allocate generic netns");
+
+ rcu_assign_pointer(init_net.gen, ng);
+
mutex_lock(&net_mutex);
err = setup_net(&init_net);
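
With this change net_alloc() allocates the net_generic pointer array before the struct net itself and unwinds with gotos, so setup_net() no longer has to know about the generic array at all. A stand-alone sketch of that allocate-then-unwind shape, using illustrative demo_* types rather than the kernel ones:

#include <stdlib.h>

#define DEMO_GEN_PTRS	8

struct demo_generic {
	unsigned int len;
	void *ptr[DEMO_GEN_PTRS];
};

struct demo_net {
	struct demo_generic *gen;
};

static struct demo_generic *demo_alloc_generic(void)
{
	struct demo_generic *ng = calloc(1, sizeof(*ng));

	if (ng)
		ng->len = DEMO_GEN_PTRS;
	return ng;
}

/* Same control flow as the patched net_alloc(): allocate the generic
 * array first, then the namespace, and free exactly what was allocated
 * on each failure path. */
static struct demo_net *demo_net_alloc(void)
{
	struct demo_net *net = NULL;
	struct demo_generic *ng;

	ng = demo_alloc_generic();
	if (!ng)
		goto out;

	net = calloc(1, sizeof(*net));
	if (!net)
		goto out_free;

	net->gen = ng;
out:
	return net;

out_free:
	free(ng);
	goto out;
}

int main(void)
{
	struct demo_net *net = demo_net_alloc();

	if (!net)
		return 1;
	free(net->gen);
	free(net);
	return 0;
}
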
diff --git a/net/core/sock.c b/net/core/sock.c
index 6e4f14d1ef81..5f97caa158e8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -696,7 +696,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
if (len < 0)
return -EINVAL;
- v.val = 0;
+ memset(&v, 0, sizeof(v));
switch(optname) {
case SO_DEBUG:
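
The switch from v.val = 0 to memset(&v, 0, sizeof(v)) matters because the option value in sock_getsockopt() is a union: assigning one member leaves the remaining bytes of larger members uninitialized, and those stale bytes can then be copied to user space. A small stand-alone demonstration of the effect (the union layout here is only illustrative, not the kernel's):

#include <stdio.h>
#include <string.h>

union demo_optval {		/* stand-in for the getsockopt value union */
	int val;
	char str[16];
};

int main(void)
{
	union demo_optval v;

	memset(&v, 0xAA, sizeof(v));	/* simulate stale stack contents */
	v.val = 0;			/* old code: clears only the int */
	printf("v.val = 0:           last byte = 0x%02x\n",
	       (unsigned char)v.str[15]);

	memset(&v, 0xAA, sizeof(v));
	memset(&v, 0, sizeof(v));	/* patched code: clears the whole union */
	printf("memset(&v, 0, ...):  last byte = 0x%02x\n",
	       (unsigned char)v.str[15]);
	return 0;
}
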
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 6bb2635b5ded..7bc992976d29 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -3,11 +3,16 @@
*
* This is an implementation of the CIPSO 2.2 protocol as specified in
* draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in
- * FIPS-188, copies of both documents can be found in the Documentation
- * directory. While CIPSO never became a full IETF RFC standard many vendors
+ * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors
* have chosen to adopt the protocol and over the years it has become a
* de-facto standard for labeled networking.
*
+ * The CIPSO draft specification can be found in the kernel's Documentation
+ * directory as well as the following URL:
+ * http://netlabel.sourceforge.net/files/draft-ietf-cipso-ipsecurity-01.txt
+ * The FIPS-188 specification can be found at the following URL:
+ * http://www.itl.nist.gov/fipspubs/fip188.htm
+ *
* Author: Paul Moore <paul.moore@hp.com>
*
*/
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 705b33b184a3..fc562d29cc46 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1205,7 +1205,7 @@ static struct pernet_operations __net_initdata icmp_sk_ops = {
int __init icmp_init(void)
{
- return register_pernet_device(&icmp_sk_ops);
+ return register_pernet_subsys(&icmp_sk_ops);
}
EXPORT_SYMBOL(icmp_err_convert);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a6961d75c7ea..c28976a7e596 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1374,7 +1374,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
struct tcp_sacktag_state *state,
- unsigned int pcount, int shifted, int mss)
+ unsigned int pcount, int shifted, int mss,
+ int dup_sack)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
@@ -1410,7 +1411,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
}
/* We discard results */
- tcp_sacktag_one(skb, sk, state, 0, pcount);
+ tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
/* Difference in this won't matter, both ACKed by the same cumul. ACK */
TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1561,7 +1562,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
if (!skb_shift(prev, skb, len))
goto fallback;
- if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss))
+ if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
goto out;
/* Hole filled allows collapsing with the next as well, this is very
@@ -1580,7 +1581,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
len = skb->len;
if (skb_shift(prev, skb, len)) {
pcount += tcp_skb_pcount(skb);
- tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss);
+ tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
}
out:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 19d7b429a262..cf74c416831a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2443,7 +2443,7 @@ static struct pernet_operations __net_initdata tcp_sk_ops = {
void __init tcp_v4_init(void)
{
inet_hashinfo_init(&tcp_hashinfo);
- if (register_pernet_device(&tcp_sk_ops))
+ if (register_pernet_subsys(&tcp_sk_ops))
panic("Failed to create the TCP control socket.\n");
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dda42f0bd7a3..da2c3b8794f2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2023,7 +2023,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
last_lost = tp->snd_una;
}
- /* First pass: retransmit lost packets. */
tcp_for_write_queue_from(skb, sk) {
__u8 sacked = TCP_SKB_CB(skb)->sacked;
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 2747ec7bfb63..4660b088a8ce 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -1,6 +1,6 @@
/* Tom Kelly's Scalable TCP
*
- * See htt://www-lce.eng.cam.ac.uk/~ctk21/scalable/
+ * See http://www.deneholme.net/tom/scalable/
*
* John Heffner <jheffner@sc.edu>
*/
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f9afb452249c..1220e2c7831e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -493,15 +493,17 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
read_unlock(&dev_base_lock);
}
-static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
+static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
{
struct net *net;
net = (struct net *)table->extra2;
if (p == &net->ipv6.devconf_dflt->forwarding)
- return;
+ return 0;
+
+ if (!rtnl_trylock())
+ return -ERESTARTSYS;
- rtnl_lock();
if (p == &net->ipv6.devconf_all->forwarding) {
__s32 newf = net->ipv6.devconf_all->forwarding;
net->ipv6.devconf_dflt->forwarding = newf;
@@ -512,6 +514,7 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
if (*p)
rt6_purge_dflt_routers(net);
+ return 1;
}
#endif
@@ -2608,9 +2611,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
ASSERT_RTNL();
- if ((dev->flags & IFF_LOOPBACK) && how == 1)
- how = 0;
-
rt6_ifdown(net, dev);
neigh_ifdown(&nd_tbl, dev);
@@ -3983,7 +3983,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
if (write)
- addrconf_fixup_forwarding(ctl, valp, val);
+ ret = addrconf_fixup_forwarding(ctl, valp, val);
return ret;
}
@@ -4019,8 +4019,7 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table,
}
*valp = new;
- addrconf_fixup_forwarding(table, valp, val);
- return 1;
+ return addrconf_fixup_forwarding(table, valp, val);
}
static struct addrconf_sysctl_table
@@ -4446,25 +4445,6 @@ int unregister_inet6addr_notifier(struct notifier_block *nb)
EXPORT_SYMBOL(unregister_inet6addr_notifier);
-static void addrconf_net_exit(struct net *net)
-{
- struct net_device *dev;
-
- rtnl_lock();
- /* clean dev list */
- for_each_netdev(net, dev) {
- if (__in6_dev_get(dev) == NULL)
- continue;
- addrconf_ifdown(dev, 1);
- }
- addrconf_ifdown(net->loopback_dev, 2);
- rtnl_unlock();
-}
-
-static struct pernet_operations addrconf_net_ops = {
- .exit = addrconf_net_exit,
-};
-
/*
* Init / cleanup code
*/
@@ -4506,10 +4486,6 @@ int __init addrconf_init(void)
if (err)
goto errlo;
- err = register_pernet_device(&addrconf_net_ops);
- if (err)
- return err;
-
register_netdevice_notifier(&ipv6_dev_notf);
addrconf_verify(0);
@@ -4539,15 +4515,22 @@ errlo:
void addrconf_cleanup(void)
{
struct inet6_ifaddr *ifa;
+ struct net_device *dev;
int i;
unregister_netdevice_notifier(&ipv6_dev_notf);
- unregister_pernet_device(&addrconf_net_ops);
-
unregister_pernet_subsys(&addrconf_ops);
rtnl_lock();
+ /* clean dev list */
+ for_each_netdev(&init_net, dev) {
+ if (__in6_dev_get(dev) == NULL)
+ continue;
+ addrconf_ifdown(dev, 1);
+ }
+ addrconf_ifdown(init_net.loopback_dev, 2);
+
/*
* Check hash table.
*/
@@ -4568,6 +4551,4 @@ void addrconf_cleanup(void)
del_timer(&addr_chk_timer);
rtnl_unlock();
-
- unregister_pernet_subsys(&addrconf_net_ops);
}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index c802bc1658a8..9c8309ed35cf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -72,6 +72,10 @@ MODULE_LICENSE("GPL");
static struct list_head inetsw6[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw6_lock);
+static int disable_ipv6 = 0;
+module_param_named(disable, disable_ipv6, int, 0);
+MODULE_PARM_DESC(disable, "Disable IPv6 such that it is non-functional");
+
static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
{
const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo);
@@ -991,10 +995,21 @@ static int __init inet6_init(void)
{
struct sk_buff *dummy_skb;
struct list_head *r;
- int err;
+ int err = 0;
BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));
+ /* Register the socket-side information for inet6_create. */
+ for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
+ INIT_LIST_HEAD(r);
+
+ if (disable_ipv6) {
+ printk(KERN_INFO
+ "IPv6: Loaded, but administratively disabled, "
+ "reboot required to enable\n");
+ goto out;
+ }
+
err = proto_register(&tcpv6_prot, 1);
if (err)
goto out;
@@ -1012,10 +1027,6 @@ static int __init inet6_init(void)
goto out_unregister_udplite_proto;
- /* Register the socket-side information for inet6_create. */
- for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
- INIT_LIST_HEAD(r);
-
/* We MUST register RAW sockets before we create the ICMP6,
* IGMP6, or NDISC control sockets.
*/
@@ -1181,6 +1192,9 @@ module_init(inet6_init);
static void __exit inet6_exit(void)
{
+ if (disable_ipv6)
+ return;
+
/* First of all disallow new sockets creation. */
sock_unregister(PF_INET6);
/* Disallow any further netlink messages */
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 8fe267feb81e..1bcc3431859e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -258,11 +258,11 @@ unique:
if (twp != NULL) {
*twp = tw;
- NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
+ NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
} else if (tw != NULL) {
/* Silly. Should hash-dance instead... */
inet_twsk_deschedule(tw, death_row);
- NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
+ NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
inet_twsk_put(tw);
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 9eb895c7a2a9..3ae3cb816563 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1084,6 +1084,13 @@ out:
return 0;
}
+/**
+ * netlink_set_err - report error to broadcast listeners
+ * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
+ * @pid: the PID of a process that we want to skip (if any)
+ * @groups: the broadcast group that will notice the error
+ * @code: error code, must be negative (as usual in kernelspace)
+ */
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
struct netlink_set_err_data info;
@@ -1093,7 +1100,8 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
info.exclude_sk = ssk;
info.pid = pid;
info.group = group;
- info.code = code;
+ /* sk->sk_err wants a positive error value */
+ info.code = -code;
read_lock(&nl_table_lock);
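
The new kerneldoc plus the negation make the sign convention explicit: callers pass a negative errno, as usual in the kernel, while sk->sk_err is stored as a positive value. A trivial stand-alone illustration of that convention (the demo_* names are not kernel symbols):

#include <errno.h>
#include <stdio.h>

static int demo_sk_err;			/* stands in for sk->sk_err */

/* Mirrors the patched netlink_set_err(): accept a negative errno and
 * store it as the positive value that sk_err expects. */
static void demo_set_err(int code)
{
	demo_sk_err = -code;
}

int main(void)
{
	demo_set_err(-ENOBUFS);		/* caller side: negative, kernel style */
	printf("sk_err = %d, ENOBUFS = %d\n", demo_sk_err, ENOBUFS);
	return 0;
}
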
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 5c72a116b1a4..f8f047b61245 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -183,13 +183,6 @@ override:
if (R_tab == NULL)
goto failure;
- if (!est && (ret == ACT_P_CREATED ||
- !gen_estimator_active(&police->tcf_bstats,
- &police->tcf_rate_est))) {
- err = -EINVAL;
- goto failure;
- }
-
if (parm->peakrate.rate) {
P_tab = qdisc_get_rtab(&parm->peakrate,
tb[TCA_POLICE_PEAKRATE]);
@@ -205,6 +198,12 @@ override:
&police->tcf_lock, est);
if (err)
goto failure_unlock;
+ } else if (tb[TCA_POLICE_AVRATE] &&
+ (ret == ACT_P_CREATED ||
+ !gen_estimator_active(&police->tcf_bstats,
+ &police->tcf_rate_est))) {
+ err = -EINVAL;
+ goto failure_unlock;
}
/* No failure allowed after this point */
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index f6b4fa97df70..e36e94ab4e10 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -66,11 +66,15 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
{
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl = (struct drr_class *)*arg;
+ struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_DRR_MAX + 1];
u32 quantum;
int err;
- err = nla_parse_nested(tb, TCA_DRR_MAX, tca[TCA_OPTIONS], drr_policy);
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy);
if (err < 0)
return err;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index b78e3be69013..c4986d0f7419 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -717,15 +717,20 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
static int sctp_ctl_sock_init(void)
{
int err;
- sa_family_t family;
+ sa_family_t family = PF_INET;
if (sctp_get_pf_specific(PF_INET6))
family = PF_INET6;
- else
- family = PF_INET;
err = inet_ctl_sock_create(&sctp_ctl_sock, family,
SOCK_SEQPACKET, IPPROTO_SCTP, &init_net);
+
+ /* If IPv6 socket could not be created, try the IPv4 socket */
+ if (err < 0 && family == PF_INET6)
+ err = inet_ctl_sock_create(&sctp_ctl_sock, AF_INET,
+ SOCK_SEQPACKET, IPPROTO_SCTP,
+ &init_net);
+
if (err < 0) {
printk(KERN_ERR
"SCTP: Failed to create the SCTP control socket.\n");
@@ -1322,9 +1327,8 @@ SCTP_STATIC __init int sctp_init(void)
out:
return status;
err_v6_add_protocol:
- sctp_v6_del_protocol();
-err_add_protocol:
sctp_v4_del_protocol();
+err_add_protocol:
inet_ctl_sock_destroy(sctp_ctl_sock);
err_ctl_sock_init:
sctp_v6_protosw_exit();
@@ -1335,7 +1339,6 @@ err_protosw_init:
sctp_v4_pf_exit();
sctp_v6_pf_exit();
sctp_sysctl_unregister();
- list_del(&sctp_af_inet.list);
free_pages((unsigned long)sctp_port_hashtable,
get_order(sctp_port_hashsize *
sizeof(struct sctp_bind_hashbucket)));
@@ -1383,7 +1386,6 @@ SCTP_STATIC __exit void sctp_exit(void)
sctp_v4_pf_exit();
sctp_sysctl_unregister();
- list_del(&sctp_af_inet.list);
free_pages((unsigned long)sctp_assoc_hashtable,
get_order(sctp_assoc_hashsize *
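
sctp_ctl_sock_init() now falls back to an IPv4 control socket when the IPv6 one cannot be created, for instance when IPv6 is compiled in but administratively disabled via the new module parameter added in af_inet6.c above. A rough user-space analogue of that try-IPv6-then-IPv4 fallback:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int family = AF_INET6;
	int fd = socket(family, SOCK_DGRAM, 0);

	if (fd < 0) {			/* IPv6 unavailable: retry with IPv4 */
		family = AF_INET;
		fd = socket(family, SOCK_DGRAM, 0);
	}
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	printf("control socket created, family = %s\n",
	       family == AF_INET6 ? "AF_INET6" : "AF_INET");
	return close(fd);
}
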
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index e1d6076b4f59..b5495aecab60 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -787,36 +787,48 @@ static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
struct sctp_chunk *chunk)
{
- struct sctp_operr_chunk *operr_chunk;
struct sctp_errhdr *err_hdr;
+ struct sctp_ulpevent *ev;
- operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr;
- err_hdr = &operr_chunk->err_hdr;
+ while (chunk->chunk_end > chunk->skb->data) {
+ err_hdr = (struct sctp_errhdr *)(chunk->skb->data);
- switch (err_hdr->cause) {
- case SCTP_ERROR_UNKNOWN_CHUNK:
- {
- struct sctp_chunkhdr *unk_chunk_hdr;
+ ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
+ GFP_ATOMIC);
+ if (!ev)
+ return;
- unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable;
- switch (unk_chunk_hdr->type) {
- /* ADDIP 4.1 A9) If the peer responds to an ASCONF with an
- * ERROR chunk reporting that it did not recognized the ASCONF
- * chunk type, the sender of the ASCONF MUST NOT send any
- * further ASCONF chunks and MUST stop its T-4 timer.
- */
- case SCTP_CID_ASCONF:
- asoc->peer.asconf_capable = 0;
- sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
+ sctp_ulpq_tail_event(&asoc->ulpq, ev);
+
+ switch (err_hdr->cause) {
+ case SCTP_ERROR_UNKNOWN_CHUNK:
+ {
+ sctp_chunkhdr_t *unk_chunk_hdr;
+
+ unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable;
+ switch (unk_chunk_hdr->type) {
+ /* ADDIP 4.1 A9) If the peer responds to an ASCONF with
+ * an ERROR chunk reporting that it did not recognized
+ * the ASCONF chunk type, the sender of the ASCONF MUST
+ * NOT send any further ASCONF chunks and MUST stop its
+ * T-4 timer.
+ */
+ case SCTP_CID_ASCONF:
+ if (asoc->peer.asconf_capable == 0)
+ break;
+
+ asoc->peer.asconf_capable = 0;
+ sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+ break;
+ default:
+ break;
+ }
break;
+ }
default:
break;
}
- break;
- }
- default:
- break;
}
}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 3a0cd075914f..f88dfded0e3a 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3163,7 +3163,6 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
- struct sctp_ulpevent *ev;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -3173,21 +3172,10 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
- while (chunk->chunk_end > chunk->skb->data) {
- ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
- GFP_ATOMIC);
- if (!ev)
- goto nomem;
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
+ SCTP_CHUNK(chunk));
- sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
- SCTP_ULPEVENT(ev));
- sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
- SCTP_CHUNK(chunk));
- }
return SCTP_DISPOSITION_CONSUME;
-
-nomem:
- return SCTP_DISPOSITION_NOMEM;
}
/*
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 85c9034c59b2..bd0a16c3de5e 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -380,7 +380,8 @@ static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
- if (freq_diff <= 0 || freq_range->max_bandwidth_khz > freq_diff)
+ if (freq_range->end_freq_khz <= freq_range->start_freq_khz ||
+ freq_range->max_bandwidth_khz > freq_diff)
return false;
return true;
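
The reg.c check is rewritten because the frequency-range fields are unsigned: when end_freq_khz is below start_freq_khz the subtraction wraps to a huge value, so freq_diff <= 0 only ever catches the equal case. Comparing the endpoints directly rejects inverted ranges as well; a small stand-alone demonstration of the wraparound:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t start_khz = 5200000, end_khz = 5180000;	/* inverted range */
	uint32_t freq_diff = end_khz - start_khz;		/* wraps around */

	printf("freq_diff = %u\n", freq_diff);
	printf("old check (freq_diff <= 0):  %s\n",
	       freq_diff <= 0 ? "rejected" : "missed");
	printf("new check (end <= start):    %s\n",
	       end_khz <= start_khz ? "rejected" : "missed");
	return 0;
}
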
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index e25ff62ab2a6..62a5425cc6aa 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -748,12 +748,51 @@ static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
schedule_work(&net->xfrm.state_hash_work);
}
+static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
+ struct flowi *fl, unsigned short family,
+ xfrm_address_t *daddr, xfrm_address_t *saddr,
+ struct xfrm_state **best, int *acq_in_progress,
+ int *error)
+{
+ /* Resolution logic:
+ * 1. There is a valid state with matching selector. Done.
+ * 2. Valid state with inappropriate selector. Skip.
+ *
+ * Entering area of "sysdeps".
+ *
+ * 3. If state is not valid, selector is temporary, it selects
+ * only session which triggered previous resolution. Key
+ * manager will do something to install a state with proper
+ * selector.
+ */
+ if (x->km.state == XFRM_STATE_VALID) {
+ if ((x->sel.family &&
+ !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
+ !security_xfrm_state_pol_flow_match(x, pol, fl))
+ return;
+
+ if (!*best ||
+ (*best)->km.dying > x->km.dying ||
+ ((*best)->km.dying == x->km.dying &&
+ (*best)->curlft.add_time < x->curlft.add_time))
+ *best = x;
+ } else if (x->km.state == XFRM_STATE_ACQ) {
+ *acq_in_progress = 1;
+ } else if (x->km.state == XFRM_STATE_ERROR ||
+ x->km.state == XFRM_STATE_EXPIRED) {
+ if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
+ security_xfrm_state_pol_flow_match(x, pol, fl))
+ *error = -ESRCH;
+ }
+}
+
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
struct flowi *fl, struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err,
unsigned short family)
{
+ static xfrm_address_t saddr_wildcard = { };
struct net *net = xp_net(pol);
unsigned int h;
struct hlist_node *entry;
@@ -773,40 +812,27 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
xfrm_state_addr_check(x, daddr, saddr, family) &&
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
- (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
- /* Resolution logic:
- 1. There is a valid state with matching selector.
- Done.
- 2. Valid state with inappropriate selector. Skip.
-
- Entering area of "sysdeps".
-
- 3. If state is not valid, selector is temporary,
- it selects only session which triggered
- previous resolution. Key manager will do
- something to install a state with proper
- selector.
- */
- if (x->km.state == XFRM_STATE_VALID) {
- if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
- !security_xfrm_state_pol_flow_match(x, pol, fl))
- continue;
- if (!best ||
- best->km.dying > x->km.dying ||
- (best->km.dying == x->km.dying &&
- best->curlft.add_time < x->curlft.add_time))
- best = x;
- } else if (x->km.state == XFRM_STATE_ACQ) {
- acquire_in_progress = 1;
- } else if (x->km.state == XFRM_STATE_ERROR ||
- x->km.state == XFRM_STATE_EXPIRED) {
- if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
- security_xfrm_state_pol_flow_match(x, pol, fl))
- error = -ESRCH;
- }
- }
+ (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
+ xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+ &best, &acquire_in_progress, &error);
+ }
+ if (best)
+ goto found;
+
+ h = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
+ hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+ if (x->props.family == family &&
+ x->props.reqid == tmpl->reqid &&
+ !(x->props.flags & XFRM_STATE_WILDRECV) &&
+ xfrm_state_addr_check(x, daddr, saddr, family) &&
+ tmpl->mode == x->props.mode &&
+ tmpl->id.proto == x->id.proto &&
+ (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
+ xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+ &best, &acquire_in_progress, &error);
}
+found:
x = best;
if (!x && !error && !acquire_in_progress) {
if (tmpl->id.spi &&