Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 11
-rw-r--r--  net/appletalk/ddp.c | 6
-rw-r--r--  net/atm/br2684.c | 3
-rw-r--r--  net/atm/clip.c | 16
-rw-r--r--  net/atm/ipcommon.c | 17
-rw-r--r--  net/atm/lec.c | 3
-rw-r--r--  net/atm/mpc.c | 3
-rw-r--r--  net/atm/pppoatm.c | 3
-rw-r--r--  net/atm/resources.c | 3
-rw-r--r--  net/ax25/af_ax25.c | 17
-rw-r--r--  net/ax25/ax25_dev.c | 4
-rw-r--r--  net/ax25/ax25_ds_subr.c | 8
-rw-r--r--  net/ax25/ax25_ds_timer.c | 4
-rw-r--r--  net/ax25/ax25_iface.c | 18
-rw-r--r--  net/ax25/ax25_in.c | 2
-rw-r--r--  net/ax25/sysctl_net_ax25.c | 4
-rw-r--r--  net/bluetooth/cmtp/capi.c | 4
-rw-r--r--  net/bluetooth/cmtp/core.c | 3
-rw-r--r--  net/bluetooth/hci_core.c | 7
-rw-r--r--  net/bluetooth/hidp/Kconfig | 3
-rw-r--r--  net/bluetooth/hidp/core.c | 3
-rw-r--r--  net/bluetooth/l2cap.c | 18
-rw-r--r--  net/bluetooth/rfcomm/core.c | 28
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 3
-rw-r--r--  net/bluetooth/sco.c | 12
-rw-r--r--  net/bridge/br_forward.c | 2
-rw-r--r--  net/bridge/br_ioctl.c | 7
-rw-r--r--  net/bridge/br_netfilter.c | 7
-rw-r--r--  net/core/dev.c | 42
-rw-r--r--  net/core/ethtool.c | 2
-rw-r--r--  net/core/skbuff.c | 91
-rw-r--r--  net/core/stream.c | 16
-rw-r--r--  net/core/user_dma.c | 1
-rw-r--r--  net/dccp/feat.h | 2
-rw-r--r--  net/dccp/ipv4.c | 3
-rw-r--r--  net/dccp/ipv6.c | 4
-rw-r--r--  net/dccp/options.c | 2
-rw-r--r--  net/dccp/proto.c | 4
-rw-r--r--  net/decnet/dn_dev.c | 9
-rw-r--r--  net/decnet/dn_fib.c | 3
-rw-r--r--  net/decnet/dn_neigh.c | 3
-rw-r--r--  net/decnet/dn_rules.c | 6
-rw-r--r--  net/decnet/dn_table.c | 11
-rw-r--r--  net/econet/af_econet.c | 3
-rw-r--r--  net/ieee80211/Kconfig | 1
-rw-r--r--  net/ieee80211/ieee80211_crypt.c | 3
-rw-r--r--  net/ieee80211/ieee80211_crypt_ccmp.c | 3
-rw-r--r--  net/ieee80211/ieee80211_crypt_wep.c | 3
-rw-r--r--  net/ieee80211/ieee80211_wx.c | 7
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_auth.c | 28
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_io.c | 3
-rw-r--r--  net/ipv4/Kconfig | 10
-rw-r--r--  net/ipv4/Makefile | 1
-rw-r--r--  net/ipv4/af_inet.c | 36
-rw-r--r--  net/ipv4/ah4.c | 4
-rw-r--r--  net/ipv4/arp.c | 3
-rw-r--r--  net/ipv4/devinet.c | 6
-rw-r--r--  net/ipv4/esp4.c | 4
-rw-r--r--  net/ipv4/fib_hash.c | 6
-rw-r--r--  net/ipv4/fib_rules.c | 7
-rw-r--r--  net/ipv4/fib_semantics.c | 15
-rw-r--r--  net/ipv4/fib_trie.c | 2
-rw-r--r--  net/ipv4/igmp.c | 12
-rw-r--r--  net/ipv4/inet_diag.c | 3
-rw-r--r--  net/ipv4/inetpeer.c | 2
-rw-r--r--  net/ipv4/ip_gre.c | 1
-rw-r--r--  net/ipv4/ip_input.c | 3
-rw-r--r--  net/ipv4/ip_options.c | 1
-rw-r--r--  net/ipv4/ip_output.c | 4
-rw-r--r--  net/ipv4/ipcomp.c | 6
-rw-r--r--  net/ipv4/ipip.c | 1
-rw-r--r--  net/ipv4/ipmr.c | 21
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c | 10
-rw-r--r--  net/ipv4/ipvs/ip_vs_est.c | 3
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_helper_h323.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_standalone.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_nat_snmp_basic.c | 4
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 3
-rw-r--r--  net/ipv4/raw.c | 1
-rw-r--r--  net/ipv4/tcp_compound.c | 448
-rw-r--r--  net/ipv4/tcp_highspeed.c | 13
-rw-r--r--  net/ipv4/tcp_ipv4.c | 21
-rw-r--r--  net/ipv4/udp.c | 3
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c | 1
-rw-r--r--  net/ipv4/xfrm4_output.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 25
-rw-r--r--  net/ipv6/ip6_input.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 4
-rw-r--r--  net/ipv6/ip6_tunnel.c | 3
-rw-r--r--  net/ipv6/ipcomp6.c | 3
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 89
-rw-r--r--  net/ipv6/raw.c | 1
-rw-r--r--  net/ipv6/sit.c | 1
-rw-r--r--  net/ipv6/tcp_ipv6.c | 19
-rw-r--r--  net/ipv6/xfrm6_output.c | 2
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 140
-rw-r--r--  net/irda/af_irda.c | 2
-rw-r--r--  net/irda/ircomm/ircomm_core.c | 4
-rw-r--r--  net/irda/ircomm/ircomm_lmp.c | 4
-rw-r--r--  net/irda/ircomm/ircomm_param.c | 2
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 8
-rw-r--r--  net/irda/irda_device.c | 4
-rw-r--r--  net/irda/iriap.c | 9
-rw-r--r--  net/irda/iriap_event.c | 2
-rw-r--r--  net/irda/irias_object.c | 24
-rw-r--r--  net/irda/irlan/irlan_common.c | 16
-rw-r--r--  net/irda/irlan/irlan_provider.c | 2
-rw-r--r--  net/irda/irlap.c | 8
-rw-r--r--  net/irda/irlap_frame.c | 19
-rw-r--r--  net/irda/irlmp.c | 11
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 3
-rw-r--r--  net/irda/irttp.c | 20
-rw-r--r--  net/lapb/lapb_iface.c | 4
-rw-r--r--  net/llc/llc_core.c | 3
-rw-r--r--  net/netfilter/Kconfig | 4
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 4
-rw-r--r--  net/netfilter/nf_queue.c | 9
-rw-r--r--  net/netfilter/xt_physdev.c | 15
-rw-r--r--  net/netfilter/xt_pkttype.c | 12
-rw-r--r--  net/netlink/af_netlink.c | 13
-rw-r--r--  net/netrom/af_netrom.c | 25
-rw-r--r--  net/netrom/nr_timer.c | 2
-rw-r--r--  net/rose/af_rose.c | 12
-rw-r--r--  net/rxrpc/connection.c | 6
-rw-r--r--  net/rxrpc/peer.c | 3
-rw-r--r--  net/rxrpc/transport.c | 6
-rw-r--r--  net/sched/act_api.c | 13
-rw-r--r--  net/sched/act_pedit.c | 3
-rw-r--r--  net/sched/act_police.c | 6
-rw-r--r--  net/sched/cls_basic.c | 6
-rw-r--r--  net/sched/cls_fw.c | 6
-rw-r--r--  net/sched/cls_route.c | 9
-rw-r--r--  net/sched/cls_rsvp.h | 9
-rw-r--r--  net/sched/cls_tcindex.c | 12
-rw-r--r--  net/sched/cls_u32.c | 15
-rw-r--r--  net/sched/em_meta.c | 3
-rw-r--r--  net/sched/ematch.c | 3
-rw-r--r--  net/sched/estimator.c | 3
-rw-r--r--  net/sched/sch_cbq.c | 3
-rw-r--r--  net/sched/sch_generic.c | 3
-rw-r--r--  net/sched/sch_gred.c | 3
-rw-r--r--  net/sched/sch_hfsc.c | 3
-rw-r--r--  net/sched/sch_htb.c | 7
-rw-r--r--  net/sched/sch_netem.c | 4
-rw-r--r--  net/sctp/associola.c | 27
-rw-r--r--  net/sctp/bind_addr.c | 8
-rw-r--r--  net/sctp/endpointola.c | 11
-rw-r--r--  net/sctp/ipv6.c | 3
-rw-r--r--  net/sctp/outqueue.c | 9
-rw-r--r--  net/sctp/protocol.c | 7
-rw-r--r--  net/sctp/sm_make_chunk.c | 14
-rw-r--r--  net/sctp/sm_sideeffect.c | 12
-rw-r--r--  net/sctp/sm_statefuns.c | 8
-rw-r--r--  net/sctp/socket.c | 74
-rw-r--r--  net/sctp/transport.c | 9
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 9
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c | 3
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c | 3
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_mech.c | 3
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_token.c | 3
-rw-r--r--  net/sunrpc/clnt.c | 3
-rw-r--r--  net/sunrpc/stats.c | 7
-rw-r--r--  net/sunrpc/svc.c | 6
-rw-r--r--  net/sunrpc/svcsock.c | 3
-rw-r--r--  net/sunrpc/xprt.c | 3
-rw-r--r--  net/sunrpc/xprtsock.c | 6
-rw-r--r--  net/tipc/bearer.c | 6
-rw-r--r--  net/tipc/cluster.c | 8
-rw-r--r--  net/tipc/discover.c | 2
-rw-r--r--  net/tipc/link.c | 3
-rw-r--r--  net/tipc/name_table.c | 16
-rw-r--r--  net/tipc/net.c | 5
-rw-r--r--  net/tipc/port.c | 5
-rw-r--r--  net/tipc/ref.c | 2
-rw-r--r--  net/tipc/subscr.c | 3
-rw-r--r--  net/tipc/user_reg.c | 3
-rw-r--r--  net/tipc/zone.c | 3
-rw-r--r--  net/unix/af_unix.c | 3
-rw-r--r--  net/wanrouter/af_wanpipe.c | 9
-rw-r--r--  net/wanrouter/wanmain.c | 9
-rw-r--r--  net/xfrm/xfrm_policy.c | 3
-rw-r--r--  net/xfrm/xfrm_state.c | 3
182 files changed, 858 insertions, 1244 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 458031bfff55..18fcb9fa518d 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -67,10 +67,6 @@ static struct packet_type vlan_packet_type = {
.func = vlan_skb_recv, /* VLAN receive method */
};
-/* Bits of netdev state that are propagated from real device to virtual */
-#define VLAN_LINK_STATE_MASK \
- ((1<<__LINK_STATE_PRESENT)|(1<<__LINK_STATE_NOCARRIER)|(1<<__LINK_STATE_DORMANT))
-
/* End of global variables definitions. */
/*
@@ -479,7 +475,9 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
new_dev->flags = real_dev->flags;
new_dev->flags &= ~IFF_UP;
- new_dev->state = real_dev->state & ~(1<<__LINK_STATE_START);
+ new_dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
+ (1<<__LINK_STATE_DORMANT))) |
+ (1<<__LINK_STATE_PRESENT);
/* need 4 bytes for extra VLAN header info,
* hope the underlying device can handle it.
@@ -542,12 +540,11 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
* so it cannot "appear" on us.
*/
if (!grp) { /* need to add a new group */
- grp = kmalloc(sizeof(struct vlan_group), GFP_KERNEL);
+ grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
if (!grp)
goto out_free_unregister;
/* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */
- memset(grp, 0, sizeof(struct vlan_group));
grp->real_dev_ifindex = real_dev->ifindex;
hlist_add_head_rcu(&grp->hlist,
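[Editorial note, not part of the patch: the kmalloc()+memset() -> kzalloc() conversion seen above recurs throughout this commit. As a reminder of what the helper guarantees, a minimal illustrative sketch of the equivalence:

	/* kzalloc(size, flags) behaves like: */
	void *kzalloc_equiv(size_t size, gfp_t flags)
	{
		void *p = kmalloc(size, flags);

		if (p)
			memset(p, 0, size);
		return p;
	}

Both return NULL on allocation failure; kzalloc simply hands back memory that is already zeroed, which is why the explicit memset lines can be dropped.]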
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 5ee96d4b40e9..96dc6bb52d14 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -227,12 +227,11 @@ static void atif_drop_device(struct net_device *dev)
static struct atalk_iface *atif_add_device(struct net_device *dev,
struct atalk_addr *sa)
{
- struct atalk_iface *iface = kmalloc(sizeof(*iface), GFP_KERNEL);
+ struct atalk_iface *iface = kzalloc(sizeof(*iface), GFP_KERNEL);
if (!iface)
goto out;
- memset(iface, 0, sizeof(*iface));
dev_hold(dev);
iface->dev = dev;
dev->atalk_ptr = iface;
@@ -559,12 +558,11 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint)
}
if (!rt) {
- rt = kmalloc(sizeof(*rt), GFP_ATOMIC);
+ rt = kzalloc(sizeof(*rt), GFP_ATOMIC);
retval = -ENOBUFS;
if (!rt)
goto out_unlock;
- memset(rt, 0, sizeof(*rt));
rt->next = atalk_routes;
atalk_routes = rt;
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index a487233dc466..d00cca97eb33 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -508,10 +508,9 @@ Note: we do not have explicit unassign, but look at _push()
if (copy_from_user(&be, arg, sizeof be))
return -EFAULT;
- brvcc = kmalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
+ brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
if (!brvcc)
return -ENOMEM;
- memset(brvcc, 0, sizeof(struct br2684_vcc));
write_lock_irq(&devs_lock);
net_dev = br2684_find_dev(&be.ifspec);
if (net_dev == NULL) {
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 121bf6f49148..7ce7bfe3fbad 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -929,12 +929,11 @@ static int arp_seq_open(struct inode *inode, struct file *file)
struct seq_file *seq;
int rc = -EAGAIN;
- state = kmalloc(sizeof(*state), GFP_KERNEL);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state) {
rc = -ENOMEM;
goto out_kfree;
}
- memset(state, 0, sizeof(*state));
state->ns.neigh_sub_iter = clip_seq_sub_iter;
rc = seq_open(file, &arp_seq_ops);
@@ -962,7 +961,6 @@ static struct file_operations arp_seq_fops = {
static int __init atm_clip_init(void)
{
- struct proc_dir_entry *p;
neigh_table_init_no_netlink(&clip_tbl);
clip_tbl_hook = &clip_tbl;
@@ -972,9 +970,15 @@ static int __init atm_clip_init(void)
setup_timer(&idle_timer, idle_timer_check, 0);
- p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
- if (p)
- p->proc_fops = &arp_seq_fops;
+#ifdef CONFIG_PROC_FS
+ {
+ struct proc_dir_entry *p;
+
+ p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
+ if (p)
+ p->proc_fops = &arp_seq_fops;
+ }
+#endif
return 0;
}
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
index 4b1faca5013f..1d3de42fada0 100644
--- a/net/atm/ipcommon.c
+++ b/net/atm/ipcommon.c
@@ -25,22 +25,27 @@
/*
* skb_migrate appends the list at "from" to "to", emptying "from" in the
* process. skb_migrate is atomic with respect to all other skb operations on
- * "from" and "to". Note that it locks both lists at the same time, so beware
- * of potential deadlocks.
+ * "from" and "to". Note that it locks both lists at the same time, so to deal
+ * with the lock ordering, the locks are taken in address order.
*
* This function should live in skbuff.c or skbuff.h.
*/
-void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
+void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to)
{
unsigned long flags;
struct sk_buff *skb_from = (struct sk_buff *) from;
struct sk_buff *skb_to = (struct sk_buff *) to;
struct sk_buff *prev;
- spin_lock_irqsave(&from->lock,flags);
- spin_lock(&to->lock);
+ if ((unsigned long) from < (unsigned long) to) {
+ spin_lock_irqsave(&from->lock, flags);
+ spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock_irqsave(&to->lock, flags);
+ spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING);
+ }
prev = from->prev;
from->next->prev = to->prev;
prev->next = skb_to;
@@ -51,7 +56,7 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
from->prev = skb_from;
from->next = skb_from;
from->qlen = 0;
- spin_unlock_irqrestore(&from->lock,flags);
+ spin_unlock_irqrestore(&from->lock, flags);
}
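[Editorial note, not part of the patch: the hunk above fixes the double locking in skb_migrate(). Both queue locks are taken in address order so two concurrent callers with swapped arguments cannot deadlock ABBA-style, and the inner acquisition uses spin_lock_nested(SINGLE_DEPTH_NESTING) purely as a lockdep annotation for taking two locks of the same class. A minimal userspace sketch of the same address-ordering idea, with hypothetical names:

	#include <pthread.h>

	struct queue {
		pthread_mutex_t lock;
		/* ... queue contents ... */
	};

	/* Always lock the queue at the lower address first. */
	static void lock_pair(struct queue *a, struct queue *b)
	{
		if ((unsigned long) a < (unsigned long) b) {
			pthread_mutex_lock(&a->lock);
			pthread_mutex_lock(&b->lock);
		} else {
			pthread_mutex_lock(&b->lock);
			pthread_mutex_lock(&a->lock);
		}
	}

Only the acquisition order matters for deadlock avoidance; the locks may be released in any order.]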
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 4b68a18171cf..b4aa489849df 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1811,12 +1811,11 @@ make_entry(struct lec_priv *priv, unsigned char *mac_addr)
{
struct lec_arp_table *to_return;
- to_return = kmalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
+ to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
if (!to_return) {
printk("LEC: Arp entry kmalloc failed\n");
return NULL;
}
- memset(to_return, 0, sizeof(struct lec_arp_table));
memcpy(to_return->mac_addr, mac_addr, ETH_ALEN);
init_timer(&to_return->timer);
to_return->timer.function = lec_arp_expire_arp;
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 9aafe1e2f048..00704661e83f 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -258,10 +258,9 @@ static struct mpoa_client *alloc_mpc(void)
{
struct mpoa_client *mpc;
- mpc = kmalloc(sizeof (struct mpoa_client), GFP_KERNEL);
+ mpc = kzalloc(sizeof (struct mpoa_client), GFP_KERNEL);
if (mpc == NULL)
return NULL;
- memset(mpc, 0, sizeof(struct mpoa_client));
rwlock_init(&mpc->ingress_lock);
rwlock_init(&mpc->egress_lock);
mpc->next = mpcs;
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 76a7d8ff6c0e..19d5dfc0702f 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -287,10 +287,9 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
if (be.encaps != PPPOATM_ENCAPS_AUTODETECT &&
be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC)
return -EINVAL;
- pvcc = kmalloc(sizeof(*pvcc), GFP_KERNEL);
+ pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL);
if (pvcc == NULL)
return -ENOMEM;
- memset(pvcc, 0, sizeof(*pvcc));
pvcc->atmvcc = atmvcc;
pvcc->old_push = atmvcc->push;
pvcc->old_pop = atmvcc->pop;
diff --git a/net/atm/resources.c b/net/atm/resources.c
index de25c6408b04..529f7e64aa2c 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -33,10 +33,9 @@ static struct atm_dev *__alloc_atm_dev(const char *type)
{
struct atm_dev *dev;
- dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
- memset(dev, 0, sizeof(*dev));
dev->type = type;
dev->signal = ATM_PHY_SIG_UNKNOWN;
dev->link_rate = ATM_OC3_PCR;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 10a3c0aa8398..000695c48583 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -145,7 +145,7 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
ax25_cb *s;
struct hlist_node *node;
- spin_lock_bh(&ax25_list_lock);
+ spin_lock(&ax25_list_lock);
ax25_for_each(s, node, &ax25_list) {
if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
continue;
@@ -154,12 +154,12 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
/* If device is null we match any device */
if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
sock_hold(s->sk);
- spin_unlock_bh(&ax25_list_lock);
+ spin_unlock(&ax25_list_lock);
return s->sk;
}
}
}
- spin_unlock_bh(&ax25_list_lock);
+ spin_unlock(&ax25_list_lock);
return NULL;
}
@@ -174,7 +174,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
ax25_cb *s;
struct hlist_node *node;
- spin_lock_bh(&ax25_list_lock);
+ spin_lock(&ax25_list_lock);
ax25_for_each(s, node, &ax25_list) {
if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
!ax25cmp(&s->dest_addr, dest_addr) &&
@@ -185,7 +185,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
}
}
- spin_unlock_bh(&ax25_list_lock);
+ spin_unlock(&ax25_list_lock);
return sk;
}
@@ -235,7 +235,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
struct sk_buff *copy;
struct hlist_node *node;
- spin_lock_bh(&ax25_list_lock);
+ spin_lock(&ax25_list_lock);
ax25_for_each(s, node, &ax25_list) {
if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
s->sk->sk_type == SOCK_RAW &&
@@ -248,7 +248,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
kfree_skb(copy);
}
}
- spin_unlock_bh(&ax25_list_lock);
+ spin_unlock(&ax25_list_lock);
}
/*
@@ -486,10 +486,9 @@ ax25_cb *ax25_create_cb(void)
{
ax25_cb *ax25;
- if ((ax25 = kmalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
+ if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
return NULL;
- memset(ax25, 0x00, sizeof(*ax25));
atomic_set(&ax25->refcount, 1);
skb_queue_head_init(&ax25->write_queue);
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 47e6e790bd67..b787678220ff 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -55,15 +55,13 @@ void ax25_dev_device_up(struct net_device *dev)
{
ax25_dev *ax25_dev;
- if ((ax25_dev = kmalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
+ if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n");
return;
}
ax25_unregister_sysctl();
- memset(ax25_dev, 0x00, sizeof(*ax25_dev));
-
dev->ax25_ptr = ax25_dev;
ax25_dev->dev = dev;
dev_hold(dev);
diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c
index 1d4ab641f82b..4d22d4430ec8 100644
--- a/net/ax25/ax25_ds_subr.c
+++ b/net/ax25/ax25_ds_subr.c
@@ -80,7 +80,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
ax25_start_t3timer(ax25);
ax25_ds_set_timer(ax25->ax25_dev);
- spin_lock_bh(&ax25_list_lock);
+ spin_lock(&ax25_list_lock);
ax25_for_each(ax25o, node, &ax25_list) {
if (ax25o == ax25)
continue;
@@ -106,7 +106,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
if (ax25o->state != AX25_STATE_0)
ax25_start_t3timer(ax25o);
}
- spin_unlock_bh(&ax25_list_lock);
+ spin_unlock(&ax25_list_lock);
}
void ax25_ds_establish_data_link(ax25_cb *ax25)
@@ -162,13 +162,13 @@ static int ax25_check_dama_slave(ax25_dev *ax25_dev)
int res = 0;
struct hlist_node *node;
- spin_lock_bh(&ax25_list_lock);
+ spin_lock(&ax25_list_lock);
ax25_for_each(ax25, node, &ax25_list)
if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) {
res = 1;
break;
}
- spin_unlock_bh(&ax25_list_lock);
+ spin_unlock(&ax25_list_lock);
return res;
}
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 5961459935eb..4f44185955c7 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -85,7 +85,7 @@ static void ax25_ds_timeout(unsigned long arg)
return;
}
- spin_lock_bh(&ax25_list_lock);
+ spin_lock(&ax25_list_lock);
ax25_for_each(ax25, node, &ax25_list) {
if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE))
continue;
@@ -93,7 +93,7 @@ static void ax25_ds_timeout(unsigned long arg)
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_disconnect(ax25, ETIMEDOUT);
}
- spin_unlock_bh(&ax25_list_lock);
+ spin_unlock(&ax25_list_lock);
ax25_dev_dama_off(ax25_dev);
}
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 77ba07c67682..07ac0207eb69 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -66,10 +66,10 @@ int ax25_protocol_register(unsigned int pid,
protocol->pid = pid;
protocol->func = func;
- write_lock(&protocol_list_lock);
+ write_lock_bh(&protocol_list_lock);
protocol->next = protocol_list;
protocol_list = protocol;
- write_unlock(&protocol_list_lock);
+ write_unlock_bh(&protocol_list_lock);
return 1;
}
@@ -80,16 +80,16 @@ void ax25_protocol_release(unsigned int pid)
{
struct protocol_struct *s, *protocol;
- write_lock(&protocol_list_lock);
+ write_lock_bh(&protocol_list_lock);
protocol = protocol_list;
if (protocol == NULL) {
- write_unlock(&protocol_list_lock);
+ write_unlock_bh(&protocol_list_lock);
return;
}
if (protocol->pid == pid) {
protocol_list = protocol->next;
- write_unlock(&protocol_list_lock);
+ write_unlock_bh(&protocol_list_lock);
kfree(protocol);
return;
}
@@ -98,14 +98,14 @@ void ax25_protocol_release(unsigned int pid)
if (protocol->next->pid == pid) {
s = protocol->next;
protocol->next = protocol->next->next;
- write_unlock(&protocol_list_lock);
+ write_unlock_bh(&protocol_list_lock);
kfree(s);
return;
}
protocol = protocol->next;
}
- write_unlock(&protocol_list_lock);
+ write_unlock_bh(&protocol_list_lock);
}
EXPORT_SYMBOL(ax25_protocol_release);
@@ -266,13 +266,13 @@ int ax25_protocol_is_registered(unsigned int pid)
struct protocol_struct *protocol;
int res = 0;
- read_lock(&protocol_list_lock);
+ read_lock_bh(&protocol_list_lock);
for (protocol = protocol_list; protocol != NULL; protocol = protocol->next)
if (protocol->pid == pid) {
res = 1;
break;
}
- read_unlock(&protocol_list_lock);
+ read_unlock_bh(&protocol_list_lock);
return res;
}
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index 4cf87540fb3a..e9d94291581e 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -102,8 +102,8 @@ static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
int (*func)(struct sk_buff *, ax25_cb *);
- volatile int queued = 0;
unsigned char pid;
+ int queued = 0;
if (skb == NULL) return 0;
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index 369a75b160f2..867d42537979 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -203,13 +203,11 @@ void ax25_register_sysctl(void)
for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
ax25_table_size += sizeof(ctl_table);
- if ((ax25_table = kmalloc(ax25_table_size, GFP_ATOMIC)) == NULL) {
+ if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) {
spin_unlock_bh(&ax25_dev_lock);
return;
}
- memset(ax25_table, 0x00, ax25_table_size);
-
for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) {
ctl_table *child = kmalloc(sizeof(ax25_param_table), GFP_ATOMIC);
if (!child) {
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 6fb47e00e188..be04e9fb11f6 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -75,15 +75,13 @@
static struct cmtp_application *cmtp_application_add(struct cmtp_session *session, __u16 appl)
{
- struct cmtp_application *app = kmalloc(sizeof(*app), GFP_KERNEL);
+ struct cmtp_application *app = kzalloc(sizeof(*app), GFP_KERNEL);
BT_DBG("session %p application %p appl %d", session, app, appl);
if (!app)
return NULL;
- memset(app, 0, sizeof(*app));
-
app->state = BT_OPEN;
app->appl = appl;
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 182254a580e2..b81a01c64aea 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -335,10 +335,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
baswap(&src, &bt_sk(sock->sk)->src);
baswap(&dst, &bt_sk(sock->sk)->dst);
- session = kmalloc(sizeof(struct cmtp_session), GFP_KERNEL);
+ session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
if (!session)
return -ENOMEM;
- memset(session, 0, sizeof(struct cmtp_session));
down_write(&cmtp_session_sem);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 54e8e5ea2154..5ed474277903 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -336,9 +336,8 @@ void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
/* Entry not in the cache. Add new one. */
- if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
+ if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
return;
- memset(e, 0, sizeof(struct inquiry_entry));
e->next = cache->list;
cache->list = e;
}
@@ -800,12 +799,10 @@ struct hci_dev *hci_alloc_dev(void)
{
struct hci_dev *hdev;
- hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
+ hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
if (!hdev)
return NULL;
- memset(hdev, 0, sizeof(struct hci_dev));
-
skb_queue_head_init(&hdev->driver_init);
return hdev;
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index edfea772fb67..c6abf2a5a932 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,7 +1,6 @@
config BT_HIDP
tristate "HIDP protocol support"
- depends on BT && BT_L2CAP && (BROKEN || !S390)
- select INPUT
+ depends on BT && BT_L2CAP && INPUT
help
HIDP (Human Interface Device Protocol) is a transport layer
for HID reports. HIDP is required for the Bluetooth Human
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index b9c24a55425c..c6e3a2c27c6e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -582,10 +582,9 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
return -ENOTUNIQ;
- session = kmalloc(sizeof(struct hidp_session), GFP_KERNEL);
+ session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
if (!session)
return -ENOMEM;
- memset(session, 0, sizeof(struct hidp_session));
session->input = input_allocate_device();
if (!session->input) {
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index eaaad658d11d..d56f60b392ac 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -185,7 +185,7 @@ static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
- write_lock(&l->lock);
+ write_lock_bh(&l->lock);
if (sk == l->head)
l->head = next;
@@ -193,7 +193,7 @@ static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
l2cap_pi(next)->prev_c = prev;
if (prev)
l2cap_pi(prev)->next_c = next;
- write_unlock(&l->lock);
+ write_unlock_bh(&l->lock);
__sock_put(sk);
}
@@ -313,9 +313,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
struct l2cap_chan_list *l = &conn->chan_list;
- write_lock(&l->lock);
+ write_lock_bh(&l->lock);
__l2cap_chan_add(conn, sk, parent);
- write_unlock(&l->lock);
+ write_unlock_bh(&l->lock);
}
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
@@ -328,14 +328,14 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
* 200 - 254 are used by utilities like l2ping, etc.
*/
- spin_lock(&conn->lock);
+ spin_lock_bh(&conn->lock);
if (++conn->tx_ident > 128)
conn->tx_ident = 1;
id = conn->tx_ident;
- spin_unlock(&conn->lock);
+ spin_unlock_bh(&conn->lock);
return id;
}
@@ -1416,11 +1416,11 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
if (!sk)
goto response;
- write_lock(&list->lock);
+ write_lock_bh(&list->lock);
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(list, scid)) {
- write_unlock(&list->lock);
+ write_unlock_bh(&list->lock);
sock_set_flag(sk, SOCK_ZAPPED);
l2cap_sock_kill(sk);
goto response;
@@ -1458,7 +1458,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
result = status = 0;
done:
- write_unlock(&list->lock);
+ write_unlock_bh(&list->lock);
response:
bh_unlock_sock(parent);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 155a2b93760e..332dd8f436ea 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -55,6 +55,7 @@
#define VERSION "1.8"
static int disable_cfc = 0;
+static int channel_mtu = -1;
static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
static struct task_struct *rfcomm_thread;
@@ -273,10 +274,10 @@ static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d)
struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
{
- struct rfcomm_dlc *d = kmalloc(sizeof(*d), prio);
+ struct rfcomm_dlc *d = kzalloc(sizeof(*d), prio);
+
if (!d)
return NULL;
- memset(d, 0, sizeof(*d));
init_timer(&d->timer);
d->timer.function = rfcomm_dlc_timeout;
@@ -289,6 +290,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
rfcomm_dlc_clear_state(d);
BT_DBG("%p", d);
+
return d;
}
@@ -522,10 +524,10 @@ int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig)
/* ---- RFCOMM sessions ---- */
static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state)
{
- struct rfcomm_session *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct rfcomm_session *s = kzalloc(sizeof(*s), GFP_KERNEL);
+
if (!s)
return NULL;
- memset(s, 0, sizeof(*s));
BT_DBG("session %p sock %p", s, sock);
@@ -811,7 +813,10 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d
pn->credits = 0;
}
- pn->mtu = htobs(d->mtu);
+ if (cr && channel_mtu >= 0)
+ pn->mtu = htobs(channel_mtu);
+ else
+ pn->mtu = htobs(d->mtu);
*ptr = __fcs(buf); ptr++;
@@ -1242,7 +1247,10 @@ static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn)
d->priority = pn->priority;
- d->mtu = s->mtu = btohs(pn->mtu);
+ d->mtu = btohs(pn->mtu);
+
+ if (cr && d->mtu > s->mtu)
+ d->mtu = s->mtu;
return 0;
}
@@ -1769,6 +1777,11 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
s = rfcomm_session_add(nsock, BT_OPEN);
if (s) {
rfcomm_session_hold(s);
+
+ /* We should adjust MTU on incoming sessions.
+ * L2CAP MTU minus UIH header and FCS. */
+ s->mtu = min(l2cap_pi(nsock->sk)->omtu, l2cap_pi(nsock->sk)->imtu) - 5;
+
rfcomm_schedule(RFCOMM_SCHED_RX);
} else
sock_release(nsock);
@@ -2086,6 +2099,9 @@ module_exit(rfcomm_exit);
module_param(disable_cfc, bool, 0644);
MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control");
+module_param(channel_mtu, int, 0644);
+MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel");
+
module_param(l2cap_mtu, uint, 0644);
MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection");
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2ff2d5b87c93..bd8d671a0ba6 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -169,10 +169,9 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
BT_DBG("id %d channel %d", req->dev_id, req->channel);
- dev = kmalloc(sizeof(struct rfcomm_dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct rfcomm_dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- memset(dev, 0, sizeof(struct rfcomm_dev));
write_lock_bh(&rfcomm_dev_lock);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 85defccc0287..7714a2ec3854 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -108,17 +108,14 @@ static void sco_sock_init_timer(struct sock *sk)
static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
{
struct hci_dev *hdev = hcon->hdev;
- struct sco_conn *conn;
-
- if ((conn = hcon->sco_data))
- return conn;
+ struct sco_conn *conn = hcon->sco_data;
- if (status)
+ if (conn || status)
return conn;
- if (!(conn = kmalloc(sizeof(struct sco_conn), GFP_ATOMIC)))
+ conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
+ if (!conn)
return NULL;
- memset(conn, 0, sizeof(struct sco_conn));
spin_lock_init(&conn->lock);
@@ -134,6 +131,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
conn->mtu = 60;
BT_DBG("hcon %p conn %p", hcon, conn);
+
return conn;
}
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 8be9f2123e54..6ccd32b30809 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -35,7 +35,7 @@ static inline unsigned packet_length(const struct sk_buff *skb)
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
/* drop mtu oversized packets except gso */
- if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
+ if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
kfree_skb(skb);
else {
#ifdef CONFIG_BRIDGE_NETFILTER
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 159fb8409824..4e4119a12139 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -162,12 +162,10 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (num > BR_MAX_PORTS)
num = BR_MAX_PORTS;
- indices = kmalloc(num*sizeof(int), GFP_KERNEL);
+ indices = kcalloc(num, sizeof(int), GFP_KERNEL);
if (indices == NULL)
return -ENOMEM;
- memset(indices, 0, num*sizeof(int));
-
get_port_ifindices(br, indices, num);
if (copy_to_user((void __user *)args[1], indices, num*sizeof(int)))
num = -EFAULT;
@@ -327,11 +325,10 @@ static int old_deviceless(void __user *uarg)
if (args[2] >= 2048)
return -ENOMEM;
- indices = kmalloc(args[2]*sizeof(int), GFP_KERNEL);
+ indices = kcalloc(args[2], sizeof(int), GFP_KERNEL);
if (indices == NULL)
return -ENOMEM;
- memset(indices, 0, args[2]*sizeof(int));
args[2] = get_bridge_ifindices(indices, args[2]);
ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int))
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 8298a5179aef..05b3de888243 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -61,6 +61,9 @@ static int brnf_filter_vlan_tagged = 1;
#define brnf_filter_vlan_tagged 1
#endif
+int brnf_deferred_hooks;
+EXPORT_SYMBOL_GPL(brnf_deferred_hooks);
+
static __be16 inline vlan_proto(const struct sk_buff *skb)
{
return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -761,7 +764,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
if (skb->protocol == htons(ETH_P_IP) &&
skb->len > skb->dev->mtu &&
- !skb_shinfo(skb)->gso_size)
+ !skb_is_gso(skb))
return ip_fragment(skb, br_dev_queue_push_xmit);
else
return br_dev_queue_push_xmit(skb);
@@ -890,6 +893,8 @@ static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb,
return NF_ACCEPT;
else if (ip->version == 6 && !brnf_call_ip6tables)
return NF_ACCEPT;
+ else if (!brnf_deferred_hooks)
+ return NF_ACCEPT;
#endif
if (hook == NF_IP_POST_ROUTING)
return NF_ACCEPT;
diff --git a/net/core/dev.c b/net/core/dev.c
index 066a60a75280..4d2b5167d7f5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1162,9 +1162,17 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
unsigned int csum;
int ret = 0, offset = skb->h.raw - skb->data;
- if (inward) {
- skb->ip_summed = CHECKSUM_NONE;
- goto out;
+ if (inward)
+ goto out_set_summed;
+
+ if (unlikely(skb_shinfo(skb)->gso_size)) {
+ static int warned;
+
+ WARN_ON(!warned);
+ warned = 1;
+
+ /* Let GSO fix up the checksum. */
+ goto out_set_summed;
}
if (skb_cloned(skb)) {
@@ -1181,6 +1189,8 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
BUG_ON(skb->csum + 2 > offset);
*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
+
+out_set_summed:
skb->ip_summed = CHECKSUM_NONE;
out:
return ret;
@@ -1201,17 +1211,35 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_type *ptype;
int type = skb->protocol;
+ int err;
BUG_ON(skb_shinfo(skb)->frag_list);
- BUG_ON(skb->ip_summed != CHECKSUM_HW);
skb->mac.raw = skb->data;
skb->mac_len = skb->nh.raw - skb->data;
__skb_pull(skb, skb->mac_len);
+ if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+ static int warned;
+
+ WARN_ON(!warned);
+ warned = 1;
+
+ if (skb_header_cloned(skb) &&
+ (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ return ERR_PTR(err);
+ }
+
rcu_read_lock();
list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
+ if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+ err = ptype->gso_send_check(skb);
+ segs = ERR_PTR(err);
+ if (err || skb_gso_ok(skb, features))
+ break;
+ __skb_push(skb, skb->data - skb->nh.raw);
+ }
segs = ptype->gso_segment(skb, features);
break;
}
@@ -1727,7 +1755,7 @@ static int ing_filter(struct sk_buff *skb)
if (dev->qdisc_ingress) {
__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
if (MAX_RED_LOOP < ttl++) {
- printk("Redir loop detected Dropping packet (%s->%s)\n",
+ printk(KERN_WARNING "Redir loop detected Dropping packet (%s->%s)\n",
skb->input_dev->name, skb->dev->name);
return TC_ACT_SHOT;
}
@@ -2922,7 +2950,7 @@ int register_netdevice(struct net_device *dev)
/* Fix illegal SG+CSUM combinations. */
if ((dev->features & NETIF_F_SG) &&
!(dev->features & NETIF_F_ALL_CSUM)) {
- printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
+ printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
dev->name);
dev->features &= ~NETIF_F_SG;
}
@@ -2930,7 +2958,7 @@ int register_netdevice(struct net_device *dev)
/* TSO requires that SG is present as well. */
if ((dev->features & NETIF_F_TSO) &&
!(dev->features & NETIF_F_SG)) {
- printk("%s: Dropping NETIF_F_TSO since no SG feature.\n",
+ printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
dev->name);
dev->features &= ~NETIF_F_TSO;
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 27ce1683caf5..2797e2815418 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -437,7 +437,7 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
{
struct ethtool_pauseparam pauseparam;
- if (!dev->ethtool_ops->get_pauseparam)
+ if (!dev->ethtool_ops->set_pauseparam)
return -EOPNOTSUPP;
if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 44f6a181a754..476aa3978504 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -257,11 +257,11 @@ nodata:
}
-static void skb_drop_fraglist(struct sk_buff *skb)
+static void skb_drop_list(struct sk_buff **listp)
{
- struct sk_buff *list = skb_shinfo(skb)->frag_list;
+ struct sk_buff *list = *listp;
- skb_shinfo(skb)->frag_list = NULL;
+ *listp = NULL;
do {
struct sk_buff *this = list;
@@ -270,6 +270,11 @@ static void skb_drop_fraglist(struct sk_buff *skb)
} while (list);
}
+static inline void skb_drop_fraglist(struct sk_buff *skb)
+{
+ skb_drop_list(&skb_shinfo(skb)->frag_list);
+}
+
static void skb_clone_fraglist(struct sk_buff *skb)
{
struct sk_buff *list;
@@ -830,41 +835,75 @@ free_skb:
int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
+ struct sk_buff **fragp;
+ struct sk_buff *frag;
int offset = skb_headlen(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
int i;
+ int err;
+
+ if (skb_cloned(skb) &&
+ unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
+ return err;
for (i = 0; i < nfrags; i++) {
int end = offset + skb_shinfo(skb)->frags[i].size;
- if (end > len) {
- if (skb_cloned(skb)) {
- if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
- return -ENOMEM;
- }
- if (len <= offset) {
- put_page(skb_shinfo(skb)->frags[i].page);
- skb_shinfo(skb)->nr_frags--;
- } else {
- skb_shinfo(skb)->frags[i].size = len - offset;
- }
+
+ if (end < len) {
+ offset = end;
+ continue;
}
- offset = end;
+
+ if (len > offset)
+ skb_shinfo(skb)->frags[i++].size = len - offset;
+
+ skb_shinfo(skb)->nr_frags = i;
+
+ for (; i < nfrags; i++)
+ put_page(skb_shinfo(skb)->frags[i].page);
+
+ if (skb_shinfo(skb)->frag_list)
+ skb_drop_fraglist(skb);
+ break;
}
- if (offset < len) {
+ for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
+ fragp = &frag->next) {
+ int end = offset + frag->len;
+
+ if (skb_shared(frag)) {
+ struct sk_buff *nfrag;
+
+ nfrag = skb_clone(frag, GFP_ATOMIC);
+ if (unlikely(!nfrag))
+ return -ENOMEM;
+
+ nfrag->next = frag->next;
+ frag = nfrag;
+ *fragp = frag;
+ }
+
+ if (end < len) {
+ offset = end;
+ continue;
+ }
+
+ if (end > len &&
+ unlikely((err = pskb_trim(frag, len - offset))))
+ return err;
+
+ if (frag->next)
+ skb_drop_list(&frag->next);
+ break;
+ }
+
+ if (len > skb_headlen(skb)) {
skb->data_len -= skb->len - len;
skb->len = len;
} else {
- if (len <= skb_headlen(skb)) {
- skb->len = len;
- skb->data_len = 0;
- skb->tail = skb->data + len;
- if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
- skb_drop_fraglist(skb);
- } else {
- skb->data_len -= skb->len - len;
- skb->len = len;
- }
+ skb->len = len;
+ skb->data_len = 0;
+ skb->tail = skb->data + len;
}
return 0;
diff --git a/net/core/stream.c b/net/core/stream.c
index e9489696f694..d1d7decf70b0 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -196,15 +196,13 @@ EXPORT_SYMBOL(sk_stream_error);
void __sk_stream_mem_reclaim(struct sock *sk)
{
- if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM) {
- atomic_sub(sk->sk_forward_alloc / SK_STREAM_MEM_QUANTUM,
- sk->sk_prot->memory_allocated);
- sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1;
- if (*sk->sk_prot->memory_pressure &&
- (atomic_read(sk->sk_prot->memory_allocated) <
- sk->sk_prot->sysctl_mem[0]))
- *sk->sk_prot->memory_pressure = 0;
- }
+ atomic_sub(sk->sk_forward_alloc / SK_STREAM_MEM_QUANTUM,
+ sk->sk_prot->memory_allocated);
+ sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1;
+ if (*sk->sk_prot->memory_pressure &&
+ (atomic_read(sk->sk_prot->memory_allocated) <
+ sk->sk_prot->sysctl_mem[0]))
+ *sk->sk_prot->memory_pressure = 0;
}
EXPORT_SYMBOL(__sk_stream_mem_reclaim);
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index b7c98dbcdb81..248a6b666aff 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -29,6 +29,7 @@
#include <linux/socket.h>
#include <linux/rtnetlink.h> /* for BUG_TRAP */
#include <net/tcp.h>
+#include <net/netdma.h>
#define NET_DMA_DEFAULT_COPYBREAK 4096
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index 6048373c7186..b44c45504fb6 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -26,4 +26,6 @@ extern void dccp_feat_clean(struct dccp_minisock *dmsk);
extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk);
extern int dccp_feat_init(struct dccp_minisock *dmsk);
+extern int dccp_feat_default_sequence_window;
+
#endif /* _DCCP_FEAT_H */
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index c3073e7e81d3..7f56f7e8f571 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -504,8 +504,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
ireq = inet_rsk(req);
ireq->loc_addr = daddr;
ireq->rmt_addr = saddr;
- req->rcv_wnd = 100; /* Fake, option parsing will get the
- right value */
+ req->rcv_wnd = dccp_feat_default_sequence_window;
ireq->opt = NULL;
/*
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index ff42bc43263d..9f3d4d7cd0bf 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -31,6 +31,7 @@
#include "dccp.h"
#include "ipv6.h"
+#include "feat.h"
/* Socket used for sending RSTs and ACKs */
static struct socket *dccp_v6_ctl_socket;
@@ -707,8 +708,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
ireq = inet_rsk(req);
ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
- req->rcv_wnd = 100; /* Fake, option parsing will get the
- right value */
+ req->rcv_wnd = dccp_feat_default_sequence_window;
ireq6->pktopts = NULL;
if (ipv6_opt_accepted(sk, skb) ||
diff --git a/net/dccp/options.c b/net/dccp/options.c
index c3cda1e39aa8..daf72bb671f0 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -29,6 +29,8 @@ int dccp_feat_default_ack_ratio = DCCPF_INITIAL_ACK_RATIO;
int dccp_feat_default_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR;
int dccp_feat_default_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT;
+EXPORT_SYMBOL_GPL(dccp_feat_default_sequence_window);
+
void dccp_minisock_init(struct dccp_minisock *dmsk)
{
dmsk->dccpms_sequence_window = dccp_feat_default_sequence_window;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index f4f0627ea41c..6f14bb5a28d4 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -484,7 +484,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
err = -EINVAL;
else
err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
- (struct dccp_so_feat *)
+ (struct dccp_so_feat __user *)
optval);
break;
@@ -493,7 +493,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
err = -EINVAL;
else
err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R,
- (struct dccp_so_feat *)
+ (struct dccp_so_feat __user *)
optval);
break;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 98a25208440d..476455fbdb03 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -413,11 +413,7 @@ static struct dn_ifaddr *dn_dev_alloc_ifa(void)
{
struct dn_ifaddr *ifa;
- ifa = kmalloc(sizeof(*ifa), GFP_KERNEL);
-
- if (ifa) {
- memset(ifa, 0, sizeof(*ifa));
- }
+ ifa = kzalloc(sizeof(*ifa), GFP_KERNEL);
return ifa;
}
@@ -1105,10 +1101,9 @@ struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
return NULL;
*err = -ENOBUFS;
- if ((dn_db = kmalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL)
+ if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL)
return NULL;
- memset(dn_db, 0, sizeof(struct dn_dev));
memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
smp_wmb();
dev->dn_ptr = dn_db;
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 0375077391b7..fa20e2efcfc1 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -283,11 +283,10 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta
goto err_inval;
}
- fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL);
+ fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL);
err = -ENOBUFS;
if (fi == NULL)
goto failure;
- memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct dn_fib_nh));
fi->fib_protocol = r->rtm_protocol;
fi->fib_nhs = nhs;
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 5ce9c9e0565c..ff0ebe99137d 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -580,12 +580,11 @@ static int dn_neigh_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
- struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
- memset(s, 0, sizeof(*s));
rc = seq_open(file, &dn_neigh_seq_ops);
if (rc)
goto out_kfree;
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 06e785fe5757..6986be754ef2 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -151,10 +151,9 @@ int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
}
}
- new_r = kmalloc(sizeof(*new_r), GFP_KERNEL);
+ new_r = kzalloc(sizeof(*new_r), GFP_KERNEL);
if (!new_r)
return -ENOMEM;
- memset(new_r, 0, sizeof(*new_r));
if (rta[RTA_SRC-1])
memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2);
@@ -399,9 +398,10 @@ int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
if (idx < s_idx)
- continue;
+ goto next;
if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
break;
+next:
idx++;
}
rcu_read_unlock();
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 37d9d0a1ac8c..e926c952e363 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -158,12 +158,10 @@ static void dn_rehash_zone(struct dn_zone *dz)
break;
}
- ht = kmalloc(new_divisor*sizeof(struct dn_fib_node*), GFP_KERNEL);
-
+ ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL);
if (ht == NULL)
return;
- memset(ht, 0, new_divisor*sizeof(struct dn_fib_node *));
write_lock_bh(&dn_fib_tables_lock);
old_ht = dz->dz_hash;
dz->dz_hash = ht;
@@ -184,11 +182,10 @@ static void dn_free_node(struct dn_fib_node *f)
static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
{
int i;
- struct dn_zone *dz = kmalloc(sizeof(struct dn_zone), GFP_KERNEL);
+ struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL);
if (!dz)
return NULL;
- memset(dz, 0, sizeof(struct dn_zone));
if (z) {
dz->dz_divisor = 16;
dz->dz_hashmask = 0x0F;
@@ -197,14 +194,12 @@ static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
dz->dz_hashmask = 0;
}
- dz->dz_hash = kmalloc(dz->dz_divisor*sizeof(struct dn_fib_node *), GFP_KERNEL);
-
+ dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL);
if (!dz->dz_hash) {
kfree(dz);
return NULL;
}
- memset(dz->dz_hash, 0, dz->dz_divisor*sizeof(struct dn_fib_node*));
dz->dz_order = z;
dz->dz_mask = dnet_make_mask(z);
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 309ae4c6549a..4d66aac13483 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -673,12 +673,11 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
edev = dev->ec_ptr;
if (edev == NULL) {
/* Magic up a new one. */
- edev = kmalloc(sizeof(struct ec_device), GFP_KERNEL);
+ edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL);
if (edev == NULL) {
err = -ENOMEM;
break;
}
- memset(edev, 0, sizeof(struct ec_device));
dev->ec_ptr = edev;
} else
net2dev_map[edev->net] = NULL;
diff --git a/net/ieee80211/Kconfig b/net/ieee80211/Kconfig
index dbb08528ddf5..f7e84e9d13ad 100644
--- a/net/ieee80211/Kconfig
+++ b/net/ieee80211/Kconfig
@@ -58,6 +58,7 @@ config IEEE80211_CRYPT_TKIP
depends on IEEE80211 && NET_RADIO
select CRYPTO
select CRYPTO_MICHAEL_MIC
+ select CRC32
---help---
Include software based cipher suites in support of IEEE 802.11i
(aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with TKIP enabled
diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c
index cb71d794a7d1..5ed0a98b2d76 100644
--- a/net/ieee80211/ieee80211_crypt.c
+++ b/net/ieee80211/ieee80211_crypt.c
@@ -110,11 +110,10 @@ int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
unsigned long flags;
struct ieee80211_crypto_alg *alg;
- alg = kmalloc(sizeof(*alg), GFP_KERNEL);
+ alg = kzalloc(sizeof(*alg), GFP_KERNEL);
if (alg == NULL)
return -ENOMEM;
- memset(alg, 0, sizeof(*alg));
alg->ops = ops;
spin_lock_irqsave(&ieee80211_crypto_lock, flags);
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c
index 492647382ad0..ed90a8af1444 100644
--- a/net/ieee80211/ieee80211_crypt_ccmp.c
+++ b/net/ieee80211/ieee80211_crypt_ccmp.c
@@ -76,10 +76,9 @@ static void *ieee80211_ccmp_init(int key_idx)
{
struct ieee80211_ccmp_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
priv->tfm = crypto_alloc_tfm("aes", 0);
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index c5a87724aabe..0ebf235f6939 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -39,10 +39,9 @@ static void *prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = keyidx;
priv->tfm = crypto_alloc_tfm("arc4", 0);
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index a78c4f845f66..5cb9cfd35397 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -369,11 +369,10 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
struct ieee80211_crypt_data *new_crypt;
/* take WEP into use */
- new_crypt = kmalloc(sizeof(struct ieee80211_crypt_data),
+ new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
GFP_KERNEL);
if (new_crypt == NULL)
return -ENOMEM;
- memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
new_crypt->ops = ieee80211_get_crypto_ops("WEP");
if (!new_crypt->ops) {
request_module("ieee80211_crypt_wep");
@@ -616,13 +615,11 @@ int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee,
ieee80211_crypt_delayed_deinit(ieee, crypt);
- new_crypt = (struct ieee80211_crypt_data *)
- kmalloc(sizeof(*new_crypt), GFP_KERNEL);
+ new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
}
- memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
new_crypt->ops = ops;
if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
new_crypt->priv = new_crypt->ops->init(idx);
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index ebc33ca6e692..4cef39e171d0 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -116,6 +116,16 @@ ieee80211softmac_auth_queue(void *data)
kfree(auth);
}
+/* Sends a response to an auth challenge (for shared key auth). */
+static void
+ieee80211softmac_auth_challenge_response(void *_aq)
+{
+ struct ieee80211softmac_auth_queue_item *aq = _aq;
+
+ /* Send our response */
+ ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
+}
+
/* Handle the auth response from the AP
* This should be registered with ieee80211 as handle_auth
*/
@@ -197,24 +207,30 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE:
/* Check to make sure we have a challenge IE */
data = (u8 *)auth->info_element;
- if(*data++ != MFIE_TYPE_CHALLENGE){
+ if (*data++ != MFIE_TYPE_CHALLENGE) {
printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n");
break;
}
/* Save the challenge */
spin_lock_irqsave(&mac->lock, flags);
net->challenge_len = *data++;
- if(net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
+ if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
net->challenge_len = WLAN_AUTH_CHALLENGE_LEN;
- if(net->challenge != NULL)
+ if (net->challenge != NULL)
kfree(net->challenge);
net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC);
memcpy(net->challenge, data, net->challenge_len);
aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE;
- spin_unlock_irqrestore(&mac->lock, flags);
- /* Send our response */
- ieee80211softmac_send_mgt_frame(mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
+ /* We reuse the work struct from the auth request here.
+ * It is safe to do so as each one is per-request, and
+ * at this point (dealing with authentication response)
+ * we have obviously already sent the initial auth
+ * request. */
+ cancel_delayed_work(&aq->work);
+ INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
+ schedule_work(&aq->work);
+ spin_unlock_irqrestore(&mac->lock, flags);
return 0;
case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
kfree(net->challenge);
diff --git a/net/ieee80211/softmac/ieee80211softmac_io.c b/net/ieee80211/softmac/ieee80211softmac_io.c
index 8cc8b20f5cda..6ae5a1dc7956 100644
--- a/net/ieee80211/softmac/ieee80211softmac_io.c
+++ b/net/ieee80211/softmac/ieee80211softmac_io.c
@@ -96,8 +96,7 @@ ieee80211softmac_alloc_mgt(u32 size)
if(size > IEEE80211_DATA_LEN)
return NULL;
/* Allocate the frame */
- data = kmalloc(size, GFP_ATOMIC);
- memset(data, 0, size);
+ data = kzalloc(size, GFP_ATOMIC);
return data;
}
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index da33393be45f..8514106761b0 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -572,16 +572,6 @@ config TCP_CONG_VENO
loss packets.
See http://www.ntu.edu.sg/home5/ZHOU0022/papers/CPFu03a.pdf
-config TCP_CONG_COMPOUND
- tristate "TCP Compound"
- depends on EXPERIMENTAL
- default n
- ---help---
- TCP Compound is a sender-side only change to TCP that uses
- a mixed Reno/Vegas approach to calculate the cwnd.
- For further details look here:
- ftp://ftp.research.microsoft.com/pub/tr/TR-2005-86.pdf
-
endmenu
config TCP_CONG_BIC
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 38b8039bdd55..4878fc5be85f 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_TCP_CONG_VEGAS) += tcp_vegas.o
obj-$(CONFIG_TCP_CONG_VENO) += tcp_veno.o
obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
-obj-$(CONFIG_TCP_CONG_COMPOUND) += tcp_compound.o
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
xfrm4_output.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 318d4674faa1..c84a32070f8d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1097,6 +1097,40 @@ int inet_sk_rebuild_header(struct sock *sk)
EXPORT_SYMBOL(inet_sk_rebuild_header);
+static int inet_gso_send_check(struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ struct net_protocol *ops;
+ int proto;
+ int ihl;
+ int err = -EINVAL;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+ goto out;
+
+ iph = skb->nh.iph;
+ ihl = iph->ihl * 4;
+ if (ihl < sizeof(*iph))
+ goto out;
+
+ if (unlikely(!pskb_may_pull(skb, ihl)))
+ goto out;
+
+ skb->h.raw = __skb_pull(skb, ihl);
+ iph = skb->nh.iph;
+ proto = iph->protocol & (MAX_INET_PROTOS - 1);
+ err = -EPROTONOSUPPORT;
+
+ rcu_read_lock();
+ ops = rcu_dereference(inet_protos[proto]);
+ if (likely(ops && ops->gso_send_check))
+ err = ops->gso_send_check(skb);
+ rcu_read_unlock();
+
+out:
+ return err;
+}
+
static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
@@ -1162,6 +1196,7 @@ static struct net_protocol igmp_protocol = {
static struct net_protocol tcp_protocol = {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
+ .gso_send_check = tcp_v4_gso_send_check,
.gso_segment = tcp_tso_segment,
.no_policy = 1,
};
@@ -1208,6 +1243,7 @@ static int ipv4_proc_init(void);
static struct packet_type ip_packet_type = {
.type = __constant_htons(ETH_P_IP),
.func = ip_rcv,
+ .gso_send_check = inet_gso_send_check,
.gso_segment = inet_gso_segment,
};
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 8e748be36c5a..1366bc6ce6a5 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -215,12 +215,10 @@ static int ah_init_state(struct xfrm_state *x)
if (x->encap)
goto error;
- ahp = kmalloc(sizeof(*ahp), GFP_KERNEL);
+ ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
if (ahp == NULL)
return -ENOMEM;
- memset(ahp, 0, sizeof(*ahp));
-
ahp->key = x->aalg->alg_key;
ahp->key_len = (x->aalg->alg_key_len+7)/8;
ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 7b51b3bdb548..c8a3723bc001 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1372,12 +1372,11 @@ static int arp_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
- struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
- memset(s, 0, sizeof(*s));
rc = seq_open(file, &arp_seq_ops);
if (rc)
goto out_kfree;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index a7c65e9e5ec9..a6cc31d911eb 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -93,10 +93,9 @@ static void devinet_sysctl_unregister(struct ipv4_devconf *p);
static struct in_ifaddr *inet_alloc_ifa(void)
{
- struct in_ifaddr *ifa = kmalloc(sizeof(*ifa), GFP_KERNEL);
+ struct in_ifaddr *ifa = kzalloc(sizeof(*ifa), GFP_KERNEL);
if (ifa) {
- memset(ifa, 0, sizeof(*ifa));
INIT_RCU_HEAD(&ifa->rcu_head);
}
@@ -140,10 +139,9 @@ struct in_device *inetdev_init(struct net_device *dev)
ASSERT_RTNL();
- in_dev = kmalloc(sizeof(*in_dev), GFP_KERNEL);
+ in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
if (!in_dev)
goto out;
- memset(in_dev, 0, sizeof(*in_dev));
INIT_RCU_HEAD(&in_dev->rcu_head);
memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf));
in_dev->cnf.sysctl = NULL;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 4e112738b3fa..fc2f8ce441de 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -316,12 +316,10 @@ static int esp_init_state(struct xfrm_state *x)
if (x->ealg == NULL)
goto error;
- esp = kmalloc(sizeof(*esp), GFP_KERNEL);
+ esp = kzalloc(sizeof(*esp), GFP_KERNEL);
if (esp == NULL)
return -ENOMEM;
- memset(esp, 0, sizeof(*esp));
-
if (x->aalg) {
struct xfrm_algo_desc *aalg_desc;
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 3c1d32ad35f2..72c633b357cf 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -204,11 +204,10 @@ static struct fn_zone *
fn_new_zone(struct fn_hash *table, int z)
{
int i;
- struct fn_zone *fz = kmalloc(sizeof(struct fn_zone), GFP_KERNEL);
+ struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL);
if (!fz)
return NULL;
- memset(fz, 0, sizeof(struct fn_zone));
if (z) {
fz->fz_divisor = 16;
} else {
@@ -1046,7 +1045,7 @@ static int fib_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
- struct fib_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct fib_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
@@ -1057,7 +1056,6 @@ static int fib_seq_open(struct inode *inode, struct file *file)
seq = file->private_data;
seq->private = s;
- memset(s, 0, sizeof(*s));
out:
return rc;
out_kfree:
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 6c642d11d4ca..79b04718bdfd 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -196,10 +196,9 @@ int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
}
}
- new_r = kmalloc(sizeof(*new_r), GFP_KERNEL);
+ new_r = kzalloc(sizeof(*new_r), GFP_KERNEL);
if (!new_r)
return -ENOMEM;
- memset(new_r, 0, sizeof(*new_r));
if (rta[RTA_SRC-1])
memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4);
@@ -457,13 +456,13 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
hlist_for_each_entry(r, node, &fib_rules, hlist) {
-
if (idx < s_idx)
- continue;
+ goto next;
if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_NEWRULE, NLM_F_MULTI) < 0)
break;
+next:
idx++;
}
rcu_read_unlock();
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 5f87533684d5..9be53a8e72c3 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -709,11 +709,10 @@ fib_create_info(const struct rtmsg *r, struct kern_rta *rta,
goto failure;
}
- fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
+ fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
if (fi == NULL)
goto failure;
fib_info_cnt++;
- memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct fib_nh));
fi->fib_protocol = r->rtm_protocol;
@@ -962,10 +961,6 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
rtm->rtm_protocol = fi->fib_protocol;
if (fi->fib_priority)
RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
-#ifdef CONFIG_NET_CLS_ROUTE
- if (fi->fib_nh[0].nh_tclassid)
- RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid);
-#endif
if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
goto rtattr_failure;
if (fi->fib_prefsrc)
@@ -975,6 +970,10 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
RTA_PUT(skb, RTA_GATEWAY, 4, &fi->fib_nh->nh_gw);
if (fi->fib_nh->nh_oif)
RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
+#ifdef CONFIG_NET_CLS_ROUTE
+ if (fi->fib_nh[0].nh_tclassid)
+ RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid);
+#endif
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (fi->fib_nhs > 1) {
@@ -993,6 +992,10 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
nhp->rtnh_ifindex = nh->nh_oif;
if (nh->nh_gw)
RTA_PUT(skb, RTA_GATEWAY, 4, &nh->nh_gw);
+#ifdef CONFIG_NET_CLS_ROUTE
+ if (nh->nh_tclassid)
+ RTA_PUT(skb, RTA_FLOW, 4, &nh->nh_tclassid);
+#endif
nhp->rtnh_len = skb->tail - (unsigned char*)nhp;
} endfor_nexthops(fi);
mp_head->rta_type = RTA_MULTIPATH;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1cb65305e102..23fb9d9768e3 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1252,8 +1252,8 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
*/
if (!fa_head) {
- fa_head = fib_insert_node(t, &err, key, plen);
err = 0;
+ fa_head = fib_insert_node(t, &err, key, plen);
if (err)
goto out_free_new_fa;
}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d299c8e547d6..9f4b752f5a33 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1028,10 +1028,9 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
* for deleted items allows change reports to use common code with
* non-deleted or query-response MCA's.
*/
- pmc = kmalloc(sizeof(*pmc), GFP_KERNEL);
+ pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
if (!pmc)
return;
- memset(pmc, 0, sizeof(*pmc));
spin_lock_bh(&im->lock);
pmc->interface = im->interface;
in_dev_hold(in_dev);
@@ -1529,10 +1528,9 @@ static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
psf_prev = psf;
}
if (!psf) {
- psf = kmalloc(sizeof(*psf), GFP_ATOMIC);
+ psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
if (!psf)
return -ENOBUFS;
- memset(psf, 0, sizeof(*psf));
psf->sf_inaddr = *psfsrc;
if (psf_prev) {
psf_prev->sf_next = psf;
@@ -2380,7 +2378,7 @@ static int igmp_mc_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
- struct igmp_mc_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct igmp_mc_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
@@ -2390,7 +2388,6 @@ static int igmp_mc_seq_open(struct inode *inode, struct file *file)
seq = file->private_data;
seq->private = s;
- memset(s, 0, sizeof(*s));
out:
return rc;
out_kfree:
@@ -2555,7 +2552,7 @@ static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
- struct igmp_mcf_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct igmp_mcf_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
@@ -2565,7 +2562,6 @@ static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
seq = file->private_data;
seq->private = s;
- memset(s, 0, sizeof(*s));
out:
return rc;
out_kfree:
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8e7e41b66c79..492858e6faf0 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -909,11 +909,10 @@ static int __init inet_diag_init(void)
sizeof(struct inet_diag_handler *));
int err = -ENOMEM;
- inet_diag_table = kmalloc(inet_diag_table_size, GFP_KERNEL);
+ inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
if (!inet_diag_table)
goto out;
- memset(inet_diag_table, 0, inet_diag_table_size);
idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv,
THIS_MODULE);
if (idiagnl == NULL)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 2160874ce7aa..03ff62ebcfeb 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -86,7 +86,7 @@ static struct inet_peer *peer_root = peer_avl_empty;
static DEFINE_RWLOCK(peer_pool_lock);
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
-static volatile int peer_total;
+static int peer_total;
/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold = 65536 + 128; /* start to throw entries more
* aggressively at this stage */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6ff9b10d9563..0f9b3a31997b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -617,7 +617,6 @@ static int ipgre_rcv(struct sk_buff *skb)
skb->mac.raw = skb->nh.raw;
skb->nh.raw = __pskb_pull(skb, offset);
skb_postpull_rcsum(skb, skb->h.raw, offset);
- memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (MULTICAST(iph->daddr)) {
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index e1a7dba2fa8a..212734ca238f 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -428,6 +428,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
goto drop;
}
+ /* Remove any debris in the socket control block */
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+
return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL,
ip_rcv_finish);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index cbcae6544622..406056edc02b 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -256,7 +256,6 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
if (!opt) {
opt = &(IPCB(skb)->opt);
- memset(opt, 0, sizeof(struct ip_options));
iph = skb->nh.raw;
opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr);
optptr = iph + sizeof(struct iphdr);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ca0e714613fb..7c9f9a6421b8 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -209,7 +209,7 @@ static inline int ip_finish_output(struct sk_buff *skb)
return dst_output(skb);
}
#endif
- if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
+ if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
return ip_fragment(skb, ip_finish_output2);
else
return ip_finish_output2(skb);
@@ -1095,7 +1095,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
while (size > 0) {
int i;
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
len = size;
else {
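
Several hunks in this series replace open-coded checks of skb_shinfo(skb)->gso_size with skb_is_gso(). The helper itself is not part of this diff; it presumably lives in include/linux/skbuff.h and amounts to something like the sketch below (reconstructed from memory, not quoted from the patch):

#include <linux/skbuff.h>

/* Sketch of the helper assumed by the hunks above: an skb counts as GSO when
 * its shared info carries a non-zero segment size set by the TCP/UDP senders. */
static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
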
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 8e0374847532..a0c28b2b756e 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -70,7 +70,8 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
if (err)
goto out;
- skb_put(skb, dlen - plen);
+ skb->truesize += dlen - plen;
+ __skb_put(skb, dlen - plen);
memcpy(skb->data, scratch, dlen);
out:
put_cpu();
@@ -409,11 +410,10 @@ static int ipcomp_init_state(struct xfrm_state *x)
goto out;
err = -ENOMEM;
- ipcd = kmalloc(sizeof(*ipcd), GFP_KERNEL);
+ ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
if (!ipcd)
goto out;
- memset(ipcd, 0, sizeof(*ipcd));
x->props.header_len = 0;
if (x->props.mode)
x->props.header_len += sizeof(struct iphdr);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 3291d5192aad..76ab50b0d6ef 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -487,7 +487,6 @@ static int ipip_rcv(struct sk_buff *skb)
skb->mac.raw = skb->nh.raw;
skb->nh.raw = skb->data;
- memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
skb->protocol = htons(ETH_P_IP);
skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ba33f8621c67..85893eef6b16 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1461,7 +1461,6 @@ int pim_rcv_v1(struct sk_buff * skb)
skb_pull(skb, (u8*)encap - skb->data);
skb->nh.iph = (struct iphdr *)skb->data;
skb->dev = reg_dev;
- memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
skb->protocol = htons(ETH_P_IP);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
@@ -1517,7 +1516,6 @@ static int pim_rcv(struct sk_buff * skb)
skb_pull(skb, (u8*)encap - skb->data);
skb->nh.iph = (struct iphdr *)skb->data;
skb->dev = reg_dev;
- memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
skb->protocol = htons(ETH_P_IP);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
@@ -1580,6 +1578,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
if (cache==NULL) {
+ struct sk_buff *skb2;
struct net_device *dev;
int vif;
@@ -1593,12 +1592,18 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
read_unlock(&mrt_lock);
return -ENODEV;
}
- skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
- skb->nh.iph->ihl = sizeof(struct iphdr)>>2;
- skb->nh.iph->saddr = rt->rt_src;
- skb->nh.iph->daddr = rt->rt_dst;
- skb->nh.iph->version = 0;
- err = ipmr_cache_unresolved(vif, skb);
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb2) {
+ read_unlock(&mrt_lock);
+ return -ENOMEM;
+ }
+
+ skb2->nh.raw = skb_push(skb2, sizeof(struct iphdr));
+ skb2->nh.iph->ihl = sizeof(struct iphdr)>>2;
+ skb2->nh.iph->saddr = rt->rt_src;
+ skb2->nh.iph->daddr = rt->rt_dst;
+ skb2->nh.iph->version = 0;
+ err = ipmr_cache_unresolved(vif, skb2);
read_unlock(&mrt_lock);
return err;
}
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index f28ec6882162..6a28fafe910c 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -735,12 +735,11 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
if (atype != RTN_LOCAL && atype != RTN_UNICAST)
return -EINVAL;
- dest = kmalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
+ dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
if (dest == NULL) {
IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n");
return -ENOMEM;
}
- memset(dest, 0, sizeof(struct ip_vs_dest));
dest->protocol = svc->protocol;
dest->vaddr = svc->addr;
@@ -1050,14 +1049,12 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
goto out_mod_dec;
}
- svc = (struct ip_vs_service *)
- kmalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
+ svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
if (svc == NULL) {
IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n");
ret = -ENOMEM;
goto out_err;
}
- memset(svc, 0, sizeof(struct ip_vs_service));
/* I'm the first user of the service */
atomic_set(&svc->usecnt, 1);
@@ -1797,7 +1794,7 @@ static int ip_vs_info_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
- struct ip_vs_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct ip_vs_iter *s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
@@ -1808,7 +1805,6 @@ static int ip_vs_info_open(struct inode *inode, struct file *file)
seq = file->private_data;
seq->private = s;
- memset(s, 0, sizeof(*s));
out:
return rc;
out_kfree:
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index 4c1940381ba0..7d68b80c4c19 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -123,11 +123,10 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats)
{
struct ip_vs_estimator *est;
- est = kmalloc(sizeof(*est), GFP_KERNEL);
+ est = kzalloc(sizeof(*est), GFP_KERNEL);
if (est == NULL)
return -ENOMEM;
- memset(est, 0, sizeof(*est));
est->stats = stats;
est->last_conns = stats->conns;
est->cps = stats->cps<<10;
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
index af35235672d5..9a39e2969712 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
@@ -1200,7 +1200,7 @@ static struct ip_conntrack_expect *find_expect(struct ip_conntrack *ct,
tuple.dst.protonum = IPPROTO_TCP;
exp = __ip_conntrack_expect_find(&tuple);
- if (exp->master == ct)
+ if (exp && exp->master == ct)
return exp;
return NULL;
}
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index 7bd3c22003a2..7a9fa04a467a 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -534,6 +534,8 @@ static struct nf_hook_ops ip_conntrack_ops[] = {
/* Sysctl support */
+int ip_conntrack_checksum = 1;
+
#ifdef CONFIG_SYSCTL
/* From ip_conntrack_core.c */
@@ -568,8 +570,6 @@ extern unsigned int ip_ct_generic_timeout;
static int log_invalid_proto_min = 0;
static int log_invalid_proto_max = 255;
-int ip_conntrack_checksum = 1;
-
static struct ctl_table_header *ip_ct_sysctl_header;
static ctl_table ip_ct_sysctl_table[] = {
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c
index 0b1b416759cc..18b7fbdccb61 100644
--- a/net/ipv4/netfilter/ip_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c
@@ -1255,9 +1255,9 @@ static int help(struct sk_buff **pskb,
struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
/* SNMP replies and originating SNMP traps get mangled */
- if (udph->source == ntohs(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
+ if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
return NF_ACCEPT;
- if (udph->dest == ntohs(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
+ if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
return NF_ACCEPT;
/* No NAT? */
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index cbffeae3f565..d994c5f5744c 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -172,11 +172,10 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, u_int32_t ip,
struct clusterip_config *c;
char buffer[16];
- c = kmalloc(sizeof(*c), GFP_ATOMIC);
+ c = kzalloc(sizeof(*c), GFP_ATOMIC);
if (!c)
return NULL;
- memset(c, 0, sizeof(*c));
c->dev = dev;
c->clusterip = ip;
memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bd221ec3f81e..62b2762a2420 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -609,6 +609,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (sin) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = skb->nh.iph->saddr;
+ sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}
if (inet->cmsg_flags)
diff --git a/net/ipv4/tcp_compound.c b/net/ipv4/tcp_compound.c
deleted file mode 100644
index bc54f7e9aea9..000000000000
--- a/net/ipv4/tcp_compound.c
+++ /dev/null
@@ -1,448 +0,0 @@
-/*
- * TCP Vegas congestion control
- *
- * This is based on the congestion detection/avoidance scheme described in
- * Lawrence S. Brakmo and Larry L. Peterson.
- * "TCP Vegas: End to end congestion avoidance on a global internet."
- * IEEE Journal on Selected Areas in Communication, 13(8):1465--1480,
- * October 1995. Available from:
- * ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps
- *
- * See http://www.cs.arizona.edu/xkernel/ for their implementation.
- * The main aspects that distinguish this implementation from the
- * Arizona Vegas implementation are:
- * o We do not change the loss detection or recovery mechanisms of
- * Linux in any way. Linux already recovers from losses quite well,
- * using fine-grained timers, NewReno, and FACK.
- * o To avoid the performance penalty imposed by increasing cwnd
- * only every-other RTT during slow start, we increase during
- * every RTT during slow start, just like Reno.
- * o Largely to allow continuous cwnd growth during slow start,
- * we use the rate at which ACKs come back as the "actual"
- * rate, rather than the rate at which data is sent.
- * o To speed convergence to the right rate, we set the cwnd
- * to achieve the right ("actual") rate when we exit slow start.
- * o To filter out the noise caused by delayed ACKs, we use the
- * minimum RTT sample observed during the last RTT to calculate
- * the actual rate.
- * o When the sender re-starts from idle, it waits until it has
- * received ACKs for an entire flight of new data before making
- * a cwnd adjustment decision. The original Vegas implementation
- * assumed senders never went idle.
- *
- *
- * TCP Compound based on TCP Vegas
- *
- * further details can be found here:
- * ftp://ftp.research.microsoft.com/pub/tr/TR-2005-86.pdf
- */
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/inet_diag.h>
-
-#include <net/tcp.h>
-
-/* Default values of the Vegas variables, in fixed-point representation
- * with V_PARAM_SHIFT bits to the right of the binary point.
- */
-#define V_PARAM_SHIFT 1
-
-#define TCP_COMPOUND_ALPHA 3U
-#define TCP_COMPOUND_BETA 1U
-#define TCP_COMPOUND_GAMMA 30
-#define TCP_COMPOUND_ZETA 1
-
-/* TCP compound variables */
-struct compound {
- u32 beg_snd_nxt; /* right edge during last RTT */
- u32 beg_snd_una; /* left edge during last RTT */
- u32 beg_snd_cwnd; /* saves the size of the cwnd */
- u8 doing_vegas_now; /* if true, do vegas for this RTT */
- u16 cntRTT; /* # of RTTs measured within last RTT */
- u32 minRTT; /* min of RTTs measured within last RTT (in usec) */
- u32 baseRTT; /* the min of all Vegas RTT measurements seen (in usec) */
-
- u32 cwnd;
- u32 dwnd;
-};
-
-/* There are several situations when we must "re-start" Vegas:
- *
- * o when a connection is established
- * o after an RTO
- * o after fast recovery
- * o when we send a packet and there is no outstanding
- * unacknowledged data (restarting an idle connection)
- *
- * In these circumstances we cannot do a Vegas calculation at the
- * end of the first RTT, because any calculation we do is using
- * stale info -- both the saved cwnd and congestion feedback are
- * stale.
- *
- * Instead we must wait until the completion of an RTT during
- * which we actually receive ACKs.
- */
-static inline void vegas_enable(struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
- struct compound *vegas = inet_csk_ca(sk);
-
- /* Begin taking Vegas samples next time we send something. */
- vegas->doing_vegas_now = 1;
-
- /* Set the beginning of the next send window. */
- vegas->beg_snd_nxt = tp->snd_nxt;
-
- vegas->cntRTT = 0;
- vegas->minRTT = 0x7fffffff;
-}
-
-/* Stop taking Vegas samples for now. */
-static inline void vegas_disable(struct sock *sk)
-{
- struct compound *vegas = inet_csk_ca(sk);
-
- vegas->doing_vegas_now = 0;
-}
-
-static void tcp_compound_init(struct sock *sk)
-{
- struct compound *vegas = inet_csk_ca(sk);
- const struct tcp_sock *tp = tcp_sk(sk);
-
- vegas->baseRTT = 0x7fffffff;
- vegas_enable(sk);
-
- vegas->dwnd = 0;
- vegas->cwnd = tp->snd_cwnd;
-}
-
-/* Do RTT sampling needed for Vegas.
- * Basically we:
- * o min-filter RTT samples from within an RTT to get the current
- * propagation delay + queuing delay (we are min-filtering to try to
- * avoid the effects of delayed ACKs)
- * o min-filter RTT samples from a much longer window (forever for now)
- * to find the propagation delay (baseRTT)
- */
-static void tcp_compound_rtt_calc(struct sock *sk, u32 usrtt)
-{
- struct compound *vegas = inet_csk_ca(sk);
- u32 vrtt = usrtt + 1; /* Never allow zero rtt or baseRTT */
-
- /* Filter to find propagation delay: */
- if (vrtt < vegas->baseRTT)
- vegas->baseRTT = vrtt;
-
- /* Find the min RTT during the last RTT to find
- * the current prop. delay + queuing delay:
- */
-
- vegas->minRTT = min(vegas->minRTT, vrtt);
- vegas->cntRTT++;
-}
-
-static void tcp_compound_state(struct sock *sk, u8 ca_state)
-{
-
- if (ca_state == TCP_CA_Open)
- vegas_enable(sk);
- else
- vegas_disable(sk);
-}
-
-
-/* 64bit divisor, dividend and result. dynamic precision */
-static inline u64 div64_64(u64 dividend, u64 divisor)
-{
- u32 d = divisor;
-
- if (divisor > 0xffffffffULL) {
- unsigned int shift = fls(divisor >> 32);
-
- d = divisor >> shift;
- dividend >>= shift;
- }
-
- /* avoid 64 bit division if possible */
- if (dividend >> 32)
- do_div(dividend, d);
- else
- dividend = (u32) dividend / d;
-
- return dividend;
-}
-
-/* calculate the quartic root of "a" using Newton-Raphson */
-static u32 qroot(u64 a)
-{
- u32 x, x1;
-
- /* Initial estimate is based on:
- * qrt(x) = exp(log(x) / 4)
- */
- x = 1u << (fls64(a) >> 2);
-
- /*
- * Iteration based on:
- * 3
- * x = ( 3 * x + a / x ) / 4
- * k+1 k k
- */
- do {
- u64 x3 = x;
-
- x1 = x;
- x3 *= x;
- x3 *= x;
-
- x = (3 * x + (u32) div64_64(a, x3)) / 4;
- } while (abs(x1 - x) > 1);
-
- return x;
-}
-
-
-/*
- * If the connection is idle and we are restarting,
- * then we don't want to do any Vegas calculations
- * until we get fresh RTT samples. So when we
- * restart, we reset our Vegas state to a clean
- * slate. After we get acks for this flight of
- * packets, _then_ we can make Vegas calculations
- * again.
- */
-static void tcp_compound_cwnd_event(struct sock *sk, enum tcp_ca_event event)
-{
- if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START)
- tcp_compound_init(sk);
-}
-
-static void tcp_compound_cong_avoid(struct sock *sk, u32 ack,
- u32 seq_rtt, u32 in_flight, int flag)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- struct compound *vegas = inet_csk_ca(sk);
- u8 inc = 0;
-
- if (vegas->cwnd + vegas->dwnd > tp->snd_cwnd) {
- if (vegas->cwnd > tp->snd_cwnd || vegas->dwnd > tp->snd_cwnd) {
- vegas->cwnd = tp->snd_cwnd;
- vegas->dwnd = 0;
- } else
- vegas->cwnd = tp->snd_cwnd - vegas->dwnd;
-
- }
-
- if (!tcp_is_cwnd_limited(sk, in_flight))
- return;
-
- if (vegas->cwnd <= tp->snd_ssthresh)
- inc = 1;
- else if (tp->snd_cwnd_cnt < tp->snd_cwnd)
- tp->snd_cwnd_cnt++;
-
- if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
- inc = 1;
- tp->snd_cwnd_cnt = 0;
- }
-
- if (inc && tp->snd_cwnd < tp->snd_cwnd_clamp)
- vegas->cwnd++;
-
- /* The key players are v_beg_snd_una and v_beg_snd_nxt.
- *
- * These are so named because they represent the approximate values
- * of snd_una and snd_nxt at the beginning of the current RTT. More
- * precisely, they represent the amount of data sent during the RTT.
- * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
- * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
- * bytes of data have been ACKed during the course of the RTT, giving
- * an "actual" rate of:
- *
- * (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
- *
- * Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
- * because delayed ACKs can cover more than one segment, so they
- * don't line up nicely with the boundaries of RTTs.
- *
- * Another unfortunate fact of life is that delayed ACKs delay the
- * advance of the left edge of our send window, so that the number
- * of bytes we send in an RTT is often less than our cwnd will allow.
- * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
- */
-
- if (after(ack, vegas->beg_snd_nxt)) {
- /* Do the Vegas once-per-RTT cwnd adjustment. */
- u32 old_wnd, old_snd_cwnd;
-
- /* Here old_wnd is essentially the window of data that was
- * sent during the previous RTT, and has all
- * been acknowledged in the course of the RTT that ended
- * with the ACK we just received. Likewise, old_snd_cwnd
- * is the cwnd during the previous RTT.
- */
- if (!tp->mss_cache)
- return;
-
- old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) /
- tp->mss_cache;
- old_snd_cwnd = vegas->beg_snd_cwnd;
-
- /* Save the extent of the current window so we can use this
- * at the end of the next RTT.
- */
- vegas->beg_snd_una = vegas->beg_snd_nxt;
- vegas->beg_snd_nxt = tp->snd_nxt;
- vegas->beg_snd_cwnd = tp->snd_cwnd;
-
- /* We do the Vegas calculations only if we got enough RTT
- * samples that we can be reasonably sure that we got
- * at least one RTT sample that wasn't from a delayed ACK.
- * If we only had 2 samples total,
- * then that means we're getting only 1 ACK per RTT, which
- * means they're almost certainly delayed ACKs.
- * If we have 3 samples, we should be OK.
- */
-
- if (vegas->cntRTT > 2) {
- u32 rtt, target_cwnd, diff;
- u32 brtt, dwnd;
-
- /* We have enough RTT samples, so, using the Vegas
- * algorithm, we determine if we should increase or
- * decrease cwnd, and by how much.
- */
-
- /* Pluck out the RTT we are using for the Vegas
- * calculations. This is the min RTT seen during the
- * last RTT. Taking the min filters out the effects
- * of delayed ACKs, at the cost of noticing congestion
- * a bit later.
- */
- rtt = vegas->minRTT;
-
- /* Calculate the cwnd we should have, if we weren't
- * going too fast.
- *
- * This is:
- * (actual rate in segments) * baseRTT
- * We keep it as a fixed point number with
- * V_PARAM_SHIFT bits to the right of the binary point.
- */
- if (!rtt)
- return;
-
- brtt = vegas->baseRTT;
- target_cwnd = ((old_wnd * brtt)
- << V_PARAM_SHIFT) / rtt;
-
- /* Calculate the difference between the window we had,
- * and the window we would like to have. This quantity
- * is the "Diff" from the Arizona Vegas papers.
- *
- * Again, this is a fixed point number with
- * V_PARAM_SHIFT bits to the right of the binary
- * point.
- */
-
- diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
-
- dwnd = vegas->dwnd;
-
- if (diff < (TCP_COMPOUND_GAMMA << V_PARAM_SHIFT)) {
- u64 v;
- u32 x;
-
- /*
- * The TCP Compound paper describes the choice
- * of "k" determines the agressiveness,
- * ie. slope of the response function.
- *
- * For same value as HSTCP would be 0.8
- * but for computaional reasons, both the
- * original authors and this implementation
- * use 0.75.
- */
- v = old_wnd;
- x = qroot(v * v * v) >> TCP_COMPOUND_ALPHA;
- if (x > 1)
- dwnd = x - 1;
- else
- dwnd = 0;
-
- dwnd += vegas->dwnd;
-
- } else if ((dwnd << V_PARAM_SHIFT) <
- (diff * TCP_COMPOUND_BETA))
- dwnd = 0;
- else
- dwnd =
- ((dwnd << V_PARAM_SHIFT) -
- (diff *
- TCP_COMPOUND_BETA)) >> V_PARAM_SHIFT;
-
- vegas->dwnd = dwnd;
-
- }
-
- /* Wipe the slate clean for the next RTT. */
- vegas->cntRTT = 0;
- vegas->minRTT = 0x7fffffff;
- }
-
- tp->snd_cwnd = vegas->cwnd + vegas->dwnd;
-}
-
-/* Extract info for Tcp socket info provided via netlink. */
-static void tcp_compound_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
-{
- const struct compound *ca = inet_csk_ca(sk);
- if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
- struct tcpvegas_info *info;
-
- info = RTA_DATA(__RTA_PUT(skb, INET_DIAG_VEGASINFO,
- sizeof(*info)));
-
- info->tcpv_enabled = ca->doing_vegas_now;
- info->tcpv_rttcnt = ca->cntRTT;
- info->tcpv_rtt = ca->baseRTT;
- info->tcpv_minrtt = ca->minRTT;
- rtattr_failure:;
- }
-}
-
-static struct tcp_congestion_ops tcp_compound = {
- .init = tcp_compound_init,
- .ssthresh = tcp_reno_ssthresh,
- .cong_avoid = tcp_compound_cong_avoid,
- .rtt_sample = tcp_compound_rtt_calc,
- .set_state = tcp_compound_state,
- .cwnd_event = tcp_compound_cwnd_event,
- .get_info = tcp_compound_get_info,
-
- .owner = THIS_MODULE,
- .name = "compound",
-};
-
-static int __init tcp_compound_register(void)
-{
- BUG_ON(sizeof(struct compound) > ICSK_CA_PRIV_SIZE);
- tcp_register_congestion_control(&tcp_compound);
- return 0;
-}
-
-static void __exit tcp_compound_unregister(void)
-{
- tcp_unregister_congestion_control(&tcp_compound);
-}
-
-module_init(tcp_compound_register);
-module_exit(tcp_compound_unregister);
-
-MODULE_AUTHOR("Angelo P. Castellani, Stephen Hemminger");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("TCP Compound");
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index aaa1538c0692..fa3e1aad660c 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -139,14 +139,19 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
tp->snd_cwnd++;
}
} else {
- /* Update AIMD parameters */
+ /* Update AIMD parameters.
+ *
+ * We want to guarantee that:
+ * hstcp_aimd_vals[ca->ai-1].cwnd <
+ * snd_cwnd <=
+ * hstcp_aimd_vals[ca->ai].cwnd
+ */
if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
ca->ai < HSTCP_AIMD_MAX - 1)
ca->ai++;
- } else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) {
- while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
- ca->ai > 0)
+ } else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {
+ while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)
ca->ai--;
}
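
The new comment states the invariant that ca->ai must keep against the ascending hstcp_aimd_vals[].cwnd table; in the old decrement branch the loop condition contradicted its guard, so the index could fail to move back down when snd_cwnd shrank. A self-contained sketch of the corrected search, with a made-up three-entry table standing in for hstcp_aimd_vals:

#include <assert.h>
#include <stddef.h>

struct aimd_val { unsigned int cwnd; };

/* Hypothetical stand-in for hstcp_aimd_vals[]: cwnd thresholds, ascending. */
static const struct aimd_val vals[] = { { 38 }, { 118 }, { 221 } };
#define AIMD_MAX (sizeof(vals) / sizeof(vals[0]))

/* Move ai so that vals[ai-1].cwnd < cwnd <= vals[ai].cwnd (clamped at the ends). */
static size_t update_ai(size_t ai, unsigned int cwnd)
{
	if (cwnd > vals[ai].cwnd) {
		while (cwnd > vals[ai].cwnd && ai < AIMD_MAX - 1)
			ai++;
	} else if (ai && cwnd <= vals[ai - 1].cwnd) {
		while (ai && cwnd <= vals[ai - 1].cwnd)
			ai--;
	}
	return ai;
}

int main(void)
{
	size_t ai = update_ai(2, 40);	/* cwnd shrank: index must come back down */

	assert(ai == 1);		/* 38 < 40 <= 118 holds again */
	return 0;
}
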
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5a886e6efbbe..f6f39e814291 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -496,6 +496,24 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
}
}
+int tcp_v4_gso_send_check(struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ struct tcphdr *th;
+
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ return -EINVAL;
+
+ iph = skb->nh.iph;
+ th = skb->h.th;
+
+ th->check = 0;
+ th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
+ skb->csum = offsetof(struct tcphdr, check);
+ skb->ip_summed = CHECKSUM_HW;
+ return 0;
+}
+
/*
* This routine will send an RST to the other tcp.
*
@@ -1622,10 +1640,9 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
if (unlikely(afinfo == NULL))
return -EINVAL;
- s = kmalloc(sizeof(*s), GFP_KERNEL);
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
- memset(s, 0, sizeof(*s));
s->family = afinfo->family;
s->seq_ops.start = tcp_seq_start;
s->seq_ops.next = tcp_seq_next;
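
tcp_v4_gso_send_check() above precomputes the pseudo-header part of the checksum into th->check and records the offset of the checksum field in skb->csum, so whatever finishes the packet (hardware, or the software fallback) only has to fold in the TCP header and payload. The pseudo-header arithmetic is ordinary one's-complement summation per RFC 793/1071; a standalone userspace sketch of just that part, with made-up addresses and byte order glossed over:

#include <assert.h>
#include <stdint.h>

/* 16-bit one's-complement accumulation, as used for IP/TCP checksums. */
static uint32_t csum_add(uint32_t sum, uint16_t word)
{
	sum += word;
	return (sum & 0xffff) + (sum >> 16);
}

/* Sum of the IPv4 pseudo-header: saddr, daddr, zero:protocol, TCP length.
 * The kernel does this on network-order fields; host-order ints suffice here. */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint8_t proto, uint16_t tcp_len)
{
	uint32_t sum = 0;

	sum = csum_add(sum, saddr >> 16);
	sum = csum_add(sum, saddr & 0xffff);
	sum = csum_add(sum, daddr >> 16);
	sum = csum_add(sum, daddr & 0xffff);
	sum = csum_add(sum, proto);	/* upper byte of this 16-bit word is zero */
	sum = csum_add(sum, tcp_len);
	return (uint16_t)sum;
}

int main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, protocol 6 (TCP), 1460-byte segment. */
	uint16_t seed = pseudo_hdr_sum(0xc0000201, 0xc0000202, 6, 1460);

	assert(seed == 0x89be);	/* the TCP bytes are folded into this seed later */
	return 0;
}
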
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9bfcddad695b..f136cec96d95 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1468,11 +1468,10 @@ static int udp_seq_open(struct inode *inode, struct file *file)
struct udp_seq_afinfo *afinfo = PDE(inode)->data;
struct seq_file *seq;
int rc = -ENOMEM;
- struct udp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
- memset(s, 0, sizeof(*s));
s->family = afinfo->family;
s->seq_ops.start = udp_seq_start;
s->seq_ops.next = udp_seq_next;
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index f8d880beb12f..13cafbe56ce3 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -92,7 +92,6 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
skb->mac.raw = memmove(skb->data - skb->mac_len,
skb->mac.raw, skb->mac_len);
skb->nh.raw = skb->data;
- memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
err = 0;
out:
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 193363e22932..d16f863cf687 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -134,7 +134,7 @@ static int xfrm4_output_finish(struct sk_buff *skb)
}
#endif
- if (!skb_shinfo(skb)->gso_size)
+ if (!skb_is_gso(skb))
return xfrm4_output_finish2(skb);
skb->protocol = htons(ETH_P_IP);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c250d0af10d7..2316a4315a18 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -508,6 +508,26 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
kfree(ifp);
}
+static void
+ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
+{
+ struct inet6_ifaddr *ifa, **ifap;
+ int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
+
+ /*
+ * Each device address list is sorted in order of scope -
+ * global before linklocal.
+ */
+ for (ifap = &idev->addr_list; (ifa = *ifap) != NULL;
+ ifap = &ifa->if_next) {
+ if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
+ break;
+ }
+
+ ifp->if_next = *ifap;
+ *ifap = ifp;
+}
+
/* On success it returns ifp with increased reference count */
static struct inet6_ifaddr *
@@ -573,8 +593,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
write_lock(&idev->lock);
/* Add to inet6_dev unicast addr list. */
- ifa->if_next = idev->addr_list;
- idev->addr_list = ifa;
+ ipv6_link_dev_addr(idev, ifa);
#ifdef CONFIG_IPV6_PRIVACY
if (ifa->flags&IFA_F_TEMPORARY) {
@@ -987,7 +1006,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
continue;
} else if (score.scope < hiscore.scope) {
if (score.scope < daddr_scope)
- continue;
+ break; /* addresses sorted by scope */
else {
score.rule = 2;
goto record_it;
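
ipv6_link_dev_addr() above keeps each device's address list sorted by source-address scope (global before link-local) using a pointer-to-pointer walk over if_next, which is what lets ipv6_dev_get_saddr() replace its continue with an early break. A small standalone sketch of the same insertion idiom, with integer keys standing in for address scopes:

#include <assert.h>
#include <stddef.h>

struct node {
	int		scope;	/* stand-in for ipv6_addr_src_scope(&ifa->addr) */
	struct node	*next;
};

/* Insert while keeping the list sorted in descending scope order; walking a
 * pointer to the link itself avoids any "previous element" bookkeeping. */
static void link_sorted(struct node **head, struct node *n)
{
	struct node **linkp, *cur;

	for (linkp = head; (cur = *linkp) != NULL; linkp = &cur->next) {
		if (n->scope >= cur->scope)
			break;
	}
	n->next = *linkp;
	*linkp = n;
}

int main(void)
{
	struct node a = { .scope = 1 }, b = { .scope = 3 }, c = { .scope = 2 };
	struct node *head = NULL;

	link_sorted(&head, &a);
	link_sorted(&head, &b);
	link_sorted(&head, &c);
	assert(head == &b && b.next == &c && c.next == &a && a.next == NULL);
	return 0;
}
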
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index df8f051c0fce..25c2a9e03895 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -71,6 +71,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
goto out;
}
+ memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
+
/*
* Store incoming device index. When the packet will
* be queued, we cannot refer to skb->dev anymore.
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2c5b44575af0..3bc74ce78800 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *skb)
int ip6_output(struct sk_buff *skb)
{
- if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
+ if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
dst_allfrag(skb->dst))
return ip6_fragment(skb, ip6_output2);
else
@@ -229,7 +229,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
skb->priority = sk->sk_priority;
mtu = dst_mtu(dst);
- if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) {
+ if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
dst_output);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index bc77c0e1a943..84d7ebdb9d21 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -567,10 +567,9 @@ static inline struct ipv6_txoptions *create_tel(__u8 encap_limit)
int opt_len = sizeof(*opt) + 8;
- if (!(opt = kmalloc(opt_len, GFP_ATOMIC))) {
+ if (!(opt = kzalloc(opt_len, GFP_ATOMIC))) {
return NULL;
}
- memset(opt, 0, opt_len);
opt->tot_len = opt_len;
opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1);
opt->opt_nflen = 8;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index b285b0357084..7e4d1c17bfbc 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -109,7 +109,8 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
goto out_put_cpu;
}
- skb_put(skb, dlen - plen);
+ skb->truesize += dlen - plen;
+ __skb_put(skb, dlen - plen);
memcpy(skb->data, scratch, dlen);
err = ipch->nexthdr;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 0c17dec11c8d..43327264e69c 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -57,29 +57,11 @@
DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
+static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
+ int proto)
{
- struct sk_buff *segs = ERR_PTR(-EINVAL);
- struct ipv6hdr *ipv6h;
- struct inet6_protocol *ops;
- int proto;
+ struct inet6_protocol *ops = NULL;
- if (unlikely(skb_shinfo(skb)->gso_type &
- ~(SKB_GSO_UDP |
- SKB_GSO_DODGY |
- SKB_GSO_TCP_ECN |
- SKB_GSO_TCPV6 |
- 0)))
- goto out;
-
- if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
- goto out;
-
- ipv6h = skb->nh.ipv6h;
- proto = ipv6h->nexthdr;
- __skb_pull(skb, sizeof(*ipv6h));
-
- rcu_read_lock();
for (;;) {
struct ipv6_opt_hdr *opth;
int len;
@@ -88,30 +70,80 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
ops = rcu_dereference(inet6_protos[proto]);
if (unlikely(!ops))
- goto unlock;
+ break;
if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
break;
}
if (unlikely(!pskb_may_pull(skb, 8)))
- goto unlock;
+ break;
opth = (void *)skb->data;
len = opth->hdrlen * 8 + 8;
if (unlikely(!pskb_may_pull(skb, len)))
- goto unlock;
+ break;
proto = opth->nexthdr;
__skb_pull(skb, len);
}
- skb->h.raw = skb->data;
- if (likely(ops->gso_segment))
- segs = ops->gso_segment(skb, features);
+ return ops;
+}
+
+static int ipv6_gso_send_check(struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct inet6_protocol *ops;
+ int err = -EINVAL;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+ goto out;
-unlock:
+ ipv6h = skb->nh.ipv6h;
+ __skb_pull(skb, sizeof(*ipv6h));
+ err = -EPROTONOSUPPORT;
+
+ rcu_read_lock();
+ ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+ if (likely(ops && ops->gso_send_check)) {
+ skb->h.raw = skb->data;
+ err = ops->gso_send_check(skb);
+ }
+ rcu_read_unlock();
+
+out:
+ return err;
+}
+
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ struct ipv6hdr *ipv6h;
+ struct inet6_protocol *ops;
+
+ if (unlikely(skb_shinfo(skb)->gso_type &
+ ~(SKB_GSO_UDP |
+ SKB_GSO_DODGY |
+ SKB_GSO_TCP_ECN |
+ SKB_GSO_TCPV6 |
+ 0)))
+ goto out;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+ goto out;
+
+ ipv6h = skb->nh.ipv6h;
+ __skb_pull(skb, sizeof(*ipv6h));
+ segs = ERR_PTR(-EPROTONOSUPPORT);
+
+ rcu_read_lock();
+ ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+ if (likely(ops && ops->gso_segment)) {
+ skb->h.raw = skb->data;
+ segs = ops->gso_segment(skb, features);
+ }
rcu_read_unlock();
if (unlikely(IS_ERR(segs)))
@@ -130,6 +162,7 @@ out:
static struct packet_type ipv6_packet_type = {
.type = __constant_htons(ETH_P_IPV6),
.func = ipv6_rcv,
+ .gso_send_check = ipv6_gso_send_check,
.gso_segment = ipv6_gso_segment,
};
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index fa1ce0ae123e..d57e61ce4a7d 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -411,6 +411,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
/* Copy the address. */
if (sin6) {
sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = 0;
ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = 0;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index c56aeece2bf5..836eecd7e62b 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -380,7 +380,6 @@ static int ipip6_rcv(struct sk_buff *skb)
secpath_reset(skb);
skb->mac.raw = skb->nh.raw;
skb->nh.raw = skb->data;
- memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
IPCB(skb)->flags = 0;
skb->protocol = htons(ETH_P_IPV6);
skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5bdcb9002cf7..923989d0520d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -552,6 +552,24 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
}
}
+static int tcp_v6_gso_send_check(struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ return -EINVAL;
+
+ ipv6h = skb->nh.ipv6h;
+ th = skb->h.th;
+
+ th->check = 0;
+ th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
+ IPPROTO_TCP, 0);
+ skb->csum = offsetof(struct tcphdr, check);
+ skb->ip_summed = CHECKSUM_HW;
+ return 0;
+}
static void tcp_v6_send_reset(struct sk_buff *skb)
{
@@ -1603,6 +1621,7 @@ struct proto tcpv6_prot = {
static struct inet6_protocol tcpv6_protocol = {
.handler = tcp_v6_rcv,
.err_handler = tcp_v6_err,
+ .gso_send_check = tcp_v6_gso_send_check,
.gso_segment = tcp_tso_segment,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 48fccb1eca08..0eea60ea9ebc 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -122,7 +122,7 @@ static int xfrm6_output_finish(struct sk_buff *skb)
{
struct sk_buff *segs;
- if (!skb_shinfo(skb)->gso_size)
+ if (!skb_is_gso(skb))
return xfrm6_output_finish2(skb);
skb->protocol = htons(ETH_P_IP);
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 6b44fe8516c3..c8f9369c2a87 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -31,27 +31,6 @@
#include <linux/icmpv6.h>
#include <linux/mutex.h>
-#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
-# define X6TDEBUG 3
-#else
-# define X6TDEBUG 1
-#endif
-
-#define X6TPRINTK(fmt, args...) printk(fmt, ## args)
-#define X6TNOPRINTK(fmt, args...) do { ; } while(0)
-
-#if X6TDEBUG >= 1
-# define X6TPRINTK1 X6TPRINTK
-#else
-# define X6TPRINTK1 X6TNOPRINTK
-#endif
-
-#if X6TDEBUG >= 3
-# define X6TPRINTK3 X6TPRINTK
-#else
-# define X6TPRINTK3 X6TNOPRINTK
-#endif
-
/*
* xfrm_tunnel_spi things are for allocating unique id ("spi")
* per xfrm_address_t.
@@ -62,15 +41,8 @@ struct xfrm6_tunnel_spi {
xfrm_address_t addr;
u32 spi;
atomic_t refcnt;
-#ifdef XFRM6_TUNNEL_SPI_MAGIC
- u32 magic;
-#endif
};
-#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
-# define XFRM6_TUNNEL_SPI_MAGIC 0xdeadbeef
-#endif
-
static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock);
static u32 xfrm6_tunnel_spi;
@@ -86,43 +58,15 @@ static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly;
static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
-#ifdef XFRM6_TUNNEL_SPI_MAGIC
-static int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
- const char *name)
-{
- if (unlikely(x6spi->magic != XFRM6_TUNNEL_SPI_MAGIC)) {
- X6TPRINTK3(KERN_DEBUG "%s(): x6spi object "
- "at %p has corrupted magic %08x "
- "(should be %08x)\n",
- name, x6spi, x6spi->magic, XFRM6_TUNNEL_SPI_MAGIC);
- return -1;
- }
- return 0;
-}
-#else
-static int inline x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
- const char *name)
-{
- return 0;
-}
-#endif
-
-#define X6SPI_CHECK_MAGIC(x6spi) x6spi_check_magic((x6spi), __FUNCTION__)
-
-
static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
{
unsigned h;
- X6TPRINTK3(KERN_DEBUG "%s(addr=%p)\n", __FUNCTION__, addr);
-
h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3];
h ^= h >> 16;
h ^= h >> 8;
h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;
- X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, h);
-
return h;
}
@@ -136,19 +80,13 @@ static int xfrm6_tunnel_spi_init(void)
{
int i;
- X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
-
xfrm6_tunnel_spi = 0;
xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
sizeof(struct xfrm6_tunnel_spi),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
- if (!xfrm6_tunnel_spi_kmem) {
- X6TPRINTK1(KERN_ERR
- "%s(): failed to allocate xfrm6_tunnel_spi_kmem\n",
- __FUNCTION__);
+ if (!xfrm6_tunnel_spi_kmem)
return -ENOMEM;
- }
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
@@ -161,22 +99,16 @@ static void xfrm6_tunnel_spi_fini(void)
{
int i;
- X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
-
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
- goto err;
+ return;
}
for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
- goto err;
+ return;
}
kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
xfrm6_tunnel_spi_kmem = NULL;
- return;
-err:
- X6TPRINTK1(KERN_ERR "%s(): table is not empty\n", __FUNCTION__);
- return;
}
static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
@@ -184,19 +116,13 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
struct xfrm6_tunnel_spi *x6spi;
struct hlist_node *pos;
- X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
-
hlist_for_each_entry(x6spi, pos,
&xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
list_byaddr) {
- if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
- X6SPI_CHECK_MAGIC(x6spi);
- X6TPRINTK3(KERN_DEBUG "%s() = %p(%u)\n", __FUNCTION__, x6spi, x6spi->spi);
+ if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
return x6spi;
- }
}
- X6TPRINTK3(KERN_DEBUG "%s() = NULL(0)\n", __FUNCTION__);
return NULL;
}
@@ -205,8 +131,6 @@ u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
struct xfrm6_tunnel_spi *x6spi;
u32 spi;
- X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
-
read_lock_bh(&xfrm6_tunnel_spi_lock);
x6spi = __xfrm6_tunnel_spi_lookup(saddr);
spi = x6spi ? x6spi->spi : 0;
@@ -223,8 +147,6 @@ static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
struct hlist_node *pos;
unsigned index;
- X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
-
if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN ||
xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX)
xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN;
@@ -258,18 +180,10 @@ try_next_2:;
spi = 0;
goto out;
alloc_spi:
- X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for " NIP6_FMT "\n",
- __FUNCTION__,
- NIP6(*(struct in6_addr *)saddr));
x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
- if (!x6spi) {
- X6TPRINTK1(KERN_ERR "%s(): kmem_cache_alloc() failed\n",
- __FUNCTION__);
+ if (!x6spi)
goto out;
- }
-#ifdef XFRM6_TUNNEL_SPI_MAGIC
- x6spi->magic = XFRM6_TUNNEL_SPI_MAGIC;
-#endif
+
memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
x6spi->spi = spi;
atomic_set(&x6spi->refcnt, 1);
@@ -278,9 +192,7 @@ alloc_spi:
index = xfrm6_tunnel_spi_hash_byaddr(saddr);
hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
- X6SPI_CHECK_MAGIC(x6spi);
out:
- X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);
return spi;
}
@@ -289,8 +201,6 @@ u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
struct xfrm6_tunnel_spi *x6spi;
u32 spi;
- X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
-
write_lock_bh(&xfrm6_tunnel_spi_lock);
x6spi = __xfrm6_tunnel_spi_lookup(saddr);
if (x6spi) {
@@ -300,8 +210,6 @@ u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
spi = __xfrm6_tunnel_alloc_spi(saddr);
write_unlock_bh(&xfrm6_tunnel_spi_lock);
- X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);
-
return spi;
}
@@ -312,8 +220,6 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
struct xfrm6_tunnel_spi *x6spi;
struct hlist_node *pos, *n;
- X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
-
write_lock_bh(&xfrm6_tunnel_spi_lock);
hlist_for_each_entry_safe(x6spi, pos, n,
@@ -321,12 +227,6 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
list_byaddr)
{
if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
- X6TPRINTK3(KERN_DEBUG "%s(): x6spi object for " NIP6_FMT
- " found at %p\n",
- __FUNCTION__,
- NIP6(*(struct in6_addr *)saddr),
- x6spi);
- X6SPI_CHECK_MAGIC(x6spi);
if (atomic_dec_and_test(&x6spi->refcnt)) {
hlist_del(&x6spi->list_byaddr);
hlist_del(&x6spi->list_byspi);
@@ -377,20 +277,14 @@ static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
case ICMPV6_ADDR_UNREACH:
case ICMPV6_PORT_UNREACH:
default:
- X6TPRINTK3(KERN_DEBUG
- "xfrm6_tunnel: Destination Unreach.\n");
break;
}
break;
case ICMPV6_PKT_TOOBIG:
- X6TPRINTK3(KERN_DEBUG
- "xfrm6_tunnel: Packet Too Big.\n");
break;
case ICMPV6_TIME_EXCEED:
switch (code) {
case ICMPV6_EXC_HOPLIMIT:
- X6TPRINTK3(KERN_DEBUG
- "xfrm6_tunnel: Too small Hoplimit.\n");
break;
case ICMPV6_EXC_FRAGTIME:
default:
@@ -447,22 +341,14 @@ static struct xfrm6_tunnel xfrm6_tunnel_handler = {
static int __init xfrm6_tunnel_init(void)
{
- X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
-
- if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) {
- X6TPRINTK1(KERN_ERR
- "xfrm6_tunnel init: can't add xfrm type\n");
+ if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0)
return -EAGAIN;
- }
+
if (xfrm6_tunnel_register(&xfrm6_tunnel_handler)) {
- X6TPRINTK1(KERN_ERR
- "xfrm6_tunnel init(): can't add handler\n");
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
return -EAGAIN;
}
if (xfrm6_tunnel_spi_init() < 0) {
- X6TPRINTK1(KERN_ERR
- "xfrm6_tunnel init: failed to initialize spi\n");
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler);
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
return -EAGAIN;
@@ -472,15 +358,9 @@ static int __init xfrm6_tunnel_init(void)
static void __exit xfrm6_tunnel_fini(void)
{
- X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
-
xfrm6_tunnel_spi_fini();
- if (xfrm6_tunnel_deregister(&xfrm6_tunnel_handler))
- X6TPRINTK1(KERN_ERR
- "xfrm6_tunnel close: can't remove handler\n");
- if (xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6) < 0)
- X6TPRINTK1(KERN_ERR
- "xfrm6_tunnel close: can't remove xfrm type\n");
+ xfrm6_tunnel_deregister(&xfrm6_tunnel_handler);
+ xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
}
module_init(xfrm6_tunnel_init);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 7fae48a53bff..17699eeb64d7 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -308,7 +308,7 @@ static void irda_connect_response(struct irda_sock *self)
IRDA_ASSERT(self != NULL, return;);
- skb = dev_alloc_skb(64);
+ skb = alloc_skb(64, GFP_ATOMIC);
if (skb == NULL) {
IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n",
__FUNCTION__);
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index 9c4a902a9dba..ad6b6af3dd97 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -115,12 +115,10 @@ struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line)
IRDA_ASSERT(ircomm != NULL, return NULL;);
- self = kmalloc(sizeof(struct ircomm_cb), GFP_ATOMIC);
+ self = kzalloc(sizeof(struct ircomm_cb), GFP_ATOMIC);
if (self == NULL)
return NULL;
- memset(self, 0, sizeof(struct ircomm_cb));
-
self->notify = *notify;
self->magic = IRCOMM_MAGIC;
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index d9097207aed3..959874b6451f 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -81,7 +81,7 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
/* Any userdata supplied? */
if (userdata == NULL) {
- tx_skb = dev_alloc_skb(64);
+ tx_skb = alloc_skb(64, GFP_ATOMIC);
if (!tx_skb)
return -ENOMEM;
@@ -115,7 +115,7 @@ static int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
if (!userdata) {
- tx_skb = dev_alloc_skb(64);
+ tx_skb = alloc_skb(64, GFP_ATOMIC);
if (!tx_skb)
return -ENOMEM;
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index 6009bab05091..a39f5735a90b 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -121,7 +121,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
skb = self->ctrl_skb;
if (!skb) {
- skb = dev_alloc_skb(256);
+ skb = alloc_skb(256, GFP_ATOMIC);
if (!skb) {
spin_unlock_irqrestore(&self->spinlock, flags);
return -ENOMEM;
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index b400f27851fc..3bcdb467efc5 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -379,12 +379,11 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
self = hashbin_lock_find(ircomm_tty, line, NULL);
if (!self) {
/* No, so make new instance */
- self = kmalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL);
+ self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL);
if (self == NULL) {
IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__);
return -ENOMEM;
}
- memset(self, 0, sizeof(struct ircomm_tty_cb));
self->magic = IRCOMM_TTY_MAGIC;
self->flow = FLOW_STOP;
@@ -759,8 +758,9 @@ static int ircomm_tty_write(struct tty_struct *tty,
}
} else {
/* Prepare a full sized frame */
- skb = dev_alloc_skb(self->max_data_size+
- self->max_header_size);
+ skb = alloc_skb(self->max_data_size+
+ self->max_header_size,
+ GFP_ATOMIC);
if (!skb) {
spin_unlock_irqrestore(&self->spinlock, flags);
return -ENOBUFS;
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index ba40e5495f58..7e7a31798d8d 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -401,12 +401,10 @@ dongle_t *irda_device_dongle_init(struct net_device *dev, int type)
}
/* Allocate dongle info for this instance */
- dongle = kmalloc(sizeof(dongle_t), GFP_KERNEL);
+ dongle = kzalloc(sizeof(dongle_t), GFP_KERNEL);
if (!dongle)
goto out;
- memset(dongle, 0, sizeof(dongle_t));
-
/* Bind the registration info to this particular instance */
dongle->issue = reg;
dongle->dev = dev;
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index a0472652a44e..61128aa05b40 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -345,7 +345,7 @@ static void iriap_disconnect_request(struct iriap_cb *self)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
- tx_skb = dev_alloc_skb(64);
+ tx_skb = alloc_skb(64, GFP_ATOMIC);
if (tx_skb == NULL) {
IRDA_DEBUG(0, "%s(), Could not allocate an sk_buff of length %d\n",
__FUNCTION__, 64);
@@ -396,7 +396,7 @@ int iriap_getvaluebyclass_request(struct iriap_cb *self,
attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */
skb_len = self->max_header_size+2+name_len+1+attr_len+4;
- tx_skb = dev_alloc_skb(skb_len);
+ tx_skb = alloc_skb(skb_len, GFP_ATOMIC);
if (!tx_skb)
return -ENOMEM;
@@ -562,7 +562,8 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self,
* value. We add 32 bytes because of the 6 bytes for the frame and
* max 5 bytes for the value coding.
*/
- tx_skb = dev_alloc_skb(value->len + self->max_header_size + 32);
+ tx_skb = alloc_skb(value->len + self->max_header_size + 32,
+ GFP_ATOMIC);
if (!tx_skb)
return;
@@ -700,7 +701,7 @@ void iriap_send_ack(struct iriap_cb *self)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
- tx_skb = dev_alloc_skb(64);
+ tx_skb = alloc_skb(64, GFP_ATOMIC);
if (!tx_skb)
return;
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c
index a73607450de1..da17395df05a 100644
--- a/net/irda/iriap_event.c
+++ b/net/irda/iriap_event.c
@@ -365,7 +365,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
switch (event) {
case IAP_LM_CONNECT_INDICATION:
- tx_skb = dev_alloc_skb(64);
+ tx_skb = alloc_skb(64, GFP_ATOMIC);
if (tx_skb == NULL) {
IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__);
return;
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index 82e665c79991..a154b1d71c0f 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -82,13 +82,12 @@ struct ias_object *irias_new_object( char *name, int id)
IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);
- obj = kmalloc(sizeof(struct ias_object), GFP_ATOMIC);
+ obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC);
if (obj == NULL) {
IRDA_WARNING("%s(), Unable to allocate object!\n",
__FUNCTION__);
return NULL;
}
- memset(obj, 0, sizeof( struct ias_object));
obj->magic = IAS_OBJECT_MAGIC;
obj->name = strndup(name, IAS_MAX_CLASSNAME);
@@ -346,13 +345,12 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;);
IRDA_ASSERT(name != NULL, return;);
- attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
+ attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
if (attrib == NULL) {
IRDA_WARNING("%s: Unable to allocate attribute!\n",
__FUNCTION__);
return;
}
- memset(attrib, 0, sizeof( struct ias_attrib));
attrib->magic = IAS_ATTRIB_MAGIC;
attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
@@ -382,13 +380,12 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
IRDA_ASSERT(name != NULL, return;);
IRDA_ASSERT(octets != NULL, return;);
- attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
+ attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
if (attrib == NULL) {
IRDA_WARNING("%s: Unable to allocate attribute!\n",
__FUNCTION__);
return;
}
- memset(attrib, 0, sizeof( struct ias_attrib));
attrib->magic = IAS_ATTRIB_MAGIC;
attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
@@ -416,13 +413,12 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
IRDA_ASSERT(name != NULL, return;);
IRDA_ASSERT(value != NULL, return;);
- attrib = kmalloc(sizeof( struct ias_attrib), GFP_ATOMIC);
+ attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC);
if (attrib == NULL) {
IRDA_WARNING("%s: Unable to allocate attribute!\n",
__FUNCTION__);
return;
}
- memset(attrib, 0, sizeof( struct ias_attrib));
attrib->magic = IAS_ATTRIB_MAGIC;
attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
@@ -443,12 +439,11 @@ struct ias_value *irias_new_integer_value(int integer)
{
struct ias_value *value;
- value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC);
+ value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
if (value == NULL) {
IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
return NULL;
}
- memset(value, 0, sizeof(struct ias_value));
value->type = IAS_INTEGER;
value->len = 4;
@@ -469,12 +464,11 @@ struct ias_value *irias_new_string_value(char *string)
{
struct ias_value *value;
- value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC);
+ value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
if (value == NULL) {
IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
return NULL;
}
- memset( value, 0, sizeof( struct ias_value));
value->type = IAS_STRING;
value->charset = CS_ASCII;
@@ -495,12 +489,11 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len)
{
struct ias_value *value;
- value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC);
+ value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
if (value == NULL) {
IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
return NULL;
}
- memset(value, 0, sizeof(struct ias_value));
value->type = IAS_OCT_SEQ;
/* Check length */
@@ -522,12 +515,11 @@ struct ias_value *irias_new_missing_value(void)
{
struct ias_value *value;
- value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC);
+ value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
if (value == NULL) {
IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
return NULL;
}
- memset(value, 0, sizeof(struct ias_value));
value->type = IAS_MISSING;
value->len = 0;
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index bd659dd545ac..7dd0a2fe1d20 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -636,7 +636,7 @@ void irlan_get_provider_info(struct irlan_cb *self)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
- skb = dev_alloc_skb(64);
+ skb = alloc_skb(64, GFP_ATOMIC);
if (!skb)
return;
@@ -668,7 +668,7 @@ void irlan_open_data_channel(struct irlan_cb *self)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
- skb = dev_alloc_skb(64);
+ skb = alloc_skb(64, GFP_ATOMIC);
if (!skb)
return;
@@ -704,7 +704,7 @@ void irlan_close_data_channel(struct irlan_cb *self)
if (self->client.tsap_ctrl == NULL)
return;
- skb = dev_alloc_skb(64);
+ skb = alloc_skb(64, GFP_ATOMIC);
if (!skb)
return;
@@ -739,7 +739,7 @@ static void irlan_open_unicast_addr(struct irlan_cb *self)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
- skb = dev_alloc_skb(128);
+ skb = alloc_skb(128, GFP_ATOMIC);
if (!skb)
return;
@@ -777,7 +777,7 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
- skb = dev_alloc_skb(128);
+ skb = alloc_skb(128, GFP_ATOMIC);
if (!skb)
return;
@@ -816,7 +816,7 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
- skb = dev_alloc_skb(128);
+ skb = alloc_skb(128, GFP_ATOMIC);
if (!skb)
return;
@@ -856,7 +856,7 @@ static void irlan_get_unicast_addr(struct irlan_cb *self)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
- skb = dev_alloc_skb(128);
+ skb = alloc_skb(128, GFP_ATOMIC);
if (!skb)
return;
@@ -891,7 +891,7 @@ void irlan_get_media_char(struct irlan_cb *self)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
- skb = dev_alloc_skb(64);
+ skb = alloc_skb(64, GFP_ATOMIC);
if (!skb)
return;
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 39c202d1c374..9c0df86044d7 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -296,7 +296,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
- skb = dev_alloc_skb(128);
+ skb = alloc_skb(128, GFP_ATOMIC);
if (!skb)
return;
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index cade355ac8af..e7852a07495e 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -116,11 +116,10 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
/* Initialize the irlap structure. */
- self = kmalloc(sizeof(struct irlap_cb), GFP_KERNEL);
+ self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
if (self == NULL)
return NULL;
- memset(self, 0, sizeof(struct irlap_cb));
self->magic = LAP_MAGIC;
/* Make a binding between the layers */
@@ -882,7 +881,7 @@ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
/* Change speed now, or just piggyback speed on frames */
if (now) {
/* Send down empty frame to trigger speed change */
- skb = dev_alloc_skb(0);
+ skb = alloc_skb(0, GFP_ATOMIC);
if (skb)
irlap_queue_xmit(self, skb);
}
@@ -1222,7 +1221,7 @@ static int irlap_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
- struct irlap_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct irlap_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
@@ -1238,7 +1237,6 @@ static int irlap_seq_open(struct inode *inode, struct file *file)
seq = file->private_data;
seq->private = s;
- memset(s, 0, sizeof(*s));
out:
return rc;
out_kfree:
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 3e9a06abbdd0..ccb983bf0f4a 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -117,7 +117,7 @@ void irlap_send_snrm_frame(struct irlap_cb *self, struct qos_info *qos)
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
/* Allocate frame */
- tx_skb = dev_alloc_skb(64);
+ tx_skb = alloc_skb(64, GFP_ATOMIC);
if (!tx_skb)
return;
@@ -210,7 +210,7 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos)
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
/* Allocate frame */
- tx_skb = dev_alloc_skb(64);
+ tx_skb = alloc_skb(64, GFP_ATOMIC);
if (!tx_skb)
return;
@@ -250,7 +250,7 @@ void irlap_send_dm_frame( struct irlap_cb *self)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
- tx_skb = dev_alloc_skb(32);
+ tx_skb = alloc_skb(32, GFP_ATOMIC);
if (!tx_skb)
return;
@@ -282,7 +282,7 @@ void irlap_send_disc_frame(struct irlap_cb *self)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
- tx_skb = dev_alloc_skb(16);
+ tx_skb = alloc_skb(16, GFP_ATOMIC);
if (!tx_skb)
return;
@@ -315,7 +315,7 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s,
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
IRDA_ASSERT(discovery != NULL, return;);
- tx_skb = dev_alloc_skb(64);
+ tx_skb = alloc_skb(64, GFP_ATOMIC);
if (!tx_skb)
return;
@@ -422,11 +422,10 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
return;
}
- if ((discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) {
+ if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) {
IRDA_WARNING("%s: kmalloc failed!\n", __FUNCTION__);
return;
}
- memset(discovery, 0, sizeof(discovery_t));
discovery->data.daddr = info->daddr;
discovery->data.saddr = self->saddr;
@@ -576,7 +575,7 @@ void irlap_send_rr_frame(struct irlap_cb *self, int command)
struct sk_buff *tx_skb;
__u8 *frame;
- tx_skb = dev_alloc_skb(16);
+ tx_skb = alloc_skb(16, GFP_ATOMIC);
if (!tx_skb)
return;
@@ -601,7 +600,7 @@ void irlap_send_rd_frame(struct irlap_cb *self)
struct sk_buff *tx_skb;
__u8 *frame;
- tx_skb = dev_alloc_skb(16);
+ tx_skb = alloc_skb(16, GFP_ATOMIC);
if (!tx_skb)
return;
@@ -1215,7 +1214,7 @@ void irlap_send_test_frame(struct irlap_cb *self, __u8 caddr, __u32 daddr,
struct test_frame *frame;
__u8 *info;
- tx_skb = dev_alloc_skb(cmd->len+sizeof(struct test_frame));
+ tx_skb = alloc_skb(cmd->len+sizeof(struct test_frame), GFP_ATOMIC);
if (!tx_skb)
return;
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 129ad64c15bb..c440913dee14 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -78,10 +78,9 @@ int __init irlmp_init(void)
{
IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
/* Initialize the irlmp structure. */
- irlmp = kmalloc( sizeof(struct irlmp_cb), GFP_KERNEL);
+ irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL);
if (irlmp == NULL)
return -ENOMEM;
- memset(irlmp, 0, sizeof(struct irlmp_cb));
irlmp->magic = LMP_MAGIC;
@@ -160,12 +159,11 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid)
return NULL;
/* Allocate new instance of a LSAP connection */
- self = kmalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
+ self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
if (self == NULL) {
IRDA_ERROR("%s: can't allocate memory\n", __FUNCTION__);
return NULL;
}
- memset(self, 0, sizeof(struct lsap_cb));
self->magic = LMP_LSAP_MAGIC;
self->slsap_sel = slsap_sel;
@@ -288,12 +286,11 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify)
/*
* Allocate new instance of a LSAP connection
*/
- lap = kmalloc(sizeof(struct lap_cb), GFP_KERNEL);
+ lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL);
if (lap == NULL) {
IRDA_ERROR("%s: unable to kmalloc\n", __FUNCTION__);
return;
}
- memset(lap, 0, sizeof(struct lap_cb));
lap->irlap = irlap;
lap->magic = LMP_LAP_MAGIC;
@@ -395,7 +392,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
/* Any userdata? */
if (tx_skb == NULL) {
- tx_skb = dev_alloc_skb(64);
+ tx_skb = alloc_skb(64, GFP_ATOMIC);
if (!tx_skb)
return -ENOMEM;
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index e53bf9e0053e..a1e502ff9070 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -476,11 +476,10 @@ dev_irnet_open(struct inode * inode,
#endif /* SECURE_DEVIRNET */
/* Allocate a private structure for this IrNET instance */
- ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+ ap = kzalloc(sizeof(*ap), GFP_KERNEL);
DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n");
/* initialize the irnet structure */
- memset(ap, 0, sizeof(*ap));
ap->file = file;
/* PPP channel setup */
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 49c51c5f1a86..42acf1cde737 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -85,10 +85,9 @@ static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
*/
int __init irttp_init(void)
{
- irttp = kmalloc(sizeof(struct irttp_cb), GFP_KERNEL);
+ irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL);
if (irttp == NULL)
return -ENOMEM;
- memset(irttp, 0, sizeof(struct irttp_cb));
irttp->magic = TTP_MAGIC;
@@ -306,7 +305,8 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__);
/* Make new segment */
- frag = dev_alloc_skb(self->max_seg_size+self->max_header_size);
+ frag = alloc_skb(self->max_seg_size+self->max_header_size,
+ GFP_ATOMIC);
if (!frag)
return;
@@ -389,12 +389,11 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
return NULL;
}
- self = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
+ self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
if (self == NULL) {
IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__);
return NULL;
}
- memset(self, 0, sizeof(struct tsap_cb));
spin_lock_init(&self->lock);
/* Initialise todo timer */
@@ -805,7 +804,7 @@ static inline void irttp_give_credit(struct tsap_cb *self)
self->send_credit, self->avail_credit, self->remote_credit);
/* Give credit to peer */
- tx_skb = dev_alloc_skb(64);
+ tx_skb = alloc_skb(64, GFP_ATOMIC);
if (!tx_skb)
return;
@@ -1094,7 +1093,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
/* Any userdata supplied? */
if (userdata == NULL) {
- tx_skb = dev_alloc_skb(64);
+ tx_skb = alloc_skb(64, GFP_ATOMIC);
if (!tx_skb)
return -ENOMEM;
@@ -1342,7 +1341,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
/* Any userdata supplied? */
if (userdata == NULL) {
- tx_skb = dev_alloc_skb(64);
+ tx_skb = alloc_skb(64, GFP_ATOMIC);
if (!tx_skb)
return -ENOMEM;
@@ -1541,7 +1540,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
if (!userdata) {
struct sk_buff *tx_skb;
- tx_skb = dev_alloc_skb(64);
+ tx_skb = alloc_skb(64, GFP_ATOMIC);
if (!tx_skb)
return -ENOMEM;
@@ -1876,7 +1875,7 @@ static int irttp_seq_open(struct inode *inode, struct file *file)
int rc = -ENOMEM;
struct irttp_iter_state *s;
- s = kmalloc(sizeof(*s), GFP_KERNEL);
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
@@ -1886,7 +1885,6 @@ static int irttp_seq_open(struct inode *inode, struct file *file)
seq = file->private_data;
seq->private = s;
- memset(s, 0, sizeof(*s));
out:
return rc;
out_kfree:
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index aea6616cea3d..d504eed416f6 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -115,14 +115,12 @@ static struct lapb_cb *lapb_devtostruct(struct net_device *dev)
*/
static struct lapb_cb *lapb_create_cb(void)
{
- struct lapb_cb *lapb = kmalloc(sizeof(*lapb), GFP_ATOMIC);
+ struct lapb_cb *lapb = kzalloc(sizeof(*lapb), GFP_ATOMIC);
if (!lapb)
goto out;
- memset(lapb, 0x00, sizeof(*lapb));
-
skb_queue_head_init(&lapb->write_queue);
skb_queue_head_init(&lapb->ack_queue);
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index bd242a49514a..d12413cff5bd 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -33,10 +33,9 @@ unsigned char llc_station_mac_sa[ETH_ALEN];
*/
static struct llc_sap *llc_sap_alloc(void)
{
- struct llc_sap *sap = kmalloc(sizeof(*sap), GFP_ATOMIC);
+ struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC);
if (sap) {
- memset(sap, 0, sizeof(*sap));
sap->state = LLC_SAP_STATE_ACTIVE;
memcpy(sap->laddr.mac, llc_station_mac_sa, ETH_ALEN);
rwlock_init(&sap->sk_list.lock);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 42a178aa30f9..a9894ddfd72a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -386,8 +386,8 @@ config NETFILTER_XT_MATCH_REALM
<file:Documentation/modules.txt>. If unsure, say `N'.
config NETFILTER_XT_MATCH_SCTP
- tristate '"sctp" protocol match support'
- depends on NETFILTER_XTABLES
+ tristate '"sctp" protocol match support (EXPERIMENTAL)'
+ depends on NETFILTER_XTABLES && EXPERIMENTAL
help
With this option enabled, you will be able to use the
`sctp' match in order to match on SCTP source/destination ports
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 5fcab2ef231f..4ef836699962 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -428,6 +428,8 @@ static struct file_operations ct_cpu_seq_fops = {
/* Sysctl support */
+int nf_conntrack_checksum = 1;
+
#ifdef CONFIG_SYSCTL
/* From nf_conntrack_core.c */
@@ -459,8 +461,6 @@ extern unsigned int nf_ct_generic_timeout;
static int log_invalid_proto_min = 0;
static int log_invalid_proto_max = 255;
-int nf_conntrack_checksum = 1;
-
static struct ctl_table_header *nf_ct_sysctl_header;
static ctl_table nf_ct_sysctl_table[] = {
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index bb6fcee452ca..662a869593bf 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -219,21 +219,20 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
switch (verdict & NF_VERDICT_MASK) {
case NF_ACCEPT:
+ case NF_STOP:
info->okfn(skb);
+ case NF_STOLEN:
break;
-
case NF_QUEUE:
if (!nf_queue(&skb, elem, info->pf, info->hook,
info->indev, info->outdev, info->okfn,
verdict >> NF_VERDICT_BITS))
goto next_hook;
break;
+ default:
+ kfree_skb(skb);
}
rcu_read_unlock();
-
- if (verdict == NF_DROP)
- kfree_skb(skb);
-
kfree(info);
return;
}
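Reading the nf_reinject() hunk as a whole: NF_STOP is now treated like NF_ACCEPT (okfn is invoked, then control falls through to the NF_STOLEN break), NF_QUEUE re-queues as before, and every remaining verdict, NF_DROP included, frees the skb in the default arm, which is what makes the old trailing "if (verdict == NF_DROP) kfree_skb(skb)" redundant. The resulting control flow, sketched with the fall-through called out:

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                info->okfn(skb);
                /* deliberate fall-through to the break below */
        case NF_STOLEN:
                break;
        case NF_QUEUE:
                if (!nf_queue(&skb, elem, info->pf, info->hook,
                              info->indev, info->outdev, info->okfn,
                              verdict >> NF_VERDICT_BITS))
                        goto next_hook;
                break;
        default:
                /* NF_DROP and any unrecognised verdict */
                kfree_skb(skb);
        }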
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 5fe4c9df17f5..a9f4f6f3c628 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -113,6 +113,21 @@ checkentry(const char *tablename,
if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
info->bitmask & ~XT_PHYSDEV_OP_MASK)
return 0;
+ if (brnf_deferred_hooks == 0 &&
+ info->bitmask & XT_PHYSDEV_OP_OUT &&
+ (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
+ info->invert & XT_PHYSDEV_OP_BRIDGED) &&
+ hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) |
+ (1 << NF_IP_POST_ROUTING))) {
+ printk(KERN_WARNING "physdev match: using --physdev-out in the "
+ "OUTPUT, FORWARD and POSTROUTING chains for non-bridged "
+ "traffic is deprecated and breaks other things, it will "
+ "be removed in January 2007. See Documentation/"
+ "feature-removal-schedule.txt for details. This doesn't "
+ "affect you in case you're using it for purely bridged "
+ "traffic.\n");
+ brnf_deferred_hooks = 1;
+ }
return 1;
}
diff --git a/net/netfilter/xt_pkttype.c b/net/netfilter/xt_pkttype.c
index 3ac703b5cb8f..d2f5320a80bf 100644
--- a/net/netfilter/xt_pkttype.c
+++ b/net/netfilter/xt_pkttype.c
@@ -9,6 +9,8 @@
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
+#include <linux/in.h>
+#include <linux/ip.h>
#include <linux/netfilter/xt_pkttype.h>
#include <linux/netfilter/x_tables.h>
@@ -28,9 +30,17 @@ static int match(const struct sk_buff *skb,
unsigned int protoff,
int *hotdrop)
{
+ u_int8_t type;
const struct xt_pkttype_info *info = matchinfo;
- return (skb->pkt_type == info->pkttype) ^ info->invert;
+ if (skb->pkt_type == PACKET_LOOPBACK)
+ type = (MULTICAST(skb->nh.iph->daddr)
+ ? PACKET_MULTICAST
+ : PACKET_BROADCAST);
+ else
+ type = skb->pkt_type;
+
+ return (type == info->pkttype) ^ info->invert;
}
static struct xt_match pkttype_match = {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 55c0adc8f115..b85c1f9f1288 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -562,10 +562,9 @@ static int netlink_alloc_groups(struct sock *sk)
if (err)
return err;
- nlk->groups = kmalloc(NLGRPSZ(groups), GFP_KERNEL);
+ nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
if (nlk->groups == NULL)
return -ENOMEM;
- memset(nlk->groups, 0, NLGRPSZ(groups));
nlk->ngroups = groups;
return 0;
}
@@ -1393,11 +1392,10 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
struct sock *sk;
struct netlink_sock *nlk;
- cb = kmalloc(sizeof(*cb), GFP_KERNEL);
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (cb == NULL)
return -ENOBUFS;
- memset(cb, 0, sizeof(*cb));
cb->dump = dump;
cb->done = done;
cb->nlh = nlh;
@@ -1668,7 +1666,7 @@ static int netlink_seq_open(struct inode *inode, struct file *file)
struct nl_seq_iter *iter;
int err;
- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
@@ -1678,7 +1676,6 @@ static int netlink_seq_open(struct inode *inode, struct file *file)
return err;
}
- memset(iter, 0, sizeof(*iter));
seq = file->private_data;
seq->private = iter;
return 0;
@@ -1747,15 +1744,13 @@ static int __init netlink_proto_init(void)
if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
netlink_skb_parms_too_large();
- nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
+ nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
if (!nl_table) {
enomem:
printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
return -ENOMEM;
}
- memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);
-
if (num_physpages >= (128 * 1024))
max = num_physpages >> (21 - PAGE_SHIFT);
else
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 389a4119e1b4..1d50f801f181 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -66,6 +66,14 @@ static DEFINE_SPINLOCK(nr_list_lock);
static const struct proto_ops nr_proto_ops;
/*
+ * NETROM network devices are virtual network devices encapsulating NETROM
+ * frames into AX.25 which will be sent through an AX.25 device, so form a
+ * special "super class" of normal net devices; split their locks off into a
+ * separate class since they always nest.
+ */
+static struct lock_class_key nr_netdev_xmit_lock_key;
+
+/*
* Socket removal during an interrupt is now safe.
*/
static void nr_remove_socket(struct sock *sk)
@@ -986,18 +994,18 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
nr_make->vl = 0;
nr_make->state = NR_STATE_3;
sk_acceptq_added(sk);
-
- nr_insert_socket(make);
-
skb_queue_head(&sk->sk_receive_queue, skb);
- nr_start_heartbeat(make);
- nr_start_idletimer(make);
-
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, skb->len);
bh_unlock_sock(sk);
+
+ nr_insert_socket(make);
+
+ nr_start_heartbeat(make);
+ nr_start_idletimer(make);
+
return 1;
}
@@ -1382,14 +1390,12 @@ static int __init nr_proto_init(void)
return -1;
}
- dev_nr = kmalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL);
+ dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL);
if (dev_nr == NULL) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
return -1;
}
- memset(dev_nr, 0x00, nr_ndevs * sizeof(struct net_device *));
-
for (i = 0; i < nr_ndevs; i++) {
char name[IFNAMSIZ];
struct net_device *dev;
@@ -1407,6 +1413,7 @@ static int __init nr_proto_init(void)
free_netdev(dev);
goto fail;
}
+ lockdep_set_class(&dev->_xmit_lock, &nr_netdev_xmit_lock_key);
dev_nr[i] = dev;
}
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index 75b72d389ba9..ddba1c144260 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -138,8 +138,8 @@ static void nr_heartbeat_expiry(unsigned long param)
if (sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
sock_hold(sk);
- nr_destroy_socket(sk);
bh_unlock_sock(sk);
+ nr_destroy_socket(sk);
sock_put(sk);
return;
}
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index d0a67bb31363..08a542855654 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -67,6 +67,14 @@ static struct proto_ops rose_proto_ops;
ax25_address rose_callsign;
/*
+ * ROSE network devices are virtual network devices encapsulating ROSE
+ * frames into AX.25 which will be sent through an AX.25 device, so form a
+ * special "super class" of normal net devices; split their locks off into a
+ * separate class since they always nest.
+ */
+static struct lock_class_key rose_netdev_xmit_lock_key;
+
+/*
* Convert a ROSE address into text.
*/
const char *rose2asc(const rose_address *addr)
@@ -1490,14 +1498,13 @@ static int __init rose_proto_init(void)
rose_callsign = null_ax25_address;
- dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
+ dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
if (dev_rose == NULL) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
rc = -ENOMEM;
goto out_proto_unregister;
}
- memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*));
for (i = 0; i < rose_ndevs; i++) {
struct net_device *dev;
char name[IFNAMSIZ];
@@ -1516,6 +1523,7 @@ static int __init rose_proto_init(void)
free_netdev(dev);
goto fail;
}
+ lockdep_set_class(&dev->_xmit_lock, &rose_netdev_xmit_lock_key);
dev_rose[i] = dev;
}
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
index 573b572f8f91..93d2c55ad2d5 100644
--- a/net/rxrpc/connection.c
+++ b/net/rxrpc/connection.c
@@ -58,13 +58,12 @@ static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
_enter("%p",peer);
/* allocate and initialise a connection record */
- conn = kmalloc(sizeof(struct rxrpc_connection), GFP_KERNEL);
+ conn = kzalloc(sizeof(struct rxrpc_connection), GFP_KERNEL);
if (!conn) {
_leave(" = -ENOMEM");
return -ENOMEM;
}
- memset(conn, 0, sizeof(struct rxrpc_connection));
atomic_set(&conn->usage, 1);
INIT_LIST_HEAD(&conn->link);
@@ -535,13 +534,12 @@ int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
return -EINVAL;
}
- msg = kmalloc(sizeof(struct rxrpc_message), alloc_flags);
+ msg = kzalloc(sizeof(struct rxrpc_message), alloc_flags);
if (!msg) {
_leave(" = -ENOMEM");
return -ENOMEM;
}
- memset(msg, 0, sizeof(*msg));
atomic_set(&msg->usage, 1);
INIT_LIST_HEAD(&msg->link);
diff --git a/net/rxrpc/peer.c b/net/rxrpc/peer.c
index ed38f5b17c1b..8a275157a3bb 100644
--- a/net/rxrpc/peer.c
+++ b/net/rxrpc/peer.c
@@ -58,13 +58,12 @@ static int __rxrpc_create_peer(struct rxrpc_transport *trans, __be32 addr,
_enter("%p,%08x", trans, ntohl(addr));
/* allocate and initialise a peer record */
- peer = kmalloc(sizeof(struct rxrpc_peer), GFP_KERNEL);
+ peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL);
if (!peer) {
_leave(" = -ENOMEM");
return -ENOMEM;
}
- memset(peer, 0, sizeof(struct rxrpc_peer));
atomic_set(&peer->usage, 1);
INIT_LIST_HEAD(&peer->link);
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
index dbe6105e83a5..465efc86fccf 100644
--- a/net/rxrpc/transport.c
+++ b/net/rxrpc/transport.c
@@ -68,11 +68,10 @@ int rxrpc_create_transport(unsigned short port,
_enter("%hu", port);
- trans = kmalloc(sizeof(struct rxrpc_transport), GFP_KERNEL);
+ trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL);
if (!trans)
return -ENOMEM;
- memset(trans, 0, sizeof(struct rxrpc_transport));
atomic_set(&trans->usage, 1);
INIT_LIST_HEAD(&trans->services);
INIT_LIST_HEAD(&trans->link);
@@ -312,13 +311,12 @@ static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
_enter("");
- msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
+ msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
if (!msg) {
_leave(" = -ENOMEM");
return -ENOMEM;
}
- memset(msg, 0, sizeof(*msg));
atomic_set(&msg->usage, 1);
list_add_tail(&msg->link,msgq);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 599423cc9d0d..a2587b52e531 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -312,10 +312,9 @@ struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
}
*err = -ENOMEM;
- a = kmalloc(sizeof(*a), GFP_KERNEL);
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
if (a == NULL)
goto err_mod;
- memset(a, 0, sizeof(*a));
/* backward compatibility for policer */
if (name == NULL)
@@ -492,10 +491,9 @@ tcf_action_get_1(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int *err)
index = *(int *)RTA_DATA(tb[TCA_ACT_INDEX - 1]);
*err = -ENOMEM;
- a = kmalloc(sizeof(struct tc_action), GFP_KERNEL);
+ a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
if (a == NULL)
return NULL;
- memset(a, 0, sizeof(struct tc_action));
*err = -EINVAL;
a->ops = tc_lookup_action(tb[TCA_ACT_KIND - 1]);
@@ -531,12 +529,11 @@ static struct tc_action *create_a(int i)
{
struct tc_action *act;
- act = kmalloc(sizeof(*act), GFP_KERNEL);
+ act = kzalloc(sizeof(*act), GFP_KERNEL);
if (act == NULL) {
printk("create_a: failed to alloc!\n");
return NULL;
}
- memset(act, 0, sizeof(*act));
act->order = i;
return act;
}
@@ -602,8 +599,8 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid)
return err;
rtattr_failure:
- module_put(a->ops->owner);
nlmsg_failure:
+ module_put(a->ops->owner);
err_out:
kfree_skb(skb);
kfree(a);
@@ -884,8 +881,6 @@ static int __init tc_action_init(void)
link_p[RTM_GETACTION-RTM_BASE].dumpit = tc_dump_action;
}
- printk("TC classifier action (bugs to netdev@vger.kernel.org cc "
- "hadi@cyberus.ca)\n");
return 0;
}
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 58b3a8652042..f257475e0e0c 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -209,10 +209,9 @@ tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,int bind, int ref)
s = sizeof(*opt) + p->nkeys * sizeof(struct tc_pedit_key);
/* netlink spinlocks held above us - must use ATOMIC */
- opt = kmalloc(s, GFP_ATOMIC);
+ opt = kzalloc(s, GFP_ATOMIC);
if (opt == NULL)
return -ENOBUFS;
- memset(opt, 0, s);
memcpy(opt->keys, p->keys, p->nkeys * sizeof(struct tc_pedit_key));
opt->index = p->index;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 47e00bd9625e..da905d7b4b40 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -196,10 +196,9 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
return ret;
}
- p = kmalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL)
return -ENOMEM;
- memset(p, 0, sizeof(*p));
ret = ACT_P_CREATED;
p->refcnt = 1;
@@ -429,11 +428,10 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
return p;
}
- p = kmalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL)
return NULL;
- memset(p, 0, sizeof(*p));
p->refcnt = 1;
spin_lock_init(&p->lock);
p->stats_lock = &p->lock;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 61507f006b11..86cac49a0531 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -178,19 +178,17 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
err = -ENOBUFS;
if (head == NULL) {
- head = kmalloc(sizeof(*head), GFP_KERNEL);
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
if (head == NULL)
goto errout;
- memset(head, 0, sizeof(*head));
INIT_LIST_HEAD(&head->flist);
tp->root = head;
}
- f = kmalloc(sizeof(*f), GFP_KERNEL);
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
if (f == NULL)
goto errout;
- memset(f, 0, sizeof(*f));
err = -EINVAL;
if (handle)
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index d41de91fc4f6..e6973d9b686d 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -267,20 +267,18 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
return -EINVAL;
if (head == NULL) {
- head = kmalloc(sizeof(struct fw_head), GFP_KERNEL);
+ head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
if (head == NULL)
return -ENOBUFS;
- memset(head, 0, sizeof(*head));
tcf_tree_lock(tp);
tp->root = head;
tcf_tree_unlock(tp);
}
- f = kmalloc(sizeof(struct fw_filter), GFP_KERNEL);
+ f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
if (f == NULL)
return -ENOBUFS;
- memset(f, 0, sizeof(*f));
f->id = handle;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index c2e71900f7bd..d3aea730d4c8 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -396,10 +396,9 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
h1 = to_hash(nhandle);
if ((b = head->table[h1]) == NULL) {
err = -ENOBUFS;
- b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL);
+ b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
if (b == NULL)
goto errout;
- memset(b, 0, sizeof(*b));
tcf_tree_lock(tp);
head->table[h1] = b;
@@ -475,20 +474,18 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
err = -ENOBUFS;
if (head == NULL) {
- head = kmalloc(sizeof(struct route4_head), GFP_KERNEL);
+ head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
if (head == NULL)
goto errout;
- memset(head, 0, sizeof(struct route4_head));
tcf_tree_lock(tp);
tp->root = head;
tcf_tree_unlock(tp);
}
- f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
+ f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
if (f == NULL)
goto errout;
- memset(f, 0, sizeof(*f));
err = route4_set_parms(tp, base, f, handle, head, tb,
tca[TCA_RATE-1], 1);
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index ba8741971629..6e230ecfba05 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -240,9 +240,8 @@ static int rsvp_init(struct tcf_proto *tp)
{
struct rsvp_head *data;
- data = kmalloc(sizeof(struct rsvp_head), GFP_KERNEL);
+ data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
if (data) {
- memset(data, 0, sizeof(struct rsvp_head));
tp->root = data;
return 0;
}
@@ -446,11 +445,10 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
goto errout2;
err = -ENOBUFS;
- f = kmalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
+ f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
if (f == NULL)
goto errout2;
- memset(f, 0, sizeof(*f));
h2 = 16;
if (tb[TCA_RSVP_SRC-1]) {
err = -EINVAL;
@@ -532,10 +530,9 @@ insert:
/* No session found. Create new one. */
err = -ENOBUFS;
- s = kmalloc(sizeof(struct rsvp_session), GFP_KERNEL);
+ s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
if (s == NULL)
goto errout;
- memset(s, 0, sizeof(*s));
memcpy(s->dst, dst, sizeof(s->dst));
if (pinfo) {
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 7870e7bb0bac..5af8a59e1503 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -148,11 +148,10 @@ static int tcindex_init(struct tcf_proto *tp)
struct tcindex_data *p;
DPRINTK("tcindex_init(tp %p)\n",tp);
- p = kmalloc(sizeof(struct tcindex_data),GFP_KERNEL);
+ p = kzalloc(sizeof(struct tcindex_data),GFP_KERNEL);
if (!p)
return -ENOMEM;
- memset(p, 0, sizeof(*p));
p->mask = 0xffff;
p->hash = DEFAULT_HASH_SIZE;
p->fall_through = 1;
@@ -296,16 +295,14 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
err = -ENOMEM;
if (!cp.perfect && !cp.h) {
if (valid_perfect_hash(&cp)) {
- cp.perfect = kmalloc(cp.hash * sizeof(*r), GFP_KERNEL);
+ cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
if (!cp.perfect)
goto errout;
- memset(cp.perfect, 0, cp.hash * sizeof(*r));
balloc = 1;
} else {
- cp.h = kmalloc(cp.hash * sizeof(f), GFP_KERNEL);
+ cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
if (!cp.h)
goto errout;
- memset(cp.h, 0, cp.hash * sizeof(f));
balloc = 2;
}
}
@@ -316,10 +313,9 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
r = tcindex_lookup(&cp, handle) ? : &new_filter_result;
if (r == &new_filter_result) {
- f = kmalloc(sizeof(*f), GFP_KERNEL);
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
goto errout_alloc;
- memset(f, 0, sizeof(*f));
}
if (tb[TCA_TCINDEX_CLASSID-1]) {
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index d712edcd1bcf..eea366966740 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -307,23 +307,21 @@ static int u32_init(struct tcf_proto *tp)
if (tp_c->q == tp->q)
break;
- root_ht = kmalloc(sizeof(*root_ht), GFP_KERNEL);
+ root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
if (root_ht == NULL)
return -ENOBUFS;
- memset(root_ht, 0, sizeof(*root_ht));
root_ht->divisor = 0;
root_ht->refcnt++;
root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
root_ht->prio = tp->prio;
if (tp_c == NULL) {
- tp_c = kmalloc(sizeof(*tp_c), GFP_KERNEL);
+ tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
if (tp_c == NULL) {
kfree(root_ht);
return -ENOBUFS;
}
- memset(tp_c, 0, sizeof(*tp_c));
tp_c->q = tp->q;
tp_c->next = u32_list;
u32_list = tp_c;
@@ -571,10 +569,9 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
if (handle == 0)
return -ENOMEM;
}
- ht = kmalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
+ ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
if (ht == NULL)
return -ENOBUFS;
- memset(ht, 0, sizeof(*ht) + divisor*sizeof(void*));
ht->tp_c = tp_c;
ht->refcnt = 0;
ht->divisor = divisor;
@@ -617,18 +614,16 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
s = RTA_DATA(tb[TCA_U32_SEL-1]);
- n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
+ n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
if (n == NULL)
return -ENOBUFS;
- memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key));
#ifdef CONFIG_CLS_U32_PERF
- n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
+ n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
if (n->pf == NULL) {
kfree(n);
return -ENOBUFS;
}
- memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64));
#endif
memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 698372954f4d..61e3b740ab1a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -773,10 +773,9 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len,
TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
goto errout;
- meta = kmalloc(sizeof(*meta), GFP_KERNEL);
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
if (meta == NULL)
goto errout;
- memset(meta, 0, sizeof(*meta));
memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 2405a86093a2..0fd0768a17c6 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -321,10 +321,9 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct rtattr *rta,
list_len = RTA_PAYLOAD(rt_list);
matches_len = tree_hdr->nmatches * sizeof(*em);
- tree->matches = kmalloc(matches_len, GFP_KERNEL);
+ tree->matches = kzalloc(matches_len, GFP_KERNEL);
if (tree->matches == NULL)
goto errout;
- memset(tree->matches, 0, matches_len);
/* We do not use rtattr_parse_nested here because the maximum
* number of attributes is unknown. This saves us the allocation
diff --git a/net/sched/estimator.c b/net/sched/estimator.c
index 5d3ae03e22a7..0ebc98e9be2d 100644
--- a/net/sched/estimator.c
+++ b/net/sched/estimator.c
@@ -139,11 +139,10 @@ int qdisc_new_estimator(struct tc_stats *stats, spinlock_t *stats_lock, struct r
if (parm->interval < -2 || parm->interval > 3)
return -EINVAL;
- est = kmalloc(sizeof(*est), GFP_KERNEL);
+ est = kzalloc(sizeof(*est), GFP_KERNEL);
if (est == NULL)
return -ENOBUFS;
- memset(est, 0, sizeof(*est));
est->interval = parm->interval + 2;
est->stats = stats;
est->stats_lock = stats_lock;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 80b7f6a8d008..bac881bfe362 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1926,10 +1926,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
}
err = -ENOBUFS;
- cl = kmalloc(sizeof(*cl), GFP_KERNEL);
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
if (cl == NULL)
goto failure;
- memset(cl, 0, sizeof(*cl));
cl->R_tab = rtab;
rtab = NULL;
cl->refcnt = 1;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index d735f51686a1..0834c2ee9174 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -432,10 +432,9 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
size = QDISC_ALIGN(sizeof(*sch));
size += ops->priv_size + (QDISC_ALIGNTO - 1);
- p = kmalloc(size, GFP_KERNEL);
+ p = kzalloc(size, GFP_KERNEL);
if (!p)
goto errout;
- memset(p, 0, size);
sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
sch->padded = (char *) sch - (char *) p;
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 0cafdd5feb1b..18e81a8ffb01 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -406,10 +406,9 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
struct gred_sched_data *q;
if (table->tab[dp] == NULL) {
- table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL);
+ table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
if (table->tab[dp] == NULL)
return -ENOMEM;
- memset(table->tab[dp], 0, sizeof(*q));
}
q = table->tab[dp];
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6b1b4a981e88..6a6735a2ed35 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1123,10 +1123,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (rsc == NULL && fsc == NULL)
return -EINVAL;
- cl = kmalloc(sizeof(struct hfsc_class), GFP_KERNEL);
+ cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
if (cl == NULL)
return -ENOBUFS;
- memset(cl, 0, sizeof(struct hfsc_class));
if (rsc != NULL)
hfsc_change_rsc(cl, rsc, 0);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 34afe41fa2f3..880a3394a51f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -196,7 +196,7 @@ struct htb_class
struct qdisc_rate_table *rate; /* rate table of the class itself */
struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */
long buffer,cbuffer; /* token bucket depth/rate */
- long mbuffer; /* max wait time */
+ psched_tdiff_t mbuffer; /* max wait time */
long tokens,ctokens; /* current number of tokens */
psched_time_t t_c; /* checkpoint time */
};
@@ -1559,10 +1559,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
goto failure;
}
err = -ENOBUFS;
- if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
+ if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
goto failure;
- memset(cl, 0, sizeof(*cl));
cl->refcnt = 1;
INIT_LIST_HEAD(&cl->sibling);
INIT_LIST_HEAD(&cl->hlist);
@@ -1601,7 +1600,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
/* set class to be in HTB_CAN_SEND state */
cl->tokens = hopt->buffer;
cl->ctokens = hopt->cbuffer;
- cl->mbuffer = 60000000; /* 1min */
+ cl->mbuffer = PSCHED_JIFFIE2US(HZ*60); /* 1min */
PSCHED_GET_TIME(cl->t_c);
cl->cmode = HTB_CAN_SEND;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index c5bd8064e6d8..a08ec4c7c55d 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -148,7 +148,8 @@ static long tabledist(unsigned long mu, long sigma,
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
- struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
+ /* We don't fill cb now as skb_unshare() may invalidate it */
+ struct netem_skb_cb *cb;
struct sk_buff *skb2;
int ret;
int count = 1;
@@ -200,6 +201,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
}
+ cb = (struct netem_skb_cb *)skb->cb;
if (q->gap == 0 /* not doing reordering */
|| q->counter < q->gap /* inside last reordering gap */
|| q->reorder < get_crandom(&q->reorder_cor)) {
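The netem hunk defers the cb assignment because, as its new comment notes, skb_unshare() further down netem_enqueue() may hand back a different sk_buff and free the original, leaving a pointer into the old skb->cb dangling. A sketch of the hazard and the fix; the error handling shown is hypothetical, not copied from the function:

        struct netem_skb_cb *cb;

        /* skb_unshare() can return a freshly copied sk_buff and free the one
         * passed in, so any earlier cb pointer would now point into freed memory
         */
        skb = skb_unshare(skb, GFP_ATOMIC);
        if (!skb)
                return NET_XMIT_DROP;   /* hypothetical error path */

        /* only now is it safe to cache a pointer into the current skb's cb[] */
        cb = (struct netem_skb_cb *)skb->cb;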
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 9d05e13e92f6..27329ce9c311 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -441,7 +441,8 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
/* If the primary path is changing, assume that the
* user wants to use this new path.
*/
- if (transport->state != SCTP_INACTIVE)
+ if ((transport->state == SCTP_ACTIVE) ||
+ (transport->state == SCTP_UNKNOWN))
asoc->peer.active_path = transport;
/*
@@ -532,11 +533,11 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
port = addr->v4.sin_port;
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
- " port: %d state:%s\n",
+ " port: %d state:%d\n",
asoc,
addr,
addr->v4.sin_port,
- peer_state == SCTP_UNKNOWN?"UNKNOWN":"ACTIVE");
+ peer_state);
/* Set the port if it has not been set yet. */
if (0 == asoc->peer.port)
@@ -545,9 +546,12 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
/* Check to see if this is a duplicate. */
peer = sctp_assoc_lookup_paddr(asoc, addr);
if (peer) {
- if (peer_state == SCTP_ACTIVE &&
- peer->state == SCTP_UNKNOWN)
- peer->state = SCTP_ACTIVE;
+ if (peer->state == SCTP_UNKNOWN) {
+ if (peer_state == SCTP_ACTIVE)
+ peer->state = SCTP_ACTIVE;
+ if (peer_state == SCTP_UNCONFIRMED)
+ peer->state = SCTP_UNCONFIRMED;
+ }
return peer;
}
@@ -739,7 +743,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
list_for_each(pos, &asoc->peer.transport_addr_list) {
t = list_entry(pos, struct sctp_transport, transports);
- if (t->state == SCTP_INACTIVE)
+ if ((t->state == SCTP_INACTIVE) ||
+ (t->state == SCTP_UNCONFIRMED))
continue;
if (!first || t->last_time_heard > first->last_time_heard) {
second = first;
@@ -759,7 +764,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
* [If the primary is active but not most recent, bump the most
* recently used transport.]
*/
- if (asoc->peer.primary_path->state != SCTP_INACTIVE &&
+ if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
+ (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
first != asoc->peer.primary_path) {
second = first;
first = asoc->peer.primary_path;
@@ -1054,7 +1060,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
transports);
if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
sctp_assoc_add_peer(asoc, &trans->ipaddr,
- GFP_ATOMIC, SCTP_ACTIVE);
+ GFP_ATOMIC, trans->state);
}
asoc->ctsn_ack_point = asoc->next_tsn - 1;
@@ -1094,7 +1100,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
/* Try to find an active transport. */
- if (t->state != SCTP_INACTIVE) {
+ if ((t->state == SCTP_ACTIVE) ||
+ (t->state == SCTP_UNKNOWN)) {
break;
} else {
/* Keep track of the next transport in case
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 2b962627f631..2b9c12a170e5 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -146,7 +146,7 @@ void sctp_bind_addr_free(struct sctp_bind_addr *bp)
/* Add an address to the bind address list in the SCTP_bind_addr structure. */
int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
- gfp_t gfp)
+ __u8 use_as_src, gfp_t gfp)
{
struct sctp_sockaddr_entry *addr;
@@ -163,6 +163,8 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
if (!addr->a.v4.sin_port)
addr->a.v4.sin_port = bp->port;
+ addr->use_as_src = use_as_src;
+
INIT_LIST_HEAD(&addr->list);
list_add_tail(&addr->list, &bp->address_list);
SCTP_DBG_OBJCNT_INC(addr);
@@ -274,7 +276,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
}
af->from_addr_param(&addr, rawaddr, port, 0);
- retval = sctp_add_bind_addr(bp, &addr, gfp);
+ retval = sctp_add_bind_addr(bp, &addr, 1, gfp);
if (retval) {
/* Can't finish building the list, clean up. */
sctp_bind_addr_clean(bp);
@@ -367,7 +369,7 @@ static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
(((AF_INET6 == addr->sa.sa_family) &&
(flags & SCTP_ADDR6_ALLOWED) &&
(flags & SCTP_ADDR6_PEERSUPP))))
- error = sctp_add_bind_addr(dest, addr, gfp);
+ error = sctp_add_bind_addr(dest, addr, 1, gfp);
}
return error;
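The new third argument to sctp_add_bind_addr() is a use_as_src flag that the rest of the series threads through every caller: addresses the local endpoint already owns are added with it set, while addresses merely advertised to the peer via ASCONF ADD-IP start out with it clear and are only marked usable once the peer's ASCONF-ACK comes back (see the socket.c and sm_make_chunk.c hunks below). The two call shapes, as they appear later in the series:

        /* a locally bound address: immediately valid as a packet source */
        ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);

        /* an address still being advertised via ASCONF: not yet a valid source */
        retval = sctp_add_bind_addr(bp, &saveaddr, 0, GFP_ATOMIC);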
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 67bd53070ee0..ffda1d680529 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -158,6 +158,12 @@ void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
void sctp_endpoint_free(struct sctp_endpoint *ep)
{
ep->base.dead = 1;
+
+ ep->base.sk->sk_state = SCTP_SS_CLOSED;
+
+ /* Unlink this endpoint, so we can't find it again! */
+ sctp_unhash_endpoint(ep);
+
sctp_endpoint_put(ep);
}
@@ -166,11 +172,6 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
- ep->base.sk->sk_state = SCTP_SS_CLOSED;
-
- /* Unlink this endpoint, so we can't find it again! */
- sctp_unhash_endpoint(ep);
-
/* Free up the HMAC transform. */
sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 8ef08070c8b6..99c0cefc04e0 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -290,7 +290,8 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
sctp_read_lock(addr_lock);
list_for_each(pos, &bp->address_list) {
laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
- if ((laddr->a.sa.sa_family == AF_INET6) &&
+ if ((laddr->use_as_src) &&
+ (laddr->a.sa.sa_family == AF_INET6) &&
(scope <= sctp_scope(&laddr->a))) {
bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
if (!baddr || (matchlen < bmatchlen)) {
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index e5faa351aaad..30b710c54e64 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -691,7 +691,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
if (!new_transport) {
new_transport = asoc->peer.active_path;
- } else if (new_transport->state == SCTP_INACTIVE) {
+ } else if ((new_transport->state == SCTP_INACTIVE) ||
+ (new_transport->state == SCTP_UNCONFIRMED)) {
/* If the chunk is Heartbeat or Heartbeat Ack,
* send it to chunk->transport, even if it's
* inactive.
@@ -848,7 +849,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
*/
new_transport = chunk->transport;
if (!new_transport ||
- new_transport->state == SCTP_INACTIVE)
+ ((new_transport->state == SCTP_INACTIVE) ||
+ (new_transport->state == SCTP_UNCONFIRMED)))
new_transport = asoc->peer.active_path;
/* Change packets if necessary. */
@@ -1464,7 +1466,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
/* Mark the destination transport address as
* active if it is not so marked.
*/
- if (transport->state == SCTP_INACTIVE) {
+ if ((transport->state == SCTP_INACTIVE) ||
+ (transport->state == SCTP_UNCONFIRMED)) {
sctp_assoc_control_transport(
transport->asoc,
transport,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 816c033d7886..1ab03a27a76e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -240,7 +240,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
(((AF_INET6 == addr->a.sa.sa_family) &&
(copy_flags & SCTP_ADDR6_ALLOWED) &&
(copy_flags & SCTP_ADDR6_PEERSUPP)))) {
- error = sctp_add_bind_addr(bp, &addr->a,
+ error = sctp_add_bind_addr(bp, &addr->a, 1,
GFP_ATOMIC);
if (error)
goto end_copy;
@@ -486,6 +486,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
list_for_each(pos, &bp->address_list) {
laddr = list_entry(pos, struct sctp_sockaddr_entry,
list);
+ if (!laddr->use_as_src)
+ continue;
sctp_v4_dst_saddr(&dst_saddr, dst, bp->port);
if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
goto out_unlock;
@@ -506,7 +508,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
list_for_each(pos, &bp->address_list) {
laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
- if (AF_INET == laddr->a.sa.sa_family) {
+ if ((laddr->use_as_src) &&
+ (AF_INET == laddr->a.sa.sa_family)) {
fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
if (!ip_route_output_key(&rt, &fl)) {
dst = &rt->u.dst;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2a8773691695..4f11f5858209 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1493,7 +1493,7 @@ no_hmac:
/* Also, add the destination address. */
if (list_empty(&retval->base.bind_addr.address_list)) {
- sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest,
+ sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1,
GFP_ATOMIC);
}
@@ -2017,7 +2017,7 @@ static int sctp_process_param(struct sctp_association *asoc,
af->from_addr_param(&addr, param.addr, asoc->peer.port, 0);
scope = sctp_scope(peer_addr);
if (sctp_in_scope(&addr, scope))
- if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_ACTIVE))
+ if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
return 0;
break;
@@ -2418,7 +2418,7 @@ static __u16 sctp_process_asconf_param(struct sctp_association *asoc,
* Due to Resource Shortage'.
*/
- peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_ACTIVE);
+ peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
if (!peer)
return SCTP_ERROR_RSRC_LOW;
@@ -2565,6 +2565,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
union sctp_addr_param *addr_param;
struct list_head *pos;
struct sctp_transport *transport;
+ struct sctp_sockaddr_entry *saddr;
int retval = 0;
addr_param = (union sctp_addr_param *)
@@ -2578,7 +2579,11 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
case SCTP_PARAM_ADD_IP:
sctp_local_bh_disable();
sctp_write_lock(&asoc->base.addr_lock);
- retval = sctp_add_bind_addr(bp, &addr, GFP_ATOMIC);
+ list_for_each(pos, &bp->address_list) {
+ saddr = list_entry(pos, struct sctp_sockaddr_entry, list);
+ if (sctp_cmp_addr_exact(&saddr->a, &addr))
+ saddr->use_as_src = 1;
+ }
sctp_write_unlock(&asoc->base.addr_lock);
sctp_local_bh_enable();
break;
@@ -2591,6 +2596,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
list_for_each(pos, &asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport,
transports);
+ dst_release(transport->dst);
sctp_transport_route(transport, NULL,
sctp_sk(asoc->base.sk));
}
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index c5beb2ad7ef7..9c10bdec1afe 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -430,7 +430,11 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
/* The check for association's overall error counter exceeding the
* threshold is done in the state function.
*/
- asoc->overall_error_count++;
+ /* When probing UNCONFIRMED addresses, the association overall
+ * error count is NOT incremented
+ */
+ if (transport->state != SCTP_UNCONFIRMED)
+ asoc->overall_error_count++;
if (transport->state != SCTP_INACTIVE &&
(transport->error_count++ >= transport->pathmaxrxt)) {
@@ -610,7 +614,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
/* Mark the destination transport address as active if it is not so
* marked.
*/
- if (t->state == SCTP_INACTIVE)
+ if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED))
sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
SCTP_HEARTBEAT_SUCCESS);
@@ -620,6 +624,10 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
*/
hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
+
+ /* Update the heartbeat timer. */
+ if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
+ sctp_transport_hold(t);
}
/* Helper function to do a transport reset at the expiry of the hearbeat
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 9e58144f4851..ead3f1b0ea3d 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -846,6 +846,7 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
hbinfo.daddr = transport->ipaddr;
hbinfo.sent_at = jiffies;
+ hbinfo.hb_nonce = transport->hb_nonce;
/* Send a heartbeat to our peer. */
paylen = sizeof(sctp_sender_hb_info_t);
@@ -1048,6 +1049,10 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
return SCTP_DISPOSITION_DISCARD;
}
+ /* Validate the 64-bit random nonce. */
+ if (hbinfo->hb_nonce != link->hb_nonce)
+ return SCTP_DISPOSITION_DISCARD;
+
max_interval = link->hbinterval + link->rto;
/* Check if the timestamp looks valid. */
@@ -5278,7 +5283,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
datalen -= sizeof(sctp_data_chunk_t);
deliver = SCTP_CMD_CHUNK_ULP;
- chunk->data_accepted = 1;
/* Think about partial delivery. */
if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
@@ -5357,6 +5361,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
if (SCTP_CMD_CHUNK_ULP == deliver)
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
+ chunk->data_accepted = 1;
+
/* Note: Some chunks may get overcounted (if we drop) or overcounted
* if we renege and the chunk arrives again.
*/
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0a2c71d0d8aa..54722e622e6d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -369,7 +369,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
/* Use GFP_ATOMIC since BHs are disabled. */
addr->v4.sin_port = ntohs(addr->v4.sin_port);
- ret = sctp_add_bind_addr(bp, addr, GFP_ATOMIC);
+ ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);
addr->v4.sin_port = htons(addr->v4.sin_port);
sctp_write_unlock(&ep->base.addr_lock);
sctp_local_bh_enable();
@@ -491,6 +491,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
struct sctp_chunk *chunk;
struct sctp_sockaddr_entry *laddr;
union sctp_addr *addr;
+ union sctp_addr saveaddr;
void *addr_buf;
struct sctp_af *af;
struct list_head *pos;
@@ -558,14 +559,26 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
}
retval = sctp_send_asconf(asoc, chunk);
+ if (retval)
+ goto out;
- /* FIXME: After sending the add address ASCONF chunk, we
- * cannot append the address to the association's binding
- * address list, because the new address may be used as the
- * source of a message sent to the peer before the ASCONF
- * chunk is received by the peer. So we should wait until
- * ASCONF_ACK is received.
+ /* Add the new addresses to the bind address list with
+ * use_as_src set to 0.
*/
+ sctp_local_bh_disable();
+ sctp_write_lock(&asoc->base.addr_lock);
+ addr_buf = addrs;
+ for (i = 0; i < addrcnt; i++) {
+ addr = (union sctp_addr *)addr_buf;
+ af = sctp_get_af_specific(addr->v4.sin_family);
+ memcpy(&saveaddr, addr, af->sockaddr_len);
+ saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
+ retval = sctp_add_bind_addr(bp, &saveaddr, 0,
+ GFP_ATOMIC);
+ addr_buf += af->sockaddr_len;
+ }
+ sctp_write_unlock(&asoc->base.addr_lock);
+ sctp_local_bh_enable();
}
out:
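
The rewritten add path above resolves the old FIXME by splitting the work: once the ASCONF has actually been sent, the new addresses are appended to the association's bind address list, but with use_as_src set to 0 so they cannot be chosen as a source until the peer's ASCONF-ACK flips the flag (as in the sctp_asconf_param_success() hunk earlier in this patch). A sketch of the queueing step, with our own wrapper name and error handling omitted; the third sctp_add_bind_addr() argument is the new use_as_src flag, and ports are kept in host order on the bind list, hence the ntohs():

static int queue_pending_addrs(struct sctp_bind_addr *bp,
                               struct sockaddr *addrs, int addrcnt)
{
        union sctp_addr saveaddr, *addr;
        void *addr_buf = addrs;
        struct sctp_af *af;
        int i, ret = 0;

        for (i = 0; i < addrcnt; i++) {
                addr = addr_buf;
                af = sctp_get_af_specific(addr->v4.sin_family);
                memcpy(&saveaddr, addr, af->sockaddr_len);
                saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
                ret = sctp_add_bind_addr(bp, &saveaddr, 0 /* !use_as_src */,
                                         GFP_ATOMIC);
                addr_buf += af->sockaddr_len;
        }
        return ret;
}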
@@ -676,12 +689,15 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
+ struct sctp_transport *transport;
struct sctp_bind_addr *bp;
struct sctp_chunk *chunk;
union sctp_addr *laddr;
+ union sctp_addr saveaddr;
void *addr_buf;
struct sctp_af *af;
- struct list_head *pos;
+ struct list_head *pos, *pos1;
+ struct sctp_sockaddr_entry *saddr;
int i;
int retval = 0;
@@ -748,14 +764,42 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
goto out;
}
- retval = sctp_send_asconf(asoc, chunk);
+ /* Reset use_as_src flag for the addresses in the bind address
+ * list that are to be deleted.
+ */
+ sctp_local_bh_disable();
+ sctp_write_lock(&asoc->base.addr_lock);
+ addr_buf = addrs;
+ for (i = 0; i < addrcnt; i++) {
+ laddr = (union sctp_addr *)addr_buf;
+ af = sctp_get_af_specific(laddr->v4.sin_family);
+ memcpy(&saveaddr, laddr, af->sockaddr_len);
+ saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
+ list_for_each(pos1, &bp->address_list) {
+ saddr = list_entry(pos1,
+ struct sctp_sockaddr_entry,
+ list);
+ if (sctp_cmp_addr_exact(&saddr->a, &saveaddr))
+ saddr->use_as_src = 0;
+ }
+ addr_buf += af->sockaddr_len;
+ }
+ sctp_write_unlock(&asoc->base.addr_lock);
+ sctp_local_bh_enable();
- /* FIXME: After sending the delete address ASCONF chunk, we
- * cannot remove the addresses from the association's bind
- * address list, because there maybe some packet send to
- * the delete addresses, so we should wait until ASCONF_ACK
- * packet is received.
+ /* Update the route and saddr entries for all the transports
+ * as some of the addresses in the bind address list are
+ * about to be deleted and cannot be used as source addresses.
*/
+ list_for_each(pos1, &asoc->peer.transport_addr_list) {
+ transport = list_entry(pos1, struct sctp_transport,
+ transports);
+ dst_release(transport->dst);
+ sctp_transport_route(transport, NULL,
+ sctp_sk(asoc->base.sk));
+ }
+
+ retval = sctp_send_asconf(asoc, chunk);
}
out:
return retval;
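
The delete path above is ordered so that local state changes strictly precede the wire traffic: the to-be-deleted addresses are first marked unusable as sources, every transport then drops its cached route and re-resolves, and only after that is the ASCONF handed to the output path. A compressed sketch of that ordering (both helper names are hypothetical stand-ins for the loops shown above):

static int del_ip_ordered(struct sctp_association *asoc,
                          struct sctp_chunk *chunk)
{
        clear_use_as_src_for_deleted(asoc); /* hypothetical: the use_as_src loop */
        refresh_transport_routes(asoc);     /* hypothetical: the re-route loop */
        return sctp_send_asconf(asoc, chunk);
}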
@@ -4977,7 +5021,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
/* Caller must hold hashbucket lock for this tb with local BH disabled */
static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
{
- if (hlist_empty(&pp->owner)) {
+ if (pp && hlist_empty(&pp->owner)) {
if (pp->next)
pp->next->pprev = pp->pprev;
*(pp->pprev) = pp->next;
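
The extra pp check lets sctp_bucket_destroy() tolerate a NULL bucket, in the same spirit as kfree(NULL), so cleanup paths can call it unconditionally. A tiny equivalent sketch:

static void bucket_destroy(struct sctp_bind_bucket *pp)
{
        if (!pp || !hlist_empty(&pp->owner))
                return;
        /* ... unlink pp from its chain and free it, as above ... */
}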
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 160f62ad1cc5..2763aa93de1a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -49,6 +49,7 @@
*/
#include <linux/types.h>
+#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
@@ -85,7 +86,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
peer->init_sent_count = 0;
- peer->state = SCTP_ACTIVE;
peer->param_flags = SPP_HB_DISABLE |
SPP_PMTUD_ENABLE |
SPP_SACKDELAY_ENABLE;
@@ -109,6 +109,9 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
peer->hb_timer.function = sctp_generate_heartbeat_event;
peer->hb_timer.data = (unsigned long)peer;
+ /* Initialize the 64-bit random nonce sent with heartbeat. */
+ get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
+
atomic_set(&peer->refcnt, 1);
peer->dead = 0;
@@ -517,7 +520,9 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
unsigned long sctp_transport_timeout(struct sctp_transport *t)
{
unsigned long timeout;
- timeout = t->hbinterval + t->rto + sctp_jitter(t->rto);
+ timeout = t->rto + sctp_jitter(t->rto);
+ if (t->state != SCTP_UNCONFIRMED)
+ timeout += t->hbinterval;
timeout += jiffies;
return timeout;
}
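
In transport.c the fixed initial state assignment is dropped (presumably so callers can start a peer address as UNCONFIRMED rather than ACTIVE), the per-transport heartbeat nonce is seeded with get_random_bytes(), and the heartbeat timeout stops adding the full heartbeat interval for UNCONFIRMED transports, so unconfirmed addresses are probed on roughly an RTO-plus-jitter cadence. A sketch of the resulting timeout logic, mirroring the new sctp_transport_timeout():

unsigned long probe_timeout(const struct sctp_transport *t)
{
        unsigned long timeout = t->rto + sctp_jitter(t->rto);

        /* Only confirmed transports wait out the heartbeat interval. */
        if (t->state != SCTP_UNCONFIRMED)
                timeout += t->hbinterval;
        return timeout + jiffies;
}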
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 519ebc17c028..4a9aa9393b97 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -225,9 +225,8 @@ gss_alloc_context(void)
{
struct gss_cl_ctx *ctx;
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx != NULL) {
- memset(ctx, 0, sizeof(*ctx));
ctx->gc_proc = RPC_GSS_PROC_DATA;
ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */
spin_lock_init(&ctx->gc_seq_lock);
@@ -391,9 +390,8 @@ gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
{
struct gss_upcall_msg *gss_msg;
- gss_msg = kmalloc(sizeof(*gss_msg), GFP_KERNEL);
+ gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL);
if (gss_msg != NULL) {
- memset(gss_msg, 0, sizeof(*gss_msg));
INIT_LIST_HEAD(&gss_msg->list);
rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
init_waitqueue_head(&gss_msg->waitqueue);
@@ -776,10 +774,9 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
dprintk("RPC: gss_create_cred for uid %d, flavor %d\n",
acred->uid, auth->au_flavor);
- if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL)))
+ if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL)))
goto out_err;
- memset(cred, 0, sizeof(*cred));
atomic_set(&cred->gc_count, 1);
cred->gc_uid = acred->uid;
/*
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index b8714a87b34c..70e1e53a632b 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -129,9 +129,8 @@ gss_import_sec_context_kerberos(const void *p,
const void *end = (const void *)((const char *)p + len);
struct krb5_ctx *ctx;
- if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL)))
+ if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
goto out_err;
- memset(ctx, 0, sizeof(*ctx));
p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index d88468d21c37..3db745379d06 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -237,9 +237,8 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
struct gss_api_mech *mech,
struct gss_ctx **ctx_id)
{
- if (!(*ctx_id = kmalloc(sizeof(**ctx_id), GFP_KERNEL)))
+ if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
return GSS_S_FAILURE;
- memset(*ctx_id, 0, sizeof(**ctx_id));
(*ctx_id)->mech_type = gss_mech_get(mech);
return mech->gm_ops
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 3d0432aa45c1..88dcb52d171b 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -152,9 +152,8 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
const void *end = (const void *)((const char *)p + len);
struct spkm3_ctx *ctx;
- if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL)))
+ if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
goto out_err;
- memset(ctx, 0, sizeof(*ctx));
p = simple_get_netobj(p, end, &ctx->ctx_id);
if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index af0d7ce74686..854a983ccf26 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -90,10 +90,9 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
int
decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
{
- if (!(out->data = kmalloc(explen,GFP_KERNEL)))
+ if (!(out->data = kzalloc(explen,GFP_KERNEL)))
return 0;
out->len = explen;
- memset(out->data, 0, explen);
memcpy(out->data, in, enclen);
return 1;
}
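
The SUNRPC hunks above, and most of the remaining hunks in this patch, are a mechanical conversion: a kmalloc() that is immediately followed by a memset() of the whole object becomes a single kzalloc() with the same size and gfp flags. In decode_asn1_bitstring() the zeroing is not even redundant, since only enclen of the explen bytes are overwritten by the memcpy(), so kzalloc() keeps the tail zero-filled. A before/after sketch of the idiom with an illustrative struct:

#include <linux/slab.h>

struct foo { int a; void *b; };         /* illustrative payload */

/* Before: allocate, then zero by hand. */
static struct foo *foo_alloc_old(gfp_t gfp)
{
        struct foo *p = kmalloc(sizeof(*p), gfp);

        if (p)
                memset(p, 0, sizeof(*p));
        return p;
}

/* After: kzalloc() allocates and zeroes in one call. */
static struct foo *foo_alloc_new(gfp_t gfp)
{
        return kzalloc(sizeof(struct foo), gfp);
}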
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index aa8965e9d307..4ba271f892c8 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -125,10 +125,9 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
goto out_err;
err = -ENOMEM;
- clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
+ clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
if (!clnt)
goto out_err;
- memset(clnt, 0, sizeof(*clnt));
atomic_set(&clnt->cl_users, 0);
atomic_set(&clnt->cl_count, 1);
clnt->cl_parent = clnt;
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 15c2db26767b..bd98124c3a64 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -114,13 +114,8 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) {
*/
struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
{
- unsigned int ops = clnt->cl_maxproc;
- size_t size = ops * sizeof(struct rpc_iostats);
struct rpc_iostats *new;
-
- new = kmalloc(size, GFP_KERNEL);
- if (new)
- memset(new, 0 , size);
+ new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
return new;
}
EXPORT_SYMBOL(rpc_alloc_iostats);
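
rpc_alloc_iostats() above shows the array flavour of the same cleanup: kcalloc(n, size, gfp) replaces a hand-computed n * size kmalloc() plus memset(), returning zeroed memory and failing cleanly if the multiplication would overflow. A minimal sketch of the call shape (the wrapper name is ours):

#include <linux/slab.h>

static struct rpc_iostats *alloc_op_stats(unsigned int nops)
{
        /* One zeroed rpc_iostats per procedure, or NULL on failure/overflow. */
        return kcalloc(nops, sizeof(struct rpc_iostats), GFP_KERNEL);
}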
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 01ba60a49572..b76a227dd3ad 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -32,9 +32,8 @@ svc_create(struct svc_program *prog, unsigned int bufsize)
int vers;
unsigned int xdrsize;
- if (!(serv = kmalloc(sizeof(*serv), GFP_KERNEL)))
+ if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
return NULL;
- memset(serv, 0, sizeof(*serv));
serv->sv_name = prog->pg_name;
serv->sv_program = prog;
serv->sv_nrthreads = 1;
@@ -159,11 +158,10 @@ svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
struct svc_rqst *rqstp;
int error = -ENOMEM;
- rqstp = kmalloc(sizeof(*rqstp), GFP_KERNEL);
+ rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
if (!rqstp)
goto out;
- memset(rqstp, 0, sizeof(*rqstp));
init_waitqueue_head(&rqstp->rq_wait);
if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a27905a0ad27..d9a95732df46 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1322,11 +1322,10 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
struct sock *inet;
dprintk("svc: svc_setup_socket %p\n", sock);
- if (!(svsk = kmalloc(sizeof(*svsk), GFP_KERNEL))) {
+ if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
*errp = -ENOMEM;
return NULL;
}
- memset(svsk, 0, sizeof(*svsk));
inet = sock->sk;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 02060d0e7be8..313b68d892c6 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -908,9 +908,8 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
struct rpc_xprt *xprt;
struct rpc_rqst *req;
- if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
+ if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
return ERR_PTR(-ENOMEM);
- memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */
xprt->addr = *ap;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 21006b109101..ee678ed13b6f 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1276,10 +1276,9 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
xprt->max_reqs = xprt_udp_slot_table_entries;
slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
- xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
+ xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
if (xprt->slot == NULL)
return -ENOMEM;
- memset(xprt->slot, 0, slot_table_size);
xprt->prot = IPPROTO_UDP;
xprt->port = xs_get_random_port();
@@ -1318,10 +1317,9 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
xprt->max_reqs = xprt_tcp_slot_table_entries;
slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
- xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
+ xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
if (xprt->slot == NULL)
return -ENOMEM;
- memset(xprt->slot, 0, slot_table_size);
xprt->prot = IPPROTO_TCP;
xprt->port = xs_get_random_port();
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 7ef17a449cfd..75a5968c2139 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -665,11 +665,9 @@ int tipc_bearer_init(void)
int res;
write_lock_bh(&tipc_net_lock);
- tipc_bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC);
- media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC);
+ tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
+ media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
if (tipc_bearers && media_list) {
- memset(tipc_bearers, 0, MAX_BEARERS * sizeof(struct bearer));
- memset(media_list, 0, MAX_MEDIA * sizeof(struct media));
res = TIPC_OK;
} else {
kfree(tipc_bearers);
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 1dcb6940e338..b46b5188a9fd 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -57,29 +57,25 @@ struct cluster *tipc_cltr_create(u32 addr)
struct _zone *z_ptr;
struct cluster *c_ptr;
int max_nodes;
- int alloc;
- c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
+ c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC);
if (c_ptr == NULL) {
warn("Cluster creation failure, no memory\n");
return NULL;
}
- memset(c_ptr, 0, sizeof(*c_ptr));
c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
if (in_own_cluster(addr))
max_nodes = LOWEST_SLAVE + tipc_max_slaves;
else
max_nodes = tipc_max_nodes + 1;
- alloc = sizeof(void *) * (max_nodes + 1);
- c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
+ c_ptr->nodes = kcalloc(max_nodes + 1, sizeof(void*), GFP_ATOMIC);
if (c_ptr->nodes == NULL) {
warn("Cluster creation failure, no memory for node area\n");
kfree(c_ptr);
return NULL;
}
- memset(c_ptr->nodes, 0, alloc);
if (in_own_cluster(addr))
tipc_local_nodes = c_ptr->nodes;
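
The TIPC hunks also drop two bits of noise along the way: the explicit casts on allocator return values (void * converts implicitly in C) and the intermediate alloc/sz size variables, which kcalloc() makes unnecessary. A before/after sketch using the node table from tipc_cltr_create() above:

#include <linux/slab.h>

struct node;    /* opaque here; only pointers to it are stored */

static struct node **alloc_node_table_old(int max_nodes)
{
        int alloc = sizeof(void *) * (max_nodes + 1);
        struct node **tbl = (struct node **)kmalloc(alloc, GFP_ATOMIC);

        if (tbl)
                memset(tbl, 0, alloc);
        return tbl;
}

static struct node **alloc_node_table_new(int max_nodes)
{
        return kcalloc(max_nodes + 1, sizeof(struct node *), GFP_ATOMIC);
}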
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 2b8441203120..ee94de92ae99 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -295,7 +295,7 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
{
struct link_req *req;
- req = (struct link_req *)kmalloc(sizeof(*req), GFP_ATOMIC);
+ req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
return NULL;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index c10e18a49b96..693f02eca6d6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -417,12 +417,11 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
struct tipc_msg *msg;
char *if_name;
- l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
+ l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
if (!l_ptr) {
warn("Link creation failed, no memory\n");
return NULL;
}
- memset(l_ptr, 0, sizeof(*l_ptr));
l_ptr->addr = peer;
if_name = strchr(b_ptr->publ.name, ':') + 1;
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index a6926ff07bcc..049242ea5c38 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -117,14 +117,12 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
u32 scope, u32 node, u32 port_ref,
u32 key)
{
- struct publication *publ =
- (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
+ struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
if (publ == NULL) {
warn("Publication creation failure, no memory\n");
return NULL;
}
- memset(publ, 0, sizeof(*publ));
publ->type = type;
publ->lower = lower;
publ->upper = upper;
@@ -144,11 +142,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
static struct sub_seq *tipc_subseq_alloc(u32 cnt)
{
- u32 sz = cnt * sizeof(struct sub_seq);
- struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC);
-
- if (sseq)
- memset(sseq, 0, sz);
+ struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
return sseq;
}
@@ -160,8 +154,7 @@ static struct sub_seq *tipc_subseq_alloc(u32 cnt)
static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
{
- struct name_seq *nseq =
- (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC);
+ struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
struct sub_seq *sseq = tipc_subseq_alloc(1);
if (!nseq || !sseq) {
@@ -171,7 +164,6 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
return NULL;
}
- memset(nseq, 0, sizeof(*nseq));
spin_lock_init(&nseq->lock);
nseq->type = type;
nseq->sseqs = sseq;
@@ -1060,7 +1052,7 @@ int tipc_nametbl_init(void)
{
int array_size = sizeof(struct hlist_head) * tipc_nametbl_size;
- table.types = (struct hlist_head *)kmalloc(array_size, GFP_ATOMIC);
+ table.types = kmalloc(array_size, GFP_ATOMIC);
if (!table.types)
return -ENOMEM;
diff --git a/net/tipc/net.c b/net/tipc/net.c
index e5a359ab4930..a991bf8a7f74 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -160,14 +160,11 @@ void tipc_net_send_external_routes(u32 dest)
static int net_init(void)
{
- u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1);
-
memset(&tipc_net, 0, sizeof(tipc_net));
- tipc_net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC);
+ tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
if (!tipc_net.zones) {
return -ENOMEM;
}
- memset(tipc_net.zones, 0, sz);
return TIPC_OK;
}
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 3251c8d8e53c..b9c8c6b9e94f 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -226,12 +226,11 @@ u32 tipc_createport_raw(void *usr_handle,
struct tipc_msg *msg;
u32 ref;
- p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC);
+ p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
if (!p_ptr) {
warn("Port creation failed, no memory\n");
return 0;
}
- memset(p_ptr, 0, sizeof(*p_ptr));
ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
if (!ref) {
warn("Port creation failed, reference table exhausted\n");
@@ -1058,7 +1057,7 @@ int tipc_createport(u32 user_ref,
struct port *p_ptr;
u32 ref;
- up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
+ up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
if (!up_ptr) {
warn("Port creation failed, no memory\n");
return -ENOMEM;
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 596d3c8ff750..e6d6ae22ea49 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -79,7 +79,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
while (sz < requested_size) {
sz <<= 1;
}
- table = (struct reference *)vmalloc(sz * sizeof(struct reference));
+ table = vmalloc(sz * sizeof(*table));
if (table == NULL)
return -ENOMEM;
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index e19b4bcd67ec..c51600ba5f4a 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -393,12 +393,11 @@ static void subscr_named_msg_event(void *usr_handle,
/* Create subscriber object */
- subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC);
+ subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC);
if (subscriber == NULL) {
warn("Subscriber rejected, no memory\n");
return;
}
- memset(subscriber, 0, sizeof(struct subscriber));
INIT_LIST_HEAD(&subscriber->subscription_list);
INIT_LIST_HEAD(&subscriber->subscriber_list);
subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 1e3ae57c7228..04d1b9be9c51 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -82,9 +82,8 @@ static int reg_init(void)
spin_lock_bh(&reg_lock);
if (!users) {
- users = (struct tipc_user *)kmalloc(USER_LIST_SIZE, GFP_ATOMIC);
+ users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC);
if (users) {
- memset(users, 0, USER_LIST_SIZE);
for (i = 1; i <= MAX_USERID; i++) {
users[i].next = i - 1;
}
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index 316c4872ff5b..f5b00ea2d5ac 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -52,13 +52,12 @@ struct _zone *tipc_zone_create(u32 addr)
return NULL;
}
- z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
+ z_ptr = kzalloc(sizeof(*z_ptr), GFP_ATOMIC);
if (!z_ptr) {
warn("Zone creation failed, insufficient memory\n");
return NULL;
}
- memset(z_ptr, 0, sizeof(*z_ptr));
z_num = tipc_zone(addr);
z_ptr->addr = tipc_addr(z_num, 0, 0);
tipc_net.zones[z_num] = z_ptr;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f70475bfb62a..6f2909279268 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -663,11 +663,10 @@ static int unix_autobind(struct socket *sock)
goto out;
err = -ENOMEM;
- addr = kmalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
+ addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
if (!addr)
goto out;
- memset(addr, 0, sizeof(*addr) + sizeof(short) + 16);
addr->name->sun_family = AF_UNIX;
atomic_set(&addr->refcnt, 1);
diff --git a/net/wanrouter/af_wanpipe.c b/net/wanrouter/af_wanpipe.c
index a690cf773b6a..6f39faa15832 100644
--- a/net/wanrouter/af_wanpipe.c
+++ b/net/wanrouter/af_wanpipe.c
@@ -370,12 +370,11 @@ static int wanpipe_listen_rcv (struct sk_buff *skb, struct sock *sk)
* used by the ioctl call to read call information
* and to execute commands.
*/
- if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) {
+ if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) {
wanpipe_kill_sock_irq (newsk);
release_device(dev);
return -ENOMEM;
}
- memset(mbox_ptr, 0, sizeof(mbox_cmd_t));
memcpy(mbox_ptr,skb->data,skb->len);
/* Register the lcn on which incoming call came
@@ -507,11 +506,10 @@ static struct sock *wanpipe_alloc_socket(void)
if ((sk = sk_alloc(PF_WANPIPE, GFP_ATOMIC, &wanpipe_proto, 1)) == NULL)
return NULL;
- if ((wan_opt = kmalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) {
+ if ((wan_opt = kzalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) {
sk_free(sk);
return NULL;
}
- memset(wan_opt, 0x00, sizeof(struct wanpipe_opt));
wp_sk(sk) = wan_opt;
@@ -2011,10 +2009,9 @@ static int set_ioctl_cmd (struct sock *sk, void *arg)
dev_put(dev);
- if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL)
+ if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL)
return -ENOMEM;
- memset(mbox_ptr, 0, sizeof(mbox_cmd_t));
wp_sk(sk)->mbox = mbox_ptr;
wanpipe_link_driver(dev,sk);
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index ad8e8a797790..9479659277ae 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -642,18 +642,16 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
if (cnf->config_id == WANCONFIG_MPPP) {
#ifdef CONFIG_WANPIPE_MULTPPP
- pppdev = kmalloc(sizeof(struct ppp_device), GFP_KERNEL);
+ pppdev = kzalloc(sizeof(struct ppp_device), GFP_KERNEL);
err = -ENOBUFS;
if (pppdev == NULL)
goto out;
- memset(pppdev, 0, sizeof(struct ppp_device));
- pppdev->dev = kmalloc(sizeof(struct net_device), GFP_KERNEL);
+ pppdev->dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
if (pppdev->dev == NULL) {
kfree(pppdev);
err = -ENOBUFS;
goto out;
}
- memset(pppdev->dev, 0, sizeof(struct net_device));
err = wandev->new_if(wandev, (struct net_device *)pppdev, cnf);
dev = pppdev->dev;
#else
@@ -663,11 +661,10 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
goto out;
#endif
} else {
- dev = kmalloc(sizeof(struct net_device), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
err = -ENOBUFS;
if (dev == NULL)
goto out;
- memset(dev, 0, sizeof(struct net_device));
err = wandev->new_if(wandev, dev, cnf);
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 405b741dff43..f35bc676128c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -307,10 +307,9 @@ struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
struct xfrm_policy *policy;
- policy = kmalloc(sizeof(struct xfrm_policy), gfp);
+ policy = kzalloc(sizeof(struct xfrm_policy), gfp);
if (policy) {
- memset(policy, 0, sizeof(struct xfrm_policy));
atomic_set(&policy->refcnt, 1);
rwlock_init(&policy->lock);
init_timer(&policy->timer);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 43f00fc28a3d..0021aad5db43 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -194,10 +194,9 @@ struct xfrm_state *xfrm_state_alloc(void)
{
struct xfrm_state *x;
- x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
+ x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
if (x) {
- memset(x, 0, sizeof(struct xfrm_state));
atomic_set(&x->refcnt, 1);
atomic_set(&x->tunnel_users, 0);
INIT_LIST_HEAD(&x->bydst);