author    James Morris <james.l.morris@oracle.com>  2012-05-04 12:46:40 +1000
committer James Morris <james.l.morris@oracle.com>  2012-05-04 12:46:40 +1000
commit    898bfc1d46bd76f8ea2a0fbd239dd2073efe2aa3 (patch)
tree      e6e666085abe674dbf6292555961fe0a0f2e2d2f /net/core
parent    08162e6a23d476544adfe1164afe9ea8b34ab859 (diff)
parent    69964ea4c7b68c9399f7977aa5b9aa6539a6a98a (diff)
Merge tag 'v3.4-rc5' into next
Linux 3.4-rc5

Merge to pull in prerequisite change for Smack:
86812bb0de1a3758dc6c7aa01a763158a7c0638a

Requested by Casey.
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c            20
-rw-r--r--  net/core/drop_monitor.c    1
-rw-r--r--  net/core/net_namespace.c  33
-rw-r--r--  net/core/skbuff.c          4
4 files changed, 42 insertions, 16 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index c25d453b2803..9bb8f87c4cda 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1409,14 +1409,34 @@ EXPORT_SYMBOL(register_netdevice_notifier);
  * register_netdevice_notifier(). The notifier is unlinked from the
  * kernel structures and may then be reused. A negative errno code
  * is returned on a failure.
+ *
+ * After unregistering, unregister and down device events are synthesized
+ * for all devices on the device list and sent to the removed notifier,
+ * removing the need for special-case cleanup code.
  */
 int unregister_netdevice_notifier(struct notifier_block *nb)
 {
+        struct net_device *dev;
+        struct net *net;
         int err;
 
         rtnl_lock();
         err = raw_notifier_chain_unregister(&netdev_chain, nb);
+        if (err)
+                goto unlock;
+
+        for_each_net(net) {
+                for_each_netdev(net, dev) {
+                        if (dev->flags & IFF_UP) {
+                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
+                                nb->notifier_call(nb, NETDEV_DOWN, dev);
+                        }
+                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
+                        nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
+                }
+        }
+unlock:
         rtnl_unlock();
         return err;
 }
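
With this change, dropping a notifier also replays NETDEV_GOING_DOWN/NETDEV_DOWN for running devices and NETDEV_UNREGISTER for every device, so a module's exit path no longer needs to walk the device list itself. A minimal sketch of a module that relies on this behavior; the names example_netdev_event and example_nb are hypothetical, and ptr carries the net_device directly per the v3.4-era notifier convention:

#include <linux/module.h>
#include <linux/netdevice.h>

static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;   /* v3.4-era: ptr is the device */

        switch (event) {
        case NETDEV_UP:
                pr_info("%s is up\n", dev->name);
                break;
        case NETDEV_DOWN:
        case NETDEV_UNREGISTER:
                /* Per-device teardown goes here.  On module unload the
                 * synthesized events from unregister_netdevice_notifier()
                 * run this path for every remaining device, so the exit
                 * handler needs no for_each_netdev() walk of its own. */
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
        .notifier_call = example_netdev_event,
};

static int __init example_init(void)
{
        return register_netdevice_notifier(&example_nb);
}

static void __exit example_exit(void)
{
        /* Replays GOING_DOWN/DOWN for up devices, UNREGISTER for all. */
        unregister_netdevice_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");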
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 7f36b38e060f..5c3c81a609e5 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -150,6 +150,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
         for (i = 0; i < msg->entries; i++) {
                 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
                         msg->points[i].count++;
+                        atomic_inc(&data->dm_hit_count);
                         goto out;
                 }
         }
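
The hunk above fixes an undercount: dm_hit_count was bumped only when a new drop point was recorded, not when an already-tracked location was hit again. A standalone userspace sketch (hypothetical names, a plain counter instead of an atomic) of the intended counting pattern:

#include <stdio.h>
#include <string.h>

struct point {
        void *pc;               /* program counter of the drop site */
        unsigned int count;
};

static struct point points[64];
static unsigned int entries;
static unsigned int hit_count;  /* plays the role of dm_hit_count */

static void record_drop(void *location)
{
        unsigned int i;

        for (i = 0; i < entries; i++) {
                if (!memcmp(&location, &points[i].pc, sizeof(void *))) {
                        points[i].count++;
                        hit_count++;    /* the step the fix adds */
                        return;
                }
        }
        if (entries < 64) {
                points[entries].pc = location;
                points[entries].count = 1;
                entries++;
                hit_count++;
        }
}

int main(void)
{
        int here;

        record_drop(&here);
        record_drop(&here);     /* same location: both hits must count */
        printf("hits=%u entries=%u\n", hit_count, entries);
        return 0;
}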
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 0e950fda9a0a..31a5ae51a45c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -83,21 +83,29 @@ assign:
 static int ops_init(const struct pernet_operations *ops, struct net *net)
 {
-        int err;
+        int err = -ENOMEM;
+        void *data = NULL;
+
         if (ops->id && ops->size) {
-                void *data = kzalloc(ops->size, GFP_KERNEL);
+                data = kzalloc(ops->size, GFP_KERNEL);
                 if (!data)
-                        return -ENOMEM;
+                        goto out;
 
                 err = net_assign_generic(net, *ops->id, data);
-                if (err) {
-                        kfree(data);
-                        return err;
-                }
+                if (err)
+                        goto cleanup;
         }
+        err = 0;
         if (ops->init)
-                return ops->init(net);
-        return 0;
+                err = ops->init(net);
+        if (!err)
+                return 0;
+
+cleanup:
+        kfree(data);
+
+out:
+        return err;
 }
static void ops_free(const struct pernet_operations *ops, struct net *net)
@@ -448,12 +456,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
 static int __register_pernet_operations(struct list_head *list,
                                         struct pernet_operations *ops)
 {
-        int err = 0;
-        err = ops_init(ops, &init_net);
-        if (err)
-                ops_free(ops, &init_net);
-        return err;
-
+        return ops_init(ops, &init_net);
 }
static void __unregister_pernet_operations(struct pernet_operations *ops)
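
The ops_init() rewrite above consolidates the error paths into the kernel's usual goto-cleanup ladder, which is why the non-namespace __register_pernet_operations() can collapse to a single call: ops_init() now frees its own allocation on failure. A freestanding sketch of the same idiom, with hypothetical names (thing_init, thing_register) and userspace stand-ins for kzalloc()/kfree():

#include <stdlib.h>

struct thing {
        int *buf;
        size_t len;
};

/* Hypothetical second init step that may fail. */
static int thing_register(struct thing *t)
{
        return t->len ? 0 : -1;
}

static int thing_init(struct thing *t, size_t n)
{
        int err = -1;           /* stands in for -ENOMEM */
        int *data = NULL;

        if (n) {
                data = calloc(n, sizeof(*data));  /* kzalloc() analogue */
                if (!data)
                        goto out;
        }

        t->buf = data;
        t->len = n;

        err = thing_register(t);
        if (err)
                goto cleanup;

        return 0;

cleanup:
        free(data);             /* free(NULL) is a no-op, like kfree() */
out:
        return err;
}

int main(void)
{
        struct thing t;
        int err = thing_init(&t, 16);

        if (!err)
                free(t.buf);
        return err ? 1 : 0;
}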
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index baf8d281152c..e59840010d45 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -952,9 +952,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                 goto adjust_others;
         }
 
-        data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+        data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+                       gfp_mask);
         if (!data)
                 goto nodata;
+        size = SKB_WITH_OVERHEAD(ksize(data));
 
         /* Copy only real data... and, alas, header. This should be
          * optimized for the cases when header is void.
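
The skbuff.c hunk rounds the skb_shared_info footprint up to a cache-line boundary before adding it to the allocation, then uses ksize() to hand any slab round-up slack back to the data area via SKB_WITH_OVERHEAD(). A userspace sketch of that arithmetic; CACHE_LINE, ALIGN_TO, and the 320-byte shared-info size are assumptions standing in for SMP_CACHE_BYTES, SKB_DATA_ALIGN(), and sizeof(struct skb_shared_info):

#include <stdio.h>

#define CACHE_LINE 64UL
/* Round x up to the next CACHE_LINE boundary, like SKB_DATA_ALIGN(). */
#define ALIGN_TO(x) (((x) + (CACHE_LINE - 1)) & ~(CACHE_LINE - 1))

int main(void)
{
        unsigned long shared_info_size = 320;   /* assumed, arch-dependent */
        unsigned long size = 1500;              /* requested data bytes */

        /* Allocate enough that shared info starts cache-aligned. */
        unsigned long alloc = size + ALIGN_TO(shared_info_size);

        /* The allocator rounds up to a slab size; SKB_WITH_OVERHEAD()
         * subtracts the aligned shared-info footprint from ksize(), so
         * the round-up slack becomes usable data space. */
        unsigned long ksize = 2048;             /* e.g. kmalloc-2048 slab */
        unsigned long usable = ksize - ALIGN_TO(shared_info_size);

        printf("requested %lu, usable %lu of %lu\n", alloc, usable, ksize);
        return 0;
}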