Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c      |   2
-rw-r--r--  net/core/dev.c           | 383
-rw-r--r--  net/core/ethtool.c       |  21
-rw-r--r--  net/core/filter.c        | 514
-rw-r--r--  net/core/neighbour.c     |   1
-rw-r--r--  net/core/net-sysfs.c     | 430
-rw-r--r--  net/core/net-sysfs.h     |   4
-rw-r--r--  net/core/netpoll.c       |   4
-rw-r--r--  net/core/pktgen.c        |  44
-rw-r--r--  net/core/request_sock.c  |   1
-rw-r--r--  net/core/rtnetlink.c     | 166
-rw-r--r--  net/core/scm.c           |  10
-rw-r--r--  net/core/skbuff.c        |  36
-rw-r--r--  net/core/sock.c          |  13
-rw-r--r--  net/core/timestamping.c  |   4
15 files changed, 1200 insertions(+), 433 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index cd1e039c8755..18ac112ea7ae 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -177,7 +177,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
* interrupt level will suddenly eat the receive_queue.
*
* Look at current nfs client by the way...
- * However, this function was corrent in any case. 8)
+ * However, this function was correct in any case. 8)
*/
unsigned long cpu_flags;
diff --git a/net/core/dev.c b/net/core/dev.c
index 0dd54a69dace..a215269d2e35 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -743,34 +743,31 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
EXPORT_SYMBOL(dev_get_by_index);
/**
- * dev_getbyhwaddr - find a device by its hardware address
+ * dev_getbyhwaddr_rcu - find a device by its hardware address
* @net: the applicable net namespace
* @type: media type of device
* @ha: hardware address
*
* Search for an interface by MAC address. Returns NULL if the device
- * is not found or a pointer to the device. The caller must hold the
- * rtnl semaphore. The returned device has not had its ref count increased
+ * is not found or a pointer to the device. The caller must hold RCU.
+ * The returned device has not had its ref count increased
* and the caller must therefore be careful about locking
*
- * BUGS:
- * If the API was consistent this would be __dev_get_by_hwaddr
*/
-struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
+struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+ const char *ha)
{
struct net_device *dev;
- ASSERT_RTNL();
-
- for_each_netdev(net, dev)
+ for_each_netdev_rcu(net, dev)
if (dev->type == type &&
!memcmp(dev->dev_addr, ha, dev->addr_len))
return dev;
return NULL;
}
-EXPORT_SYMBOL(dev_getbyhwaddr);
+EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
@@ -1225,52 +1222,90 @@ int dev_open(struct net_device *dev)
}
EXPORT_SYMBOL(dev_open);
-static int __dev_close(struct net_device *dev)
+static int __dev_close_many(struct list_head *head)
{
- const struct net_device_ops *ops = dev->netdev_ops;
+ struct net_device *dev;
ASSERT_RTNL();
might_sleep();
- /*
- * Tell people we are going down, so that they can
- * prepare to death, when device is still operating.
- */
- call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
+ list_for_each_entry(dev, head, unreg_list) {
+ /*
+ * Tell people we are going down, so that they can
+ * prepare to death, when device is still operating.
+ */
+ call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
- clear_bit(__LINK_STATE_START, &dev->state);
+ clear_bit(__LINK_STATE_START, &dev->state);
- /* Synchronize to scheduled poll. We cannot touch poll list,
- * it can be even on different cpu. So just clear netif_running().
- *
- * dev->stop() will invoke napi_disable() on all of it's
- * napi_struct instances on this device.
- */
- smp_mb__after_clear_bit(); /* Commit netif_running(). */
+ /* Synchronize to scheduled poll. We cannot touch poll list, it
+ * can be even on different cpu. So just clear netif_running().
+ *
+ * dev->stop() will invoke napi_disable() on all of it's
+ * napi_struct instances on this device.
+ */
+ smp_mb__after_clear_bit(); /* Commit netif_running(). */
+ }
- dev_deactivate(dev);
+ dev_deactivate_many(head);
- /*
- * Call the device specific close. This cannot fail.
- * Only if device is UP
- *
- * We allow it to be called even after a DETACH hot-plug
- * event.
- */
- if (ops->ndo_stop)
- ops->ndo_stop(dev);
+ list_for_each_entry(dev, head, unreg_list) {
+ const struct net_device_ops *ops = dev->netdev_ops;
- /*
- * Device is now down.
- */
+ /*
+ * Call the device specific close. This cannot fail.
+ * Only if device is UP
+ *
+ * We allow it to be called even after a DETACH hot-plug
+ * event.
+ */
+ if (ops->ndo_stop)
+ ops->ndo_stop(dev);
+
+ /*
+ * Device is now down.
+ */
+
+ dev->flags &= ~IFF_UP;
+
+ /*
+ * Shutdown NET_DMA
+ */
+ net_dmaengine_put();
+ }
+
+ return 0;
+}
+
+static int __dev_close(struct net_device *dev)
+{
+ LIST_HEAD(single);
+
+ list_add(&dev->unreg_list, &single);
+ return __dev_close_many(&single);
+}
+
+int dev_close_many(struct list_head *head)
+{
+ struct net_device *dev, *tmp;
+ LIST_HEAD(tmp_list);
+
+ list_for_each_entry_safe(dev, tmp, head, unreg_list)
+ if (!(dev->flags & IFF_UP))
+ list_move(&dev->unreg_list, &tmp_list);
- dev->flags &= ~IFF_UP;
+ __dev_close_many(head);
/*
- * Shutdown NET_DMA
+ * Tell people we are down
*/
- net_dmaengine_put();
+ list_for_each_entry(dev, head, unreg_list) {
+ rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+ call_netdevice_notifiers(NETDEV_DOWN, dev);
+ }
+ /* rollback_registered_many needs the complete original list */
+ list_splice(&tmp_list, head);
return 0;
}
@@ -1285,16 +1320,10 @@ static int __dev_close(struct net_device *dev)
*/
int dev_close(struct net_device *dev)
{
- if (!(dev->flags & IFF_UP))
- return 0;
-
- __dev_close(dev);
+ LIST_HEAD(single);
- /*
- * Tell people we are down
- */
- rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
- call_netdevice_notifiers(NETDEV_DOWN, dev);
+ list_add(&dev->unreg_list, &single);
+ dev_close_many(&single);
return 0;
}
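
The list-based close path exists so that batched unregister can deactivate many devices with a single synchronization in dev_deactivate_many(). A sketch of a hypothetical batched caller (RTNL held, as __dev_close_many() asserts):

	LIST_HEAD(close_list);

	list_add(&dev_a->unreg_list, &close_list);
	list_add(&dev_b->unreg_list, &close_list);
	dev_close_many(&close_list);	/* devices not IFF_UP are skipped */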
@@ -1499,6 +1528,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
+static inline int deliver_skb(struct sk_buff *skb,
+ struct packet_type *pt_prev,
+ struct net_device *orig_dev)
+{
+ atomic_inc(&skb->users);
+ return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+}
+
/*
* Support routine. Sends outgoing frames to any network
* taps currently in use.
@@ -1507,13 +1544,8 @@ EXPORT_SYMBOL_GPL(dev_forward_skb);
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
struct packet_type *ptype;
-
-#ifdef CONFIG_NET_CLS_ACT
- if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
- net_timestamp_set(skb);
-#else
- net_timestamp_set(skb);
-#endif
+ struct sk_buff *skb2 = NULL;
+ struct packet_type *pt_prev = NULL;
rcu_read_lock();
list_for_each_entry_rcu(ptype, &ptype_all, list) {
@@ -1523,10 +1555,18 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
if ((ptype->dev == dev || !ptype->dev) &&
(ptype->af_packet_priv == NULL ||
(struct sock *)ptype->af_packet_priv != skb->sk)) {
- struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (pt_prev) {
+ deliver_skb(skb2, pt_prev, skb->dev);
+ pt_prev = ptype;
+ continue;
+ }
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2)
break;
+ net_timestamp_set(skb2);
+
/* skb->nh should be correctly
set by sender, so that the second statement is
just protection against buggy protocols.
@@ -1545,9 +1585,11 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
skb2->transport_header = skb2->network_header;
skb2->pkt_type = PACKET_OUTGOING;
- ptype->func(skb2, skb->dev, ptype, skb->dev);
+ pt_prev = ptype;
}
}
+ if (pt_prev)
+ pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
rcu_read_unlock();
}
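
The rewrite borrows the pt_prev idiom from the receive path: delivery to a tap is deferred until the next match proves another consumer exists, so the skb is cloned once, the timestamp is taken on the clone only, and the final tap consumes the clone without an extra atomic_inc()/kfree_skb() pair. The idiom in miniature, with wants_packet() standing in as a hypothetical match predicate:

	struct packet_type *ptype, *pt_prev = NULL;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!wants_packet(ptype, skb))
			continue;
		if (pt_prev)
			deliver_skb(skb2, pt_prev, skb->dev); /* bumps skb2->users */
		pt_prev = ptype;
	}
	if (pt_prev)	/* last tap: hand over our reference directly */
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);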
@@ -1557,12 +1599,19 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
*/
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
+ int rc;
+
if (txq < 1 || txq > dev->num_tx_queues)
return -EINVAL;
if (dev->reg_state == NETREG_REGISTERED) {
ASSERT_RTNL();
+ rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
+ txq);
+ if (rc)
+ return rc;
+
if (txq < dev->real_num_tx_queues)
qdisc_reset_all_tx_gt(dev, txq);
}
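
A typical driver-side use of the updated helper, sketched under the assumption that the device is already registered (so RTNL must be held); on failure the tx-queue kobjects are left untouched:

	err = netif_set_real_num_tx_queues(dev, num_online_cpus());
	if (err)
		goto err_queues;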
@@ -1757,7 +1806,7 @@ int skb_checksum_help(struct sk_buff *skb)
goto out_set_summed;
}
- offset = skb->csum_start - skb_headroom(skb);
+ offset = skb_checksum_start_offset(skb);
BUG_ON(offset >= skb_headlen(skb));
csum = skb_checksum(skb, offset, skb->len - offset, 0);
@@ -1794,16 +1843,18 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_type *ptype;
__be16 type = skb->protocol;
+ int vlan_depth = ETH_HLEN;
int err;
- if (type == htons(ETH_P_8021Q)) {
- struct vlan_ethhdr *veh;
+ while (type == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vh;
- if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+ if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
return ERR_PTR(-EINVAL);
- veh = (struct vlan_ethhdr *)skb->data;
- type = veh->h_vlan_encapsulated_proto;
+ vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+ type = vh->h_vlan_encapsulated_proto;
+ vlan_depth += VLAN_HLEN;
}
skb_reset_mac_header(skb);
@@ -1817,8 +1868,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
dev->ethtool_ops->get_drvinfo(dev, &info);
- WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
- "ip_summed=%d",
+ WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
info.driver, dev ? dev->features : 0L,
skb->sk ? skb->sk->sk_route_caps : 0L,
skb->len, skb->data_len, skb->ip_summed);
@@ -1967,6 +2017,23 @@ static inline void skb_orphan_try(struct sk_buff *skb)
}
}
+int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+{
+ __be16 protocol = skb->protocol;
+
+ if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+ } else if (!skb->vlan_tci)
+ return dev->features;
+
+ if (protocol != htons(ETH_P_8021Q))
+ return dev->features & dev->vlan_features;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(netif_get_vlan_features);
+
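
A sketch of the intended use, with can_gso_skb() as a hypothetical caller asking which offload flags still apply once VLAN tagging (in-band header or out-of-band tci) is taken into account:

	static bool can_gso_skb(struct sk_buff *skb, struct net_device *dev)
	{
		int features = netif_get_vlan_features(skb, dev);

		return (features & NETIF_F_GSO_MASK) != 0;
	}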
/*
* Returns true if either:
* 1. skb has frag_list and the device doesn't support FRAGLIST, or
@@ -1977,15 +2044,20 @@ static inline void skb_orphan_try(struct sk_buff *skb)
static inline int skb_needs_linearize(struct sk_buff *skb,
struct net_device *dev)
{
- int features = dev->features;
+ if (skb_is_nonlinear(skb)) {
+ int features = dev->features;
- if (skb->protocol == htons(ETH_P_8021Q) || vlan_tx_tag_present(skb))
- features &= dev->vlan_features;
+ if (vlan_tx_tag_present(skb))
+ features &= dev->vlan_features;
- return skb_is_nonlinear(skb) &&
- ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
- (skb_shinfo(skb)->nr_frags && (!(features & NETIF_F_SG) ||
- illegal_highdma(dev, skb))));
+ return (skb_has_frag_list(skb) &&
+ !(features & NETIF_F_FRAGLIST)) ||
+ (skb_shinfo(skb)->nr_frags &&
+ (!(features & NETIF_F_SG) ||
+ illegal_highdma(dev, skb)));
+ }
+
+ return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -1995,9 +2067,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
int rc = NETDEV_TX_OK;
if (likely(!skb->next)) {
- if (!list_empty(&ptype_all))
- dev_queue_xmit_nit(skb, dev);
-
/*
* If device doesnt need skb->dst, release it right now while
* its hot in this cpu cache
@@ -2005,6 +2074,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(skb);
+ if (!list_empty(&ptype_all))
+ dev_queue_xmit_nit(skb, dev);
+
skb_orphan_try(skb);
if (vlan_tx_tag_present(skb) &&
@@ -2031,8 +2103,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
* checksumming here.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- skb_set_transport_header(skb, skb->csum_start -
- skb_headroom(skb));
+ skb_set_transport_header(skb,
+ skb_checksum_start_offset(skb));
if (!dev_can_checksum(dev, skb) &&
skb_checksum_help(skb))
goto out_kfree_skb;
@@ -2085,14 +2157,19 @@ out:
static u32 hashrnd __read_mostly;
-u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
+/*
+ * Returns a Tx hash based on the given packet descriptor and the number
+ * of Tx queues to be used as a distribution range.
+ */
+u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+ unsigned int num_tx_queues)
{
u32 hash;
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
- while (unlikely(hash >= dev->real_num_tx_queues))
- hash -= dev->real_num_tx_queues;
+ while (unlikely(hash >= num_tx_queues))
+ hash -= num_tx_queues;
return hash;
}
@@ -2102,9 +2179,9 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
hash = (__force u16) skb->protocol ^ skb->rxhash;
hash = jhash_1word(hash, hashrnd);
- return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
+ return (u16) (((u64) hash * num_tx_queues) >> 32);
}
-EXPORT_SYMBOL(skb_tx_hash);
+EXPORT_SYMBOL(__skb_tx_hash);
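
The old entry point presumably survives as a thin wrapper; a sketch of the companion inline (assumed to live in include/linux/netdevice.h as part of the same series):

	static inline u16 skb_tx_hash(const struct net_device *dev,
				      const struct sk_buff *skb)
	{
		return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
	}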
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
@@ -2119,26 +2196,70 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
return queue_index;
}
+static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+ struct xps_dev_maps *dev_maps;
+ struct xps_map *map;
+ int queue_index = -1;
+
+ rcu_read_lock();
+ dev_maps = rcu_dereference(dev->xps_maps);
+ if (dev_maps) {
+ map = rcu_dereference(
+ dev_maps->cpu_map[raw_smp_processor_id()]);
+ if (map) {
+ if (map->len == 1)
+ queue_index = map->queues[0];
+ else {
+ u32 hash;
+ if (skb->sk && skb->sk->sk_hash)
+ hash = skb->sk->sk_hash;
+ else
+ hash = (__force u16) skb->protocol ^
+ skb->rxhash;
+ hash = jhash_1word(hash, hashrnd);
+ queue_index = map->queues[
+ ((u64)hash * map->len) >> 32];
+ }
+ if (unlikely(queue_index >= dev->real_num_tx_queues))
+ queue_index = -1;
+ }
+ }
+ rcu_read_unlock();
+
+ return queue_index;
+#else
+ return -1;
+#endif
+}
+
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
struct sk_buff *skb)
{
int queue_index;
const struct net_device_ops *ops = dev->netdev_ops;
- if (ops->ndo_select_queue) {
+ if (dev->real_num_tx_queues == 1)
+ queue_index = 0;
+ else if (ops->ndo_select_queue) {
queue_index = ops->ndo_select_queue(dev, skb);
queue_index = dev_cap_txqueue(dev, queue_index);
} else {
struct sock *sk = skb->sk;
queue_index = sk_tx_queue_get(sk);
- if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
- queue_index = 0;
- if (dev->real_num_tx_queues > 1)
+ if (queue_index < 0 || skb->ooo_okay ||
+ queue_index >= dev->real_num_tx_queues) {
+ int old_index = queue_index;
+
+ queue_index = get_xps_queue(dev, skb);
+ if (queue_index < 0)
queue_index = skb_tx_hash(dev, skb);
- if (sk) {
- struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
+ if (queue_index != old_index && sk) {
+ struct dst_entry *dst =
+ rcu_dereference_check(sk->sk_dst_cache, 1);
if (dst && skb_dst(skb) == dst)
sk_tx_queue_set(sk, queue_index);
@@ -2712,14 +2833,6 @@ static void net_tx_action(struct softirq_action *h)
}
}
-static inline int deliver_skb(struct sk_buff *skb,
- struct packet_type *pt_prev,
- struct net_device *orig_dev)
-{
- atomic_inc(&skb->users);
- return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
-}
-
#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
(defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
@@ -4887,10 +5000,12 @@ static void rollback_registered_many(struct list_head *head)
}
BUG_ON(dev->reg_state != NETREG_REGISTERED);
+ }
- /* If device is running, close it first. */
- dev_close(dev);
+ /* If device is running, close it first. */
+ dev_close_many(head);
+ list_for_each_entry(dev, head, unreg_list) {
/* And unlink it from device chain. */
unlist_netdevice(dev);
@@ -4967,10 +5082,13 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
}
if (features & NETIF_F_UFO) {
- if (!(features & NETIF_F_GEN_CSUM)) {
+ /* maybe split UFO into V4 and V6? */
+ if (!((features & NETIF_F_GEN_CSUM) ||
+ (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
+ == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
if (name)
printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
- "since no NETIF_F_HW_CSUM feature.\n",
+ "since no checksum offload features.\n",
name);
features &= ~NETIF_F_UFO;
}
@@ -5014,9 +5132,9 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
+#ifdef CONFIG_RPS
static int netif_alloc_rx_queues(struct net_device *dev)
{
-#ifdef CONFIG_RPS
unsigned int i, count = dev->num_rx_queues;
struct netdev_rx_queue *rx;
@@ -5029,15 +5147,22 @@ static int netif_alloc_rx_queues(struct net_device *dev)
}
dev->_rx = rx;
- /*
- * Set a pointer to first element in the array which holds the
- * reference count.
- */
for (i = 0; i < count; i++)
- rx[i].first = rx;
-#endif
+ rx[i].dev = dev;
return 0;
}
+#endif
+
+static void netdev_init_one_queue(struct net_device *dev,
+ struct netdev_queue *queue, void *_unused)
+{
+ /* Initialize queue lock */
+ spin_lock_init(&queue->_xmit_lock);
+ netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
+ queue->xmit_lock_owner = -1;
+ netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
+ queue->dev = dev;
+}
static int netif_alloc_netdev_queues(struct net_device *dev)
{
@@ -5053,25 +5178,11 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
return -ENOMEM;
}
dev->_tx = tx;
- return 0;
-}
-static void netdev_init_one_queue(struct net_device *dev,
- struct netdev_queue *queue,
- void *_unused)
-{
- queue->dev = dev;
-
- /* Initialize queue lock */
- spin_lock_init(&queue->_xmit_lock);
- netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
- queue->xmit_lock_owner = -1;
-}
-
-static void netdev_init_queues(struct net_device *dev)
-{
netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
spin_lock_init(&dev->tx_global_lock);
+
+ return 0;
}
/**
@@ -5110,16 +5221,6 @@ int register_netdevice(struct net_device *dev)
dev->iflink = -1;
- ret = netif_alloc_rx_queues(dev);
- if (ret)
- goto out;
-
- ret = netif_alloc_netdev_queues(dev);
- if (ret)
- goto out;
-
- netdev_init_queues(dev);
-
/* Init, if this function is available */
if (dev->netdev_ops->ndo_init) {
ret = dev->netdev_ops->ndo_init(dev);
@@ -5577,10 +5678,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
dev->num_tx_queues = queue_count;
dev->real_num_tx_queues = queue_count;
+ if (netif_alloc_netdev_queues(dev))
+ goto free_pcpu;
#ifdef CONFIG_RPS
dev->num_rx_queues = queue_count;
dev->real_num_rx_queues = queue_count;
+ if (netif_alloc_rx_queues(dev))
+ goto free_pcpu;
#endif
dev->gso_max_size = GSO_MAX_SIZE;
@@ -5597,6 +5702,11 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
free_pcpu:
free_percpu(dev->pcpu_refcnt);
+ kfree(dev->_tx);
+#ifdef CONFIG_RPS
+ kfree(dev->_rx);
+#endif
+
free_p:
kfree(p);
return NULL;
@@ -5618,6 +5728,9 @@ void free_netdev(struct net_device *dev)
release_net(dev_net(dev));
kfree(dev->_tx);
+#ifdef CONFIG_RPS
+ kfree(dev->_rx);
+#endif
kfree(rcu_dereference_raw(dev->ingress_queue));
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 956a9f4971cb..17741782a345 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -891,6 +891,20 @@ static int ethtool_nway_reset(struct net_device *dev)
return dev->ethtool_ops->nway_reset(dev);
}
+static int ethtool_get_link(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
+
+ if (!dev->ethtool_ops->get_link)
+ return -EOPNOTSUPP;
+
+ edata.data = netif_running(dev) && dev->ethtool_ops->get_link(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
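
For reference, a hedged userspace sketch of the ioctl this helper now backs (standard SIOCETHTOOL usage; assumes <string.h>, <sys/ioctl.h>, <net/if.h>, <linux/ethtool.h>, <linux/sockios.h>):

	static int link_up(int sock, const char *ifname)
	{
		struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&edata;

		if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)
			return -1;
		return edata.data != 0;
	}

Note the behavioral change: the kernel now reports link down for interfaces that are administratively down (netif_running() is checked) instead of calling ->get_link() unconditionally.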
static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
{
struct ethtool_eeprom eeprom;
@@ -1171,7 +1185,9 @@ static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
return -EFAULT;
if (edata.data && !(dev->features & NETIF_F_SG))
return -EINVAL;
- if (edata.data && !(dev->features & NETIF_F_HW_CSUM))
+ if (edata.data && !((dev->features & NETIF_F_GEN_CSUM) ||
+ (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
+ == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
return -EINVAL;
return dev->ethtool_ops->set_ufo(dev, edata.data);
}
@@ -1528,8 +1544,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
rc = ethtool_nway_reset(dev);
break;
case ETHTOOL_GLINK:
- rc = ethtool_get_value(dev, useraddr, ethcmd,
- dev->ethtool_ops->get_link);
+ rc = ethtool_get_link(dev, useraddr);
break;
case ETHTOOL_GEEPROM:
rc = ethtool_get_eeprom(dev, useraddr);
diff --git a/net/core/filter.c b/net/core/filter.c
index ae21a0d3c4a2..2b27d4efdd48 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -37,9 +37,69 @@
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
+#include <linux/reciprocal_div.h>
+
+enum {
+ BPF_S_RET_K = 1,
+ BPF_S_RET_A,
+ BPF_S_ALU_ADD_K,
+ BPF_S_ALU_ADD_X,
+ BPF_S_ALU_SUB_K,
+ BPF_S_ALU_SUB_X,
+ BPF_S_ALU_MUL_K,
+ BPF_S_ALU_MUL_X,
+ BPF_S_ALU_DIV_X,
+ BPF_S_ALU_AND_K,
+ BPF_S_ALU_AND_X,
+ BPF_S_ALU_OR_K,
+ BPF_S_ALU_OR_X,
+ BPF_S_ALU_LSH_K,
+ BPF_S_ALU_LSH_X,
+ BPF_S_ALU_RSH_K,
+ BPF_S_ALU_RSH_X,
+ BPF_S_ALU_NEG,
+ BPF_S_LD_W_ABS,
+ BPF_S_LD_H_ABS,
+ BPF_S_LD_B_ABS,
+ BPF_S_LD_W_LEN,
+ BPF_S_LD_W_IND,
+ BPF_S_LD_H_IND,
+ BPF_S_LD_B_IND,
+ BPF_S_LD_IMM,
+ BPF_S_LDX_W_LEN,
+ BPF_S_LDX_B_MSH,
+ BPF_S_LDX_IMM,
+ BPF_S_MISC_TAX,
+ BPF_S_MISC_TXA,
+ BPF_S_ALU_DIV_K,
+ BPF_S_LD_MEM,
+ BPF_S_LDX_MEM,
+ BPF_S_ST,
+ BPF_S_STX,
+ BPF_S_JMP_JA,
+ BPF_S_JMP_JEQ_K,
+ BPF_S_JMP_JEQ_X,
+ BPF_S_JMP_JGE_K,
+ BPF_S_JMP_JGE_X,
+ BPF_S_JMP_JGT_K,
+ BPF_S_JMP_JGT_X,
+ BPF_S_JMP_JSET_K,
+ BPF_S_JMP_JSET_X,
+ /* Ancillary data */
+ BPF_S_ANC_PROTOCOL,
+ BPF_S_ANC_PKTTYPE,
+ BPF_S_ANC_IFINDEX,
+ BPF_S_ANC_NLATTR,
+ BPF_S_ANC_NLATTR_NEST,
+ BPF_S_ANC_MARK,
+ BPF_S_ANC_QUEUE,
+ BPF_S_ANC_HATYPE,
+ BPF_S_ANC_RXHASH,
+ BPF_S_ANC_CPU,
+};
/* No hurry in this branch */
-static void *__load_pointer(struct sk_buff *skb, int k)
+static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
{
u8 *ptr = NULL;
@@ -48,21 +108,17 @@ static void *__load_pointer(struct sk_buff *skb, int k)
else if (k >= SKF_LL_OFF)
ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
- if (ptr >= skb->head && ptr < skb_tail_pointer(skb))
+ if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
return ptr;
return NULL;
}
-static inline void *load_pointer(struct sk_buff *skb, int k,
+static inline void *load_pointer(const struct sk_buff *skb, int k,
unsigned int size, void *buffer)
{
if (k >= 0)
return skb_header_pointer(skb, k, size, buffer);
- else {
- if (k >= SKF_AD_OFF)
- return NULL;
- return __load_pointer(skb, k);
- }
+ return __load_pointer(skb, k, size);
}
/**
@@ -89,7 +145,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
rcu_read_lock_bh();
filter = rcu_dereference_bh(sk->sk_filter);
if (filter) {
- unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len);
+ unsigned int pkt_len = sk_run_filter(skb, filter->insns);
err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
}
@@ -103,50 +159,52 @@ EXPORT_SYMBOL(sk_filter);
* sk_run_filter - run a filter on a socket
* @skb: buffer to run the filter on
* @filter: filter to apply
- * @flen: length of filter
*
* Decode and apply filter instructions to the skb->data.
- * Return length to keep, 0 for none. skb is the data we are
- * filtering, filter is the array of filter instructions, and
- * len is the number of filter blocks in the array.
+ * Return length to keep, 0 for none. @skb is the data we are
+ * filtering, @filter is the array of filter instructions.
+ * Because all jumps are guaranteed to land before the last instruction,
+ * and the last instruction is guaranteed to be a RET, we don't need to
+ * check flen. (This function used to take the filter length as well.)
*/
-unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
+unsigned int sk_run_filter(const struct sk_buff *skb,
+ const struct sock_filter *fentry)
{
void *ptr;
u32 A = 0; /* Accumulator */
u32 X = 0; /* Index Register */
u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
- unsigned long memvalid = 0;
u32 tmp;
int k;
- int pc;
- BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
/*
* Process array of filter instructions.
*/
- for (pc = 0; pc < flen; pc++) {
- const struct sock_filter *fentry = &filter[pc];
- u32 f_k = fentry->k;
+ for (;; fentry++) {
+#if defined(CONFIG_X86_32)
+#define K (fentry->k)
+#else
+ const u32 K = fentry->k;
+#endif
switch (fentry->code) {
case BPF_S_ALU_ADD_X:
A += X;
continue;
case BPF_S_ALU_ADD_K:
- A += f_k;
+ A += K;
continue;
case BPF_S_ALU_SUB_X:
A -= X;
continue;
case BPF_S_ALU_SUB_K:
- A -= f_k;
+ A -= K;
continue;
case BPF_S_ALU_MUL_X:
A *= X;
continue;
case BPF_S_ALU_MUL_K:
- A *= f_k;
+ A *= K;
continue;
case BPF_S_ALU_DIV_X:
if (X == 0)
@@ -154,89 +212,89 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
A /= X;
continue;
case BPF_S_ALU_DIV_K:
- A /= f_k;
+ A = reciprocal_divide(A, K);
continue;
case BPF_S_ALU_AND_X:
A &= X;
continue;
case BPF_S_ALU_AND_K:
- A &= f_k;
+ A &= K;
continue;
case BPF_S_ALU_OR_X:
A |= X;
continue;
case BPF_S_ALU_OR_K:
- A |= f_k;
+ A |= K;
continue;
case BPF_S_ALU_LSH_X:
A <<= X;
continue;
case BPF_S_ALU_LSH_K:
- A <<= f_k;
+ A <<= K;
continue;
case BPF_S_ALU_RSH_X:
A >>= X;
continue;
case BPF_S_ALU_RSH_K:
- A >>= f_k;
+ A >>= K;
continue;
case BPF_S_ALU_NEG:
A = -A;
continue;
case BPF_S_JMP_JA:
- pc += f_k;
+ fentry += K;
continue;
case BPF_S_JMP_JGT_K:
- pc += (A > f_k) ? fentry->jt : fentry->jf;
+ fentry += (A > K) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JGE_K:
- pc += (A >= f_k) ? fentry->jt : fentry->jf;
+ fentry += (A >= K) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JEQ_K:
- pc += (A == f_k) ? fentry->jt : fentry->jf;
+ fentry += (A == K) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JSET_K:
- pc += (A & f_k) ? fentry->jt : fentry->jf;
+ fentry += (A & K) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JGT_X:
- pc += (A > X) ? fentry->jt : fentry->jf;
+ fentry += (A > X) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JGE_X:
- pc += (A >= X) ? fentry->jt : fentry->jf;
+ fentry += (A >= X) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JEQ_X:
- pc += (A == X) ? fentry->jt : fentry->jf;
+ fentry += (A == X) ? fentry->jt : fentry->jf;
continue;
case BPF_S_JMP_JSET_X:
- pc += (A & X) ? fentry->jt : fentry->jf;
+ fentry += (A & X) ? fentry->jt : fentry->jf;
continue;
case BPF_S_LD_W_ABS:
- k = f_k;
+ k = K;
load_w:
ptr = load_pointer(skb, k, 4, &tmp);
if (ptr != NULL) {
A = get_unaligned_be32(ptr);
continue;
}
- break;
+ return 0;
case BPF_S_LD_H_ABS:
- k = f_k;
+ k = K;
load_h:
ptr = load_pointer(skb, k, 2, &tmp);
if (ptr != NULL) {
A = get_unaligned_be16(ptr);
continue;
}
- break;
+ return 0;
case BPF_S_LD_B_ABS:
- k = f_k;
+ k = K;
load_b:
ptr = load_pointer(skb, k, 1, &tmp);
if (ptr != NULL) {
A = *(u8 *)ptr;
continue;
}
- break;
+ return 0;
case BPF_S_LD_W_LEN:
A = skb->len;
continue;
@@ -244,34 +302,32 @@ load_b:
X = skb->len;
continue;
case BPF_S_LD_W_IND:
- k = X + f_k;
+ k = X + K;
goto load_w;
case BPF_S_LD_H_IND:
- k = X + f_k;
+ k = X + K;
goto load_h;
case BPF_S_LD_B_IND:
- k = X + f_k;
+ k = X + K;
goto load_b;
case BPF_S_LDX_B_MSH:
- ptr = load_pointer(skb, f_k, 1, &tmp);
+ ptr = load_pointer(skb, K, 1, &tmp);
if (ptr != NULL) {
X = (*(u8 *)ptr & 0xf) << 2;
continue;
}
return 0;
case BPF_S_LD_IMM:
- A = f_k;
+ A = K;
continue;
case BPF_S_LDX_IMM:
- X = f_k;
+ X = K;
continue;
case BPF_S_LD_MEM:
- A = (memvalid & (1UL << f_k)) ?
- mem[f_k] : 0;
+ A = mem[K];
continue;
case BPF_S_LDX_MEM:
- X = (memvalid & (1UL << f_k)) ?
- mem[f_k] : 0;
+ X = mem[K];
continue;
case BPF_S_MISC_TAX:
X = A;
@@ -280,50 +336,44 @@ load_b:
A = X;
continue;
case BPF_S_RET_K:
- return f_k;
+ return K;
case BPF_S_RET_A:
return A;
case BPF_S_ST:
- memvalid |= 1UL << f_k;
- mem[f_k] = A;
+ mem[K] = A;
continue;
case BPF_S_STX:
- memvalid |= 1UL << f_k;
- mem[f_k] = X;
+ mem[K] = X;
continue;
- default:
- WARN_ON(1);
- return 0;
- }
-
- /*
- * Handle ancillary data, which are impossible
- * (or very difficult) to get parsing packet contents.
- */
- switch (k-SKF_AD_OFF) {
- case SKF_AD_PROTOCOL:
+ case BPF_S_ANC_PROTOCOL:
A = ntohs(skb->protocol);
continue;
- case SKF_AD_PKTTYPE:
+ case BPF_S_ANC_PKTTYPE:
A = skb->pkt_type;
continue;
- case SKF_AD_IFINDEX:
+ case BPF_S_ANC_IFINDEX:
if (!skb->dev)
return 0;
A = skb->dev->ifindex;
continue;
- case SKF_AD_MARK:
+ case BPF_S_ANC_MARK:
A = skb->mark;
continue;
- case SKF_AD_QUEUE:
+ case BPF_S_ANC_QUEUE:
A = skb->queue_mapping;
continue;
- case SKF_AD_HATYPE:
+ case BPF_S_ANC_HATYPE:
if (!skb->dev)
return 0;
A = skb->dev->type;
continue;
- case SKF_AD_NLATTR: {
+ case BPF_S_ANC_RXHASH:
+ A = skb->rxhash;
+ continue;
+ case BPF_S_ANC_CPU:
+ A = raw_smp_processor_id();
+ continue;
+ case BPF_S_ANC_NLATTR: {
struct nlattr *nla;
if (skb_is_nonlinear(skb))
@@ -339,7 +389,7 @@ load_b:
A = 0;
continue;
}
- case SKF_AD_NLATTR_NEST: {
+ case BPF_S_ANC_NLATTR_NEST: {
struct nlattr *nla;
if (skb_is_nonlinear(skb))
@@ -359,6 +409,7 @@ load_b:
continue;
}
default:
+ WARN_ON(1);
return 0;
}
}
@@ -367,6 +418,66 @@ load_b:
}
EXPORT_SYMBOL(sk_run_filter);
+/*
+ * Security:
+ * A BPF program is able to use 16 cells of memory to store intermediate
+ * values (see u32 mem[BPF_MEMWORDS] in sk_run_filter()).
+ * As we don't want to clear the mem[] array for each packet going through
+ * sk_run_filter(), we check that a filter loaded by a user never tries to
+ * read a cell unless it was previously written, and we check all branches
+ * to be sure a malicious user doesn't try to abuse us.
+ */
+static int check_load_and_stores(struct sock_filter *filter, int flen)
+{
+ u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
+ int pc, ret = 0;
+
+ BUILD_BUG_ON(BPF_MEMWORDS > 16);
+ masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
+ if (!masks)
+ return -ENOMEM;
+ memset(masks, 0xff, flen * sizeof(*masks));
+
+ for (pc = 0; pc < flen; pc++) {
+ memvalid &= masks[pc];
+
+ switch (filter[pc].code) {
+ case BPF_S_ST:
+ case BPF_S_STX:
+ memvalid |= (1 << filter[pc].k);
+ break;
+ case BPF_S_LD_MEM:
+ case BPF_S_LDX_MEM:
+ if (!(memvalid & (1 << filter[pc].k))) {
+ ret = -EINVAL;
+ goto error;
+ }
+ break;
+ case BPF_S_JMP_JA:
+ /* a jump must set masks on target */
+ masks[pc + 1 + filter[pc].k] &= memvalid;
+ memvalid = ~0;
+ break;
+ case BPF_S_JMP_JEQ_K:
+ case BPF_S_JMP_JEQ_X:
+ case BPF_S_JMP_JGE_K:
+ case BPF_S_JMP_JGE_X:
+ case BPF_S_JMP_JGT_K:
+ case BPF_S_JMP_JGT_X:
+ case BPF_S_JMP_JSET_X:
+ case BPF_S_JMP_JSET_K:
+ /* a jump must set masks on targets */
+ masks[pc + 1 + filter[pc].jt] &= memvalid;
+ masks[pc + 1 + filter[pc].jf] &= memvalid;
+ memvalid = ~0;
+ break;
+ }
+ }
+error:
+ kfree(masks);
+ return ret;
+}
+
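
A concrete example of what the new pass rejects, sketched as a two-instruction program that loads scratch cell 0 without ever storing to it:

	struct sock_filter leaky[] = {
		BPF_STMT(BPF_LD | BPF_MEM, 0),	/* A = mem[0], never written */
		BPF_STMT(BPF_RET | BPF_A, 0),
	};

sk_chk_filter() now fails this with -EINVAL at attach time, where the old code masked the read at run time through the per-packet memvalid bitmap.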
/**
* sk_chk_filter - verify socket filter code
* @filter: filter to verify
@@ -383,7 +494,57 @@ EXPORT_SYMBOL(sk_run_filter);
*/
int sk_chk_filter(struct sock_filter *filter, int flen)
{
- struct sock_filter *ftest;
+ /*
+ * Valid instructions are initialized to non-0.
+ * Invalid instructions are initialized to 0.
+ */
+ static const u8 codes[] = {
+ [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
+ [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
+ [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
+ [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
+ [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
+ [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
+ [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
+ [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
+ [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
+ [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
+ [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
+ [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
+ [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
+ [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
+ [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
+ [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
+ [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
+ [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
+ [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
+ [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
+ [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
+ [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
+ [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
+ [BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
+ [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
+ [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
+ [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
+ [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
+ [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
+ [BPF_RET|BPF_K] = BPF_S_RET_K,
+ [BPF_RET|BPF_A] = BPF_S_RET_A,
+ [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
+ [BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
+ [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
+ [BPF_ST] = BPF_S_ST,
+ [BPF_STX] = BPF_S_STX,
+ [BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
+ [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
+ [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
+ [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
+ [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
+ [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
+ [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
+ [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
+ [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
+ };
int pc;
if (flen == 0 || flen > BPF_MAXINSNS)
@@ -391,136 +552,31 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
/* check the filter code now */
for (pc = 0; pc < flen; pc++) {
- ftest = &filter[pc];
-
- /* Only allow valid instructions */
- switch (ftest->code) {
- case BPF_ALU|BPF_ADD|BPF_K:
- ftest->code = BPF_S_ALU_ADD_K;
- break;
- case BPF_ALU|BPF_ADD|BPF_X:
- ftest->code = BPF_S_ALU_ADD_X;
- break;
- case BPF_ALU|BPF_SUB|BPF_K:
- ftest->code = BPF_S_ALU_SUB_K;
- break;
- case BPF_ALU|BPF_SUB|BPF_X:
- ftest->code = BPF_S_ALU_SUB_X;
- break;
- case BPF_ALU|BPF_MUL|BPF_K:
- ftest->code = BPF_S_ALU_MUL_K;
- break;
- case BPF_ALU|BPF_MUL|BPF_X:
- ftest->code = BPF_S_ALU_MUL_X;
- break;
- case BPF_ALU|BPF_DIV|BPF_X:
- ftest->code = BPF_S_ALU_DIV_X;
- break;
- case BPF_ALU|BPF_AND|BPF_K:
- ftest->code = BPF_S_ALU_AND_K;
- break;
- case BPF_ALU|BPF_AND|BPF_X:
- ftest->code = BPF_S_ALU_AND_X;
- break;
- case BPF_ALU|BPF_OR|BPF_K:
- ftest->code = BPF_S_ALU_OR_K;
- break;
- case BPF_ALU|BPF_OR|BPF_X:
- ftest->code = BPF_S_ALU_OR_X;
- break;
- case BPF_ALU|BPF_LSH|BPF_K:
- ftest->code = BPF_S_ALU_LSH_K;
- break;
- case BPF_ALU|BPF_LSH|BPF_X:
- ftest->code = BPF_S_ALU_LSH_X;
- break;
- case BPF_ALU|BPF_RSH|BPF_K:
- ftest->code = BPF_S_ALU_RSH_K;
- break;
- case BPF_ALU|BPF_RSH|BPF_X:
- ftest->code = BPF_S_ALU_RSH_X;
- break;
- case BPF_ALU|BPF_NEG:
- ftest->code = BPF_S_ALU_NEG;
- break;
- case BPF_LD|BPF_W|BPF_ABS:
- ftest->code = BPF_S_LD_W_ABS;
- break;
- case BPF_LD|BPF_H|BPF_ABS:
- ftest->code = BPF_S_LD_H_ABS;
- break;
- case BPF_LD|BPF_B|BPF_ABS:
- ftest->code = BPF_S_LD_B_ABS;
- break;
- case BPF_LD|BPF_W|BPF_LEN:
- ftest->code = BPF_S_LD_W_LEN;
- break;
- case BPF_LD|BPF_W|BPF_IND:
- ftest->code = BPF_S_LD_W_IND;
- break;
- case BPF_LD|BPF_H|BPF_IND:
- ftest->code = BPF_S_LD_H_IND;
- break;
- case BPF_LD|BPF_B|BPF_IND:
- ftest->code = BPF_S_LD_B_IND;
- break;
- case BPF_LD|BPF_IMM:
- ftest->code = BPF_S_LD_IMM;
- break;
- case BPF_LDX|BPF_W|BPF_LEN:
- ftest->code = BPF_S_LDX_W_LEN;
- break;
- case BPF_LDX|BPF_B|BPF_MSH:
- ftest->code = BPF_S_LDX_B_MSH;
- break;
- case BPF_LDX|BPF_IMM:
- ftest->code = BPF_S_LDX_IMM;
- break;
- case BPF_MISC|BPF_TAX:
- ftest->code = BPF_S_MISC_TAX;
- break;
- case BPF_MISC|BPF_TXA:
- ftest->code = BPF_S_MISC_TXA;
- break;
- case BPF_RET|BPF_K:
- ftest->code = BPF_S_RET_K;
- break;
- case BPF_RET|BPF_A:
- ftest->code = BPF_S_RET_A;
- break;
+ struct sock_filter *ftest = &filter[pc];
+ u16 code = ftest->code;
+ if (code >= ARRAY_SIZE(codes))
+ return -EINVAL;
+ code = codes[code];
+ if (!code)
+ return -EINVAL;
/* Some instructions need special checks */
-
+ switch (code) {
+ case BPF_S_ALU_DIV_K:
/* check for division by zero */
- case BPF_ALU|BPF_DIV|BPF_K:
if (ftest->k == 0)
return -EINVAL;
- ftest->code = BPF_S_ALU_DIV_K;
- break;
-
- /* check for invalid memory addresses */
- case BPF_LD|BPF_MEM:
- if (ftest->k >= BPF_MEMWORDS)
- return -EINVAL;
- ftest->code = BPF_S_LD_MEM;
- break;
- case BPF_LDX|BPF_MEM:
- if (ftest->k >= BPF_MEMWORDS)
- return -EINVAL;
- ftest->code = BPF_S_LDX_MEM;
- break;
- case BPF_ST:
- if (ftest->k >= BPF_MEMWORDS)
- return -EINVAL;
- ftest->code = BPF_S_ST;
+ ftest->k = reciprocal_value(ftest->k);
break;
- case BPF_STX:
+ case BPF_S_LD_MEM:
+ case BPF_S_LDX_MEM:
+ case BPF_S_ST:
+ case BPF_S_STX:
+ /* check for invalid memory addresses */
if (ftest->k >= BPF_MEMWORDS)
return -EINVAL;
- ftest->code = BPF_S_STX;
break;
-
- case BPF_JMP|BPF_JA:
+ case BPF_S_JMP_JA:
/*
* Note, the large ftest->k might cause loops.
* Compare this with conditional jumps below,
@@ -528,40 +584,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
*/
if (ftest->k >= (unsigned)(flen-pc-1))
return -EINVAL;
- ftest->code = BPF_S_JMP_JA;
- break;
-
- case BPF_JMP|BPF_JEQ|BPF_K:
- ftest->code = BPF_S_JMP_JEQ_K;
- break;
- case BPF_JMP|BPF_JEQ|BPF_X:
- ftest->code = BPF_S_JMP_JEQ_X;
- break;
- case BPF_JMP|BPF_JGE|BPF_K:
- ftest->code = BPF_S_JMP_JGE_K;
- break;
- case BPF_JMP|BPF_JGE|BPF_X:
- ftest->code = BPF_S_JMP_JGE_X;
- break;
- case BPF_JMP|BPF_JGT|BPF_K:
- ftest->code = BPF_S_JMP_JGT_K;
- break;
- case BPF_JMP|BPF_JGT|BPF_X:
- ftest->code = BPF_S_JMP_JGT_X;
- break;
- case BPF_JMP|BPF_JSET|BPF_K:
- ftest->code = BPF_S_JMP_JSET_K;
- break;
- case BPF_JMP|BPF_JSET|BPF_X:
- ftest->code = BPF_S_JMP_JSET_X;
break;
-
- default:
- return -EINVAL;
- }
-
- /* for conditionals both must be safe */
- switch (ftest->code) {
case BPF_S_JMP_JEQ_K:
case BPF_S_JMP_JEQ_X:
case BPF_S_JMP_JGE_K:
@@ -570,21 +593,40 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
case BPF_S_JMP_JGT_X:
case BPF_S_JMP_JSET_X:
case BPF_S_JMP_JSET_K:
+ /* for conditionals both must be safe */
if (pc + ftest->jt + 1 >= flen ||
pc + ftest->jf + 1 >= flen)
return -EINVAL;
+ break;
+ case BPF_S_LD_W_ABS:
+ case BPF_S_LD_H_ABS:
+ case BPF_S_LD_B_ABS:
+#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
+ code = BPF_S_ANC_##CODE; \
+ break
+ switch (ftest->k) {
+ ANCILLARY(PROTOCOL);
+ ANCILLARY(PKTTYPE);
+ ANCILLARY(IFINDEX);
+ ANCILLARY(NLATTR);
+ ANCILLARY(NLATTR_NEST);
+ ANCILLARY(MARK);
+ ANCILLARY(QUEUE);
+ ANCILLARY(HATYPE);
+ ANCILLARY(RXHASH);
+ ANCILLARY(CPU);
+ }
}
+ ftest->code = code;
}
/* last instruction must be a RET code */
switch (filter[flen - 1].code) {
case BPF_S_RET_K:
case BPF_S_RET_A:
- return 0;
- break;
- default:
- return -EINVAL;
- }
+ return check_load_and_stores(filter, flen);
+ }
+ return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
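
End to end, the checker runs when a program is attached to a socket. A minimal userspace sketch using an accept-everything filter (the RET immediate caps how many bytes of each packet the socket keeps):

	#include <linux/filter.h>
	#include <sys/socket.h>

	static int attach_accept_all(int fd)
	{
		struct sock_filter insns[] = {
			BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
		};
		struct sock_fprog prog = {
			.len	= sizeof(insns) / sizeof(insns[0]),
			.filter	= insns,
		};

		return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				  &prog, sizeof(prog));
	}

One side effect of this series is visible here: sk_chk_filter() rewrites the immediate of BPF_S_ALU_DIV_K with reciprocal_value(K), so sk_run_filter() computes A/K as a multiply-and-shift via reciprocal_divide() instead of a hardware divide.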
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8cc8f9a79db9..60a902913429 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -41,7 +41,6 @@
#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
-#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 7f902cad10f8..e23c01be5a5b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -706,7 +706,6 @@ static struct attribute *rx_queue_default_attrs[] = {
static void rx_queue_release(struct kobject *kobj)
{
struct netdev_rx_queue *queue = to_rx_queue(kobj);
- struct netdev_rx_queue *first = queue->first;
struct rps_map *map;
struct rps_dev_flow_table *flow_table;
@@ -723,10 +722,8 @@ static void rx_queue_release(struct kobject *kobj)
call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
}
- if (atomic_dec_and_test(&first->count))
- kfree(first);
- else
- memset(kobj, 0, sizeof(*kobj));
+ memset(kobj, 0, sizeof(*kobj));
+ dev_put(queue->dev);
}
static struct kobj_type rx_queue_ktype = {
@@ -738,7 +735,6 @@ static struct kobj_type rx_queue_ktype = {
static int rx_queue_add_kobject(struct net_device *net, int index)
{
struct netdev_rx_queue *queue = net->_rx + index;
- struct netdev_rx_queue *first = queue->first;
struct kobject *kobj = &queue->kobj;
int error = 0;
@@ -751,14 +747,16 @@ static int rx_queue_add_kobject(struct net_device *net, int index)
}
kobject_uevent(kobj, KOBJ_ADD);
- atomic_inc(&first->count);
+ dev_hold(queue->dev);
return error;
}
+#endif /* CONFIG_RPS */
int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
+#ifdef CONFIG_RPS
int i;
int error = 0;
@@ -774,23 +772,423 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
kobject_put(&net->_rx[i].kobj);
return error;
+#else
+ return 0;
+#endif
+}
+
+#ifdef CONFIG_XPS
+/*
+ * netdev_queue sysfs structures and functions.
+ */
+struct netdev_queue_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attr, char *buf);
+ ssize_t (*store)(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attr, const char *buf, size_t len);
+};
+#define to_netdev_queue_attr(_attr) container_of(_attr, \
+ struct netdev_queue_attribute, attr)
+
+#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
+
+static ssize_t netdev_queue_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
+ struct netdev_queue *queue = to_netdev_queue(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(queue, attribute, buf);
}
-static int rx_queue_register_kobjects(struct net_device *net)
+static ssize_t netdev_queue_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
{
+ struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
+ struct netdev_queue *queue = to_netdev_queue(kobj);
+
+ if (!attribute->store)
+ return -EIO;
+
+ return attribute->store(queue, attribute, buf, count);
+}
+
+static const struct sysfs_ops netdev_queue_sysfs_ops = {
+ .show = netdev_queue_attr_show,
+ .store = netdev_queue_attr_store,
+};
+
+static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+{
+ struct net_device *dev = queue->dev;
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++)
+ if (queue == &dev->_tx[i])
+ break;
+
+ BUG_ON(i >= dev->num_tx_queues);
+
+ return i;
+}
+
+
+static ssize_t show_xps_map(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attribute, char *buf)
+{
+ struct net_device *dev = queue->dev;
+ struct xps_dev_maps *dev_maps;
+ cpumask_var_t mask;
+ unsigned long index;
+ size_t len = 0;
+ int i;
+
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ index = get_netdev_queue_index(queue);
+
+ rcu_read_lock();
+ dev_maps = rcu_dereference(dev->xps_maps);
+ if (dev_maps) {
+ for_each_possible_cpu(i) {
+ struct xps_map *map =
+ rcu_dereference(dev_maps->cpu_map[i]);
+ if (map) {
+ int j;
+ for (j = 0; j < map->len; j++) {
+ if (map->queues[j] == index) {
+ cpumask_set_cpu(i, mask);
+ break;
+ }
+ }
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
+ if (PAGE_SIZE - len < 3) {
+ free_cpumask_var(mask);
+ return -EINVAL;
+ }
+
+ free_cpumask_var(mask);
+ len += sprintf(buf + len, "\n");
+ return len;
+}
+
+static void xps_map_release(struct rcu_head *rcu)
+{
+ struct xps_map *map = container_of(rcu, struct xps_map, rcu);
+
+ kfree(map);
+}
+
+static void xps_dev_maps_release(struct rcu_head *rcu)
+{
+ struct xps_dev_maps *dev_maps =
+ container_of(rcu, struct xps_dev_maps, rcu);
+
+ kfree(dev_maps);
+}
+
+static DEFINE_MUTEX(xps_map_mutex);
+#define xmap_dereference(P) \
+ rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
+
+static ssize_t store_xps_map(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attribute,
+ const char *buf, size_t len)
+{
+ struct net_device *dev = queue->dev;
+ cpumask_var_t mask;
+ int err, i, cpu, pos, map_len, alloc_len, need_set;
+ unsigned long index;
+ struct xps_map *map, *new_map;
+ struct xps_dev_maps *dev_maps, *new_dev_maps;
+ int nonempty = 0;
+ int numa_node = -2;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ index = get_netdev_queue_index(queue);
+
+ err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+ if (err) {
+ free_cpumask_var(mask);
+ return err;
+ }
+
+ new_dev_maps = kzalloc(max_t(unsigned,
+ XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
+ if (!new_dev_maps) {
+ free_cpumask_var(mask);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&xps_map_mutex);
+
+ dev_maps = xmap_dereference(dev->xps_maps);
+
+ for_each_possible_cpu(cpu) {
+ map = dev_maps ?
+ xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+ new_map = map;
+ if (map) {
+ for (pos = 0; pos < map->len; pos++)
+ if (map->queues[pos] == index)
+ break;
+ map_len = map->len;
+ alloc_len = map->alloc_len;
+ } else
+ pos = map_len = alloc_len = 0;
+
+ need_set = cpu_isset(cpu, *mask) && cpu_online(cpu);
+#ifdef CONFIG_NUMA
+ if (need_set) {
+ if (numa_node == -2)
+ numa_node = cpu_to_node(cpu);
+ else if (numa_node != cpu_to_node(cpu))
+ numa_node = -1;
+ }
+#endif
+ if (need_set && pos >= map_len) {
+ /* Need to add queue to this CPU's map */
+ if (map_len >= alloc_len) {
+ alloc_len = alloc_len ?
+ 2 * alloc_len : XPS_MIN_MAP_ALLOC;
+ new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
+ GFP_KERNEL,
+ cpu_to_node(cpu));
+ if (!new_map)
+ goto error;
+ new_map->alloc_len = alloc_len;
+ for (i = 0; i < map_len; i++)
+ new_map->queues[i] = map->queues[i];
+ new_map->len = map_len;
+ }
+ new_map->queues[new_map->len++] = index;
+ } else if (!need_set && pos < map_len) {
+ /* Need to remove queue from this CPU's map */
+ if (map_len > 1)
+ new_map->queues[pos] =
+ new_map->queues[--new_map->len];
+ else
+ new_map = NULL;
+ }
+ RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
+ }
+
+ /* Cleanup old maps */
+ for_each_possible_cpu(cpu) {
+ map = dev_maps ?
+ xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+ if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
+ call_rcu(&map->rcu, xps_map_release);
+ if (new_dev_maps->cpu_map[cpu])
+ nonempty = 1;
+ }
+
+ if (nonempty)
+ rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+ else {
+ kfree(new_dev_maps);
+ rcu_assign_pointer(dev->xps_maps, NULL);
+ }
+
+ if (dev_maps)
+ call_rcu(&dev_maps->rcu, xps_dev_maps_release);
+
+ netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node :
+ NUMA_NO_NODE);
+
+ mutex_unlock(&xps_map_mutex);
+
+ free_cpumask_var(mask);
+ return len;
+
+error:
+ mutex_unlock(&xps_map_mutex);
+
+ if (new_dev_maps)
+ for_each_possible_cpu(i)
+ kfree(rcu_dereference_protected(
+ new_dev_maps->cpu_map[i],
+ 1));
+ kfree(new_dev_maps);
+ free_cpumask_var(mask);
+ return -ENOMEM;
+}
+
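
The new attribute is driven from userspace by writing a hex CPU mask. A hypothetical sketch, equivalent to `echo 3 > /sys/class/net/eth0/queues/tx-0/xps_cpus` (interface and queue names are illustrative):

	#include <fcntl.h>
	#include <unistd.h>

	static int pin_tx0_to_cpus01(void)
	{
		int fd = open("/sys/class/net/eth0/queues/tx-0/xps_cpus",
			      O_WRONLY);
		if (fd < 0)
			return -1;
		if (write(fd, "3\n", 2) != 2) {	/* bitmap: CPUs 0 and 1 */
			close(fd);
			return -1;
		}
		return close(fd);
	}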
+static struct netdev_queue_attribute xps_cpus_attribute =
+ __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
+
+static struct attribute *netdev_queue_default_attrs[] = {
+ &xps_cpus_attribute.attr,
+ NULL
+};
+
+static void netdev_queue_release(struct kobject *kobj)
+{
+ struct netdev_queue *queue = to_netdev_queue(kobj);
+ struct net_device *dev = queue->dev;
+ struct xps_dev_maps *dev_maps;
+ struct xps_map *map;
+ unsigned long index;
+ int i, pos, nonempty = 0;
+
+ index = get_netdev_queue_index(queue);
+
+ mutex_lock(&xps_map_mutex);
+ dev_maps = xmap_dereference(dev->xps_maps);
+
+ if (dev_maps) {
+ for_each_possible_cpu(i) {
+ map = xmap_dereference(dev_maps->cpu_map[i]);
+ if (!map)
+ continue;
+
+ for (pos = 0; pos < map->len; pos++)
+ if (map->queues[pos] == index)
+ break;
+
+ if (pos < map->len) {
+ if (map->len > 1)
+ map->queues[pos] =
+ map->queues[--map->len];
+ else {
+ RCU_INIT_POINTER(dev_maps->cpu_map[i],
+ NULL);
+ call_rcu(&map->rcu, xps_map_release);
+ map = NULL;
+ }
+ }
+ if (map)
+ nonempty = 1;
+ }
+
+ if (!nonempty) {
+ RCU_INIT_POINTER(dev->xps_maps, NULL);
+ call_rcu(&dev_maps->rcu, xps_dev_maps_release);
+ }
+ }
+
+ mutex_unlock(&xps_map_mutex);
+
+ memset(kobj, 0, sizeof(*kobj));
+ dev_put(queue->dev);
+}
+
+static struct kobj_type netdev_queue_ktype = {
+ .sysfs_ops = &netdev_queue_sysfs_ops,
+ .release = netdev_queue_release,
+ .default_attrs = netdev_queue_default_attrs,
+};
+
+static int netdev_queue_add_kobject(struct net_device *net, int index)
+{
+ struct netdev_queue *queue = net->_tx + index;
+ struct kobject *kobj = &queue->kobj;
+ int error = 0;
+
+ kobj->kset = net->queues_kset;
+ error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
+ "tx-%u", index);
+ if (error) {
+ kobject_put(kobj);
+ return error;
+ }
+
+ kobject_uevent(kobj, KOBJ_ADD);
+ dev_hold(queue->dev);
+
+ return error;
+}
+#endif /* CONFIG_XPS */
+
+int
+netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
+{
+#ifdef CONFIG_XPS
+ int i;
+ int error = 0;
+
+ for (i = old_num; i < new_num; i++) {
+ error = netdev_queue_add_kobject(net, i);
+ if (error) {
+ new_num = old_num;
+ break;
+ }
+ }
+
+ while (--i >= new_num)
+ kobject_put(&net->_tx[i].kobj);
+
+ return error;
+#else
+ return 0;
+#endif
+}
+
+static int register_queue_kobjects(struct net_device *net)
+{
+ int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
+
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
net->queues_kset = kset_create_and_add("queues",
NULL, &net->dev.kobj);
if (!net->queues_kset)
return -ENOMEM;
- return net_rx_queue_update_kobjects(net, 0, net->real_num_rx_queues);
+#endif
+
+#ifdef CONFIG_RPS
+ real_rx = net->real_num_rx_queues;
+#endif
+ real_tx = net->real_num_tx_queues;
+
+ error = net_rx_queue_update_kobjects(net, 0, real_rx);
+ if (error)
+ goto error;
+ rxq = real_rx;
+
+ error = netdev_queue_update_kobjects(net, 0, real_tx);
+ if (error)
+ goto error;
+ txq = real_tx;
+
+ return 0;
+
+error:
+ netdev_queue_update_kobjects(net, txq, 0);
+ net_rx_queue_update_kobjects(net, rxq, 0);
+ return error;
}
-static void rx_queue_remove_kobjects(struct net_device *net)
+static void remove_queue_kobjects(struct net_device *net)
{
- net_rx_queue_update_kobjects(net, net->real_num_rx_queues, 0);
+ int real_rx = 0, real_tx = 0;
+
+#ifdef CONFIG_RPS
+ real_rx = net->real_num_rx_queues;
+#endif
+ real_tx = net->real_num_tx_queues;
+
+ net_rx_queue_update_kobjects(net, real_rx, 0);
+ netdev_queue_update_kobjects(net, real_tx, 0);
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
kset_unregister(net->queues_kset);
+#endif
}
-#endif /* CONFIG_RPS */
static const void *net_current_ns(void)
{
@@ -889,9 +1287,7 @@ void netdev_unregister_kobject(struct net_device * net)
kobject_get(&dev->kobj);
-#ifdef CONFIG_RPS
- rx_queue_remove_kobjects(net);
-#endif
+ remove_queue_kobjects(net);
device_del(dev);
}
@@ -930,13 +1326,11 @@ int netdev_register_kobject(struct net_device *net)
if (error)
return error;
-#ifdef CONFIG_RPS
- error = rx_queue_register_kobjects(net);
+ error = register_queue_kobjects(net);
if (error) {
device_del(dev);
return error;
}
-#endif
return error;
}
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index 778e1571548d..bd7751ec1c4d 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -4,8 +4,8 @@
int netdev_kobject_init(void);
int netdev_register_kobject(struct net_device *);
void netdev_unregister_kobject(struct net_device *);
-#ifdef CONFIG_RPS
int net_rx_queue_update_kobjects(struct net_device *, int old_num, int new_num);
-#endif
+int netdev_queue_update_kobjects(struct net_device *net,
+ int old_num, int new_num);
#endif
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 4e98ffac3af0..72d9b50109fc 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -35,7 +35,6 @@
#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
-#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
static struct sk_buff_head skb_pool;
@@ -76,8 +75,7 @@ static void queue_process(struct work_struct *work)
local_irq_save(flags);
__netif_tx_lock(txq, smp_processor_id());
- if (netif_tx_queue_stopped(txq) ||
- netif_tx_queue_frozen(txq) ||
+ if (netif_tx_queue_frozen_or_stopped(txq) ||
ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
skb_queue_head(&npinfo->txq, skb);
__netif_tx_unlock(txq);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 33bc3823ac6f..a9e7fc4c461f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -378,6 +378,7 @@ struct pktgen_dev {
u16 queue_map_min;
u16 queue_map_max;
+ __u32 skb_priority; /* skb priority field */
int node; /* Memory node */
#ifdef CONFIG_XFRM
@@ -394,6 +395,8 @@ struct pktgen_hdr {
__be32 tv_usec;
};
+static bool pktgen_exiting __read_mostly;
+
struct pktgen_thread {
spinlock_t if_lock; /* for list of devices */
struct list_head if_list; /* All device here */
@@ -547,6 +550,10 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
pkt_dev->queue_map_min,
pkt_dev->queue_map_max);
+ if (pkt_dev->skb_priority)
+ seq_printf(seq, " skb_priority: %u\n",
+ pkt_dev->skb_priority);
+
if (pkt_dev->flags & F_IPV6) {
char b1[128], b2[128], b3[128];
fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr);
@@ -1711,6 +1718,18 @@ static ssize_t pktgen_if_write(struct file *file,
return count;
}
+ if (!strcmp(name, "skb_priority")) {
+ len = num_arg(&user_buffer[i], 9, &value);
+ if (len < 0)
+ return len;
+
+ i += len;
+ pkt_dev->skb_priority = value;
+ sprintf(pg_result, "OK: skb_priority=%i",
+ pkt_dev->skb_priority);
+ return count;
+ }
+
sprintf(pkt_dev->result, "No such parameter \"%s\"", name);
return -EINVAL;
}
@@ -2641,6 +2660,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
sprintf(pkt_dev->result, "No memory");
return NULL;
}
+ prefetchw(skb->data);
skb_reserve(skb, datalen);
@@ -2671,6 +2691,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
skb->transport_header = skb->network_header + sizeof(struct iphdr);
skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
skb_set_queue_mapping(skb, queue_map);
+ skb->priority = pkt_dev->skb_priority;
+
iph = ip_hdr(skb);
udph = udp_hdr(skb);
@@ -2986,6 +3008,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
sprintf(pkt_dev->result, "No memory");
return NULL;
}
+ prefetchw(skb->data);
skb_reserve(skb, 16);
@@ -3016,6 +3039,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
skb_set_queue_mapping(skb, queue_map);
+ skb->priority = pkt_dev->skb_priority;
iph = ipv6_hdr(skb);
udph = udp_hdr(skb);
@@ -3431,11 +3455,6 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
remove_proc_entry(t->tsk->comm, pg_proc_dir);
- mutex_lock(&pktgen_thread_lock);
-
- list_del(&t->th_list);
-
- mutex_unlock(&pktgen_thread_lock);
}
static void pktgen_resched(struct pktgen_dev *pkt_dev)
@@ -3510,7 +3529,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
__netif_tx_lock_bh(txq);
- if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) {
+ if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
ret = NETDEV_TX_BUSY;
pkt_dev->last_ok = 0;
goto unlock;
@@ -3534,8 +3553,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
break;
default: /* Drivers are not supposed to return other values! */
if (net_ratelimit())
- pr_info("pktgen: %s xmit error: %d\n",
- pkt_dev->odevname, ret);
+ pr_info("%s xmit error: %d\n", pkt_dev->odevname, ret);
pkt_dev->errors++;
/* fallthru */
case NETDEV_TX_LOCKED:
@@ -3582,6 +3600,8 @@ static int pktgen_thread_worker(void *arg)
pkt_dev = next_to_run(t);
if (unlikely(!pkt_dev && t->control == 0)) {
+ if (pktgen_exiting)
+ break;
wait_event_interruptible_timeout(t->queue,
t->control != 0,
HZ/10);
@@ -3634,6 +3654,13 @@ static int pktgen_thread_worker(void *arg)
pr_debug("%s removing thread\n", t->tsk->comm);
pktgen_rem_thread(t);
+ /* Wait for kthread_stop */
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
return 0;
}
@@ -3908,6 +3935,7 @@ static void __exit pg_cleanup(void)
struct list_head *q, *n;
/* Stop all interfaces & threads */
+ pktgen_exiting = true;
list_for_each_safe(q, n, &pktgen_threads) {
t = list_entry(q, struct pktgen_thread, th_list);
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index fceeb37d7161..182236b2510a 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -33,6 +33,7 @@
* Note : Dont forget somaxconn that may limit backlog too.
*/
int sysctl_max_syn_backlog = 256;
+EXPORT_SYMBOL(sysctl_max_syn_backlog);
int reqsk_queue_alloc(struct request_sock_queue *queue,
unsigned int nr_table_entries)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 841c287ef40a..750db57f3bb3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -362,6 +362,95 @@ static size_t rtnl_link_get_size(const struct net_device *dev)
return size;
}
+static LIST_HEAD(rtnl_af_ops);
+
+static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
+{
+ const struct rtnl_af_ops *ops;
+
+ list_for_each_entry(ops, &rtnl_af_ops, list) {
+ if (ops->family == family)
+ return ops;
+ }
+
+ return NULL;
+}
+
+/**
+ * __rtnl_af_register - Register rtnl_af_ops with rtnetlink.
+ * @ops: struct rtnl_af_ops * to register
+ *
+ * The caller must hold the rtnl_mutex.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int __rtnl_af_register(struct rtnl_af_ops *ops)
+{
+ list_add_tail(&ops->list, &rtnl_af_ops);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__rtnl_af_register);
+
+/**
+ * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
+ * @ops: struct rtnl_af_ops * to register
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int rtnl_af_register(struct rtnl_af_ops *ops)
+{
+ int err;
+
+ rtnl_lock();
+ err = __rtnl_af_register(ops);
+ rtnl_unlock();
+ return err;
+}
+EXPORT_SYMBOL_GPL(rtnl_af_register);
+
+/**
+ * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
+ * @ops: struct rtnl_af_ops * to unregister
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+void __rtnl_af_unregister(struct rtnl_af_ops *ops)
+{
+ list_del(&ops->list);
+}
+EXPORT_SYMBOL_GPL(__rtnl_af_unregister);
+
+/**
+ * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
+ * @ops: struct rtnl_af_ops * to unregister
+ */
+void rtnl_af_unregister(struct rtnl_af_ops *ops)
+{
+ rtnl_lock();
+ __rtnl_af_unregister(ops);
+ rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(rtnl_af_unregister);
+
+static size_t rtnl_link_get_af_size(const struct net_device *dev)
+{
+ struct rtnl_af_ops *af_ops;
+ size_t size;
+
+ /* IFLA_AF_SPEC */
+ size = nla_total_size(sizeof(struct nlattr));
+
+ list_for_each_entry(af_ops, &rtnl_af_ops, list) {
+ if (af_ops->get_link_af_size) {
+ /* AF_* + nested data */
+ size += nla_total_size(sizeof(struct nlattr)) +
+ af_ops->get_link_af_size(dev);
+ }
+ }
+
+ return size;
+}
+
static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
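
A sketch of how an address family might hook into this API, assuming the rtnl_af_ops layout implied by the callers in this patch (family plus the fill_link_af/get_link_af_size/validate_link_af/set_link_af callbacks); the inet_* names are illustrative:

static struct rtnl_af_ops inet_af_ops = {
	.family		  = AF_INET,
	.fill_link_af	  = inet_fill_link_af,
	.get_link_af_size = inet_get_link_af_size,
	.validate_link_af = inet_validate_link_af,
	.set_link_af	  = inet_set_link_af,
};

static int __init inet_rtnl_init(void)
{
	/* rtnl_af_register() takes the rtnl_mutex itself; use
	 * __rtnl_af_register() when the caller already holds it. */
	return rtnl_af_register(&inet_af_ops);
}
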
@@ -671,7 +760,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev)
+ nla_total_size(4) /* IFLA_NUM_VF */
+ rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
+ rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
- + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
+ + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
}
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -757,7 +847,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
struct nlmsghdr *nlh;
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats;
- struct nlattr *attr;
+ struct nlattr *attr, *af_spec;
+ struct rtnl_af_ops *af_ops;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
if (nlh == NULL)
@@ -866,6 +957,36 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
goto nla_put_failure;
}
+ if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
+ goto nla_put_failure;
+
+ list_for_each_entry(af_ops, &rtnl_af_ops, list) {
+ if (af_ops->fill_link_af) {
+ struct nlattr *af;
+ int err;
+
+ if (!(af = nla_nest_start(skb, af_ops->family)))
+ goto nla_put_failure;
+
+ err = af_ops->fill_link_af(skb, dev);
+
+ /*
+ * Caller may return ENODATA to indicate that there
+			 * was no data to be dumped. This is not an error; it
+ * means we should trim the attribute header and
+ * continue.
+ */
+ if (err == -ENODATA)
+ nla_nest_cancel(skb, af);
+ else if (err < 0)
+ goto nla_put_failure;
+
+ nla_nest_end(skb, af);
+ }
+ }
+
+ nla_nest_end(skb, af_spec);
+
return nlmsg_end(skb, nlh);
nla_put_failure:
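
A sketch of a fill_link_af() implementation that cooperates with the -ENODATA convention above; IFLA_INET_CONF and the two helpers are hypothetical:

static int inet_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev)
{
	if (!dev_has_inet_config(dev))		/* hypothetical helper */
		return -ENODATA;	/* nest gets trimmed, not an error */

	if (nla_put_u32(skb, IFLA_INET_CONF,	/* hypothetical attribute */
			dev_inet_conf_word(dev)))
		return -EMSGSIZE;

	return 0;
}
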
@@ -924,6 +1045,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
[IFLA_VF_PORTS] = { .type = NLA_NESTED },
[IFLA_PORT_SELF] = { .type = NLA_NESTED },
+ [IFLA_AF_SPEC] = { .type = NLA_NESTED },
};
EXPORT_SYMBOL(ifla_policy);
@@ -985,6 +1107,28 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
return -EINVAL;
}
+ if (tb[IFLA_AF_SPEC]) {
+ struct nlattr *af;
+ int rem, err;
+
+ nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
+ const struct rtnl_af_ops *af_ops;
+
+ if (!(af_ops = rtnl_af_lookup(nla_type(af))))
+ return -EAFNOSUPPORT;
+
+ if (!af_ops->set_link_af)
+ return -EOPNOTSUPP;
+
+ if (af_ops->validate_link_af) {
+ err = af_ops->validate_link_af(dev,
+ tb[IFLA_AF_SPEC]);
+ if (err < 0)
+ return err;
+ }
+ }
+ }
+
return 0;
}
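
A matching validate_link_af() sketch; the policy and attribute ids are hypothetical. Note that, as written above, the callback is handed the whole IFLA_AF_SPEC attribute rather than the per-family nest:

static const struct nla_policy inet_af_policy[IFLA_INET_MAX + 1] = {
	[IFLA_INET_CONF]	= { .type = NLA_NESTED },
};

static int inet_validate_link_af(const struct net_device *dev,
				 const struct nlattr *nla)
{
	struct nlattr *tb[IFLA_INET_MAX + 1];

	/* Reject unknown or malformed attributes up front. */
	return nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
}
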
@@ -1225,6 +1369,24 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
goto errout;
modified = 1;
}
+
+ if (tb[IFLA_AF_SPEC]) {
+ struct nlattr *af;
+ int rem;
+
+ nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
+ const struct rtnl_af_ops *af_ops;
+
+ if (!(af_ops = rtnl_af_lookup(nla_type(af))))
+ BUG();
+
+ err = af_ops->set_link_af(dev, af);
+ if (err < 0)
+ goto errout;
+
+ modified = 1;
+ }
+ }
err = 0;
errout:
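
And the corresponding set_link_af() sketch (same hypothetical attributes). By the time do_setlink() runs, validate_linkmsg() has guaranteed the family is registered, which is why the lookup above may BUG() instead of failing gracefully:

static int inet_set_link_af(struct net_device *dev,
			    const struct nlattr *nla)
{
	struct nlattr *tb[IFLA_INET_MAX + 1];

	if (nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy) < 0)
		return -EINVAL;

	if (tb[IFLA_INET_CONF])
		dev_apply_inet_conf(dev, tb[IFLA_INET_CONF]); /* hypothetical */

	return 0;
}
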
diff --git a/net/core/scm.c b/net/core/scm.c
index 413cab89017d..bbe454450801 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -79,10 +79,11 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
return -ENOMEM;
*fplp = fpl;
fpl->count = 0;
+ fpl->max = SCM_MAX_FD;
}
fpp = &fpl->fp[fpl->count];
- if (fpl->count + num > SCM_MAX_FD)
+ if (fpl->count + num > fpl->max)
return -EINVAL;
/*
@@ -331,11 +332,12 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
if (!fpl)
return NULL;
- new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL);
+ new_fpl = kmemdup(fpl, offsetof(struct scm_fp_list, fp[fpl->count]),
+ GFP_KERNEL);
if (new_fpl) {
- for (i=fpl->count-1; i>=0; i--)
+ for (i = 0; i < fpl->count; i++)
get_file(fpl->fp[i]);
- memcpy(new_fpl, fpl, sizeof(*fpl));
+ new_fpl->max = new_fpl->count;
}
return new_fpl;
}
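
The duplication above sizes the copy with offsetof(..., fp[fpl->count]) so only the populated slots are cloned, and caps max at count since the copy has no spare room. A generic sketch of the idiom (struct vec is illustrative):

#include <linux/slab.h>
#include <linux/stddef.h>

struct vec {
	int	max;		/* capacity of items[] */
	int	count;		/* slots in use */
	void	*items[];	/* flexible array member */
};

static struct vec *vec_dup(const struct vec *v)
{
	struct vec *copy;

	/* Header plus exactly v->count used slots, nothing more. */
	copy = kmemdup(v, offsetof(struct vec, items[v->count]), GFP_KERNEL);
	if (copy)
		copy->max = copy->count;	/* trimmed copy is full */
	return copy;
}
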
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 104f8444754a..19d6c21220fd 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -778,6 +778,28 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
size = SKB_DATA_ALIGN(size);
+ /* Check if we can avoid taking references on fragments if we own
+ * the last reference on skb->head. (see skb_release_data())
+ */
+ if (!skb->cloned)
+ fastpath = true;
+ else {
+ int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
+
+ fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
+ }
+
+ if (fastpath &&
+ size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
+ memmove(skb->head + size, skb_shinfo(skb),
+ offsetof(struct skb_shared_info,
+ frags[skb_shinfo(skb)->nr_frags]));
+ memmove(skb->head + nhead, skb->head,
+ skb_tail_pointer(skb) - skb->head);
+ off = nhead;
+ goto adjust_others;
+ }
+
data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
if (!data)
goto nodata;
@@ -791,17 +813,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
skb_shinfo(skb),
offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
- /* Check if we can avoid taking references on fragments if we own
- * the last reference on skb->head. (see skb_release_data())
- */
- if (!skb->cloned)
- fastpath = true;
- else {
- int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
-
- fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
- }
-
if (fastpath) {
kfree(skb->head);
} else {
@@ -816,6 +827,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
off = (data + nhead) - skb->head;
skb->head = data;
+adjust_others:
skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
skb->end = size;
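
For reference, a helper-shaped sketch of the fastpath test, assuming the dataref layout documented in skbuff.h (low bits count users of the whole buffer, high bits count payload-only users added by skb_header_release()); the helper name is illustrative:

static bool skb_head_is_exclusive(const struct sk_buff *skb)
{
	int delta;

	if (!skb->cloned)
		return true;	/* head was never shared */

	/*
	 * skb_release_data() subtracts exactly this much when the skb is
	 * freed: one whole-buffer reference, plus one payload reference
	 * if the header was given away via skb_header_release().
	 */
	delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;

	/* Equality means nobody else references skb->head. */
	return atomic_read(&skb_shinfo(skb)->dataref) == delta;
}

When the test succeeds and ksize(skb->head) already has room for the grown buffer, the data is shifted in place and the kmalloc/copy/free cycle is skipped entirely.
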
@@ -1812,7 +1824,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
long csstart;
if (skb->ip_summed == CHECKSUM_PARTIAL)
- csstart = skb->csum_start - skb_headroom(skb);
+ csstart = skb_checksum_start_offset(skb);
else
csstart = skb_headlen(skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index e5af8d5d5b50..a658aeb6d554 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -992,17 +992,18 @@ static inline void sock_lock_init(struct sock *sk)
/*
* Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
+ * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
*/
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
void *sptr = nsk->sk_security;
#endif
- BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
- sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
- sizeof(osk->sk_tx_queue_mapping));
- memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
- osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
+ memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
+
+ memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
+ osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
+
#ifdef CONFIG_SECURITY_NETWORK
nsk->sk_security = sptr;
security_sk_clone(osk, nsk);
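
A sketch of the marker idiom, assuming sk_dontcopy_begin/sk_dontcopy_end are zero-size fields bracketing the members that must survive the copy; struct and field names here are illustrative:

struct example_sock {
	int			copied_a;	/* copied: first region */
	int			dontcopy_begin[0];
	struct hlist_node	node;		/* RCU-visible linkage */
	atomic_t		refcnt;		/* must not be overwritten */
	int			dontcopy_end[0];
	int			copied_b;	/* copied: second region */
};

static void example_copy(struct example_sock *nsk,
			 const struct example_sock *osk, size_t obj_size)
{
	/* Everything before the protected window... */
	memcpy(nsk, osk, offsetof(struct example_sock, dontcopy_begin));
	/* ...and everything after it, leaving node and refcnt intact. */
	memcpy(&nsk->dontcopy_end, &osk->dontcopy_end,
	       obj_size - offsetof(struct example_sock, dontcopy_end));
}
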
@@ -1907,7 +1908,7 @@ static void sock_def_readable(struct sock *sk, int len)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (wq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
rcu_read_unlock();
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index c19bb4ee405e..7e7ca375d431 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -26,12 +26,12 @@ static struct sock_filter ptp_filter[] = {
PTP_FILTER
};
-static unsigned int classify(struct sk_buff *skb)
+static unsigned int classify(const struct sk_buff *skb)
{
if (likely(skb->dev &&
skb->dev->phydev &&
skb->dev->phydev->drv))
- return sk_run_filter(skb, ptp_filter, ARRAY_SIZE(ptp_filter));
+ return sk_run_filter(skb, ptp_filter);
else
return PTP_CLASS_NONE;
}
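
A sketch of the new calling convention: sk_run_filter() now takes only the skb and the instruction array, the length argument having been dropped since a validated program always ends in a return instruction. The accept-all program below is illustrative:

#include <linux/filter.h>

static struct sock_filter accept_all[] = {
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept whole packet */
};

static unsigned int classify_example(const struct sk_buff *skb)
{
	return sk_run_filter(skb, accept_all);
}
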