author     Linus Torvalds <torvalds@linux-foundation.org>   2015-08-13 10:46:39 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-08-13 10:46:39 -0700
commit     26b552e0a85ba7e74d384a9624d83118d38071f7 (patch)
tree       fc3202ffaf08f65d3528dad9b6d13ada7b20e427 /drivers/net/xen-netback/netback.c
parent     2331d30dc8c121c0f350a27fda9e9154b8d3c178 (diff)
parent     e6d006938c9bda7ffd22af9d3e1257fd75941fb7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Work around a hw bug when acquiring PCI bus ownership of iwlwifi
   devices, from Emmanuel Grumbach.
2) Falling back to vmalloc in conntrack should not emit a warning, from
Pablo Neira Ayuso.
3) Fix NULL deref when rtlwifi driver is used as an AP, from Luis
Felipe Dominguez Vega.
4) Rocker doesn't free netdev on device removal, from Ido Schimmel.
5) UDP multicast early sock demux has route handling races, from Eric
Dumazet.
6) Fix L4 checksum handling in openvswitch, from Glenn Griffin.
7) Fix use-after-free in skb_set_peeked, from Herbert Xu.
8) Don't advertise NETIF_F_FRAGLIST in the virtio_net driver; this can lead
   to fraglists longer than the driver can support. From Jason Wang.
9) Fix mlx5 on non-4k-pagesize systems, from Carol L Soto.
10) Fix interrupt storm in bna driver, from Ivan Vecera.
11) Don't propagate -EBUSY from netlink_insert(), from Daniel Borkmann.
12) Fix inet request sock leak, from Eric Dumazet.
13) Fix TX interrupt masking and marking in TX descriptors of fs_enet
driver, from LEROY Christophe.
14) Get rid of rule optimizer in gianfar driver, it's buggy and unlikely
    to get fixed any time soon. From Jakub Kicinski.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (61 commits)
cosa: missing error code on failure in probe()
gianfar: remove faulty filer optimizer
gianfar: correct list membership accounting
gianfar: correct filer table writing
bonding: Gratuitous ARP gets dropped when first slave added
net: dsa: Do not override PHY interface if already configured
net: fs_enet: mask interrupts for TX partial frames.
net: fs_enet: explicitly remove I flag on TX partial frames
inet: fix possible request socket leak
inet: fix races with reqsk timers
mkiss: Fix error handling in mkiss_open()
bnx2x: Free NVRAM lock at end of each page
bnx2x: Prevent null pointer dereference on SKB release
cxgb4: missing curly braces in t4_setup_debugfs()
net-timestamp: Update skb_complete_tx_timestamp comment
ipv6: don't reject link-local nexthop on other interface
netlink: make sure -EBUSY won't escape from netlink_insert
bna: fix interrupts storm caused by erroneous packets
net: mvpp2: replace TX coalescing interrupts with hrtimer
net: mvpp2: enable proper per-CPU TX buffers unmapping
...
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--   drivers/net/xen-netback/netback.c | 62
1 file changed, 33 insertions(+), 29 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7d50711476fe..3f44b522b831 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
                                                         struct sk_buff *skb,
                                                         struct xen_netif_tx_request *txp,
-                                                        struct gnttab_map_grant_ref *gop)
+                                                        struct gnttab_map_grant_ref *gop,
+                                                        unsigned int frag_overflow,
+                                                        struct sk_buff *nskb)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 	int start;
 	pending_ring_idx_t index;
-	unsigned int nr_slots, frag_overflow = 0;
+	unsigned int nr_slots;
 
-	/* At this point shinfo->nr_frags is in fact the number of
-	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
-	 */
-	if (shinfo->nr_frags > MAX_SKB_FRAGS) {
-		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
-		BUG_ON(frag_overflow > MAX_SKB_FRAGS);
-		shinfo->nr_frags = MAX_SKB_FRAGS;
-	}
 	nr_slots = shinfo->nr_frags;
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
 	}
 
 	if (frag_overflow) {
-		struct sk_buff *nskb = xenvif_alloc_skb(0);
-		if (unlikely(nskb == NULL)) {
-			if (net_ratelimit())
-				netdev_err(queue->vif->dev,
-					   "Can't allocate the frag_list skb.\n");
-			return NULL;
-		}
 
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				     unsigned *copy_ops,
 				     unsigned *map_ops)
 {
-	struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
-	struct sk_buff *skb;
+	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+	struct sk_buff *skb, *nskb;
 	int ret;
+	unsigned int frag_overflow;
 
 	while (skb_queue_len(&queue->tx_queue) < budget) {
 		struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			break;
 		}
 
+		skb_shinfo(skb)->nr_frags = ret;
+		if (data_len < txreq.size)
+			skb_shinfo(skb)->nr_frags++;
+		/* At this point shinfo->nr_frags is in fact the number of
+		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+		 */
+		frag_overflow = 0;
+		nskb = NULL;
+		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+			nskb = xenvif_alloc_skb(0);
+			if (unlikely(nskb == NULL)) {
+				kfree_skb(skb);
+				xenvif_tx_err(queue, &txreq, idx);
+				if (net_ratelimit())
+					netdev_err(queue->vif->dev,
+						   "Can't allocate the frag_list skb.\n");
+				break;
+			}
+		}
+
 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
 			struct xen_netif_extra_info *gso;
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
 				/* Failure in xenvif_set_skb_gso is fatal. */
 				kfree_skb(skb);
+				kfree_skb(nskb);
 				break;
 			}
 		}
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		(*copy_ops)++;
 
-		skb_shinfo(skb)->nr_frags = ret;
 		if (data_len < txreq.size) {
-			skb_shinfo(skb)->nr_frags++;
 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
 					     pending_idx);
 			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 		queue->pending_cons++;
 
-		request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
-		if (request_gop == NULL) {
-			kfree_skb(skb);
-			xenvif_tx_err(queue, &txreq, idx);
-			break;
-		}
-		gop = request_gop;
+		gop = xenvif_get_requests(queue, skb, txfrags, gop,
+					  frag_overflow, nskb);
 
 		__skb_queue_tail(&queue->tx_queue, skb);
 
@@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
 		smp_wmb();
 		queue->dealloc_prod++;
 	} while (ubuf);
-	wake_up(&queue->dealloc_wq);
 	spin_unlock_irqrestore(&queue->callback_lock, flags);
 
 	if (likely(zerocopy_success))
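A note on the xen-netback hunks above: the frag_overflow bookkeeping and the allocation of the frag_list carrier skb (nskb) move out of xenvif_get_requests() and up into xenvif_tx_build_gops(), before any grant copy/map operations are queued. Because the allocation now happens first, a failure can be handled with a plain kfree_skb()/xenvif_tx_err(), and xenvif_get_requests() no longer has a NULL-return path that callers must unwind. The sketch below is only a standalone model of the overflow arithmetic from the added block; the MAX_SKB_FRAGS and XEN_NETBK_LEGACY_SLOTS_MAX values are illustrative assumptions (both are config-dependent in a real kernel), not something stated in this diff.

/* Standalone model of the frag_overflow arithmetic added to
 * xenvif_tx_build_gops() above.  Constants are illustrative
 * assumptions; only the split logic mirrors the driver code.
 */
#include <assert.h>
#include <stdio.h>

#define MAX_SKB_FRAGS              17  /* assumed; typical with 4K pages */
#define XEN_NETBK_LEGACY_SLOTS_MAX 18  /* assumed legacy slot limit      */

int main(void)
{
	for (unsigned int nr_slots = 1; nr_slots <= XEN_NETBK_LEGACY_SLOTS_MAX; nr_slots++) {
		/* nr_frags initially counts ring slots, as the moved comment notes. */
		unsigned int nr_frags = nr_slots;
		unsigned int frag_overflow = 0;

		if (nr_frags > MAX_SKB_FRAGS) {
			frag_overflow = nr_frags - MAX_SKB_FRAGS;
			assert(frag_overflow <= MAX_SKB_FRAGS); /* BUG_ON() in the driver */
			nr_frags = MAX_SKB_FRAGS;               /* rest goes to the frag_list skb */
		}
		printf("slots=%2u -> main skb frags=%2u, frag_list frags=%u%s\n",
		       nr_slots, nr_frags, frag_overflow,
		       frag_overflow ? "  (nskb allocated up front)" : "");
	}
	return 0;
}

Running it simply prints, for each possible slot count, how many fragments stay on the main skb and how many would spill into the separately allocated frag_list skb.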
