author		Michael S. Tsirkin <mst@redhat.com>	2013-12-26 15:32:51 +0200
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-01-15 15:31:37 -0800
commit		aa016e2d4009621c8d48fecb20ef46a005c75aa8 (patch)
tree		2c4c639740ec1e4b7d7d39abdc72f6ef26ddb328
parent		20d93869b19d03748b3f86d169ad830e2fbb7923 (diff)
virtio-net: make all RX paths handle errors consistently
receive_mergeable() now handles errors internally. Do the same for the
big and small packet paths, otherwise the logic is too hard to follow.

Cc: Jason Wang <jasowang@redhat.com>
Cc: David S. Miller <davem@davemloft.net>
Acked-by: Michael Dalton <mwdalton@google.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit f121159d72091f25afb22007c833e60a6845e912)
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	drivers/net/virtio_net.c	56
1 file changed, 36 insertions, 20 deletions
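For quick reference, the RX dispatch in receive_buf() after this patch reduces to the following (a condensed excerpt of the hunks below, with an explanatory comment added here; not the full function):

	/* All three receive paths now clean up after themselves and return
	 * NULL on failure, so the caller needs only one error check. */
	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, rq, buf, len);
	else if (vi->big_packets)
		skb = receive_big(dev, rq, buf);
	else
		skb = receive_small(buf, len);

	if (unlikely(!skb))
		return;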
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 52d8936bdad1..21f2b4a0f193 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -297,6 +297,34 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 	return skb;
 }
 
+static struct sk_buff *receive_small(void *buf, unsigned int len)
+{
+	struct sk_buff * skb = buf;
+
+	len -= sizeof(struct virtio_net_hdr);
+	skb_trim(skb, len);
+
+	return skb;
+}
+
+static struct sk_buff *receive_big(struct net_device *dev,
+				   struct receive_queue *rq,
+				   void *buf)
+{
+	struct page *page = buf;
+	struct sk_buff *skb = page_to_skb(rq, page, 0);
+
+	if (unlikely(!skb))
+		goto err;
+
+	return skb;
+
+err:
+	dev->stats.rx_dropped++;
+	give_pages(rq, page);
+	return NULL;
+}
+
 static struct sk_buff *receive_mergeable(struct net_device *dev,
 					 struct receive_queue *rq,
 					 void *buf,
@@ -360,7 +388,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 	struct net_device *dev = vi->dev;
 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 	struct sk_buff *skb;
-	struct page *page;
 	struct skb_vnet_hdr *hdr;
 
 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
@@ -372,26 +399,15 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 			dev_kfree_skb(buf);
 		return;
 	}
+	if (vi->mergeable_rx_bufs)
+		skb = receive_mergeable(dev, rq, buf, len);
+	else if (vi->big_packets)
+		skb = receive_big(dev, rq, buf);
+	else
+		skb = receive_small(buf, len);
 
-	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
-		skb = buf;
-		len -= sizeof(struct virtio_net_hdr);
-		skb_trim(skb, len);
-	} else {
-		page = buf;
-		if (vi->mergeable_rx_bufs) {
-			skb = receive_mergeable(dev, rq, page, len);
-			if (unlikely(!skb))
-				return;
-		} else {
-			skb = page_to_skb(rq, page, len);
-			if (unlikely(!skb)) {
-				dev->stats.rx_dropped++;
-				give_pages(rq, page);
-				return;
-			}
-		}
-	}
+	if (unlikely(!skb))
+		return;
 
 	hdr = skb_vnet_hdr(skb);