author     Linus Torvalds <torvalds@linux-foundation.org>  2014-12-11 12:20:31 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-11 12:20:31 -0800
commit     6b9e2cea428cf7af93a84bcb865e478d8bf1c165 (patch)
tree       11be387e37129fce0c4c111803df1a2e56637b60 /drivers/net
parent     14ba9a2e4bacc6f5a0dbe0de5390daedd544508f (diff)
parent     f01a2a811ae04124fc9382925038fcbbd2f0b7c8 (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:
 "virtio: virtio 1.0 support, misc patches

  This adds a lot of infrastructure for virtio 1.0 support. Notable
  missing pieces: virtio pci, virtio balloon (needs spec extension),
  vhost scsi.

  Plus, there are some minor fixes in a couple of places.

  Note: some net drivers are affected by these patches. David said he's
  fine with merging these patches through my tree.

  Rusty's on vacation, he acked using my tree for these, too"
* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (70 commits)
virtio_ccw: finalize_features error handling
virtio_ccw: future-proof finalize_features
virtio_pci: rename virtio_pci -> virtio_pci_common
virtio_pci: update file descriptions and copyright
virtio_pci: split out legacy device support
virtio_pci: setup config vector indirectly
virtio_pci: setup vqs indirectly
virtio_pci: delete vqs indirectly
virtio_pci: use priv for vq notification
virtio_pci: free up vq->priv
virtio_pci: fix coding style for structs
virtio_pci: add isr field
virtio: drop legacy_only driver flag
virtio_balloon: drop legacy_only driver flag
virtio_ccw: rev 1 devices set VIRTIO_F_VERSION_1
virtio: allow finalize_features to fail
virtio_ccw: legacy: don't negotiate rev 1/features
virtio: add API to detect legacy devices
virtio_console: fix sparse warnings
vhost: remove unnecessary forward declarations in vhost.h
...
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/macvtap.c     68
-rw-r--r--  drivers/net/tun.c        168
-rw-r--r--  drivers/net/virtio_net.c 161
3 files changed, 196 insertions, 201 deletions
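All three drivers below get the same core transformation: every multi-byte field of the vnet header is now accessed through a helper that picks the byte order at run time - little-endian for virtio 1.0 devices (or for tap/macvtap queues with IFF_VNET_LE set), guest-native for legacy devices, which is why the mismatch was invisible on little-endian x86. A minimal userspace model of that rule (the kernel's real helpers live in include/linux/virtio_byteorder.h; the names below are stand-ins, not the kernel API):

#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t virtio16; /* stand-in for the kernel's __virtio16 */

/* Little-endian on the wire when the device is virtio 1.0 (or the
 * tap/macvtap queue has IFF_VNET_LE set); native byte order otherwise. */
static inline uint16_t virtio16_to_cpu(bool little_endian, virtio16 val)
{
	return little_endian ? le16toh(val) : val;
}

static inline virtio16 cpu_to_virtio16(bool little_endian, uint16_t val)
{
	return little_endian ? htole16(val) : val;
}

int main(void)
{
	/* On a big-endian CPU these two calls disagree; on x86 they
	 * happen to produce the same value, which is why legacy code
	 * got away without any conversion there. */
	printf("legacy: 0x%04x  virtio 1.0: 0x%04x\n",
	       virtio16_to_cpu(false, 0x1234),
	       virtio16_to_cpu(true, 0x1234));
	return 0;
}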
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 880cc090dc44..af90ab5e5768 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -45,6 +45,18 @@ struct macvtap_queue {
 	struct list_head next;
 };
 
+#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_VNET_LE | IFF_MULTI_QUEUE)
+
+static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
+{
+	return __virtio16_to_cpu(q->flags & IFF_VNET_LE, val);
+}
+
+static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
+{
+	return __cpu_to_virtio16(q->flags & IFF_VNET_LE, val);
+}
+
 static struct proto macvtap_proto = {
 	.name = "macvtap",
 	.owner = THIS_MODULE,
@@ -557,7 +569,8 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
  * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
  * be shared with the tun/tap driver.
  */
-static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
+static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
+				     struct sk_buff *skb,
 				     struct virtio_net_hdr *vnet_hdr)
 {
 	unsigned short gso_type = 0;
@@ -588,13 +601,13 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
 	}
 
 	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
-		if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
-					  vnet_hdr->csum_offset))
+		if (!skb_partial_csum_set(skb, macvtap16_to_cpu(q, vnet_hdr->csum_start),
+					  macvtap16_to_cpu(q, vnet_hdr->csum_offset)))
 			return -EINVAL;
 	}
 
 	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
-		skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
+		skb_shinfo(skb)->gso_size = macvtap16_to_cpu(q, vnet_hdr->gso_size);
 		skb_shinfo(skb)->gso_type = gso_type;
 
 		/* Header must be checked, and gso_segs computed. */
@@ -604,8 +617,9 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
 	return 0;
 }
 
-static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
-				    struct virtio_net_hdr *vnet_hdr)
+static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
+				    const struct sk_buff *skb,
+				    struct virtio_net_hdr *vnet_hdr)
 {
 	memset(vnet_hdr, 0, sizeof(*vnet_hdr));
 
@@ -613,8 +627,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
 		struct skb_shared_info *sinfo = skb_shinfo(skb);
 
 		/* This is a hint as to how much should be linear. */
-		vnet_hdr->hdr_len = skb_headlen(skb);
-		vnet_hdr->gso_size = sinfo->gso_size;
+		vnet_hdr->hdr_len = cpu_to_macvtap16(q, skb_headlen(skb));
+		vnet_hdr->gso_size = cpu_to_macvtap16(q, sinfo->gso_size);
 		if (sinfo->gso_type & SKB_GSO_TCPV4)
 			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
 		else if (sinfo->gso_type & SKB_GSO_TCPV6)
@@ -628,10 +642,13 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
 		if (vlan_tx_tag_present(skb))
-			vnet_hdr->csum_start += VLAN_HLEN;
-		vnet_hdr->csum_offset = skb->csum_offset;
+			vnet_hdr->csum_start = cpu_to_macvtap16(q,
+				skb_checksum_start_offset(skb) + VLAN_HLEN);
+		else
+			vnet_hdr->csum_start = cpu_to_macvtap16(q,
+				skb_checksum_start_offset(skb));
+		vnet_hdr->csum_offset = cpu_to_macvtap16(q, skb->csum_offset);
 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
 	} /* else everything is zero */
@@ -666,12 +683,14 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 		if (err < 0)
 			goto err;
 		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
-		     vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
-							vnet_hdr.hdr_len)
-			vnet_hdr.hdr_len = vnet_hdr.csum_start +
-						vnet_hdr.csum_offset + 2;
+		     macvtap16_to_cpu(q, vnet_hdr.csum_start) +
+		     macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
+		     macvtap16_to_cpu(q, vnet_hdr.hdr_len))
+			vnet_hdr.hdr_len = cpu_to_macvtap16(q,
+				 macvtap16_to_cpu(q, vnet_hdr.csum_start) +
+				 macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
 		err = -EINVAL;
-		if (vnet_hdr.hdr_len > len)
+		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
 			goto err;
 	}
 
@@ -684,7 +703,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 		goto err;
 
 	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
-		copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
+		copylen = vnet_hdr.hdr_len ?
+			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
 		if (copylen > good_linear)
 			copylen = good_linear;
 		linear = copylen;
@@ -695,10 +715,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 
 	if (!zerocopy) {
 		copylen = len;
-		if (vnet_hdr.hdr_len > good_linear)
+		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
 			linear = good_linear;
 		else
-			linear = vnet_hdr.hdr_len;
+			linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
 	}
 
 	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
@@ -725,7 +745,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 	skb->protocol = eth_hdr(skb)->h_proto;
 
 	if (vnet_hdr_len) {
-		err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
+		err = macvtap_skb_from_vnet_hdr(q, skb, &vnet_hdr);
 		if (err)
 			goto err_kfree;
 	}
@@ -791,7 +811,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 		if ((len -= vnet_hdr_len) < 0)
 			return -EINVAL;
 
-		macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
+		macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr);
 
 		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
 			return -EFAULT;
@@ -1003,8 +1023,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
 			return -EFAULT;
 
 		ret = 0;
-		if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
-		    (IFF_NO_PI | IFF_TAP))
+		if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
 			ret = -EINVAL;
 		else
 			q->flags = u;
@@ -1036,8 +1055,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
 		return ret;
 
 	case TUNGETFEATURES:
-		if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
-			     IFF_MULTI_QUEUE, up))
+		if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up))
 			return -EFAULT;
 		return 0;
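Next, the tun.c side of the same series: the driver-private TUN_* flag values are folded into the uapi IFF_* values, and IFF_VNET_LE joins TUN_FEATURES so TUNSETIFF can request a little-endian vnet header. A hypothetical userspace sketch of opting in - it assumes IFF_VNET_LE is exported via <linux/if_tun.h> as merged here (later kernels expose the same control through the TUNSETVNETLE ioctl instead):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Open a tap device whose vnet header is always little-endian,
 * as the IFF_VNET_LE flag added by this series requests. */
static int open_le_tap(const char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	/* TUNSETIFF now accepts IFF_VNET_LE because it is in TUN_FEATURES */
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR | IFF_VNET_LE;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		perror("TUNSETIFF");
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	int fd = open_le_tap("tap0"); /* needs CAP_NET_ADMIN */

	if (fd >= 0)
		close(fd);
	return fd < 0;
}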
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4d332dc93b70..798ce70e3d61 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -103,6 +103,15 @@ do { \
 } while (0)
 #endif
 
+/* TUN device flags */
+
+/* IFF_ATTACH_QUEUE is never stored in device flags,
+ * overload it to mean fasync when stored there.
+ */
+#define TUN_FASYNC	IFF_ATTACH_QUEUE
+
+#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
+		      IFF_VNET_LE | IFF_MULTI_QUEUE)
 #define GOODCOPY_LEN 128
 
 #define FLT_EXACT_COUNT 8
@@ -196,6 +205,16 @@ struct tun_struct {
 	u32 flow_count;
 };
 
+static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
+{
+	return __virtio16_to_cpu(tun->flags & IFF_VNET_LE, val);
+}
+
+static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
+{
+	return __cpu_to_virtio16(tun->flags & IFF_VNET_LE, val);
+}
+
 static inline u32 tun_hashfn(u32 rxhash)
 {
 	return rxhash & 0x3ff;
@@ -472,7 +491,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 
 		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
 			netif_carrier_off(tun->dev);
-			if (!(tun->flags & TUN_PERSIST) &&
+			if (!(tun->flags & IFF_PERSIST) &&
 			    tun->dev->reg_state == NETREG_REGISTERED)
 				unregister_netdevice(tun->dev);
 		}
@@ -523,7 +542,7 @@ static void tun_detach_all(struct net_device *dev)
 	}
 	BUG_ON(tun->numdisabled != 0);
 
-	if (tun->flags & TUN_PERSIST)
+	if (tun->flags & IFF_PERSIST)
 		module_put(THIS_MODULE);
 }
 
@@ -541,7 +560,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
 		goto out;
 
 	err = -EBUSY;
-	if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
+	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
 		goto out;
 
 	err = -E2BIG;
@@ -920,7 +939,7 @@ static void tun_net_init(struct net_device *dev)
 	struct tun_struct *tun = netdev_priv(dev);
 
 	switch (tun->flags & TUN_TYPE_MASK) {
-	case TUN_TUN_DEV:
+	case IFF_TUN:
 		dev->netdev_ops = &tun_netdev_ops;
 
 		/* Point-to-Point TUN Device */
@@ -934,7 +953,7 @@ static void tun_net_init(struct net_device *dev)
 		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
 		break;
 
-	case TUN_TAP_DEV:
+	case IFF_TAP:
 		dev->netdev_ops = &tap_netdev_ops;
 		/* Ethernet TAP Device */
 		ether_setup(dev);
@@ -1025,7 +1044,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	int err;
 	u32 rxhash;
 
-	if (!(tun->flags & TUN_NO_PI)) {
+	if (!(tun->flags & IFF_NO_PI)) {
 		if (len < sizeof(pi))
 			return -EINVAL;
 		len -= sizeof(pi);
@@ -1035,7 +1054,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 		offset += sizeof(pi);
 	}
 
-	if (tun->flags & TUN_VNET_HDR) {
+	if (tun->flags & IFF_VNET_HDR) {
 		if (len < tun->vnet_hdr_sz)
 			return -EINVAL;
 		len -= tun->vnet_hdr_sz;
@@ -1044,18 +1063,18 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			return -EFAULT;
 
 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
-		    gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
-			gso.hdr_len = gso.csum_start + gso.csum_offset + 2;
+		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
+			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
 
-		if (gso.hdr_len > len)
+		if (tun16_to_cpu(tun, gso.hdr_len) > len)
 			return -EINVAL;
 		offset += tun->vnet_hdr_sz;
 	}
 
-	if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
+	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
 		align += NET_IP_ALIGN;
 		if (unlikely(len < ETH_HLEN ||
-			     (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
+			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
 			return -EINVAL;
 	}
 
@@ -1066,7 +1085,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 		 * enough room for skb expand head in case it is used.
 		 * The rest of the buffer is mapped from userspace.
 		 */
-		copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
+		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
 		if (copylen > good_linear)
 			copylen = good_linear;
 		linear = copylen;
@@ -1076,10 +1095,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
 	if (!zerocopy) {
 		copylen = len;
-		if (gso.hdr_len > good_linear)
+		if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
 			linear = good_linear;
 		else
-			linear = gso.hdr_len;
+			linear = tun16_to_cpu(tun, gso.hdr_len);
 	}
 
 	skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
@@ -1106,8 +1125,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	}
 
 	if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
-		if (!skb_partial_csum_set(skb, gso.csum_start,
-					  gso.csum_offset)) {
+		if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
+					  tun16_to_cpu(tun, gso.csum_offset))) {
 			tun->dev->stats.rx_frame_errors++;
 			kfree_skb(skb);
 			return -EINVAL;
@@ -1115,8 +1134,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	}
 
 	switch (tun->flags & TUN_TYPE_MASK) {
-	case TUN_TUN_DEV:
-		if (tun->flags & TUN_NO_PI) {
+	case IFF_TUN:
+		if (tun->flags & IFF_NO_PI) {
 			switch (skb->data[0] & 0xf0) {
 			case 0x40:
 				pi.proto = htons(ETH_P_IP);
@@ -1135,7 +1154,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 		skb->protocol = pi.proto;
 		skb->dev = tun->dev;
 		break;
-	case TUN_TAP_DEV:
+	case IFF_TAP:
 		skb->protocol = eth_type_trans(skb, tun->dev);
 		break;
 	}
@@ -1175,7 +1194,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 		if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
 			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
-		skb_shinfo(skb)->gso_size = gso.gso_size;
+		skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
 		if (skb_shinfo(skb)->gso_size == 0) {
 			tun->dev->stats.rx_frame_errors++;
 			kfree_skb(skb);
@@ -1241,10 +1260,10 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 	if (vlan_tx_tag_present(skb))
 		vlan_hlen = VLAN_HLEN;
 
-	if (tun->flags & TUN_VNET_HDR)
+	if (tun->flags & IFF_VNET_HDR)
 		vnet_hdr_sz = tun->vnet_hdr_sz;
 
-	if (!(tun->flags & TUN_NO_PI)) {
+	if (!(tun->flags & IFF_NO_PI)) {
 		if ((len -= sizeof(pi)) < 0)
 			return -EINVAL;
 
@@ -1267,8 +1286,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 		struct skb_shared_info *sinfo = skb_shinfo(skb);
 
 		/* This is a hint as to how much should be linear. */
-		gso.hdr_len = skb_headlen(skb);
-		gso.gso_size = sinfo->gso_size;
+		gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb));
+		gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size);
 		if (sinfo->gso_type & SKB_GSO_TCPV4)
 			gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
 		else if (sinfo->gso_type & SKB_GSO_TCPV6)
@@ -1276,12 +1295,12 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 		else {
 			pr_err("unexpected GSO type: "
 			       "0x%x, gso_size %d, hdr_len %d\n",
-			       sinfo->gso_type, gso.gso_size,
-			       gso.hdr_len);
+			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+			       tun16_to_cpu(tun, gso.hdr_len));
 			print_hex_dump(KERN_ERR, "tun: ",
 				       DUMP_PREFIX_NONE,
 				       16, 1, skb->head,
-				       min((int)gso.hdr_len, 64), true);
+				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
@@ -1292,9 +1311,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			gso.csum_start = skb_checksum_start_offset(skb) +
-					 vlan_hlen;
-			gso.csum_offset = skb->csum_offset;
+			gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) +
+						      vlan_hlen);
+			gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset);
 		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 			gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
 		} /* else everything is zero */
@@ -1521,32 +1540,7 @@ static struct proto tun_proto = {
 
 static int tun_flags(struct tun_struct *tun)
 {
-	int flags = 0;
-
-	if (tun->flags & TUN_TUN_DEV)
-		flags |= IFF_TUN;
-	else
-		flags |= IFF_TAP;
-
-	if (tun->flags & TUN_NO_PI)
-		flags |= IFF_NO_PI;
-
-	/* This flag has no real effect.  We track the value for backwards
-	 * compatibility.
-	 */
-	if (tun->flags & TUN_ONE_QUEUE)
-		flags |= IFF_ONE_QUEUE;
-
-	if (tun->flags & TUN_VNET_HDR)
-		flags |= IFF_VNET_HDR;
-
-	if (tun->flags & TUN_TAP_MQ)
-		flags |= IFF_MULTI_QUEUE;
-
-	if (tun->flags & TUN_PERSIST)
-		flags |= IFF_PERSIST;
-
-	return flags;
+	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
 }
 
 static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
@@ -1602,7 +1596,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 			return -EINVAL;
 
 		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
-		    !!(tun->flags & TUN_TAP_MQ))
+		    !!(tun->flags & IFF_MULTI_QUEUE))
 			return -EINVAL;
 
 		if (tun_not_capable(tun))
@@ -1615,7 +1609,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		if (err < 0)
 			return err;
 
-		if (tun->flags & TUN_TAP_MQ &&
+		if (tun->flags & IFF_MULTI_QUEUE &&
 		    (tun->numqueues + tun->numdisabled > 1)) {
 			/* One or more queue has already been attached, no need
 			 * to initialize the device again.
@@ -1638,11 +1632,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		/* Set dev type */
 		if (ifr->ifr_flags & IFF_TUN) {
 			/* TUN device */
-			flags |= TUN_TUN_DEV;
+			flags |= IFF_TUN;
 			name = "tun%d";
 		} else if (ifr->ifr_flags & IFF_TAP) {
 			/* TAP device */
-			flags |= TUN_TAP_DEV;
+			flags |= IFF_TAP;
 			name = "tap%d";
 		} else
 			return -EINVAL;
@@ -1706,28 +1700,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
 
-	if (ifr->ifr_flags & IFF_NO_PI)
-		tun->flags |= TUN_NO_PI;
-	else
-		tun->flags &= ~TUN_NO_PI;
-
-	/* This flag has no real effect.  We track the value for backwards
-	 * compatibility.
-	 */
-	if (ifr->ifr_flags & IFF_ONE_QUEUE)
-		tun->flags |= TUN_ONE_QUEUE;
-	else
-		tun->flags &= ~TUN_ONE_QUEUE;
-
-	if (ifr->ifr_flags & IFF_VNET_HDR)
-		tun->flags |= TUN_VNET_HDR;
-	else
-		tun->flags &= ~TUN_VNET_HDR;
-
-	if (ifr->ifr_flags & IFF_MULTI_QUEUE)
-		tun->flags |= TUN_TAP_MQ;
-	else
-		tun->flags &= ~TUN_TAP_MQ;
+	tun->flags = (tun->flags & ~TUN_FEATURES) |
+		(ifr->ifr_flags & TUN_FEATURES);
 
 	/* Make sure persistent devices do not get stuck in
 	 * xoff state.
@@ -1855,7 +1829,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 		ret = tun_attach(tun, file, false);
 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
 		tun = rtnl_dereference(tfile->tun);
-		if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
+		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
 			ret = -EINVAL;
 		else
 			__tun_detach(tfile, false);
@@ -1890,9 +1864,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 	if (cmd == TUNGETFEATURES) {
 		/* Currently this just means: "what IFF flags are valid?".
 		 * This is needed because we never checked for invalid flags on
-		 * TUNSETIFF. */
-		return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
-				IFF_VNET_HDR | IFF_MULTI_QUEUE,
+		 * TUNSETIFF.
+		 */
+		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
 				(unsigned int __user*)argp);
 	} else if (cmd == TUNSETQUEUE)
 		return tun_set_queue(file, &ifr);
@@ -1959,12 +1933,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 		/* Disable/Enable persist mode. Keep an extra reference to the
 		 * module  to prevent the module being unprobed.
 		 */
-		if (arg && !(tun->flags & TUN_PERSIST)) {
-			tun->flags |= TUN_PERSIST;
+		if (arg && !(tun->flags & IFF_PERSIST)) {
+			tun->flags |= IFF_PERSIST;
 			__module_get(THIS_MODULE);
 		}
-		if (!arg && (tun->flags & TUN_PERSIST)) {
-			tun->flags &= ~TUN_PERSIST;
+		if (!arg && (tun->flags & IFF_PERSIST)) {
+			tun->flags &= ~IFF_PERSIST;
 			module_put(THIS_MODULE);
 		}
@@ -2022,7 +1996,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 	case TUNSETTXFILTER:
 		/* Can be set only for TAPs */
 		ret = -EINVAL;
-		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
 			break;
 		ret = update_filter(&tun->txflt, (void __user *)arg);
 		break;
@@ -2081,7 +2055,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 	case TUNATTACHFILTER:
 		/* Can be set only for TAPs */
 		ret = -EINVAL;
-		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
 			break;
 		ret = -EFAULT;
 		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
@@ -2093,7 +2067,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 	case TUNDETACHFILTER:
 		/* Can be set only for TAPs */
 		ret = -EINVAL;
-		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
 			break;
 		ret = 0;
 		tun_detach_filter(tun, tun->numqueues);
@@ -2101,7 +2075,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 
 	case TUNGETFILTER:
 		ret = -EINVAL;
-		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
 			break;
 		ret = -EFAULT;
 		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
@@ -2294,10 +2268,10 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 
 	switch (tun->flags & TUN_TYPE_MASK) {
-	case TUN_TUN_DEV:
+	case IFF_TUN:
 		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
 		break;
-	case TUN_TAP_DEV:
+	case IFF_TAP:
 		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
 		break;
 	}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0bc8ead47de..b8bd7191572d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -123,6 +123,9 @@ struct virtnet_info {
 	/* Host can handle any s/g split between our header and packet data */
 	bool any_header_sg;
 
+	/* Packet virtio header size */
+	u8 hdr_len;
+
 	/* Active statistics */
 	struct virtnet_stats __percpu *stats;
 
@@ -139,21 +142,14 @@ struct virtnet_info {
 	struct notifier_block nb;
 };
 
-struct skb_vnet_hdr {
-	union {
-		struct virtio_net_hdr hdr;
-		struct virtio_net_hdr_mrg_rxbuf mhdr;
-	};
-};
-
 struct padded_vnet_hdr {
-	struct virtio_net_hdr hdr;
+	struct virtio_net_hdr_mrg_rxbuf hdr;
 	/*
-	 * virtio_net_hdr should be in a separated sg buffer because of a
-	 * QEMU bug, and data sg buffer shares same page with this header sg.
-	 * This padding makes next sg 16 byte aligned after virtio_net_hdr.
+	 * hdr is in a separate sg buffer, and data sg buffer shares same page
+	 * with this header sg. This padding makes next sg 16 byte aligned
+	 * after the header.
 	 */
-	char padding[6];
+	char padding[4];
 };
 
 /* Converting between virtqueue no. and kernel tx/rx queue no.
@@ -179,9 +175,9 @@ static int rxq2vq(int rxq)
 	return rxq * 2;
 }
 
-static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
+static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
 {
-	return (struct skb_vnet_hdr *)skb->cb;
+	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
 }
 
 /*
@@ -241,13 +237,13 @@ static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
 }
 
 /* Called from bottom half context */
-static struct sk_buff *page_to_skb(struct receive_queue *rq,
+static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+				   struct receive_queue *rq,
 				   struct page *page, unsigned int offset,
 				   unsigned int len, unsigned int truesize)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct sk_buff *skb;
-	struct skb_vnet_hdr *hdr;
+	struct virtio_net_hdr_mrg_rxbuf *hdr;
 	unsigned int copy, hdr_len, hdr_padded_len;
 	char *p;
 
@@ -260,13 +256,11 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 
 	hdr = skb_vnet_hdr(skb);
 
-	if (vi->mergeable_rx_bufs) {
-		hdr_len = sizeof hdr->mhdr;
-		hdr_padded_len = sizeof hdr->mhdr;
-	} else {
-		hdr_len = sizeof hdr->hdr;
+	hdr_len = vi->hdr_len;
+	if (vi->mergeable_rx_bufs)
+		hdr_padded_len = sizeof *hdr;
+	else
 		hdr_padded_len = sizeof(struct padded_vnet_hdr);
-	}
 
 	memcpy(hdr, p, hdr_len);
 
@@ -317,23 +311,24 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 	return skb;
 }
 
-static struct sk_buff *receive_small(void *buf, unsigned int len)
+static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len)
 {
 	struct sk_buff * skb = buf;
 
-	len -= sizeof(struct virtio_net_hdr);
+	len -= vi->hdr_len;
 	skb_trim(skb, len);
 
 	return skb;
 }
 
 static struct sk_buff *receive_big(struct net_device *dev,
+				   struct virtnet_info *vi,
 				   struct receive_queue *rq,
 				   void *buf,
 				   unsigned int len)
 {
 	struct page *page = buf;
-	struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
+	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
 
 	if (unlikely(!skb))
 		goto err;
@@ -347,18 +342,20 @@ err:
 }
 
 static struct sk_buff *receive_mergeable(struct net_device *dev,
+					 struct virtnet_info *vi,
 					 struct receive_queue *rq,
 					 unsigned long ctx,
 					 unsigned int len)
 {
 	void *buf = mergeable_ctx_to_buf_address(ctx);
-	struct skb_vnet_hdr *hdr = buf;
-	int num_buf = hdr->mhdr.num_buffers;
+	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
 	struct page *page = virt_to_head_page(buf);
 	int offset = buf - page_address(page);
 	unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
-	struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
+	struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len,
+					       truesize);
 	struct sk_buff *curr_skb = head_skb;
 
 	if (unlikely(!curr_skb))
@@ -369,7 +366,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
 		if (unlikely(!ctx)) {
 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
-				 dev->name, num_buf, hdr->mhdr.num_buffers);
+				 dev->name, num_buf,
+				 virtio16_to_cpu(vi->vdev,
+						 hdr->num_buffers));
 			dev->stats.rx_length_errors++;
 			goto err_buf;
 		}
@@ -430,15 +429,15 @@ err_buf:
 	return NULL;
 }
 
-static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
+static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+			void *buf, unsigned int len)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct net_device *dev = vi->dev;
 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 	struct sk_buff *skb;
-	struct skb_vnet_hdr *hdr;
+	struct virtio_net_hdr_mrg_rxbuf *hdr;
 
-	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
+	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
 		dev->stats.rx_length_errors++;
 		if (vi->mergeable_rx_bufs) {
@@ -454,11 +453,11 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 	}
 
 	if (vi->mergeable_rx_bufs)
-		skb = receive_mergeable(dev, rq, (unsigned long)buf, len);
+		skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
 	else if (vi->big_packets)
-		skb = receive_big(dev, rq, buf, len);
+		skb = receive_big(dev, vi, rq, buf, len);
 	else
-		skb = receive_small(buf, len);
+		skb = receive_small(vi, buf, len);
 
 	if (unlikely(!skb))
 		return;
@@ -473,8 +472,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		pr_debug("Needs csum!\n");
 		if (!skb_partial_csum_set(skb,
-					  hdr->hdr.csum_start,
-					  hdr->hdr.csum_offset))
+			  virtio16_to_cpu(vi->vdev, hdr->hdr.csum_start),
+			  virtio16_to_cpu(vi->vdev, hdr->hdr.csum_offset)))
 			goto frame_err;
 	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -514,7 +513,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
 			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
-		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
+		skb_shinfo(skb)->gso_size = virtio16_to_cpu(vi->vdev,
+							    hdr->hdr.gso_size);
 		if (skb_shinfo(skb)->gso_size == 0) {
 			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
 			goto frame_err;
@@ -535,11 +535,11 @@ frame_err:
 	dev_kfree_skb(skb);
 }
 
-static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
+static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
+			     gfp_t gfp)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct sk_buff *skb;
-	struct skb_vnet_hdr *hdr;
+	struct virtio_net_hdr_mrg_rxbuf *hdr;
 	int err;
 
 	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
@@ -550,7 +550,7 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
 
 	hdr = skb_vnet_hdr(skb);
 	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
-	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
+	sg_set_buf(rq->sg, hdr, vi->hdr_len);
 	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
 
 	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
@@ -560,7 +560,8 @@
 	return err;
 }
 
-static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
+static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
+			   gfp_t gfp)
 {
 	struct page *first, *list = NULL;
 	char *p;
@@ -591,8 +592,8 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
 	p = page_address(first);
 
 	/* rq->sg[0], rq->sg[1] share the same page */
-	/* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */
-	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));
+	/* a separated rq->sg[0] for header - required in case !any_header_sg */
+	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
 
 	/* rq->sg[1] for data packet, from offset */
 	offset = sizeof(struct padded_vnet_hdr);
@@ -660,9 +661,9 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
-static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
+static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
+			  gfp_t gfp)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
 	int err;
 	bool oom;
 
@@ -671,9 +672,9 @@
 		if (vi->mergeable_rx_bufs)
 			err = add_recvbuf_mergeable(rq, gfp);
 		else if (vi->big_packets)
-			err = add_recvbuf_big(rq, gfp);
+			err = add_recvbuf_big(vi, rq, gfp);
 		else
-			err = add_recvbuf_small(rq, gfp);
+			err = add_recvbuf_small(vi, rq, gfp);
 
 		oom = err == -ENOMEM;
 		if (err)
@@ -722,7 +723,7 @@ static void refill_work(struct work_struct *work)
 		struct receive_queue *rq = &vi->rq[i];
 
 		napi_disable(&rq->napi);
-		still_empty = !try_fill_recv(rq, GFP_KERNEL);
+		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
 		virtnet_napi_enable(rq);
 
 		/* In theory, this can happen: if we don't get any buffers in
@@ -741,12 +742,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
 
 	while (received < budget &&
 	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-		receive_buf(rq, buf, len);
+		receive_buf(vi, rq, buf, len);
 		received++;
 	}
 
 	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
-		if (!try_fill_recv(rq, GFP_ATOMIC))
+		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
 			schedule_delayed_work(&vi->refill, 0);
 	}
 
@@ -822,7 +823,7 @@ static int virtnet_open(struct net_device *dev)
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		if (i < vi->curr_queue_pairs)
 			/* Make sure we have some buffers: if oom use wq. */
-			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 				schedule_delayed_work(&vi->refill, 0);
 		virtnet_napi_enable(&vi->rq[i]);
 	}
@@ -851,18 +852,14 @@ static void free_old_xmit_skbs(struct send_queue *sq)
 
 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 {
-	struct skb_vnet_hdr *hdr;
+	struct virtio_net_hdr_mrg_rxbuf *hdr;
 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
 	struct virtnet_info *vi = sq->vq->vdev->priv;
 	unsigned num_sg;
-	unsigned hdr_len;
+	unsigned hdr_len = vi->hdr_len;
 	bool can_push;
 
 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
-	if (vi->mergeable_rx_bufs)
-		hdr_len = sizeof hdr->mhdr;
-	else
-		hdr_len = sizeof hdr->hdr;
 
 	can_push = vi->any_header_sg &&
 	    !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
@@ -870,22 +867,25 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	/* Even if we can, don't push here yet as this would skew
 	 * csum_start offset below. */
 	if (can_push)
-		hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
+		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
 	else
 		hdr = skb_vnet_hdr(skb);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
-		hdr->hdr.csum_offset = skb->csum_offset;
+		hdr->hdr.csum_start = cpu_to_virtio16(vi->vdev,
+						skb_checksum_start_offset(skb));
+		hdr->hdr.csum_offset = cpu_to_virtio16(vi->vdev,
+						skb->csum_offset);
 	} else {
 		hdr->hdr.flags = 0;
 		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
 	}
 
 	if (skb_is_gso(skb)) {
-		hdr->hdr.hdr_len = skb_headlen(skb);
-		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
+		hdr->hdr.hdr_len = cpu_to_virtio16(vi->vdev, skb_headlen(skb));
+		hdr->hdr.gso_size = cpu_to_virtio16(vi->vdev,
+						    skb_shinfo(skb)->gso_size);
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
 			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
@@ -900,7 +900,7 @@
 	}
 
 	if (vi->mergeable_rx_bufs)
-		hdr->mhdr.num_buffers = 0;
+		hdr->num_buffers = 0;
 
 	sg_init_table(sq->sg, MAX_SKB_FRAGS + 2);
 	if (can_push) {
@@ -1030,7 +1030,8 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
 				 "Failed to set mac address by vq command.\n");
 			return -EINVAL;
 		}
-	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
+	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
+		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
 		unsigned int i;
 
 		/* Naturally, this has an atomicity problem. */
@@ -1112,7 +1113,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
 
-	s.virtqueue_pairs = queue_pairs;
+	s.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
 	sg_init_one(&sg, &s, sizeof(s));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
@@ -1189,7 +1190,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	sg_init_table(sg, 2);
 
 	/* Store the unicast list and count in the front of the buffer */
-	mac_data->entries = uc_count;
+	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
 	i = 0;
 	netdev_for_each_uc_addr(ha, dev)
 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
@@ -1200,7 +1201,7 @@
 	/* multicast list and count fill the end */
 	mac_data = (void *)&mac_data->macs[uc_count][0];
 
-	mac_data->entries = mc_count;
+	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
 	i = 0;
 	netdev_for_each_mc_addr(ha, dev)
 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
@@ -1805,18 +1806,20 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
 		vi->mergeable_rx_bufs = true;
 
+	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
+	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	else
+		vi->hdr_len = sizeof(struct virtio_net_hdr);
+
 	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
 		vi->any_header_sg = true;
 
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
 		vi->has_cvq = true;
 
-	if (vi->any_header_sg) {
-		if (vi->mergeable_rx_bufs)
-			dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf);
-		else
-			dev->needed_headroom = sizeof(struct virtio_net_hdr);
-	}
+	if (vi->any_header_sg)
+		dev->needed_headroom = vi->hdr_len;
 
 	/* Use single tx/rx queue pair as default */
 	vi->curr_queue_pairs = 1;
@@ -1844,7 +1847,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 	/* Last of all, set up some receive buffers. */
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
-		try_fill_recv(&vi->rq[i], GFP_KERNEL);
+		try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
 
 		/* If we didn't even get one input buffer, we're useless. */
 		if (vi->rq[i].vq->num_free ==
@@ -1964,7 +1967,7 @@ static int virtnet_restore(struct virtio_device *vdev)
 
 	if (netif_running(vi->dev)) {
 		for (i = 0; i < vi->curr_queue_pairs; i++)
-			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
+			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 				schedule_delayed_work(&vi->refill, 0);
 
 		for (i = 0; i < vi->max_queue_pairs; i++)
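Two size rules from the virtio_net.c changes above are worth making explicit: the padded receive header stays 16-byte aligned even though its member grew (padding shrinks 6 -> 4), and vi->hdr_len collapses the scattered sizeof decisions into one probe-time choice. A userspace sanity check of both - struct layouts transcribed from the uapi linux/virtio_net.h with the __virtio16 fields simplified to uint16_t, and the virtio 1.0 rule (visible in the probe hunk) being that the header carries num_buffers whenever VIRTIO_F_VERSION_1 is negotiated, even without mergeable buffers:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct virtio_net_hdr {           /* legacy header: 10 bytes */
	uint8_t  flags, gso_type;
	uint16_t hdr_len, gso_size, csum_start, csum_offset;
};

struct virtio_net_hdr_mrg_rxbuf { /* adds num_buffers: 12 bytes */
	struct virtio_net_hdr hdr;
	uint16_t num_buffers;
};

/* Mirror of the hdr_len choice made once in virtnet_probe() above. */
static size_t vnet_hdr_len(bool mrg_rxbuf, bool version_1)
{
	return (mrg_rxbuf || version_1) ?
		sizeof(struct virtio_net_hdr_mrg_rxbuf) :
		sizeof(struct virtio_net_hdr);
}

int main(void)
{
	/* padded_vnet_hdr: old 10 + 6 == new 12 + 4 == 16 */
	assert(sizeof(struct virtio_net_hdr) + 6 == 16);
	assert(sizeof(struct virtio_net_hdr_mrg_rxbuf) + 4 == 16);

	assert(vnet_hdr_len(false, false) == 10); /* legacy, no MRG_RXBUF */
	assert(vnet_hdr_len(true,  false) == 12); /* legacy + MRG_RXBUF   */
	assert(vnet_hdr_len(false, true)  == 12); /* virtio 1.0 always    */
	printf("header size rules hold\n");
	return 0;
}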