From 82fa3c776e5abba7ed6e4b4f4983d14731c37d6a Mon Sep 17 00:00:00 2001
From: KY Srinivasan
Date: Mon, 11 May 2015 15:39:46 -0700
Subject: hv_netvsc: Use the xmit_more skb flag to optimize signaling the host

Based on the information given to this driver (via the xmit_more skb
flag), we can defer signaling the host if more packets are on the way.
This will help make the host more efficient since it can potentially
process a larger batch of packets. Implement this optimization.

Signed-off-by: K. Y. Srinivasan
Signed-off-by: David S. Miller
---
 drivers/net/hyperv/netvsc.c | 43 +++++++++++++++++++++++++++----------------
 1 file changed, 27 insertions(+), 16 deletions(-)

(limited to 'drivers/net/hyperv')

diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 2d9ef533cc48..1c4f265f4e7c 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -743,6 +743,7 @@ static inline int netvsc_send_pkt(
 	u64 req_id;
 	int ret;
 	struct hv_page_buffer *pgbuf;
+	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);

 	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
 	if (packet->is_data_pkt) {
@@ -769,32 +770,42 @@ static inline int netvsc_send_pkt(
 	if (out_channel->rescind)
 		return -ENODEV;

+	/*
+	 * It is possible that once we successfully place this packet
+	 * on the ringbuffer, we may stop the queue. In that case, we want
+	 * to notify the host independent of the xmit_more flag. We don't
+	 * need to be precise here; in the worst case we may signal the host
+	 * unnecessarily.
+	 */
+	if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
+		packet->xmit_more = false;
+
 	if (packet->page_buf_cnt) {
 		pgbuf = packet->cp_partial ? packet->page_buf +
 			packet->rmsg_pgcnt : packet->page_buf;
-		ret = vmbus_sendpacket_pagebuffer(out_channel,
-						  pgbuf,
-						  packet->page_buf_cnt,
-						  &nvmsg,
-						  sizeof(struct nvsp_message),
-						  req_id);
+		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
+						      pgbuf,
+						      packet->page_buf_cnt,
+						      &nvmsg,
+						      sizeof(struct nvsp_message),
+						      req_id,
+						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+						      !packet->xmit_more);
 	} else {
-		ret = vmbus_sendpacket(
-				out_channel, &nvmsg,
-				sizeof(struct nvsp_message),
-				req_id,
-				VM_PKT_DATA_INBAND,
-				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
+					   sizeof(struct nvsp_message),
+					   req_id,
+					   VM_PKT_DATA_INBAND,
+					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+					   !packet->xmit_more);
 	}

 	if (ret == 0) {
 		atomic_inc(&net_device->num_outstanding_sends);
 		atomic_inc(&net_device->queue_sends[q_idx]);

-		if (hv_ringbuf_avail_percent(&out_channel->outbound) <
-			RING_AVAIL_PERCENT_LOWATER) {
-			netif_tx_stop_queue(netdev_get_tx_queue(
-					    ndev, q_idx));
+		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
+			netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));

 			if (atomic_read(&net_device->
 				queue_sends[q_idx]) < 1)
--
cgit v1.2.3
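
For illustration, the contract this patch leans on generalizes to any driver
with an expensive doorbell. A minimal sketch of the pattern in kernel C,
assuming this era's skb->xmit_more field; every example_* name is a
hypothetical stand-in for driver internals, not a real API:

	/* Sketch only: defer the device doorbell while the stack batches. */
	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		struct example_ring *ring = example_pick_ring(dev, skb); /* hypothetical */

		example_post_descriptor(ring, skb); /* hypothetical: enqueue only */

		/*
		 * Kick the device only when no more packets are queued
		 * behind this one, or when the ring is nearly full: if the
		 * queue is about to stop, the packet that would have
		 * delivered the deferred kick may never arrive.
		 */
		if (!skb->xmit_more || example_ring_nearly_full(ring))
			example_ring_doorbell(ring); /* hypothetical */

		return NETDEV_TX_OK;
	}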
From 06635a35d13d42b95422bba6633f175245cc644e Mon Sep 17 00:00:00 2001
From: Jiri Pirko
Date: Tue, 12 May 2015 14:56:16 +0200
Subject: flow_dissect: use programmable dissector in skb_flow_dissect and friends

Signed-off-by: Jiri Pirko
Signed-off-by: David S. Miller
---
 drivers/net/hyperv/netvsc_drv.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'drivers/net/hyperv')

diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5993c7e2d723..8e5fe888a0ec 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -196,12 +196,12 @@ static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
 	struct flow_keys flow;
 	int data_len;

-	if (!skb_flow_dissect(skb, &flow) ||
-	    !(flow.n_proto == htons(ETH_P_IP) ||
-	      flow.n_proto == htons(ETH_P_IPV6)))
+	if (!skb_flow_dissect_flow_keys(skb, &flow) ||
+	    !(flow.basic.n_proto == htons(ETH_P_IP) ||
+	      flow.basic.n_proto == htons(ETH_P_IPV6)))
 		return false;

-	if (flow.ip_proto == IPPROTO_TCP)
+	if (flow.basic.ip_proto == IPPROTO_TCP)
 		data_len = 12;
 	else
 		data_len = 8;
--
cgit v1.2.3
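
The conversion above is mechanical: with the programmable dissector, protocol
identifiers move from the top level of struct flow_keys into its .basic
member. A minimal sketch of the new call pattern, using the same
skb_flow_dissect_flow_keys() entry point as the diff (the helper name is
hypothetical):

	static bool example_is_tcp_flow(struct sk_buff *skb)
	{
		struct flow_keys flow;

		/* Returns false if the packet cannot be dissected. */
		if (!skb_flow_dissect_flow_keys(skb, &flow))
			return false;

		/* n_proto and ip_proto now live under flow.basic. */
		return (flow.basic.n_proto == htons(ETH_P_IP) ||
			flow.basic.n_proto == htons(ETH_P_IPV6)) &&
		       flow.basic.ip_proto == IPPROTO_TCP;
	}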
From 7eafd9b4005643cfc24f1daf78f4dd56ff71f559 Mon Sep 17 00:00:00 2001
From: "sixiao@microsoft.com"
Date: Thu, 14 May 2015 01:00:25 -0700
Subject: hv_netvsc: use per_cpu stats to calculate TX/RX data

Current code does not lock anything when calculating the TX and RX
stats. As a result, the RX and TX data reported by ifconfig are not
accurate in a system with high network throughput and multiple CPUs
(in my test, RX/TX = 83% between 2 Hyper-V VM nodes which have 8
vCPUs and 40G Ethernet).

This patch fixes the above issue by using per_cpu stats.
netvsc_get_stats64() summarizes TX and RX data by iterating over all
CPUs to get their respective stats.

This v2 patch addresses David's comments on the cleanup path when
netdev_alloc_pcpu_stats() fails.

Signed-off-by: Simon Xiao
Reviewed-by: K. Y. Srinivasan
Reviewed-by: Haiyang Zhang
Signed-off-by: David S. Miller
---
 drivers/net/hyperv/hyperv_net.h |  9 +++++
 drivers/net/hyperv/netvsc_drv.c | 85 +++++++++++++++++++++++++++++++++++++----
 2 files changed, 86 insertions(+), 8 deletions(-)

(limited to 'drivers/net/hyperv')

diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 41071d32bc8e..5a92b36daed6 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -611,6 +611,12 @@ struct multi_send_data {
 	u32 count; /* counter of batched packets */
 };

+struct netvsc_stats {
+	u64 packets;
+	u64 bytes;
+	struct u64_stats_sync s_sync;
+};
+
 /* The context of the netvsc device  */
 struct net_device_context {
 	/* point back to our device context */
@@ -618,6 +624,9 @@ struct net_device_context {
 	struct delayed_work dwork;
 	struct work_struct work;
 	u32 msg_enable; /* debug level */
+
+	struct netvsc_stats __percpu *tx_stats;
+	struct netvsc_stats __percpu *rx_stats;
 };

 /* Per netvsc device */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8e5fe888a0ec..0c8587240ff3 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -391,7 +391,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	u32 skb_length;
 	u32 pkt_sz;
 	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
-
+	struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);

 	/* We will atmost need two pages to describe the rndis
 	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
@@ -580,8 +580,10 @@ do_send:

 drop:
 	if (ret == 0) {
-		net->stats.tx_bytes += skb_length;
-		net->stats.tx_packets++;
+		u64_stats_update_begin(&tx_stats->s_sync);
+		tx_stats->packets++;
+		tx_stats->bytes += skb_length;
+		u64_stats_update_end(&tx_stats->s_sync);
 	} else {
 		if (ret != -EAGAIN) {
 			dev_kfree_skb_any(skb);
@@ -644,13 +646,17 @@ int netvsc_recv_callback(struct hv_device *device_obj,
 				struct ndis_tcp_ip_checksum_info *csum_info)
 {
 	struct net_device *net;
+	struct net_device_context *net_device_ctx;
 	struct sk_buff *skb;
+	struct netvsc_stats *rx_stats;

 	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
 	if (!net || net->reg_state != NETREG_REGISTERED) {
 		packet->status = NVSP_STAT_FAIL;
 		return 0;
 	}
+	net_device_ctx = netdev_priv(net);
+	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);

 	/* Allocate a skb - TODO direct I/O to pages? */
 	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
@@ -686,8 +692,10 @@ int netvsc_recv_callback(struct hv_device *device_obj,
 	skb_record_rx_queue(skb, packet->channel->
 			    offermsg.offer.sub_channel_index);

-	net->stats.rx_packets++;
-	net->stats.rx_bytes += packet->total_data_buflen;
+	u64_stats_update_begin(&rx_stats->s_sync);
+	rx_stats->packets++;
+	rx_stats->bytes += packet->total_data_buflen;
+	u64_stats_update_end(&rx_stats->s_sync);

 	/*
 	 * Pass the skb back up. Network stack will deallocate the skb when it
@@ -753,6 +761,46 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 	return 0;
 }

+static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
+						    struct rtnl_link_stats64 *t)
+{
+	struct net_device_context *ndev_ctx = netdev_priv(net);
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
+							    cpu);
+		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
+							    cpu);
+		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+		unsigned int start;
+
+		do {
+			start = u64_stats_fetch_begin_irq(&tx_stats->s_sync);
+			tx_packets = tx_stats->packets;
+			tx_bytes = tx_stats->bytes;
+		} while (u64_stats_fetch_retry_irq(&tx_stats->s_sync, start));
+
+		do {
+			start = u64_stats_fetch_begin_irq(&rx_stats->s_sync);
+			rx_packets = rx_stats->packets;
+			rx_bytes = rx_stats->bytes;
+		} while (u64_stats_fetch_retry_irq(&rx_stats->s_sync, start));
+
+		t->tx_bytes	+= tx_bytes;
+		t->tx_packets	+= tx_packets;
+		t->rx_bytes	+= rx_bytes;
+		t->rx_packets	+= rx_packets;
+	}
+
+	t->tx_dropped	= net->stats.tx_dropped;
+	t->tx_errors	= net->stats.tx_errors;
+
+	t->rx_dropped	= net->stats.rx_dropped;
+	t->rx_errors	= net->stats.rx_errors;
+
+	return t;
+}

 static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
 {
@@ -804,6 +852,7 @@ static const struct net_device_ops device_ops = {
 	.ndo_validate_addr =		eth_validate_addr,
 	.ndo_set_mac_address =		netvsc_set_mac_addr,
 	.ndo_select_queue =		netvsc_select_queue,
+	.ndo_get_stats64 =		netvsc_get_stats64,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller =		netvsc_poll_controller,
 #endif
@@ -855,6 +904,14 @@ static void netvsc_link_change(struct work_struct *w)
 		netdev_notify_peers(net);
 }

+static void netvsc_free_netdev(struct net_device *netdev)
+{
+	struct net_device_context *net_device_ctx = netdev_priv(netdev);
+
+	free_percpu(net_device_ctx->tx_stats);
+	free_percpu(net_device_ctx->rx_stats);
+	free_netdev(netdev);
+}

 static int netvsc_probe(struct hv_device *dev,
 			const struct hv_vmbus_device_id *dev_id)
@@ -883,6 +940,18 @@ static int netvsc_probe(struct hv_device *dev,
 	netdev_dbg(net, "netvsc msg_enable: %d\n",
 		   net_device_ctx->msg_enable);

+	net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
+	if (!net_device_ctx->tx_stats) {
+		free_netdev(net);
+		return -ENOMEM;
+	}
+	net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
+	if (!net_device_ctx->rx_stats) {
+		free_percpu(net_device_ctx->tx_stats);
+		free_netdev(net);
+		return -ENOMEM;
+	}
+
 	hv_set_drvdata(dev, net);
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
 	INIT_WORK(&net_device_ctx->work, do_set_multicast);
@@ -909,7 +978,7 @@ static int netvsc_probe(struct hv_device *dev,
 	ret = rndis_filter_device_add(dev, &device_info);
 	if (ret != 0) {
 		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
-		free_netdev(net);
+		netvsc_free_netdev(net);
 		hv_set_drvdata(dev, NULL);
 		return ret;
 	}
@@ -923,7 +992,7 @@ static int netvsc_probe(struct hv_device *dev,
 	if (ret != 0) {
 		pr_err("Unable to register netdev.\n");
 		rndis_filter_device_remove(dev);
-		free_netdev(net);
+		netvsc_free_netdev(net);
 	} else {
 		schedule_delayed_work(&net_device_ctx->dwork, 0);
 	}
@@ -962,7 +1031,7 @@ static int netvsc_remove(struct hv_device *dev)
 	 */
 	rndis_filter_device_remove(dev);

-	free_netdev(net);
+	netvsc_free_netdev(net);

 	return 0;
 }
--
cgit v1.2.3
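
Two details of the read side are worth calling out: it walks
for_each_possible_cpu() rather than for_each_online_cpu(), so counts
accumulated on a CPU that has since been hot-unplugged are still folded in,
and the fetch/retry loop re-reads any snapshot that raced with a writer
(u64_stats_sync is a real seqcount only on 32-bit; on 64-bit it compiles
away). The per-CPU fold, pulled out as a hypothetical helper for clarity:

	/* Sketch only: snapshot one CPU's counters consistently. */
	static void example_fold_stats(const struct netvsc_stats *s,
				       u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&s->s_sync);
			*packets = s->packets;
			*bytes = s->bytes;
		} while (u64_stats_fetch_retry_irq(&s->s_sync, start));
	}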
From 4b02b58b52fab385e120c4b4f0d90f5f3076352d Mon Sep 17 00:00:00 2001
From: "sixiao@microsoft.com"
Date: Fri, 15 May 2015 02:33:03 -0700
Subject: hv_netvsc: change member name of struct netvsc_stats

Currently the struct netvsc_stats has a member s_sync of type
u64_stats_sync. This definition breaks the kernel build, as the
netdev_alloc_pcpu_stats macro requires the member to be named syncp
(see the netdev_alloc_pcpu_stats definition in
./include/linux/netdevice.h).

This patch renames netvsc_stats's member from s_sync to syncp to fix
the build break.

Signed-off-by: Simon Xiao
Signed-off-by: David S. Miller
---
 drivers/net/hyperv/hyperv_net.h |  2 +-
 drivers/net/hyperv/netvsc_drv.c | 16 ++++++++--------
 2 files changed, 9 insertions(+), 9 deletions(-)

(limited to 'drivers/net/hyperv')

diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 5a92b36daed6..ddcc7f8d22b4 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -614,7 +614,7 @@ struct multi_send_data {
 struct netvsc_stats {
 	u64 packets;
 	u64 bytes;
-	struct u64_stats_sync s_sync;
+	struct u64_stats_sync syncp;
 };

 /* The context of the netvsc device  */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 0c8587240ff3..d9c88bc09f45 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -580,10 +580,10 @@ do_send:

 drop:
 	if (ret == 0) {
-		u64_stats_update_begin(&tx_stats->s_sync);
+		u64_stats_update_begin(&tx_stats->syncp);
 		tx_stats->packets++;
 		tx_stats->bytes += skb_length;
-		u64_stats_update_end(&tx_stats->s_sync);
+		u64_stats_update_end(&tx_stats->syncp);
 	} else {
 		if (ret != -EAGAIN) {
 			dev_kfree_skb_any(skb);
@@ -692,10 +692,10 @@ int netvsc_recv_callback(struct hv_device *device_obj,
 	skb_record_rx_queue(skb, packet->channel->
 			    offermsg.offer.sub_channel_index);

-	u64_stats_update_begin(&rx_stats->s_sync);
+	u64_stats_update_begin(&rx_stats->syncp);
 	rx_stats->packets++;
 	rx_stats->bytes += packet->total_data_buflen;
-	u64_stats_update_end(&rx_stats->s_sync);
+	u64_stats_update_end(&rx_stats->syncp);

 	/*
 	 * Pass the skb back up. Network stack will deallocate the skb when it
@@ -776,16 +776,16 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
 		unsigned int start;

 		do {
-			start = u64_stats_fetch_begin_irq(&tx_stats->s_sync);
+			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
 			tx_packets = tx_stats->packets;
 			tx_bytes = tx_stats->bytes;
-		} while (u64_stats_fetch_retry_irq(&tx_stats->s_sync, start));
+		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

 		do {
-			start = u64_stats_fetch_begin_irq(&rx_stats->s_sync);
+			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
 			rx_packets = rx_stats->packets;
 			rx_bytes = rx_stats->bytes;
-		} while (u64_stats_fetch_retry_irq(&rx_stats->s_sync, start));
+		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

 		t->tx_bytes	+= tx_bytes;
 		t->tx_packets	+= tx_packets;
--
cgit v1.2.3
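
For reference, netdev_alloc_pcpu_stats() of this era expands roughly as below
(simplified from include/linux/netdevice.h; treat the exact expansion as
approximate). The member is referenced by name inside the macro, which is why
any type passed to it must call its sync field syncp:

	#define netdev_alloc_pcpu_stats(type)				\
	({								\
		typeof(type) __percpu *pcpu_stats = alloc_percpu(type);	\
		if (pcpu_stats) {					\
			int __cpu;					\
			for_each_possible_cpu(__cpu) {			\
				typeof(type) *stat;			\
				stat = per_cpu_ptr(pcpu_stats, __cpu);	\
				u64_stats_init(&stat->syncp);		\
			}						\
		}							\
		pcpu_stats;						\
	})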
From e01ec2199ef22e2cabd7d6e68a192f3eb728029f Mon Sep 17 00:00:00 2001
From: KY Srinivasan
Date: Wed, 27 May 2015 13:16:57 -0700
Subject: hv_netvsc: Properly size the vrss queues

The current algorithm for deciding on the number of VRSS channels is
not optimal, since we open the minimum of the number of online CPUs
and the number of VRSS channels the host is offering. On a 32-VCPU
guest we could thus open 32 VRSS subchannels. Experimentation has
shown that it is best to limit the number of VRSS channels to the
number of CPUs within a NUMA node. Here is the new algorithm for
deciding on the number of sub-channels to open:

1) Pick the minimum of what the host is offering and what the driver
   in the guest is specifying as the default value.
2) Pick the minimum of (1) and the number of CPUs in the NUMA node
   the primary channel is bound to.

Signed-off-by: K. Y. Srinivasan
Signed-off-by: David S. Miller
---
 drivers/net/hyperv/hyperv_net.h   |  1 +
 drivers/net/hyperv/netvsc_drv.c   |  4 ++++
 drivers/net/hyperv/rndis_filter.c | 16 ++++++++++++++--
 3 files changed, 19 insertions(+), 2 deletions(-)

(limited to 'drivers/net/hyperv')

diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index ddcc7f8d22b4..dd4544085db3 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -161,6 +161,7 @@ struct netvsc_device_info {
 	unsigned char mac_adr[ETH_ALEN];
 	bool link_state;	/* 0 - link up, 1 - link down */
 	int  ring_size;
+	u32  max_num_vrss_chns;
 };

 enum rndis_device_state {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d9c88bc09f45..358475ed9b59 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -46,6 +46,8 @@ static int ring_size = 128;
 module_param(ring_size, int, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

+static int max_num_vrss_chns = 8;
+
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 				NETIF_MSG_LINK | NETIF_MSG_IFUP |
 				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
@@ -755,6 +757,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 	ndevctx->device_ctx = hdev;
 	hv_set_drvdata(hdev, ndev);
 	device_info.ring_size = ring_size;
+	device_info.max_num_vrss_chns = max_num_vrss_chns;
 	rndis_filter_device_add(hdev, &device_info);
 	netif_tx_wake_all_queues(ndev);

@@ -975,6 +978,7 @@ static int netvsc_probe(struct hv_device *dev,

 	/* Notify the netvsc driver of the new device */
 	device_info.ring_size = ring_size;
+	device_info.max_num_vrss_chns = max_num_vrss_chns;
 	ret = rndis_filter_device_add(dev, &device_info);
 	if (ret != 0) {
 		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
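diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 9118cea91882..006c1b8c2385 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1013,6 +1013,9 @@ int rndis_filter_device_add(struct hv_device *dev,
 	struct ndis_recv_scale_cap rsscap;
 	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
 	u32 mtu, size;
+	u32 num_rss_qs;
+	const struct cpumask *node_cpu_mask;
+	u32 num_possible_rss_qs;

 	rndis_device = get_rndis_device();
 	if (!rndis_device)
@@ -1100,9 +1103,18 @@ int rndis_filter_device_add(struct hv_device *dev,
 	if (ret || rsscap.num_recv_que < 2)
 		goto out;

+	num_rss_qs = min(device_info->max_num_vrss_chns, rsscap.num_recv_que);
+
 	net_device->max_chn = rsscap.num_recv_que;
-	net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
-			       num_online_cpus() : rsscap.num_recv_que;
+
+	/*
+	 * We will limit the VRSS channels to the number of CPUs in the NUMA
+	 * node the primary channel is currently bound to.
+	 */
+	node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
+	num_possible_rss_qs = cpumask_weight(node_cpu_mask);
+	net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
 	if (net_device->num_chn == 1)
 		goto out;
--
cgit v1.2.3

The resulting policy is effectively num_chn = min(host offer, driver default,
CPUs in the primary channel's NUMA node). Restated as a self-contained sketch
built from the same kernel primitives the diff uses (the function itself is
hypothetical):

	static u32 example_vrss_channel_count(u32 host_offer, u32 driver_default,
					      int primary_target_cpu)
	{
		const struct cpumask *node_mask =
			cpumask_of_node(cpu_to_node(primary_target_cpu));
		u32 num_rss_qs = min(driver_default, host_offer);

		/* Never spread VRSS queues beyond the primary's NUMA node. */
		return min(num_rss_qs, (u32)cpumask_weight(node_mask));
	}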
From 0a726c2b499e390b1c1fc3092bd789f2192a2d03 Mon Sep 17 00:00:00 2001
From: "K. Y. Srinivasan"
Date: Thu, 28 May 2015 17:08:06 -0700
Subject: hv_netvsc: Allocate the receive buffer from the correct NUMA node

Allocate the receive buffer from the NUMA node assigned to the primary
channel.

Signed-off-by: K. Y. Srinivasan
Signed-off-by: David S. Miller
---
 drivers/net/hyperv/netvsc.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'drivers/net/hyperv')

diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index b0249685139c..d187965eba36 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -227,13 +227,18 @@ static int netvsc_init_buf(struct hv_device *device)
 	struct netvsc_device *net_device;
 	struct nvsp_message *init_packet;
 	struct net_device *ndev;
+	int node;

 	net_device = get_outbound_net_device(device);
 	if (!net_device)
 		return -ENODEV;
 	ndev = net_device->ndev;

-	net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+	node = cpu_to_node(device->channel->target_cpu);
+	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
+	if (!net_device->recv_buf)
+		net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+
 	if (!net_device->recv_buf) {
 		netdev_err(ndev, "unable to allocate receive "
 			"buffer of size %d\n", net_device->recv_buf_size);
--
cgit v1.2.3
From 5defde5946676ee23cd6a9d0e1de899410f4a33f Mon Sep 17 00:00:00 2001
From: "K. Y. Srinivasan"
Date: Thu, 28 May 2015 17:08:07 -0700
Subject: hv_netvsc: Allocate the sendbuf in a NUMA aware way

Allocate the send buffer in a NUMA aware way.

Signed-off-by: K. Y. Srinivasan
Signed-off-by: David S. Miller
---
 drivers/net/hyperv/netvsc.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'drivers/net/hyperv')

diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d187965eba36..06de98a05622 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -326,7 +326,9 @@ static int netvsc_init_buf(struct hv_device *device)

 	/* Now setup the send buffer.
 	 */
-	net_device->send_buf = vzalloc(net_device->send_buf_size);
+	net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
+	if (!net_device->send_buf)
+		net_device->send_buf = vzalloc(net_device->send_buf_size);
 	if (!net_device->send_buf) {
 		netdev_err(ndev, "unable to allocate send "
 			"buffer of size %d\n", net_device->send_buf_size);
--
cgit v1.2.3
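
This patch and the previous one share one shape: try a node-local allocation
first, then fall back to any node, since vzalloc_node() fails when the target
node cannot satisfy the request while plain vzalloc() may still succeed from
another node. As a hypothetical common helper:

	static void *example_alloc_vmbus_buf(struct hv_device *device, size_t size)
	{
		/* Prefer memory local to the CPU servicing the primary channel. */
		int node = cpu_to_node(device->channel->target_cpu);
		void *buf = vzalloc_node(size, node);

		/* Fall back to any node rather than failing device setup. */
		return buf ? buf : vzalloc(size);
	}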