/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"
#define RING_SIZE_MIN	64
#define RETRY_US_LO	5000
#define RETRY_US_HI	10000
#define RETRY_MAX	2000	/* >10 sec */

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
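/* Example usage (value chosen purely for illustration): loading the module
 * with "modprobe hv_netvsc ring_size=512" reserves 512 pages per VMBus ring
 * (2 MiB with 4 KiB pages). The parameter is read-only in sysfs (mode 0444),
 * so it can only be set at load time; netvsc_ring_bytes is derived from it
 * in netvsc_drv_init() below.
 */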
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}

static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}
static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netif_tx_wake_all_queues(net);
	}

	if (vf_netdev) {
		/* Setting synthetic device up transparently sets
		 * slave as up. If open fails, the slave will still
		 * be offline (and not used).
		 */
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}
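/* Poll until both the inbound and outbound VMBus rings of every channel
 * have been drained by the host. With RETRY_MAX retries of
 * RETRY_US_LO..RETRY_US_HI microseconds each, this bounds the wait to
 * roughly 10-20 seconds before giving up with -ETIMEDOUT.
 */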
static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netif_tx_disable(net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}
static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->internal = 0;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}
/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change the UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;
		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;
		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}
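/* Map a flow hash to a transmit queue through the host-provided send
 * indirection table (VRSS_SEND_TAB_SIZE entries), caching the chosen
 * queue in the socket when one is available.
 */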
static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       struct net_device *sb_dev,
			       select_queue_fallback_t fallback)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
						       sb_dev, fallback);
		else
			txq = fallback(vf_netdev, skb, NULL);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (unlikely(txq >= ndev->real_num_tx_queues))
		txq -= ndev->real_num_tx_queues;

	return txq;
}
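/* Fill a hv_page_buffer array with the PFN/offset/length triples that
 * describe one virtually contiguous buffer; returns the number of
 * page-buffer entries consumed.
 */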
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused fragments at the start of the page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}
static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}
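/* Transmit path for the synthetic device: packets are redirected to the
 * VF when one is present and running, otherwise they are wrapped in an
 * RNDIS message (plus optional hash/VLAN/LSO/checksum per-packet info)
 * and handed to the host over VMBus.
 */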
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* if VF is present and up then redirect packets,
	 * already called with rcu_read_lock_bh
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */
	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb_vlan_tag_get_id(skb);
		vlan->cfi = skb_vlan_tag_get_cfi(skb);
		vlan->pri = skb_vlan_tag_get_prio(skb);
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}
/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		/* the host reports speed in 100 bps units; convert to Mbps */
		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}
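/* Recompute the IP header checksum in place. The host may hand up
 * coalesced packets whose IP header checksum was verified but not
 * recomputed, so it is rebuilt here before the packet enters the stack.
 */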
static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	iph->check = 0;
	iph->check = ip_fast_csum(iph, iph->ihl);
}

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct netvsc_channel *nvchan)
{
	struct napi_struct *napi = &nvchan->napi;
	const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
	const struct ndis_tcp_ip_checksum_info *csum_info =
						nvchan->rsc.csum_info;
	struct sk_buff *skb;
	int i;

	skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	for (i = 0; i < nvchan->rsc.cnt; i++)
		skb_put_data(skb, nvchan->rsc.data[i], nvchan->rsc.len[i]);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/* Incoming packets may have IP header checksum verified by the host.
	 * They may not have IP header checksum computed after coalescing.
	 * We compute it here if the flags are set, because on Linux, the IP
	 * checksum is always checked.
	 */
	if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
	    csum_info->receive.ip_checksum_succeeded &&
	    skb->protocol == htons(ETH_P_IP))
		netvsc_comp_ipcsum(skb);

	/* Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
			(vlan->cfi ? VLAN_CFI_MASK : 0);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}
/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct netvsc_device *net_device,
			 struct netvsc_channel *nvchan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct vmbus_channel *channel = nvchan->channel;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, nvchan);

	if (unlikely(!skb)) {
		++net_device_ctx->eth_stats.rx_no_memory;
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += nvchan->rsc.pktlen;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);
	return NVSP_STAT_SUCCESS;
}
static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

/* Alloc struct netvsc_device_info, and initialize it from either existing
 * struct netvsc_device, or from default values.
 */
static struct netvsc_device_info *netvsc_devinfo_get
			(struct netvsc_device *nvdev)
{
	struct netvsc_device_info *dev_info;

	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);

	if (!dev_info)
		return NULL;

	if (nvdev) {
		dev_info->num_chn = nvdev->num_chn;
		dev_info->send_sections = nvdev->send_section_cnt;
		dev_info->send_section_size = nvdev->send_section_size;
		dev_info->recv_sections = nvdev->recv_section_cnt;
		dev_info->recv_section_size = nvdev->recv_section_size;

		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
		       NETVSC_HASH_KEYLEN);
	} else {
		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
		dev_info->send_sections = NETVSC_DEFAULT_TX;
		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
		dev_info->recv_sections = NETVSC_DEFAULT_RX;
		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
	}

	return dev_info;
}
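/* Reconfiguration pattern used by ndo_change_mtu and the ethtool
 * set_channels/set_ringparam handlers: detach (close, drain, remove the
 * RNDIS device), tweak the saved netvsc_device_info, then attach again.
 * On failure, the callers retry the attach with the original settings.
 */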
static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* Don't continue trying to set up sub-channels */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	/* If device was up (receiving) then shutdown */
	if (netif_running(ndev)) {
		netif_tx_disable(ndev);

		ret = rndis_filter_close(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "unable to close device (ret %d).\n", ret);
			return ret;
		}

		ret = netvsc_wait_until_empty(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "Ring buffer not empty after closing rndis\n");
			return ret;
		}
	}

	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return 0;
}

static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev, dev_info);

		/* if unavailable, just proceed with one queue */
		if (ret) {
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	/* In any case device is now ready */
	netif_device_attach(ndev);

	/* Note: enable and attach happen when sub-channels setup */
	netif_carrier_off(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			return ret;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info *device_info;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	device_info = netvsc_devinfo_get(nvdev);
	if (!device_info)
		return -ENOMEM;

	device_info->num_chn = count;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(net, device_info);
	if (ret) {
		device_info->num_chn = orig;
		if (netvsc_attach(net, device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

out:
	kfree(device_info);
	return ret;
}
static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->l4_hash = HV_DEFAULT_L4HASH;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;

	dev->features = NETIF_F_LRO;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info *device_info;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	device_info = netvsc_devinfo_get(nvdev);
	if (!device_info)
		return -ENOMEM;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			goto out;
	}

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto rollback_vf;

	ndev->mtu = mtu;

	ret = netvsc_attach(ndev, device_info);
	if (!ret)
		goto out;

	/* Attempt rollback to original MTU */
	ndev->mtu = orig_mtu;

	if (netvsc_attach(ndev, device_info))
		netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
	if (vf_netdev)
		dev_set_mtu(vf_netdev, orig_mtu);

out:
	kfree(device_info);
	return ret;
}
static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}

static void netvsc_get_pcpu_stats(struct net_device *net,
				  struct netvsc_ethtool_pcpu_stats *pcpu_tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	int i;

	/* fetch percpu stats of vf */
	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats =
			per_cpu_ptr(ndev_ctx->vf_stats, i);
		struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			this_tot->vf_rx_packets = stats->rx_packets;
			this_tot->vf_tx_packets = stats->tx_packets;
			this_tot->vf_rx_bytes = stats->rx_bytes;
			this_tot->vf_tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		this_tot->rx_packets = this_tot->vf_rx_packets;
		this_tot->tx_packets = this_tot->vf_tx_packets;
		this_tot->rx_bytes   = this_tot->vf_rx_bytes;
		this_tot->tx_bytes   = this_tot->vf_tx_bytes;
	}

	/* fetch percpu stats of netvsc */
	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		struct netvsc_ethtool_pcpu_stats *this_tot =
			&pcpu_tot[nvchan->channel->target_cpu];
		u64 packets, bytes;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->tx_bytes	+= bytes;
		this_tot->tx_packets	+= packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->rx_bytes	+= bytes;
		this_tot->rx_packets	+= packets;
	}
}
static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	if (!nvdev)
		return;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes   += vf_tot.rx_bytes;
	t->tx_bytes   += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes	+= bytes;
		t->tx_packets	+= packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes	+= bytes;
		t->rx_packets	+= packets;
		t->multicast	+= multicast;
	}
}
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr, NULL);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr, NULL);
	}

	return err;
}
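/* ethtool statistics tables. The order here must match the order in which
 * netvsc_get_strings() and netvsc_get_ethtool_stats() walk the tables:
 * global stats, VF stats, per-queue stats, then per-cpu stats.
 */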
static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, pcpu_stats[] = {
	{ "cpu%u_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
	{ "cpu%u_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
	{ "cpu%u_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
	{ "cpu%u_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
	{ "cpu%u_vf_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
	{ "cpu%u_vf_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
	{ "cpu%u_vf_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
	{ "cpu%u_vf_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* statistics per cpu (rx/tx packets/bytes, including VF traffic) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN
			+ NETVSC_VF_STATS_LEN
			+ NETVSC_QUEUE_STATS_LEN(nvdev)
			+ NETVSC_PCPU_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	struct netvsc_vf_pcpu_stats sum;
	struct netvsc_ethtool_pcpu_stats *pcpu_sum;
	unsigned int start;
	u64 packets, bytes;
	int i, j, cpu;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	netvsc_get_vf_stats(dev, &sum);
	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}

	pcpu_sum = kvmalloc_array(num_possible_cpus(),
				  sizeof(struct netvsc_ethtool_pcpu_stats),
				  GFP_KERNEL);
	netvsc_get_pcpu_stats(dev, pcpu_sum);
	for_each_present_cpu(cpu) {
		struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];

		for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
			data[i++] = *(u64 *)((void *)this_sum
					     + pcpu_stats[j].offset);
	}
	kvfree(pcpu_sum);
}
static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	u8 *p = data;
	int i, cpu;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		for_each_present_cpu(cpu) {
			for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
				sprintf(p, pcpu_stats[i].name, cpu);
				p += ETH_GSTRING_LEN;
			}
		}

		break;
	}
}
static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
			 struct ethtool_rxnfc *info)
{
	const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;

	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
		if (ndc->l4_hash & HV_TCP4_L4HASH)
			info->data |= l4_flag;
		break;

	case TCP_V6_FLOW:
		if (ndc->l4_hash & HV_TCP6_L4HASH)
			info->data |= l4_flag;
		break;

	case UDP_V4_FLOW:
		if (ndc->l4_hash & HV_UDP4_L4HASH)
			info->data |= l4_flag;
		break;

	case UDP_V6_FLOW:
		if (ndc->l4_hash & HV_UDP6_L4HASH)
			info->data |= l4_flag;
		break;

	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(ndc, info);
	}
	return -EOPNOTSUPP;
}

static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
				    struct ethtool_rxnfc *info)
{
	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash |= HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash |= HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash |= HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash |= HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash &= ~HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash &= ~HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash &= ~HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash &= ~HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	return -EOPNOTSUPP;
}

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
	struct net_device_context *ndc = netdev_priv(ndev);

	if (info->cmd == ETHTOOL_SRXFH)
		return netvsc_set_rss_hash_opts(ndc, info);

	return -EOPNOTSUPP;
}
static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = rndis_dev->rx_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= ndev->num_chn)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			rndis_dev->rx_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key);
}
/* Hyper-V RNDIS protocol does not have ring in the HW sense.
 * It does have pre-allocated receive area which is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}

static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}

static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct netvsc_device_info *device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	device_info = netvsc_devinfo_get(nvdev);
	if (!device_info)
		return -ENOMEM;

	device_info->send_sections = new_tx;
	device_info->recv_sections = new_rx;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(ndev, device_info);
	if (ret) {
		device_info->send_sections = orig.tx_pending;
		device_info->recv_sections = orig.rx_pending;

		if (netvsc_attach(ndev, device_info))
			netdev_err(ndev, "restoring ringparam failed");
	}

out:
	kfree(device_info);
	return ret;
}
static int netvsc_set_features(struct net_device *ndev,
			       netdev_features_t features)
{
	netdev_features_t change = features ^ ndev->features;
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct ndis_offload_params offloads;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (!(change & NETIF_F_LRO))
		return 0;

	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	if (features & NETIF_F_LRO) {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
	} else {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
	}

	return rndis_filter_set_offload_params(ndev, nvdev, &offloads);
}

static u32 netvsc_get_msglevel(struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	return ndev_ctx->msg_enable;
}

static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	ndev_ctx->msg_enable = val;
}
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_msglevel	= netvsc_get_msglevel,
	.set_msglevel	= netvsc_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_rxnfc	= netvsc_get_rxnfc,
	.set_rxnfc	= netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam	= netvsc_get_ringparam,
	.set_ringparam	= netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_change_rx_flags =		netvsc_change_rx_flags,
	.ndo_set_rx_mode =		netvsc_set_rx_mode,
	.ndo_set_features =		netvsc_set_features,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
};
/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	/* if changes are happening, come back later */
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
		return;
	}

	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}
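/* Given a VF netdev, return the netvsc netdev it is slaved to via the
 * master upper-dev link, or NULL if it is not one of ours.
 */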
static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct net_device *dev;

	dev = netdev_master_upper_dev_get(vf_netdev);
	if (!dev || dev->netdev_ops != &device_ops)
		return NULL;	/* not a netvsc device */

	net_device_ctx = netdev_priv(dev);
	if (!rtnl_dereference(net_device_ctx->nvdev))
		return NULL;	/* device is removed */

	return dev;
}

/* Called when VF is injecting data into network stack.
 * Change the associated network device from VF to netvsc.
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_vf_pcpu_stats *pcpu_stats
		 = this_cpu_ptr(ndev_ctx->vf_stats);

	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	return RX_HANDLER_ANOTHER;
}
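/* Slave the VF to the synthetic device: register the rx_handler above so
 * VF traffic is injected into the netvsc device's stack, then link the
 * two devices with a master upper-dev relationship.
 */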
static int netvsc_vf_join(struct net_device *vf_netdev,
			  struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	int ret;

	ret = netdev_rx_handler_register(vf_netdev,
					 netvsc_vf_handle_frame, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not register netvsc VF receive handler (err = %d)\n",
			   ret);
		goto rx_handler_failed;
	}

	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
					   NULL, NULL, NULL);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not set master device %s (err = %d)\n",
			   ndev->name, ret);
		goto upper_link_failed;
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	vf_netdev->flags |= IFF_SLAVE;

	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
	return 0;

upper_link_failed:
	netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
	return ret;
}

static void __netvsc_vf_setup(struct net_device *ndev,
			      struct net_device *vf_netdev)
{
	int ret;

	/* Align MTU of VF with master */
	ret = dev_set_mtu(vf_netdev, ndev->mtu);
	if (ret)
		netdev_warn(vf_netdev,
			    "unable to change mtu to %u\n", ndev->mtu);

	/* set multicast etc flags on VF */
	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);

	/* sync address list from ndev to VF */
	netif_addr_lock_bh(ndev);
	dev_uc_sync(vf_netdev, ndev);
	dev_mc_sync(vf_netdev, ndev);
	netif_addr_unlock_bh(ndev);

	if (netif_running(ndev)) {
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(vf_netdev,
				    "unable to open: %d\n", ret);
	}
}

/* Setup VF as slave of the synthetic device.
 * Runs in workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
{
	struct net_device_context *ndev_ctx
		= container_of(w, struct net_device_context, vf_takeover.work);
	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
	struct net_device *vf_netdev;

	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
		return;
	}

	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		__netvsc_vf_setup(ndev, vf_netdev);

	rtnl_unlock();
}
/* Find netvsc by VF serial number.
 * The PCI hyperv controller records the serial number as the slot kobj name.
 */
static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
	struct device *parent = vf_netdev->dev.parent;
	struct net_device_context *ndev_ctx;
	struct pci_dev *pdev;
	u32 serial;

	if (!parent || !dev_is_pci(parent))
		return NULL; /* not a PCI device */

	pdev = to_pci_dev(parent);
	if (!pdev->slot) {
		netdev_notice(vf_netdev, "no PCI slot information\n");
		return NULL;
	}

	if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
		netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
			      pci_slot_name(pdev->slot));
		return NULL;
	}

	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
		if (!ndev_ctx->vf_alloc)
			continue;

		if (ndev_ctx->vf_serial == serial)
			return hv_get_drvdata(ndev_ctx->device_ctx);
	}

	netdev_notice(vf_netdev,
		      "no netdev found for vf serial:%u\n", serial);
	return NULL;
}
static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	int ret;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	ndev = get_netvsc_byslot(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	/* if synthetic interface is a different namespace,
	 * then move the VF to that namespace; join will be
	 * done again in that context.
	 */
	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
		ret = dev_change_net_namespace(vf_netdev,
					       dev_net(ndev), "eth%d");
		if (ret)
			netdev_err(vf_netdev,
				   "could not move to same namespace as %s: %d\n",
				   ndev->name, ret);
		else
			netdev_info(vf_netdev,
				    "VF moved to namespace with: %s\n",
				    ndev->name);
		return NOTIFY_DONE;
	}

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

	if (netvsc_vf_join(vf_netdev, ndev) != 0)
		return NOTIFY_DONE;

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

/* VF up/down change detected, schedule to change data path */
static int netvsc_vf_changed(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	bool vf_is_up = netif_running(vf_netdev);

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev)
		return NOTIFY_DONE;

	netvsc_switch_datapath(ndev, vf_is_up);
	netdev_info(ndev, "Data path switched %s VF: %s\n",
		    vf_is_up ? "to" : "from", vf_netdev->name);
	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	netdev_rx_handler_unregister(vf_netdev);
	netdev_upper_dev_unlink(vf_netdev, ndev);
	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);
	return NOTIFY_OK;
}
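/* Probe a new VMBus network device: allocate the netdev, bring up the
 * RNDIS filter with default settings, then register with the stack while
 * holding the rtnl lock (see the deadlock note inside on subchannel setup).
 */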
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info *device_info = NULL;
	struct netvsc_device *nvdev;
	int ret = -ENOMEM;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		goto no_net;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
	if (!net_device_ctx->vf_stats)
		goto no_stats;

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Initialize the number of queues to be 1, we may change it if more
	 * channels are offered later.
	 */
	netif_set_real_num_tx_queues(net, 1);
	netif_set_real_num_rx_queues(net, 1);

	/* Notify the netvsc driver of the new device */
	device_info = netvsc_devinfo_get(NULL);

	if (!device_info) {
		ret = -ENOMEM;
		goto devinfo_failed;
	}

	nvdev = rndis_filter_device_add(dev, device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		goto rndis_failed;
	}

	memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);

	/* We must get rtnl lock before scheduling nvdev->subchan_work,
	 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
	 * all subchannels to show up, but that may not happen because
	 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
	 * -> ... -> device_add() -> ... -> __device_attach() can't get
	 * the device lock, so all the subchannels can't be processed --
	 * finally netvsc_subchan_work() hangs forever.
	 */
	rtnl_lock();

	if (nvdev->num_chn > 1)
		schedule_work(&nvdev->subchan_work);

	/* hw_features computed in rndis_netdev_set_hwcaps() */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	netdev_lockdep_set_classes(net);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdevice(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		goto register_failed;
	}

	list_add(&net_device_ctx->list, &netvsc_dev_list);
	rtnl_unlock();

	kfree(device_info);
	return 0;

register_failed:
	rtnl_unlock();
	rndis_filter_device_remove(dev, nvdev);
rndis_failed:
	kfree(device_info);
devinfo_failed:
	free_percpu(net_device_ctx->vf_stats);
no_stats:
	hv_set_drvdata(dev, NULL);
	free_netdev(net);
no_net:
	return ret;
}
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct net_device *vf_netdev, *net;
	struct netvsc_device *nvdev;

	net = hv_get_drvdata(dev);
	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();
	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev)
		cancel_work_sync(&nvdev->subchan_work);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed. Also blocks mtu and channel changes.
	 */
	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		netvsc_unregister_vf(vf_netdev);

	if (nvdev)
		rndis_filter_device_remove(dev, nvdev);

	unregister_netdevice(net);
	list_del(&ndev_ctx->list);

	rtnl_unlock();

	hv_set_drvdata(dev, NULL);

	free_percpu(ndev_ctx->vf_stats);
	free_netdev(net);
	return 0;
}
static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid Vlan dev with same MAC registering as VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
	case NETDEV_DOWN:
		return netvsc_vf_changed(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};
static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %u (min allowed)\n",
			ring_size);
	}
	netvsc_ring_bytes = ring_size * PAGE_SIZE;

	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);