// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
#include <linux/bpf.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN	64
#define RETRY_US_LO	5000
#define RETRY_US_HI	10000
#define RETRY_MAX	2000	/* >10 sec */

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;

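/* ring_size is given in pages (PAGE_SIZE units). For example, a larger
 * VMBus ring can be requested at load time (hypothetical value):
 *   modprobe hv_netvsc ring_size=512
 * netvsc_ring_bytes is derived from it at module init (not shown here).
 */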
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

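/* Propagate a promiscuous/allmulti flag change on the synthetic NIC to
 * the bonded VF device, if any. Runs under RTNL, hence rtnl_dereference().
 */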
static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}

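/* Sync the unicast/multicast address lists to the VF (when present) and
 * request an RNDIS receive-filter update on the synthetic device. May run
 * outside RTNL, hence the RCU read lock.
 */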
static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}

static void netvsc_tx_enable(struct netvsc_device *nvscdev,
			     struct net_device *ndev)
{
	nvscdev->tx_disable = false;
	virt_wmb(); /* ensure queue wake up mechanism is on */

	netif_tx_wake_all_queues(ndev);
}

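/* ndo_open: open the RNDIS device and, if a VF is bonded, transparently
 * open it as well. The carrier is turned on only when the host reports
 * the link as up.
 */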
static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netvsc_tx_enable(nvdev, net);
	}

	if (vf_netdev) {
		/* Setting the synthetic device up transparently sets the
		 * slave as up. If open fails, then the slave will still
		 * be offline (and not used).
		 */
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

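/* Wait for the inbound and outbound rings of all channels to drain,
 * sleeping RETRY_US_LO..RETRY_US_HI between polls; gives up after
 * RETRY_MAX attempts (more than 10 seconds, per the definitions above).
 */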
static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive is not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}

static void netvsc_tx_disable(struct netvsc_device *nvscdev,
			      struct net_device *ndev)
{
	if (nvscdev) {
		nvscdev->tx_disable = true;
		virt_wmb(); /* ensure txq will not wake up after stop */
	}

	netif_tx_disable(ndev);
}

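/* ndo_stop: stop the transmit queues, close the RNDIS filter, wait for
 * in-flight ring traffic to drain, then close the bonded VF if present.
 */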
static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netvsc_tx_disable(nvdev, net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

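/* Append a per-packet-info (PPI) record of ppi_size bytes to the RNDIS
 * packet header and return a pointer to its payload, which the caller
 * fills in (hash value, VLAN tag, checksum or LSO info).
 */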
static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->internal = 0;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;

		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;

		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			return 0;

		__skb_set_sw_hash(skb, hash, false);
	}

	return hash;
}

static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
		else
			txq = netdev_pick_tx(vf_netdev, skb, NULL);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (unlikely(txq >= ndev->real_num_tx_queues))
		txq -= ndev->real_num_tx_queues;

	return txq;
}

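/* Describe the region [offset, offset + len) of @page as hv_page_buffer
 * entries, one per page touched, and return the number of entries used.
 * Compound pages are handled by stepping to the correct subpage first.
 */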
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  skb_frag_off(frag),
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = skb_frag_off(frag);

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

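/* Count the page buffer slots an skb needs: the linear data may straddle
 * a page boundary, and each fragment contributes one slot per page it
 * actually touches (see count_skb_frag_slots() above).
 */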
static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}

static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* If VF is present and up then redirect packets to it;
	 * we are already called with rcu_read_lock_bh held.
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If the skb is scattered across
	 * more pages than that, we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			sizeof_field(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb_vlan_tag_get_id(skb);
		vlan->cfi = skb_vlan_tag_get_cfi(skb);
		vlan->pri = skb_vlan_tag_get_prio(skb);
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	return netvsc_xmit(skb, ndev, false);
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	int rc;

	skb->queue_mapping = skb_get_rx_queue(skb);
	__skb_push(skb, ETH_HLEN);

	rc = netvsc_xmit(skb, ndev, true);

	if (dev_xmit_complete(rc))
		return;

	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
}

static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	iph->check = 0;
	iph->check = ip_fast_csum(iph, iph->ihl);
}

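/* Build an skb for a received packet: reuse the XDP buffer when one was
 * set up, otherwise copy the receive sections into a freshly allocated
 * skb; then apply VLAN, checksum and hash metadata from the host.
 */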
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct netvsc_channel *nvchan,
					     struct xdp_buff *xdp)
{
	struct napi_struct *napi = &nvchan->napi;
	const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
	const struct ndis_tcp_ip_checksum_info *csum_info =
		nvchan->rsc.csum_info;
	const u32 *hash_info = nvchan->rsc.hash_info;
	struct sk_buff *skb;
	void *xbuf = xdp->data_hard_start;
	int i;

	if (xbuf) {
		unsigned int hdroom = xdp->data - xdp->data_hard_start;
		unsigned int xlen = xdp->data_end - xdp->data;
		unsigned int frag_size = netvsc_xdp_fraglen(hdroom + xlen);

		skb = build_skb(xbuf, frag_size);

		if (!skb) {
			__free_page(virt_to_page(xbuf));
			return NULL;
		}

		skb_reserve(skb, hdroom);
		skb_put(skb, xlen);
		skb->dev = napi->dev;
	} else {
		skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);

		if (!skb)
			return NULL;

		/* Copy to skb. This copy is needed here since the memory
		 * pointed to by hv_netvsc_packet cannot be deallocated.
		 */
		for (i = 0; i < nvchan->rsc.cnt; i++)
			skb_put_data(skb, nvchan->rsc.data[i],
				     nvchan->rsc.len[i]);
	}

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/* Incoming packets may have IP header checksum verified by the host.
	 * They may not have IP header checksum computed after coalescing.
	 * We compute it here if the flags are set, because on Linux, the IP
	 * checksum is always checked.
	 */
	if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
	    csum_info->receive.ip_checksum_succeeded &&
	    skb->protocol == htons(ETH_P_IP))
		netvsc_comp_ipcsum(skb);

	/* Do L4 checksum offload if enabled and present. */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (hash_info && (net->features & NETIF_F_RXHASH))
		skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
			(vlan->cfi ? VLAN_CFI_MASK : 0);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct netvsc_device *net_device,
			 struct netvsc_channel *nvchan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct vmbus_channel *channel = nvchan->channel;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats = &nvchan->rx_stats;
	struct xdp_buff xdp;
	u32 act;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	act = netvsc_run_xdp(net, nvchan, &xdp);

	if (act != XDP_PASS && act != XDP_TX) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->xdp_drop++;
		u64_stats_update_end(&rx_stats->syncp);

		return NVSP_STAT_SUCCESS; /* consumed by XDP */
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, nvchan, &xdp);

	if (unlikely(!skb)) {
		++net_device_ctx->eth_stats.rx_no_memory;
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += nvchan->rsc.pktlen;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	if (act == XDP_TX) {
		netvsc_xdp_xmit(skb, net);
		return NVSP_STAT_SUCCESS;
	}

	napi_gro_receive(&nvchan->napi, skb);
	return NVSP_STAT_SUCCESS;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

/* Alloc struct netvsc_device_info, and initialize it from either an
 * existing struct netvsc_device, or from default values.
 */
static
struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
{
	struct netvsc_device_info *dev_info;
	struct bpf_prog *prog;

	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);

	if (!dev_info)
		return NULL;

	if (nvdev) {
		ASSERT_RTNL();

		dev_info->num_chn = nvdev->num_chn;
		dev_info->send_sections = nvdev->send_section_cnt;
		dev_info->send_section_size = nvdev->send_section_size;
		dev_info->recv_sections = nvdev->recv_section_cnt;
		dev_info->recv_section_size = nvdev->recv_section_size;

		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
		       NETVSC_HASH_KEYLEN);

		prog = netvsc_xdp_get(nvdev);
		if (prog) {
			bpf_prog_inc(prog);
			dev_info->bprog = prog;
		}
	} else {
		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
		dev_info->send_sections = NETVSC_DEFAULT_TX;
		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
		dev_info->recv_sections = NETVSC_DEFAULT_RX;
		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
	}

	return dev_info;
}

/* Free struct netvsc_device_info */
static void netvsc_devinfo_put(struct netvsc_device_info *dev_info)
{
	if (dev_info->bprog) {
		ASSERT_RTNL();
		bpf_prog_put(dev_info->bprog);
	}

	kfree(dev_info);
}

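/* Quiesce and tear down the synthetic device so it can be reconfigured:
 * stop sub-channel setup, detach any XDP program, close and drain the
 * RNDIS device, then remove it. Counterpart of netvsc_attach() below.
 */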
static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* Don't continue trying to set up sub-channels */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	netvsc_xdp_set(ndev, NULL, NULL, nvdev);

	/* If device was up (receiving) then shutdown */
	if (netif_running(ndev)) {
		netvsc_tx_disable(nvdev, ndev);

		ret = rndis_filter_close(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "unable to close device (ret %d).\n", ret);
			return ret;
		}

		ret = netvsc_wait_until_empty(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "Ring buffer not empty after closing rndis\n");
			return ret;
		}
	}

	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return 0;
}

static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	struct bpf_prog *prog;
	int ret = 0;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev, dev_info);

		/* if unavailable, just proceed with one queue */
		if (ret) {
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	prog = dev_info->bprog;
	if (prog) {
		bpf_prog_inc(prog);
		ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
		if (ret) {
			bpf_prog_put(prog);
			goto err1;
		}
	}

	/* In any case device is now ready */
	nvdev->tx_disable = false;
	netif_device_attach(ndev);

	/* Note: enable and attach happen when sub-channel setup completes */
	netif_carrier_off(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			goto err2;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;

err2:
	netif_device_detach(ndev);

err1:
	rndis_filter_device_remove(hdev, nvdev);

	return ret;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info *device_info;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->num_chn = count;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(net, device_info);
	if (ret) {
		device_info->num_chn = orig;
		if (netvsc_attach(net, device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

out:
	netvsc_devinfo_put(device_info);
	return ret;
}

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->l4_hash = HV_DEFAULT_L4HASH;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;

	dev->features = NETIF_F_LRO;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info *device_info;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			goto out;
	}

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto rollback_vf;

	ndev->mtu = mtu;

	ret = netvsc_attach(ndev, device_info);
	if (!ret)
		goto out;

	/* Attempt rollback to original MTU */
	ndev->mtu = orig_mtu;

	if (netvsc_attach(ndev, device_info))
		netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
	if (vf_netdev)
		dev_set_mtu(vf_netdev, orig_mtu);

out:
	netvsc_devinfo_put(device_info);
	return ret;
}

static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}

static void netvsc_get_pcpu_stats(struct net_device *net,
				  struct netvsc_ethtool_pcpu_stats *pcpu_tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	int i;

	/* fetch percpu stats of vf */
	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats =
			per_cpu_ptr(ndev_ctx->vf_stats, i);
		struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			this_tot->vf_rx_packets = stats->rx_packets;
			this_tot->vf_tx_packets = stats->tx_packets;
			this_tot->vf_rx_bytes = stats->rx_bytes;
			this_tot->vf_tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		this_tot->rx_packets = this_tot->vf_rx_packets;
		this_tot->tx_packets = this_tot->vf_tx_packets;
		this_tot->rx_bytes = this_tot->vf_rx_bytes;
		this_tot->tx_bytes = this_tot->vf_tx_bytes;
	}

	/* fetch percpu stats of netvsc */
	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		struct netvsc_ethtool_pcpu_stats *this_tot =
			&pcpu_tot[nvchan->channel->target_cpu];
		u64 packets, bytes;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->tx_bytes += bytes;
		this_tot->tx_packets += packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->rx_bytes += bytes;
		this_tot->rx_packets += packets;
	}
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev;
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	rcu_read_lock();

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (!nvdev)
		goto out;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes += vf_tot.rx_bytes;
	t->tx_bytes += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes += bytes;
		t->tx_packets += packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes += bytes;
		t->rx_packets += packets;
		t->multicast += multicast;
	}
out:
	rcu_read_unlock();
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr, NULL);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr, NULL);
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",   offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",      offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, pcpu_stats[] = {
	{ "cpu%u_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
	{ "cpu%u_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
	{ "cpu%u_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
	{ "cpu%u_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
	{ "cpu%u_vf_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
	{ "cpu%u_vf_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
	{ "cpu%u_vf_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
	{ "cpu%u_vf_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* statistics per queue (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))

/* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN
			+ NETVSC_VF_STATS_LEN
			+ NETVSC_QUEUE_STATS_LEN(nvdev)
			+ NETVSC_PCPU_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	struct netvsc_vf_pcpu_stats sum;
	struct netvsc_ethtool_pcpu_stats *pcpu_sum;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_drop;
	int i, j, cpu;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	netvsc_get_vf_stats(dev, &sum);
	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
			xdp_drop = qstats->xdp_drop;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
	}

	pcpu_sum = kvmalloc_array(num_possible_cpus(),
				  sizeof(struct netvsc_ethtool_pcpu_stats),
				  GFP_KERNEL);
	netvsc_get_pcpu_stats(dev, pcpu_sum);
	for_each_present_cpu(cpu) {
		struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];

		for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
			data[i++] = *(u64 *)((void *)this_sum
					     + pcpu_stats[j].offset);
	}
	kvfree(pcpu_sum);
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	u8 *p = data;
	int i, cpu;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_xdp_drop", i);
			p += ETH_GSTRING_LEN;
		}

		for_each_present_cpu(cpu) {
			for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
				sprintf(p, pcpu_stats[i].name, cpu);
				p += ETH_GSTRING_LEN;
			}
		}

		break;
	}
}

static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
			 struct ethtool_rxnfc *info)
{
	const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;

	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
		if (ndc->l4_hash & HV_TCP4_L4HASH)
			info->data |= l4_flag;

		break;

	case TCP_V6_FLOW:
		if (ndc->l4_hash & HV_TCP6_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V4_FLOW:
		if (ndc->l4_hash & HV_UDP4_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V6_FLOW:
		if (ndc->l4_hash & HV_UDP6_L4HASH)
			info->data |= l4_flag;

		break;

	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(ndc, info);
	}
	return -EOPNOTSUPP;
}

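/* The hash level can be toggled from userspace with ethtool, e.g.
 *   ethtool -N eth0 rx-flow-hash udp4 sdfn   (enable the UDP4 L4 hash)
 *   ethtool -N eth0 rx-flow-hash udp4 sd     (disable it)
 * where "eth0" is a placeholder interface name.
 */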
static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
				    struct ethtool_rxnfc *info)
{
	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash |= HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash |= HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash |= HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash |= HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash &= ~HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash &= ~HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash &= ~HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash &= ~HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	return -EOPNOTSUPP;
}

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
	struct net_device_context *ndc = netdev_priv(ndev);

	if (info->cmd == ETHTOOL_SRXFH)
		return netvsc_set_rss_hash_opts(ndc, info);

	return -EOPNOTSUPP;
}

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = ndc->rx_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= ndev->num_chn)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			ndc->rx_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key);
}

/* The Hyper-V RNDIS protocol does not have a ring in the HW sense.
 * It does have a pre-allocated receive area which is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}

static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}

static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct netvsc_device_info *device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->send_sections = new_tx;
	device_info->recv_sections = new_rx;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(ndev, device_info);
	if (ret) {
		device_info->send_sections = orig.tx_pending;
		device_info->recv_sections = orig.rx_pending;

		if (netvsc_attach(ndev, device_info))
			netdev_err(ndev, "restoring ringparam failed\n");
	}

out:
	netvsc_devinfo_put(device_info);
	return ret;
}

static netdev_features_t netvsc_fix_features(struct net_device *ndev,
					     netdev_features_t features)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev || nvdev->destroy)
		return features;

	if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
		features ^= NETIF_F_LRO;
		netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
	}

	return features;
}

static int netvsc_set_features(struct net_device *ndev,
			       netdev_features_t features)
{
	netdev_features_t change = features ^ ndev->features;
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct ndis_offload_params offloads;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (!(change & NETIF_F_LRO))
		goto syncvf;

	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	if (features & NETIF_F_LRO) {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
	} else {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
	}

	ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);

	if (ret) {
		features ^= NETIF_F_LRO;
		ndev->features = features;
	}

syncvf:
	if (!vf_netdev)
		return ret;

	vf_netdev->wanted_features = features;
	netdev_update_features(vf_netdev);

	return ret;
}

static u32 netvsc_get_msglevel(struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	return ndev_ctx->msg_enable;
}

static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	ndev_ctx->msg_enable = val;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_msglevel	= netvsc_get_msglevel,
	.set_msglevel	= netvsc_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_rxnfc	= netvsc_get_rxnfc,
	.set_rxnfc	= netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam	= netvsc_get_ringparam,
	.set_ringparam	= netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_change_rx_flags =		netvsc_change_rx_flags,
	.ndo_set_rx_mode =		netvsc_set_rx_mode,
	.ndo_fix_features =		netvsc_fix_features,
	.ndo_set_features =		netvsc_set_features,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
	.ndo_bpf =			netvsc_bpf,
};

2003/*
2004 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
2005 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
2006 * present send GARP packet to network peers with netif_notify_peers().
2007 */
static void netvsc_link_change(struct work_struct *w)
{
        struct net_device_context *ndev_ctx =
                container_of(w, struct net_device_context, dwork.work);
        struct hv_device *device_obj = ndev_ctx->device_ctx;
        struct net_device *net = hv_get_drvdata(device_obj);
        struct netvsc_device *net_device;
        struct rndis_device *rdev;
        struct netvsc_reconfig *event = NULL;
        bool notify = false, reschedule = false;
        unsigned long flags, next_reconfig, delay;

        /* if changes are happening, come back later */
        if (!rtnl_trylock()) {
                schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
                return;
        }

        net_device = rtnl_dereference(ndev_ctx->nvdev);
        if (!net_device)
                goto out_unlock;

        rdev = net_device->extension;

        next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
        if (time_is_after_jiffies(next_reconfig)) {
                /* link_watch only sends one notification with current state
                 * per second, avoid doing reconfig more frequently. Handle
                 * wrap around.
                 */
                delay = next_reconfig - jiffies;
                delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
                schedule_delayed_work(&ndev_ctx->dwork, delay);
                goto out_unlock;
        }
        ndev_ctx->last_reconfig = jiffies;

        spin_lock_irqsave(&ndev_ctx->lock, flags);
        if (!list_empty(&ndev_ctx->reconfig_events)) {
                event = list_first_entry(&ndev_ctx->reconfig_events,
                                         struct netvsc_reconfig, list);
                list_del(&event->list);
                reschedule = !list_empty(&ndev_ctx->reconfig_events);
        }
        spin_unlock_irqrestore(&ndev_ctx->lock, flags);

        if (!event)
                goto out_unlock;

        switch (event->event) {
                /* Only the following events are possible due to the check in
                 * netvsc_linkstatus_callback()
                 */
        case RNDIS_STATUS_MEDIA_CONNECT:
                if (rdev->link_state) {
                        rdev->link_state = false;
                        netif_carrier_on(net);
                        netvsc_tx_enable(net_device, net);
                } else {
                        notify = true;
                }
                kfree(event);
                break;
        case RNDIS_STATUS_MEDIA_DISCONNECT:
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
                        netvsc_tx_disable(net_device, net);
                }
                kfree(event);
                break;
        case RNDIS_STATUS_NETWORK_CHANGE:
                /* Only makes sense if carrier is present */
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
                        netvsc_tx_disable(net_device, net);
                        event->event = RNDIS_STATUS_MEDIA_CONNECT;
                        spin_lock_irqsave(&ndev_ctx->lock, flags);
                        list_add(&event->list, &ndev_ctx->reconfig_events);
                        spin_unlock_irqrestore(&ndev_ctx->lock, flags);
                        reschedule = true;
                }
                break;
        }

        rtnl_unlock();

        if (notify)
                netdev_notify_peers(net);

        /* link_watch only sends one notification with current state per
         * second, handle next reconfig event in 2 seconds.
         */
        if (reschedule)
                schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

        return;

out_unlock:
        rtnl_unlock();
}

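/* Map a VF netdev to its netvsc master device, if any. The VF's master
 * upper device is the netvsc device exactly when its netdev_ops match
 * ours. Requires rtnl, both for netdev_master_upper_dev_get() and for
 * the rtnl_dereference() of nvdev.
 */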
static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
        struct net_device_context *net_device_ctx;
        struct net_device *dev;

        dev = netdev_master_upper_dev_get(vf_netdev);
        if (!dev || dev->netdev_ops != &device_ops)
                return NULL;    /* not a netvsc device */

        net_device_ctx = netdev_priv(dev);
        if (!rtnl_dereference(net_device_ctx->nvdev))
                return NULL;    /* device is removed */

        return dev;
}

/* Called when VF is injecting data into network stack.
 * Change the associated network device from VF to netvsc.
 * Note: already called with rcu_read_lock held.
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
        struct net_device_context *ndev_ctx = netdev_priv(ndev);
        struct netvsc_vf_pcpu_stats *pcpu_stats
                 = this_cpu_ptr(ndev_ctx->vf_stats);

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return RX_HANDLER_CONSUMED;

        *pskb = skb;

        skb->dev = ndev;

        u64_stats_update_begin(&pcpu_stats->syncp);
        pcpu_stats->rx_packets++;
        pcpu_stats->rx_bytes += skb->len;
        u64_stats_update_end(&pcpu_stats->syncp);

        return RX_HANDLER_ANOTHER;
}

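/* Enslave the VF to the synthetic device: divert its receive path
 * through netvsc_vf_handle_frame(), make the synthetic device its
 * master upper device, and schedule the delayed takeover that copies
 * our configuration onto the VF.
 */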
static int netvsc_vf_join(struct net_device *vf_netdev,
                          struct net_device *ndev)
{
        struct net_device_context *ndev_ctx = netdev_priv(ndev);
        int ret;

        ret = netdev_rx_handler_register(vf_netdev,
                                         netvsc_vf_handle_frame, ndev);
        if (ret != 0) {
                netdev_err(vf_netdev,
                           "can not register netvsc VF receive handler (err = %d)\n",
                           ret);
                goto rx_handler_failed;
        }

        ret = netdev_master_upper_dev_link(vf_netdev, ndev,
                                           NULL, NULL, NULL);
        if (ret != 0) {
                netdev_err(vf_netdev,
                           "can not set master device %s (err = %d)\n",
                           ndev->name, ret);
                goto upper_link_failed;
        }

        /* set slave flag before open to prevent IPv6 addrconf */
        vf_netdev->flags |= IFF_SLAVE;

        schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

        call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

        netdev_info(vf_netdev, "joined to %s\n", ndev->name);
        return 0;

upper_link_failed:
        netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
        return ret;
}

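/* Propagate the synthetic device's configuration (MTU, flags, address
 * filters) to the VF, and open the VF if the synthetic device is
 * running. Called under rtnl from netvsc_vf_setup().
 */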
static void __netvsc_vf_setup(struct net_device *ndev,
                              struct net_device *vf_netdev)
{
        int ret;

        /* Align MTU of VF with master */
        ret = dev_set_mtu(vf_netdev, ndev->mtu);
        if (ret)
                netdev_warn(vf_netdev,
                            "unable to change mtu to %u\n", ndev->mtu);

        /* set multicast etc flags on VF */
        dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);

        /* sync address list from ndev to VF */
        netif_addr_lock_bh(ndev);
        dev_uc_sync(vf_netdev, ndev);
        dev_mc_sync(vf_netdev, ndev);
        netif_addr_unlock_bh(ndev);

        if (netif_running(ndev)) {
                ret = dev_open(vf_netdev, NULL);
                if (ret)
                        netdev_warn(vf_netdev,
                                    "unable to open: %d\n", ret);
        }
}

/* Set up VF as slave of the synthetic device.
 * Runs in workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
{
        struct net_device_context *ndev_ctx
                = container_of(w, struct net_device_context, vf_takeover.work);
        struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
        struct net_device *vf_netdev;

        if (!rtnl_trylock()) {
                schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
                return;
        }

        vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
        if (vf_netdev)
                __netvsc_vf_setup(ndev, vf_netdev);

        rtnl_unlock();
}

/* Find netvsc by VF serial number.
 * The PCI hyperv controller records the serial number as the slot kobj name.
 */
static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
        struct device *parent = vf_netdev->dev.parent;
        struct net_device_context *ndev_ctx;
        struct pci_dev *pdev;
        u32 serial;

        if (!parent || !dev_is_pci(parent))
                return NULL; /* not a PCI device */

        pdev = to_pci_dev(parent);
        if (!pdev->slot) {
                netdev_notice(vf_netdev, "no PCI slot information\n");
                return NULL;
        }

        if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
                netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
                              pci_slot_name(pdev->slot));
                return NULL;
        }

        list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
                if (!ndev_ctx->vf_alloc)
                        continue;

                if (ndev_ctx->vf_serial == serial)
                        return hv_get_drvdata(ndev_ctx->device_ctx);
        }

        netdev_notice(vf_netdev,
                      "no netdev found for vf serial:%u\n", serial);
        return NULL;
}

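/* NETDEV_REGISTER notifier handler: pair a freshly registered VF with
 * its synthetic twin, found by PCI slot serial. If the two are in
 * different net namespaces, move the VF first and let the join happen
 * again from that namespace. On success the VF inherits our features
 * and any attached XDP program.
 */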
static int netvsc_register_vf(struct net_device *vf_netdev)
{
        struct net_device_context *net_device_ctx;
        struct netvsc_device *netvsc_dev;
        struct bpf_prog *prog;
        struct net_device *ndev;
        int ret;

        if (vf_netdev->addr_len != ETH_ALEN)
                return NOTIFY_DONE;

        ndev = get_netvsc_byslot(vf_netdev);
        if (!ndev)
                return NOTIFY_DONE;

        net_device_ctx = netdev_priv(ndev);
        netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
        if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
                return NOTIFY_DONE;

        /* if the synthetic interface is in a different namespace,
         * then move the VF to that namespace; join will be
         * done again in that context.
         */
        if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
                ret = dev_change_net_namespace(vf_netdev,
                                               dev_net(ndev), "eth%d");
                if (ret)
                        netdev_err(vf_netdev,
                                   "could not move to same namespace as %s: %d\n",
                                   ndev->name, ret);
                else
                        netdev_info(vf_netdev,
                                    "VF moved to namespace with: %s\n",
                                    ndev->name);
                return NOTIFY_DONE;
        }

        netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

        if (netvsc_vf_join(vf_netdev, ndev) != 0)
                return NOTIFY_DONE;

        dev_hold(vf_netdev);
        rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);

        vf_netdev->wanted_features = ndev->features;
        netdev_update_features(vf_netdev);

        prog = netvsc_xdp_get(netvsc_dev);
        netvsc_vf_setxdp(vf_netdev, prog);

        return NOTIFY_OK;
}

/* VF up/down change detected; switch the data path accordingly */
static int netvsc_vf_changed(struct net_device *vf_netdev)
{
        struct net_device_context *net_device_ctx;
        struct netvsc_device *netvsc_dev;
        struct net_device *ndev;
        bool vf_is_up = netif_running(vf_netdev);

        ndev = get_netvsc_byref(vf_netdev);
        if (!ndev)
                return NOTIFY_DONE;

        net_device_ctx = netdev_priv(ndev);
        netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
        if (!netvsc_dev)
                return NOTIFY_DONE;

        netvsc_switch_datapath(ndev, vf_is_up);
        netdev_info(ndev, "Data path switched %s VF: %s\n",
                    vf_is_up ? "to" : "from", vf_netdev->name);

        return NOTIFY_OK;
}

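/* NETDEV_UNREGISTER notifier handler: undo netvsc_vf_join() and drop
 * the reference taken in netvsc_register_vf().
 */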
static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
        struct net_device *ndev;
        struct net_device_context *net_device_ctx;

        ndev = get_netvsc_byref(vf_netdev);
        if (!ndev)
                return NOTIFY_DONE;

        net_device_ctx = netdev_priv(ndev);
        cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

        netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

        netvsc_vf_setxdp(vf_netdev, NULL);

        netdev_rx_handler_unregister(vf_netdev);
        netdev_upper_dev_unlink(vf_netdev, ndev);
        RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
        dev_put(vf_netdev);

        return NOTIFY_OK;
}

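/* vmbus probe: allocate and initialize the net_device, create the
 * netvsc/RNDIS devices, then register the netdev. Registration happens
 * under rtnl; see the comment before rtnl_lock() below for why the
 * ordering with subchan_work matters.
 */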
static int netvsc_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
{
        struct net_device *net = NULL;
        struct net_device_context *net_device_ctx;
        struct netvsc_device_info *device_info = NULL;
        struct netvsc_device *nvdev;
        int ret = -ENOMEM;

        net = alloc_etherdev_mq(sizeof(struct net_device_context),
                                VRSS_CHANNEL_MAX);
        if (!net)
                goto no_net;

        netif_carrier_off(net);

        netvsc_init_settings(net);

        net_device_ctx = netdev_priv(net);
        net_device_ctx->device_ctx = dev;
        net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
        if (netif_msg_probe(net_device_ctx))
                netdev_dbg(net, "netvsc msg_enable: %d\n",
                           net_device_ctx->msg_enable);

        hv_set_drvdata(dev, net);

        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

        spin_lock_init(&net_device_ctx->lock);
        INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
        INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

        net_device_ctx->vf_stats
                = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
        if (!net_device_ctx->vf_stats)
                goto no_stats;

        net->netdev_ops = &device_ops;
        net->ethtool_ops = &ethtool_ops;
        SET_NETDEV_DEV(net, &dev->device);

        /* We always need headroom for the RNDIS header */
        net->needed_headroom = RNDIS_AND_PPI_SIZE;

        /* Initialize the number of queues to be 1; we may change it if
         * more channels are offered later.
         */
        netif_set_real_num_tx_queues(net, 1);
        netif_set_real_num_rx_queues(net, 1);

        /* Notify the netvsc driver of the new device */
        device_info = netvsc_devinfo_get(NULL);

        if (!device_info) {
                ret = -ENOMEM;
                goto devinfo_failed;
        }

        nvdev = rndis_filter_device_add(dev, device_info);
        if (IS_ERR(nvdev)) {
                ret = PTR_ERR(nvdev);
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
                goto rndis_failed;
        }

        memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);

        /* We must get rtnl lock before scheduling nvdev->subchan_work,
         * otherwise netvsc_subchan_work() can get rtnl lock first and wait
         * for all subchannels to show up, but that may not happen because
         * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
         * -> ... -> device_add() -> ... -> __device_attach() can't get
         * the device lock, so all the subchannels can't be processed --
         * finally netvsc_subchan_work() hangs forever.
         */
        rtnl_lock();

        if (nvdev->num_chn > 1)
                schedule_work(&nvdev->subchan_work);

        /* hw_features computed in rndis_netdev_set_hwcaps() */
        net->features = net->hw_features |
                NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX |
                NETIF_F_HW_VLAN_CTAG_RX;
        net->vlan_features = net->features;

        /* MTU range: 68 - 1500 or 65521 */
        net->min_mtu = NETVSC_MTU_MIN;
        if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
                net->max_mtu = NETVSC_MTU - ETH_HLEN;
        else
                net->max_mtu = ETH_DATA_LEN;

        nvdev->tx_disable = false;

        ret = register_netdevice(net);
        if (ret != 0) {
                pr_err("Unable to register netdev.\n");
                goto register_failed;
        }

        list_add(&net_device_ctx->list, &netvsc_dev_list);
        rtnl_unlock();

        netvsc_devinfo_put(device_info);
        return 0;

register_failed:
        rtnl_unlock();
        rndis_filter_device_remove(dev, nvdev);
rndis_failed:
        netvsc_devinfo_put(device_info);
devinfo_failed:
        free_percpu(net_device_ctx->vf_stats);
no_stats:
        hv_set_drvdata(dev, NULL);
        free_netdev(net);
no_net:
        return ret;
}

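/* vmbus remove: unregister any paired VF, remove the RNDIS device and
 * unregister the netdev, all under rtnl, then free our state.
 */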
static int netvsc_remove(struct hv_device *dev)
{
        struct net_device_context *ndev_ctx;
        struct net_device *vf_netdev, *net;
        struct netvsc_device *nvdev;

        net = hv_get_drvdata(dev);
        if (net == NULL) {
                dev_err(&dev->device, "No net device to remove\n");
                return 0;
        }

        ndev_ctx = netdev_priv(net);

        cancel_delayed_work_sync(&ndev_ctx->dwork);

        rtnl_lock();
        nvdev = rtnl_dereference(ndev_ctx->nvdev);
        if (nvdev) {
                cancel_work_sync(&nvdev->subchan_work);
                netvsc_xdp_set(net, NULL, NULL, nvdev);
        }

        /*
         * Call to the vsc driver to let it know that the device is being
         * removed. Also blocks MTU and channel changes.
         */
        vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
        if (vf_netdev)
                netvsc_unregister_vf(vf_netdev);

        if (nvdev)
                rndis_filter_device_remove(dev, nvdev);

        unregister_netdevice(net);
        list_del(&ndev_ctx->list);

        rtnl_unlock();

        hv_set_drvdata(dev, NULL);

        free_percpu(ndev_ctx->vf_stats);
        free_netdev(net);
        return 0;
}

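/* Hibernation: save the current device config and detach from the
 * netvsc device so netvsc_resume() can re-attach with the same
 * settings.
 */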
static int netvsc_suspend(struct hv_device *dev)
{
        struct net_device_context *ndev_ctx;
        struct net_device *vf_netdev, *net;
        struct netvsc_device *nvdev;
        int ret;

        net = hv_get_drvdata(dev);

        ndev_ctx = netdev_priv(net);
        cancel_delayed_work_sync(&ndev_ctx->dwork);

        rtnl_lock();

        nvdev = rtnl_dereference(ndev_ctx->nvdev);
        if (nvdev == NULL) {
                ret = -ENODEV;
                goto out;
        }

        vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
        if (vf_netdev)
                netvsc_unregister_vf(vf_netdev);

        /* Save the current config info */
        ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);

        ret = netvsc_detach(net, nvdev);
out:
        rtnl_unlock();

        return ret;
}

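/* Hibernation: re-attach using the config saved by netvsc_suspend() */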
static int netvsc_resume(struct hv_device *dev)
{
        struct net_device *net = hv_get_drvdata(dev);
        struct net_device_context *net_device_ctx;
        struct netvsc_device_info *device_info;
        int ret;

        rtnl_lock();

        net_device_ctx = netdev_priv(net);
        device_info = net_device_ctx->saved_netvsc_dev_info;

        ret = netvsc_attach(net, device_info);

        netvsc_devinfo_put(device_info);
        net_device_ctx->saved_netvsc_dev_info = NULL;

        rtnl_unlock();

        return ret;
}

static const struct hv_vmbus_device_id id_table[] = {
        /* Network guid */
        { HV_NIC_GUID, },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only netvsc driver instance */
static struct hv_driver netvsc_drv = {
        .name = KBUILD_MODNAME,
        .id_table = id_table,
        .probe = netvsc_probe,
        .remove = netvsc_remove,
        .suspend = netvsc_suspend,
        .resume = netvsc_resume,
        .driver = {
                .probe_type = PROBE_FORCE_SYNCHRONOUS,
        },
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
                               unsigned long event, void *ptr)
{
        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

        /* Skip our own events */
        if (event_dev->netdev_ops == &device_ops)
                return NOTIFY_DONE;

        /* Avoid non-Ethernet type devices */
        if (event_dev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        /* Avoid VLAN devices with the same MAC registering as VF */
        if (is_vlan_dev(event_dev))
                return NOTIFY_DONE;

        /* Avoid bonding master devices with the same MAC registering as VF */
        if ((event_dev->priv_flags & IFF_BONDING) &&
            (event_dev->flags & IFF_MASTER))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
                return netvsc_register_vf(event_dev);
        case NETDEV_UNREGISTER:
                return netvsc_unregister_vf(event_dev);
        case NETDEV_UP:
        case NETDEV_DOWN:
                return netvsc_vf_changed(event_dev);
        default:
                return NOTIFY_DONE;
        }
}

static struct notifier_block netvsc_netdev_notifier = {
        .notifier_call = netvsc_netdev_event,
};

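/* Module unload: stop watching netdev events before the driver goes away */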
static void __exit netvsc_drv_exit(void)
{
        unregister_netdevice_notifier(&netvsc_netdev_notifier);
        vmbus_driver_unregister(&netvsc_drv);
}

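/* Module init: clamp ring_size to the supported minimum, then register
 * the vmbus driver and the netdev notifier used for VF pairing.
 */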
static int __init netvsc_drv_init(void)
{
        int ret;

        if (ring_size < RING_SIZE_MIN) {
                ring_size = RING_SIZE_MIN;
                pr_info("Increased ring_size to %u (min allowed)\n",
                        ring_size);
        }
        netvsc_ring_bytes = ring_size * PAGE_SIZE;

        ret = vmbus_driver_register(&netvsc_drv);
        if (ret)
                return ret;

        register_netdevice_notifier(&netvsc_netdev_notifier);
        return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);