/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN	64
#define RETRY_US_LO	5000
#define RETRY_US_HI	10000
#define RETRY_MAX	2000	/* >10 sec */

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}

static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}

static void netvsc_tx_enable(struct netvsc_device *nvscdev,
			     struct net_device *ndev)
{
	nvscdev->tx_disable = false;
	virt_wmb(); /* ensure queue wake up mechanism is on */

	netif_tx_wake_all_queues(ndev);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netvsc_tx_enable(nvdev, net);
	}

	if (vf_netdev) {
		/* Setting the synthetic device up transparently sets
		 * the slave up as well. If open fails, the slave will
		 * still be offline (and not used).
		 */
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}
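
/* Note: with RETRY_US_LO/RETRY_US_HI of 5000-10000 us per iteration and
 * RETRY_MAX of 2000 iterations, netvsc_wait_until_empty() gives the host
 * roughly 10-20 seconds to drain the ring before returning -ETIMEDOUT,
 * which is where the ">10 sec" annotation on RETRY_MAX comes from.
 */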

static void netvsc_tx_disable(struct netvsc_device *nvscdev,
			      struct net_device *ndev)
{
	if (nvscdev) {
		nvscdev->tx_disable = true;
		virt_wmb(); /* ensure txq will not wake up after stop */
	}

	netif_tx_disable(ndev);
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netvsc_tx_disable(nvdev, net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->internal = 0;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}
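
/* Illustration of init_ppi_data(): each call appends one per-packet info
 * element, so after two calls the message is laid out as
 *
 *   rndis_packet | PPI #1 hdr | PPI #1 data | PPI #2 hdr | PPI #2 data
 *
 * The returned pointer (ppi + 1) addresses the payload area the caller
 * fills in, and data_offset is bumped so the frame data follows all PPIs.
 */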

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;

		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;

		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}
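
/* Example: for a fragmented UDPv4 datagram, or when HV_UDP4_L4HASH has
 * been cleared via ethtool, netvsc_get_hash() falls back to an L3 Jenkins
 * hash over the IPv4 source/destination pair instead of the full 4-tuple
 * hash from skb_get_hash().
 */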

static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       struct net_device *sb_dev,
			       select_queue_fallback_t fallback)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
						       sb_dev, fallback);
		else
			txq = fallback(vf_netdev, skb, NULL);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (unlikely(txq >= ndev->real_num_tx_queues))
		txq -= ndev->real_num_tx_queues;

	return txq;
}
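
/* Note: the while loop in netvsc_select_queue() folds a queue number
 * picked by the VF (which may expose more queues than the synthetic NIC)
 * back into this device's real_num_tx_queues range by repeated
 * subtraction rather than a modulo.
 */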

static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}
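
/* Worked example for fill_pg_buf() (PAGE_SIZE == 4096): offset 3000,
 * len 6000 is split into three page buffers of 1096, 4096 and 808 bytes,
 * and the function returns 3.
 */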

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}
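
/* Example: 100 bytes of linear data starting at page offset 4000 crosses
 * a page boundary and costs DIV_ROUND_UP(4000 + 100, PAGE_SIZE) = 2
 * slots, plus whatever count_skb_frag_slots() reports for the fragments.
 */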

static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* if VF is present and up then redirect packets
	 * already called with rcu_read_lock_bh
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will need at most two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If the skb is scattered across
	 * more pages than that, we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb_vlan_tag_get_id(skb);
		vlan->cfi = skb_vlan_tag_get_cfi(skb);
		vlan->pri = skb_vlan_tag_get_prio(skb);
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	iph->check = 0;
	iph->check = ip_fast_csum(iph, iph->ihl);
}
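
/* Note: ip_fast_csum() checksums iph->ihl 32-bit words of the IP header;
 * the check field is zeroed first so the stale value does not contribute
 * to the sum being recomputed.
 */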

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct netvsc_channel *nvchan)
{
	struct napi_struct *napi = &nvchan->napi;
	const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
	const struct ndis_tcp_ip_checksum_info *csum_info =
		nvchan->rsc.csum_info;
	struct sk_buff *skb;
	int i;

	skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	for (i = 0; i < nvchan->rsc.cnt; i++)
		skb_put_data(skb, nvchan->rsc.data[i], nvchan->rsc.len[i]);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/* Incoming packets may have IP header checksum verified by the host.
	 * They may not have IP header checksum computed after coalescing.
	 * We compute it here if the flags are set, because on Linux, the IP
	 * checksum is always checked.
	 */
	if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
	    csum_info->receive.ip_checksum_succeeded &&
	    skb->protocol == htons(ETH_P_IP))
		netvsc_comp_ipcsum(skb);

	/* Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
			(vlan->cfi ? VLAN_CFI_MASK : 0);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct netvsc_device *net_device,
			 struct netvsc_channel *nvchan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct vmbus_channel *channel = nvchan->channel;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, nvchan);

	if (unlikely(!skb)) {
		++net_device_ctx->eth_stats.rx_no_memory;
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += nvchan->rsc.pktlen;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);
	return NVSP_STAT_SUCCESS;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

/* Alloc struct netvsc_device_info, and initialize it from either existing
 * struct netvsc_device, or from default values.
 */
static struct netvsc_device_info *netvsc_devinfo_get
			(struct netvsc_device *nvdev)
{
	struct netvsc_device_info *dev_info;

	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);

	if (!dev_info)
		return NULL;

	if (nvdev) {
		dev_info->num_chn = nvdev->num_chn;
		dev_info->send_sections = nvdev->send_section_cnt;
		dev_info->send_section_size = nvdev->send_section_size;
		dev_info->recv_sections = nvdev->recv_section_cnt;
		dev_info->recv_section_size = nvdev->recv_section_size;

		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
		       NETVSC_HASH_KEYLEN);
	} else {
		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
		dev_info->send_sections = NETVSC_DEFAULT_TX;
		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
		dev_info->recv_sections = NETVSC_DEFAULT_RX;
		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
	}

	return dev_info;
}

static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* Don't continue trying to set up sub-channels */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	/* If device was up (receiving) then shutdown */
	if (netif_running(ndev)) {
		netvsc_tx_disable(nvdev, ndev);

		ret = rndis_filter_close(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "unable to close device (ret %d).\n", ret);
			return ret;
		}

		ret = netvsc_wait_until_empty(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "Ring buffer not empty after closing rndis\n");
			return ret;
		}
	}

	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return 0;
}

static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev, dev_info);

		/* if unavailable, just proceed with one queue */
		if (ret) {
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	/* In any case device is now ready */
	netif_device_attach(ndev);

	/* Note: enable and attach happen when sub-channels setup */
	netif_carrier_off(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			return ret;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info *device_info;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->num_chn = count;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(net, device_info);
	if (ret) {
		device_info->num_chn = orig;
		if (netvsc_attach(net, device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

out:
	kfree(device_info);
	return ret;
}

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}
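
/* Note: the validation above works by zeroing every field the caller is
 * allowed to set (speed, duplex, advertising, cmd) in a copy of the
 * request, then requiring the remainder to match a template whose only
 * non-zero field is port == PORT_OTHER; any other modified field makes
 * the request invalid.
 */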

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->l4_hash = HV_DEFAULT_L4HASH;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;

	dev->features = NETIF_F_LRO;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info *device_info;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			goto out;
	}

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto rollback_vf;

	ndev->mtu = mtu;

	ret = netvsc_attach(ndev, device_info);
	if (!ret)
		goto out;

	/* Attempt rollback to original MTU */
	ndev->mtu = orig_mtu;

	if (netvsc_attach(ndev, device_info))
		netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
	if (vf_netdev)
		dev_set_mtu(vf_netdev, orig_mtu);

out:
	kfree(device_info);
	return ret;
}

static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}

static void netvsc_get_pcpu_stats(struct net_device *net,
				  struct netvsc_ethtool_pcpu_stats *pcpu_tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	int i;

	/* fetch percpu stats of vf */
	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats =
			per_cpu_ptr(ndev_ctx->vf_stats, i);
		struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			this_tot->vf_rx_packets = stats->rx_packets;
			this_tot->vf_tx_packets = stats->tx_packets;
			this_tot->vf_rx_bytes = stats->rx_bytes;
			this_tot->vf_tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		this_tot->rx_packets = this_tot->vf_rx_packets;
		this_tot->tx_packets = this_tot->vf_tx_packets;
		this_tot->rx_bytes = this_tot->vf_rx_bytes;
		this_tot->tx_bytes = this_tot->vf_tx_bytes;
	}

	/* fetch percpu stats of netvsc */
	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		struct netvsc_ethtool_pcpu_stats *this_tot =
			&pcpu_tot[nvchan->channel->target_cpu];
		u64 packets, bytes;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->tx_bytes += bytes;
		this_tot->tx_packets += packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->rx_bytes += bytes;
		this_tot->rx_packets += packets;
	}
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	if (!nvdev)
		return;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes += vf_tot.rx_bytes;
	t->tx_bytes += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes += bytes;
		t->tx_packets += packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes += bytes;
		t->rx_packets += packets;
		t->multicast += multicast;
	}
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr, NULL);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr, NULL);
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, pcpu_stats[] = {
	{ "cpu%u_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
	{ "cpu%u_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
	{ "cpu%u_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
	{ "cpu%u_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
	{ "cpu%u_vf_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
	{ "cpu%u_vf_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
	{ "cpu%u_vf_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
	{ "cpu%u_vf_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* statistics per queue (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN
			+ NETVSC_VF_STATS_LEN
			+ NETVSC_QUEUE_STATS_LEN(nvdev)
			+ NETVSC_PCPU_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	struct netvsc_vf_pcpu_stats sum;
	struct netvsc_ethtool_pcpu_stats *pcpu_sum;
	unsigned int start;
	u64 packets, bytes;
	int i, j, cpu;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	netvsc_get_vf_stats(dev, &sum);
	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}

	pcpu_sum = kvmalloc_array(num_possible_cpus(),
				  sizeof(struct netvsc_ethtool_pcpu_stats),
				  GFP_KERNEL);
	if (!pcpu_sum)
		return;

	netvsc_get_pcpu_stats(dev, pcpu_sum);
	for_each_present_cpu(cpu) {
		struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];

		for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
			data[i++] = *(u64 *)((void *)this_sum
					     + pcpu_stats[j].offset);
	}
	kvfree(pcpu_sum);
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	u8 *p = data;
	int i, cpu;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		for_each_present_cpu(cpu) {
			for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
				sprintf(p, pcpu_stats[i].name, cpu);
				p += ETH_GSTRING_LEN;
			}
		}

		break;
	}
}

static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
			 struct ethtool_rxnfc *info)
{
	const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;

	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
		if (ndc->l4_hash & HV_TCP4_L4HASH)
			info->data |= l4_flag;

		break;

	case TCP_V6_FLOW:
		if (ndc->l4_hash & HV_TCP6_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V4_FLOW:
		if (ndc->l4_hash & HV_UDP4_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V6_FLOW:
		if (ndc->l4_hash & HV_UDP6_L4HASH)
			info->data |= l4_flag;

		break;

	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(ndc, info);
	}
	return -EOPNOTSUPP;
}

static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
				    struct ethtool_rxnfc *info)
{
	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash |= HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash |= HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash |= HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash |= HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash &= ~HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash &= ~HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash &= ~HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash &= ~HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	return -EOPNOTSUPP;
}
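
/* Usage sketch (assuming the standard ethtool syntax): enabling 4-tuple
 * hashing for UDP over IPv4 with
 *
 *   ethtool -N eth0 rx-flow-hash udp4 sdfn
 *
 * reaches netvsc_set_rss_hash_opts() with RXH_IP_SRC | RXH_IP_DST |
 * RXH_L4_B_0_1 | RXH_L4_B_2_3 and sets HV_UDP4_L4HASH.
 */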

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
	struct net_device_context *ndc = netdev_priv(ndev);

	if (info->cmd == ETHTOOL_SRXFH)
		return netvsc_set_rss_hash_opts(ndc, info);

	return -EOPNOTSUPP;
}

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = rndis_dev->rx_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= ndev->num_chn)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			rndis_dev->rx_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key);
}

/* Hyper-V RNDIS protocol does not have ring in the HW sense.
 * It does have pre-allocated receive area which is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}
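
/* Rough illustration (the exact constants live in hyperv_net.h): with a
 * 16 MB receive buffer and 1728-byte receive sections, rx_max_pending
 * works out to about 9700 sections; tx_max_pending is derived the same
 * way from the send buffer size.
 */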

static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}

static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct netvsc_device_info *device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->send_sections = new_tx;
	device_info->recv_sections = new_rx;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(ndev, device_info);
	if (ret) {
		device_info->send_sections = orig.tx_pending;
		device_info->recv_sections = orig.rx_pending;

		if (netvsc_attach(ndev, device_info))
			netdev_err(ndev, "restoring ringparam failed\n");
	}

out:
	kfree(device_info);
	return ret;
}

static int netvsc_set_features(struct net_device *ndev,
			       netdev_features_t features)
{
	netdev_features_t change = features ^ ndev->features;
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct ndis_offload_params offloads;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (!(change & NETIF_F_LRO))
		return 0;

	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	if (features & NETIF_F_LRO) {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
	} else {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
	}

	return rndis_filter_set_offload_params(ndev, nvdev, &offloads);
}

static u32 netvsc_get_msglevel(struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	return ndev_ctx->msg_enable;
}

static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	ndev_ctx->msg_enable = val;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_msglevel	= netvsc_get_msglevel,
	.set_msglevel	= netvsc_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_rxnfc	= netvsc_get_rxnfc,
	.set_rxnfc	= netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam	= netvsc_get_ringparam,
	.set_ringparam	= netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
	.ndo_open		= netvsc_open,
	.ndo_stop		= netvsc_close,
	.ndo_start_xmit		= netvsc_start_xmit,
	.ndo_change_rx_flags	= netvsc_change_rx_flags,
	.ndo_set_rx_mode	= netvsc_set_rx_mode,
	.ndo_set_features	= netvsc_set_features,
	.ndo_change_mtu		= netvsc_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= netvsc_set_mac_addr,
	.ndo_select_queue	= netvsc_select_queue,
	.ndo_get_stats64	= netvsc_get_stats64,
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	/* if changes are happening, come back later */
1887 if (!rtnl_trylock()) {
1888 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
1889 return;
1890 }
1891
1892 net_device = rtnl_dereference(ndev_ctx->nvdev);
1893 if (!net_device)
1894 goto out_unlock;
1895
1896 rdev = net_device->extension;
1897
1898 next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
1899 if (time_is_after_jiffies(next_reconfig)) {
1900 /* link_watch only sends one notification with current state
1901 * per second, avoid doing reconfig more frequently. Handle
1902 * wrap around.
1903 */
1904 delay = next_reconfig - jiffies;
1905 delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
1906 schedule_delayed_work(&ndev_ctx->dwork, delay);
1907 goto out_unlock;
1908 }
1909 ndev_ctx->last_reconfig = jiffies;
1910
1911 spin_lock_irqsave(&ndev_ctx->lock, flags);
1912 if (!list_empty(&ndev_ctx->reconfig_events)) {
1913 event = list_first_entry(&ndev_ctx->reconfig_events,
1914 struct netvsc_reconfig, list);
1915 list_del(&event->list);
1916 reschedule = !list_empty(&ndev_ctx->reconfig_events);
1917 }
1918 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1919
1920 if (!event)
1921 goto out_unlock;
1922
1923 switch (event->event) {
1924 /* Only the following events are possible due to the check in
1925 * netvsc_linkstatus_callback()
1926 */
1927 case RNDIS_STATUS_MEDIA_CONNECT:
1928 if (rdev->link_state) {
1929 rdev->link_state = false;
1930 netif_carrier_on(net);
1931 netvsc_tx_enable(net_device, net);
1932 } else {
1933 notify = true;
1934 }
1935 kfree(event);
1936 break;
1937 case RNDIS_STATUS_MEDIA_DISCONNECT:
1938 if (!rdev->link_state) {
1939 rdev->link_state = true;
1940 netif_carrier_off(net);
1941 netvsc_tx_disable(net_device, net);
1942 }
1943 kfree(event);
1944 break;
1945 case RNDIS_STATUS_NETWORK_CHANGE:
1946 /* Only makes sense if carrier is present */
1947 if (!rdev->link_state) {
1948 rdev->link_state = true;
1949 netif_carrier_off(net);
1950 netvsc_tx_disable(net_device, net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct net_device *dev;

	dev = netdev_master_upper_dev_get(vf_netdev);
	if (!dev || dev->netdev_ops != &device_ops)
		return NULL;	/* not a netvsc device */

	net_device_ctx = netdev_priv(dev);
	if (!rtnl_dereference(net_device_ctx->nvdev))
		return NULL;	/* device is removed */

	return dev;
}

/* Called when the VF is injecting data into the network stack.
 * Change the associated network device from VF to netvsc.
 * Note: already called with rcu_read_lock held.
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_vf_pcpu_stats *pcpu_stats
		= this_cpu_ptr(ndev_ctx->vf_stats);

	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

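	/* RX_HANDLER_ANOTHER makes the stack restart receive processing
	 * with skb->dev now pointing at the synthetic device, so VF
	 * traffic is delivered and accounted through the netvsc interface.
	 */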
	return RX_HANDLER_ANOTHER;
}

static int netvsc_vf_join(struct net_device *vf_netdev,
			  struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	int ret;

	ret = netdev_rx_handler_register(vf_netdev,
					 netvsc_vf_handle_frame, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "cannot register netvsc VF receive handler (err = %d)\n",
			   ret);
		goto rx_handler_failed;
	}

	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
					   NULL, NULL, NULL);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "cannot set master device %s (err = %d)\n",
			   ndev->name, ret);
		goto upper_link_failed;
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	vf_netdev->flags |= IFF_SLAVE;

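	/* Defer the configuration takeover (MTU/flags sync and open) to a
	 * workqueue, giving the VF driver time to finish initializing.
	 */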
	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
	return 0;

upper_link_failed:
	netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
	return ret;
}

static void __netvsc_vf_setup(struct net_device *ndev,
			      struct net_device *vf_netdev)
{
	int ret;

	/* Align MTU of VF with master */
	ret = dev_set_mtu(vf_netdev, ndev->mtu);
	if (ret)
		netdev_warn(vf_netdev,
			    "unable to change mtu to %u\n", ndev->mtu);

	/* set multicast etc flags on VF */
	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);

	/* sync address list from ndev to VF */
	netif_addr_lock_bh(ndev);
	dev_uc_sync(vf_netdev, ndev);
	dev_mc_sync(vf_netdev, ndev);
	netif_addr_unlock_bh(ndev);

	if (netif_running(ndev)) {
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(vf_netdev,
				    "unable to open: %d\n", ret);
	}
}

/* Setup VF as slave of the synthetic device.
 * Runs in workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
{
	struct net_device_context *ndev_ctx
		= container_of(w, struct net_device_context, vf_takeover.work);
	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
	struct net_device *vf_netdev;

	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
		return;
	}

	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		__netvsc_vf_setup(ndev, vf_netdev);

	rtnl_unlock();
}

/* Find netvsc by VF serial number.
 * The PCI hyperv controller records the serial number as the slot kobj name.
 */
static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
	struct device *parent = vf_netdev->dev.parent;
	struct net_device_context *ndev_ctx;
	struct pci_dev *pdev;
	u32 serial;

	if (!parent || !dev_is_pci(parent))
		return NULL; /* not a PCI device */

	pdev = to_pci_dev(parent);
	if (!pdev->slot) {
		netdev_notice(vf_netdev, "no PCI slot information\n");
		return NULL;
	}

	if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
		netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
			      pci_slot_name(pdev->slot));
		return NULL;
	}

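	/* netvsc_dev_list is only modified under RTNL, which the netdev
	 * notifier path already holds, so plain list iteration is safe here.
	 */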
	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
		if (!ndev_ctx->vf_alloc)
			continue;

		if (ndev_ctx->vf_serial == serial)
			return hv_get_drvdata(ndev_ctx->device_ctx);
	}

	netdev_notice(vf_netdev,
		      "no netdev found for vf serial:%u\n", serial);
	return NULL;
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	int ret;

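	/* Only Ethernet-like devices (6-byte MAC) can act as a VF here */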
	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	ndev = get_netvsc_byslot(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	/* if the synthetic interface is in a different namespace,
	 * then move the VF to that namespace; join will be
	 * done again in that context.
	 */
	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
		ret = dev_change_net_namespace(vf_netdev,
					       dev_net(ndev), "eth%d");
		if (ret)
			netdev_err(vf_netdev,
				   "could not move to same namespace as %s: %d\n",
				   ndev->name, ret);
		else
			netdev_info(vf_netdev,
				    "VF moved to namespace with: %s\n",
				    ndev->name);
		return NOTIFY_DONE;
	}

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

	if (netvsc_vf_join(vf_netdev, ndev) != 0)
		return NOTIFY_DONE;

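	/* Hold a reference on the VF netdev for as long as the pointer is
	 * published; it is dropped in netvsc_unregister_vf().
	 */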
	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

/* VF up/down change detected, schedule to change data path */
static int netvsc_vf_changed(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	bool vf_is_up = netif_running(vf_netdev);

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev)
		return NOTIFY_DONE;

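	/* Tell the host to steer traffic through the VF while it is up,
	 * or back to the synthetic path when it goes down.
	 */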
	netvsc_switch_datapath(ndev, vf_is_up);
	netdev_info(ndev, "Data path switched %s VF: %s\n",
		    vf_is_up ? "to" : "from", vf_netdev->name);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
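	/* Stop any pending VF takeover before tearing down the link */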
	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	netdev_rx_handler_unregister(vf_netdev);
	netdev_upper_dev_unlink(vf_netdev, ndev);
	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);

	return NOTIFY_OK;
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info *device_info = NULL;
	struct netvsc_device *nvdev;
	int ret = -ENOMEM;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		goto no_net;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
	if (!net_device_ctx->vf_stats)
		goto no_stats;

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for the rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Initialize the number of queues to be 1, we may change it if more
	 * channels are offered later.
	 */
	netif_set_real_num_tx_queues(net, 1);
	netif_set_real_num_rx_queues(net, 1);

	/* Notify the netvsc driver of the new device */
	device_info = netvsc_devinfo_get(NULL);

	if (!device_info) {
		ret = -ENOMEM;
		goto devinfo_failed;
	}

	nvdev = rndis_filter_device_add(dev, device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		goto rndis_failed;
	}

	memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);

	/* We must take the rtnl lock before scheduling nvdev->subchan_work,
	 * otherwise netvsc_subchan_work() can get the rtnl lock first and
	 * wait for all subchannels to show up, but that may never happen
	 * because netvsc_probe() can't get the rtnl lock and as a result
	 * vmbus_onoffer() -> ... -> device_add() -> ... ->
	 * __device_attach() can't get the device lock, so the subchannels
	 * can't be processed, and netvsc_subchan_work() hangs forever.
	 */
	rtnl_lock();

	if (nvdev->num_chn > 1)
		schedule_work(&nvdev->subchan_work);

	/* hw_features computed in rndis_netdev_set_hwcaps() */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

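	/* Per-queue lockdep classes avoid false "possible recursive locking"
	 * reports when this device is stacked with the VF.
	 */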
	netdev_lockdep_set_classes(net);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdevice(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		goto register_failed;
	}

	list_add(&net_device_ctx->list, &netvsc_dev_list);
	rtnl_unlock();

	kfree(device_info);
	return 0;

register_failed:
	rtnl_unlock();
	rndis_filter_device_remove(dev, nvdev);
rndis_failed:
	kfree(device_info);
devinfo_failed:
	free_percpu(net_device_ctx->vf_stats);
no_stats:
	hv_set_drvdata(dev, NULL);
	free_netdev(net);
no_net:
	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct net_device *vf_netdev, *net;
	struct netvsc_device *nvdev;

	net = hv_get_drvdata(dev);
	if (!net) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();
	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev)
		cancel_work_sync(&nvdev->subchan_work);

	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		netvsc_unregister_vf(vf_netdev);

	/*
	 * Let the rndis filter know that the device is being removed.
	 * This also blocks mtu and channel changes.
	 */
	if (nvdev)
		rndis_filter_device_remove(dev, nvdev);

	unregister_netdevice(net);
	list_del(&ndev_ctx->list);

	rtnl_unlock();

	hv_set_drvdata(dev, NULL);

	free_percpu(ndev_ctx->vf_stats);
	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only netvsc driver instance */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid Vlan dev with same MAC registering as VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
	case NETDEV_DOWN:
		return netvsc_vf_changed(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %u (min allowed)\n",
			ring_size);
	}
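	/* ring_size is given in pages; the vmbus ring buffer is sized in
	 * bytes.
	 */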
	netvsc_ring_bytes = ring_size * PAGE_SIZE;

	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);