at v6.19 2902 lines 76 kB view raw
1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * Copyright (c) 2009, Microsoft Corporation. 4 * 5 * Authors: 6 * Haiyang Zhang <haiyangz@microsoft.com> 7 * Hank Janssen <hjanssen@microsoft.com> 8 */ 9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11#include <linux/init.h> 12#include <linux/atomic.h> 13#include <linux/ethtool.h> 14#include <linux/module.h> 15#include <linux/highmem.h> 16#include <linux/device.h> 17#include <linux/io.h> 18#include <linux/delay.h> 19#include <linux/netdevice.h> 20#include <linux/inetdevice.h> 21#include <linux/etherdevice.h> 22#include <linux/pci.h> 23#include <linux/skbuff.h> 24#include <linux/if_vlan.h> 25#include <linux/in.h> 26#include <linux/slab.h> 27#include <linux/rtnetlink.h> 28#include <linux/netpoll.h> 29#include <linux/bpf.h> 30 31#include <net/arp.h> 32#include <net/netdev_lock.h> 33#include <net/route.h> 34#include <net/sock.h> 35#include <net/pkt_sched.h> 36#include <net/checksum.h> 37#include <net/ip6_checksum.h> 38 39#include "hyperv_net.h" 40 41#define RING_SIZE_MIN 64 42 43#define LINKCHANGE_INT (2 * HZ) 44#define VF_TAKEOVER_INT (HZ / 10) 45 46/* Macros to define the context of vf registration */ 47#define VF_REG_IN_PROBE 1 48#define VF_REG_IN_NOTIFIER 2 49 50static unsigned int ring_size __ro_after_init = 128; 51module_param(ring_size, uint, 0444); 52MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)"); 53unsigned int netvsc_ring_bytes __ro_after_init; 54 55static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | 56 NETIF_MSG_LINK | NETIF_MSG_IFUP | 57 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | 58 NETIF_MSG_TX_ERR; 59 60static int debug = -1; 61module_param(debug, int, 0444); 62MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 63 64static LIST_HEAD(netvsc_dev_list); 65 66static void netvsc_change_rx_flags(struct net_device *net, int change) 67{ 68 struct net_device_context *ndev_ctx = netdev_priv(net); 69 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 70 int inc; 71 72 if (!vf_netdev) 73 return; 74 75 if (change & IFF_PROMISC) { 76 inc = (net->flags & IFF_PROMISC) ? 1 : -1; 77 dev_set_promiscuity(vf_netdev, inc); 78 } 79 80 if (change & IFF_ALLMULTI) { 81 inc = (net->flags & IFF_ALLMULTI) ? 1 : -1; 82 dev_set_allmulti(vf_netdev, inc); 83 } 84} 85 86static void netvsc_set_rx_mode(struct net_device *net) 87{ 88 struct net_device_context *ndev_ctx = netdev_priv(net); 89 struct net_device *vf_netdev; 90 struct netvsc_device *nvdev; 91 92 rcu_read_lock(); 93 vf_netdev = rcu_dereference(ndev_ctx->vf_netdev); 94 if (vf_netdev) { 95 dev_uc_sync(vf_netdev, net); 96 dev_mc_sync(vf_netdev, net); 97 } 98 99 nvdev = rcu_dereference(ndev_ctx->nvdev); 100 if (nvdev) 101 rndis_filter_update(nvdev); 102 rcu_read_unlock(); 103} 104 105static void netvsc_tx_enable(struct netvsc_device *nvscdev, 106 struct net_device *ndev) 107{ 108 nvscdev->tx_disable = false; 109 virt_wmb(); /* ensure queue wake up mechanism is on */ 110 111 netif_tx_wake_all_queues(ndev); 112} 113 114static int netvsc_open(struct net_device *net) 115{ 116 struct net_device_context *ndev_ctx = netdev_priv(net); 117 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 118 struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev); 119 struct rndis_device *rdev; 120 int ret = 0; 121 122 netif_carrier_off(net); 123 124 /* Open up the device */ 125 ret = rndis_filter_open(nvdev); 126 if (ret != 0) { 127 netdev_err(net, "unable to open device (ret %d).\n", ret); 128 return ret; 129 } 130 131 rdev = nvdev->extension; 132 if (!rdev->link_state) { 133 netif_carrier_on(net); 134 netvsc_tx_enable(nvdev, net); 135 } 136 137 if (vf_netdev) { 138 /* Setting synthetic device up transparently sets 139 * slave as up. If open fails, then slave will be 140 * still be offline (and not used). 141 */ 142 ret = dev_open(vf_netdev, NULL); 143 if (ret) 144 netdev_warn(net, 145 "unable to open slave: %s: %d\n", 146 vf_netdev->name, ret); 147 } 148 return 0; 149} 150 151static int netvsc_wait_until_empty(struct netvsc_device *nvdev) 152{ 153 unsigned int retry = 0; 154 int i; 155 156 /* Ensure pending bytes in ring are read */ 157 for (;;) { 158 u32 aread = 0; 159 160 for (i = 0; i < nvdev->num_chn; i++) { 161 struct vmbus_channel *chn 162 = nvdev->chan_table[i].channel; 163 164 if (!chn) 165 continue; 166 167 /* make sure receive not running now */ 168 napi_synchronize(&nvdev->chan_table[i].napi); 169 170 aread = hv_get_bytes_to_read(&chn->inbound); 171 if (aread) 172 break; 173 174 aread = hv_get_bytes_to_read(&chn->outbound); 175 if (aread) 176 break; 177 } 178 179 if (aread == 0) 180 return 0; 181 182 if (++retry > RETRY_MAX) 183 return -ETIMEDOUT; 184 185 usleep_range(RETRY_US_LO, RETRY_US_HI); 186 } 187} 188 189static void netvsc_tx_disable(struct netvsc_device *nvscdev, 190 struct net_device *ndev) 191{ 192 if (nvscdev) { 193 nvscdev->tx_disable = true; 194 virt_wmb(); /* ensure txq will not wake up after stop */ 195 } 196 197 netif_tx_disable(ndev); 198} 199 200static int netvsc_close(struct net_device *net) 201{ 202 struct net_device_context *net_device_ctx = netdev_priv(net); 203 struct net_device *vf_netdev 204 = rtnl_dereference(net_device_ctx->vf_netdev); 205 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 206 int ret; 207 208 netvsc_tx_disable(nvdev, net); 209 210 /* No need to close rndis filter if it is removed already */ 211 if (!nvdev) 212 return 0; 213 214 ret = rndis_filter_close(nvdev); 215 if (ret != 0) { 216 netdev_err(net, "unable to close device (ret %d).\n", ret); 217 return ret; 218 } 219 220 ret = netvsc_wait_until_empty(nvdev); 221 if (ret) 222 netdev_err(net, "Ring buffer not empty after closing rndis\n"); 223 224 if (vf_netdev) 225 dev_close(vf_netdev); 226 227 return ret; 228} 229 230static inline void *init_ppi_data(struct rndis_message *msg, 231 u32 ppi_size, u32 pkt_type) 232{ 233 struct rndis_packet *rndis_pkt = &msg->msg.pkt; 234 struct rndis_per_packet_info *ppi; 235 236 rndis_pkt->data_offset += ppi_size; 237 ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset 238 + rndis_pkt->per_pkt_info_len; 239 240 ppi->size = ppi_size; 241 ppi->type = pkt_type; 242 ppi->internal = 0; 243 ppi->ppi_offset = sizeof(struct rndis_per_packet_info); 244 245 rndis_pkt->per_pkt_info_len += ppi_size; 246 247 return ppi + 1; 248} 249 250static inline int netvsc_get_tx_queue(struct net_device *ndev, 251 struct sk_buff *skb, int old_idx) 252{ 253 const struct net_device_context *ndc = netdev_priv(ndev); 254 struct sock *sk = skb->sk; 255 int q_idx; 256 257 q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) & 258 (VRSS_SEND_TAB_SIZE - 1)]; 259 260 /* If queue index changed record the new value */ 261 if (q_idx != old_idx && 262 sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) 263 sk_tx_queue_set(sk, q_idx); 264 265 return q_idx; 266} 267 268/* 269 * Select queue for transmit. 270 * 271 * If a valid queue has already been assigned, then use that. 272 * Otherwise compute tx queue based on hash and the send table. 273 * 274 * This is basically similar to default (netdev_pick_tx) with the added step 275 * of using the host send_table when no other queue has been assigned. 276 * 277 * TODO support XPS - but get_xps_queue not exported 278 */ 279static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb) 280{ 281 int q_idx = sk_tx_queue_get(skb->sk); 282 283 if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) { 284 /* If forwarding a packet, we use the recorded queue when 285 * available for better cache locality. 286 */ 287 if (skb_rx_queue_recorded(skb)) 288 q_idx = skb_get_rx_queue(skb); 289 else 290 q_idx = netvsc_get_tx_queue(ndev, skb, q_idx); 291 } 292 293 return q_idx; 294} 295 296static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, 297 struct net_device *sb_dev) 298{ 299 struct net_device_context *ndc = netdev_priv(ndev); 300 struct net_device *vf_netdev; 301 u16 txq; 302 303 rcu_read_lock(); 304 vf_netdev = rcu_dereference(ndc->vf_netdev); 305 if (vf_netdev) { 306 const struct net_device_ops *vf_ops = vf_netdev->netdev_ops; 307 308 if (vf_ops->ndo_select_queue) 309 txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev); 310 else 311 txq = netdev_pick_tx(vf_netdev, skb, NULL); 312 313 /* Record the queue selected by VF so that it can be 314 * used for common case where VF has more queues than 315 * the synthetic device. 316 */ 317 qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq; 318 } else { 319 txq = netvsc_pick_tx(ndev, skb); 320 } 321 rcu_read_unlock(); 322 323 while (txq >= ndev->real_num_tx_queues) 324 txq -= ndev->real_num_tx_queues; 325 326 return txq; 327} 328 329static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, 330 struct hv_netvsc_packet *packet, 331 struct hv_page_buffer *pb) 332{ 333 int frags = skb_shinfo(skb)->nr_frags; 334 int i; 335 336 /* The packet is laid out thus: 337 * 1. hdr: RNDIS header and PPI 338 * 2. skb linear data 339 * 3. skb fragment data 340 */ 341 342 pb[0].offset = offset_in_hvpage(hdr); 343 pb[0].len = len; 344 pb[0].pfn = virt_to_hvpfn(hdr); 345 packet->rmsg_size = len; 346 347 pb[1].offset = offset_in_hvpage(skb->data); 348 pb[1].len = skb_headlen(skb); 349 pb[1].pfn = virt_to_hvpfn(skb->data); 350 351 for (i = 0; i < frags; i++) { 352 skb_frag_t *frag = skb_shinfo(skb)->frags + i; 353 struct hv_page_buffer *cur_pb = &pb[i + 2]; 354 u64 pfn = page_to_hvpfn(skb_frag_page(frag)); 355 u32 offset = skb_frag_off(frag); 356 357 cur_pb->offset = offset_in_hvpage(offset); 358 cur_pb->len = skb_frag_size(frag); 359 cur_pb->pfn = pfn + (offset >> HV_HYP_PAGE_SHIFT); 360 } 361 return frags + 2; 362} 363 364static int count_skb_frag_slots(struct sk_buff *skb) 365{ 366 int i, frags = skb_shinfo(skb)->nr_frags; 367 int pages = 0; 368 369 for (i = 0; i < frags; i++) { 370 skb_frag_t *frag = skb_shinfo(skb)->frags + i; 371 unsigned long size = skb_frag_size(frag); 372 unsigned long offset = skb_frag_off(frag); 373 374 /* Skip unused frames from start of page */ 375 offset &= ~HV_HYP_PAGE_MASK; 376 pages += HVPFN_UP(offset + size); 377 } 378 return pages; 379} 380 381static int netvsc_get_slots(struct sk_buff *skb) 382{ 383 char *data = skb->data; 384 unsigned int offset = offset_in_hvpage(data); 385 unsigned int len = skb_headlen(skb); 386 int slots; 387 int frag_slots; 388 389 slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE); 390 frag_slots = count_skb_frag_slots(skb); 391 return slots + frag_slots; 392} 393 394static u32 net_checksum_info(struct sk_buff *skb) 395{ 396 if (skb->protocol == htons(ETH_P_IP)) { 397 struct iphdr *ip = ip_hdr(skb); 398 399 if (ip->protocol == IPPROTO_TCP) 400 return TRANSPORT_INFO_IPV4_TCP; 401 else if (ip->protocol == IPPROTO_UDP) 402 return TRANSPORT_INFO_IPV4_UDP; 403 } else { 404 struct ipv6hdr *ip6 = ipv6_hdr(skb); 405 406 if (ip6->nexthdr == IPPROTO_TCP) 407 return TRANSPORT_INFO_IPV6_TCP; 408 else if (ip6->nexthdr == IPPROTO_UDP) 409 return TRANSPORT_INFO_IPV6_UDP; 410 } 411 412 return TRANSPORT_INFO_NOT_IP; 413} 414 415/* Send skb on the slave VF device. */ 416static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev, 417 struct sk_buff *skb) 418{ 419 struct net_device_context *ndev_ctx = netdev_priv(net); 420 unsigned int len = skb->len; 421 int rc; 422 423 skb->dev = vf_netdev; 424 skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); 425 426 rc = dev_queue_xmit(skb); 427 if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) { 428 struct netvsc_vf_pcpu_stats *pcpu_stats 429 = this_cpu_ptr(ndev_ctx->vf_stats); 430 431 u64_stats_update_begin(&pcpu_stats->syncp); 432 pcpu_stats->tx_packets++; 433 pcpu_stats->tx_bytes += len; 434 u64_stats_update_end(&pcpu_stats->syncp); 435 } else { 436 this_cpu_inc(ndev_ctx->vf_stats->tx_dropped); 437 } 438 439 return rc; 440} 441 442static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx) 443{ 444 struct net_device_context *net_device_ctx = netdev_priv(net); 445 struct hv_netvsc_packet *packet = NULL; 446 int ret; 447 unsigned int num_data_pgs; 448 struct rndis_message *rndis_msg; 449 struct net_device *vf_netdev; 450 u32 rndis_msg_size; 451 u32 hash; 452 struct hv_page_buffer pb[MAX_DATA_RANGES]; 453 454 /* If VF is present and up then redirect packets to it. 455 * Skip the VF if it is marked down or has no carrier. 456 * If netpoll is in uses, then VF can not be used either. 457 */ 458 vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev); 459 if (vf_netdev && netif_running(vf_netdev) && 460 netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net) && 461 net_device_ctx->data_path_is_vf) 462 return netvsc_vf_xmit(net, vf_netdev, skb); 463 464 /* We will atmost need two pages to describe the rndis 465 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number 466 * of pages in a single packet. If skb is scattered around 467 * more pages we try linearizing it. 468 */ 469 470 num_data_pgs = netvsc_get_slots(skb) + 2; 471 472 if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { 473 ++net_device_ctx->eth_stats.tx_scattered; 474 475 if (skb_linearize(skb)) 476 goto no_memory; 477 478 num_data_pgs = netvsc_get_slots(skb) + 2; 479 if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { 480 ++net_device_ctx->eth_stats.tx_too_big; 481 goto drop; 482 } 483 } 484 485 /* 486 * Place the rndis header in the skb head room and 487 * the skb->cb will be used for hv_netvsc_packet 488 * structure. 489 */ 490 ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE); 491 if (ret) 492 goto no_memory; 493 494 /* Use the skb control buffer for building up the packet */ 495 BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) > 496 sizeof_field(struct sk_buff, cb)); 497 packet = (struct hv_netvsc_packet *)skb->cb; 498 499 packet->q_idx = skb_get_queue_mapping(skb); 500 501 packet->total_data_buflen = skb->len; 502 packet->total_bytes = skb->len; 503 packet->total_packets = 1; 504 505 rndis_msg = (struct rndis_message *)skb->head; 506 507 /* Add the rndis header */ 508 rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET; 509 rndis_msg->msg_len = packet->total_data_buflen; 510 511 rndis_msg->msg.pkt = (struct rndis_packet) { 512 .data_offset = sizeof(struct rndis_packet), 513 .data_len = packet->total_data_buflen, 514 .per_pkt_info_offset = sizeof(struct rndis_packet), 515 }; 516 517 rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet); 518 519 hash = skb_get_hash_raw(skb); 520 if (hash != 0 && net->real_num_tx_queues > 1) { 521 u32 *hash_info; 522 523 rndis_msg_size += NDIS_HASH_PPI_SIZE; 524 hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE, 525 NBL_HASH_VALUE); 526 *hash_info = hash; 527 } 528 529 /* When using AF_PACKET we need to drop VLAN header from 530 * the frame and update the SKB to allow the HOST OS 531 * to transmit the 802.1Q packet 532 */ 533 if (skb->protocol == htons(ETH_P_8021Q)) { 534 u16 vlan_tci; 535 536 skb_reset_mac_header(skb); 537 if (eth_type_vlan(eth_hdr(skb)->h_proto)) { 538 if (unlikely(__skb_vlan_pop(skb, &vlan_tci) != 0)) { 539 ++net_device_ctx->eth_stats.vlan_error; 540 goto drop; 541 } 542 543 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); 544 /* Update the NDIS header pkt lengths */ 545 packet->total_data_buflen -= VLAN_HLEN; 546 packet->total_bytes -= VLAN_HLEN; 547 rndis_msg->msg_len = packet->total_data_buflen; 548 rndis_msg->msg.pkt.data_len = packet->total_data_buflen; 549 } 550 } 551 552 if (skb_vlan_tag_present(skb)) { 553 struct ndis_pkt_8021q_info *vlan; 554 555 rndis_msg_size += NDIS_VLAN_PPI_SIZE; 556 vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE, 557 IEEE_8021Q_INFO); 558 559 vlan->value = 0; 560 vlan->vlanid = skb_vlan_tag_get_id(skb); 561 vlan->cfi = skb_vlan_tag_get_cfi(skb); 562 vlan->pri = skb_vlan_tag_get_prio(skb); 563 } 564 565 if (skb_is_gso(skb)) { 566 struct ndis_tcp_lso_info *lso_info; 567 568 rndis_msg_size += NDIS_LSO_PPI_SIZE; 569 lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE, 570 TCP_LARGESEND_PKTINFO); 571 572 lso_info->value = 0; 573 lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; 574 if (skb->protocol == htons(ETH_P_IP)) { 575 lso_info->lso_v2_transmit.ip_version = 576 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4; 577 ip_hdr(skb)->tot_len = 0; 578 ip_hdr(skb)->check = 0; 579 tcp_hdr(skb)->check = 580 ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 581 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); 582 } else { 583 lso_info->lso_v2_transmit.ip_version = 584 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6; 585 tcp_v6_gso_csum_prep(skb); 586 } 587 lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb); 588 lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size; 589 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 590 if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) { 591 struct ndis_tcp_ip_checksum_info *csum_info; 592 593 rndis_msg_size += NDIS_CSUM_PPI_SIZE; 594 csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, 595 TCPIP_CHKSUM_PKTINFO); 596 597 csum_info->value = 0; 598 csum_info->transmit.tcp_header_offset = skb_transport_offset(skb); 599 600 if (skb->protocol == htons(ETH_P_IP)) { 601 csum_info->transmit.is_ipv4 = 1; 602 603 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 604 csum_info->transmit.tcp_checksum = 1; 605 else 606 csum_info->transmit.udp_checksum = 1; 607 } else { 608 csum_info->transmit.is_ipv6 = 1; 609 610 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 611 csum_info->transmit.tcp_checksum = 1; 612 else 613 csum_info->transmit.udp_checksum = 1; 614 } 615 } else { 616 /* Can't do offload of this type of checksum */ 617 if (skb_checksum_help(skb)) 618 goto drop; 619 } 620 } 621 622 /* Start filling in the page buffers with the rndis hdr */ 623 rndis_msg->msg_len += rndis_msg_size; 624 packet->total_data_buflen = rndis_msg->msg_len; 625 packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size, 626 skb, packet, pb); 627 628 /* timestamp packet in software */ 629 skb_tx_timestamp(skb); 630 631 ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx); 632 if (likely(ret == 0)) 633 return NETDEV_TX_OK; 634 635 if (ret == -EAGAIN) { 636 ++net_device_ctx->eth_stats.tx_busy; 637 return NETDEV_TX_BUSY; 638 } 639 640 if (ret == -ENOSPC) 641 ++net_device_ctx->eth_stats.tx_no_space; 642 643drop: 644 dev_kfree_skb_any(skb); 645 net->stats.tx_dropped++; 646 647 return NETDEV_TX_OK; 648 649no_memory: 650 ++net_device_ctx->eth_stats.tx_no_memory; 651 goto drop; 652} 653 654static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb, 655 struct net_device *ndev) 656{ 657 return netvsc_xmit(skb, ndev, false); 658} 659 660/* 661 * netvsc_linkstatus_callback - Link up/down notification 662 */ 663void netvsc_linkstatus_callback(struct net_device *net, 664 struct rndis_message *resp, 665 void *data, u32 data_buflen) 666{ 667 struct rndis_indicate_status *indicate = &resp->msg.indicate_status; 668 struct net_device_context *ndev_ctx = netdev_priv(net); 669 struct netvsc_reconfig *event; 670 unsigned long flags; 671 672 /* Ensure the packet is big enough to access its fields */ 673 if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) { 674 netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n", 675 resp->msg_len); 676 return; 677 } 678 679 /* Copy the RNDIS indicate status into nvchan->recv_buf */ 680 memcpy(indicate, data + RNDIS_HEADER_SIZE, sizeof(*indicate)); 681 682 /* Update the physical link speed when changing to another vSwitch */ 683 if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) { 684 u32 speed; 685 686 /* Validate status_buf_offset and status_buflen. 687 * 688 * Certain (pre-Fe) implementations of Hyper-V's vSwitch didn't account 689 * for the status buffer field in resp->msg_len; perform the validation 690 * using data_buflen (>= resp->msg_len). 691 */ 692 if (indicate->status_buflen < sizeof(speed) || 693 indicate->status_buf_offset < sizeof(*indicate) || 694 data_buflen - RNDIS_HEADER_SIZE < indicate->status_buf_offset || 695 data_buflen - RNDIS_HEADER_SIZE - indicate->status_buf_offset 696 < indicate->status_buflen) { 697 netdev_err(net, "invalid rndis_indicate_status packet\n"); 698 return; 699 } 700 701 speed = *(u32 *)(data + RNDIS_HEADER_SIZE + indicate->status_buf_offset) / 10000; 702 ndev_ctx->speed = speed; 703 return; 704 } 705 706 /* Handle these link change statuses below */ 707 if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE && 708 indicate->status != RNDIS_STATUS_MEDIA_CONNECT && 709 indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT) 710 return; 711 712 if (net->reg_state != NETREG_REGISTERED) 713 return; 714 715 event = kzalloc(sizeof(*event), GFP_ATOMIC); 716 if (!event) 717 return; 718 event->event = indicate->status; 719 720 spin_lock_irqsave(&ndev_ctx->lock, flags); 721 list_add_tail(&event->list, &ndev_ctx->reconfig_events); 722 spin_unlock_irqrestore(&ndev_ctx->lock, flags); 723 724 schedule_delayed_work(&ndev_ctx->dwork, 0); 725} 726 727/* This function should only be called after skb_record_rx_queue() */ 728void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev) 729{ 730 int rc; 731 732 skb->queue_mapping = skb_get_rx_queue(skb); 733 __skb_push(skb, ETH_HLEN); 734 735 rc = netvsc_xmit(skb, ndev, true); 736 737 if (dev_xmit_complete(rc)) 738 return; 739 740 dev_kfree_skb_any(skb); 741 ndev->stats.tx_dropped++; 742} 743 744static void netvsc_comp_ipcsum(struct sk_buff *skb) 745{ 746 struct iphdr *iph = (struct iphdr *)skb->data; 747 748 iph->check = 0; 749 iph->check = ip_fast_csum(iph, iph->ihl); 750} 751 752static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, 753 struct netvsc_channel *nvchan, 754 struct xdp_buff *xdp) 755{ 756 struct napi_struct *napi = &nvchan->napi; 757 const struct ndis_pkt_8021q_info *vlan = &nvchan->rsc.vlan; 758 const struct ndis_tcp_ip_checksum_info *csum_info = 759 &nvchan->rsc.csum_info; 760 const u32 *hash_info = &nvchan->rsc.hash_info; 761 u8 ppi_flags = nvchan->rsc.ppi_flags; 762 struct sk_buff *skb; 763 void *xbuf = xdp->data_hard_start; 764 int i; 765 766 if (xbuf) { 767 unsigned int hdroom = xdp->data - xdp->data_hard_start; 768 unsigned int xlen = xdp->data_end - xdp->data; 769 unsigned int frag_size = xdp->frame_sz; 770 771 skb = build_skb(xbuf, frag_size); 772 773 if (!skb) { 774 __free_page(virt_to_page(xbuf)); 775 return NULL; 776 } 777 778 skb_reserve(skb, hdroom); 779 skb_put(skb, xlen); 780 skb->dev = napi->dev; 781 } else { 782 skb = napi_alloc_skb(napi, nvchan->rsc.pktlen); 783 784 if (!skb) 785 return NULL; 786 787 /* Copy to skb. This copy is needed here since the memory 788 * pointed by hv_netvsc_packet cannot be deallocated. 789 */ 790 for (i = 0; i < nvchan->rsc.cnt; i++) 791 skb_put_data(skb, nvchan->rsc.data[i], 792 nvchan->rsc.len[i]); 793 } 794 795 skb->protocol = eth_type_trans(skb, net); 796 797 /* skb is already created with CHECKSUM_NONE */ 798 skb_checksum_none_assert(skb); 799 800 /* Incoming packets may have IP header checksum verified by the host. 801 * They may not have IP header checksum computed after coalescing. 802 * We compute it here if the flags are set, because on Linux, the IP 803 * checksum is always checked. 804 */ 805 if ((ppi_flags & NVSC_RSC_CSUM_INFO) && csum_info->receive.ip_checksum_value_invalid && 806 csum_info->receive.ip_checksum_succeeded && 807 skb->protocol == htons(ETH_P_IP)) { 808 /* Check that there is enough space to hold the IP header. */ 809 if (skb_headlen(skb) < sizeof(struct iphdr)) { 810 kfree_skb(skb); 811 return NULL; 812 } 813 netvsc_comp_ipcsum(skb); 814 } 815 816 /* Do L4 checksum offload if enabled and present. */ 817 if ((ppi_flags & NVSC_RSC_CSUM_INFO) && (net->features & NETIF_F_RXCSUM)) { 818 if (csum_info->receive.tcp_checksum_succeeded || 819 csum_info->receive.udp_checksum_succeeded) 820 skb->ip_summed = CHECKSUM_UNNECESSARY; 821 } 822 823 if ((ppi_flags & NVSC_RSC_HASH_INFO) && (net->features & NETIF_F_RXHASH)) 824 skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4); 825 826 if (ppi_flags & NVSC_RSC_VLAN) { 827 u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) | 828 (vlan->cfi ? VLAN_CFI_MASK : 0); 829 830 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 831 vlan_tci); 832 } 833 834 return skb; 835} 836 837/* 838 * netvsc_recv_callback - Callback when we receive a packet from the 839 * "wire" on the specified device. 840 */ 841int netvsc_recv_callback(struct net_device *net, 842 struct netvsc_device *net_device, 843 struct netvsc_channel *nvchan) 844{ 845 struct net_device_context *net_device_ctx = netdev_priv(net); 846 struct vmbus_channel *channel = nvchan->channel; 847 u16 q_idx = channel->offermsg.offer.sub_channel_index; 848 struct sk_buff *skb; 849 struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats; 850 struct xdp_buff xdp; 851 u32 act; 852 853 if (net->reg_state != NETREG_REGISTERED) 854 return NVSP_STAT_FAIL; 855 856 act = netvsc_run_xdp(net, nvchan, &xdp); 857 858 if (act == XDP_REDIRECT) 859 return NVSP_STAT_SUCCESS; 860 861 if (act != XDP_PASS && act != XDP_TX) { 862 u64_stats_update_begin(&rx_stats->syncp); 863 rx_stats->xdp_drop++; 864 u64_stats_update_end(&rx_stats->syncp); 865 866 return NVSP_STAT_SUCCESS; /* consumed by XDP */ 867 } 868 869 /* Allocate a skb - TODO direct I/O to pages? */ 870 skb = netvsc_alloc_recv_skb(net, nvchan, &xdp); 871 872 if (unlikely(!skb)) { 873 ++net_device_ctx->eth_stats.rx_no_memory; 874 return NVSP_STAT_FAIL; 875 } 876 877 skb_record_rx_queue(skb, q_idx); 878 879 /* 880 * Even if injecting the packet, record the statistics 881 * on the synthetic device because modifying the VF device 882 * statistics will not work correctly. 883 */ 884 u64_stats_update_begin(&rx_stats->syncp); 885 if (act == XDP_TX) 886 rx_stats->xdp_tx++; 887 888 rx_stats->packets++; 889 rx_stats->bytes += nvchan->rsc.pktlen; 890 891 if (skb->pkt_type == PACKET_BROADCAST) 892 ++rx_stats->broadcast; 893 else if (skb->pkt_type == PACKET_MULTICAST) 894 ++rx_stats->multicast; 895 u64_stats_update_end(&rx_stats->syncp); 896 897 if (act == XDP_TX) { 898 netvsc_xdp_xmit(skb, net); 899 return NVSP_STAT_SUCCESS; 900 } 901 902 napi_gro_receive(&nvchan->napi, skb); 903 return NVSP_STAT_SUCCESS; 904} 905 906static void netvsc_get_drvinfo(struct net_device *net, 907 struct ethtool_drvinfo *info) 908{ 909 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 910 strscpy(info->fw_version, "N/A", sizeof(info->fw_version)); 911} 912 913static void netvsc_get_channels(struct net_device *net, 914 struct ethtool_channels *channel) 915{ 916 struct net_device_context *net_device_ctx = netdev_priv(net); 917 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 918 919 if (nvdev) { 920 channel->max_combined = nvdev->max_chn; 921 channel->combined_count = nvdev->num_chn; 922 } 923} 924 925/* Alloc struct netvsc_device_info, and initialize it from either existing 926 * struct netvsc_device, or from default values. 927 */ 928static 929struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev) 930{ 931 struct netvsc_device_info *dev_info; 932 struct bpf_prog *prog; 933 934 dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC); 935 936 if (!dev_info) 937 return NULL; 938 939 if (nvdev) { 940 ASSERT_RTNL(); 941 942 dev_info->num_chn = nvdev->num_chn; 943 dev_info->send_sections = nvdev->send_section_cnt; 944 dev_info->send_section_size = nvdev->send_section_size; 945 dev_info->recv_sections = nvdev->recv_section_cnt; 946 dev_info->recv_section_size = nvdev->recv_section_size; 947 948 memcpy(dev_info->rss_key, nvdev->extension->rss_key, 949 NETVSC_HASH_KEYLEN); 950 951 prog = netvsc_xdp_get(nvdev); 952 if (prog) { 953 bpf_prog_inc(prog); 954 dev_info->bprog = prog; 955 } 956 } else { 957 dev_info->num_chn = max(VRSS_CHANNEL_DEFAULT, 958 netif_get_num_default_rss_queues()); 959 dev_info->send_sections = NETVSC_DEFAULT_TX; 960 dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE; 961 dev_info->recv_sections = NETVSC_DEFAULT_RX; 962 dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE; 963 } 964 965 return dev_info; 966} 967 968/* Free struct netvsc_device_info */ 969static void netvsc_devinfo_put(struct netvsc_device_info *dev_info) 970{ 971 if (dev_info->bprog) { 972 ASSERT_RTNL(); 973 bpf_prog_put(dev_info->bprog); 974 } 975 976 kfree(dev_info); 977} 978 979static int netvsc_detach(struct net_device *ndev, 980 struct netvsc_device *nvdev) 981{ 982 struct net_device_context *ndev_ctx = netdev_priv(ndev); 983 struct hv_device *hdev = ndev_ctx->device_ctx; 984 int ret; 985 986 /* Don't try continuing to try and setup sub channels */ 987 if (cancel_work_sync(&nvdev->subchan_work)) 988 nvdev->num_chn = 1; 989 990 netvsc_xdp_set(ndev, NULL, NULL, nvdev); 991 992 /* If device was up (receiving) then shutdown */ 993 if (netif_running(ndev)) { 994 netvsc_tx_disable(nvdev, ndev); 995 996 ret = rndis_filter_close(nvdev); 997 if (ret) { 998 netdev_err(ndev, 999 "unable to close device (ret %d).\n", ret); 1000 return ret; 1001 } 1002 1003 ret = netvsc_wait_until_empty(nvdev); 1004 if (ret) { 1005 netdev_err(ndev, 1006 "Ring buffer not empty after closing rndis\n"); 1007 return ret; 1008 } 1009 } 1010 1011 netif_device_detach(ndev); 1012 1013 rndis_filter_device_remove(hdev, nvdev); 1014 1015 return 0; 1016} 1017 1018static int netvsc_attach(struct net_device *ndev, 1019 struct netvsc_device_info *dev_info) 1020{ 1021 struct net_device_context *ndev_ctx = netdev_priv(ndev); 1022 struct hv_device *hdev = ndev_ctx->device_ctx; 1023 struct netvsc_device *nvdev; 1024 struct rndis_device *rdev; 1025 struct bpf_prog *prog; 1026 int ret = 0; 1027 1028 nvdev = rndis_filter_device_add(hdev, dev_info); 1029 if (IS_ERR(nvdev)) 1030 return PTR_ERR(nvdev); 1031 1032 if (nvdev->num_chn > 1) { 1033 ret = rndis_set_subchannel(ndev, nvdev, dev_info); 1034 1035 /* if unavailable, just proceed with one queue */ 1036 if (ret) { 1037 nvdev->max_chn = 1; 1038 nvdev->num_chn = 1; 1039 } 1040 } 1041 1042 prog = dev_info->bprog; 1043 if (prog) { 1044 bpf_prog_inc(prog); 1045 ret = netvsc_xdp_set(ndev, prog, NULL, nvdev); 1046 if (ret) { 1047 bpf_prog_put(prog); 1048 goto err1; 1049 } 1050 } 1051 1052 /* In any case device is now ready */ 1053 nvdev->tx_disable = false; 1054 netif_device_attach(ndev); 1055 1056 /* Note: enable and attach happen when sub-channels setup */ 1057 netif_carrier_off(ndev); 1058 1059 if (netif_running(ndev)) { 1060 ret = rndis_filter_open(nvdev); 1061 if (ret) 1062 goto err2; 1063 1064 rdev = nvdev->extension; 1065 if (!rdev->link_state) 1066 netif_carrier_on(ndev); 1067 } 1068 1069 return 0; 1070 1071err2: 1072 netif_device_detach(ndev); 1073 1074err1: 1075 rndis_filter_device_remove(hdev, nvdev); 1076 1077 return ret; 1078} 1079 1080static int netvsc_set_channels(struct net_device *net, 1081 struct ethtool_channels *channels) 1082{ 1083 struct net_device_context *net_device_ctx = netdev_priv(net); 1084 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 1085 unsigned int orig, count = channels->combined_count; 1086 struct netvsc_device_info *device_info; 1087 int ret; 1088 1089 /* We do not support separate count for rx, tx, or other */ 1090 if (count == 0 || 1091 channels->rx_count || channels->tx_count || channels->other_count) 1092 return -EINVAL; 1093 1094 if (!nvdev || nvdev->destroy) 1095 return -ENODEV; 1096 1097 if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) 1098 return -EINVAL; 1099 1100 if (count > nvdev->max_chn) 1101 return -EINVAL; 1102 1103 orig = nvdev->num_chn; 1104 1105 device_info = netvsc_devinfo_get(nvdev); 1106 1107 if (!device_info) 1108 return -ENOMEM; 1109 1110 device_info->num_chn = count; 1111 1112 ret = netvsc_detach(net, nvdev); 1113 if (ret) 1114 goto out; 1115 1116 ret = netvsc_attach(net, device_info); 1117 if (ret) { 1118 device_info->num_chn = orig; 1119 if (netvsc_attach(net, device_info)) 1120 netdev_err(net, "restoring channel setting failed\n"); 1121 } 1122 1123out: 1124 netvsc_devinfo_put(device_info); 1125 return ret; 1126} 1127 1128static void netvsc_init_settings(struct net_device *dev) 1129{ 1130 struct net_device_context *ndc = netdev_priv(dev); 1131 1132 ndc->l4_hash = HV_DEFAULT_L4HASH; 1133 1134 ndc->speed = SPEED_UNKNOWN; 1135 ndc->duplex = DUPLEX_FULL; 1136 1137 dev->features = NETIF_F_LRO; 1138} 1139 1140static int netvsc_get_link_ksettings(struct net_device *dev, 1141 struct ethtool_link_ksettings *cmd) 1142{ 1143 struct net_device_context *ndc = netdev_priv(dev); 1144 struct net_device *vf_netdev; 1145 1146 vf_netdev = rtnl_dereference(ndc->vf_netdev); 1147 1148 if (vf_netdev) 1149 return __ethtool_get_link_ksettings(vf_netdev, cmd); 1150 1151 cmd->base.speed = ndc->speed; 1152 cmd->base.duplex = ndc->duplex; 1153 cmd->base.port = PORT_OTHER; 1154 1155 return 0; 1156} 1157 1158static int netvsc_set_link_ksettings(struct net_device *dev, 1159 const struct ethtool_link_ksettings *cmd) 1160{ 1161 struct net_device_context *ndc = netdev_priv(dev); 1162 struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev); 1163 1164 if (vf_netdev) { 1165 if (!vf_netdev->ethtool_ops->set_link_ksettings) 1166 return -EOPNOTSUPP; 1167 1168 return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev, 1169 cmd); 1170 } 1171 1172 return ethtool_virtdev_set_link_ksettings(dev, cmd, 1173 &ndc->speed, &ndc->duplex); 1174} 1175 1176static int netvsc_change_mtu(struct net_device *ndev, int mtu) 1177{ 1178 struct net_device_context *ndevctx = netdev_priv(ndev); 1179 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); 1180 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1181 int orig_mtu = ndev->mtu; 1182 struct netvsc_device_info *device_info; 1183 int ret = 0; 1184 1185 if (!nvdev || nvdev->destroy) 1186 return -ENODEV; 1187 1188 device_info = netvsc_devinfo_get(nvdev); 1189 1190 if (!device_info) 1191 return -ENOMEM; 1192 1193 /* Change MTU of underlying VF netdev first. */ 1194 if (vf_netdev) { 1195 ret = dev_set_mtu(vf_netdev, mtu); 1196 if (ret) 1197 goto out; 1198 } 1199 1200 ret = netvsc_detach(ndev, nvdev); 1201 if (ret) 1202 goto rollback_vf; 1203 1204 WRITE_ONCE(ndev->mtu, mtu); 1205 1206 ret = netvsc_attach(ndev, device_info); 1207 if (!ret) 1208 goto out; 1209 1210 /* Attempt rollback to original MTU */ 1211 WRITE_ONCE(ndev->mtu, orig_mtu); 1212 1213 if (netvsc_attach(ndev, device_info)) 1214 netdev_err(ndev, "restoring mtu failed\n"); 1215rollback_vf: 1216 if (vf_netdev) 1217 dev_set_mtu(vf_netdev, orig_mtu); 1218 1219out: 1220 netvsc_devinfo_put(device_info); 1221 return ret; 1222} 1223 1224static void netvsc_get_vf_stats(struct net_device *net, 1225 struct netvsc_vf_pcpu_stats *tot) 1226{ 1227 struct net_device_context *ndev_ctx = netdev_priv(net); 1228 int i; 1229 1230 memset(tot, 0, sizeof(*tot)); 1231 1232 for_each_possible_cpu(i) { 1233 const struct netvsc_vf_pcpu_stats *stats 1234 = per_cpu_ptr(ndev_ctx->vf_stats, i); 1235 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1236 unsigned int start; 1237 1238 do { 1239 start = u64_stats_fetch_begin(&stats->syncp); 1240 rx_packets = stats->rx_packets; 1241 tx_packets = stats->tx_packets; 1242 rx_bytes = stats->rx_bytes; 1243 tx_bytes = stats->tx_bytes; 1244 } while (u64_stats_fetch_retry(&stats->syncp, start)); 1245 1246 tot->rx_packets += rx_packets; 1247 tot->tx_packets += tx_packets; 1248 tot->rx_bytes += rx_bytes; 1249 tot->tx_bytes += tx_bytes; 1250 tot->tx_dropped += stats->tx_dropped; 1251 } 1252} 1253 1254static void netvsc_get_pcpu_stats(struct net_device *net, 1255 struct netvsc_ethtool_pcpu_stats *pcpu_tot) 1256{ 1257 struct net_device_context *ndev_ctx = netdev_priv(net); 1258 struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); 1259 int i; 1260 1261 /* fetch percpu stats of vf */ 1262 for_each_possible_cpu(i) { 1263 const struct netvsc_vf_pcpu_stats *stats = 1264 per_cpu_ptr(ndev_ctx->vf_stats, i); 1265 struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i]; 1266 unsigned int start; 1267 1268 do { 1269 start = u64_stats_fetch_begin(&stats->syncp); 1270 this_tot->vf_rx_packets = stats->rx_packets; 1271 this_tot->vf_tx_packets = stats->tx_packets; 1272 this_tot->vf_rx_bytes = stats->rx_bytes; 1273 this_tot->vf_tx_bytes = stats->tx_bytes; 1274 } while (u64_stats_fetch_retry(&stats->syncp, start)); 1275 this_tot->rx_packets = this_tot->vf_rx_packets; 1276 this_tot->tx_packets = this_tot->vf_tx_packets; 1277 this_tot->rx_bytes = this_tot->vf_rx_bytes; 1278 this_tot->tx_bytes = this_tot->vf_tx_bytes; 1279 } 1280 1281 /* fetch percpu stats of netvsc */ 1282 for (i = 0; i < nvdev->num_chn; i++) { 1283 const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; 1284 const struct netvsc_stats_tx *tx_stats; 1285 const struct netvsc_stats_rx *rx_stats; 1286 struct netvsc_ethtool_pcpu_stats *this_tot = 1287 &pcpu_tot[nvchan->channel->target_cpu]; 1288 u64 packets, bytes; 1289 unsigned int start; 1290 1291 tx_stats = &nvchan->tx_stats; 1292 do { 1293 start = u64_stats_fetch_begin(&tx_stats->syncp); 1294 packets = tx_stats->packets; 1295 bytes = tx_stats->bytes; 1296 } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); 1297 1298 this_tot->tx_bytes += bytes; 1299 this_tot->tx_packets += packets; 1300 1301 rx_stats = &nvchan->rx_stats; 1302 do { 1303 start = u64_stats_fetch_begin(&rx_stats->syncp); 1304 packets = rx_stats->packets; 1305 bytes = rx_stats->bytes; 1306 } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); 1307 1308 this_tot->rx_bytes += bytes; 1309 this_tot->rx_packets += packets; 1310 } 1311} 1312 1313static void netvsc_get_stats64(struct net_device *net, 1314 struct rtnl_link_stats64 *t) 1315{ 1316 struct net_device_context *ndev_ctx = netdev_priv(net); 1317 struct netvsc_device *nvdev; 1318 struct netvsc_vf_pcpu_stats vf_tot; 1319 int i; 1320 1321 rcu_read_lock(); 1322 1323 nvdev = rcu_dereference(ndev_ctx->nvdev); 1324 if (!nvdev) 1325 goto out; 1326 1327 netdev_stats_to_stats64(t, &net->stats); 1328 1329 netvsc_get_vf_stats(net, &vf_tot); 1330 t->rx_packets += vf_tot.rx_packets; 1331 t->tx_packets += vf_tot.tx_packets; 1332 t->rx_bytes += vf_tot.rx_bytes; 1333 t->tx_bytes += vf_tot.tx_bytes; 1334 t->tx_dropped += vf_tot.tx_dropped; 1335 1336 for (i = 0; i < nvdev->num_chn; i++) { 1337 const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; 1338 const struct netvsc_stats_tx *tx_stats; 1339 const struct netvsc_stats_rx *rx_stats; 1340 u64 packets, bytes, multicast; 1341 unsigned int start; 1342 1343 tx_stats = &nvchan->tx_stats; 1344 do { 1345 start = u64_stats_fetch_begin(&tx_stats->syncp); 1346 packets = tx_stats->packets; 1347 bytes = tx_stats->bytes; 1348 } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); 1349 1350 t->tx_bytes += bytes; 1351 t->tx_packets += packets; 1352 1353 rx_stats = &nvchan->rx_stats; 1354 do { 1355 start = u64_stats_fetch_begin(&rx_stats->syncp); 1356 packets = rx_stats->packets; 1357 bytes = rx_stats->bytes; 1358 multicast = rx_stats->multicast + rx_stats->broadcast; 1359 } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); 1360 1361 t->rx_bytes += bytes; 1362 t->rx_packets += packets; 1363 t->multicast += multicast; 1364 } 1365out: 1366 rcu_read_unlock(); 1367} 1368 1369static int netvsc_set_mac_addr(struct net_device *ndev, void *p) 1370{ 1371 struct net_device_context *ndc = netdev_priv(ndev); 1372 struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev); 1373 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1374 struct sockaddr_storage *addr = p; 1375 int err; 1376 1377 err = eth_prepare_mac_addr_change(ndev, p); 1378 if (err) 1379 return err; 1380 1381 if (!nvdev) 1382 return -ENODEV; 1383 1384 if (vf_netdev) { 1385 err = dev_set_mac_address(vf_netdev, addr, NULL); 1386 if (err) 1387 return err; 1388 } 1389 1390 err = rndis_filter_set_device_mac(nvdev, addr->__data); 1391 if (!err) { 1392 eth_commit_mac_addr_change(ndev, p); 1393 } else if (vf_netdev) { 1394 /* rollback change on VF */ 1395 memcpy(addr->__data, ndev->dev_addr, ETH_ALEN); 1396 dev_set_mac_address(vf_netdev, addr, NULL); 1397 } 1398 1399 return err; 1400} 1401 1402static const struct { 1403 char name[ETH_GSTRING_LEN]; 1404 u16 offset; 1405} netvsc_stats[] = { 1406 { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) }, 1407 { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) }, 1408 { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) }, 1409 { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) }, 1410 { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, 1411 { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) }, 1412 { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) }, 1413 { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) }, 1414 { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) }, 1415 { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) }, 1416 { "vlan_error", offsetof(struct netvsc_ethtool_stats, vlan_error) }, 1417}, pcpu_stats[] = { 1418 { "cpu%u_rx_packets", 1419 offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) }, 1420 { "cpu%u_rx_bytes", 1421 offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) }, 1422 { "cpu%u_tx_packets", 1423 offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) }, 1424 { "cpu%u_tx_bytes", 1425 offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) }, 1426 { "cpu%u_vf_rx_packets", 1427 offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) }, 1428 { "cpu%u_vf_rx_bytes", 1429 offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) }, 1430 { "cpu%u_vf_tx_packets", 1431 offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) }, 1432 { "cpu%u_vf_tx_bytes", 1433 offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) }, 1434}, vf_stats[] = { 1435 { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) }, 1436 { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) }, 1437 { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) }, 1438 { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) }, 1439 { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) }, 1440}; 1441 1442#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats) 1443#define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats) 1444 1445/* statistics per queue (rx/tx packets/bytes) */ 1446#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats)) 1447 1448/* 8 statistics per queue (rx/tx packets/bytes, XDP actions) */ 1449#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 8) 1450 1451static int netvsc_get_sset_count(struct net_device *dev, int string_set) 1452{ 1453 struct net_device_context *ndc = netdev_priv(dev); 1454 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1455 1456 if (!nvdev) 1457 return -ENODEV; 1458 1459 switch (string_set) { 1460 case ETH_SS_STATS: 1461 return NETVSC_GLOBAL_STATS_LEN 1462 + NETVSC_VF_STATS_LEN 1463 + NETVSC_QUEUE_STATS_LEN(nvdev) 1464 + NETVSC_PCPU_STATS_LEN; 1465 default: 1466 return -EINVAL; 1467 } 1468} 1469 1470static void netvsc_get_ethtool_stats(struct net_device *dev, 1471 struct ethtool_stats *stats, u64 *data) 1472{ 1473 struct net_device_context *ndc = netdev_priv(dev); 1474 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1475 const void *nds = &ndc->eth_stats; 1476 const struct netvsc_stats_tx *tx_stats; 1477 const struct netvsc_stats_rx *rx_stats; 1478 struct netvsc_vf_pcpu_stats sum; 1479 struct netvsc_ethtool_pcpu_stats *pcpu_sum; 1480 unsigned int start; 1481 u64 packets, bytes; 1482 u64 xdp_drop; 1483 u64 xdp_redirect; 1484 u64 xdp_tx; 1485 u64 xdp_xmit; 1486 int i, j, cpu; 1487 1488 if (!nvdev) 1489 return; 1490 1491 for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++) 1492 data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset); 1493 1494 netvsc_get_vf_stats(dev, &sum); 1495 for (j = 0; j < NETVSC_VF_STATS_LEN; j++) 1496 data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset); 1497 1498 for (j = 0; j < nvdev->num_chn; j++) { 1499 tx_stats = &nvdev->chan_table[j].tx_stats; 1500 1501 do { 1502 start = u64_stats_fetch_begin(&tx_stats->syncp); 1503 packets = tx_stats->packets; 1504 bytes = tx_stats->bytes; 1505 xdp_xmit = tx_stats->xdp_xmit; 1506 } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); 1507 data[i++] = packets; 1508 data[i++] = bytes; 1509 data[i++] = xdp_xmit; 1510 1511 rx_stats = &nvdev->chan_table[j].rx_stats; 1512 do { 1513 start = u64_stats_fetch_begin(&rx_stats->syncp); 1514 packets = rx_stats->packets; 1515 bytes = rx_stats->bytes; 1516 xdp_drop = rx_stats->xdp_drop; 1517 xdp_redirect = rx_stats->xdp_redirect; 1518 xdp_tx = rx_stats->xdp_tx; 1519 } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); 1520 data[i++] = packets; 1521 data[i++] = bytes; 1522 data[i++] = xdp_drop; 1523 data[i++] = xdp_redirect; 1524 data[i++] = xdp_tx; 1525 } 1526 1527 pcpu_sum = kvmalloc_array(nr_cpu_ids, 1528 sizeof(struct netvsc_ethtool_pcpu_stats), 1529 GFP_KERNEL); 1530 if (!pcpu_sum) 1531 return; 1532 1533 netvsc_get_pcpu_stats(dev, pcpu_sum); 1534 for_each_present_cpu(cpu) { 1535 struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu]; 1536 1537 for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++) 1538 data[i++] = *(u64 *)((void *)this_sum 1539 + pcpu_stats[j].offset); 1540 } 1541 kvfree(pcpu_sum); 1542} 1543 1544static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) 1545{ 1546 struct net_device_context *ndc = netdev_priv(dev); 1547 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1548 u8 *p = data; 1549 int i, cpu; 1550 1551 if (!nvdev) 1552 return; 1553 1554 switch (stringset) { 1555 case ETH_SS_STATS: 1556 for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) 1557 ethtool_puts(&p, netvsc_stats[i].name); 1558 1559 for (i = 0; i < ARRAY_SIZE(vf_stats); i++) 1560 ethtool_puts(&p, vf_stats[i].name); 1561 1562 for (i = 0; i < nvdev->num_chn; i++) { 1563 ethtool_sprintf(&p, "tx_queue_%u_packets", i); 1564 ethtool_sprintf(&p, "tx_queue_%u_bytes", i); 1565 ethtool_sprintf(&p, "tx_queue_%u_xdp_xmit", i); 1566 ethtool_sprintf(&p, "rx_queue_%u_packets", i); 1567 ethtool_sprintf(&p, "rx_queue_%u_bytes", i); 1568 ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i); 1569 ethtool_sprintf(&p, "rx_queue_%u_xdp_redirect", i); 1570 ethtool_sprintf(&p, "rx_queue_%u_xdp_tx", i); 1571 } 1572 1573 for_each_present_cpu(cpu) { 1574 for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) 1575 ethtool_sprintf(&p, pcpu_stats[i].name, cpu); 1576 } 1577 1578 break; 1579 } 1580} 1581 1582static int 1583netvsc_get_rxfh_fields(struct net_device *ndev, 1584 struct ethtool_rxfh_fields *info) 1585{ 1586 struct net_device_context *ndc = netdev_priv(ndev); 1587 const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3; 1588 1589 info->data = RXH_IP_SRC | RXH_IP_DST; 1590 1591 switch (info->flow_type) { 1592 case TCP_V4_FLOW: 1593 if (ndc->l4_hash & HV_TCP4_L4HASH) 1594 info->data |= l4_flag; 1595 1596 break; 1597 1598 case TCP_V6_FLOW: 1599 if (ndc->l4_hash & HV_TCP6_L4HASH) 1600 info->data |= l4_flag; 1601 1602 break; 1603 1604 case UDP_V4_FLOW: 1605 if (ndc->l4_hash & HV_UDP4_L4HASH) 1606 info->data |= l4_flag; 1607 1608 break; 1609 1610 case UDP_V6_FLOW: 1611 if (ndc->l4_hash & HV_UDP6_L4HASH) 1612 info->data |= l4_flag; 1613 1614 break; 1615 1616 case IPV4_FLOW: 1617 case IPV6_FLOW: 1618 break; 1619 default: 1620 info->data = 0; 1621 break; 1622 } 1623 1624 return 0; 1625} 1626 1627static u32 netvsc_get_rx_ring_count(struct net_device *dev) 1628{ 1629 struct net_device_context *ndc = netdev_priv(dev); 1630 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1631 1632 if (!nvdev) 1633 return 0; 1634 1635 return nvdev->num_chn; 1636} 1637 1638static int 1639netvsc_set_rxfh_fields(struct net_device *dev, 1640 const struct ethtool_rxfh_fields *info, 1641 struct netlink_ext_ack *extack) 1642{ 1643 struct net_device_context *ndc = netdev_priv(dev); 1644 1645 if (info->data == (RXH_IP_SRC | RXH_IP_DST | 1646 RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 1647 switch (info->flow_type) { 1648 case TCP_V4_FLOW: 1649 ndc->l4_hash |= HV_TCP4_L4HASH; 1650 break; 1651 1652 case TCP_V6_FLOW: 1653 ndc->l4_hash |= HV_TCP6_L4HASH; 1654 break; 1655 1656 case UDP_V4_FLOW: 1657 ndc->l4_hash |= HV_UDP4_L4HASH; 1658 break; 1659 1660 case UDP_V6_FLOW: 1661 ndc->l4_hash |= HV_UDP6_L4HASH; 1662 break; 1663 1664 default: 1665 return -EOPNOTSUPP; 1666 } 1667 1668 return 0; 1669 } 1670 1671 if (info->data == (RXH_IP_SRC | RXH_IP_DST)) { 1672 switch (info->flow_type) { 1673 case TCP_V4_FLOW: 1674 ndc->l4_hash &= ~HV_TCP4_L4HASH; 1675 break; 1676 1677 case TCP_V6_FLOW: 1678 ndc->l4_hash &= ~HV_TCP6_L4HASH; 1679 break; 1680 1681 case UDP_V4_FLOW: 1682 ndc->l4_hash &= ~HV_UDP4_L4HASH; 1683 break; 1684 1685 case UDP_V6_FLOW: 1686 ndc->l4_hash &= ~HV_UDP6_L4HASH; 1687 break; 1688 1689 default: 1690 return -EOPNOTSUPP; 1691 } 1692 1693 return 0; 1694 } 1695 1696 return -EOPNOTSUPP; 1697} 1698 1699static u32 netvsc_get_rxfh_key_size(struct net_device *dev) 1700{ 1701 return NETVSC_HASH_KEYLEN; 1702} 1703 1704static u32 netvsc_rss_indir_size(struct net_device *dev) 1705{ 1706 struct net_device_context *ndc = netdev_priv(dev); 1707 1708 return ndc->rx_table_sz; 1709} 1710 1711static int netvsc_get_rxfh(struct net_device *dev, 1712 struct ethtool_rxfh_param *rxfh) 1713{ 1714 struct net_device_context *ndc = netdev_priv(dev); 1715 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); 1716 struct rndis_device *rndis_dev; 1717 int i; 1718 1719 if (!ndev) 1720 return -ENODEV; 1721 1722 rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ 1723 1724 rndis_dev = ndev->extension; 1725 if (rxfh->indir) { 1726 for (i = 0; i < ndc->rx_table_sz; i++) 1727 rxfh->indir[i] = ndc->rx_table[i]; 1728 } 1729 1730 if (rxfh->key) 1731 memcpy(rxfh->key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN); 1732 1733 return 0; 1734} 1735 1736static int netvsc_set_rxfh(struct net_device *dev, 1737 struct ethtool_rxfh_param *rxfh, 1738 struct netlink_ext_ack *extack) 1739{ 1740 struct net_device_context *ndc = netdev_priv(dev); 1741 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); 1742 struct rndis_device *rndis_dev; 1743 u8 *key = rxfh->key; 1744 int i; 1745 1746 if (!ndev) 1747 return -ENODEV; 1748 1749 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && 1750 rxfh->hfunc != ETH_RSS_HASH_TOP) 1751 return -EOPNOTSUPP; 1752 1753 if (!ndc->rx_table_sz) 1754 return -EOPNOTSUPP; 1755 1756 rndis_dev = ndev->extension; 1757 if (rxfh->indir) { 1758 for (i = 0; i < ndc->rx_table_sz; i++) 1759 if (rxfh->indir[i] >= ndev->num_chn) 1760 return -EINVAL; 1761 1762 for (i = 0; i < ndc->rx_table_sz; i++) 1763 ndc->rx_table[i] = rxfh->indir[i]; 1764 } 1765 1766 if (!key) { 1767 if (!rxfh->indir) 1768 return 0; 1769 1770 key = rndis_dev->rss_key; 1771 } 1772 1773 return rndis_filter_set_rss_param(rndis_dev, key); 1774} 1775 1776/* Hyper-V RNDIS protocol does not have ring in the HW sense. 1777 * It does have pre-allocated receive area which is divided into sections. 1778 */ 1779static void __netvsc_get_ringparam(struct netvsc_device *nvdev, 1780 struct ethtool_ringparam *ring) 1781{ 1782 u32 max_buf_size; 1783 1784 ring->rx_pending = nvdev->recv_section_cnt; 1785 ring->tx_pending = nvdev->send_section_cnt; 1786 1787 if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2) 1788 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; 1789 else 1790 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; 1791 1792 ring->rx_max_pending = max_buf_size / nvdev->recv_section_size; 1793 ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE 1794 / nvdev->send_section_size; 1795} 1796 1797static void netvsc_get_ringparam(struct net_device *ndev, 1798 struct ethtool_ringparam *ring, 1799 struct kernel_ethtool_ringparam *kernel_ring, 1800 struct netlink_ext_ack *extack) 1801{ 1802 struct net_device_context *ndevctx = netdev_priv(ndev); 1803 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1804 1805 if (!nvdev) 1806 return; 1807 1808 __netvsc_get_ringparam(nvdev, ring); 1809} 1810 1811static int netvsc_set_ringparam(struct net_device *ndev, 1812 struct ethtool_ringparam *ring, 1813 struct kernel_ethtool_ringparam *kernel_ring, 1814 struct netlink_ext_ack *extack) 1815{ 1816 struct net_device_context *ndevctx = netdev_priv(ndev); 1817 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1818 struct netvsc_device_info *device_info; 1819 struct ethtool_ringparam orig; 1820 u32 new_tx, new_rx; 1821 int ret = 0; 1822 1823 if (!nvdev || nvdev->destroy) 1824 return -ENODEV; 1825 1826 memset(&orig, 0, sizeof(orig)); 1827 __netvsc_get_ringparam(nvdev, &orig); 1828 1829 new_tx = clamp_t(u32, ring->tx_pending, 1830 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending); 1831 new_rx = clamp_t(u32, ring->rx_pending, 1832 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending); 1833 1834 if (new_tx == orig.tx_pending && 1835 new_rx == orig.rx_pending) 1836 return 0; /* no change */ 1837 1838 device_info = netvsc_devinfo_get(nvdev); 1839 1840 if (!device_info) 1841 return -ENOMEM; 1842 1843 device_info->send_sections = new_tx; 1844 device_info->recv_sections = new_rx; 1845 1846 ret = netvsc_detach(ndev, nvdev); 1847 if (ret) 1848 goto out; 1849 1850 ret = netvsc_attach(ndev, device_info); 1851 if (ret) { 1852 device_info->send_sections = orig.tx_pending; 1853 device_info->recv_sections = orig.rx_pending; 1854 1855 if (netvsc_attach(ndev, device_info)) 1856 netdev_err(ndev, "restoring ringparam failed"); 1857 } 1858 1859out: 1860 netvsc_devinfo_put(device_info); 1861 return ret; 1862} 1863 1864static netdev_features_t netvsc_fix_features(struct net_device *ndev, 1865 netdev_features_t features) 1866{ 1867 struct net_device_context *ndevctx = netdev_priv(ndev); 1868 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1869 1870 if (!nvdev || nvdev->destroy) 1871 return features; 1872 1873 if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) { 1874 features ^= NETIF_F_LRO; 1875 netdev_info(ndev, "Skip LRO - unsupported with XDP\n"); 1876 } 1877 1878 return features; 1879} 1880 1881static int netvsc_set_features(struct net_device *ndev, 1882 netdev_features_t features) 1883{ 1884 netdev_features_t change = features ^ ndev->features; 1885 struct net_device_context *ndevctx = netdev_priv(ndev); 1886 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1887 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); 1888 struct ndis_offload_params offloads; 1889 int ret = 0; 1890 1891 if (!nvdev || nvdev->destroy) 1892 return -ENODEV; 1893 1894 if (!(change & NETIF_F_LRO)) 1895 goto syncvf; 1896 1897 memset(&offloads, 0, sizeof(struct ndis_offload_params)); 1898 1899 if (features & NETIF_F_LRO) { 1900 offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED; 1901 offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED; 1902 } else { 1903 offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED; 1904 offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED; 1905 } 1906 1907 ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads); 1908 1909 if (ret) { 1910 features ^= NETIF_F_LRO; 1911 ndev->features = features; 1912 } 1913 1914syncvf: 1915 if (!vf_netdev) 1916 return ret; 1917 1918 vf_netdev->wanted_features = features; 1919 netdev_update_features(vf_netdev); 1920 1921 return ret; 1922} 1923 1924static int netvsc_get_regs_len(struct net_device *netdev) 1925{ 1926 return VRSS_SEND_TAB_SIZE * sizeof(u32); 1927} 1928 1929static void netvsc_get_regs(struct net_device *netdev, 1930 struct ethtool_regs *regs, void *p) 1931{ 1932 struct net_device_context *ndc = netdev_priv(netdev); 1933 u32 *regs_buff = p; 1934 1935 /* increase the version, if buffer format is changed. */ 1936 regs->version = 1; 1937 1938 memcpy(regs_buff, ndc->tx_table, VRSS_SEND_TAB_SIZE * sizeof(u32)); 1939} 1940 1941static u32 netvsc_get_msglevel(struct net_device *ndev) 1942{ 1943 struct net_device_context *ndev_ctx = netdev_priv(ndev); 1944 1945 return ndev_ctx->msg_enable; 1946} 1947 1948static void netvsc_set_msglevel(struct net_device *ndev, u32 val) 1949{ 1950 struct net_device_context *ndev_ctx = netdev_priv(ndev); 1951 1952 ndev_ctx->msg_enable = val; 1953} 1954 1955static const struct ethtool_ops ethtool_ops = { 1956 .get_drvinfo = netvsc_get_drvinfo, 1957 .get_regs_len = netvsc_get_regs_len, 1958 .get_regs = netvsc_get_regs, 1959 .get_msglevel = netvsc_get_msglevel, 1960 .set_msglevel = netvsc_set_msglevel, 1961 .get_link = ethtool_op_get_link, 1962 .get_ethtool_stats = netvsc_get_ethtool_stats, 1963 .get_sset_count = netvsc_get_sset_count, 1964 .get_strings = netvsc_get_strings, 1965 .get_channels = netvsc_get_channels, 1966 .set_channels = netvsc_set_channels, 1967 .get_ts_info = ethtool_op_get_ts_info, 1968 .get_rx_ring_count = netvsc_get_rx_ring_count, 1969 .get_rxfh_key_size = netvsc_get_rxfh_key_size, 1970 .get_rxfh_indir_size = netvsc_rss_indir_size, 1971 .get_rxfh = netvsc_get_rxfh, 1972 .set_rxfh = netvsc_set_rxfh, 1973 .get_rxfh_fields = netvsc_get_rxfh_fields, 1974 .set_rxfh_fields = netvsc_set_rxfh_fields, 1975 .get_link_ksettings = netvsc_get_link_ksettings, 1976 .set_link_ksettings = netvsc_set_link_ksettings, 1977 .get_ringparam = netvsc_get_ringparam, 1978 .set_ringparam = netvsc_set_ringparam, 1979}; 1980 1981static const struct net_device_ops device_ops = { 1982 .ndo_open = netvsc_open, 1983 .ndo_stop = netvsc_close, 1984 .ndo_start_xmit = netvsc_start_xmit, 1985 .ndo_change_rx_flags = netvsc_change_rx_flags, 1986 .ndo_set_rx_mode = netvsc_set_rx_mode, 1987 .ndo_fix_features = netvsc_fix_features, 1988 .ndo_set_features = netvsc_set_features, 1989 .ndo_change_mtu = netvsc_change_mtu, 1990 .ndo_validate_addr = eth_validate_addr, 1991 .ndo_set_mac_address = netvsc_set_mac_addr, 1992 .ndo_select_queue = netvsc_select_queue, 1993 .ndo_get_stats64 = netvsc_get_stats64, 1994 .ndo_bpf = netvsc_bpf, 1995 .ndo_xdp_xmit = netvsc_ndoxdp_xmit, 1996}; 1997 1998/* 1999 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link 2000 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is 2001 * present send GARP packet to network peers with netif_notify_peers(). 2002 */ 2003static void netvsc_link_change(struct work_struct *w) 2004{ 2005 struct net_device_context *ndev_ctx = 2006 container_of(w, struct net_device_context, dwork.work); 2007 struct hv_device *device_obj = ndev_ctx->device_ctx; 2008 struct net_device *net = hv_get_drvdata(device_obj); 2009 unsigned long flags, next_reconfig, delay; 2010 struct netvsc_reconfig *event = NULL; 2011 struct netvsc_device *net_device; 2012 struct rndis_device *rdev; 2013 bool reschedule = false; 2014 2015 /* if changes are happening, comeback later */ 2016 if (!rtnl_trylock()) { 2017 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT); 2018 return; 2019 } 2020 2021 net_device = rtnl_dereference(ndev_ctx->nvdev); 2022 if (!net_device) 2023 goto out_unlock; 2024 2025 rdev = net_device->extension; 2026 2027 next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT; 2028 if (time_is_after_jiffies(next_reconfig)) { 2029 /* link_watch only sends one notification with current state 2030 * per second, avoid doing reconfig more frequently. Handle 2031 * wrap around. 2032 */ 2033 delay = next_reconfig - jiffies; 2034 delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT; 2035 schedule_delayed_work(&ndev_ctx->dwork, delay); 2036 goto out_unlock; 2037 } 2038 ndev_ctx->last_reconfig = jiffies; 2039 2040 spin_lock_irqsave(&ndev_ctx->lock, flags); 2041 if (!list_empty(&ndev_ctx->reconfig_events)) { 2042 event = list_first_entry(&ndev_ctx->reconfig_events, 2043 struct netvsc_reconfig, list); 2044 list_del(&event->list); 2045 reschedule = !list_empty(&ndev_ctx->reconfig_events); 2046 } 2047 spin_unlock_irqrestore(&ndev_ctx->lock, flags); 2048 2049 if (!event) 2050 goto out_unlock; 2051 2052 switch (event->event) { 2053 /* Only the following events are possible due to the check in 2054 * netvsc_linkstatus_callback() 2055 */ 2056 case RNDIS_STATUS_MEDIA_CONNECT: 2057 if (rdev->link_state) { 2058 rdev->link_state = false; 2059 netif_carrier_on(net); 2060 netvsc_tx_enable(net_device, net); 2061 } else { 2062 __netdev_notify_peers(net); 2063 } 2064 kfree(event); 2065 break; 2066 case RNDIS_STATUS_MEDIA_DISCONNECT: 2067 if (!rdev->link_state) { 2068 rdev->link_state = true; 2069 netif_carrier_off(net); 2070 netvsc_tx_disable(net_device, net); 2071 } 2072 kfree(event); 2073 break; 2074 case RNDIS_STATUS_NETWORK_CHANGE: 2075 /* Only makes sense if carrier is present */ 2076 if (!rdev->link_state) { 2077 rdev->link_state = true; 2078 netif_carrier_off(net); 2079 netvsc_tx_disable(net_device, net); 2080 event->event = RNDIS_STATUS_MEDIA_CONNECT; 2081 spin_lock_irqsave(&ndev_ctx->lock, flags); 2082 list_add(&event->list, &ndev_ctx->reconfig_events); 2083 spin_unlock_irqrestore(&ndev_ctx->lock, flags); 2084 reschedule = true; 2085 } 2086 break; 2087 } 2088 2089 rtnl_unlock(); 2090 2091 /* link_watch only sends one notification with current state per 2092 * second, handle next reconfig event in 2 seconds. 2093 */ 2094 if (reschedule) 2095 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT); 2096 2097 return; 2098 2099out_unlock: 2100 rtnl_unlock(); 2101} 2102 2103static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) 2104{ 2105 struct net_device_context *net_device_ctx; 2106 struct net_device *dev; 2107 2108 dev = netdev_master_upper_dev_get(vf_netdev); 2109 if (!dev || dev->netdev_ops != &device_ops) 2110 return NULL; /* not a netvsc device */ 2111 2112 net_device_ctx = netdev_priv(dev); 2113 if (!rtnl_dereference(net_device_ctx->nvdev)) 2114 return NULL; /* device is removed */ 2115 2116 return dev; 2117} 2118 2119/* Called when VF is injecting data into network stack. 2120 * Change the associated network device from VF to netvsc. 2121 * note: already called with rcu_read_lock 2122 */ 2123static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) 2124{ 2125 struct sk_buff *skb = *pskb; 2126 struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data); 2127 struct net_device_context *ndev_ctx = netdev_priv(ndev); 2128 struct netvsc_vf_pcpu_stats *pcpu_stats 2129 = this_cpu_ptr(ndev_ctx->vf_stats); 2130 2131 skb = skb_share_check(skb, GFP_ATOMIC); 2132 if (unlikely(!skb)) 2133 return RX_HANDLER_CONSUMED; 2134 2135 *pskb = skb; 2136 2137 skb->dev = ndev; 2138 2139 u64_stats_update_begin(&pcpu_stats->syncp); 2140 pcpu_stats->rx_packets++; 2141 pcpu_stats->rx_bytes += skb->len; 2142 u64_stats_update_end(&pcpu_stats->syncp); 2143 2144 return RX_HANDLER_ANOTHER; 2145} 2146 2147static int netvsc_vf_join(struct net_device *vf_netdev, 2148 struct net_device *ndev, int context) 2149{ 2150 struct net_device_context *ndev_ctx = netdev_priv(ndev); 2151 int ret; 2152 2153 ret = netdev_rx_handler_register(vf_netdev, 2154 netvsc_vf_handle_frame, ndev); 2155 if (ret != 0) { 2156 netdev_err(vf_netdev, 2157 "can not register netvsc VF receive handler (err = %d)\n", 2158 ret); 2159 goto rx_handler_failed; 2160 } 2161 2162 ret = netdev_master_upper_dev_link(vf_netdev, ndev, 2163 NULL, NULL, NULL); 2164 if (ret != 0) { 2165 netdev_err(vf_netdev, 2166 "can not set master device %s (err = %d)\n", 2167 ndev->name, ret); 2168 goto upper_link_failed; 2169 } 2170 2171 /* If this registration is called from probe context vf_takeover 2172 * is taken care of later in probe itself. 2173 */ 2174 if (context == VF_REG_IN_NOTIFIER) 2175 schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); 2176 2177 call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); 2178 2179 netdev_info(vf_netdev, "joined to %s\n", ndev->name); 2180 return 0; 2181 2182upper_link_failed: 2183 netdev_rx_handler_unregister(vf_netdev); 2184rx_handler_failed: 2185 return ret; 2186} 2187 2188static void __netvsc_vf_setup(struct net_device *ndev, 2189 struct net_device *vf_netdev) 2190{ 2191 int ret; 2192 2193 /* Align MTU of VF with master */ 2194 ret = dev_set_mtu(vf_netdev, ndev->mtu); 2195 if (ret) 2196 netdev_warn(vf_netdev, 2197 "unable to change mtu to %u\n", ndev->mtu); 2198 2199 /* set multicast etc flags on VF */ 2200 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL); 2201 2202 /* sync address list from ndev to VF */ 2203 netif_addr_lock_bh(ndev); 2204 dev_uc_sync(vf_netdev, ndev); 2205 dev_mc_sync(vf_netdev, ndev); 2206 netif_addr_unlock_bh(ndev); 2207 2208 if (netif_running(ndev)) { 2209 ret = dev_open(vf_netdev, NULL); 2210 if (ret) 2211 netdev_warn(vf_netdev, 2212 "unable to open: %d\n", ret); 2213 } 2214} 2215 2216/* Setup VF as slave of the synthetic device. 2217 * Runs in workqueue to avoid recursion in netlink callbacks. 2218 */ 2219static void netvsc_vf_setup(struct work_struct *w) 2220{ 2221 struct net_device_context *ndev_ctx 2222 = container_of(w, struct net_device_context, vf_takeover.work); 2223 struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx); 2224 struct net_device *vf_netdev; 2225 2226 if (!rtnl_trylock()) { 2227 schedule_delayed_work(&ndev_ctx->vf_takeover, 0); 2228 return; 2229 } 2230 2231 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 2232 if (vf_netdev) 2233 __netvsc_vf_setup(ndev, vf_netdev); 2234 2235 rtnl_unlock(); 2236} 2237 2238/* Find netvsc by VF serial number. 2239 * The PCI hyperv controller records the serial number as the slot kobj name. 2240 */ 2241static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) 2242{ 2243 struct device *parent = vf_netdev->dev.parent; 2244 struct net_device_context *ndev_ctx; 2245 struct net_device *ndev; 2246 struct pci_dev *pdev; 2247 u32 serial; 2248 2249 if (!parent || !dev_is_pci(parent)) 2250 return NULL; /* not a PCI device */ 2251 2252 pdev = to_pci_dev(parent); 2253 if (!pdev->slot) { 2254 netdev_notice(vf_netdev, "no PCI slot information\n"); 2255 return NULL; 2256 } 2257 2258 if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) { 2259 netdev_notice(vf_netdev, "Invalid vf serial:%s\n", 2260 pci_slot_name(pdev->slot)); 2261 return NULL; 2262 } 2263 2264 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { 2265 if (!ndev_ctx->vf_alloc) 2266 continue; 2267 2268 if (ndev_ctx->vf_serial != serial) 2269 continue; 2270 2271 ndev = hv_get_drvdata(ndev_ctx->device_ctx); 2272 if (ndev->addr_len != vf_netdev->addr_len || 2273 memcmp(ndev->perm_addr, vf_netdev->perm_addr, 2274 ndev->addr_len) != 0) 2275 continue; 2276 2277 return ndev; 2278 2279 } 2280 2281 /* Fallback path to check synthetic vf with help of mac addr. 2282 * Because this function can be called before vf_netdev is 2283 * initialized (NETDEV_POST_INIT) when its perm_addr has not been copied 2284 * from dev_addr, also try to match to its dev_addr. 2285 * Note: On Hyper-V and Azure, it's not possible to set a MAC address 2286 * on a VF that matches to the MAC of a unrelated NETVSC device. 2287 */ 2288 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { 2289 ndev = hv_get_drvdata(ndev_ctx->device_ctx); 2290 if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) || 2291 ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr)) 2292 return ndev; 2293 } 2294 2295 netdev_notice(vf_netdev, 2296 "no netdev found for vf serial:%u\n", serial); 2297 return NULL; 2298} 2299 2300static int netvsc_prepare_bonding(struct net_device *vf_netdev) 2301{ 2302 struct net_device *ndev; 2303 2304 ndev = get_netvsc_byslot(vf_netdev); 2305 if (!ndev) 2306 return NOTIFY_DONE; 2307 2308 /* Set slave flag and no addrconf flag before open 2309 * to prevent IPv6 addrconf. 2310 */ 2311 vf_netdev->flags |= IFF_SLAVE; 2312 vf_netdev->priv_flags |= IFF_NO_ADDRCONF; 2313 return NOTIFY_DONE; 2314} 2315 2316static int netvsc_register_vf(struct net_device *vf_netdev, int context) 2317{ 2318 struct net_device_context *net_device_ctx; 2319 struct netvsc_device *netvsc_dev; 2320 struct bpf_prog *prog; 2321 struct net_device *ndev; 2322 int ret; 2323 2324 if (vf_netdev->addr_len != ETH_ALEN) 2325 return NOTIFY_DONE; 2326 2327 ndev = get_netvsc_byslot(vf_netdev); 2328 if (!ndev) 2329 return NOTIFY_DONE; 2330 2331 net_device_ctx = netdev_priv(ndev); 2332 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); 2333 if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) 2334 return NOTIFY_DONE; 2335 2336 /* if synthetic interface is a different namespace, 2337 * then move the VF to that namespace; join will be 2338 * done again in that context. 2339 */ 2340 if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) { 2341 ret = dev_change_net_namespace(vf_netdev, 2342 dev_net(ndev), "eth%d"); 2343 if (ret) 2344 netdev_err(vf_netdev, 2345 "could not move to same namespace as %s: %d\n", 2346 ndev->name, ret); 2347 else 2348 netdev_info(vf_netdev, 2349 "VF moved to namespace with: %s\n", 2350 ndev->name); 2351 return NOTIFY_DONE; 2352 } 2353 2354 netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); 2355 2356 if (netvsc_vf_join(vf_netdev, ndev, context) != 0) 2357 return NOTIFY_DONE; 2358 2359 dev_hold(vf_netdev); 2360 rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev); 2361 2362 if (ndev->needed_headroom < vf_netdev->needed_headroom) 2363 ndev->needed_headroom = vf_netdev->needed_headroom; 2364 2365 vf_netdev->wanted_features = ndev->features; 2366 netdev_update_features(vf_netdev); 2367 2368 prog = netvsc_xdp_get(netvsc_dev); 2369 netvsc_vf_setxdp(vf_netdev, prog); 2370 2371 return NOTIFY_OK; 2372} 2373 2374/* Change the data path when VF UP/DOWN/CHANGE are detected. 2375 * 2376 * Typically a UP or DOWN event is followed by a CHANGE event, so 2377 * net_device_ctx->data_path_is_vf is used to cache the current data path 2378 * to avoid the duplicate call of netvsc_switch_datapath() and the duplicate 2379 * message. 2380 * 2381 * During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network 2382 * interface, there is only the CHANGE event and no UP or DOWN event. 2383 */ 2384static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event) 2385{ 2386 struct net_device_context *net_device_ctx; 2387 struct netvsc_device *netvsc_dev; 2388 struct net_device *ndev; 2389 bool vf_is_up = false; 2390 int ret; 2391 2392 if (event != NETDEV_GOING_DOWN) 2393 vf_is_up = netif_running(vf_netdev); 2394 2395 ndev = get_netvsc_byref(vf_netdev); 2396 if (!ndev) 2397 return NOTIFY_DONE; 2398 2399 net_device_ctx = netdev_priv(ndev); 2400 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); 2401 if (!netvsc_dev) 2402 return NOTIFY_DONE; 2403 2404 if (net_device_ctx->data_path_is_vf == vf_is_up) 2405 return NOTIFY_OK; 2406 2407 if (vf_is_up && !net_device_ctx->vf_alloc) { 2408 netdev_info(ndev, "Waiting for the VF association from host\n"); 2409 wait_for_completion(&net_device_ctx->vf_add); 2410 } 2411 2412 ret = netvsc_switch_datapath(ndev, vf_is_up); 2413 2414 if (ret) { 2415 netdev_err(ndev, 2416 "Data path failed to switch %s VF: %s, err: %d\n", 2417 vf_is_up ? "to" : "from", vf_netdev->name, ret); 2418 return NOTIFY_DONE; 2419 } else { 2420 netdev_info(ndev, "Data path switched %s VF: %s\n", 2421 vf_is_up ? "to" : "from", vf_netdev->name); 2422 2423 /* In Azure, when accelerated networking in enabled, other NICs 2424 * like MANA, MLX, are configured as a bonded nic with 2425 * Netvsc(failover) NIC. For bonded NICs, the min of the max 2426 * pkt aggregate size of the members is propagated in the stack. 2427 * In order to allow these NICs (MANA/MLX) to use up to 2428 * GSO_MAX_SIZE gso packet size, we need to allow Netvsc NIC to 2429 * also support this in the guest. 2430 * This value is only increased for netvsc NIC when datapath is 2431 * switched over to the VF 2432 */ 2433 if (vf_is_up) 2434 netif_set_tso_max_size(ndev, vf_netdev->tso_max_size); 2435 else 2436 netif_set_tso_max_size(ndev, netvsc_dev->netvsc_gso_max_size); 2437 } 2438 2439 return NOTIFY_OK; 2440} 2441 2442static int netvsc_unregister_vf(struct net_device *vf_netdev) 2443{ 2444 struct net_device *ndev; 2445 struct net_device_context *net_device_ctx; 2446 2447 ndev = get_netvsc_byref(vf_netdev); 2448 if (!ndev) 2449 return NOTIFY_DONE; 2450 2451 net_device_ctx = netdev_priv(ndev); 2452 cancel_delayed_work_sync(&net_device_ctx->vf_takeover); 2453 2454 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); 2455 2456 reinit_completion(&net_device_ctx->vf_add); 2457 netdev_rx_handler_unregister(vf_netdev); 2458 netdev_upper_dev_unlink(vf_netdev, ndev); 2459 RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); 2460 dev_put(vf_netdev); 2461 2462 ndev->needed_headroom = RNDIS_AND_PPI_SIZE; 2463 2464 return NOTIFY_OK; 2465} 2466 2467static int check_dev_is_matching_vf(struct net_device *event_ndev) 2468{ 2469 /* Skip NetVSC interfaces */ 2470 if (event_ndev->netdev_ops == &device_ops) 2471 return -ENODEV; 2472 2473 /* Avoid non-Ethernet type devices */ 2474 if (event_ndev->type != ARPHRD_ETHER) 2475 return -ENODEV; 2476 2477 /* Avoid Vlan dev with same MAC registering as VF */ 2478 if (is_vlan_dev(event_ndev)) 2479 return -ENODEV; 2480 2481 /* Avoid Bonding master dev with same MAC registering as VF */ 2482 if (netif_is_bond_master(event_ndev)) 2483 return -ENODEV; 2484 2485 return 0; 2486} 2487 2488static int netvsc_probe(struct hv_device *dev, 2489 const struct hv_vmbus_device_id *dev_id) 2490{ 2491 struct net_device *net = NULL, *vf_netdev; 2492 struct net_device_context *net_device_ctx; 2493 struct netvsc_device_info *device_info = NULL; 2494 struct netvsc_device *nvdev; 2495 int ret = -ENOMEM; 2496 2497 net = alloc_etherdev_mq(sizeof(struct net_device_context), 2498 VRSS_CHANNEL_MAX); 2499 if (!net) 2500 goto no_net; 2501 2502 netif_carrier_off(net); 2503 2504 netvsc_init_settings(net); 2505 2506 net_device_ctx = netdev_priv(net); 2507 net_device_ctx->device_ctx = dev; 2508 net_device_ctx->msg_enable = netif_msg_init(debug, default_msg); 2509 if (netif_msg_probe(net_device_ctx)) 2510 netdev_dbg(net, "netvsc msg_enable: %d\n", 2511 net_device_ctx->msg_enable); 2512 2513 hv_set_drvdata(dev, net); 2514 2515 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); 2516 2517 init_completion(&net_device_ctx->vf_add); 2518 spin_lock_init(&net_device_ctx->lock); 2519 INIT_LIST_HEAD(&net_device_ctx->reconfig_events); 2520 INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); 2521 INIT_DELAYED_WORK(&net_device_ctx->vfns_work, netvsc_vfns_work); 2522 2523 net_device_ctx->vf_stats 2524 = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); 2525 if (!net_device_ctx->vf_stats) 2526 goto no_stats; 2527 2528 net->netdev_ops = &device_ops; 2529 net->ethtool_ops = &ethtool_ops; 2530 SET_NETDEV_DEV(net, &dev->device); 2531 dma_set_min_align_mask(&dev->device, HV_HYP_PAGE_SIZE - 1); 2532 2533 /* We always need headroom for rndis header */ 2534 net->needed_headroom = RNDIS_AND_PPI_SIZE; 2535 2536 /* Initialize the number of queues to be 1, we may change it if more 2537 * channels are offered later. 2538 */ 2539 netif_set_real_num_tx_queues(net, 1); 2540 netif_set_real_num_rx_queues(net, 1); 2541 2542 /* Notify the netvsc driver of the new device */ 2543 device_info = netvsc_devinfo_get(NULL); 2544 2545 if (!device_info) { 2546 ret = -ENOMEM; 2547 goto devinfo_failed; 2548 } 2549 2550 /* We must get rtnl lock before scheduling nvdev->subchan_work, 2551 * otherwise netvsc_subchan_work() can get rtnl lock first and wait 2552 * all subchannels to show up, but that may not happen because 2553 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() 2554 * -> ... -> device_add() -> ... -> __device_attach() can't get 2555 * the device lock, so all the subchannels can't be processed -- 2556 * finally netvsc_subchan_work() hangs forever. 2557 * 2558 * The rtnl lock also needs to be held before rndis_filter_device_add() 2559 * which advertises nvsp_2_vsc_capability / sriov bit, and triggers 2560 * VF NIC offering and registering. If VF NIC finished register_netdev() 2561 * earlier it may cause name based config failure. 2562 */ 2563 rtnl_lock(); 2564 2565 nvdev = rndis_filter_device_add(dev, device_info); 2566 if (IS_ERR(nvdev)) { 2567 ret = PTR_ERR(nvdev); 2568 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); 2569 goto rndis_failed; 2570 } 2571 2572 eth_hw_addr_set(net, device_info->mac_adr); 2573 2574 if (nvdev->num_chn > 1) 2575 schedule_work(&nvdev->subchan_work); 2576 2577 /* hw_features computed in rndis_netdev_set_hwcaps() */ 2578 net->features = net->hw_features | 2579 NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | 2580 NETIF_F_HW_VLAN_CTAG_RX; 2581 net->vlan_features = net->features; 2582 2583 netdev_lockdep_set_classes(net); 2584 2585 net->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 2586 NETDEV_XDP_ACT_NDO_XMIT; 2587 2588 /* MTU range: 68 - 1500 or 65521 */ 2589 net->min_mtu = NETVSC_MTU_MIN; 2590 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) 2591 net->max_mtu = NETVSC_MTU - ETH_HLEN; 2592 else 2593 net->max_mtu = ETH_DATA_LEN; 2594 2595 nvdev->tx_disable = false; 2596 2597 ret = register_netdevice(net); 2598 if (ret != 0) { 2599 pr_err("Unable to register netdev.\n"); 2600 goto register_failed; 2601 } 2602 2603 list_add(&net_device_ctx->list, &netvsc_dev_list); 2604 2605 /* When the hv_netvsc driver is unloaded and reloaded, the 2606 * NET_DEVICE_REGISTER for the vf device is replayed before probe 2607 * is complete. This is because register_netdevice_notifier() gets 2608 * registered before vmbus_driver_register() so that callback func 2609 * is set before probe and we don't miss events like NETDEV_POST_INIT 2610 * So, in this section we try to register the matching vf device that 2611 * is present as a netdevice, knowing that its register call is not 2612 * processed in the netvsc_netdev_notifier(as probing is progress and 2613 * get_netvsc_byslot fails). 2614 */ 2615 for_each_netdev(dev_net(net), vf_netdev) { 2616 ret = check_dev_is_matching_vf(vf_netdev); 2617 if (ret != 0) 2618 continue; 2619 2620 if (net != get_netvsc_byslot(vf_netdev)) 2621 continue; 2622 2623 netvsc_prepare_bonding(vf_netdev); 2624 netdev_lock_ops(vf_netdev); 2625 netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE); 2626 netdev_unlock_ops(vf_netdev); 2627 __netvsc_vf_setup(net, vf_netdev); 2628 break; 2629 } 2630 rtnl_unlock(); 2631 2632 netvsc_devinfo_put(device_info); 2633 return 0; 2634 2635register_failed: 2636 rndis_filter_device_remove(dev, nvdev); 2637rndis_failed: 2638 rtnl_unlock(); 2639 netvsc_devinfo_put(device_info); 2640devinfo_failed: 2641 free_percpu(net_device_ctx->vf_stats); 2642no_stats: 2643 hv_set_drvdata(dev, NULL); 2644 free_netdev(net); 2645no_net: 2646 return ret; 2647} 2648 2649static void netvsc_remove(struct hv_device *dev) 2650{ 2651 struct net_device_context *ndev_ctx; 2652 struct net_device *vf_netdev, *net; 2653 struct netvsc_device *nvdev; 2654 2655 net = hv_get_drvdata(dev); 2656 if (net == NULL) { 2657 dev_err(&dev->device, "No net device to remove\n"); 2658 return; 2659 } 2660 2661 ndev_ctx = netdev_priv(net); 2662 2663 cancel_delayed_work_sync(&ndev_ctx->dwork); 2664 2665 rtnl_lock(); 2666 cancel_delayed_work_sync(&ndev_ctx->vfns_work); 2667 2668 nvdev = rtnl_dereference(ndev_ctx->nvdev); 2669 if (nvdev) { 2670 cancel_work_sync(&nvdev->subchan_work); 2671 netvsc_xdp_set(net, NULL, NULL, nvdev); 2672 } 2673 2674 /* 2675 * Call to the vsc driver to let it know that the device is being 2676 * removed. Also blocks mtu and channel changes. 2677 */ 2678 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 2679 if (vf_netdev) 2680 netvsc_unregister_vf(vf_netdev); 2681 2682 if (nvdev) 2683 rndis_filter_device_remove(dev, nvdev); 2684 2685 unregister_netdevice(net); 2686 list_del(&ndev_ctx->list); 2687 2688 rtnl_unlock(); 2689 2690 hv_set_drvdata(dev, NULL); 2691 2692 free_percpu(ndev_ctx->vf_stats); 2693 free_netdev(net); 2694} 2695 2696static int netvsc_suspend(struct hv_device *dev) 2697{ 2698 struct net_device_context *ndev_ctx; 2699 struct netvsc_device *nvdev; 2700 struct net_device *net; 2701 int ret; 2702 2703 net = hv_get_drvdata(dev); 2704 2705 ndev_ctx = netdev_priv(net); 2706 cancel_delayed_work_sync(&ndev_ctx->dwork); 2707 2708 rtnl_lock(); 2709 cancel_delayed_work_sync(&ndev_ctx->vfns_work); 2710 2711 nvdev = rtnl_dereference(ndev_ctx->nvdev); 2712 if (nvdev == NULL) { 2713 ret = -ENODEV; 2714 goto out; 2715 } 2716 2717 /* Save the current config info */ 2718 ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev); 2719 if (!ndev_ctx->saved_netvsc_dev_info) { 2720 ret = -ENOMEM; 2721 goto out; 2722 } 2723 ret = netvsc_detach(net, nvdev); 2724out: 2725 rtnl_unlock(); 2726 2727 return ret; 2728} 2729 2730static int netvsc_resume(struct hv_device *dev) 2731{ 2732 struct net_device *net = hv_get_drvdata(dev); 2733 struct net_device_context *net_device_ctx; 2734 struct netvsc_device_info *device_info; 2735 int ret; 2736 2737 rtnl_lock(); 2738 2739 net_device_ctx = netdev_priv(net); 2740 2741 /* Reset the data path to the netvsc NIC before re-opening the vmbus 2742 * channel. Later netvsc_netdev_event() will switch the data path to 2743 * the VF upon the UP or CHANGE event. 2744 */ 2745 net_device_ctx->data_path_is_vf = false; 2746 device_info = net_device_ctx->saved_netvsc_dev_info; 2747 2748 ret = netvsc_attach(net, device_info); 2749 2750 netvsc_devinfo_put(device_info); 2751 net_device_ctx->saved_netvsc_dev_info = NULL; 2752 2753 rtnl_unlock(); 2754 2755 return ret; 2756} 2757static const struct hv_vmbus_device_id id_table[] = { 2758 /* Network guid */ 2759 { HV_NIC_GUID, }, 2760 { }, 2761}; 2762 2763MODULE_DEVICE_TABLE(vmbus, id_table); 2764 2765/* The one and only one */ 2766static struct hv_driver netvsc_drv = { 2767 .name = KBUILD_MODNAME, 2768 .id_table = id_table, 2769 .probe = netvsc_probe, 2770 .remove = netvsc_remove, 2771 .suspend = netvsc_suspend, 2772 .resume = netvsc_resume, 2773 .driver = { 2774 .probe_type = PROBE_FORCE_SYNCHRONOUS, 2775 }, 2776}; 2777 2778/* Set VF's namespace same as the synthetic NIC */ 2779static void netvsc_event_set_vf_ns(struct net_device *ndev) 2780{ 2781 struct net_device_context *ndev_ctx = netdev_priv(ndev); 2782 struct net_device *vf_netdev; 2783 int ret; 2784 2785 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 2786 if (!vf_netdev) 2787 return; 2788 2789 if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) { 2790 ret = dev_change_net_namespace(vf_netdev, dev_net(ndev), 2791 "eth%d"); 2792 if (ret) 2793 netdev_err(vf_netdev, 2794 "Cannot move to same namespace as %s: %d\n", 2795 ndev->name, ret); 2796 else 2797 netdev_info(vf_netdev, 2798 "Moved VF to namespace with: %s\n", 2799 ndev->name); 2800 } 2801} 2802 2803void netvsc_vfns_work(struct work_struct *w) 2804{ 2805 struct net_device_context *ndev_ctx = 2806 container_of(w, struct net_device_context, vfns_work.work); 2807 struct net_device *ndev; 2808 2809 if (!rtnl_trylock()) { 2810 schedule_delayed_work(&ndev_ctx->vfns_work, 1); 2811 return; 2812 } 2813 2814 ndev = hv_get_drvdata(ndev_ctx->device_ctx); 2815 if (!ndev) 2816 goto out; 2817 2818 netvsc_event_set_vf_ns(ndev); 2819 2820out: 2821 rtnl_unlock(); 2822} 2823 2824/* 2825 * On Hyper-V, every VF interface is matched with a corresponding 2826 * synthetic interface. The synthetic interface is presented first 2827 * to the guest. When the corresponding VF instance is registered, 2828 * we will take care of switching the data path. 2829 */ 2830static int netvsc_netdev_event(struct notifier_block *this, 2831 unsigned long event, void *ptr) 2832{ 2833 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); 2834 struct net_device_context *ndev_ctx; 2835 int ret = 0; 2836 2837 if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) { 2838 ndev_ctx = netdev_priv(event_dev); 2839 schedule_delayed_work(&ndev_ctx->vfns_work, 0); 2840 return NOTIFY_DONE; 2841 } 2842 2843 ret = check_dev_is_matching_vf(event_dev); 2844 if (ret != 0) 2845 return NOTIFY_DONE; 2846 2847 switch (event) { 2848 case NETDEV_POST_INIT: 2849 return netvsc_prepare_bonding(event_dev); 2850 case NETDEV_REGISTER: 2851 return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER); 2852 case NETDEV_UNREGISTER: 2853 return netvsc_unregister_vf(event_dev); 2854 case NETDEV_UP: 2855 case NETDEV_DOWN: 2856 case NETDEV_CHANGE: 2857 case NETDEV_GOING_DOWN: 2858 return netvsc_vf_changed(event_dev, event); 2859 default: 2860 return NOTIFY_DONE; 2861 } 2862} 2863 2864static struct notifier_block netvsc_netdev_notifier = { 2865 .notifier_call = netvsc_netdev_event, 2866}; 2867 2868static void __exit netvsc_drv_exit(void) 2869{ 2870 unregister_netdevice_notifier(&netvsc_netdev_notifier); 2871 vmbus_driver_unregister(&netvsc_drv); 2872} 2873 2874static int __init netvsc_drv_init(void) 2875{ 2876 int ret; 2877 2878 if (ring_size < RING_SIZE_MIN) { 2879 ring_size = RING_SIZE_MIN; 2880 pr_info("Increased ring_size to %u (min allowed)\n", 2881 ring_size); 2882 } 2883 netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096); 2884 2885 register_netdevice_notifier(&netvsc_netdev_notifier); 2886 2887 ret = vmbus_driver_register(&netvsc_drv); 2888 if (ret) 2889 goto err_vmbus_reg; 2890 2891 return 0; 2892 2893err_vmbus_reg: 2894 unregister_netdevice_notifier(&netvsc_netdev_notifier); 2895 return ret; 2896} 2897 2898MODULE_LICENSE("GPL"); 2899MODULE_DESCRIPTION("Microsoft Hyper-V network driver"); 2900 2901module_init(netvsc_drv_init); 2902module_exit(netvsc_drv_exit);