Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v3.15
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif *vif = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
                napi_schedule(&vif->napi);

        return IRQ_HANDLED;
}

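/* NAPI poll handler: process at most 'budget' TX requests from the
 * frontend ring via xenvif_tx_action(); when less than the budget was
 * used, complete NAPI and let ring event notifications be re-enabled.
 */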
static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif *vif = container_of(napi, struct xenvif, napi);
        int work_done;

        /* This vif is rogue, we pretend there is nothing to do
         * for this vif to deschedule it from NAPI. But this interface
         * will be turned off in thread context later.
         */
        if (unlikely(vif->disabled)) {
                napi_complete(napi);
                return 0;
        }

        work_done = xenvif_tx_action(vif, budget);

        if (work_done < budget) {
                napi_complete(napi);
                xenvif_napi_schedule_or_enable_events(vif);
        }

        return work_done;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif *vif = dev_id;

        xenvif_kick_thread(vif);

        return IRQ_HANDLED;
}

static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

static void xenvif_wake_queue(unsigned long data)
{
        struct xenvif *vif = (struct xenvif *)data;

        if (netif_queue_stopped(vif->dev)) {
                netdev_err(vif->dev, "draining TX queue\n");
                vif->rx_queue_purge = true;
                xenvif_kick_thread(vif);
                netif_wake_queue(vif->dev);
        }
}

static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        int min_slots_needed;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if vif is not ready */
        if (vif->task == NULL ||
            vif->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        /* At best we'll need one slot for the header and one for each
         * frag.
         */
        min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

        /* If the skb is GSO then we'll also need an extra slot for the
         * metadata.
         */
        if (skb_is_gso(skb))
                min_slots_needed++;

        /* If the skb can't possibly fit in the remaining slots
         * then turn off the queue to give the ring a chance to
         * drain.
         */
        if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
                vif->wake_queue.function = xenvif_wake_queue;
                vif->wake_queue.data = (unsigned long)vif;
                xenvif_stop_queue(vif);
                mod_timer(&vif->wake_queue,
                          jiffies + rx_drain_timeout_jiffies);
        }

        skb_queue_tail(&vif->rx_queue, skb);
        xenvif_kick_thread(vif);

        return NETDEV_TX_OK;

 drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
        napi_enable(&vif->napi);
        enable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                enable_irq(vif->rx_irq);
        xenvif_napi_schedule_or_enable_events(vif);
}

static void xenvif_down(struct xenvif *vif)
{
        napi_disable(&vif->napi);
        disable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                disable_irq(vif->rx_irq);
        del_timer_sync(&vif->credit_timeout);
}

static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_up(vif);
        netif_start_queue(dev);
        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_down(vif);
        netif_stop_queue(dev);
        return 0;
}

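/* With scatter-gather the frontend can take frames up to the 64 KiB IP
 * datagram limit, so the MTU may be raised to 65535 - VLAN_ETH_HLEN
 * (65535 - 18 = 65517 bytes); without SG it stays at ETH_DATA_LEN (1500).
 */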
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

static netdev_features_t xenvif_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}

static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets never
         * freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
         * a guest with the same MAX_SKB_FRAGS
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif, tx_frag_overflow)
        },
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        void *vif = netdev_priv(dev);
        int i;

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats = xenvif_get_stats,
        .ndo_open = xenvif_open,
        .ndo_stop = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

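/* Allocate and register the vifX.Y net_device for the given frontend
 * domain/handle pair. The device starts with the carrier off; it is only
 * brought up once xenvif_connect() has mapped the shared rings. Returns
 * an ERR_PTR() on failure.
 */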
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};
        int i;

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
                                     MAX_GRANT_COPY_OPS);
        if (vif->grant_copy_op == NULL) {
                pr_warn("Could not allocate grant copy space for %s\n", name);
                free_netdev(dev);
                return ERR_PTR(-ENOMEM);
        }

        vif->domid = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;

        vif->disabled = false;

        vif->credit_bytes = vif->remaining_credit = ~0UL;
        vif->credit_usec = 0UL;
        init_timer(&vif->credit_timeout);
        vif->credit_window_start = get_jiffies_64();

        init_timer(&vif->wake_queue);

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        skb_queue_head_init(&vif->rx_queue);
        skb_queue_head_init(&vif->tx_queue);

        vif->pending_cons = 0;
        vif->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; i++)
                vif->pending_ring[i] = i;
        spin_lock_init(&vif->callback_lock);
        spin_lock_init(&vif->response_lock);
        /* If ballooning is disabled, this will consume real memory, so you
         * had better enable it. The long term solution would be to use just a
         * bunch of valid page descriptors, without dependency on ballooning
         */
        err = alloc_xenballooned_pages(MAX_PENDING_REQS,
                                       vif->mmap_pages,
                                       false);
        if (err) {
                netdev_err(dev, "Could not reserve mmap_pages\n");
                vfree(vif->grant_copy_op);
                free_netdev(dev);
                return ERR_PTR(-ENOMEM);
        }
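        /* Each pending TX slot carries a ubuf_info so that, when the
         * zero-copy skb data mapped from the frontend is finally released,
         * xenvif_zerocopy_callback() can identify the slot via .desc and
         * release the corresponding grant. Handles start out invalid.
         */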
        for (i = 0; i < MAX_PENDING_REQS; i++) {
                vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          .ctx = NULL,
                          .desc = i };
                vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        memset(dev->dev_addr, 0xFF, ETH_ALEN);
        dev->dev_addr[0] &= ~0x01;

        netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}

int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
                   unsigned long rx_ring_ref, unsigned int tx_evtchn,
                   unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err = -ENOMEM;

        BUG_ON(vif->tx_irq);
        BUG_ON(vif->task);
        BUG_ON(vif->dealloc_task);

        err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&vif->wq);
        init_waitqueue_head(&vif->dealloc_wq);

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        vif->dev->name, vif);
                if (err < 0)
                        goto err_unmap;
                vif->tx_irq = vif->rx_irq = err;
                disable_irq(vif->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
                         "%s-tx", vif->dev->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        vif->tx_irq_name, vif);
                if (err < 0)
                        goto err_unmap;
                vif->tx_irq = err;
                disable_irq(vif->tx_irq);

                snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
                         "%s-rx", vif->dev->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        vif->rx_irq_name, vif);
                if (err < 0)
                        goto err_tx_unbind;
                vif->rx_irq = err;
                disable_irq(vif->rx_irq);
        }

        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)vif, "%s-guest-rx", vif->dev->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }

        vif->task = task;

        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)vif, "%s-dealloc", vif->dev->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }

        vif->dealloc_task = task;

        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        netif_carrier_on(vif->dev);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();

        wake_up_process(vif->task);
        wake_up_process(vif->dealloc_task);

        return 0;

err_rx_unbind:
        unbind_from_irqhandler(vif->rx_irq, vif);
        vif->rx_irq = 0;
err_tx_unbind:
        unbind_from_irqhandler(vif->tx_irq, vif);
        vif->tx_irq = 0;
err_unmap:
        xenvif_unmap_frontend_rings(vif);
err:
        module_put(THIS_MODULE);
        return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        netif_carrier_off(dev); /* discard queued packets */
        if (netif_running(dev))
                xenvif_down(vif);
        rtnl_unlock();
}

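/* Tear down the connection to the frontend: stop the guest-RX and dealloc
 * kthreads, unbind the event-channel IRQ(s) and unmap the shared rings.
 * Each step is guarded, so this is safe even if xenvif_connect() never
 * completed.
 */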
void xenvif_disconnect(struct xenvif *vif)
{
        if (netif_carrier_ok(vif->dev))
                xenvif_carrier_off(vif);

        if (vif->task) {
                del_timer_sync(&vif->wake_queue);
                kthread_stop(vif->task);
                vif->task = NULL;
        }

        if (vif->dealloc_task) {
                kthread_stop(vif->dealloc_task);
                vif->dealloc_task = NULL;
        }

        if (vif->tx_irq) {
                if (vif->tx_irq == vif->rx_irq)
                        unbind_from_irqhandler(vif->tx_irq, vif);
                else {
                        unbind_from_irqhandler(vif->tx_irq, vif);
                        unbind_from_irqhandler(vif->rx_irq, vif);
                }
                vif->tx_irq = 0;
        }

        xenvif_unmap_frontend_rings(vif);
}

void xenvif_free(struct xenvif *vif)
{
        int i, unmap_timeout = 0;
        /* Here we want to avoid timeout messages if an skb can be legitimately
         * stuck somewhere else. Realistically this could be another vif's
         * internal or QDisc queue. That other vif also has this
         * rx_drain_timeout_msecs timeout, but the timer only ditches the
         * internal queue. After that, the QDisc queue can, in the worst case,
         * put XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that other
         * vif's internal queue, so we need several rounds of such timeouts
         * until we can be sure that no other vif still holds skbs from us.
         * We are not sending more skbs, so newly stuck packets are not
         * interesting for us here.
         */
        unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
                DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));

        for (i = 0; i < MAX_PENDING_REQS; ++i) {
                if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
                        unmap_timeout++;
                        schedule_timeout(msecs_to_jiffies(1000));
                        if (unmap_timeout > worst_case_skb_lifetime &&
                            net_ratelimit())
                                netdev_err(vif->dev,
                                           "Page still granted! Index: %x\n",
                                           i);
                        /* If there are still unmapped pages, reset the loop to
                         * start checking again. We shouldn't exit here until
                         * dealloc thread and NAPI instance release all the
                         * pages. If a kernel bug causes the skbs to stall
                         * somewhere, the interface cannot be brought down
                         * properly.
                         */
                        i = -1;
                }
        }

        free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);

        netif_napi_del(&vif->napi);

        unregister_netdev(vif->dev);

        vfree(vif->grant_copy_op);
        free_netdev(vif->dev);

        module_put(THIS_MODULE);
}