/* net/core/netpoll.c -- as of v2.6.31-rc9 */

/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL		50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
		 sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) ||
		    netif_tx_queue_frozen(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communications, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

void netpoll_poll(struct netpoll *np)
{
	struct net_device *dev = np->dev;
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
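
/*
 * Try hard to transmit the skb right away: take the tx queue lock with
 * trylock and retry for roughly one clock tick, tickling the device via
 * netpoll_poll() between attempts.  If the queue stays stopped or the
 * driver keeps returning busy, queue the skb on npinfo->txq so that
 * queue_process() can retry it from the delayed workqueue.
 */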
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_tx_queue_stopped(txq)) {
					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
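
/*
 * Reply to ARP requests that target the netpoll client's local IP.
 * Requests are queued on npinfo->arp_tx by __netpoll_rx() while traffic
 * is trapped and drained via service_arp_queue(); the reply skb is taken
 * from the emergency pool and sent with netpoll_send_skb().
 */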
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* if we actually cared about dst hw addr, it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != np->local_ip ||
	    ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);
	send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	skb_reset_network_header(send_skb);
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */
	if (dev_hard_header(send_skb, skb->dev, ptype,
			    sha, np->dev->dev_addr,
			    send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, sha, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}

int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npi = skb->dev->npinfo;
	struct netpoll *np = npi->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npi->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;
	if (np->local_ip && np->local_ip != iph->daddr)
		goto out;
	if (np->remote_ip && np->remote_ip != iph->saddr)
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh+1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %pI4\n",
	       np->name, &np->local_ip);
	printk(KERN_INFO "%s: interface %s\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %pI4\n",
	       np->name, &np->remote_ip);
	printk(KERN_INFO "%s: remote ethernet address %pM\n",
	       np->name, np->remote_mac);
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}
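
/*
 * Bind a netpoll client to its device: look the interface up by name,
 * bring it up and wait for carrier if necessary, fall back to the
 * device's first inet address when no local IP was given, and install
 * (or refcount) the shared netpoll_info before publishing it through
 * ndev->npinfo.  The fields of *np are typically filled beforehand by
 * netpoll_parse_options() from a "port@ip/dev,port@ip/mac"-style string.
 */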
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto release;
		}

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	if (!ndev->netdev_ops->ndo_poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

 release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return err;
}

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo) {
			if (npinfo->rx_np == np) {
				spin_lock_irqsave(&npinfo->rx_lock, flags);
				npinfo->rx_np = NULL;
				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
			}

			if (atomic_dec_and_test(&npinfo->refcnt)) {
				skb_queue_purge(&npinfo->arp_tx);
				skb_queue_purge(&npinfo->txq);
				cancel_rearming_delayed_work(&npinfo->tx_work);

				/* clean after last, unfinished work */
				__skb_queue_purge(&npinfo->txq);
				kfree(npinfo);
				np->dev->npinfo = NULL;
			}
		}

		dev_put(np->dev);
	}

	np->dev = NULL;
}

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);