Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.28-rc6, 2162 lines, 57 kB

/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"
#include "gmii.h"
#include "ethtool.h"
#include "tx.h"
#include "rx.h"
#include "efx.h"
#include "mdio_10g.h"
#include "falcon.h"
#include "mac.h"

#define EFX_MAX_MTU (9 * 1024)

/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per cpu
 * workqueue, there is nothing to be gained in making it per NIC
 */
static struct workqueue_struct *refill_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
 *
 * This sets the default for new devices. It can be controlled later
 * using ethtool.
 */
static int lro = true;
module_param(lro, int, 0644);
MODULE_PARM_DESC(lro, "Large receive offload acceleration");

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us to
 * apply a higher level of interrupt moderation to TX events.
 *
 * This is forced to 0 for MSI interrupt mode as the interrupt vector
 * is not written
 */
static unsigned int separate_tx_and_rx_channels = true;

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;

/* This controls whether or not the hardware monitor will trigger a
 * reset when it detects an error condition.
 */
static unsigned int monitor_reset = true;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
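
/* A worked version of the calculation above, for clarity: a 1024-entry
 * TX ring restarted at half full leaves 512 descriptors to drain,
 * i.e. roughly 512 / 3 = 170 worst-case packets. A full-sized
 * 1500-byte frame occupies a 10G link for about 1.2 usec
 * (12000 bits / 10^10 bit/s), hence 512 / 3 * 1.2 = 205 usec; the
 * 150 usec default stays comfortably below that bound. */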

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
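
/* For illustration only: all of these parameters take effect at module
 * load time, e.g.
 *
 *	modprobe sfc lro=0 rss_cpus=4 interrupt_mode=2
 *
 * would disable LRO, ask for four RSS CPUs and force legacy interrupts.
 * The module name and the set of available parameters depend on how the
 * driver is built and packaged. */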

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if (efx->state == STATE_RUNNING)	\
			ASSERT_RTNL();			\
	} while (0)

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
	struct efx_nic *efx = channel->efx;
	int rx_packets;

	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return 0;

	rx_packets = falcon_process_eventq(channel, rx_quota);
	if (rx_packets == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_flush_lro(channel);
	efx_rx_strategy(channel);

	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);

	return rx_packets;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	falcon_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct net_device *napi_dev = channel->napi_dev;
	int rx_packets;

	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
		  channel->channel, raw_smp_processor_id());

	rx_packets = efx_process_channel(channel, budget);

	if (rx_packets < budget) {
		/* There is no race here; although napi_disable() will
		 * only wait for netif_rx_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		netif_rx_complete(napi_dev, napi);
		efx_channel_processed(channel);
	}

	return rx_packets;
}

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(!channel->used_flags);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, efx->type->evq_size);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	falcon_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

	return falcon_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	falcon_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

	falcon_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

	falcon_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len);

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}

/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

	if (!(channel->efx->net_dev->flags & IFF_UP))
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set. */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);

	/* Load up RX descriptors */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);
}

/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	if (!channel->enabled)
		return;

	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);

	/* Ensure that any worker threads have exited or will be no-ops */
	efx_for_each_channel_rx_queue(rx_queue, channel) {
		spin_lock_bh(&rx_queue->add_lock);
		spin_unlock_bh(&rx_queue->add_lock);
	}
}

static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = falcon_flush_queues(efx);
	if (rc)
		EFX_ERR(efx, "failed to flush queues\n");
	else
		EFX_LOG(efx, "successfully flushed all queues\n");

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);

	channel->used_flags = 0;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
static void efx_link_status_changed(struct efx_nic *efx)
{
	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (efx->port_inhibited) {
		netif_carrier_off(efx->net_dev);
		return;
	}

	if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (efx->link_up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (efx->link_up) {
		struct mii_if_info *gmii = &efx->mii;
		unsigned adv, lpa;
		/* NONE here means direct XAUI from the controller, with no
		 * MDIO-attached device we can query. */
		if (efx->phy_type != PHY_TYPE_NONE) {
			adv = gmii_advertised(gmii);
			lpa = gmii_lpa(gmii);
		} else {
			lpa = GM_LPA_10000 | LPA_DUPLEX;
			adv = lpa;
		}
		EFX_INFO(efx, "link up at %dMbps %s-duplex "
			 "(adv %04x lpa %04x) (MTU %d)%s\n",
			 (efx->link_options & GM_LPA_10000 ? 10000 :
			  (efx->link_options & GM_LPA_1000 ? 1000 :
			   (efx->link_options & GM_LPA_100 ? 100 :
			    10))),
			 (efx->link_options & GM_LPA_DUPLEX ?
			  "full" : "half"),
			 adv, lpa,
			 efx->net_dev->mtu,
			 (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		EFX_INFO(efx, "link down\n");
	}
}

/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
void __efx_reconfigure_port(struct efx_nic *efx)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
		raw_smp_processor_id());

	/* Serialise the promiscuous flag with efx_set_multicast_list. */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}

	falcon_reconfigure_xmac(efx);

	/* Inform kernel of loss/gain of carrier */
	efx_link_status_changed(efx);
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_reconfigure_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   reconfigure_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "create port\n");

	/* Connect up MAC/PHY operations table and read MAC address */
	rc = falcon_probe_port(efx);
	if (rc)
		goto err;

	/* Sanity check MAC address */
	if (is_valid_ether_addr(efx->mac_address)) {
		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
	} else {
		DECLARE_MAC_BUF(mac);

		EFX_ERR(efx, "invalid MAC address %s\n",
			print_mac(mac, efx->mac_address));
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		EFX_INFO(efx, "using locally-generated MAC %s\n",
			 print_mac(mac, efx->net_dev->dev_addr));
	}

	return 0;

 err:
	efx_remove_port(efx);
	return rc;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "init port\n");

	/* Initialise the MAC and PHY */
	rc = falcon_init_xmac(efx);
	if (rc)
		return rc;

	efx->port_initialized = true;
	efx->stats_enabled = true;

	/* Reconfigure port to program MAC registers */
	falcon_reconfigure_xmac(efx);

	return 0;
}

/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_reconfigure_port() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
 * efx_reconfigure_work can still be scheduled via NAPI processing
 * until efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}
}

static void efx_fini_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "shut down port\n");

	if (!efx->port_initialized)
		return;

	falcon_fini_xmac(efx);
	efx->port_initialized = false;

	efx->link_up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying port\n");

	falcon_remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	EFX_LOG(efx, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		EFX_ERR(efx, "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask) &&
		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		EFX_ERR(efx, "could not find a suitable DMA mask\n");
		goto fail2;
	}
	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		EFX_ERR(efx, "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev,
					       efx->type->mem_bar);
	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
	if (rc) {
		EFX_ERR(efx, "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
			efx->type->mem_bar,
			(unsigned long long)efx->membase_phys,
			efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
		efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, efx->type->mem_bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
	EFX_LOG(efx, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, efx->type->mem_bar);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

/* Get number of RX queues wanted. Return number of online CPU
 * packages in the expectation that an IRQ balancer will spread
 * interrupts across them. */
static int efx_wanted_rx_queues(void)
{
	cpumask_t core_mask;
	int count;
	int cpu;

	cpus_clear(core_mask);
	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpu_isset(cpu, core_mask)) {
			++count;
			cpus_or(core_mask, core_mask,
				topology_core_siblings(cpu));
		}
	}

	return count;
}
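
/* A worked example of the count above, for illustration: on a
 * two-socket box with four cores per socket, the first CPU seen in
 * each package increments the count and marks all of that package's
 * core siblings, so the function returns 2 and the driver aims for
 * one RX queue and interrupt per package. */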

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int wanted_ints;

		/* We want one RX queue and interrupt per CPU package
		 * (or as specified by the rss_cpus module parameter).
		 * We will need one channel per interrupt.
		 */
		wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
		efx->n_rx_queues = min(wanted_ints, max_channels);

		for (i = 0; i < efx->n_rx_queues; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues);
		if (rc > 0) {
			EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues);
			efx->n_rx_queues = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     efx->n_rx_queues);
		}

		if (rc == 0) {
			for (i = 0; i < efx->n_rx_queues; i++)
				efx->channel[i].irq = xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			EFX_ERR(efx, "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_rx_queues = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_rx_queues = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	efx_for_each_tx_queue(tx_queue, efx) {
		if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
			tx_queue->channel = &efx->channel[1];
		else
			tx_queue->channel = &efx->channel[0];
		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
	}

	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->channel = &efx->channel[rx_queue->queue];
		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
	}
}
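
/* The resulting layout, for illustration: as the code above reads, when
 * not using MSI/MSI-X and separate_tx_and_rx_channels is set, all TX
 * queues attach to channel 1 while RX queue 0 stays on channel 0;
 * otherwise the TX queues share channel 0 and each RX queue N attaches
 * to channel N. */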

static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = falcon_probe_nic(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and RX queues by trying to hook
	 * in MSI-X interrupts. */
	efx_probe_interrupts(efx);

	efx_set_channels(efx);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);

	return 0;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying NIC\n");

	efx_remove_interrupts(efx);
	falcon_remove_nic(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Create NIC */
	rc = efx_probe_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create NIC\n");
		goto fail1;
	}

	/* Create port */
	rc = efx_probe_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create port\n");
		goto fail2;
	}

	/* Create channels */
	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			EFX_ERR(efx, "failed to create channel %d\n",
				channel->channel);
			goto fail3;
		}
	}

	return 0;

 fail3:
	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}

/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);
	if (efx_dev_registered(efx))
		efx_wake_queue(efx);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	falcon_enable_interrupts(efx);

	/* Start hardware monitor if we're in RUNNING */
	if (efx->state == STATE_RUNNING)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;

	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);

	/* Ensure that all RX slow refills are complete. */
	efx_for_each_rx_queue(rx_queue, efx)
		cancel_delayed_work_sync(&rx_queue->work);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->reconfigure_work);
}

/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface are in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* Disable interrupts and wait for ISR to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose phy events */
	efx_stop_port(efx);

	/* Flush reconfigure_work, refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Isolate the MAC from the TX and RX engines, so that queue
	 * flushes will complete in a timely fashion. */
	falcon_drain_tx_fifo(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	if (efx_dev_registered(efx)) {
		efx_stop_queue(efx);
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}

static void efx_remove_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/* A convenience function to safely flush all the queues */
void efx_flush_queues(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);

	efx_fini_channels(efx);
	efx_init_channels(efx);

	efx_start_all(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_for_each_tx_queue(tx_queue, efx)
		tx_queue->channel->irq_moderation = tx_usecs;

	efx_for_each_rx_queue(rx_queue, efx)
		rx_queue->channel->irq_moderation = rx_usecs;
}
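
/* Illustrative usage only: as the comments on the defaults above note,
 * these values are normally tuned after module load through the
 * driver's ethtool coalescing support, e.g.
 *
 *	ethtool -C eth0 rx-usecs 60 tx-usecs 150
 *
 * assuming the interface is eth0 and the installed ethtool supports
 * the -C option. */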

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);
	int rc = 0;

	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
		  raw_smp_processor_id());

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of check_hw() anyway. */
	if (!mutex_trylock(&efx->mac_lock)) {
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
		return;
	}

	if (efx->port_enabled)
		rc = falcon_check_xmac(efx);
	mutex_unlock(&efx->mac_lock);

	if (rc) {
		if (monitor_reset) {
			EFX_ERR(efx, "hardware monitor detected a fault: "
				"triggering reset\n");
			efx_schedule_reset(efx, RESET_TYPE_MONITOR);
		} else {
			EFX_ERR(efx, "hardware monitor detected a fault, "
				"skipping reset\n");
		}
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ASSERT_RESET_SERIALISED(efx);

	return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static int efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
		rc = efx_lro_init(&channel->lro_mgr, efx);
		if (rc)
			goto err;
	}
	return 0;

 err:
	efx_fini_napi(efx);
	return rc;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		efx_lro_fini(&channel->lro_mgr);
		channel->napi_dev = NULL;
	}
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;

	efx_start_all(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	efx_stop_all(efx);
	efx_fini_channels(efx);
	efx_init_channels(efx);

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	struct net_device_stats *stats = &net_dev->stats;

	/* Update stats if possible, but do not wait if another thread
	 * is updating them (or resetting the NIC); slightly stale
	 * stats are acceptable.
	 */
	if (!spin_trylock(&efx->stats_lock))
		return stats;
	if (efx->stats_enabled) {
		falcon_update_stats_xmac(efx);
		falcon_update_nic_stats(efx);
	}
	spin_unlock(&efx->stats_lock);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_over_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors +
			    stats->rx_missed_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	return stats;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
		atomic_read(&efx->netif_stop_count), efx->port_enabled,
		monitor_reset ? "resetting channels" : "skipping reset");

	if (monitor_reset)
		efx_schedule_reset(efx, RESET_TYPE_MONITOR);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc = 0;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

	efx_fini_channels(efx);
	net_dev->mtu = new_mtu;
	efx_init_channels(efx);

	efx_start_all(efx);
	return rc;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		DECLARE_MAC_BUF(mac);
		EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
			print_mac(mac, new_addr));
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	efx_reconfigure_port(efx);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct dev_mc_list *mc_list = net_dev->mc_list;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
	bool changed = (efx->promiscuous != promiscuous);
	u32 crc;
	int bit;
	int i;

	efx->promiscuous = promiscuous;

	/* Build multicast hash table */
	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		for (i = 0; i < net_dev->mc_count; i++) {
			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
			mc_list = mc_list->next;
		}
	}

	if (!efx->port_enabled)
		/* Delay pushing settings until efx_start_port() */
		return;

	if (changed)
		queue_work(efx->workqueue, &efx->reconfigure_work);

	/* Create and activate new global multicast hash table */
	falcon_set_multicast_hash(efx);
}
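
#if 0
/* Illustrative sketch only (not compiled, not part of the driver):
 * how one multicast address maps to a hash-table bit in
 * efx_set_multicast_list() above, assuming EFX_MCAST_HASH_ENTRIES is a
 * power of two.
 */
static int example_mcast_hash_bit(const u8 *addr)
{
	u32 crc = ether_crc_le(ETH_ALEN, addr);

	/* Same masking as above: keep the low-order bits of the CRC */
	return crc & (EFX_MCAST_HASH_ENTRIES - 1);
}
#endif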

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
		struct efx_nic *efx = netdev_priv(net_dev);

		strcpy(efx->name, net_dev->name);
	}

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->open = efx_net_open;
	net_dev->stop = efx_net_stop;
	net_dev->get_stats = efx_net_stats;
	net_dev->tx_timeout = &efx_watchdog;
	net_dev->hard_start_xmit = efx_hard_start_xmit;
	net_dev->do_ioctl = efx_ioctl;
	net_dev->change_mtu = efx_change_mtu;
	net_dev->set_mac_address = efx_set_mac_address;
	net_dev->set_multicast_list = efx_set_multicast_list;
#ifdef CONFIG_NET_POLL_CONTROLLER
	net_dev->poll_controller = efx_netpoll;
#endif
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	/* Clear MAC statistics */
	falcon_update_stats_xmac(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rc = register_netdev(net_dev);
	if (rc) {
		EFX_ERR(efx, "could not register net dev\n");
		return rc;
	}
	strcpy(efx->name, net_dev->name);

	return 0;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev as running their destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		unregister_netdev(efx->net_dev);
	}
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset. */
void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* The net_dev->get_stats handler is quite slow, and will fail
	 * if a fetch is pending over reset. Serialise against it. */
	spin_lock(&efx->stats_lock);
	efx->stats_enabled = false;
	spin_unlock(&efx->stats_lock);

	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);

	rc = falcon_xmac_get_settings(efx, ecmd);
	if (rc)
		EFX_ERR(efx, "could not back up PHY settings\n");

	efx_fini_channels(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		ok = false;
	}

	if (ok) {
		efx_init_channels(efx);

		if (falcon_xmac_set_settings(efx, ecmd))
			EFX_ERR(efx, "could not restore PHY settings\n");
	}

	mutex_unlock(&efx->mac_lock);

	if (ok) {
		efx_start_all(efx);
		efx->stats_enabled = true;
	}
	return rc;
}

/* Reset the NIC as transparently as possible. Do not reset the PHY.
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep. You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
	struct ethtool_cmd ecmd;
	enum reset_type method = efx->reset_pending;
	int rc;

	/* Serialise with kernel interfaces */
	rtnl_lock();

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flag set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
		goto unlock_rtnl;
	}

	EFX_INFO(efx, "resetting (%d)\n", method);

	efx_reset_down(efx, &ecmd);

	rc = falcon_reset_hw(efx, method);
	if (rc) {
		EFX_ERR(efx, "failed to reset hardware\n");
		goto fail;
	}

	/* Allow resets to be rescheduled. */
	efx->reset_pending = RESET_TYPE_NONE;

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
		rc = -EIO;
		goto fail;
	}

	rc = efx_reset_up(efx, &ecmd, true);
	if (rc)
		goto disable;

	EFX_LOG(efx, "reset complete\n");
 unlock_rtnl:
	rtnl_unlock();
	return 0;

 fail:
	efx_reset_up(efx, &ecmd, false);
 disable:
	EFX_ERR(efx, "has been disabled\n");
	efx->state = STATE_DISABLED;

	rtnl_unlock();
	efx_unregister_netdev(efx);
	efx_fini_port(efx);
	return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

	efx_reset(nic);
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->reset_pending != RESET_TYPE_NONE) {
		EFX_INFO(efx, "quenching already scheduled reset\n");
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		break;
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
	case RESET_TYPE_TX_SKIP:
		method = RESET_TYPE_INVISIBLE;
		break;
	default:
		method = RESET_TYPE_ALL;
		break;
	}

	if (method != type)
		EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
	else
		EFX_LOG(efx, "scheduling reset (%d)\n", method);

	efx->reset_pending = method;

	queue_work(efx->reset_workqueue, &efx->reset_work);
}
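
#if 0
/* Illustrative sketch only (not compiled, not part of the driver):
 * efx_reset() sleeps and takes the rtnl_lock, so code that runs in
 * atomic context requests a reset through the work item instead.
 * hardware_fault_detected() is a hypothetical helper.
 */
static void example_error_path(struct efx_nic *efx)
{
	if (hardware_fault_detected(efx))
		efx_schedule_reset(efx, RESET_TYPE_ALL);
}
#endif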

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
	 .driver_data = (unsigned long) &falcon_a_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
	 .driver_data = (unsigned long) &falcon_b_nic_type},
	{0} /* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC/Board operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}

static struct efx_phy_operations efx_dummy_phy_operations = {
	.init = efx_port_dummy_op_int,
	.reconfigure = efx_port_dummy_op_void,
	.check_hw = efx_port_dummy_op_int,
	.fini = efx_port_dummy_op_void,
	.clear_interrupt = efx_port_dummy_op_void,
};

static struct efx_board efx_dummy_board_info = {
	.init = efx_port_dummy_op_int,
	.init_leds = efx_port_dummy_op_int,
	.set_fault_led = efx_port_dummy_op_blink,
	.blink = efx_port_dummy_op_blink,
	.fini = efx_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i, rc;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
	spin_lock_init(&efx->phy_lock);
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx->pci_dev = pci_dev;
	efx->state = STATE_INIT;
	efx->reset_pending = RESET_TYPE_NONE;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
	efx->board_info = efx_dummy_board_info;

	efx->net_dev = net_dev;
	efx->rx_checksum_enabled = true;
	spin_lock_init(&efx->netif_stop_lock);
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mii.dev = net_dev;
	INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
	atomic_set(&efx->netif_stop_count, 1);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		channel = &efx->channel[i];
		channel->efx = efx;
		channel->channel = i;
		channel->work_pending = false;
	}
	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
		tx_queue = &efx->tx_queue[i];
		tx_queue->efx = efx;
		tx_queue->queue = i;
		tx_queue->buffer = NULL;
		tx_queue->channel = &efx->channel[0]; /* for safety */
		tx_queue->tso_headers_free = NULL;
	}
	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
		rx_queue = &efx->rx_queue[i];
		rx_queue->efx = efx;
		rx_queue->queue = i;
		rx_queue->channel = &efx->channel[0]; /* for safety */
		rx_queue->buffer = NULL;
		spin_lock_init(&rx_queue->add_lock);
		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
	}

	efx->type = type;

	/* Sanity-check NIC type */
	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
			    (efx->type->txd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
			    (efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->evq_size &
			    (efx->type->evq_size - 1));
	/* As close as we can get to guaranteeing that we don't overflow */
	EFX_BUG_ON_PARANOID(efx->type->evq_size <
			    (efx->type->txd_ring_mask + 1 +
			     efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	efx->workqueue = create_singlethread_workqueue("sfc_work");
	if (!efx->workqueue) {
		rc = -ENOMEM;
		goto fail1;
	}

	efx->reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!efx->reset_workqueue) {
		rc = -ENOMEM;
		goto fail2;
	}

	return 0;

 fail2:
	destroy_workqueue(efx->workqueue);
	efx->workqueue = NULL;

 fail1:
	return rc;
}

static void efx_fini_struct(struct efx_nic *efx)
{
	if (efx->reset_workqueue) {
		destroy_workqueue(efx->reset_workqueue);
		efx->reset_workqueue = NULL;
	}
	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
        int rc;

        /* Do start-of-day initialisation */
        rc = efx_probe_all(efx);
        if (rc)
                goto fail1;

        rc = efx_init_napi(efx);
        if (rc)
                goto fail2;

        /* Initialise the board */
        rc = efx->board_info.init(efx);
        if (rc) {
                EFX_ERR(efx, "failed to initialise board\n");
                goto fail3;
        }

        rc = falcon_init_nic(efx);
        if (rc) {
                EFX_ERR(efx, "failed to initialise NIC\n");
                goto fail4;
        }

        rc = efx_init_port(efx);
        if (rc) {
                EFX_ERR(efx, "failed to initialise port\n");
                goto fail5;
        }

        efx_init_channels(efx);

        rc = falcon_init_interrupt(efx);
        if (rc)
                goto fail6;

        return 0;

 fail6:
        efx_fini_channels(efx);
        efx_fini_port(efx);
 fail5:
 fail4:
 fail3:
        efx_fini_napi(efx);
 fail2:
        efx_remove_all(efx);
 fail1:
        return rc;
}
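/* Illustrative sketch, not part of the original driver: the
 * fail1..fail6 labels above form the standard kernel "goto ladder" for
 * error unwinding -- each failure jumps to a label that undoes exactly
 * the steps that had already succeeded, in reverse order, giving a
 * single cleanup path.  A self-contained miniature of the shape, with
 * hypothetical step functions:
 */
static int example_step_a(void)
{
        return 0;               /* pretend step A succeeds */
}

static void example_undo_a(void)
{
}

static int example_step_b(void)
{
        return -ENOMEM;         /* pretend step B fails */
}

static int __maybe_unused example_probe(void)
{
        int rc;

        rc = example_step_a();
        if (rc)
                goto fail1;     /* nothing to undo yet */
        rc = example_step_b();
        if (rc)
                goto fail2;     /* must undo step A, and only step A */
        return 0;

 fail2:
        example_undo_a();
 fail1:
        return rc;
}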
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                   const struct pci_device_id *entry)
{
        struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
        struct net_device *net_dev;
        struct efx_nic *efx;
        int i, rc;

        /* Allocate and initialise a struct net_device and struct efx_nic */
        net_dev = alloc_etherdev(sizeof(*efx));
        if (!net_dev)
                return -ENOMEM;
        net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
                              NETIF_F_HIGHDMA | NETIF_F_TSO);
        if (lro)
                net_dev->features |= NETIF_F_LRO;
        /* Mask for features that also apply to VLAN devices */
        net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
                                   NETIF_F_HIGHDMA | NETIF_F_TSO);
        efx = netdev_priv(net_dev);
        pci_set_drvdata(pci_dev, efx);
        rc = efx_init_struct(efx, type, pci_dev, net_dev);
        if (rc)
                goto fail1;

        EFX_INFO(efx, "Solarflare Communications NIC detected\n");

        /* Set up basic I/O (BAR mappings etc) */
        rc = efx_init_io(efx);
        if (rc)
                goto fail2;

        /* No serialisation is required with the reset path because
         * we're in STATE_INIT. */
        for (i = 0; i < 5; i++) {
                rc = efx_pci_probe_main(efx);
                if (rc == 0)
                        break;

                /* Serialise against efx_reset(). No more resets will be
                 * scheduled since efx_stop_all() has been called, and we
                 * have never been registered with either the rtnetlink
                 * or driverlink layers. */
                flush_workqueue(efx->reset_workqueue);

                /* Retry if a recoverable reset event has been scheduled */
                if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
                    (efx->reset_pending != RESET_TYPE_ALL))
                        goto fail3;

                efx->reset_pending = RESET_TYPE_NONE;
        }

        if (rc) {
                EFX_ERR(efx, "Could not reset NIC\n");
                goto fail4;
        }

        /* Switch to the running state before we expose the device to
         * the OS. This is to ensure that the initial gathering of
         * MAC stats succeeds. */
        rtnl_lock();
        efx->state = STATE_RUNNING;
        rtnl_unlock();

        rc = efx_register_netdev(efx);
        if (rc)
                goto fail5;

        EFX_LOG(efx, "initialisation successful\n");

        return 0;

 fail5:
        efx_pci_remove_main(efx);
 fail4:
 fail3:
        efx_fini_io(efx);
 fail2:
        efx_fini_struct(efx);
 fail1:
        EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
        free_netdev(net_dev);
        return rc;
}

static struct pci_driver efx_pci_driver = {
        .name     = EFX_DRIVER_NAME,
        .id_table = efx_pci_table,
        .probe    = efx_pci_probe,
        .remove   = efx_pci_remove,
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
                 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init efx_init_module(void)
{
        int rc;

        printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

        rc = register_netdevice_notifier(&efx_netdev_notifier);
        if (rc)
                goto err_notifier;

        refill_workqueue = create_workqueue("sfc_refill");
        if (!refill_workqueue) {
                rc = -ENOMEM;
                goto err_refill;
        }

        rc = pci_register_driver(&efx_pci_driver);
        if (rc < 0)
                goto err_pci;

        return 0;

 err_pci:
        destroy_workqueue(refill_workqueue);
 err_refill:
        unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
        return rc;
}

static void __exit efx_exit_module(void)
{
        printk(KERN_INFO "Solarflare NET driver unloading\n");

        pci_unregister_driver(&efx_pci_driver);
        destroy_workqueue(refill_workqueue);
        unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
              "Solarflare Communications");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
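/* Usage note (illustrative, not part of the original source): assuming
 * the module is built under EFX_DRIVER_NAME as "sfc", the
 * interrupt_mode parameter declared above can be set at load time:
 *
 *      modprobe sfc interrupt_mode=2
 *
 * to force legacy interrupts.  With permissions 0444 it is read-only
 * at runtime, visible as /sys/module/sfc/parameters/interrupt_mode.
 */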