Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

at v2.6.38-rc7 (2657 lines, 71 kB)
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"

#include "mcdi.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]		= "INVISIBLE",
	[RESET_TYPE_ALL]		= "ALL",
	[RESET_TYPE_WORLD]		= "WORLD",
	[RESET_TYPE_DISABLE]		= "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]	= "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]		= "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]	= "RX_RECOVERY",
	[RESET_TYPE_RX_DESC_FETCH]	= "RX_DESC_FETCH",
	[RESET_TYPE_TX_DESC_FETCH]	= "TX_DESC_FETCH",
	[RESET_TYPE_TX_SKIP]		= "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]		= "MC_FAILURE",
};

#define EFX_MAX_MTU (9 * 1024)

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0444);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor. On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II
 * cache).
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 10000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 20000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
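
/* Example (illustrative, not part of the original source): the parameters
 * declared above with module_param() can be set when the driver is loaded,
 * e.g. "modprobe sfc separate_tx_channels=1 rss_cpus=4", and the 0444/0644
 * permission arguments expose them read-only or read-write under
 * /sys/module/sfc/parameters/.
 */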

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_remove_channels(struct efx_nic *efx);
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_RUNNING) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	int spent;

	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return 0;

	spent = efx_nic_process_eventq(channel, budget);
	if (spent == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_rx_strategy(channel);

	efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));

	return spent;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	efx_nic_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (channel->channel < efx->n_rx_channels &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				if (channel->irq_moderation > 1) {
					channel->irq_moderation -= 1;
					efx->type->push_irq_moderation(channel);
				}
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				if (channel->irq_moderation <
				    efx->irq_rx_moderation) {
					channel->irq_moderation += 1;
					efx->type->push_irq_moderation(channel);
				}
			}
			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return spent;
}
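
/* Note on the adaptive scheme in efx_poll() above: every 1000 interrupts
 * the accumulated irq_mod_score is compared against the two module
 * parameters; a score below irq_adapt_low_thresh shortens the moderation
 * interval by one tick (more interrupts, lower latency), while a score
 * above irq_adapt_high_thresh lengthens it, up to the irq_rx_moderation
 * ceiling (fewer interrupts, less CPU time).
 */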

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(channel->channel >= efx->n_channels);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, channel->eventq_mask + 1);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	efx_nic_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure, optionally copying
 * parameters (but not resources) from an old channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	if (old_channel) {
		channel = kmalloc(sizeof(*channel), GFP_KERNEL);
		if (!channel)
			return NULL;

		*channel = *old_channel;

		channel->napi_dev = NULL;
		memset(&channel->eventq, 0, sizeof(channel->eventq));

		rx_queue = &channel->rx_queue;
		rx_queue->buffer = NULL;
		memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));

		for (j = 0; j < EFX_TXQ_TYPES; j++) {
			tx_queue = &channel->tx_queue[j];
			if (tx_queue->channel)
				tx_queue->channel = channel;
			tx_queue->buffer = NULL;
			memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
		}
	} else {
		channel = kzalloc(sizeof(*channel), GFP_KERNEL);
		if (!channel)
			return NULL;

		channel->efx = efx;
		channel->channel = i;

		for (j = 0; j < EFX_TXQ_TYPES; j++) {
			tx_queue = &channel->tx_queue[j];
			tx_queue->efx = efx;
			tx_queue->queue = i * EFX_TXQ_TYPES + j;
			tx_queue->channel = channel;
		}
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}


static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;
	const char *type = "";
	int number;

	efx_for_each_channel(channel, efx) {
		number = channel->channel;
		if (efx->n_channels > efx->n_rx_channels) {
			if (channel->channel < efx->n_rx_channels) {
				type = "-rx";
			} else {
				type = "-tx";
				number -= efx->n_rx_channels;
			}
		}
		snprintf(efx->channel_name[channel->channel],
			 sizeof(efx->channel_name[0]),
			 "%s%s-%d", efx->name, type, number);
	}
}

static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_hash_size +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
					 sizeof(struct efx_rx_page_state));
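
	/* In other words: the buffer must hold the largest frame allowed
	 * by the current MTU plus the alignment padding and the NIC
	 * type's hash prefix and trailing padding, and rx_buffer_order
	 * is the smallest page order that also leaves room for the
	 * per-page efx_rx_page_state bookkeeping.
	 */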

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		netif_dbg(channel->efx, drv, channel->efx->net_dev,
			  "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}

/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "starting chan %d\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set.
	 */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	/* Fill the queues before enabling NAPI */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);

	napi_enable(&channel->napi_str);
}

/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
		  "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);
}

static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = efx_nic_flush_queues(efx);
	if (rc && EFX_WORKAROUND_7803(efx)) {
		/* Schedule a reset to recover from the flush failure. The
		 * descriptor caches reference memory we're about to free,
		 * but falcon_reconfigure_mac_wrapper() won't reconnect
		 * the MACs because of the pending reset. */
		netif_err(efx, drv, efx->net_dev,
			  "Resetting to recover from flush failure\n");
		efx_schedule_reset(efx, RESET_TYPE_ALL);
	} else if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		netif_dbg(channel->efx, drv, channel->efx->net_dev,
			  "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}
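
/* Re-create all channels with new ring sizes: clone the current channels,
 * swap the clones in, then probe and destroy the old structures. On probe
 * failure the original entry counts and channel pointers are swapped back
 * before the NIC is restarted.
 */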
int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i;
	int rc;

	efx_stop_all(efx);
	efx_fini_channels(efx);

	/* Clone channels */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx_alloc_channel(efx, i, efx->channel[i]);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto rollback;

	efx_init_napi(efx);

	/* Destroy old channels */
	for (i = 0; i < efx->n_channels; i++) {
		efx_fini_napi_channel(other_channel[i]);
		efx_remove_channel(other_channel[i]);
	}
out:
	/* Free unused channel structures */
	for (i = 0; i < efx->n_channels; i++)
		kfree(other_channel[i]);

	efx_init_channels(efx);
	efx_start_all(efx);
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (efx->port_inhibited) {
		netif_carrier_off(efx->net_dev);
		return;
	}

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up) {
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)%s\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu,
			   (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		netif_info(efx, link, efx->net_dev, "link down\n");
	}

}

void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
		else
			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EFX_FC_TX;
	}
}

void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}

static void efx_fini_port(struct efx_nic *efx);

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Serialise the promiscuous flag with efx_set_multicast_list. */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled) {
		efx->type->push_multicast_hash(efx);
		efx->mac_op->reconfigure(efx);
	}
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	unsigned char *perm_addr;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Sanity check MAC address */
	perm_addr = efx->net_dev->perm_addr;
	if (is_valid_ether_addr(perm_addr)) {
		memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
	} else {
		netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
			  perm_addr);
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		netif_info(efx, probe, efx->net_dev,
			   "using locally-generated MAC %pM\n",
			   efx->net_dev->dev_addr);
	}

	return 0;

 err:
	efx->type->remove_port(efx);
	return rc;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	efx->mac_op->reconfigure(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* efx_mac_work() might have been scheduled after efx_stop_port(),
	 * and then cancelled by efx_flush_all() */
	efx->type->push_multicast_hash(efx);
	efx->mac_op->reconfigure(efx);

	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_mac_work() and efx_monitor() from working */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}
}

static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask) &&
		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
			break;
		dma_mask >>= 1;
	}
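	/* Illustrative walk-through: with a 46-bit max_dma_mask the loop
	 * above tries 0x3fffffffffff first, then the 45-bit mask, and so
	 * on; the 32-bit mask 0xffffffff is the last candidate before
	 * the loop gives up.
	 */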
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		netif_err(efx, probe, efx->net_dev,
			  "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys,
			  efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys,
		  efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, EFX_MEM_BAR);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

/* Get number of channels wanted. Each channel will have its own IRQ,
 * 1 RX queue and/or 2 TX queues. */
static int efx_wanted_channels(void)
{
	cpumask_var_t core_mask;
	int count;
	int cpu;

	if (rss_cpus)
		return rss_cpus;

	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
		printk(KERN_WARNING
		       "sfc: RSS disabled due to allocation failure\n");
		return 1;
	}

	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, core_mask)) {
			++count;
			cpumask_or(core_mask, core_mask,
				   topology_core_cpumask(cpu));
		}
	}

	free_cpumask_var(core_mask);
	return count;
}
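
/* Illustrative example: with rss_cpus left at 0 on a machine with two
 * physical packages of four cores each, topology_core_cpumask() covers a
 * whole package at a time, so the loop above counts 2 and two channels
 * are requested; rss_cpus=8 would request eight channels directly.
 */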

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int n_channels;

		n_channels = efx_wanted_channels();
		if (separate_tx_channels)
			n_channels *= 2;
		n_channels = min(n_channels, max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
		if (rc > 0) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %d).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= n_channels);
			n_channels = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     n_channels);
		}

		if (rc == 0) {
			efx->n_channels = n_channels;
			if (separate_tx_channels) {
				efx->n_tx_channels =
					max(efx->n_channels / 2, 1U);
				efx->n_rx_channels =
					max(efx->n_channels -
					    efx->n_tx_channels, 1U);
			} else {
				efx->n_tx_channels = efx->n_channels;
				efx->n_rx_channels = efx->n_channels;
			}
			for (i = 0; i < n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx->tx_channel_offset =
		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;

	/* Channel pointers were set in efx_init_struct() but we now
	 * need to clear them for TX queues in any RX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		if (channel->channel - efx->tx_channel_offset >=
		    efx->n_tx_channels) {
			efx_for_each_channel_tx_queue(tx_queue, channel)
				tx_queue->channel = NULL;
		}
	}
}

static int efx_probe_nic(struct efx_nic *efx)
{
	size_t i;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and queues by trying to hook
	 * in MSI-X interrupts. */
	efx_probe_interrupts(efx);

	if (efx->n_channels > 1)
		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] = i % efx->n_rx_channels;

	efx_set_channels(efx);
	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);

	return 0;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
	rc = efx_probe_channels(efx);
	if (rc)
		goto fail3;

	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail4;
	}

	return 0;

 fail4:
	efx_remove_channels(efx);
 fail3:
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}

/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);

	if (efx_dev_registered(efx))
		netif_tx_wake_all_queues(efx->net_dev);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);

	/* Switch to event based MCDI completions after enabling interrupts.
	 * If a reset has been scheduled, then we need to stay in polled mode.
	 * Rather than serialising efx_mcdi_mode_event() [which sleeps] and
	 * reset_pending [modified from an atomic context], we instead guarantee
	 * that efx_mcdi_mode_poll() isn't reverted erroneously */
	efx_mcdi_mode_event(efx);
	if (efx->reset_pending != RESET_TYPE_NONE)
		efx_mcdi_mode_poll(efx);

	/* Start the hardware monitor if there is one. Otherwise (we're link
	 * event driven), we have to poll the PHY because after an event queue
	 * flush, we could have missed a link state change */
	if (efx->type->monitor != NULL) {
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
	} else {
		mutex_lock(&efx->mac_lock);
		if (efx->phy_op->poll(efx))
			efx_link_status_changed(efx);
		mutex_unlock(&efx->mac_lock);
	}

	efx->type->start_stats(efx);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);
	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->mac_work);
}

/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface is in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	efx->type->stop_stats(efx);

	/* Switch to MCDI polling on Siena before disabling interrupts */
	efx_mcdi_mode_poll(efx);

	/* Disable interrupts and wait for ISR to complete */
	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose phy events */
	efx_stop_port(efx);

	/* Flush efx_mac_work(), refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	if (efx_dev_registered(efx)) {
		netif_tx_stop_all_queues(efx->net_dev);
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}

static void efx_remove_all(struct efx_nic *efx)
{
	efx_remove_filters(efx);
	efx_remove_channels(efx);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

static unsigned irq_mod_ticks(int usecs, int resolution)
{
	if (usecs <= 0)
		return 0; /* cannot receive interrupts ahead of time :-) */
	if (usecs < resolution)
		return 1; /* never round down to 0 */
	return usecs / resolution;
}
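
/* Worked example (EFX_IRQ_MOD_RESOLUTION is defined elsewhere; 5 usec is
 * assumed here purely for illustration): irq_mod_ticks(60, 5) == 12,
 * irq_mod_ticks(3, 5) rounds up to 1 rather than down to 0, and
 * irq_mod_ticks(0, 5) == 0, i.e. moderation disabled. These are the
 * values that ethtool can adjust after module load.
 */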

/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
			     bool rx_adaptive)
{
	struct efx_channel *channel;
	unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
	unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_ticks;
	efx_for_each_channel(channel, efx) {
		if (efx_channel_get_rx_queue(channel))
			channel->irq_moderation = rx_ticks;
		else if (efx_channel_get_tx_queue(channel, 0))
			channel->irq_moderation = tx_ticks;
	}
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of monitor() anyway. */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);
	}
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);
	channel->napi_dev = NULL;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	if (efx->state == STATE_DISABLED)
		return -EIO;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;
	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
		return -EIO;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	efx_link_status_changed(efx);

	efx_start_all(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	if (efx->state != STATE_DISABLED) {
		/* Stop the device and flush all the channels */
		efx_stop_all(efx);
		efx_fini_channels(efx);
		efx_init_channels(efx);
	}

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
					       struct rtnl_link_stats64 *stats)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;

	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx);
	spin_unlock_bh(&efx->stats_lock);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	return stats;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc = 0;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	efx_fini_channels(efx);

	mutex_lock(&efx->mac_lock);
	/* Reconfigure the MAC before enabling the dma queues so that
	 * the RX buffers don't overflow */
	net_dev->mtu = new_mtu;
	efx->mac_op->reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	efx_init_channels(efx);

	efx_start_all(efx);
	return rc;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	efx->mac_op->reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;
}
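
/* The multicast filter below is a hash table of EFX_MCAST_HASH_ENTRIES
 * bits: each address sets the bit selected by the low-order bits of its
 * little-endian CRC-32, and promiscuous/all-multicast mode simply sets
 * every bit in the table.
 */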

/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		set_bit_le(0xff, mc_hash->byte);
	}

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise efx_start_port() will do this */
}

static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats64	= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_multicast_list	= efx_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= efx_netpoll,
#endif
};

static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);

static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct efx_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Clear MAC statistics */
	efx->mac_op->update_stats(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rtnl_lock();

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	efx_for_each_channel(channel, efx) {
		struct efx_tx_queue *tx_queue;
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			tx_queue->core_txq = netdev_get_tx_queue(
				efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
		}
	}

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_locked:
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;

fail_registered:
	unregister_netdev(net_dev);
	return rc;
}
This has to happen before 1954 * we try to unregister the netdev as running their destructors 1955 * may be needed to get the device ref. count to 0. */ 1956 efx_for_each_channel(channel, efx) { 1957 efx_for_each_channel_tx_queue(tx_queue, channel) 1958 efx_release_tx_buffers(tx_queue); 1959 } 1960 1961 if (efx_dev_registered(efx)) { 1962 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 1963 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 1964 unregister_netdev(efx->net_dev); 1965 } 1966} 1967 1968/************************************************************************** 1969 * 1970 * Device reset and suspend 1971 * 1972 **************************************************************************/ 1973 1974/* Tears down the entire software state and most of the hardware state 1975 * before reset. */ 1976void efx_reset_down(struct efx_nic *efx, enum reset_type method) 1977{ 1978 EFX_ASSERT_RESET_SERIALISED(efx); 1979 1980 efx_stop_all(efx); 1981 mutex_lock(&efx->mac_lock); 1982 1983 efx_fini_channels(efx); 1984 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) 1985 efx->phy_op->fini(efx); 1986 efx->type->fini(efx); 1987} 1988 1989/* This function will always ensure that the locks acquired in 1990 * efx_reset_down() are released. A failure return code indicates 1991 * that we were unable to reinitialise the hardware, and the 1992 * driver should be disabled. If ok is false, then the rx and tx 1993 * engines are not restarted, pending a RESET_DISABLE. */ 1994int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) 1995{ 1996 int rc; 1997 1998 EFX_ASSERT_RESET_SERIALISED(efx); 1999 2000 rc = efx->type->init(efx); 2001 if (rc) { 2002 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); 2003 goto fail; 2004 } 2005 2006 if (!ok) 2007 goto fail; 2008 2009 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { 2010 rc = efx->phy_op->init(efx); 2011 if (rc) 2012 goto fail; 2013 if (efx->phy_op->reconfigure(efx)) 2014 netif_err(efx, drv, efx->net_dev, 2015 "could not restore PHY settings\n"); 2016 } 2017 2018 efx->mac_op->reconfigure(efx); 2019 2020 efx_init_channels(efx); 2021 efx_restore_filters(efx); 2022 2023 mutex_unlock(&efx->mac_lock); 2024 2025 efx_start_all(efx); 2026 2027 return 0; 2028 2029fail: 2030 efx->port_initialized = false; 2031 2032 mutex_unlock(&efx->mac_lock); 2033 2034 return rc; 2035} 2036 2037/* Reset the NIC using the specified method. Note that the reset may 2038 * fail, in which case the card will be left in an unusable state. 2039 * 2040 * Caller must hold the rtnl_lock. 2041 */ 2042int efx_reset(struct efx_nic *efx, enum reset_type method) 2043{ 2044 int rc, rc2; 2045 bool disabled; 2046 2047 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", 2048 RESET_TYPE(method)); 2049 2050 efx_reset_down(efx, method); 2051 2052 rc = efx->type->reset(efx, method); 2053 if (rc) { 2054 netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); 2055 goto out; 2056 } 2057 2058 /* Allow resets to be rescheduled. */ 2059 efx->reset_pending = RESET_TYPE_NONE; 2060 2061 /* Reinitialise bus-mastering, which may have been turned off before 2062 * the reset was scheduled. This is still appropriate, even in the 2063 * RESET_TYPE_DISABLE since this driver generally assumes the hardware 2064 * can respond to requests. 
*/ 2065 pci_set_master(efx->pci_dev); 2066 2067out: 2068 /* Leave device stopped if necessary */ 2069 disabled = rc || method == RESET_TYPE_DISABLE; 2070 rc2 = efx_reset_up(efx, method, !disabled); 2071 if (rc2) { 2072 disabled = true; 2073 if (!rc) 2074 rc = rc2; 2075 } 2076 2077 if (disabled) { 2078 dev_close(efx->net_dev); 2079 netif_err(efx, drv, efx->net_dev, "has been disabled\n"); 2080 efx->state = STATE_DISABLED; 2081 } else { 2082 netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); 2083 } 2084 return rc; 2085} 2086 2087/* The worker thread exists so that code that cannot sleep can 2088 * schedule a reset for later. 2089 */ 2090static void efx_reset_work(struct work_struct *data) 2091{ 2092 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); 2093 2094 if (efx->reset_pending == RESET_TYPE_NONE) 2095 return; 2096 2097 /* If we're not RUNNING then don't reset. Leave the reset_pending 2098 * flag set so that efx_pci_probe_main will be retried */ 2099 if (efx->state != STATE_RUNNING) { 2100 netif_info(efx, drv, efx->net_dev, 2101 "scheduled reset quenched. NIC not RUNNING\n"); 2102 return; 2103 } 2104 2105 rtnl_lock(); 2106 (void)efx_reset(efx, efx->reset_pending); 2107 rtnl_unlock(); 2108} 2109 2110void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) 2111{ 2112 enum reset_type method; 2113 2114 if (efx->reset_pending != RESET_TYPE_NONE) { 2115 netif_info(efx, drv, efx->net_dev, 2116 "quenching already scheduled reset\n"); 2117 return; 2118 } 2119 2120 switch (type) { 2121 case RESET_TYPE_INVISIBLE: 2122 case RESET_TYPE_ALL: 2123 case RESET_TYPE_WORLD: 2124 case RESET_TYPE_DISABLE: 2125 method = type; 2126 break; 2127 case RESET_TYPE_RX_RECOVERY: 2128 case RESET_TYPE_RX_DESC_FETCH: 2129 case RESET_TYPE_TX_DESC_FETCH: 2130 case RESET_TYPE_TX_SKIP: 2131 method = RESET_TYPE_INVISIBLE; 2132 break; 2133 case RESET_TYPE_MC_FAILURE: 2134 default: 2135 method = RESET_TYPE_ALL; 2136 break; 2137 } 2138 2139 if (method != type) 2140 netif_dbg(efx, drv, efx->net_dev, 2141 "scheduling %s reset for %s\n", 2142 RESET_TYPE(method), RESET_TYPE(type)); 2143 else 2144 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", 2145 RESET_TYPE(method)); 2146 2147 efx->reset_pending = method; 2148 2149 /* efx_process_channel() will no longer read events once a 2150 * reset is scheduled. So switch back to poll'd MCDI completions. 
*/ 2151 efx_mcdi_mode_poll(efx); 2152 2153 queue_work(reset_workqueue, &efx->reset_work); 2154} 2155 2156/************************************************************************** 2157 * 2158 * List of NICs we support 2159 * 2160 **************************************************************************/ 2161 2162/* PCI device ID table */ 2163static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { 2164 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), 2165 .driver_data = (unsigned long) &falcon_a1_nic_type}, 2166 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), 2167 .driver_data = (unsigned long) &falcon_b0_nic_type}, 2168 {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID), 2169 .driver_data = (unsigned long) &siena_a0_nic_type}, 2170 {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID), 2171 .driver_data = (unsigned long) &siena_a0_nic_type}, 2172 {0} /* end of list */ 2173}; 2174 2175/************************************************************************** 2176 * 2177 * Dummy PHY/MAC operations 2178 * 2179 * Can be used for some unimplemented operations 2180 * Needed so all function pointers are valid and do not have to be tested 2181 * before use 2182 * 2183 **************************************************************************/ 2184int efx_port_dummy_op_int(struct efx_nic *efx) 2185{ 2186 return 0; 2187} 2188void efx_port_dummy_op_void(struct efx_nic *efx) {} 2189 2190static bool efx_port_dummy_op_poll(struct efx_nic *efx) 2191{ 2192 return false; 2193} 2194 2195static struct efx_phy_operations efx_dummy_phy_operations = { 2196 .init = efx_port_dummy_op_int, 2197 .reconfigure = efx_port_dummy_op_int, 2198 .poll = efx_port_dummy_op_poll, 2199 .fini = efx_port_dummy_op_void, 2200}; 2201 2202/************************************************************************** 2203 * 2204 * Data housekeeping 2205 * 2206 **************************************************************************/ 2207 2208/* This zeroes out and then fills in the invariants in a struct 2209 * efx_nic (including all sub-structures). 2210 */ 2211static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, 2212 struct pci_dev *pci_dev, struct net_device *net_dev) 2213{ 2214 int i; 2215 2216 /* Initialise common structures */ 2217 memset(efx, 0, sizeof(*efx)); 2218 spin_lock_init(&efx->biu_lock); 2219#ifdef CONFIG_SFC_MTD 2220 INIT_LIST_HEAD(&efx->mtd_list); 2221#endif 2222 INIT_WORK(&efx->reset_work, efx_reset_work); 2223 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); 2224 efx->pci_dev = pci_dev; 2225 efx->msg_enable = debug; 2226 efx->state = STATE_INIT; 2227 efx->reset_pending = RESET_TYPE_NONE; 2228 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2229 2230 efx->net_dev = net_dev; 2231 efx->rx_checksum_enabled = true; 2232 spin_lock_init(&efx->stats_lock); 2233 mutex_init(&efx->mac_lock); 2234 efx->mac_op = type->default_mac_ops; 2235 efx->phy_op = &efx_dummy_phy_operations; 2236 efx->mdio.dev = net_dev; 2237 INIT_WORK(&efx->mac_work, efx_mac_work); 2238 2239 for (i = 0; i < EFX_MAX_CHANNELS; i++) { 2240 efx->channel[i] = efx_alloc_channel(efx, i, NULL); 2241 if (!efx->channel[i]) 2242 goto fail; 2243 } 2244 2245 efx->type = type; 2246 2247 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); 2248 2249 /* Higher numbered interrupt modes are less capable! 
*/ 2250 efx->interrupt_mode = max(efx->type->max_interrupt_mode, 2251 interrupt_mode); 2252 2253 /* Would be good to use the net_dev name, but we're too early */ 2254 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", 2255 pci_name(pci_dev)); 2256 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); 2257 if (!efx->workqueue) 2258 goto fail; 2259 2260 return 0; 2261 2262fail: 2263 efx_fini_struct(efx); 2264 return -ENOMEM; 2265} 2266 2267static void efx_fini_struct(struct efx_nic *efx) 2268{ 2269 int i; 2270 2271 for (i = 0; i < EFX_MAX_CHANNELS; i++) 2272 kfree(efx->channel[i]); 2273 2274 if (efx->workqueue) { 2275 destroy_workqueue(efx->workqueue); 2276 efx->workqueue = NULL; 2277 } 2278} 2279 2280/************************************************************************** 2281 * 2282 * PCI interface 2283 * 2284 **************************************************************************/ 2285 2286/* Main body of final NIC shutdown code 2287 * This is called only at module unload (or hotplug removal). 2288 */ 2289static void efx_pci_remove_main(struct efx_nic *efx) 2290{ 2291 efx_nic_fini_interrupt(efx); 2292 efx_fini_channels(efx); 2293 efx_fini_port(efx); 2294 efx->type->fini(efx); 2295 efx_fini_napi(efx); 2296 efx_remove_all(efx); 2297} 2298 2299/* Final NIC shutdown 2300 * This is called only at module unload (or hotplug removal). 2301 */ 2302static void efx_pci_remove(struct pci_dev *pci_dev) 2303{ 2304 struct efx_nic *efx; 2305 2306 efx = pci_get_drvdata(pci_dev); 2307 if (!efx) 2308 return; 2309 2310 /* Mark the NIC as fini, then stop the interface */ 2311 rtnl_lock(); 2312 efx->state = STATE_FINI; 2313 dev_close(efx->net_dev); 2314 2315 /* Allow any queued efx_resets() to complete */ 2316 rtnl_unlock(); 2317 2318 efx_unregister_netdev(efx); 2319 2320 efx_mtd_remove(efx); 2321 2322 /* Wait for any scheduled resets to complete. No more will be 2323 * scheduled from this point because efx_stop_all() has been 2324 * called, we are no longer registered with driverlink, and 2325 * the net_device's have been removed. */ 2326 cancel_work_sync(&efx->reset_work); 2327 2328 efx_pci_remove_main(efx); 2329 2330 efx_fini_io(efx); 2331 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); 2332 2333 pci_set_drvdata(pci_dev, NULL); 2334 efx_fini_struct(efx); 2335 free_netdev(efx->net_dev); 2336}; 2337 2338/* Main body of NIC initialisation 2339 * This is called at module load (or hotplug insertion, theoretically). 2340 */ 2341static int efx_pci_probe_main(struct efx_nic *efx) 2342{ 2343 int rc; 2344 2345 /* Do start-of-day initialisation */ 2346 rc = efx_probe_all(efx); 2347 if (rc) 2348 goto fail1; 2349 2350 efx_init_napi(efx); 2351 2352 rc = efx->type->init(efx); 2353 if (rc) { 2354 netif_err(efx, probe, efx->net_dev, 2355 "failed to initialise NIC\n"); 2356 goto fail3; 2357 } 2358 2359 rc = efx_init_port(efx); 2360 if (rc) { 2361 netif_err(efx, probe, efx->net_dev, 2362 "failed to initialise port\n"); 2363 goto fail4; 2364 } 2365 2366 efx_init_channels(efx); 2367 2368 rc = efx_nic_init_interrupt(efx); 2369 if (rc) 2370 goto fail5; 2371 2372 return 0; 2373 2374 fail5: 2375 efx_fini_channels(efx); 2376 efx_fini_port(efx); 2377 fail4: 2378 efx->type->fini(efx); 2379 fail3: 2380 efx_fini_napi(efx); 2381 efx_remove_all(efx); 2382 fail1: 2383 return rc; 2384} 2385 2386/* NIC initialisation 2387 * 2388 * This is called at module load (or hotplug insertion, 2389 * theoretically). 
It sets up PCI mappings, tests and resets the NIC, 2390 * sets up and registers the network devices with the kernel and hooks 2391 * the interrupt service routine. It does not prepare the device for 2392 * transmission; this is left to the first time one of the network 2393 * interfaces is brought up (i.e. efx_net_open). 2394 */ 2395static int __devinit efx_pci_probe(struct pci_dev *pci_dev, 2396 const struct pci_device_id *entry) 2397{ 2398 struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data; 2399 struct net_device *net_dev; 2400 struct efx_nic *efx; 2401 int i, rc; 2402 2403 /* Allocate and initialise a struct net_device and struct efx_nic */ 2404 net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES); 2405 if (!net_dev) 2406 return -ENOMEM; 2407 net_dev->features |= (type->offload_features | NETIF_F_SG | 2408 NETIF_F_HIGHDMA | NETIF_F_TSO | 2409 NETIF_F_GRO); 2410 if (type->offload_features & NETIF_F_V6_CSUM) 2411 net_dev->features |= NETIF_F_TSO6; 2412 /* Mask for features that also apply to VLAN devices */ 2413 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | 2414 NETIF_F_HIGHDMA | NETIF_F_TSO); 2415 efx = netdev_priv(net_dev); 2416 pci_set_drvdata(pci_dev, efx); 2417 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 2418 rc = efx_init_struct(efx, type, pci_dev, net_dev); 2419 if (rc) 2420 goto fail1; 2421 2422 netif_info(efx, probe, efx->net_dev, 2423 "Solarflare Communications NIC detected\n"); 2424 2425 /* Set up basic I/O (BAR mappings etc) */ 2426 rc = efx_init_io(efx); 2427 if (rc) 2428 goto fail2; 2429 2430 /* No serialisation is required with the reset path because 2431 * we're in STATE_INIT. */ 2432 for (i = 0; i < 5; i++) { 2433 rc = efx_pci_probe_main(efx); 2434 2435 /* Serialise against efx_reset(). No more resets will be 2436 * scheduled since efx_stop_all() has been called, and we 2437 * have not and never have been registered with either 2438 * the rtnetlink or driverlink layers. */ 2439 cancel_work_sync(&efx->reset_work); 2440 2441 if (rc == 0) { 2442 if (efx->reset_pending != RESET_TYPE_NONE) { 2443 /* If there was a scheduled reset during 2444 * probe, the NIC is probably hosed anyway */ 2445 efx_pci_remove_main(efx); 2446 rc = -EIO; 2447 } else { 2448 break; 2449 } 2450 } 2451 2452 /* Retry if a recoverably reset event has been scheduled */ 2453 if ((efx->reset_pending != RESET_TYPE_INVISIBLE) && 2454 (efx->reset_pending != RESET_TYPE_ALL)) 2455 goto fail3; 2456 2457 efx->reset_pending = RESET_TYPE_NONE; 2458 } 2459 2460 if (rc) { 2461 netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n"); 2462 goto fail4; 2463 } 2464 2465 /* Switch to the running state before we expose the device to the OS, 2466 * so that dev_open()|efx_start_all() will actually start the device */ 2467 efx->state = STATE_RUNNING; 2468 2469 rc = efx_register_netdev(efx); 2470 if (rc) 2471 goto fail5; 2472 2473 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); 2474 2475 rtnl_lock(); 2476 efx_mtd_probe(efx); /* allowed to fail */ 2477 rtnl_unlock(); 2478 return 0; 2479 2480 fail5: 2481 efx_pci_remove_main(efx); 2482 fail4: 2483 fail3: 2484 efx_fini_io(efx); 2485 fail2: 2486 efx_fini_struct(efx); 2487 fail1: 2488 WARN_ON(rc > 0); 2489 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. 
rc=%d\n", rc); 2490 free_netdev(net_dev); 2491 return rc; 2492} 2493 2494static int efx_pm_freeze(struct device *dev) 2495{ 2496 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2497 2498 efx->state = STATE_FINI; 2499 2500 netif_device_detach(efx->net_dev); 2501 2502 efx_stop_all(efx); 2503 efx_fini_channels(efx); 2504 2505 return 0; 2506} 2507 2508static int efx_pm_thaw(struct device *dev) 2509{ 2510 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2511 2512 efx->state = STATE_INIT; 2513 2514 efx_init_channels(efx); 2515 2516 mutex_lock(&efx->mac_lock); 2517 efx->phy_op->reconfigure(efx); 2518 mutex_unlock(&efx->mac_lock); 2519 2520 efx_start_all(efx); 2521 2522 netif_device_attach(efx->net_dev); 2523 2524 efx->state = STATE_RUNNING; 2525 2526 efx->type->resume_wol(efx); 2527 2528 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ 2529 queue_work(reset_workqueue, &efx->reset_work); 2530 2531 return 0; 2532} 2533 2534static int efx_pm_poweroff(struct device *dev) 2535{ 2536 struct pci_dev *pci_dev = to_pci_dev(dev); 2537 struct efx_nic *efx = pci_get_drvdata(pci_dev); 2538 2539 efx->type->fini(efx); 2540 2541 efx->reset_pending = RESET_TYPE_NONE; 2542 2543 pci_save_state(pci_dev); 2544 return pci_set_power_state(pci_dev, PCI_D3hot); 2545} 2546 2547/* Used for both resume and restore */ 2548static int efx_pm_resume(struct device *dev) 2549{ 2550 struct pci_dev *pci_dev = to_pci_dev(dev); 2551 struct efx_nic *efx = pci_get_drvdata(pci_dev); 2552 int rc; 2553 2554 rc = pci_set_power_state(pci_dev, PCI_D0); 2555 if (rc) 2556 return rc; 2557 pci_restore_state(pci_dev); 2558 rc = pci_enable_device(pci_dev); 2559 if (rc) 2560 return rc; 2561 pci_set_master(efx->pci_dev); 2562 rc = efx->type->reset(efx, RESET_TYPE_ALL); 2563 if (rc) 2564 return rc; 2565 rc = efx->type->init(efx); 2566 if (rc) 2567 return rc; 2568 efx_pm_thaw(dev); 2569 return 0; 2570} 2571 2572static int efx_pm_suspend(struct device *dev) 2573{ 2574 int rc; 2575 2576 efx_pm_freeze(dev); 2577 rc = efx_pm_poweroff(dev); 2578 if (rc) 2579 efx_pm_resume(dev); 2580 return rc; 2581} 2582 2583static struct dev_pm_ops efx_pm_ops = { 2584 .suspend = efx_pm_suspend, 2585 .resume = efx_pm_resume, 2586 .freeze = efx_pm_freeze, 2587 .thaw = efx_pm_thaw, 2588 .poweroff = efx_pm_poweroff, 2589 .restore = efx_pm_resume, 2590}; 2591 2592static struct pci_driver efx_pci_driver = { 2593 .name = KBUILD_MODNAME, 2594 .id_table = efx_pci_table, 2595 .probe = efx_pci_probe, 2596 .remove = efx_pci_remove, 2597 .driver.pm = &efx_pm_ops, 2598}; 2599 2600/************************************************************************** 2601 * 2602 * Kernel module interface 2603 * 2604 *************************************************************************/ 2605 2606module_param(interrupt_mode, uint, 0444); 2607MODULE_PARM_DESC(interrupt_mode, 2608 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); 2609 2610static int __init efx_init_module(void) 2611{ 2612 int rc; 2613 2614 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); 2615 2616 rc = register_netdevice_notifier(&efx_netdev_notifier); 2617 if (rc) 2618 goto err_notifier; 2619 2620 reset_workqueue = create_singlethread_workqueue("sfc_reset"); 2621 if (!reset_workqueue) { 2622 rc = -ENOMEM; 2623 goto err_reset; 2624 } 2625 2626 rc = pci_register_driver(&efx_pci_driver); 2627 if (rc < 0) 2628 goto err_pci; 2629 2630 return 0; 2631 2632 err_pci: 2633 destroy_workqueue(reset_workqueue); 2634 err_reset: 2635 unregister_netdevice_notifier(&efx_netdev_notifier); 2636 
err_notifier: 2637 return rc; 2638} 2639 2640static void __exit efx_exit_module(void) 2641{ 2642 printk(KERN_INFO "Solarflare NET driver unloading\n"); 2643 2644 pci_unregister_driver(&efx_pci_driver); 2645 destroy_workqueue(reset_workqueue); 2646 unregister_netdevice_notifier(&efx_netdev_notifier); 2647 2648} 2649 2650module_init(efx_init_module); 2651module_exit(efx_exit_module); 2652 2653MODULE_AUTHOR("Solarflare Communications and " 2654 "Michael Brown <mbrown@fensystems.co.uk>"); 2655MODULE_DESCRIPTION("Solarflare Communications network driver"); 2656MODULE_LICENSE("GPL"); 2657MODULE_DEVICE_TABLE(pci, efx_pci_table);