Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.35-rc3 2469 lines 66 kB view raw
1/**************************************************************************** 2 * Driver for Solarflare Solarstorm network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2005-2009 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11#include <linux/module.h> 12#include <linux/pci.h> 13#include <linux/netdevice.h> 14#include <linux/etherdevice.h> 15#include <linux/delay.h> 16#include <linux/notifier.h> 17#include <linux/ip.h> 18#include <linux/tcp.h> 19#include <linux/in.h> 20#include <linux/crc32.h> 21#include <linux/ethtool.h> 22#include <linux/topology.h> 23#include <linux/gfp.h> 24#include "net_driver.h" 25#include "efx.h" 26#include "mdio_10g.h" 27#include "nic.h" 28 29#include "mcdi.h" 30 31/************************************************************************** 32 * 33 * Type name strings 34 * 35 ************************************************************************** 36 */ 37 38/* Loopback mode names (see LOOPBACK_MODE()) */ 39const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; 40const char *efx_loopback_mode_names[] = { 41 [LOOPBACK_NONE] = "NONE", 42 [LOOPBACK_DATA] = "DATAPATH", 43 [LOOPBACK_GMAC] = "GMAC", 44 [LOOPBACK_XGMII] = "XGMII", 45 [LOOPBACK_XGXS] = "XGXS", 46 [LOOPBACK_XAUI] = "XAUI", 47 [LOOPBACK_GMII] = "GMII", 48 [LOOPBACK_SGMII] = "SGMII", 49 [LOOPBACK_XGBR] = "XGBR", 50 [LOOPBACK_XFI] = "XFI", 51 [LOOPBACK_XAUI_FAR] = "XAUI_FAR", 52 [LOOPBACK_GMII_FAR] = "GMII_FAR", 53 [LOOPBACK_SGMII_FAR] = "SGMII_FAR", 54 [LOOPBACK_XFI_FAR] = "XFI_FAR", 55 [LOOPBACK_GPHY] = "GPHY", 56 [LOOPBACK_PHYXS] = "PHYXS", 57 [LOOPBACK_PCS] = "PCS", 58 [LOOPBACK_PMAPMD] = "PMA/PMD", 59 [LOOPBACK_XPORT] = "XPORT", 60 [LOOPBACK_XGMII_WS] = "XGMII_WS", 61 [LOOPBACK_XAUI_WS] = "XAUI_WS", 62 [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", 63 [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", 64 [LOOPBACK_GMII_WS] = "GMII_WS", 65 [LOOPBACK_XFI_WS] = "XFI_WS", 66 [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", 67 [LOOPBACK_PHYXS_WS] = "PHYXS_WS", 68}; 69 70/* Interrupt mode names (see INT_MODE())) */ 71const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX; 72const char *efx_interrupt_mode_names[] = { 73 [EFX_INT_MODE_MSIX] = "MSI-X", 74 [EFX_INT_MODE_MSI] = "MSI", 75 [EFX_INT_MODE_LEGACY] = "legacy", 76}; 77 78const unsigned int efx_reset_type_max = RESET_TYPE_MAX; 79const char *efx_reset_type_names[] = { 80 [RESET_TYPE_INVISIBLE] = "INVISIBLE", 81 [RESET_TYPE_ALL] = "ALL", 82 [RESET_TYPE_WORLD] = "WORLD", 83 [RESET_TYPE_DISABLE] = "DISABLE", 84 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", 85 [RESET_TYPE_INT_ERROR] = "INT_ERROR", 86 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", 87 [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", 88 [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", 89 [RESET_TYPE_TX_SKIP] = "TX_SKIP", 90 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", 91}; 92 93#define EFX_MAX_MTU (9 * 1024) 94 95/* RX slow fill workqueue. If memory allocation fails in the fast path, 96 * a work item is pushed onto this work queue to retry the allocation later, 97 * to avoid the NIC being starved of RX buffers. Since this is a per cpu 98 * workqueue, there is nothing to be gained in making it per NIC 99 */ 100static struct workqueue_struct *refill_workqueue; 101 102/* Reset workqueue. If any NIC has a hardware failure then a reset will be 103 * queued onto this work queue. This is not a per-nic work queue, because 104 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. 105 */ 106static struct workqueue_struct *reset_workqueue; 107 108/************************************************************************** 109 * 110 * Configurable values 111 * 112 *************************************************************************/ 113 114/* 115 * Use separate channels for TX and RX events 116 * 117 * Set this to 1 to use separate channels for TX and RX. It allows us 118 * to control interrupt affinity separately for TX and RX. 119 * 120 * This is only used in MSI-X interrupt mode 121 */ 122static unsigned int separate_tx_channels; 123module_param(separate_tx_channels, uint, 0644); 124MODULE_PARM_DESC(separate_tx_channels, 125 "Use separate channels for TX and RX"); 126 127/* This is the weight assigned to each of the (per-channel) virtual 128 * NAPI devices. 129 */ 130static int napi_weight = 64; 131 132/* This is the time (in jiffies) between invocations of the hardware 133 * monitor, which checks for known hardware bugs and resets the 134 * hardware and driver as necessary. 135 */ 136unsigned int efx_monitor_interval = 1 * HZ; 137 138/* This controls whether or not the driver will initialise devices 139 * with invalid MAC addresses stored in the EEPROM or flash. If true, 140 * such devices will be initialised with a random locally-generated 141 * MAC address. This allows for loading the sfc_mtd driver to 142 * reprogram the flash, even if the flash contents (including the MAC 143 * address) have previously been erased. 144 */ 145static unsigned int allow_bad_hwaddr; 146 147/* Initial interrupt moderation settings. They can be modified after 148 * module load with ethtool. 149 * 150 * The default for RX should strike a balance between increasing the 151 * round-trip latency and reducing overhead. 152 */ 153static unsigned int rx_irq_mod_usec = 60; 154 155/* Initial interrupt moderation settings. They can be modified after 156 * module load with ethtool. 157 * 158 * This default is chosen to ensure that a 10G link does not go idle 159 * while a TX queue is stopped after it has become full. A queue is 160 * restarted when it drops below half full. The time this takes (assuming 161 * worst case 3 descriptors per packet and 1024 descriptors) is 162 * 512 / 3 * 1.2 = 205 usec. 163 */ 164static unsigned int tx_irq_mod_usec = 150; 165 166/* This is the first interrupt mode to try out of: 167 * 0 => MSI-X 168 * 1 => MSI 169 * 2 => legacy 170 */ 171static unsigned int interrupt_mode; 172 173/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), 174 * i.e. the number of CPUs among which we may distribute simultaneous 175 * interrupt handling. 176 * 177 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. 178 * The default (0) means to assign an interrupt to each package (level II cache) 179 */ 180static unsigned int rss_cpus; 181module_param(rss_cpus, uint, 0444); 182MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); 183 184static int phy_flash_cfg; 185module_param(phy_flash_cfg, int, 0644); 186MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); 187 188static unsigned irq_adapt_low_thresh = 10000; 189module_param(irq_adapt_low_thresh, uint, 0644); 190MODULE_PARM_DESC(irq_adapt_low_thresh, 191 "Threshold score for reducing IRQ moderation"); 192 193static unsigned irq_adapt_high_thresh = 20000; 194module_param(irq_adapt_high_thresh, uint, 0644); 195MODULE_PARM_DESC(irq_adapt_high_thresh, 196 "Threshold score for increasing IRQ moderation"); 197 198/************************************************************************** 199 * 200 * Utility functions and prototypes 201 * 202 *************************************************************************/ 203static void efx_remove_channel(struct efx_channel *channel); 204static void efx_remove_port(struct efx_nic *efx); 205static void efx_fini_napi(struct efx_nic *efx); 206static void efx_fini_channels(struct efx_nic *efx); 207 208#define EFX_ASSERT_RESET_SERIALISED(efx) \ 209 do { \ 210 if ((efx->state == STATE_RUNNING) || \ 211 (efx->state == STATE_DISABLED)) \ 212 ASSERT_RTNL(); \ 213 } while (0) 214 215/************************************************************************** 216 * 217 * Event queue processing 218 * 219 *************************************************************************/ 220 221/* Process channel's event queue 222 * 223 * This function is responsible for processing the event queue of a 224 * single channel. The caller must guarantee that this function will 225 * never be concurrently called more than once on the same channel, 226 * though different channels may be being processed concurrently. 227 */ 228static int efx_process_channel(struct efx_channel *channel, int budget) 229{ 230 struct efx_nic *efx = channel->efx; 231 int spent; 232 233 if (unlikely(efx->reset_pending != RESET_TYPE_NONE || 234 !channel->enabled)) 235 return 0; 236 237 spent = efx_nic_process_eventq(channel, budget); 238 if (spent == 0) 239 return 0; 240 241 /* Deliver last RX packet. */ 242 if (channel->rx_pkt) { 243 __efx_rx_packet(channel, channel->rx_pkt, 244 channel->rx_pkt_csummed); 245 channel->rx_pkt = NULL; 246 } 247 248 efx_rx_strategy(channel); 249 250 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); 251 252 return spent; 253} 254 255/* Mark channel as finished processing 256 * 257 * Note that since we will not receive further interrupts for this 258 * channel before we finish processing and call the eventq_read_ack() 259 * method, there is no need to use the interrupt hold-off timers. 260 */ 261static inline void efx_channel_processed(struct efx_channel *channel) 262{ 263 /* The interrupt handler for this channel may set work_pending 264 * as soon as we acknowledge the events we've seen. Make sure 265 * it's cleared before then. */ 266 channel->work_pending = false; 267 smp_wmb(); 268 269 efx_nic_eventq_read_ack(channel); 270} 271 272/* NAPI poll handler 273 * 274 * NAPI guarantees serialisation of polls of the same device, which 275 * provides the guarantee required by efx_process_channel(). 276 */ 277static int efx_poll(struct napi_struct *napi, int budget) 278{ 279 struct efx_channel *channel = 280 container_of(napi, struct efx_channel, napi_str); 281 int spent; 282 283 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n", 284 channel->channel, raw_smp_processor_id()); 285 286 spent = efx_process_channel(channel, budget); 287 288 if (spent < budget) { 289 struct efx_nic *efx = channel->efx; 290 291 if (channel->channel < efx->n_rx_channels && 292 efx->irq_rx_adaptive && 293 unlikely(++channel->irq_count == 1000)) { 294 if (unlikely(channel->irq_mod_score < 295 irq_adapt_low_thresh)) { 296 if (channel->irq_moderation > 1) { 297 channel->irq_moderation -= 1; 298 efx->type->push_irq_moderation(channel); 299 } 300 } else if (unlikely(channel->irq_mod_score > 301 irq_adapt_high_thresh)) { 302 if (channel->irq_moderation < 303 efx->irq_rx_moderation) { 304 channel->irq_moderation += 1; 305 efx->type->push_irq_moderation(channel); 306 } 307 } 308 channel->irq_count = 0; 309 channel->irq_mod_score = 0; 310 } 311 312 /* There is no race here; although napi_disable() will 313 * only wait for napi_complete(), this isn't a problem 314 * since efx_channel_processed() will have no effect if 315 * interrupts have already been disabled. 316 */ 317 napi_complete(napi); 318 efx_channel_processed(channel); 319 } 320 321 return spent; 322} 323 324/* Process the eventq of the specified channel immediately on this CPU 325 * 326 * Disable hardware generated interrupts, wait for any existing 327 * processing to finish, then directly poll (and ack ) the eventq. 328 * Finally reenable NAPI and interrupts. 329 * 330 * Since we are touching interrupts the caller should hold the suspend lock 331 */ 332void efx_process_channel_now(struct efx_channel *channel) 333{ 334 struct efx_nic *efx = channel->efx; 335 336 BUG_ON(!channel->enabled); 337 338 /* Disable interrupts and wait for ISRs to complete */ 339 efx_nic_disable_interrupts(efx); 340 if (efx->legacy_irq) 341 synchronize_irq(efx->legacy_irq); 342 if (channel->irq) 343 synchronize_irq(channel->irq); 344 345 /* Wait for any NAPI processing to complete */ 346 napi_disable(&channel->napi_str); 347 348 /* Poll the channel */ 349 efx_process_channel(channel, EFX_EVQ_SIZE); 350 351 /* Ack the eventq. This may cause an interrupt to be generated 352 * when they are reenabled */ 353 efx_channel_processed(channel); 354 355 napi_enable(&channel->napi_str); 356 efx_nic_enable_interrupts(efx); 357} 358 359/* Create event queue 360 * Event queue memory allocations are done only once. If the channel 361 * is reset, the memory buffer will be reused; this guards against 362 * errors during channel reset and also simplifies interrupt handling. 363 */ 364static int efx_probe_eventq(struct efx_channel *channel) 365{ 366 EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel); 367 368 return efx_nic_probe_eventq(channel); 369} 370 371/* Prepare channel's event queue */ 372static void efx_init_eventq(struct efx_channel *channel) 373{ 374 EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel); 375 376 channel->eventq_read_ptr = 0; 377 378 efx_nic_init_eventq(channel); 379} 380 381static void efx_fini_eventq(struct efx_channel *channel) 382{ 383 EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel); 384 385 efx_nic_fini_eventq(channel); 386} 387 388static void efx_remove_eventq(struct efx_channel *channel) 389{ 390 EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel); 391 392 efx_nic_remove_eventq(channel); 393} 394 395/************************************************************************** 396 * 397 * Channel handling 398 * 399 *************************************************************************/ 400 401static int efx_probe_channel(struct efx_channel *channel) 402{ 403 struct efx_tx_queue *tx_queue; 404 struct efx_rx_queue *rx_queue; 405 int rc; 406 407 EFX_LOG(channel->efx, "creating channel %d\n", channel->channel); 408 409 rc = efx_probe_eventq(channel); 410 if (rc) 411 goto fail1; 412 413 efx_for_each_channel_tx_queue(tx_queue, channel) { 414 rc = efx_probe_tx_queue(tx_queue); 415 if (rc) 416 goto fail2; 417 } 418 419 efx_for_each_channel_rx_queue(rx_queue, channel) { 420 rc = efx_probe_rx_queue(rx_queue); 421 if (rc) 422 goto fail3; 423 } 424 425 channel->n_rx_frm_trunc = 0; 426 427 return 0; 428 429 fail3: 430 efx_for_each_channel_rx_queue(rx_queue, channel) 431 efx_remove_rx_queue(rx_queue); 432 fail2: 433 efx_for_each_channel_tx_queue(tx_queue, channel) 434 efx_remove_tx_queue(tx_queue); 435 fail1: 436 return rc; 437} 438 439 440static void efx_set_channel_names(struct efx_nic *efx) 441{ 442 struct efx_channel *channel; 443 const char *type = ""; 444 int number; 445 446 efx_for_each_channel(channel, efx) { 447 number = channel->channel; 448 if (efx->n_channels > efx->n_rx_channels) { 449 if (channel->channel < efx->n_rx_channels) { 450 type = "-rx"; 451 } else { 452 type = "-tx"; 453 number -= efx->n_rx_channels; 454 } 455 } 456 snprintf(channel->name, sizeof(channel->name), 457 "%s%s-%d", efx->name, type, number); 458 } 459} 460 461/* Channels are shutdown and reinitialised whilst the NIC is running 462 * to propagate configuration changes (mtu, checksum offload), or 463 * to clear hardware error conditions 464 */ 465static void efx_init_channels(struct efx_nic *efx) 466{ 467 struct efx_tx_queue *tx_queue; 468 struct efx_rx_queue *rx_queue; 469 struct efx_channel *channel; 470 471 /* Calculate the rx buffer allocation parameters required to 472 * support the current MTU, including padding for header 473 * alignment and overruns. 474 */ 475 efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + 476 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 477 efx->type->rx_buffer_padding); 478 efx->rx_buffer_order = get_order(efx->rx_buffer_len); 479 480 /* Initialise the channels */ 481 efx_for_each_channel(channel, efx) { 482 EFX_LOG(channel->efx, "init chan %d\n", channel->channel); 483 484 efx_init_eventq(channel); 485 486 efx_for_each_channel_tx_queue(tx_queue, channel) 487 efx_init_tx_queue(tx_queue); 488 489 /* The rx buffer allocation strategy is MTU dependent */ 490 efx_rx_strategy(channel); 491 492 efx_for_each_channel_rx_queue(rx_queue, channel) 493 efx_init_rx_queue(rx_queue); 494 495 WARN_ON(channel->rx_pkt != NULL); 496 efx_rx_strategy(channel); 497 } 498} 499 500/* This enables event queue processing and packet transmission. 501 * 502 * Note that this function is not allowed to fail, since that would 503 * introduce too much complexity into the suspend/resume path. 504 */ 505static void efx_start_channel(struct efx_channel *channel) 506{ 507 struct efx_rx_queue *rx_queue; 508 509 EFX_LOG(channel->efx, "starting chan %d\n", channel->channel); 510 511 /* The interrupt handler for this channel may set work_pending 512 * as soon as we enable it. Make sure it's cleared before 513 * then. Similarly, make sure it sees the enabled flag set. */ 514 channel->work_pending = false; 515 channel->enabled = true; 516 smp_wmb(); 517 518 napi_enable(&channel->napi_str); 519 520 /* Load up RX descriptors */ 521 efx_for_each_channel_rx_queue(rx_queue, channel) 522 efx_fast_push_rx_descriptors(rx_queue); 523} 524 525/* This disables event queue processing and packet transmission. 526 * This function does not guarantee that all queue processing 527 * (e.g. RX refill) is complete. 528 */ 529static void efx_stop_channel(struct efx_channel *channel) 530{ 531 struct efx_rx_queue *rx_queue; 532 533 if (!channel->enabled) 534 return; 535 536 EFX_LOG(channel->efx, "stop chan %d\n", channel->channel); 537 538 channel->enabled = false; 539 napi_disable(&channel->napi_str); 540 541 /* Ensure that any worker threads have exited or will be no-ops */ 542 efx_for_each_channel_rx_queue(rx_queue, channel) { 543 spin_lock_bh(&rx_queue->add_lock); 544 spin_unlock_bh(&rx_queue->add_lock); 545 } 546} 547 548static void efx_fini_channels(struct efx_nic *efx) 549{ 550 struct efx_channel *channel; 551 struct efx_tx_queue *tx_queue; 552 struct efx_rx_queue *rx_queue; 553 int rc; 554 555 EFX_ASSERT_RESET_SERIALISED(efx); 556 BUG_ON(efx->port_enabled); 557 558 rc = efx_nic_flush_queues(efx); 559 if (rc) 560 EFX_ERR(efx, "failed to flush queues\n"); 561 else 562 EFX_LOG(efx, "successfully flushed all queues\n"); 563 564 efx_for_each_channel(channel, efx) { 565 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel); 566 567 efx_for_each_channel_rx_queue(rx_queue, channel) 568 efx_fini_rx_queue(rx_queue); 569 efx_for_each_channel_tx_queue(tx_queue, channel) 570 efx_fini_tx_queue(tx_queue); 571 efx_fini_eventq(channel); 572 } 573} 574 575static void efx_remove_channel(struct efx_channel *channel) 576{ 577 struct efx_tx_queue *tx_queue; 578 struct efx_rx_queue *rx_queue; 579 580 EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel); 581 582 efx_for_each_channel_rx_queue(rx_queue, channel) 583 efx_remove_rx_queue(rx_queue); 584 efx_for_each_channel_tx_queue(tx_queue, channel) 585 efx_remove_tx_queue(tx_queue); 586 efx_remove_eventq(channel); 587} 588 589void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay) 590{ 591 queue_delayed_work(refill_workqueue, &rx_queue->work, delay); 592} 593 594/************************************************************************** 595 * 596 * Port handling 597 * 598 **************************************************************************/ 599 600/* This ensures that the kernel is kept informed (via 601 * netif_carrier_on/off) of the link status, and also maintains the 602 * link status's stop on the port's TX queue. 603 */ 604void efx_link_status_changed(struct efx_nic *efx) 605{ 606 struct efx_link_state *link_state = &efx->link_state; 607 608 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure 609 * that no events are triggered between unregister_netdev() and the 610 * driver unloading. A more general condition is that NETDEV_CHANGE 611 * can only be generated between NETDEV_UP and NETDEV_DOWN */ 612 if (!netif_running(efx->net_dev)) 613 return; 614 615 if (efx->port_inhibited) { 616 netif_carrier_off(efx->net_dev); 617 return; 618 } 619 620 if (link_state->up != netif_carrier_ok(efx->net_dev)) { 621 efx->n_link_state_changes++; 622 623 if (link_state->up) 624 netif_carrier_on(efx->net_dev); 625 else 626 netif_carrier_off(efx->net_dev); 627 } 628 629 /* Status message for kernel log */ 630 if (link_state->up) { 631 EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n", 632 link_state->speed, link_state->fd ? "full" : "half", 633 efx->net_dev->mtu, 634 (efx->promiscuous ? " [PROMISC]" : "")); 635 } else { 636 EFX_INFO(efx, "link down\n"); 637 } 638 639} 640 641void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) 642{ 643 efx->link_advertising = advertising; 644 if (advertising) { 645 if (advertising & ADVERTISED_Pause) 646 efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); 647 else 648 efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); 649 if (advertising & ADVERTISED_Asym_Pause) 650 efx->wanted_fc ^= EFX_FC_TX; 651 } 652} 653 654void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type wanted_fc) 655{ 656 efx->wanted_fc = wanted_fc; 657 if (efx->link_advertising) { 658 if (wanted_fc & EFX_FC_RX) 659 efx->link_advertising |= (ADVERTISED_Pause | 660 ADVERTISED_Asym_Pause); 661 else 662 efx->link_advertising &= ~(ADVERTISED_Pause | 663 ADVERTISED_Asym_Pause); 664 if (wanted_fc & EFX_FC_TX) 665 efx->link_advertising ^= ADVERTISED_Asym_Pause; 666 } 667} 668 669static void efx_fini_port(struct efx_nic *efx); 670 671/* Push loopback/power/transmit disable settings to the PHY, and reconfigure 672 * the MAC appropriately. All other PHY configuration changes are pushed 673 * through phy_op->set_settings(), and pushed asynchronously to the MAC 674 * through efx_monitor(). 675 * 676 * Callers must hold the mac_lock 677 */ 678int __efx_reconfigure_port(struct efx_nic *efx) 679{ 680 enum efx_phy_mode phy_mode; 681 int rc; 682 683 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 684 685 /* Serialise the promiscuous flag with efx_set_multicast_list. */ 686 if (efx_dev_registered(efx)) { 687 netif_addr_lock_bh(efx->net_dev); 688 netif_addr_unlock_bh(efx->net_dev); 689 } 690 691 /* Disable PHY transmit in mac level loopbacks */ 692 phy_mode = efx->phy_mode; 693 if (LOOPBACK_INTERNAL(efx)) 694 efx->phy_mode |= PHY_MODE_TX_DISABLED; 695 else 696 efx->phy_mode &= ~PHY_MODE_TX_DISABLED; 697 698 rc = efx->type->reconfigure_port(efx); 699 700 if (rc) 701 efx->phy_mode = phy_mode; 702 703 return rc; 704} 705 706/* Reinitialise the MAC to pick up new PHY settings, even if the port is 707 * disabled. */ 708int efx_reconfigure_port(struct efx_nic *efx) 709{ 710 int rc; 711 712 EFX_ASSERT_RESET_SERIALISED(efx); 713 714 mutex_lock(&efx->mac_lock); 715 rc = __efx_reconfigure_port(efx); 716 mutex_unlock(&efx->mac_lock); 717 718 return rc; 719} 720 721/* Asynchronous work item for changing MAC promiscuity and multicast 722 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current 723 * MAC directly. */ 724static void efx_mac_work(struct work_struct *data) 725{ 726 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); 727 728 mutex_lock(&efx->mac_lock); 729 if (efx->port_enabled) { 730 efx->type->push_multicast_hash(efx); 731 efx->mac_op->reconfigure(efx); 732 } 733 mutex_unlock(&efx->mac_lock); 734} 735 736static int efx_probe_port(struct efx_nic *efx) 737{ 738 int rc; 739 740 EFX_LOG(efx, "create port\n"); 741 742 if (phy_flash_cfg) 743 efx->phy_mode = PHY_MODE_SPECIAL; 744 745 /* Connect up MAC/PHY operations table */ 746 rc = efx->type->probe_port(efx); 747 if (rc) 748 goto err; 749 750 /* Sanity check MAC address */ 751 if (is_valid_ether_addr(efx->mac_address)) { 752 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); 753 } else { 754 EFX_ERR(efx, "invalid MAC address %pM\n", 755 efx->mac_address); 756 if (!allow_bad_hwaddr) { 757 rc = -EINVAL; 758 goto err; 759 } 760 random_ether_addr(efx->net_dev->dev_addr); 761 EFX_INFO(efx, "using locally-generated MAC %pM\n", 762 efx->net_dev->dev_addr); 763 } 764 765 return 0; 766 767 err: 768 efx_remove_port(efx); 769 return rc; 770} 771 772static int efx_init_port(struct efx_nic *efx) 773{ 774 int rc; 775 776 EFX_LOG(efx, "init port\n"); 777 778 mutex_lock(&efx->mac_lock); 779 780 rc = efx->phy_op->init(efx); 781 if (rc) 782 goto fail1; 783 784 efx->port_initialized = true; 785 786 /* Reconfigure the MAC before creating dma queues (required for 787 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ 788 efx->mac_op->reconfigure(efx); 789 790 /* Ensure the PHY advertises the correct flow control settings */ 791 rc = efx->phy_op->reconfigure(efx); 792 if (rc) 793 goto fail2; 794 795 mutex_unlock(&efx->mac_lock); 796 return 0; 797 798fail2: 799 efx->phy_op->fini(efx); 800fail1: 801 mutex_unlock(&efx->mac_lock); 802 return rc; 803} 804 805static void efx_start_port(struct efx_nic *efx) 806{ 807 EFX_LOG(efx, "start port\n"); 808 BUG_ON(efx->port_enabled); 809 810 mutex_lock(&efx->mac_lock); 811 efx->port_enabled = true; 812 813 /* efx_mac_work() might have been scheduled after efx_stop_port(), 814 * and then cancelled by efx_flush_all() */ 815 efx->type->push_multicast_hash(efx); 816 efx->mac_op->reconfigure(efx); 817 818 mutex_unlock(&efx->mac_lock); 819} 820 821/* Prevent efx_mac_work() and efx_monitor() from working */ 822static void efx_stop_port(struct efx_nic *efx) 823{ 824 EFX_LOG(efx, "stop port\n"); 825 826 mutex_lock(&efx->mac_lock); 827 efx->port_enabled = false; 828 mutex_unlock(&efx->mac_lock); 829 830 /* Serialise against efx_set_multicast_list() */ 831 if (efx_dev_registered(efx)) { 832 netif_addr_lock_bh(efx->net_dev); 833 netif_addr_unlock_bh(efx->net_dev); 834 } 835} 836 837static void efx_fini_port(struct efx_nic *efx) 838{ 839 EFX_LOG(efx, "shut down port\n"); 840 841 if (!efx->port_initialized) 842 return; 843 844 efx->phy_op->fini(efx); 845 efx->port_initialized = false; 846 847 efx->link_state.up = false; 848 efx_link_status_changed(efx); 849} 850 851static void efx_remove_port(struct efx_nic *efx) 852{ 853 EFX_LOG(efx, "destroying port\n"); 854 855 efx->type->remove_port(efx); 856} 857 858/************************************************************************** 859 * 860 * NIC handling 861 * 862 **************************************************************************/ 863 864/* This configures the PCI device to enable I/O and DMA. */ 865static int efx_init_io(struct efx_nic *efx) 866{ 867 struct pci_dev *pci_dev = efx->pci_dev; 868 dma_addr_t dma_mask = efx->type->max_dma_mask; 869 int rc; 870 871 EFX_LOG(efx, "initialising I/O\n"); 872 873 rc = pci_enable_device(pci_dev); 874 if (rc) { 875 EFX_ERR(efx, "failed to enable PCI device\n"); 876 goto fail1; 877 } 878 879 pci_set_master(pci_dev); 880 881 /* Set the PCI DMA mask. Try all possibilities from our 882 * genuine mask down to 32 bits, because some architectures 883 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit 884 * masks event though they reject 46 bit masks. 885 */ 886 while (dma_mask > 0x7fffffffUL) { 887 if (pci_dma_supported(pci_dev, dma_mask) && 888 ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0)) 889 break; 890 dma_mask >>= 1; 891 } 892 if (rc) { 893 EFX_ERR(efx, "could not find a suitable DMA mask\n"); 894 goto fail2; 895 } 896 EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask); 897 rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); 898 if (rc) { 899 /* pci_set_consistent_dma_mask() is not *allowed* to 900 * fail with a mask that pci_set_dma_mask() accepted, 901 * but just in case... 902 */ 903 EFX_ERR(efx, "failed to set consistent DMA mask\n"); 904 goto fail2; 905 } 906 907 efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); 908 rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); 909 if (rc) { 910 EFX_ERR(efx, "request for memory BAR failed\n"); 911 rc = -EIO; 912 goto fail3; 913 } 914 efx->membase = ioremap_nocache(efx->membase_phys, 915 efx->type->mem_map_size); 916 if (!efx->membase) { 917 EFX_ERR(efx, "could not map memory BAR at %llx+%x\n", 918 (unsigned long long)efx->membase_phys, 919 efx->type->mem_map_size); 920 rc = -ENOMEM; 921 goto fail4; 922 } 923 EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n", 924 (unsigned long long)efx->membase_phys, 925 efx->type->mem_map_size, efx->membase); 926 927 return 0; 928 929 fail4: 930 pci_release_region(efx->pci_dev, EFX_MEM_BAR); 931 fail3: 932 efx->membase_phys = 0; 933 fail2: 934 pci_disable_device(efx->pci_dev); 935 fail1: 936 return rc; 937} 938 939static void efx_fini_io(struct efx_nic *efx) 940{ 941 EFX_LOG(efx, "shutting down I/O\n"); 942 943 if (efx->membase) { 944 iounmap(efx->membase); 945 efx->membase = NULL; 946 } 947 948 if (efx->membase_phys) { 949 pci_release_region(efx->pci_dev, EFX_MEM_BAR); 950 efx->membase_phys = 0; 951 } 952 953 pci_disable_device(efx->pci_dev); 954} 955 956/* Get number of channels wanted. Each channel will have its own IRQ, 957 * 1 RX queue and/or 2 TX queues. */ 958static int efx_wanted_channels(void) 959{ 960 cpumask_var_t core_mask; 961 int count; 962 int cpu; 963 964 if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { 965 printk(KERN_WARNING 966 "sfc: RSS disabled due to allocation failure\n"); 967 return 1; 968 } 969 970 count = 0; 971 for_each_online_cpu(cpu) { 972 if (!cpumask_test_cpu(cpu, core_mask)) { 973 ++count; 974 cpumask_or(core_mask, core_mask, 975 topology_core_cpumask(cpu)); 976 } 977 } 978 979 free_cpumask_var(core_mask); 980 return count; 981} 982 983/* Probe the number and type of interrupts we are able to obtain, and 984 * the resulting numbers of channels and RX queues. 985 */ 986static void efx_probe_interrupts(struct efx_nic *efx) 987{ 988 int max_channels = 989 min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS); 990 int rc, i; 991 992 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { 993 struct msix_entry xentries[EFX_MAX_CHANNELS]; 994 int n_channels; 995 996 n_channels = efx_wanted_channels(); 997 if (separate_tx_channels) 998 n_channels *= 2; 999 n_channels = min(n_channels, max_channels); 1000 1001 for (i = 0; i < n_channels; i++) 1002 xentries[i].entry = i; 1003 rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); 1004 if (rc > 0) { 1005 EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors" 1006 " available (%d < %d).\n", rc, n_channels); 1007 EFX_ERR(efx, "WARNING: Performance may be reduced.\n"); 1008 EFX_BUG_ON_PARANOID(rc >= n_channels); 1009 n_channels = rc; 1010 rc = pci_enable_msix(efx->pci_dev, xentries, 1011 n_channels); 1012 } 1013 1014 if (rc == 0) { 1015 efx->n_channels = n_channels; 1016 if (separate_tx_channels) { 1017 efx->n_tx_channels = 1018 max(efx->n_channels / 2, 1U); 1019 efx->n_rx_channels = 1020 max(efx->n_channels - 1021 efx->n_tx_channels, 1U); 1022 } else { 1023 efx->n_tx_channels = efx->n_channels; 1024 efx->n_rx_channels = efx->n_channels; 1025 } 1026 for (i = 0; i < n_channels; i++) 1027 efx->channel[i].irq = xentries[i].vector; 1028 } else { 1029 /* Fall back to single channel MSI */ 1030 efx->interrupt_mode = EFX_INT_MODE_MSI; 1031 EFX_ERR(efx, "could not enable MSI-X\n"); 1032 } 1033 } 1034 1035 /* Try single interrupt MSI */ 1036 if (efx->interrupt_mode == EFX_INT_MODE_MSI) { 1037 efx->n_channels = 1; 1038 efx->n_rx_channels = 1; 1039 efx->n_tx_channels = 1; 1040 rc = pci_enable_msi(efx->pci_dev); 1041 if (rc == 0) { 1042 efx->channel[0].irq = efx->pci_dev->irq; 1043 } else { 1044 EFX_ERR(efx, "could not enable MSI\n"); 1045 efx->interrupt_mode = EFX_INT_MODE_LEGACY; 1046 } 1047 } 1048 1049 /* Assume legacy interrupts */ 1050 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { 1051 efx->n_channels = 1 + (separate_tx_channels ? 1 : 0); 1052 efx->n_rx_channels = 1; 1053 efx->n_tx_channels = 1; 1054 efx->legacy_irq = efx->pci_dev->irq; 1055 } 1056} 1057 1058static void efx_remove_interrupts(struct efx_nic *efx) 1059{ 1060 struct efx_channel *channel; 1061 1062 /* Remove MSI/MSI-X interrupts */ 1063 efx_for_each_channel(channel, efx) 1064 channel->irq = 0; 1065 pci_disable_msi(efx->pci_dev); 1066 pci_disable_msix(efx->pci_dev); 1067 1068 /* Remove legacy interrupt */ 1069 efx->legacy_irq = 0; 1070} 1071 1072static void efx_set_channels(struct efx_nic *efx) 1073{ 1074 struct efx_channel *channel; 1075 struct efx_tx_queue *tx_queue; 1076 struct efx_rx_queue *rx_queue; 1077 unsigned tx_channel_offset = 1078 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; 1079 1080 efx_for_each_channel(channel, efx) { 1081 if (channel->channel - tx_channel_offset < efx->n_tx_channels) { 1082 channel->tx_queue = &efx->tx_queue[ 1083 (channel->channel - tx_channel_offset) * 1084 EFX_TXQ_TYPES]; 1085 efx_for_each_channel_tx_queue(tx_queue, channel) 1086 tx_queue->channel = channel; 1087 } 1088 } 1089 1090 efx_for_each_rx_queue(rx_queue, efx) 1091 rx_queue->channel = &efx->channel[rx_queue->queue]; 1092} 1093 1094static int efx_probe_nic(struct efx_nic *efx) 1095{ 1096 int rc; 1097 1098 EFX_LOG(efx, "creating NIC\n"); 1099 1100 /* Carry out hardware-type specific initialisation */ 1101 rc = efx->type->probe(efx); 1102 if (rc) 1103 return rc; 1104 1105 /* Determine the number of channels and queues by trying to hook 1106 * in MSI-X interrupts. */ 1107 efx_probe_interrupts(efx); 1108 1109 efx_set_channels(efx); 1110 efx->net_dev->real_num_tx_queues = efx->n_tx_channels; 1111 1112 /* Initialise the interrupt moderation settings */ 1113 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true); 1114 1115 return 0; 1116} 1117 1118static void efx_remove_nic(struct efx_nic *efx) 1119{ 1120 EFX_LOG(efx, "destroying NIC\n"); 1121 1122 efx_remove_interrupts(efx); 1123 efx->type->remove(efx); 1124} 1125 1126/************************************************************************** 1127 * 1128 * NIC startup/shutdown 1129 * 1130 *************************************************************************/ 1131 1132static int efx_probe_all(struct efx_nic *efx) 1133{ 1134 struct efx_channel *channel; 1135 int rc; 1136 1137 /* Create NIC */ 1138 rc = efx_probe_nic(efx); 1139 if (rc) { 1140 EFX_ERR(efx, "failed to create NIC\n"); 1141 goto fail1; 1142 } 1143 1144 /* Create port */ 1145 rc = efx_probe_port(efx); 1146 if (rc) { 1147 EFX_ERR(efx, "failed to create port\n"); 1148 goto fail2; 1149 } 1150 1151 /* Create channels */ 1152 efx_for_each_channel(channel, efx) { 1153 rc = efx_probe_channel(channel); 1154 if (rc) { 1155 EFX_ERR(efx, "failed to create channel %d\n", 1156 channel->channel); 1157 goto fail3; 1158 } 1159 } 1160 efx_set_channel_names(efx); 1161 1162 return 0; 1163 1164 fail3: 1165 efx_for_each_channel(channel, efx) 1166 efx_remove_channel(channel); 1167 efx_remove_port(efx); 1168 fail2: 1169 efx_remove_nic(efx); 1170 fail1: 1171 return rc; 1172} 1173 1174/* Called after previous invocation(s) of efx_stop_all, restarts the 1175 * port, kernel transmit queue, NAPI processing and hardware interrupts, 1176 * and ensures that the port is scheduled to be reconfigured. 1177 * This function is safe to call multiple times when the NIC is in any 1178 * state. */ 1179static void efx_start_all(struct efx_nic *efx) 1180{ 1181 struct efx_channel *channel; 1182 1183 EFX_ASSERT_RESET_SERIALISED(efx); 1184 1185 /* Check that it is appropriate to restart the interface. All 1186 * of these flags are safe to read under just the rtnl lock */ 1187 if (efx->port_enabled) 1188 return; 1189 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) 1190 return; 1191 if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) 1192 return; 1193 1194 /* Mark the port as enabled so port reconfigurations can start, then 1195 * restart the transmit interface early so the watchdog timer stops */ 1196 efx_start_port(efx); 1197 1198 efx_for_each_channel(channel, efx) { 1199 if (efx_dev_registered(efx)) 1200 efx_wake_queue(channel); 1201 efx_start_channel(channel); 1202 } 1203 1204 efx_nic_enable_interrupts(efx); 1205 1206 /* Switch to event based MCDI completions after enabling interrupts. 1207 * If a reset has been scheduled, then we need to stay in polled mode. 1208 * Rather than serialising efx_mcdi_mode_event() [which sleeps] and 1209 * reset_pending [modified from an atomic context], we instead guarantee 1210 * that efx_mcdi_mode_poll() isn't reverted erroneously */ 1211 efx_mcdi_mode_event(efx); 1212 if (efx->reset_pending != RESET_TYPE_NONE) 1213 efx_mcdi_mode_poll(efx); 1214 1215 /* Start the hardware monitor if there is one. Otherwise (we're link 1216 * event driven), we have to poll the PHY because after an event queue 1217 * flush, we could have a missed a link state change */ 1218 if (efx->type->monitor != NULL) { 1219 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1220 efx_monitor_interval); 1221 } else { 1222 mutex_lock(&efx->mac_lock); 1223 if (efx->phy_op->poll(efx)) 1224 efx_link_status_changed(efx); 1225 mutex_unlock(&efx->mac_lock); 1226 } 1227 1228 efx->type->start_stats(efx); 1229} 1230 1231/* Flush all delayed work. Should only be called when no more delayed work 1232 * will be scheduled. This doesn't flush pending online resets (efx_reset), 1233 * since we're holding the rtnl_lock at this point. */ 1234static void efx_flush_all(struct efx_nic *efx) 1235{ 1236 struct efx_rx_queue *rx_queue; 1237 1238 /* Make sure the hardware monitor is stopped */ 1239 cancel_delayed_work_sync(&efx->monitor_work); 1240 1241 /* Ensure that all RX slow refills are complete. */ 1242 efx_for_each_rx_queue(rx_queue, efx) 1243 cancel_delayed_work_sync(&rx_queue->work); 1244 1245 /* Stop scheduled port reconfigurations */ 1246 cancel_work_sync(&efx->mac_work); 1247} 1248 1249/* Quiesce hardware and software without bringing the link down. 1250 * Safe to call multiple times, when the nic and interface is in any 1251 * state. The caller is guaranteed to subsequently be in a position 1252 * to modify any hardware and software state they see fit without 1253 * taking locks. */ 1254static void efx_stop_all(struct efx_nic *efx) 1255{ 1256 struct efx_channel *channel; 1257 1258 EFX_ASSERT_RESET_SERIALISED(efx); 1259 1260 /* port_enabled can be read safely under the rtnl lock */ 1261 if (!efx->port_enabled) 1262 return; 1263 1264 efx->type->stop_stats(efx); 1265 1266 /* Switch to MCDI polling on Siena before disabling interrupts */ 1267 efx_mcdi_mode_poll(efx); 1268 1269 /* Disable interrupts and wait for ISR to complete */ 1270 efx_nic_disable_interrupts(efx); 1271 if (efx->legacy_irq) 1272 synchronize_irq(efx->legacy_irq); 1273 efx_for_each_channel(channel, efx) { 1274 if (channel->irq) 1275 synchronize_irq(channel->irq); 1276 } 1277 1278 /* Stop all NAPI processing and synchronous rx refills */ 1279 efx_for_each_channel(channel, efx) 1280 efx_stop_channel(channel); 1281 1282 /* Stop all asynchronous port reconfigurations. Since all 1283 * event processing has already been stopped, there is no 1284 * window to loose phy events */ 1285 efx_stop_port(efx); 1286 1287 /* Flush efx_mac_work(), refill_workqueue, monitor_work */ 1288 efx_flush_all(efx); 1289 1290 /* Stop the kernel transmit interface late, so the watchdog 1291 * timer isn't ticking over the flush */ 1292 if (efx_dev_registered(efx)) { 1293 struct efx_channel *channel; 1294 efx_for_each_channel(channel, efx) 1295 efx_stop_queue(channel); 1296 netif_tx_lock_bh(efx->net_dev); 1297 netif_tx_unlock_bh(efx->net_dev); 1298 } 1299} 1300 1301static void efx_remove_all(struct efx_nic *efx) 1302{ 1303 struct efx_channel *channel; 1304 1305 efx_for_each_channel(channel, efx) 1306 efx_remove_channel(channel); 1307 efx_remove_port(efx); 1308 efx_remove_nic(efx); 1309} 1310 1311/************************************************************************** 1312 * 1313 * Interrupt moderation 1314 * 1315 **************************************************************************/ 1316 1317static unsigned irq_mod_ticks(int usecs, int resolution) 1318{ 1319 if (usecs <= 0) 1320 return 0; /* cannot receive interrupts ahead of time :-) */ 1321 if (usecs < resolution) 1322 return 1; /* never round down to 0 */ 1323 return usecs / resolution; 1324} 1325 1326/* Set interrupt moderation parameters */ 1327void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, 1328 bool rx_adaptive) 1329{ 1330 struct efx_tx_queue *tx_queue; 1331 struct efx_rx_queue *rx_queue; 1332 unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION); 1333 unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION); 1334 1335 EFX_ASSERT_RESET_SERIALISED(efx); 1336 1337 efx_for_each_tx_queue(tx_queue, efx) 1338 tx_queue->channel->irq_moderation = tx_ticks; 1339 1340 efx->irq_rx_adaptive = rx_adaptive; 1341 efx->irq_rx_moderation = rx_ticks; 1342 efx_for_each_rx_queue(rx_queue, efx) 1343 rx_queue->channel->irq_moderation = rx_ticks; 1344} 1345 1346/************************************************************************** 1347 * 1348 * Hardware monitor 1349 * 1350 **************************************************************************/ 1351 1352/* Run periodically off the general workqueue. Serialised against 1353 * efx_reconfigure_port via the mac_lock */ 1354static void efx_monitor(struct work_struct *data) 1355{ 1356 struct efx_nic *efx = container_of(data, struct efx_nic, 1357 monitor_work.work); 1358 1359 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n", 1360 raw_smp_processor_id()); 1361 BUG_ON(efx->type->monitor == NULL); 1362 1363 /* If the mac_lock is already held then it is likely a port 1364 * reconfiguration is already in place, which will likely do 1365 * most of the work of check_hw() anyway. */ 1366 if (!mutex_trylock(&efx->mac_lock)) 1367 goto out_requeue; 1368 if (!efx->port_enabled) 1369 goto out_unlock; 1370 efx->type->monitor(efx); 1371 1372out_unlock: 1373 mutex_unlock(&efx->mac_lock); 1374out_requeue: 1375 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1376 efx_monitor_interval); 1377} 1378 1379/************************************************************************** 1380 * 1381 * ioctls 1382 * 1383 *************************************************************************/ 1384 1385/* Net device ioctl 1386 * Context: process, rtnl_lock() held. 1387 */ 1388static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) 1389{ 1390 struct efx_nic *efx = netdev_priv(net_dev); 1391 struct mii_ioctl_data *data = if_mii(ifr); 1392 1393 EFX_ASSERT_RESET_SERIALISED(efx); 1394 1395 /* Convert phy_id from older PRTAD/DEVAD format */ 1396 if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && 1397 (data->phy_id & 0xfc00) == 0x0400) 1398 data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; 1399 1400 return mdio_mii_ioctl(&efx->mdio, data, cmd); 1401} 1402 1403/************************************************************************** 1404 * 1405 * NAPI interface 1406 * 1407 **************************************************************************/ 1408 1409static int efx_init_napi(struct efx_nic *efx) 1410{ 1411 struct efx_channel *channel; 1412 1413 efx_for_each_channel(channel, efx) { 1414 channel->napi_dev = efx->net_dev; 1415 netif_napi_add(channel->napi_dev, &channel->napi_str, 1416 efx_poll, napi_weight); 1417 } 1418 return 0; 1419} 1420 1421static void efx_fini_napi(struct efx_nic *efx) 1422{ 1423 struct efx_channel *channel; 1424 1425 efx_for_each_channel(channel, efx) { 1426 if (channel->napi_dev) 1427 netif_napi_del(&channel->napi_str); 1428 channel->napi_dev = NULL; 1429 } 1430} 1431 1432/************************************************************************** 1433 * 1434 * Kernel netpoll interface 1435 * 1436 *************************************************************************/ 1437 1438#ifdef CONFIG_NET_POLL_CONTROLLER 1439 1440/* Although in the common case interrupts will be disabled, this is not 1441 * guaranteed. However, all our work happens inside the NAPI callback, 1442 * so no locking is required. 1443 */ 1444static void efx_netpoll(struct net_device *net_dev) 1445{ 1446 struct efx_nic *efx = netdev_priv(net_dev); 1447 struct efx_channel *channel; 1448 1449 efx_for_each_channel(channel, efx) 1450 efx_schedule_channel(channel); 1451} 1452 1453#endif 1454 1455/************************************************************************** 1456 * 1457 * Kernel net device interface 1458 * 1459 *************************************************************************/ 1460 1461/* Context: process, rtnl_lock() held. */ 1462static int efx_net_open(struct net_device *net_dev) 1463{ 1464 struct efx_nic *efx = netdev_priv(net_dev); 1465 EFX_ASSERT_RESET_SERIALISED(efx); 1466 1467 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name, 1468 raw_smp_processor_id()); 1469 1470 if (efx->state == STATE_DISABLED) 1471 return -EIO; 1472 if (efx->phy_mode & PHY_MODE_SPECIAL) 1473 return -EBUSY; 1474 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) 1475 return -EIO; 1476 1477 /* Notify the kernel of the link state polled during driver load, 1478 * before the monitor starts running */ 1479 efx_link_status_changed(efx); 1480 1481 efx_start_all(efx); 1482 return 0; 1483} 1484 1485/* Context: process, rtnl_lock() held. 1486 * Note that the kernel will ignore our return code; this method 1487 * should really be a void. 1488 */ 1489static int efx_net_stop(struct net_device *net_dev) 1490{ 1491 struct efx_nic *efx = netdev_priv(net_dev); 1492 1493 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name, 1494 raw_smp_processor_id()); 1495 1496 if (efx->state != STATE_DISABLED) { 1497 /* Stop the device and flush all the channels */ 1498 efx_stop_all(efx); 1499 efx_fini_channels(efx); 1500 efx_init_channels(efx); 1501 } 1502 1503 return 0; 1504} 1505 1506/* Context: process, dev_base_lock or RTNL held, non-blocking. */ 1507static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1508{ 1509 struct efx_nic *efx = netdev_priv(net_dev); 1510 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1511 struct net_device_stats *stats = &net_dev->stats; 1512 1513 spin_lock_bh(&efx->stats_lock); 1514 efx->type->update_stats(efx); 1515 spin_unlock_bh(&efx->stats_lock); 1516 1517 stats->rx_packets = mac_stats->rx_packets; 1518 stats->tx_packets = mac_stats->tx_packets; 1519 stats->rx_bytes = mac_stats->rx_bytes; 1520 stats->tx_bytes = mac_stats->tx_bytes; 1521 stats->multicast = mac_stats->rx_multicast; 1522 stats->collisions = mac_stats->tx_collision; 1523 stats->rx_length_errors = (mac_stats->rx_gtjumbo + 1524 mac_stats->rx_length_error); 1525 stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt; 1526 stats->rx_crc_errors = mac_stats->rx_bad; 1527 stats->rx_frame_errors = mac_stats->rx_align_error; 1528 stats->rx_fifo_errors = mac_stats->rx_overflow; 1529 stats->rx_missed_errors = mac_stats->rx_missed; 1530 stats->tx_window_errors = mac_stats->tx_late_collision; 1531 1532 stats->rx_errors = (stats->rx_length_errors + 1533 stats->rx_over_errors + 1534 stats->rx_crc_errors + 1535 stats->rx_frame_errors + 1536 stats->rx_fifo_errors + 1537 stats->rx_missed_errors + 1538 mac_stats->rx_symbol_error); 1539 stats->tx_errors = (stats->tx_window_errors + 1540 mac_stats->tx_bad); 1541 1542 return stats; 1543} 1544 1545/* Context: netif_tx_lock held, BHs disabled. */ 1546static void efx_watchdog(struct net_device *net_dev) 1547{ 1548 struct efx_nic *efx = netdev_priv(net_dev); 1549 1550 EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n", 1551 efx->port_enabled); 1552 1553 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); 1554} 1555 1556 1557/* Context: process, rtnl_lock() held. */ 1558static int efx_change_mtu(struct net_device *net_dev, int new_mtu) 1559{ 1560 struct efx_nic *efx = netdev_priv(net_dev); 1561 int rc = 0; 1562 1563 EFX_ASSERT_RESET_SERIALISED(efx); 1564 1565 if (new_mtu > EFX_MAX_MTU) 1566 return -EINVAL; 1567 1568 efx_stop_all(efx); 1569 1570 EFX_LOG(efx, "changing MTU to %d\n", new_mtu); 1571 1572 efx_fini_channels(efx); 1573 1574 mutex_lock(&efx->mac_lock); 1575 /* Reconfigure the MAC before enabling the dma queues so that 1576 * the RX buffers don't overflow */ 1577 net_dev->mtu = new_mtu; 1578 efx->mac_op->reconfigure(efx); 1579 mutex_unlock(&efx->mac_lock); 1580 1581 efx_init_channels(efx); 1582 1583 efx_start_all(efx); 1584 return rc; 1585} 1586 1587static int efx_set_mac_address(struct net_device *net_dev, void *data) 1588{ 1589 struct efx_nic *efx = netdev_priv(net_dev); 1590 struct sockaddr *addr = data; 1591 char *new_addr = addr->sa_data; 1592 1593 EFX_ASSERT_RESET_SERIALISED(efx); 1594 1595 if (!is_valid_ether_addr(new_addr)) { 1596 EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n", 1597 new_addr); 1598 return -EINVAL; 1599 } 1600 1601 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); 1602 1603 /* Reconfigure the MAC */ 1604 mutex_lock(&efx->mac_lock); 1605 efx->mac_op->reconfigure(efx); 1606 mutex_unlock(&efx->mac_lock); 1607 1608 return 0; 1609} 1610 1611/* Context: netif_addr_lock held, BHs disabled. */ 1612static void efx_set_multicast_list(struct net_device *net_dev) 1613{ 1614 struct efx_nic *efx = netdev_priv(net_dev); 1615 struct netdev_hw_addr *ha; 1616 union efx_multicast_hash *mc_hash = &efx->multicast_hash; 1617 u32 crc; 1618 int bit; 1619 1620 efx->promiscuous = !!(net_dev->flags & IFF_PROMISC); 1621 1622 /* Build multicast hash table */ 1623 if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) { 1624 memset(mc_hash, 0xff, sizeof(*mc_hash)); 1625 } else { 1626 memset(mc_hash, 0x00, sizeof(*mc_hash)); 1627 netdev_for_each_mc_addr(ha, net_dev) { 1628 crc = ether_crc_le(ETH_ALEN, ha->addr); 1629 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); 1630 set_bit_le(bit, mc_hash->byte); 1631 } 1632 1633 /* Broadcast packets go through the multicast hash filter. 1634 * ether_crc_le() of the broadcast address is 0xbe2612ff 1635 * so we always add bit 0xff to the mask. 1636 */ 1637 set_bit_le(0xff, mc_hash->byte); 1638 } 1639 1640 if (efx->port_enabled) 1641 queue_work(efx->workqueue, &efx->mac_work); 1642 /* Otherwise efx_start_port() will do this */ 1643} 1644 1645static const struct net_device_ops efx_netdev_ops = { 1646 .ndo_open = efx_net_open, 1647 .ndo_stop = efx_net_stop, 1648 .ndo_get_stats = efx_net_stats, 1649 .ndo_tx_timeout = efx_watchdog, 1650 .ndo_start_xmit = efx_hard_start_xmit, 1651 .ndo_validate_addr = eth_validate_addr, 1652 .ndo_do_ioctl = efx_ioctl, 1653 .ndo_change_mtu = efx_change_mtu, 1654 .ndo_set_mac_address = efx_set_mac_address, 1655 .ndo_set_multicast_list = efx_set_multicast_list, 1656#ifdef CONFIG_NET_POLL_CONTROLLER 1657 .ndo_poll_controller = efx_netpoll, 1658#endif 1659}; 1660 1661static void efx_update_name(struct efx_nic *efx) 1662{ 1663 strcpy(efx->name, efx->net_dev->name); 1664 efx_mtd_rename(efx); 1665 efx_set_channel_names(efx); 1666} 1667 1668static int efx_netdev_event(struct notifier_block *this, 1669 unsigned long event, void *ptr) 1670{ 1671 struct net_device *net_dev = ptr; 1672 1673 if (net_dev->netdev_ops == &efx_netdev_ops && 1674 event == NETDEV_CHANGENAME) 1675 efx_update_name(netdev_priv(net_dev)); 1676 1677 return NOTIFY_DONE; 1678} 1679 1680static struct notifier_block efx_netdev_notifier = { 1681 .notifier_call = efx_netdev_event, 1682}; 1683 1684static ssize_t 1685show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) 1686{ 1687 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 1688 return sprintf(buf, "%d\n", efx->phy_type); 1689} 1690static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL); 1691 1692static int efx_register_netdev(struct efx_nic *efx) 1693{ 1694 struct net_device *net_dev = efx->net_dev; 1695 int rc; 1696 1697 net_dev->watchdog_timeo = 5 * HZ; 1698 net_dev->irq = efx->pci_dev->irq; 1699 net_dev->netdev_ops = &efx_netdev_ops; 1700 SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev); 1701 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 1702 1703 /* Clear MAC statistics */ 1704 efx->mac_op->update_stats(efx); 1705 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats)); 1706 1707 rtnl_lock(); 1708 1709 rc = dev_alloc_name(net_dev, net_dev->name); 1710 if (rc < 0) 1711 goto fail_locked; 1712 efx_update_name(efx); 1713 1714 rc = register_netdevice(net_dev); 1715 if (rc) 1716 goto fail_locked; 1717 1718 /* Always start with carrier off; PHY events will detect the link */ 1719 netif_carrier_off(efx->net_dev); 1720 1721 rtnl_unlock(); 1722 1723 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); 1724 if (rc) { 1725 EFX_ERR(efx, "failed to init net dev attributes\n"); 1726 goto fail_registered; 1727 } 1728 1729 return 0; 1730 1731fail_locked: 1732 rtnl_unlock(); 1733 EFX_ERR(efx, "could not register net dev\n"); 1734 return rc; 1735 1736fail_registered: 1737 unregister_netdev(net_dev); 1738 return rc; 1739} 1740 1741static void efx_unregister_netdev(struct efx_nic *efx) 1742{ 1743 struct efx_tx_queue *tx_queue; 1744 1745 if (!efx->net_dev) 1746 return; 1747 1748 BUG_ON(netdev_priv(efx->net_dev) != efx); 1749 1750 /* Free up any skbs still remaining. This has to happen before 1751 * we try to unregister the netdev as running their destructors 1752 * may be needed to get the device ref. count to 0. */ 1753 efx_for_each_tx_queue(tx_queue, efx) 1754 efx_release_tx_buffers(tx_queue); 1755 1756 if (efx_dev_registered(efx)) { 1757 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 1758 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 1759 unregister_netdev(efx->net_dev); 1760 } 1761} 1762 1763/************************************************************************** 1764 * 1765 * Device reset and suspend 1766 * 1767 **************************************************************************/ 1768 1769/* Tears down the entire software state and most of the hardware state 1770 * before reset. */ 1771void efx_reset_down(struct efx_nic *efx, enum reset_type method) 1772{ 1773 EFX_ASSERT_RESET_SERIALISED(efx); 1774 1775 efx_stop_all(efx); 1776 mutex_lock(&efx->mac_lock); 1777 mutex_lock(&efx->spi_lock); 1778 1779 efx_fini_channels(efx); 1780 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) 1781 efx->phy_op->fini(efx); 1782 efx->type->fini(efx); 1783} 1784 1785/* This function will always ensure that the locks acquired in 1786 * efx_reset_down() are released. A failure return code indicates 1787 * that we were unable to reinitialise the hardware, and the 1788 * driver should be disabled. If ok is false, then the rx and tx 1789 * engines are not restarted, pending a RESET_DISABLE. */ 1790int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) 1791{ 1792 int rc; 1793 1794 EFX_ASSERT_RESET_SERIALISED(efx); 1795 1796 rc = efx->type->init(efx); 1797 if (rc) { 1798 EFX_ERR(efx, "failed to initialise NIC\n"); 1799 goto fail; 1800 } 1801 1802 if (!ok) 1803 goto fail; 1804 1805 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { 1806 rc = efx->phy_op->init(efx); 1807 if (rc) 1808 goto fail; 1809 if (efx->phy_op->reconfigure(efx)) 1810 EFX_ERR(efx, "could not restore PHY settings\n"); 1811 } 1812 1813 efx->mac_op->reconfigure(efx); 1814 1815 efx_init_channels(efx); 1816 1817 mutex_unlock(&efx->spi_lock); 1818 mutex_unlock(&efx->mac_lock); 1819 1820 efx_start_all(efx); 1821 1822 return 0; 1823 1824fail: 1825 efx->port_initialized = false; 1826 1827 mutex_unlock(&efx->spi_lock); 1828 mutex_unlock(&efx->mac_lock); 1829 1830 return rc; 1831} 1832 1833/* Reset the NIC using the specified method. Note that the reset may 1834 * fail, in which case the card will be left in an unusable state. 1835 * 1836 * Caller must hold the rtnl_lock. 1837 */ 1838int efx_reset(struct efx_nic *efx, enum reset_type method) 1839{ 1840 int rc, rc2; 1841 bool disabled; 1842 1843 EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method)); 1844 1845 efx_reset_down(efx, method); 1846 1847 rc = efx->type->reset(efx, method); 1848 if (rc) { 1849 EFX_ERR(efx, "failed to reset hardware\n"); 1850 goto out; 1851 } 1852 1853 /* Allow resets to be rescheduled. */ 1854 efx->reset_pending = RESET_TYPE_NONE; 1855 1856 /* Reinitialise bus-mastering, which may have been turned off before 1857 * the reset was scheduled. This is still appropriate, even in the 1858 * RESET_TYPE_DISABLE since this driver generally assumes the hardware 1859 * can respond to requests. */ 1860 pci_set_master(efx->pci_dev); 1861 1862out: 1863 /* Leave device stopped if necessary */ 1864 disabled = rc || method == RESET_TYPE_DISABLE; 1865 rc2 = efx_reset_up(efx, method, !disabled); 1866 if (rc2) { 1867 disabled = true; 1868 if (!rc) 1869 rc = rc2; 1870 } 1871 1872 if (disabled) { 1873 dev_close(efx->net_dev); 1874 EFX_ERR(efx, "has been disabled\n"); 1875 efx->state = STATE_DISABLED; 1876 } else { 1877 EFX_LOG(efx, "reset complete\n"); 1878 } 1879 return rc; 1880} 1881 1882/* The worker thread exists so that code that cannot sleep can 1883 * schedule a reset for later. 1884 */ 1885static void efx_reset_work(struct work_struct *data) 1886{ 1887 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); 1888 1889 /* If we're not RUNNING then don't reset. Leave the reset_pending 1890 * flag set so that efx_pci_probe_main will be retried */ 1891 if (efx->state != STATE_RUNNING) { 1892 EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n"); 1893 return; 1894 } 1895 1896 rtnl_lock(); 1897 (void)efx_reset(efx, efx->reset_pending); 1898 rtnl_unlock(); 1899} 1900 1901void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) 1902{ 1903 enum reset_type method; 1904 1905 if (efx->reset_pending != RESET_TYPE_NONE) { 1906 EFX_INFO(efx, "quenching already scheduled reset\n"); 1907 return; 1908 } 1909 1910 switch (type) { 1911 case RESET_TYPE_INVISIBLE: 1912 case RESET_TYPE_ALL: 1913 case RESET_TYPE_WORLD: 1914 case RESET_TYPE_DISABLE: 1915 method = type; 1916 break; 1917 case RESET_TYPE_RX_RECOVERY: 1918 case RESET_TYPE_RX_DESC_FETCH: 1919 case RESET_TYPE_TX_DESC_FETCH: 1920 case RESET_TYPE_TX_SKIP: 1921 method = RESET_TYPE_INVISIBLE; 1922 break; 1923 case RESET_TYPE_MC_FAILURE: 1924 default: 1925 method = RESET_TYPE_ALL; 1926 break; 1927 } 1928 1929 if (method != type) 1930 EFX_LOG(efx, "scheduling %s reset for %s\n", 1931 RESET_TYPE(method), RESET_TYPE(type)); 1932 else 1933 EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method)); 1934 1935 efx->reset_pending = method; 1936 1937 /* efx_process_channel() will no longer read events once a 1938 * reset is scheduled. So switch back to poll'd MCDI completions. */ 1939 efx_mcdi_mode_poll(efx); 1940 1941 queue_work(reset_workqueue, &efx->reset_work); 1942} 1943 1944/************************************************************************** 1945 * 1946 * List of NICs we support 1947 * 1948 **************************************************************************/ 1949 1950/* PCI device ID table */ 1951static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { 1952 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), 1953 .driver_data = (unsigned long) &falcon_a1_nic_type}, 1954 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), 1955 .driver_data = (unsigned long) &falcon_b0_nic_type}, 1956 {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID), 1957 .driver_data = (unsigned long) &siena_a0_nic_type}, 1958 {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID), 1959 .driver_data = (unsigned long) &siena_a0_nic_type}, 1960 {0} /* end of list */ 1961}; 1962 1963/************************************************************************** 1964 * 1965 * Dummy PHY/MAC operations 1966 * 1967 * Can be used for some unimplemented operations 1968 * Needed so all function pointers are valid and do not have to be tested 1969 * before use 1970 * 1971 **************************************************************************/ 1972int efx_port_dummy_op_int(struct efx_nic *efx) 1973{ 1974 return 0; 1975} 1976void efx_port_dummy_op_void(struct efx_nic *efx) {} 1977void efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) 1978{ 1979} 1980bool efx_port_dummy_op_poll(struct efx_nic *efx) 1981{ 1982 return false; 1983} 1984 1985static struct efx_phy_operations efx_dummy_phy_operations = { 1986 .init = efx_port_dummy_op_int, 1987 .reconfigure = efx_port_dummy_op_int, 1988 .poll = efx_port_dummy_op_poll, 1989 .fini = efx_port_dummy_op_void, 1990}; 1991 1992/************************************************************************** 1993 * 1994 * Data housekeeping 1995 * 1996 **************************************************************************/ 1997 1998/* This zeroes out and then fills in the invariants in a struct 1999 * efx_nic (including all sub-structures). 2000 */ 2001static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, 2002 struct pci_dev *pci_dev, struct net_device *net_dev) 2003{ 2004 struct efx_channel *channel; 2005 struct efx_tx_queue *tx_queue; 2006 struct efx_rx_queue *rx_queue; 2007 int i; 2008 2009 /* Initialise common structures */ 2010 memset(efx, 0, sizeof(*efx)); 2011 spin_lock_init(&efx->biu_lock); 2012 mutex_init(&efx->mdio_lock); 2013 mutex_init(&efx->spi_lock); 2014#ifdef CONFIG_SFC_MTD 2015 INIT_LIST_HEAD(&efx->mtd_list); 2016#endif 2017 INIT_WORK(&efx->reset_work, efx_reset_work); 2018 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); 2019 efx->pci_dev = pci_dev; 2020 efx->state = STATE_INIT; 2021 efx->reset_pending = RESET_TYPE_NONE; 2022 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2023 2024 efx->net_dev = net_dev; 2025 efx->rx_checksum_enabled = true; 2026 spin_lock_init(&efx->stats_lock); 2027 mutex_init(&efx->mac_lock); 2028 efx->mac_op = type->default_mac_ops; 2029 efx->phy_op = &efx_dummy_phy_operations; 2030 efx->mdio.dev = net_dev; 2031 INIT_WORK(&efx->mac_work, efx_mac_work); 2032 2033 for (i = 0; i < EFX_MAX_CHANNELS; i++) { 2034 channel = &efx->channel[i]; 2035 channel->efx = efx; 2036 channel->channel = i; 2037 channel->work_pending = false; 2038 spin_lock_init(&channel->tx_stop_lock); 2039 atomic_set(&channel->tx_stop_count, 1); 2040 } 2041 for (i = 0; i < EFX_MAX_TX_QUEUES; i++) { 2042 tx_queue = &efx->tx_queue[i]; 2043 tx_queue->efx = efx; 2044 tx_queue->queue = i; 2045 tx_queue->buffer = NULL; 2046 tx_queue->channel = &efx->channel[0]; /* for safety */ 2047 tx_queue->tso_headers_free = NULL; 2048 } 2049 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) { 2050 rx_queue = &efx->rx_queue[i]; 2051 rx_queue->efx = efx; 2052 rx_queue->queue = i; 2053 rx_queue->channel = &efx->channel[0]; /* for safety */ 2054 rx_queue->buffer = NULL; 2055 spin_lock_init(&rx_queue->add_lock); 2056 INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work); 2057 } 2058 2059 efx->type = type; 2060 2061 /* As close as we can get to guaranteeing that we don't overflow */ 2062 BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE); 2063 2064 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); 2065 2066 /* Higher numbered interrupt modes are less capable! */ 2067 efx->interrupt_mode = max(efx->type->max_interrupt_mode, 2068 interrupt_mode); 2069 2070 /* Would be good to use the net_dev name, but we're too early */ 2071 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", 2072 pci_name(pci_dev)); 2073 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); 2074 if (!efx->workqueue) 2075 return -ENOMEM; 2076 2077 return 0; 2078} 2079 2080static void efx_fini_struct(struct efx_nic *efx) 2081{ 2082 if (efx->workqueue) { 2083 destroy_workqueue(efx->workqueue); 2084 efx->workqueue = NULL; 2085 } 2086} 2087 2088/************************************************************************** 2089 * 2090 * PCI interface 2091 * 2092 **************************************************************************/ 2093 2094/* Main body of final NIC shutdown code 2095 * This is called only at module unload (or hotplug removal). 2096 */ 2097static void efx_pci_remove_main(struct efx_nic *efx) 2098{ 2099 efx_nic_fini_interrupt(efx); 2100 efx_fini_channels(efx); 2101 efx_fini_port(efx); 2102 efx->type->fini(efx); 2103 efx_fini_napi(efx); 2104 efx_remove_all(efx); 2105} 2106 2107/* Final NIC shutdown 2108 * This is called only at module unload (or hotplug removal). 2109 */ 2110static void efx_pci_remove(struct pci_dev *pci_dev) 2111{ 2112 struct efx_nic *efx; 2113 2114 efx = pci_get_drvdata(pci_dev); 2115 if (!efx) 2116 return; 2117 2118 /* Mark the NIC as fini, then stop the interface */ 2119 rtnl_lock(); 2120 efx->state = STATE_FINI; 2121 dev_close(efx->net_dev); 2122 2123 /* Allow any queued efx_resets() to complete */ 2124 rtnl_unlock(); 2125 2126 efx_unregister_netdev(efx); 2127 2128 efx_mtd_remove(efx); 2129 2130 /* Wait for any scheduled resets to complete. No more will be 2131 * scheduled from this point because efx_stop_all() has been 2132 * called, we are no longer registered with driverlink, and 2133 * the net_device's have been removed. */ 2134 cancel_work_sync(&efx->reset_work); 2135 2136 efx_pci_remove_main(efx); 2137 2138 efx_fini_io(efx); 2139 EFX_LOG(efx, "shutdown successful\n"); 2140 2141 pci_set_drvdata(pci_dev, NULL); 2142 efx_fini_struct(efx); 2143 free_netdev(efx->net_dev); 2144}; 2145 2146/* Main body of NIC initialisation 2147 * This is called at module load (or hotplug insertion, theoretically). 2148 */ 2149static int efx_pci_probe_main(struct efx_nic *efx) 2150{ 2151 int rc; 2152 2153 /* Do start-of-day initialisation */ 2154 rc = efx_probe_all(efx); 2155 if (rc) 2156 goto fail1; 2157 2158 rc = efx_init_napi(efx); 2159 if (rc) 2160 goto fail2; 2161 2162 rc = efx->type->init(efx); 2163 if (rc) { 2164 EFX_ERR(efx, "failed to initialise NIC\n"); 2165 goto fail3; 2166 } 2167 2168 rc = efx_init_port(efx); 2169 if (rc) { 2170 EFX_ERR(efx, "failed to initialise port\n"); 2171 goto fail4; 2172 } 2173 2174 efx_init_channels(efx); 2175 2176 rc = efx_nic_init_interrupt(efx); 2177 if (rc) 2178 goto fail5; 2179 2180 return 0; 2181 2182 fail5: 2183 efx_fini_channels(efx); 2184 efx_fini_port(efx); 2185 fail4: 2186 efx->type->fini(efx); 2187 fail3: 2188 efx_fini_napi(efx); 2189 fail2: 2190 efx_remove_all(efx); 2191 fail1: 2192 return rc; 2193} 2194 2195/* NIC initialisation 2196 * 2197 * This is called at module load (or hotplug insertion, 2198 * theoretically). It sets up PCI mappings, tests and resets the NIC, 2199 * sets up and registers the network devices with the kernel and hooks 2200 * the interrupt service routine. It does not prepare the device for 2201 * transmission; this is left to the first time one of the network 2202 * interfaces is brought up (i.e. efx_net_open). 2203 */ 2204static int __devinit efx_pci_probe(struct pci_dev *pci_dev, 2205 const struct pci_device_id *entry) 2206{ 2207 struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data; 2208 struct net_device *net_dev; 2209 struct efx_nic *efx; 2210 int i, rc; 2211 2212 /* Allocate and initialise a struct net_device and struct efx_nic */ 2213 net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES); 2214 if (!net_dev) 2215 return -ENOMEM; 2216 net_dev->features |= (type->offload_features | NETIF_F_SG | 2217 NETIF_F_HIGHDMA | NETIF_F_TSO | 2218 NETIF_F_GRO); 2219 if (type->offload_features & NETIF_F_V6_CSUM) 2220 net_dev->features |= NETIF_F_TSO6; 2221 /* Mask for features that also apply to VLAN devices */ 2222 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | 2223 NETIF_F_HIGHDMA | NETIF_F_TSO); 2224 efx = netdev_priv(net_dev); 2225 pci_set_drvdata(pci_dev, efx); 2226 rc = efx_init_struct(efx, type, pci_dev, net_dev); 2227 if (rc) 2228 goto fail1; 2229 2230 EFX_INFO(efx, "Solarflare Communications NIC detected\n"); 2231 2232 /* Set up basic I/O (BAR mappings etc) */ 2233 rc = efx_init_io(efx); 2234 if (rc) 2235 goto fail2; 2236 2237 /* No serialisation is required with the reset path because 2238 * we're in STATE_INIT. */ 2239 for (i = 0; i < 5; i++) { 2240 rc = efx_pci_probe_main(efx); 2241 2242 /* Serialise against efx_reset(). No more resets will be 2243 * scheduled since efx_stop_all() has been called, and we 2244 * have not and never have been registered with either 2245 * the rtnetlink or driverlink layers. */ 2246 cancel_work_sync(&efx->reset_work); 2247 2248 if (rc == 0) { 2249 if (efx->reset_pending != RESET_TYPE_NONE) { 2250 /* If there was a scheduled reset during 2251 * probe, the NIC is probably hosed anyway */ 2252 efx_pci_remove_main(efx); 2253 rc = -EIO; 2254 } else { 2255 break; 2256 } 2257 } 2258 2259 /* Retry if a recoverably reset event has been scheduled */ 2260 if ((efx->reset_pending != RESET_TYPE_INVISIBLE) && 2261 (efx->reset_pending != RESET_TYPE_ALL)) 2262 goto fail3; 2263 2264 efx->reset_pending = RESET_TYPE_NONE; 2265 } 2266 2267 if (rc) { 2268 EFX_ERR(efx, "Could not reset NIC\n"); 2269 goto fail4; 2270 } 2271 2272 /* Switch to the running state before we expose the device to the OS, 2273 * so that dev_open()|efx_start_all() will actually start the device */ 2274 efx->state = STATE_RUNNING; 2275 2276 rc = efx_register_netdev(efx); 2277 if (rc) 2278 goto fail5; 2279 2280 EFX_LOG(efx, "initialisation successful\n"); 2281 2282 rtnl_lock(); 2283 efx_mtd_probe(efx); /* allowed to fail */ 2284 rtnl_unlock(); 2285 return 0; 2286 2287 fail5: 2288 efx_pci_remove_main(efx); 2289 fail4: 2290 fail3: 2291 efx_fini_io(efx); 2292 fail2: 2293 efx_fini_struct(efx); 2294 fail1: 2295 WARN_ON(rc > 0); 2296 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); 2297 free_netdev(net_dev); 2298 return rc; 2299} 2300 2301static int efx_pm_freeze(struct device *dev) 2302{ 2303 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2304 2305 efx->state = STATE_FINI; 2306 2307 netif_device_detach(efx->net_dev); 2308 2309 efx_stop_all(efx); 2310 efx_fini_channels(efx); 2311 2312 return 0; 2313} 2314 2315static int efx_pm_thaw(struct device *dev) 2316{ 2317 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2318 2319 efx->state = STATE_INIT; 2320 2321 efx_init_channels(efx); 2322 2323 mutex_lock(&efx->mac_lock); 2324 efx->phy_op->reconfigure(efx); 2325 mutex_unlock(&efx->mac_lock); 2326 2327 efx_start_all(efx); 2328 2329 netif_device_attach(efx->net_dev); 2330 2331 efx->state = STATE_RUNNING; 2332 2333 efx->type->resume_wol(efx); 2334 2335 return 0; 2336} 2337 2338static int efx_pm_poweroff(struct device *dev) 2339{ 2340 struct pci_dev *pci_dev = to_pci_dev(dev); 2341 struct efx_nic *efx = pci_get_drvdata(pci_dev); 2342 2343 efx->type->fini(efx); 2344 2345 efx->reset_pending = RESET_TYPE_NONE; 2346 2347 pci_save_state(pci_dev); 2348 return pci_set_power_state(pci_dev, PCI_D3hot); 2349} 2350 2351/* Used for both resume and restore */ 2352static int efx_pm_resume(struct device *dev) 2353{ 2354 struct pci_dev *pci_dev = to_pci_dev(dev); 2355 struct efx_nic *efx = pci_get_drvdata(pci_dev); 2356 int rc; 2357 2358 rc = pci_set_power_state(pci_dev, PCI_D0); 2359 if (rc) 2360 return rc; 2361 pci_restore_state(pci_dev); 2362 rc = pci_enable_device(pci_dev); 2363 if (rc) 2364 return rc; 2365 pci_set_master(efx->pci_dev); 2366 rc = efx->type->reset(efx, RESET_TYPE_ALL); 2367 if (rc) 2368 return rc; 2369 rc = efx->type->init(efx); 2370 if (rc) 2371 return rc; 2372 efx_pm_thaw(dev); 2373 return 0; 2374} 2375 2376static int efx_pm_suspend(struct device *dev) 2377{ 2378 int rc; 2379 2380 efx_pm_freeze(dev); 2381 rc = efx_pm_poweroff(dev); 2382 if (rc) 2383 efx_pm_resume(dev); 2384 return rc; 2385} 2386 2387static struct dev_pm_ops efx_pm_ops = { 2388 .suspend = efx_pm_suspend, 2389 .resume = efx_pm_resume, 2390 .freeze = efx_pm_freeze, 2391 .thaw = efx_pm_thaw, 2392 .poweroff = efx_pm_poweroff, 2393 .restore = efx_pm_resume, 2394}; 2395 2396static struct pci_driver efx_pci_driver = { 2397 .name = EFX_DRIVER_NAME, 2398 .id_table = efx_pci_table, 2399 .probe = efx_pci_probe, 2400 .remove = efx_pci_remove, 2401 .driver.pm = &efx_pm_ops, 2402}; 2403 2404/************************************************************************** 2405 * 2406 * Kernel module interface 2407 * 2408 *************************************************************************/ 2409 2410module_param(interrupt_mode, uint, 0444); 2411MODULE_PARM_DESC(interrupt_mode, 2412 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); 2413 2414static int __init efx_init_module(void) 2415{ 2416 int rc; 2417 2418 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); 2419 2420 rc = register_netdevice_notifier(&efx_netdev_notifier); 2421 if (rc) 2422 goto err_notifier; 2423 2424 refill_workqueue = create_workqueue("sfc_refill"); 2425 if (!refill_workqueue) { 2426 rc = -ENOMEM; 2427 goto err_refill; 2428 } 2429 reset_workqueue = create_singlethread_workqueue("sfc_reset"); 2430 if (!reset_workqueue) { 2431 rc = -ENOMEM; 2432 goto err_reset; 2433 } 2434 2435 rc = pci_register_driver(&efx_pci_driver); 2436 if (rc < 0) 2437 goto err_pci; 2438 2439 return 0; 2440 2441 err_pci: 2442 destroy_workqueue(reset_workqueue); 2443 err_reset: 2444 destroy_workqueue(refill_workqueue); 2445 err_refill: 2446 unregister_netdevice_notifier(&efx_netdev_notifier); 2447 err_notifier: 2448 return rc; 2449} 2450 2451static void __exit efx_exit_module(void) 2452{ 2453 printk(KERN_INFO "Solarflare NET driver unloading\n"); 2454 2455 pci_unregister_driver(&efx_pci_driver); 2456 destroy_workqueue(reset_workqueue); 2457 destroy_workqueue(refill_workqueue); 2458 unregister_netdevice_notifier(&efx_netdev_notifier); 2459 2460} 2461 2462module_init(efx_init_module); 2463module_exit(efx_exit_module); 2464 2465MODULE_AUTHOR("Solarflare Communications and " 2466 "Michael Brown <mbrown@fensystems.co.uk>"); 2467MODULE_DESCRIPTION("Solarflare Communications network driver"); 2468MODULE_LICENSE("GPL"); 2469MODULE_DEVICE_TABLE(pci, efx_pci_table);