Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at 17431928194b36a0f88082df875e2e036da7fddf (4778 lines, 134 kB)
/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k6-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
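/*
 * Illustrative expansion of one table entry (INTEL_E1000_ETHERNET_DEVICE
 * comes from e1000.h; this is a sketch of what it presumably resolves to,
 * not text from this file):
 *
 *   INTEL_E1000_ETHERNET_DEVICE(0x100E)
 *     -> { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x100E) }
 *     -> { .vendor = 0x8086, .device = 0x100E,
 *          .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID }
 *
 * MODULE_DEVICE_TABLE(pci, ...) embeds the table in the module image so
 * depmod/udev can autoload e1000 when a matching vendor/device pair shows
 * up on the PCI bus.
 */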
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static void e1000_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - return device
 * used by hardware layer to print debugging information
 *
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);
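/*
 * copybreak in practice (illustrative; the copy itself happens in the Rx
 * clean path further down): with the default of 256, a 60-byte ARP reply
 * is copied into a freshly allocated small skb so the full-sized Rx
 * buffer can be recycled in place, while a 1500-byte frame is passed up
 * in the buffer it was DMA'd into. Because module_param() above uses
 * mode 0644, the threshold can also be changed at runtime through
 * /sys/module/e1000/parameters/copybreak.
 */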
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		e_err("Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (hw->mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
				e1000_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;

			if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				e1000_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: board private structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}
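/*
 * Worked example for the E1000_DESC_UNUSED() call above, assuming the
 * usual macro definition from e1000.h:
 *
 *   E1000_DESC_UNUSED(R) =
 *       (((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count)
 *       + (R)->next_to_clean - (R)->next_to_use - 1
 *
 * With count = 256, next_to_clean = 5 and next_to_use = 10 this gives
 * 256 + 5 - 10 - 1 = 250 allocatable descriptors. The trailing "- 1" is
 * the one descriptor deliberately kept unused, so next_to_use ==
 * next_to_clean can only ever mean "empty", never "full".
 */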
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down.
	 * The PHY cannot be powered down if any of the following is true:
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		mdelay(1);
	}
out:
	return;
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__E1000_DOWN, &adapter->flags);

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	del_timer_sync(&adapter->tx_fifo_stall_timer);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition PBA for greater than 9k MTU.
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the Tx FIFO also stores 16 bytes of information about each
		 * Tx packet, but don't count the Ethernet FCS because
		 * hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/*
	 * flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
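/*
 * Worked example of the watermark arithmetic above: with pba = 48 (KB)
 * and max_frame_size = 1522,
 *
 *   hwm = min(48 * 1024 * 9 / 10, 48 * 1024 - 1522)
 *       = min(44236, 47630) = 44236
 *
 * so fc_high_water = 44236 & 0xFFF8 = 44232 and fc_low_water = 44224.
 * That leaves 49152 - 44232 = 4920 bytes of Rx FIFO headroom above the
 * high-water mark, comfortably more than one full 1522-byte frame.
 */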
/**
 * Dump the eeprom for users having checksum issues
 **/
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data) {
		pr_err("Unable to allocate memory to dump EEPROM data\n");
		return;
	}

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}
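/*
 * Checksum arithmetic used above (assuming the usual constants from
 * e1000_hw.h: EEPROM_CHECKSUM_REG = 0x3F, EEPROM_SUM = 0xBABA): the
 * sixty-four 16-bit words at offsets 0x00-0x3F are defined to sum to
 * 0xBABA modulo 2^16, so
 *
 *   csum_new = EEPROM_SUM - sum(words 0x00..0x3E)
 *
 * recomputes what the checksum word should contain, while csum_old is
 * the value actually read back from the part.
 */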
/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= e1000_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
};

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				pr_err("No usable DMA config, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = (1 << debug) - 1;
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;

	if (hw->mac_type >= e1000_82543) {
		netdev->features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_RX |
				   NETIF_F_HW_VLAN_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->features |= NETIF_F_TSO;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err("EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */
	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err("The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/*
		 * set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err("EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		e_err("Invalid MAC Address\n");

	e1000_get_bus_info(hw);

	init_timer(&adapter->tx_fifo_stall_timer);
	adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
	adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = 1;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* print bus type/speed/width info */
	e_info("(PCI%s:%s:%s) ",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
		(hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
		(hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
		(hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
	       ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));

	e_info("%pM\n", netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info("Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	set_bit(__E1000_DOWN, &adapter->flags);
	del_timer_sync(&adapter->tx_fifo_stall_timer);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);

	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
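/*
 * Note the mirror-image structure: e1000_remove() undoes, roughly in
 * reverse order, what e1000_probe() set up, and probe's error labels
 * above fall through so each failure point unwinds exactly the steps
 * that had already succeeded. A minimal sketch of the idiom (hypothetical
 * names, for illustration only):
 *
 *	err = step_a();
 *	if (err)
 *		goto err_a;
 *	err = step_b();
 *	if (err)
 *		goto err_b;
 *	return 0;
 * err_b:
 *	undo_a();
 * err_a:
 *	return err;
 */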
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	hw->max_frame_size = netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err("Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */
	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err("Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/

static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !(adapter->vlgrp &&
	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
	}

	return true;
}
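/*
 * Worked example for the check above: begin = 0x0000F000 with
 * len = 0x2000 gives end = 0x00011000, so
 *
 *   (begin ^ (end - 1)) >> 16 = (0x0000F000 ^ 0x00010FFF) >> 16
 *                             = 0x0001FFFF >> 16 = 1
 *
 * The first and last byte disagree above bit 15, meaning the buffer
 * straddles a 64 KB boundary, and on first-rev 82545/82546 parts the
 * function reports false so the caller re-allocates.
 */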
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/

static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vmalloc(size);
	if (!txdr->buffer_info) {
		e_err("Unable to allocate memory for the Tx descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		e_err("Unable to allocate memory for the Tx descriptor ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err("txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err("Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err("Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}
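/*
 * The double-allocation dance in e1000_setup_tx_resources() is
 * deliberate: when the first dma_alloc_coherent() result straddles a
 * 64 KB boundary, a replacement is allocated *before* the old buffer is
 * freed, so the allocator cannot simply hand the same misaligned region
 * straight back; whichever buffer fails the alignment check is then the
 * one released.
 */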
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */
	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = 1;

	ew32(TCTL, tctl);
}
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if (!rxdr->buffer_info) {
		e_err("Unable to allocate memory for the Rx descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
		e_err("Unable to allocate memory for the Rx descriptor ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err("rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err("Unable to allocate memory for the Rx descriptor "
			      "ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err("Unable to allocate aligned memory for the Rx "
			      "descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err("Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	ew32(RCTL, rctl);
}
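/*
 * Buffer-size encoding example for e1000_setup_rctl() above (based on
 * the usual RCTL layout on these parts): the BSEX bit scales the two-bit
 * SZ field by 16. rx_buffer_len == 2048 therefore programs SZ_2048 with
 * BSEX cleared, while 16384 keeps BSEX set so the same field value that
 * would otherwise mean 1024 bytes is read as 1024 * 16 = 16384.
 */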
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl);
}
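/*
 * ITR example for the write in e1000_configure_rx() above: the ITR
 * register counts in units of 256 ns (per the 8254x documentation), so
 * for a target of adapter->itr = 8000 interrupts/sec,
 *
 *   1000000000 / (8000 * 256) = 488
 *
 * and 488 * 256 ns is roughly 125 us between interrupts, i.e. ~8000/s.
 */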
buffer_info->skb = NULL; 1848 } 1849 buffer_info->time_stamp = 0; 1850 /* buffer_info must be completely set up in the transmit path */ 1851} 1852 1853/** 1854 * e1000_clean_tx_ring - Free Tx Buffers 1855 * @adapter: board private structure 1856 * @tx_ring: ring to be cleaned 1857 **/ 1858 1859static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 1860 struct e1000_tx_ring *tx_ring) 1861{ 1862 struct e1000_hw *hw = &adapter->hw; 1863 struct e1000_buffer *buffer_info; 1864 unsigned long size; 1865 unsigned int i; 1866 1867 /* Free all the Tx ring sk_buffs */ 1868 1869 for (i = 0; i < tx_ring->count; i++) { 1870 buffer_info = &tx_ring->buffer_info[i]; 1871 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1872 } 1873 1874 size = sizeof(struct e1000_buffer) * tx_ring->count; 1875 memset(tx_ring->buffer_info, 0, size); 1876 1877 /* Zero out the descriptor ring */ 1878 1879 memset(tx_ring->desc, 0, tx_ring->size); 1880 1881 tx_ring->next_to_use = 0; 1882 tx_ring->next_to_clean = 0; 1883 tx_ring->last_tx_tso = 0; 1884 1885 writel(0, hw->hw_addr + tx_ring->tdh); 1886 writel(0, hw->hw_addr + tx_ring->tdt); 1887} 1888 1889/** 1890 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues 1891 * @adapter: board private structure 1892 **/ 1893 1894static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) 1895{ 1896 int i; 1897 1898 for (i = 0; i < adapter->num_tx_queues; i++) 1899 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 1900} 1901 1902/** 1903 * e1000_free_rx_resources - Free Rx Resources 1904 * @adapter: board private structure 1905 * @rx_ring: ring to clean the resources from 1906 * 1907 * Free all receive software resources 1908 **/ 1909 1910static void e1000_free_rx_resources(struct e1000_adapter *adapter, 1911 struct e1000_rx_ring *rx_ring) 1912{ 1913 struct pci_dev *pdev = adapter->pdev; 1914 1915 e1000_clean_rx_ring(adapter, rx_ring); 1916 1917 vfree(rx_ring->buffer_info); 1918 rx_ring->buffer_info = NULL; 1919 1920 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 1921 rx_ring->dma); 1922 1923 rx_ring->desc = NULL; 1924} 1925 1926/** 1927 * e1000_free_all_rx_resources - Free Rx Resources for All Queues 1928 * @adapter: board private structure 1929 * 1930 * Free all receive software resources 1931 **/ 1932 1933void e1000_free_all_rx_resources(struct e1000_adapter *adapter) 1934{ 1935 int i; 1936 1937 for (i = 0; i < adapter->num_rx_queues; i++) 1938 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 1939} 1940 1941/** 1942 * e1000_clean_rx_ring - Free Rx Buffers per Queue 1943 * @adapter: board private structure 1944 * @rx_ring: ring to free buffers from 1945 **/ 1946 1947static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 1948 struct e1000_rx_ring *rx_ring) 1949{ 1950 struct e1000_hw *hw = &adapter->hw; 1951 struct e1000_buffer *buffer_info; 1952 struct pci_dev *pdev = adapter->pdev; 1953 unsigned long size; 1954 unsigned int i; 1955 1956 /* Free all the Rx ring sk_buffs */ 1957 for (i = 0; i < rx_ring->count; i++) { 1958 buffer_info = &rx_ring->buffer_info[i]; 1959 if (buffer_info->dma && 1960 adapter->clean_rx == e1000_clean_rx_irq) { 1961 dma_unmap_single(&pdev->dev, buffer_info->dma, 1962 buffer_info->length, 1963 DMA_FROM_DEVICE); 1964 } else if (buffer_info->dma && 1965 adapter->clean_rx == e1000_clean_jumbo_rx_irq) { 1966 dma_unmap_page(&pdev->dev, buffer_info->dma, 1967 buffer_info->length, 1968 DMA_FROM_DEVICE); 1969 } 1970 1971 buffer_info->dma = 0; 1972 if (buffer_info->page) { 1973 put_page(buffer_info->page); 1974 
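 /* a non-NULL page here means the jumbo Rx path never attached this
 * page to an skb, so the driver still holds the only reference;
 * drop it before forgetting the pointer below */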
buffer_info->page = NULL; 1975 } 1976 if (buffer_info->skb) { 1977 dev_kfree_skb(buffer_info->skb); 1978 buffer_info->skb = NULL; 1979 } 1980 } 1981 1982 /* there also may be some cached data from a chained receive */ 1983 if (rx_ring->rx_skb_top) { 1984 dev_kfree_skb(rx_ring->rx_skb_top); 1985 rx_ring->rx_skb_top = NULL; 1986 } 1987 1988 size = sizeof(struct e1000_buffer) * rx_ring->count; 1989 memset(rx_ring->buffer_info, 0, size); 1990 1991 /* Zero out the descriptor ring */ 1992 memset(rx_ring->desc, 0, rx_ring->size); 1993 1994 rx_ring->next_to_clean = 0; 1995 rx_ring->next_to_use = 0; 1996 1997 writel(0, hw->hw_addr + rx_ring->rdh); 1998 writel(0, hw->hw_addr + rx_ring->rdt); 1999} 2000 2001/** 2002 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues 2003 * @adapter: board private structure 2004 **/ 2005 2006static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) 2007{ 2008 int i; 2009 2010 for (i = 0; i < adapter->num_rx_queues; i++) 2011 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2012} 2013 2014/* The 82542 2.0 (revision 2) needs to have the receive unit in reset 2015 * and memory write and invalidate disabled for certain operations 2016 */ 2017static void e1000_enter_82542_rst(struct e1000_adapter *adapter) 2018{ 2019 struct e1000_hw *hw = &adapter->hw; 2020 struct net_device *netdev = adapter->netdev; 2021 u32 rctl; 2022 2023 e1000_pci_clear_mwi(hw); 2024 2025 rctl = er32(RCTL); 2026 rctl |= E1000_RCTL_RST; 2027 ew32(RCTL, rctl); 2028 E1000_WRITE_FLUSH(); 2029 mdelay(5); 2030 2031 if (netif_running(netdev)) 2032 e1000_clean_all_rx_rings(adapter); 2033} 2034 2035static void e1000_leave_82542_rst(struct e1000_adapter *adapter) 2036{ 2037 struct e1000_hw *hw = &adapter->hw; 2038 struct net_device *netdev = adapter->netdev; 2039 u32 rctl; 2040 2041 rctl = er32(RCTL); 2042 rctl &= ~E1000_RCTL_RST; 2043 ew32(RCTL, rctl); 2044 E1000_WRITE_FLUSH(); 2045 mdelay(5); 2046 2047 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) 2048 e1000_pci_set_mwi(hw); 2049 2050 if (netif_running(netdev)) { 2051 /* No need to loop, because 82542 supports only 1 queue */ 2052 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2053 e1000_configure_rx(adapter); 2054 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); 2055 } 2056} 2057 2058/** 2059 * e1000_set_mac - Change the Ethernet Address of the NIC 2060 * @netdev: network interface device structure 2061 * @p: pointer to an address structure 2062 * 2063 * Returns 0 on success, negative on failure 2064 **/ 2065 2066static int e1000_set_mac(struct net_device *netdev, void *p) 2067{ 2068 struct e1000_adapter *adapter = netdev_priv(netdev); 2069 struct e1000_hw *hw = &adapter->hw; 2070 struct sockaddr *addr = p; 2071 2072 if (!is_valid_ether_addr(addr->sa_data)) 2073 return -EADDRNOTAVAIL; 2074 2075 /* 82542 2.0 needs to be in reset to write receive address registers */ 2076 2077 if (hw->mac_type == e1000_82542_rev2_0) 2078 e1000_enter_82542_rst(adapter); 2079 2080 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2081 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); 2082 2083 e1000_rar_set(hw, hw->mac_addr, 0); 2084 2085 if (hw->mac_type == e1000_82542_rev2_0) 2086 e1000_leave_82542_rst(adapter); 2087 2088 return 0; 2089} 2090 2091/** 2092 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2093 * @netdev: network interface device structure 2094 * 2095 * The set_rx_mode entry point is called whenever the unicast or multicast 2096 * address lists or the network interface flags are 
updated. This routine is 2097 * responsible for configuring the hardware for proper unicast, multicast, 2098 * promiscuous mode, and all-multi behavior. 2099 **/ 2100 2101static void e1000_set_rx_mode(struct net_device *netdev) 2102{ 2103 struct e1000_adapter *adapter = netdev_priv(netdev); 2104 struct e1000_hw *hw = &adapter->hw; 2105 struct netdev_hw_addr *ha; 2106 bool use_uc = false; 2107 u32 rctl; 2108 u32 hash_value; 2109 int i, rar_entries = E1000_RAR_ENTRIES; 2110 int mta_reg_count = E1000_NUM_MTA_REGISTERS; 2111 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2112 2113 if (!mcarray) { 2114 e_err("memory allocation failed\n"); 2115 return; 2116 } 2117 2118 /* Check for Promiscuous and All Multicast modes */ 2119 2120 rctl = er32(RCTL); 2121 2122 if (netdev->flags & IFF_PROMISC) { 2123 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2124 rctl &= ~E1000_RCTL_VFE; 2125 } else { 2126 if (netdev->flags & IFF_ALLMULTI) 2127 rctl |= E1000_RCTL_MPE; 2128 else 2129 rctl &= ~E1000_RCTL_MPE; 2130 /* Enable VLAN filter if there is a VLAN */ 2131 if (adapter->vlgrp) 2132 rctl |= E1000_RCTL_VFE; 2133 } 2134 2135 if (netdev_uc_count(netdev) > rar_entries - 1) { 2136 rctl |= E1000_RCTL_UPE; 2137 } else if (!(netdev->flags & IFF_PROMISC)) { 2138 rctl &= ~E1000_RCTL_UPE; 2139 use_uc = true; 2140 } 2141 2142 ew32(RCTL, rctl); 2143 2144 /* 82542 2.0 needs to be in reset to write receive address registers */ 2145 2146 if (hw->mac_type == e1000_82542_rev2_0) 2147 e1000_enter_82542_rst(adapter); 2148 2149 /* load the first 14 addresses into the exact filters 1-14. Unicast 2150 * addresses take precedence to avoid disabling unicast filtering 2151 * when possible. 2152 * 2153 * RAR 0 is used for the station MAC address; 2154 * if there are not 14 addresses, go ahead and clear the filters 2155 */ 2156 i = 1; 2157 if (use_uc) 2158 netdev_for_each_uc_addr(ha, netdev) { 2159 if (i == rar_entries) 2160 break; 2161 e1000_rar_set(hw, ha->addr, i++); 2162 } 2163 2164 netdev_for_each_mc_addr(ha, netdev) { 2165 if (i == rar_entries) { 2166 /* load any remaining addresses into the hash table */ 2167 u32 hash_reg, hash_bit, mta; 2168 hash_value = e1000_hash_mc_addr(hw, ha->addr); 2169 hash_reg = (hash_value >> 5) & 0x7F; 2170 hash_bit = hash_value & 0x1F; 2171 mta = (1 << hash_bit); 2172 mcarray[hash_reg] |= mta; 2173 } else { 2174 e1000_rar_set(hw, ha->addr, i++); 2175 } 2176 } 2177 2178 for (; i < rar_entries; i++) { 2179 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); 2180 E1000_WRITE_FLUSH(); 2181 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); 2182 E1000_WRITE_FLUSH(); 2183 } 2184 2185 /* write the hash table completely, write from bottom to avoid 2186 * both stupid write combining chipsets, and flushing each write */ 2187 for (i = mta_reg_count - 1; i >= 0 ; i--) { 2188 /* 2189 * The 82544 has an erratum where writing odd 2190 * offsets overwrites the previous even offset; writing 2191 * backwards over the range solves the issue by always 2192 * writing the odd offset first 2193 */ 2194 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); 2195 } 2196 E1000_WRITE_FLUSH(); 2197 2198 if (hw->mac_type == e1000_82542_rev2_0) 2199 e1000_leave_82542_rst(adapter); 2200 2201 kfree(mcarray); 2202} 2203 2204/* Need to wait a few seconds after link up to get diagnostic information from 2205 * the phy */ 2206 2207static void e1000_update_phy_info(unsigned long data) 2208{ 2209 struct e1000_adapter *adapter = (struct e1000_adapter *)data; 2210 struct e1000_hw *hw = &adapter->hw; 2211 e1000_phy_get_info(hw,
&adapter->phy_info); 2212} 2213 2214/** 2215 * e1000_82547_tx_fifo_stall - Timer Call-back 2216 * @data: pointer to adapter cast into an unsigned long 2217 **/ 2218 2219static void e1000_82547_tx_fifo_stall(unsigned long data) 2220{ 2221 struct e1000_adapter *adapter = (struct e1000_adapter *)data; 2222 struct e1000_hw *hw = &adapter->hw; 2223 struct net_device *netdev = adapter->netdev; 2224 u32 tctl; 2225 2226 if (atomic_read(&adapter->tx_fifo_stall)) { 2227 if ((er32(TDT) == er32(TDH)) && 2228 (er32(TDFT) == er32(TDFH)) && 2229 (er32(TDFTS) == er32(TDFHS))) { 2230 tctl = er32(TCTL); 2231 ew32(TCTL, tctl & ~E1000_TCTL_EN); 2232 ew32(TDFT, adapter->tx_head_addr); 2233 ew32(TDFH, adapter->tx_head_addr); 2234 ew32(TDFTS, adapter->tx_head_addr); 2235 ew32(TDFHS, adapter->tx_head_addr); 2236 ew32(TCTL, tctl); 2237 E1000_WRITE_FLUSH(); 2238 2239 adapter->tx_fifo_head = 0; 2240 atomic_set(&adapter->tx_fifo_stall, 0); 2241 netif_wake_queue(netdev); 2242 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { 2243 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); 2244 } 2245 } 2246} 2247 2248bool e1000_has_link(struct e1000_adapter *adapter) 2249{ 2250 struct e1000_hw *hw = &adapter->hw; 2251 bool link_active = false; 2252 2253 /* get_link_status is set on LSC (link status) interrupt or 2254 * rx sequence error interrupt. get_link_status will stay 2255 * false until the e1000_check_for_link establishes link 2256 * for copper adapters ONLY 2257 */ 2258 switch (hw->media_type) { 2259 case e1000_media_type_copper: 2260 if (hw->get_link_status) { 2261 e1000_check_for_link(hw); 2262 link_active = !hw->get_link_status; 2263 } else { 2264 link_active = true; 2265 } 2266 break; 2267 case e1000_media_type_fiber: 2268 e1000_check_for_link(hw); 2269 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 2270 break; 2271 case e1000_media_type_internal_serdes: 2272 e1000_check_for_link(hw); 2273 link_active = hw->serdes_has_link; 2274 break; 2275 default: 2276 break; 2277 } 2278 2279 return link_active; 2280} 2281 2282/** 2283 * e1000_watchdog - Timer Call-back 2284 * @data: pointer to adapter cast into an unsigned long 2285 **/ 2286static void e1000_watchdog(unsigned long data) 2287{ 2288 struct e1000_adapter *adapter = (struct e1000_adapter *)data; 2289 struct e1000_hw *hw = &adapter->hw; 2290 struct net_device *netdev = adapter->netdev; 2291 struct e1000_tx_ring *txdr = adapter->tx_ring; 2292 u32 link, tctl; 2293 2294 link = e1000_has_link(adapter); 2295 if ((netif_carrier_ok(netdev)) && link) 2296 goto link_up; 2297 2298 if (link) { 2299 if (!netif_carrier_ok(netdev)) { 2300 u32 ctrl; 2301 bool txb2b = true; 2302 /* update snapshot of PHY registers on LSC */ 2303 e1000_get_speed_and_duplex(hw, 2304 &adapter->link_speed, 2305 &adapter->link_duplex); 2306 2307 ctrl = er32(CTRL); 2308 pr_info("%s NIC Link is Up %d Mbps %s, " 2309 "Flow Control: %s\n", 2310 netdev->name, 2311 adapter->link_speed, 2312 adapter->link_duplex == FULL_DUPLEX ? 2313 "Full Duplex" : "Half Duplex", 2314 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2315 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2316 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2317 E1000_CTRL_TFCE) ? "TX" : "None"))); 2318 2319 /* adjust timeout factor according to speed/duplex */ 2320 adapter->tx_timeout_factor = 1; 2321 switch (adapter->link_speed) { 2322 case SPEED_10: 2323 txb2b = false; 2324 adapter->tx_timeout_factor = 16; 2325 break; 2326 case SPEED_100: 2327 txb2b = false; 2328 /* maybe add some timeout factor ? 
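 * (tx_timeout_factor widens the hang-detection window used by
 * e1000_clean_tx_irq, which declares a hang after
 * time_stamp + tx_timeout_factor * HZ; slower links drain the
 * ring more slowly, hence the factor of 16 for 10 Mbps above)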
*/ 2329 break; 2330 } 2331 2332 /* enable transmits in the hardware */ 2333 tctl = er32(TCTL); 2334 tctl |= E1000_TCTL_EN; 2335 ew32(TCTL, tctl); 2336 2337 netif_carrier_on(netdev); 2338 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2339 mod_timer(&adapter->phy_info_timer, 2340 round_jiffies(jiffies + 2 * HZ)); 2341 adapter->smartspeed = 0; 2342 } 2343 } else { 2344 if (netif_carrier_ok(netdev)) { 2345 adapter->link_speed = 0; 2346 adapter->link_duplex = 0; 2347 pr_info("%s NIC Link is Down\n", 2348 netdev->name); 2349 netif_carrier_off(netdev); 2350 2351 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2352 mod_timer(&adapter->phy_info_timer, 2353 round_jiffies(jiffies + 2 * HZ)); 2354 } 2355 2356 e1000_smartspeed(adapter); 2357 } 2358 2359link_up: 2360 e1000_update_stats(adapter); 2361 2362 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2363 adapter->tpt_old = adapter->stats.tpt; 2364 hw->collision_delta = adapter->stats.colc - adapter->colc_old; 2365 adapter->colc_old = adapter->stats.colc; 2366 2367 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; 2368 adapter->gorcl_old = adapter->stats.gorcl; 2369 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; 2370 adapter->gotcl_old = adapter->stats.gotcl; 2371 2372 e1000_update_adaptive(hw); 2373 2374 if (!netif_carrier_ok(netdev)) { 2375 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2376 /* We've lost link, so the controller stops DMA, 2377 * but we've got queued Tx work that's never going 2378 * to get done, so reset controller to flush Tx. 2379 * (Do the reset outside of interrupt context). */ 2380 adapter->tx_timeout_count++; 2381 schedule_work(&adapter->reset_task); 2382 /* return immediately since reset is imminent */ 2383 return; 2384 } 2385 } 2386 2387 /* Simple mode for Interrupt Throttle Rate (ITR) */ 2388 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { 2389 /* 2390 * Symmetric Tx/Rx gets a reduced ITR=2000; 2391 * Total asymmetrical Tx or Rx gets ITR=8000; 2392 * everyone else is between 2000-8000. 2393 */ 2394 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; 2395 u32 dif = (adapter->gotcl > adapter->gorcl ? 2396 adapter->gotcl - adapter->gorcl : 2397 adapter->gorcl - adapter->gotcl) / 10000; 2398 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2399 2400 ew32(ITR, 1000000000 / (itr * 256)); 2401 } 2402 2403 /* Cause software interrupt to ensure rx ring is cleaned */ 2404 ew32(ICS, E1000_ICS_RXDMT0); 2405 2406 /* Force detection of hung controller every watchdog period */ 2407 adapter->detect_tx_hung = true; 2408 2409 /* Reset the timer */ 2410 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2411 mod_timer(&adapter->watchdog_timer, 2412 round_jiffies(jiffies + 2 * HZ)); 2413} 2414 2415enum latency_range { 2416 lowest_latency = 0, 2417 low_latency = 1, 2418 bulk_latency = 2, 2419 latency_invalid = 255 2420}; 2421 2422/** 2423 * e1000_update_itr - update the dynamic ITR value based on statistics 2424 * @adapter: pointer to adapter 2425 * @itr_setting: current adapter->itr 2426 * @packets: the number of packets during this measurement interval 2427 * @bytes: the number of bytes during this measurement interval 2428 * 2429 * Stores a new ITR value based on packets and byte 2430 * counts during the last interrupt. The advantage of per interrupt 2431 * computation is faster updates and more accurate ITR for the current 2432 * traffic pattern. 
Constants in this function were computed 2433 * based on theoretical maximum wire speed and thresholds were set based 2434 * on testing data as well as attempting to minimize response time 2435 * while increasing bulk throughput. 2436 * this functionality is controlled by the InterruptThrottleRate module 2437 * parameter (see e1000_param.c) 2438 **/ 2439static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2440 u16 itr_setting, int packets, int bytes) 2441{ 2442 unsigned int retval = itr_setting; 2443 struct e1000_hw *hw = &adapter->hw; 2444 2445 if (unlikely(hw->mac_type < e1000_82540)) 2446 goto update_itr_done; 2447 2448 if (packets == 0) 2449 goto update_itr_done; 2450 2451 switch (itr_setting) { 2452 case lowest_latency: 2453 /* jumbo frames get bulk treatment*/ 2454 if (bytes/packets > 8000) 2455 retval = bulk_latency; 2456 else if ((packets < 5) && (bytes > 512)) 2457 retval = low_latency; 2458 break; 2459 case low_latency: /* 50 usec aka 20000 ints/s */ 2460 if (bytes > 10000) { 2461 /* jumbo frames need bulk latency setting */ 2462 if (bytes/packets > 8000) 2463 retval = bulk_latency; 2464 else if ((packets < 10) || ((bytes/packets) > 1200)) 2465 retval = bulk_latency; 2466 else if ((packets > 35)) 2467 retval = lowest_latency; 2468 } else if (bytes/packets > 2000) 2469 retval = bulk_latency; 2470 else if (packets <= 2 && bytes < 512) 2471 retval = lowest_latency; 2472 break; 2473 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2474 if (bytes > 25000) { 2475 if (packets > 35) 2476 retval = low_latency; 2477 } else if (bytes < 6000) { 2478 retval = low_latency; 2479 } 2480 break; 2481 } 2482 2483update_itr_done: 2484 return retval; 2485} 2486 2487static void e1000_set_itr(struct e1000_adapter *adapter) 2488{ 2489 struct e1000_hw *hw = &adapter->hw; 2490 u16 current_itr; 2491 u32 new_itr = adapter->itr; 2492 2493 if (unlikely(hw->mac_type < e1000_82540)) 2494 return; 2495 2496 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2497 if (unlikely(adapter->link_speed != SPEED_1000)) { 2498 current_itr = 0; 2499 new_itr = 4000; 2500 goto set_itr_now; 2501 } 2502 2503 adapter->tx_itr = e1000_update_itr(adapter, 2504 adapter->tx_itr, 2505 adapter->total_tx_packets, 2506 adapter->total_tx_bytes); 2507 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2508 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2509 adapter->tx_itr = low_latency; 2510 2511 adapter->rx_itr = e1000_update_itr(adapter, 2512 adapter->rx_itr, 2513 adapter->total_rx_packets, 2514 adapter->total_rx_bytes); 2515 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2516 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2517 adapter->rx_itr = low_latency; 2518 2519 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2520 2521 switch (current_itr) { 2522 /* counts and packets in update_itr are dependent on these numbers */ 2523 case lowest_latency: 2524 new_itr = 70000; 2525 break; 2526 case low_latency: 2527 new_itr = 20000; /* aka hwitr = ~200 */ 2528 break; 2529 case bulk_latency: 2530 new_itr = 4000; 2531 break; 2532 default: 2533 break; 2534 } 2535 2536set_itr_now: 2537 if (new_itr != adapter->itr) { 2538 /* this attempts to bias the interrupt rate towards Bulk 2539 * by adding intermediate steps when interrupt rate is 2540 * increasing */ 2541 new_itr = new_itr > adapter->itr ? 
2542 min(adapter->itr + (new_itr >> 2), new_itr) : 2543 new_itr; 2544 adapter->itr = new_itr; 2545 ew32(ITR, 1000000000 / (new_itr * 256)); 2546 } 2547} 2548 2549#define E1000_TX_FLAGS_CSUM 0x00000001 2550#define E1000_TX_FLAGS_VLAN 0x00000002 2551#define E1000_TX_FLAGS_TSO 0x00000004 2552#define E1000_TX_FLAGS_IPV4 0x00000008 2553#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 2554#define E1000_TX_FLAGS_VLAN_SHIFT 16 2555 2556static int e1000_tso(struct e1000_adapter *adapter, 2557 struct e1000_tx_ring *tx_ring, struct sk_buff *skb) 2558{ 2559 struct e1000_context_desc *context_desc; 2560 struct e1000_buffer *buffer_info; 2561 unsigned int i; 2562 u32 cmd_length = 0; 2563 u16 ipcse = 0, tucse, mss; 2564 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2565 int err; 2566 2567 if (skb_is_gso(skb)) { 2568 if (skb_header_cloned(skb)) { 2569 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2570 if (err) 2571 return err; 2572 } 2573 2574 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2575 mss = skb_shinfo(skb)->gso_size; 2576 if (skb->protocol == htons(ETH_P_IP)) { 2577 struct iphdr *iph = ip_hdr(skb); 2578 iph->tot_len = 0; 2579 iph->check = 0; 2580 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2581 iph->daddr, 0, 2582 IPPROTO_TCP, 2583 0); 2584 cmd_length = E1000_TXD_CMD_IP; 2585 ipcse = skb_transport_offset(skb) - 1; 2586 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2587 ipv6_hdr(skb)->payload_len = 0; 2588 tcp_hdr(skb)->check = 2589 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2590 &ipv6_hdr(skb)->daddr, 2591 0, IPPROTO_TCP, 0); 2592 ipcse = 0; 2593 } 2594 ipcss = skb_network_offset(skb); 2595 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 2596 tucss = skb_transport_offset(skb); 2597 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 2598 tucse = 0; 2599 2600 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 2601 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 2602 2603 i = tx_ring->next_to_use; 2604 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2605 buffer_info = &tx_ring->buffer_info[i]; 2606 2607 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2608 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2609 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 2610 context_desc->upper_setup.tcp_fields.tucss = tucss; 2611 context_desc->upper_setup.tcp_fields.tucso = tucso; 2612 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 2613 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 2614 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2615 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2616 2617 buffer_info->time_stamp = jiffies; 2618 buffer_info->next_to_watch = i; 2619 2620 if (++i == tx_ring->count) i = 0; 2621 tx_ring->next_to_use = i; 2622 2623 return true; 2624 } 2625 return false; 2626} 2627 2628static bool e1000_tx_csum(struct e1000_adapter *adapter, 2629 struct e1000_tx_ring *tx_ring, struct sk_buff *skb) 2630{ 2631 struct e1000_context_desc *context_desc; 2632 struct e1000_buffer *buffer_info; 2633 unsigned int i; 2634 u8 css; 2635 u32 cmd_len = E1000_TXD_CMD_DEXT; 2636 2637 if (skb->ip_summed != CHECKSUM_PARTIAL) 2638 return false; 2639 2640 switch (skb->protocol) { 2641 case cpu_to_be16(ETH_P_IP): 2642 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2643 cmd_len |= E1000_TXD_CMD_TCP; 2644 break; 2645 case cpu_to_be16(ETH_P_IPV6): 2646 /* XXX not handling all IPV6 headers */ 2647 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2648 cmd_len |= E1000_TXD_CMD_TCP; 2649 break; 2650 default: 2651 if 
(unlikely(net_ratelimit())) 2652 e_warn("checksum_partial proto=%x!\n", skb->protocol); 2653 break; 2654 } 2655 2656 css = skb_transport_offset(skb); 2657 2658 i = tx_ring->next_to_use; 2659 buffer_info = &tx_ring->buffer_info[i]; 2660 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2661 2662 context_desc->lower_setup.ip_config = 0; 2663 context_desc->upper_setup.tcp_fields.tucss = css; 2664 context_desc->upper_setup.tcp_fields.tucso = 2665 css + skb->csum_offset; 2666 context_desc->upper_setup.tcp_fields.tucse = 0; 2667 context_desc->tcp_seg_setup.data = 0; 2668 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 2669 2670 buffer_info->time_stamp = jiffies; 2671 buffer_info->next_to_watch = i; 2672 2673 if (unlikely(++i == tx_ring->count)) i = 0; 2674 tx_ring->next_to_use = i; 2675 2676 return true; 2677} 2678 2679#define E1000_MAX_TXD_PWR 12 2680#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) 2681 2682static int e1000_tx_map(struct e1000_adapter *adapter, 2683 struct e1000_tx_ring *tx_ring, 2684 struct sk_buff *skb, unsigned int first, 2685 unsigned int max_per_txd, unsigned int nr_frags, 2686 unsigned int mss) 2687{ 2688 struct e1000_hw *hw = &adapter->hw; 2689 struct pci_dev *pdev = adapter->pdev; 2690 struct e1000_buffer *buffer_info; 2691 unsigned int len = skb_headlen(skb); 2692 unsigned int offset = 0, size, count = 0, i; 2693 unsigned int f; 2694 2695 i = tx_ring->next_to_use; 2696 2697 while (len) { 2698 buffer_info = &tx_ring->buffer_info[i]; 2699 size = min(len, max_per_txd); 2700 /* Workaround for Controller erratum -- 2701 * descriptor for non-tso packet in a linear SKB that follows a 2702 * tso gets written back prematurely before the data is fully 2703 * DMA'd to the controller */ 2704 if (!skb->data_len && tx_ring->last_tx_tso && 2705 !skb_is_gso(skb)) { 2706 tx_ring->last_tx_tso = 0; 2707 size -= 4; 2708 } 2709 2710 /* Workaround for premature desc write-backs 2711 * in TSO mode. Append 4-byte sentinel desc */ 2712 if (unlikely(mss && !nr_frags && size == len && size > 8)) 2713 size -= 4; 2714 /* work-around for errata 10 and it applies 2715 * to all controllers in PCI-X mode 2716 * The fix is to make sure that the first descriptor of a 2717 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2718 */ 2719 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 2720 (size > 2015) && count == 0)) 2721 size = 2015; 2722 2723 /* Workaround for potential 82544 hang in PCI-X. Avoid 2724 * terminating buffers within evenly-aligned dwords. 
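 * The check below fires when the buffer is longer than 4 bytes
 * and its final byte would land at an address with bit 2 clear;
 * trimming 4 bytes pushes the tail into the next descriptor
 * instead.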
*/ 2725 if (unlikely(adapter->pcix_82544 && 2726 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2727 size > 4)) 2728 size -= 4; 2729 2730 buffer_info->length = size; 2731 /* set time_stamp *before* dma to help avoid a possible race */ 2732 buffer_info->time_stamp = jiffies; 2733 buffer_info->mapped_as_page = false; 2734 buffer_info->dma = dma_map_single(&pdev->dev, 2735 skb->data + offset, 2736 size, DMA_TO_DEVICE); 2737 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2738 goto dma_error; 2739 buffer_info->next_to_watch = i; 2740 2741 len -= size; 2742 offset += size; 2743 count++; 2744 if (len) { 2745 i++; 2746 if (unlikely(i == tx_ring->count)) 2747 i = 0; 2748 } 2749 } 2750 2751 for (f = 0; f < nr_frags; f++) { 2752 struct skb_frag_struct *frag; 2753 2754 frag = &skb_shinfo(skb)->frags[f]; 2755 len = frag->size; 2756 offset = frag->page_offset; 2757 2758 while (len) { 2759 i++; 2760 if (unlikely(i == tx_ring->count)) 2761 i = 0; 2762 2763 buffer_info = &tx_ring->buffer_info[i]; 2764 size = min(len, max_per_txd); 2765 /* Workaround for premature desc write-backs 2766 * in TSO mode. Append 4-byte sentinel desc */ 2767 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 2768 size -= 4; 2769 /* Workaround for potential 82544 hang in PCI-X. 2770 * Avoid terminating buffers within evenly-aligned 2771 * dwords. */ 2772 if (unlikely(adapter->pcix_82544 && 2773 !((unsigned long)(page_to_phys(frag->page) + offset 2774 + size - 1) & 4) && 2775 size > 4)) 2776 size -= 4; 2777 2778 buffer_info->length = size; 2779 buffer_info->time_stamp = jiffies; 2780 buffer_info->mapped_as_page = true; 2781 buffer_info->dma = dma_map_page(&pdev->dev, frag->page, 2782 offset, size, 2783 DMA_TO_DEVICE); 2784 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2785 goto dma_error; 2786 buffer_info->next_to_watch = i; 2787 2788 len -= size; 2789 offset += size; 2790 count++; 2791 } 2792 } 2793 2794 tx_ring->buffer_info[i].skb = skb; 2795 tx_ring->buffer_info[first].next_to_watch = i; 2796 2797 return count; 2798 2799dma_error: 2800 dev_err(&pdev->dev, "TX DMA map failed\n"); 2801 buffer_info->dma = 0; 2802 if (count) 2803 count--; 2804 2805 while (count--) { 2806 if (i==0) 2807 i += tx_ring->count; 2808 i--; 2809 buffer_info = &tx_ring->buffer_info[i]; 2810 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 2811 } 2812 2813 return 0; 2814} 2815 2816static void e1000_tx_queue(struct e1000_adapter *adapter, 2817 struct e1000_tx_ring *tx_ring, int tx_flags, 2818 int count) 2819{ 2820 struct e1000_hw *hw = &adapter->hw; 2821 struct e1000_tx_desc *tx_desc = NULL; 2822 struct e1000_buffer *buffer_info; 2823 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2824 unsigned int i; 2825 2826 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2827 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2828 E1000_TXD_CMD_TSE; 2829 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2830 2831 if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2832 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2833 } 2834 2835 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2836 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2837 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2838 } 2839 2840 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { 2841 txd_lower |= E1000_TXD_CMD_VLE; 2842 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 2843 } 2844 2845 i = tx_ring->next_to_use; 2846 2847 while (count--) { 2848 buffer_info = &tx_ring->buffer_info[i]; 2849 tx_desc = E1000_TX_DESC(*tx_ring, i); 2850 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 
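 /* legacy Tx descriptor layout: buffer_addr holds the DMA address,
 * lower.data packs the buffer length with the command bits gathered
 * above (IFCS, plus DEXT/DTYP_D/TSE or VLE as requested), and
 * upper.data carries the POPTS offload hints and the VLAN tag.
 * For example, a 1514-byte checksummed frame in a single buffer
 * yields lower.data = 1514 | IFCS | DEXT | DTYP_D and
 * upper.data = TXSM << 8.  The descriptor that ends the packet
 * also gets adapter->txd_cmd (EOP and friends, set up at init). */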
2851 tx_desc->lower.data = 2852 cpu_to_le32(txd_lower | buffer_info->length); 2853 tx_desc->upper.data = cpu_to_le32(txd_upper); 2854 if (unlikely(++i == tx_ring->count)) i = 0; 2855 } 2856 2857 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 2858 2859 /* Force memory writes to complete before letting h/w 2860 * know there are new descriptors to fetch. (Only 2861 * applicable for weak-ordered memory model archs, 2862 * such as IA-64). */ 2863 wmb(); 2864 2865 tx_ring->next_to_use = i; 2866 writel(i, hw->hw_addr + tx_ring->tdt); 2867 /* we need this if more than one processor can write to our tail 2868 * at a time; it synchronizes IO on IA64/Altix systems */ 2869 mmiowb(); 2870} 2871 2872/** 2873 * 82547 workaround to avoid controller hang in half-duplex environment. 2874 * The workaround is to avoid queuing a large packet that would span 2875 * the internal Tx FIFO ring boundary by notifying the stack to resend 2876 * the packet at a later time. This gives the Tx FIFO an opportunity to 2877 * flush all packets. When that occurs, we reset the Tx FIFO pointers 2878 * to the beginning of the Tx FIFO. 2879 **/ 2880 2881#define E1000_FIFO_HDR 0x10 2882#define E1000_82547_PAD_LEN 0x3E0 2883 2884static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 2885 struct sk_buff *skb) 2886{ 2887 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 2888 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; 2889 2890 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); 2891 2892 if (adapter->link_duplex != HALF_DUPLEX) 2893 goto no_fifo_stall_required; 2894 2895 if (atomic_read(&adapter->tx_fifo_stall)) 2896 return 1; 2897 2898 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 2899 atomic_set(&adapter->tx_fifo_stall, 1); 2900 return 1; 2901 } 2902 2903no_fifo_stall_required: 2904 adapter->tx_fifo_head += skb_fifo_len; 2905 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) 2906 adapter->tx_fifo_head -= adapter->tx_fifo_size; 2907 return 0; 2908} 2909 2910static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 2911{ 2912 struct e1000_adapter *adapter = netdev_priv(netdev); 2913 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 2914 2915 netif_stop_queue(netdev); 2916 /* Herbert's original patch had: 2917 * smp_mb__after_netif_stop_queue(); 2918 * but since that doesn't exist yet, just open code it. */ 2919 smp_mb(); 2920 2921 /* We need to check again in case another CPU has just 2922 * made room available. */ 2923 if (likely(E1000_DESC_UNUSED(tx_ring) < size)) 2924 return -EBUSY; 2925 2926 /* A reprieve!
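 * -- another CPU freed descriptors between the caller's space check
 * and the netif_stop_queue() above, so the queue can be restarted
 * right away; restart_queue counts how often this race is won.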
*/ 2927 netif_start_queue(netdev); 2928 ++adapter->restart_queue; 2929 return 0; 2930} 2931 2932static int e1000_maybe_stop_tx(struct net_device *netdev, 2933 struct e1000_tx_ring *tx_ring, int size) 2934{ 2935 if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) 2936 return 0; 2937 return __e1000_maybe_stop_tx(netdev, size); 2938} 2939 2940#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) 2941static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 2942 struct net_device *netdev) 2943{ 2944 struct e1000_adapter *adapter = netdev_priv(netdev); 2945 struct e1000_hw *hw = &adapter->hw; 2946 struct e1000_tx_ring *tx_ring; 2947 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 2948 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 2949 unsigned int tx_flags = 0; 2950 unsigned int len = skb_headlen(skb); 2951 unsigned int nr_frags; 2952 unsigned int mss; 2953 int count = 0; 2954 int tso; 2955 unsigned int f; 2956 2957 /* This goes back to the question of how to logically map a tx queue 2958 * to a flow. Right now, performance is impacted slightly negatively 2959 * if using multiple tx queues. If the stack breaks away from a 2960 * single qdisc implementation, we can look at this again. */ 2961 tx_ring = adapter->tx_ring; 2962 2963 if (unlikely(skb->len <= 0)) { 2964 dev_kfree_skb_any(skb); 2965 return NETDEV_TX_OK; 2966 } 2967 2968 mss = skb_shinfo(skb)->gso_size; 2969 /* The controller does a simple calculation to 2970 * make sure there is enough room in the FIFO before 2971 * initiating the DMA for each buffer. The calc is: 2972 * 4 = ceil(buffer len/mss). To make sure we don't 2973 * overrun the FIFO, adjust the max buffer len if mss 2974 * drops. */ 2975 if (mss) { 2976 u8 hdr_len; 2977 max_per_txd = min(mss << 2, max_per_txd); 2978 max_txd_pwr = fls(max_per_txd) - 1; 2979 2980 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2981 if (skb->data_len && hdr_len == len) { 2982 switch (hw->mac_type) { 2983 unsigned int pull_size; 2984 case e1000_82544: 2985 /* Make sure we have room to chop off 4 bytes, 2986 * and that the end alignment will work out to 2987 * this hardware's requirements 2988 * NOTE: this is a TSO only workaround 2989 * if end byte alignment not correct move us 2990 * into the next dword */ 2991 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) 2992 break; 2993 /* fall through */ 2994 pull_size = min((unsigned int)4, skb->data_len); 2995 if (!__pskb_pull_tail(skb, pull_size)) { 2996 e_err("__pskb_pull_tail failed.\n"); 2997 dev_kfree_skb_any(skb); 2998 return NETDEV_TX_OK; 2999 } 3000 len = skb_headlen(skb); 3001 break; 3002 default: 3003 /* do nothing */ 3004 break; 3005 } 3006 } 3007 } 3008 3009 /* reserve a descriptor for the offload context */ 3010 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 3011 count++; 3012 count++; 3013 3014 /* Controller Erratum workaround */ 3015 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3016 count++; 3017 3018 count += TXD_USE_COUNT(len, max_txd_pwr); 3019 3020 if (adapter->pcix_82544) 3021 count++; 3022 3023 /* work-around for errata 10 and it applies to all controllers 3024 * in PCI-X mode, so add one more descriptor to the count 3025 */ 3026 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 3027 (len > 2015))) 3028 count++; 3029 3030 nr_frags = skb_shinfo(skb)->nr_frags; 3031 for (f = 0; f < nr_frags; f++) 3032 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, 3033 max_txd_pwr); 3034 if (adapter->pcix_82544) 3035 count += nr_frags; 3036 3037 /* need: count + 2 desc gap to keep tail from touching 3038 * 
head, otherwise try next time */ 3039 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) 3040 return NETDEV_TX_BUSY; 3041 3042 if (unlikely(hw->mac_type == e1000_82547)) { 3043 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 3044 netif_stop_queue(netdev); 3045 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3046 mod_timer(&adapter->tx_fifo_stall_timer, 3047 jiffies + 1); 3048 return NETDEV_TX_BUSY; 3049 } 3050 } 3051 3052 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { 3053 tx_flags |= E1000_TX_FLAGS_VLAN; 3054 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 3055 } 3056 3057 first = tx_ring->next_to_use; 3058 3059 tso = e1000_tso(adapter, tx_ring, skb); 3060 if (tso < 0) { 3061 dev_kfree_skb_any(skb); 3062 return NETDEV_TX_OK; 3063 } 3064 3065 if (likely(tso)) { 3066 if (likely(hw->mac_type != e1000_82544)) 3067 tx_ring->last_tx_tso = 1; 3068 tx_flags |= E1000_TX_FLAGS_TSO; 3069 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) 3070 tx_flags |= E1000_TX_FLAGS_CSUM; 3071 3072 if (likely(skb->protocol == htons(ETH_P_IP))) 3073 tx_flags |= E1000_TX_FLAGS_IPV4; 3074 3075 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, 3076 nr_frags, mss); 3077 3078 if (count) { 3079 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3080 /* Make sure there is space in the ring for the next send. */ 3081 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3082 3083 } else { 3084 dev_kfree_skb_any(skb); 3085 tx_ring->buffer_info[first].time_stamp = 0; 3086 tx_ring->next_to_use = first; 3087 } 3088 3089 return NETDEV_TX_OK; 3090} 3091 3092/** 3093 * e1000_tx_timeout - Respond to a Tx Hang 3094 * @netdev: network interface device structure 3095 **/ 3096 3097static void e1000_tx_timeout(struct net_device *netdev) 3098{ 3099 struct e1000_adapter *adapter = netdev_priv(netdev); 3100 3101 /* Do the reset outside of interrupt context */ 3102 adapter->tx_timeout_count++; 3103 schedule_work(&adapter->reset_task); 3104} 3105 3106static void e1000_reset_task(struct work_struct *work) 3107{ 3108 struct e1000_adapter *adapter = 3109 container_of(work, struct e1000_adapter, reset_task); 3110 3111 e1000_reinit_locked(adapter); 3112} 3113 3114/** 3115 * e1000_get_stats - Get System Network Statistics 3116 * @netdev: network interface device structure 3117 * 3118 * Returns the address of the device statistics structure. 3119 * The statistics are actually updated from the timer callback. 3120 **/ 3121 3122static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 3123{ 3124 /* only return the current stats */ 3125 return &netdev->stats; 3126} 3127 3128/** 3129 * e1000_change_mtu - Change the Maximum Transfer Unit 3130 * @netdev: network interface device structure 3131 * @new_mtu: new value for maximum frame size 3132 * 3133 * Returns 0 on success, negative on failure 3134 **/ 3135 3136static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 3137{ 3138 struct e1000_adapter *adapter = netdev_priv(netdev); 3139 struct e1000_hw *hw = &adapter->hw; 3140 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3141 3142 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3143 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3144 e_err("Invalid MTU setting\n"); 3145 return -EINVAL; 3146 } 3147 3148 /* Adapter-specific max frame size limits. */ 3149 switch (hw->mac_type) { 3150 case e1000_undefined ... 
e1000_82542_rev2_1: 3151 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3152 e_err("Jumbo Frames not supported.\n"); 3153 return -EINVAL; 3154 } 3155 break; 3156 default: 3157 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ 3158 break; 3159 } 3160 3161 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 3162 msleep(1); 3163 /* e1000_down has a dependency on max_frame_size */ 3164 hw->max_frame_size = max_frame; 3165 if (netif_running(netdev)) 3166 e1000_down(adapter); 3167 3168 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3169 * means we reserve 2 more, this pushes us to allocate from the next 3170 * larger slab size. 3171 * i.e. RXBUFFER_2048 --> size-4096 slab 3172 * however with the new *_jumbo_rx* routines, jumbo receives will use 3173 * fragmented skbs */ 3174 3175 if (max_frame <= E1000_RXBUFFER_2048) 3176 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3177 else 3178#if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3179 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3180#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) 3181 adapter->rx_buffer_len = PAGE_SIZE; 3182#endif 3183 3184 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3185 if (!hw->tbi_compatibility_on && 3186 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || 3187 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3188 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3189 3190 pr_info("%s changing MTU from %d to %d\n", 3191 netdev->name, netdev->mtu, new_mtu); 3192 netdev->mtu = new_mtu; 3193 3194 if (netif_running(netdev)) 3195 e1000_up(adapter); 3196 else 3197 e1000_reset(adapter); 3198 3199 clear_bit(__E1000_RESETTING, &adapter->flags); 3200 3201 return 0; 3202} 3203 3204/** 3205 * e1000_update_stats - Update the board statistics counters 3206 * @adapter: board private structure 3207 **/ 3208 3209void e1000_update_stats(struct e1000_adapter *adapter) 3210{ 3211 struct net_device *netdev = adapter->netdev; 3212 struct e1000_hw *hw = &adapter->hw; 3213 struct pci_dev *pdev = adapter->pdev; 3214 unsigned long flags; 3215 u16 phy_tmp; 3216 3217#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3218 3219 /* 3220 * Prevent stats update while adapter is being reset, or if the pci 3221 * connection is down. 
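 * (link_speed is zeroed by the watchdog whenever carrier is lost,
 * and pci_channel_offline() reports PCI error-recovery states, so
 * both checks below are cheap)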
3222 */ 3223 if (adapter->link_speed == 0) 3224 return; 3225 if (pci_channel_offline(pdev)) 3226 return; 3227 3228 spin_lock_irqsave(&adapter->stats_lock, flags); 3229 3230 /* these counters are modified from e1000_tbi_adjust_stats, 3231 * called from the interrupt context, so they must only 3232 * be written while holding adapter->stats_lock 3233 */ 3234 3235 adapter->stats.crcerrs += er32(CRCERRS); 3236 adapter->stats.gprc += er32(GPRC); 3237 adapter->stats.gorcl += er32(GORCL); 3238 adapter->stats.gorch += er32(GORCH); 3239 adapter->stats.bprc += er32(BPRC); 3240 adapter->stats.mprc += er32(MPRC); 3241 adapter->stats.roc += er32(ROC); 3242 3243 adapter->stats.prc64 += er32(PRC64); 3244 adapter->stats.prc127 += er32(PRC127); 3245 adapter->stats.prc255 += er32(PRC255); 3246 adapter->stats.prc511 += er32(PRC511); 3247 adapter->stats.prc1023 += er32(PRC1023); 3248 adapter->stats.prc1522 += er32(PRC1522); 3249 3250 adapter->stats.symerrs += er32(SYMERRS); 3251 adapter->stats.mpc += er32(MPC); 3252 adapter->stats.scc += er32(SCC); 3253 adapter->stats.ecol += er32(ECOL); 3254 adapter->stats.mcc += er32(MCC); 3255 adapter->stats.latecol += er32(LATECOL); 3256 adapter->stats.dc += er32(DC); 3257 adapter->stats.sec += er32(SEC); 3258 adapter->stats.rlec += er32(RLEC); 3259 adapter->stats.xonrxc += er32(XONRXC); 3260 adapter->stats.xontxc += er32(XONTXC); 3261 adapter->stats.xoffrxc += er32(XOFFRXC); 3262 adapter->stats.xofftxc += er32(XOFFTXC); 3263 adapter->stats.fcruc += er32(FCRUC); 3264 adapter->stats.gptc += er32(GPTC); 3265 adapter->stats.gotcl += er32(GOTCL); 3266 adapter->stats.gotch += er32(GOTCH); 3267 adapter->stats.rnbc += er32(RNBC); 3268 adapter->stats.ruc += er32(RUC); 3269 adapter->stats.rfc += er32(RFC); 3270 adapter->stats.rjc += er32(RJC); 3271 adapter->stats.torl += er32(TORL); 3272 adapter->stats.torh += er32(TORH); 3273 adapter->stats.totl += er32(TOTL); 3274 adapter->stats.toth += er32(TOTH); 3275 adapter->stats.tpr += er32(TPR); 3276 3277 adapter->stats.ptc64 += er32(PTC64); 3278 adapter->stats.ptc127 += er32(PTC127); 3279 adapter->stats.ptc255 += er32(PTC255); 3280 adapter->stats.ptc511 += er32(PTC511); 3281 adapter->stats.ptc1023 += er32(PTC1023); 3282 adapter->stats.ptc1522 += er32(PTC1522); 3283 3284 adapter->stats.mptc += er32(MPTC); 3285 adapter->stats.bptc += er32(BPTC); 3286 3287 /* used for adaptive IFS */ 3288 3289 hw->tx_packet_delta = er32(TPT); 3290 adapter->stats.tpt += hw->tx_packet_delta; 3291 hw->collision_delta = er32(COLC); 3292 adapter->stats.colc += hw->collision_delta; 3293 3294 if (hw->mac_type >= e1000_82543) { 3295 adapter->stats.algnerrc += er32(ALGNERRC); 3296 adapter->stats.rxerrc += er32(RXERRC); 3297 adapter->stats.tncrs += er32(TNCRS); 3298 adapter->stats.cexterr += er32(CEXTERR); 3299 adapter->stats.tsctc += er32(TSCTC); 3300 adapter->stats.tsctfc += er32(TSCTFC); 3301 } 3302 3303 /* Fill out the OS statistics structure */ 3304 netdev->stats.multicast = adapter->stats.mprc; 3305 netdev->stats.collisions = adapter->stats.colc; 3306 3307 /* Rx Errors */ 3308 3309 /* RLEC on some newer hardware can be incorrect so build 3310 * our own version based on RUC and ROC */ 3311 netdev->stats.rx_errors = adapter->stats.rxerrc + 3312 adapter->stats.crcerrs + adapter->stats.algnerrc + 3313 adapter->stats.ruc + adapter->stats.roc + 3314 adapter->stats.cexterr; 3315 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; 3316 netdev->stats.rx_length_errors = adapter->stats.rlerrc; 3317 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 3318 
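 /* alignment errors are reported as frame errors, and the MAC's
 * missed-packet count (frames dropped for lack of Rx buffers)
 * becomes rx_missed_errors below */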
netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 3319 netdev->stats.rx_missed_errors = adapter->stats.mpc; 3320 3321 /* Tx Errors */ 3322 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; 3323 netdev->stats.tx_errors = adapter->stats.txerrc; 3324 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 3325 netdev->stats.tx_window_errors = adapter->stats.latecol; 3326 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 3327 if (hw->bad_tx_carr_stats_fd && 3328 adapter->link_duplex == FULL_DUPLEX) { 3329 netdev->stats.tx_carrier_errors = 0; 3330 adapter->stats.tncrs = 0; 3331 } 3332 3333 /* Tx Dropped needs to be maintained elsewhere */ 3334 3335 /* Phy Stats */ 3336 if (hw->media_type == e1000_media_type_copper) { 3337 if ((adapter->link_speed == SPEED_1000) && 3338 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3339 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3340 adapter->phy_stats.idle_errors += phy_tmp; 3341 } 3342 3343 if ((hw->mac_type <= e1000_82546) && 3344 (hw->phy_type == e1000_phy_m88) && 3345 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3346 adapter->phy_stats.receive_errors += phy_tmp; 3347 } 3348 3349 /* Management Stats */ 3350 if (hw->has_smbus) { 3351 adapter->stats.mgptc += er32(MGTPTC); 3352 adapter->stats.mgprc += er32(MGTPRC); 3353 adapter->stats.mgpdc += er32(MGTPDC); 3354 } 3355 3356 spin_unlock_irqrestore(&adapter->stats_lock, flags); 3357} 3358 3359/** 3360 * e1000_intr - Interrupt Handler 3361 * @irq: interrupt number 3362 * @data: pointer to a network interface device structure 3363 **/ 3364 3365static irqreturn_t e1000_intr(int irq, void *data) 3366{ 3367 struct net_device *netdev = data; 3368 struct e1000_adapter *adapter = netdev_priv(netdev); 3369 struct e1000_hw *hw = &adapter->hw; 3370 u32 icr = er32(ICR); 3371 3372 if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags))) 3373 return IRQ_NONE; /* Not our interrupt */ 3374 3375 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3376 hw->get_link_status = 1; 3377 /* guard against interrupt when we're going down */ 3378 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3379 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3380 } 3381 3382 /* disable interrupts, without the synchronize_irq bit */ 3383 ew32(IMC, ~0); 3384 E1000_WRITE_FLUSH(); 3385 3386 if (likely(napi_schedule_prep(&adapter->napi))) { 3387 adapter->total_tx_bytes = 0; 3388 adapter->total_tx_packets = 0; 3389 adapter->total_rx_bytes = 0; 3390 adapter->total_rx_packets = 0; 3391 __napi_schedule(&adapter->napi); 3392 } else { 3393 /* this really should not happen! 
if it does it is basically a 3394 * bug, but not a hard error, so enable ints and continue */ 3395 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3396 e1000_irq_enable(adapter); 3397 } 3398 3399 return IRQ_HANDLED; 3400} 3401 3402/** 3403 * e1000_clean - NAPI Rx polling callback 3404 * @adapter: board private structure 3405 **/ 3406static int e1000_clean(struct napi_struct *napi, int budget) 3407{ 3408 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 3409 int tx_clean_complete = 0, work_done = 0; 3410 3411 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); 3412 3413 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); 3414 3415 if (!tx_clean_complete) 3416 work_done = budget; 3417 3418 /* If budget not fully consumed, exit the polling mode */ 3419 if (work_done < budget) { 3420 if (likely(adapter->itr_setting & 3)) 3421 e1000_set_itr(adapter); 3422 napi_complete(napi); 3423 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3424 e1000_irq_enable(adapter); 3425 } 3426 3427 return work_done; 3428} 3429 3430/** 3431 * e1000_clean_tx_irq - Reclaim resources after transmit completes 3432 * @adapter: board private structure 3433 **/ 3434static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 3435 struct e1000_tx_ring *tx_ring) 3436{ 3437 struct e1000_hw *hw = &adapter->hw; 3438 struct net_device *netdev = adapter->netdev; 3439 struct e1000_tx_desc *tx_desc, *eop_desc; 3440 struct e1000_buffer *buffer_info; 3441 unsigned int i, eop; 3442 unsigned int count = 0; 3443 unsigned int total_tx_bytes=0, total_tx_packets=0; 3444 3445 i = tx_ring->next_to_clean; 3446 eop = tx_ring->buffer_info[i].next_to_watch; 3447 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3448 3449 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3450 (count < tx_ring->count)) { 3451 bool cleaned = false; 3452 for ( ; !cleaned; count++) { 3453 tx_desc = E1000_TX_DESC(*tx_ring, i); 3454 buffer_info = &tx_ring->buffer_info[i]; 3455 cleaned = (i == eop); 3456 3457 if (cleaned) { 3458 struct sk_buff *skb = buffer_info->skb; 3459 unsigned int segs, bytecount; 3460 segs = skb_shinfo(skb)->gso_segs ?: 1; 3461 /* multiply data chunks by size of headers */ 3462 bytecount = ((segs - 1) * skb_headlen(skb)) + 3463 skb->len; 3464 total_tx_packets += segs; 3465 total_tx_bytes += bytecount; 3466 } 3467 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3468 tx_desc->upper.data = 0; 3469 3470 if (unlikely(++i == tx_ring->count)) i = 0; 3471 } 3472 3473 eop = tx_ring->buffer_info[i].next_to_watch; 3474 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3475 } 3476 3477 tx_ring->next_to_clean = i; 3478 3479#define TX_WAKE_THRESHOLD 32 3480 if (unlikely(count && netif_carrier_ok(netdev) && 3481 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 3482 /* Make sure that anybody stopping the queue after this 3483 * sees the new next_to_clean. 
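 * This barrier pairs with the smp_mb() in __e1000_maybe_stop_tx():
 * either the stopping CPU sees the descriptors freed here, or this
 * CPU sees the stopped queue and wakes it below.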
3484 */ 3485 smp_mb(); 3486 3487 if (netif_queue_stopped(netdev) && 3488 !(test_bit(__E1000_DOWN, &adapter->flags))) { 3489 netif_wake_queue(netdev); 3490 ++adapter->restart_queue; 3491 } 3492 } 3493 3494 if (adapter->detect_tx_hung) { 3495 /* Detect a transmit hang in hardware, this serializes the 3496 * check with the clearing of time_stamp and movement of i */ 3497 adapter->detect_tx_hung = false; 3498 if (tx_ring->buffer_info[eop].time_stamp && 3499 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 3500 (adapter->tx_timeout_factor * HZ)) && 3501 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3502 3503 /* detected Tx unit hang */ 3504 e_err("Detected Tx Unit Hang\n" 3505 " Tx Queue <%lu>\n" 3506 " TDH <%x>\n" 3507 " TDT <%x>\n" 3508 " next_to_use <%x>\n" 3509 " next_to_clean <%x>\n" 3510 "buffer_info[next_to_clean]\n" 3511 " time_stamp <%lx>\n" 3512 " next_to_watch <%x>\n" 3513 " jiffies <%lx>\n" 3514 " next_to_watch.status <%x>\n", 3515 (unsigned long)((tx_ring - adapter->tx_ring) / 3516 sizeof(struct e1000_tx_ring)), 3517 readl(hw->hw_addr + tx_ring->tdh), 3518 readl(hw->hw_addr + tx_ring->tdt), 3519 tx_ring->next_to_use, 3520 tx_ring->next_to_clean, 3521 tx_ring->buffer_info[eop].time_stamp, 3522 eop, 3523 jiffies, 3524 eop_desc->upper.fields.status); 3525 netif_stop_queue(netdev); 3526 } 3527 } 3528 adapter->total_tx_bytes += total_tx_bytes; 3529 adapter->total_tx_packets += total_tx_packets; 3530 netdev->stats.tx_bytes += total_tx_bytes; 3531 netdev->stats.tx_packets += total_tx_packets; 3532 return (count < tx_ring->count); 3533} 3534 3535/** 3536 * e1000_rx_checksum - Receive Checksum Offload for 82543 3537 * @adapter: board private structure 3538 * @status_err: receive descriptor status and error fields 3539 * @csum: receive descriptor csum field 3540 * @sk_buff: socket buffer with received data 3541 **/ 3542 3543static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, 3544 u32 csum, struct sk_buff *skb) 3545{ 3546 struct e1000_hw *hw = &adapter->hw; 3547 u16 status = (u16)status_err; 3548 u8 errors = (u8)(status_err >> 24); 3549 skb->ip_summed = CHECKSUM_NONE; 3550 3551 /* 82543 or newer only */ 3552 if (unlikely(hw->mac_type < e1000_82543)) return; 3553 /* Ignore Checksum bit is set */ 3554 if (unlikely(status & E1000_RXD_STAT_IXSM)) return; 3555 /* TCP/UDP checksum error bit is set */ 3556 if (unlikely(errors & E1000_RXD_ERR_TCPE)) { 3557 /* let the stack verify checksum errors */ 3558 adapter->hw_csum_err++; 3559 return; 3560 } 3561 /* TCP/UDP Checksum has not been calculated */ 3562 if (!(status & E1000_RXD_STAT_TCPCS)) 3563 return; 3564 3565 /* It must be a TCP or UDP packet with a valid checksum */ 3566 if (likely(status & E1000_RXD_STAT_TCPCS)) { 3567 /* TCP checksum is good */ 3568 skb->ip_summed = CHECKSUM_UNNECESSARY; 3569 } 3570 adapter->hw_csum_good++; 3571} 3572 3573/** 3574 * e1000_consume_page - helper function 3575 **/ 3576static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, 3577 u16 length) 3578{ 3579 bi->page = NULL; 3580 skb->len += length; 3581 skb->data_len += length; 3582 skb->truesize += length; 3583} 3584 3585/** 3586 * e1000_receive_skb - helper function to handle rx indications 3587 * @adapter: board private structure 3588 * @status: descriptor status field as written by hardware 3589 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 3590 * @skb: pointer to sk_buff to be indicated to stack 3591 */ 3592static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, 3593 
__le16 vlan, struct sk_buff *skb) 3594{ 3595 if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) { 3596 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3597 le16_to_cpu(vlan) & 3598 E1000_RXD_SPC_VLAN_MASK); 3599 } else { 3600 netif_receive_skb(skb); 3601 } 3602} 3603 3604/** 3605 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy 3606 * @adapter: board private structure 3607 * @rx_ring: ring to clean 3608 * @work_done: amount of napi work completed this call 3609 * @work_to_do: max amount of work allowed for this call to do 3610 * 3611 * the return value indicates whether actual cleaning was done, there 3612 * is no guarantee that everything was cleaned 3613 */ 3614static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 3615 struct e1000_rx_ring *rx_ring, 3616 int *work_done, int work_to_do) 3617{ 3618 struct e1000_hw *hw = &adapter->hw; 3619 struct net_device *netdev = adapter->netdev; 3620 struct pci_dev *pdev = adapter->pdev; 3621 struct e1000_rx_desc *rx_desc, *next_rxd; 3622 struct e1000_buffer *buffer_info, *next_buffer; 3623 unsigned long irq_flags; 3624 u32 length; 3625 unsigned int i; 3626 int cleaned_count = 0; 3627 bool cleaned = false; 3628 unsigned int total_rx_bytes=0, total_rx_packets=0; 3629 3630 i = rx_ring->next_to_clean; 3631 rx_desc = E1000_RX_DESC(*rx_ring, i); 3632 buffer_info = &rx_ring->buffer_info[i]; 3633 3634 while (rx_desc->status & E1000_RXD_STAT_DD) { 3635 struct sk_buff *skb; 3636 u8 status; 3637 3638 if (*work_done >= work_to_do) 3639 break; 3640 (*work_done)++; 3641 3642 status = rx_desc->status; 3643 skb = buffer_info->skb; 3644 buffer_info->skb = NULL; 3645 3646 if (++i == rx_ring->count) i = 0; 3647 next_rxd = E1000_RX_DESC(*rx_ring, i); 3648 prefetch(next_rxd); 3649 3650 next_buffer = &rx_ring->buffer_info[i]; 3651 3652 cleaned = true; 3653 cleaned_count++; 3654 dma_unmap_page(&pdev->dev, buffer_info->dma, 3655 buffer_info->length, DMA_FROM_DEVICE); 3656 buffer_info->dma = 0; 3657 3658 length = le16_to_cpu(rx_desc->length); 3659 3660 /* errors is only valid for DD + EOP descriptors */ 3661 if (unlikely((status & E1000_RXD_STAT_EOP) && 3662 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { 3663 u8 last_byte = *(skb->data + length - 1); 3664 if (TBI_ACCEPT(hw, status, rx_desc->errors, length, 3665 last_byte)) { 3666 spin_lock_irqsave(&adapter->stats_lock, 3667 irq_flags); 3668 e1000_tbi_adjust_stats(hw, &adapter->stats, 3669 length, skb->data); 3670 spin_unlock_irqrestore(&adapter->stats_lock, 3671 irq_flags); 3672 length--; 3673 } else { 3674 /* recycle both page and skb */ 3675 buffer_info->skb = skb; 3676 /* an error means any chain goes out the window 3677 * too */ 3678 if (rx_ring->rx_skb_top) 3679 dev_kfree_skb(rx_ring->rx_skb_top); 3680 rx_ring->rx_skb_top = NULL; 3681 goto next_desc; 3682 } 3683 } 3684 3685#define rxtop rx_ring->rx_skb_top 3686 if (!(status & E1000_RXD_STAT_EOP)) { 3687 /* this descriptor is only the beginning (or middle) */ 3688 if (!rxtop) { 3689 /* this is the beginning of a chain */ 3690 rxtop = skb; 3691 skb_fill_page_desc(rxtop, 0, buffer_info->page, 3692 0, length); 3693 } else { 3694 /* this is the middle of a chain */ 3695 skb_fill_page_desc(rxtop, 3696 skb_shinfo(rxtop)->nr_frags, 3697 buffer_info->page, 0, length); 3698 /* re-use the skb, only consumed the page */ 3699 buffer_info->skb = skb; 3700 } 3701 e1000_consume_page(buffer_info, rxtop, length); 3702 goto next_desc; 3703 } else { 3704 if (rxtop) { 3705 /* end of the chain */ 3706 skb_fill_page_desc(rxtop, 3707 

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 */
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long irq_flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma,
			       buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			u8 last_byte = *(skb->data + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock,
						  irq_flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       irq_flags);
				length--;
			} else {
				/* recycle both page and skb */
				buffer_info->skb = skb;
				/* an error means any chain goes out the
				 * window too
				 */
				if (rx_ring->rx_skb_top)
					dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
			}
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed
				 * the page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
							    KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
						      KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		pskb_trim(skb, skb->len - 4);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		skb->protocol = eth_type_trans(skb, netdev);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
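
/* copybreak (a module parameter defined earlier in this file): packets at
 * or below this size are copied into a freshly allocated skb so the
 * original, full-sized receive buffer can be recycled in place, trading one
 * small memcpy for a buffer allocation.
 */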

/*
 * this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
static void e1000_check_copybreak(struct net_device *netdev,
				  struct e1000_buffer *buffer_info,
				  u32 length, struct sk_buff **skb)
{
	struct sk_buff *new_skb;

	if (length > copybreak)
		return;

	new_skb = netdev_alloc_skb_ip_align(netdev, length);
	if (!new_skb)
		return;

	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			e_info("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			u8 last_byte = *(skb->data + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

		/* adjust length to remove Ethernet CRC, this must be
		 * done after the TBI_ACCEPT workaround above
		 */
		length -= 4;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += length;
		total_rx_packets++;

		e1000_check_copybreak(netdev, buffer_info, length, &skb);

		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		skb->protocol = eth_type_trans(skb, netdev);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
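
/* Both buffer allocators below work around errata 23 on affected
 * controllers: a receive buffer must not cross a 64 KiB boundary in
 * physical memory.  e1000_check_64k_bound() (defined earlier in this file)
 * performs the check; illustratively, a buffer of len bytes at addr is safe
 * when
 *	(addr & ~0xffff) == ((addr + len - 1) & ~0xffff)
 * i.e. its first and last byte fall in the same 64 KiB region.
 */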

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16; /* for skb_reserve */

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			e_err("skb align check failed: %u bytes at %p\n",
			      bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				break; /* while (cleaned_count--) */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							buffer_info->length,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				put_page(buffer_info->page);
				dev_kfree_skb(skb);
				buffer_info->page = NULL;
				buffer_info->skb = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
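
/* Note the i-- above: after next_to_use is updated, the tail register (RDT)
 * is written with the index of the last descriptor actually initialized,
 * one entry behind next_to_use, rather than next_to_use itself.  The same
 * pattern closes e1000_alloc_rx_buffers() below.
 */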

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			e_err("skb align check failed: %u bytes at %p\n",
			      bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}

		/*
		 * XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			e_err("dma align check failed: %u bytes at %p\n",
			      adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}

/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}

/**
 * e1000_ioctl - handle a device ioctl
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
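
/* The magic numbers tested in the SIOCSMIIREG path below are standard MII
 * control register (PHY_CTRL) bits: 0x40 is the speed-selection MSB
 * (1000 Mb/s), 0x2000 the speed-selection LSB (100 Mb/s), and 0x100 the
 * duplex bit.  With the ethtool constants SPEED_10/100/1000 and
 * DUPLEX_HALF/FULL (0/1), a register value with 0x2000 and 0x100 set yields
 * spddplx = 100 + 1 = SPEED_100 + DUPLEX_FULL, matched by the switch in
 * e1000_set_spd_dplx() further down.
 */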

/**
 * e1000_mii_ioctl - handle MII ioctls
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	u16 spddplx;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					hw->autoneg_advertised = 0x2F;
				} else {
					if (mii_reg & 0x40)
						spddplx = SPEED_1000;
					else if (mii_reg & 0x2000)
						spddplx = SPEED_100;
					else
						spddplx = SPEED_10;
					spddplx += (mii_reg & 0x100)
						   ? DUPLEX_FULL :
						   DUPLEX_HALF;
					retval = e1000_set_spd_dplx(adapter,
								    spddplx);
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}

void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err("Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;
	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}

static void e1000_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);

		if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
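
/* The VLAN filter table (VFTA) is an array of 128 32-bit registers, one bit
 * per possible VLAN ID.  Illustration: for vid = 0x123 (291),
 *	index = (0x123 >> 5) & 0x7F = 9
 *	bit   = 1 << (0x123 & 0x1F) = 1 << 3
 * so VLAN 291 is controlled by bit 3 of VFTA[9], as computed below.
 */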

static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);
}

static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			e1000_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Fiber NICs only allow 1000 Mb/s full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		e_err("Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		e_err("Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
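
/* __e1000_shutdown() below is shared by the suspend and shutdown paths: it
 * quiesces the device, programs the wake-up control (WUC) and wake-up
 * filter (WUFC) registers from adapter->wol, and reports through
 * *enable_wake whether the caller should arm PCI wake-up.  A link-change
 * wake (E1000_WUFC_LNKC) is dropped when the link is already up.
 */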

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif
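
/* The three handlers below implement the PCI error recovery sequence: the
 * core reports a channel failure to .error_detected, resets the slot and
 * calls .slot_reset, and finally lets traffic restart through .resume
 * (e1000_io_resume here).
 */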

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.  Implementation resembles the
 * second half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

/* e1000_main.c */