Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v2.6.29-rc2 2524 lines 68 kB view raw
/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/pci.h>

#include "e1000.h"

/* Manageability (firmware) operating modes reported by the hardware. */
enum e1000_mng_mode {
        e1000_mng_mode_none = 0,
        e1000_mng_mode_asf,
        e1000_mng_mode_pt,
        e1000_mng_mode_ipmi,
        e1000_mng_mode_host_if_only
};

/* FACTPS register bit: manageability clock gating control. */
#define E1000_FACTPS_MNGCG 0x20000000

/* Intel(R) Active Management Technology signature ("IAMT" little-endian) */
#define E1000_IAMT_SIGNATURE 0x544D4149

/**
 * e1000e_get_bus_info_pcie - Get PCIe bus information
 * @hw: pointer to the HW structure
 *
 * Determines and stores the system bus information for a particular
 * network interface.  The following bus information is determined and stored:
 * bus speed, bus width, type (PCIe), and PCIe function.
 *
 * The link width is read from the PCIe capability's Link Status register;
 * the PCI function number is taken from the device STATUS register on
 * multi-function parts.  Always returns 0.
 **/
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
{
        struct e1000_bus_info *bus = &hw->bus;
        struct e1000_adapter *adapter = hw->adapter;
        u32 status;
        u16 pcie_link_status, pci_header_type, cap_offset;

        /* Without a PCIe capability the negotiated width cannot be known. */
        cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
        if (!cap_offset) {
                bus->width = e1000_bus_width_unknown;
        } else {
                pci_read_config_word(adapter->pdev,
                                     cap_offset + PCIE_LINK_STATUS,
                                     &pcie_link_status);
                /* Extract the negotiated link width field (x1/x2/x4/...). */
                bus->width = (enum e1000_bus_width)((pcie_link_status &
                                                     PCIE_LINK_WIDTH_MASK) >>
                                                    PCIE_LINK_WIDTH_SHIFT);
        }

        /* On multi-function devices the MAC reports which PCI function
         * this port is; single-function devices are always function 0.
         */
        pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
                             &pci_header_type);
        if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
                status = er32(STATUS);
                bus->func = (status & E1000_STATUS_FUNC_MASK)
                            >> E1000_STATUS_FUNC_SHIFT;
        } else {
                bus->func = 0;
        }

        return 0;
}

/**
 * e1000e_write_vfta - Write value to VLAN filter table
 * @hw: pointer to the HW structure
 * @offset: register offset in VLAN filter table
 * @value: register value written to VLAN filter table
 *
 * Writes value at the given offset in the register array which stores
 * the VLAN filter table.  The flush forces the write to reach hardware
 * before the caller proceeds.
 **/
void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
        E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
        e1e_flush();
}
112 **/ 113void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) 114{ 115 u32 i; 116 117 /* Setup the receive address */ 118 hw_dbg(hw, "Programming MAC Address into RAR[0]\n"); 119 120 e1000e_rar_set(hw, hw->mac.addr, 0); 121 122 /* Zero out the other (rar_entry_count - 1) receive addresses */ 123 hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1); 124 for (i = 1; i < rar_count; i++) { 125 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); 126 e1e_flush(); 127 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0); 128 e1e_flush(); 129 } 130} 131 132/** 133 * e1000e_rar_set - Set receive address register 134 * @hw: pointer to the HW structure 135 * @addr: pointer to the receive address 136 * @index: receive address array register 137 * 138 * Sets the receive address array register at index to the address passed 139 * in by addr. 140 **/ 141void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) 142{ 143 u32 rar_low, rar_high; 144 145 /* 146 * HW expects these in little endian so we reverse the byte order 147 * from network order (big endian) to little endian 148 */ 149 rar_low = ((u32) addr[0] | 150 ((u32) addr[1] << 8) | 151 ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); 152 153 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); 154 155 rar_high |= E1000_RAH_AV; 156 157 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); 158 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); 159} 160 161/** 162 * e1000_mta_set - Set multicast filter table address 163 * @hw: pointer to the HW structure 164 * @hash_value: determines the MTA register and bit to set 165 * 166 * The multicast table address is a register array of 32-bit registers. 167 * The hash_value is used to determine what register the bit is in, the 168 * current value is read, the new bit is OR'd in and the new value is 169 * written back into the register. 
170 **/ 171static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value) 172{ 173 u32 hash_bit, hash_reg, mta; 174 175 /* 176 * The MTA is a register array of 32-bit registers. It is 177 * treated like an array of (32*mta_reg_count) bits. We want to 178 * set bit BitArray[hash_value]. So we figure out what register 179 * the bit is in, read it, OR in the new bit, then write 180 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a 181 * mask to bits 31:5 of the hash value which gives us the 182 * register we're modifying. The hash bit within that register 183 * is determined by the lower 5 bits of the hash value. 184 */ 185 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); 186 hash_bit = hash_value & 0x1F; 187 188 mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg); 189 190 mta |= (1 << hash_bit); 191 192 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta); 193 e1e_flush(); 194} 195 196/** 197 * e1000_hash_mc_addr - Generate a multicast hash value 198 * @hw: pointer to the HW structure 199 * @mc_addr: pointer to a multicast address 200 * 201 * Generates a multicast address hash value which is used to determine 202 * the multicast filter table array address and new table value. See 203 * e1000_mta_set_generic() 204 **/ 205static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) 206{ 207 u32 hash_value, hash_mask; 208 u8 bit_shift = 0; 209 210 /* Register count multiplied by bits per register */ 211 hash_mask = (hw->mac.mta_reg_count * 32) - 1; 212 213 /* 214 * For a mc_filter_type of 0, bit_shift is the number of left-shifts 215 * where 0xFF would still fall within the hash mask. 216 */ 217 while (hash_mask >> bit_shift != 0xFF) 218 bit_shift++; 219 220 /* 221 * The portion of the address that is used for the hash table 222 * is determined by the mc_filter_type setting. 223 * The algorithm is such that there is a total of 8 bits of shifting. 
224 * The bit_shift for a mc_filter_type of 0 represents the number of 225 * left-shifts where the MSB of mc_addr[5] would still fall within 226 * the hash_mask. Case 0 does this exactly. Since there are a total 227 * of 8 bits of shifting, then mc_addr[4] will shift right the 228 * remaining number of bits. Thus 8 - bit_shift. The rest of the 229 * cases are a variation of this algorithm...essentially raising the 230 * number of bits to shift mc_addr[5] left, while still keeping the 231 * 8-bit shifting total. 232 * 233 * For example, given the following Destination MAC Address and an 234 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), 235 * we can see that the bit_shift for case 0 is 4. These are the hash 236 * values resulting from each mc_filter_type... 237 * [0] [1] [2] [3] [4] [5] 238 * 01 AA 00 12 34 56 239 * LSB MSB 240 * 241 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 242 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 243 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 244 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 245 */ 246 switch (hw->mac.mc_filter_type) { 247 default: 248 case 0: 249 break; 250 case 1: 251 bit_shift += 1; 252 break; 253 case 2: 254 bit_shift += 2; 255 break; 256 case 3: 257 bit_shift += 4; 258 break; 259 } 260 261 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | 262 (((u16) mc_addr[5]) << bit_shift))); 263 264 return hash_value; 265} 266 267/** 268 * e1000e_update_mc_addr_list_generic - Update Multicast addresses 269 * @hw: pointer to the HW structure 270 * @mc_addr_list: array of multicast addresses to program 271 * @mc_addr_count: number of multicast addresses to program 272 * @rar_used_count: the first RAR register free to program 273 * @rar_count: total number of supported Receive Address Registers 274 * 275 * Updates the Receive Address Registers and Multicast Table Array. 
/**
 * e1000e_update_mc_addr_list_generic - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @mc_addr_list: packed array of multicast addresses (ETH_ALEN bytes each)
 * @mc_addr_count: number of multicast addresses to program
 * @rar_used_count: the first RAR register free to program
 * @rar_count: total number of supported Receive Address Registers
 *
 * Updates the Receive Address Registers and Multicast Table Array.
 * The caller must have a packed mc_addr_list of multicast addresses.
 * The parameter rar_count will usually be hw->mac.rar_entry_count
 * unless there are workarounds that change this.  Addresses that do not
 * fit in the free RARs spill into the MTA hash table, which is cleared
 * first so stale hash bits are not left behind.
 **/
void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
                                        u8 *mc_addr_list, u32 mc_addr_count,
                                        u32 rar_used_count, u32 rar_count)
{
        u32 hash_value;
        u32 i;

        /*
         * Load the first set of multicast addresses into the exact
         * filters (RAR).  If there are not enough to fill the RAR
         * array, clear the filters (each RAR is a low/high register
         * pair; each write is flushed to hardware).
         */
        for (i = rar_used_count; i < rar_count; i++) {
                if (mc_addr_count) {
                        e1000e_rar_set(hw, mc_addr_list, i);
                        mc_addr_count--;
                        /* advance to the next packed 6-byte address */
                        mc_addr_list += ETH_ALEN;
                } else {
                        E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
                        e1e_flush();
                        E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
                        e1e_flush();
                }
        }

        /* Clear the old settings from the MTA */
        hw_dbg(hw, "Clearing MTA\n");
        for (i = 0; i < hw->mac.mta_reg_count; i++) {
                E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
                e1e_flush();
        }

        /* Load any remaining multicast addresses into the hash table. */
        for (; mc_addr_count > 0; mc_addr_count--) {
                hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
                hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
                e1000_mta_set(hw, hash_value);
                mc_addr_list += ETH_ALEN;
        }
}
/**
 * e1000e_clear_hw_cntrs_base - Clear base hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the base hardware counters by reading the counter registers.
 * The statistics registers are read-to-clear; the values read here are
 * intentionally discarded (temp is only a sink for the reads).
 **/
void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
{
        u32 temp;

        temp = er32(CRCERRS);
        temp = er32(SYMERRS);
        temp = er32(MPC);
        temp = er32(SCC);
        temp = er32(ECOL);
        temp = er32(MCC);
        temp = er32(LATECOL);
        temp = er32(COLC);
        temp = er32(DC);
        temp = er32(SEC);
        temp = er32(RLEC);
        temp = er32(XONRXC);
        temp = er32(XONTXC);
        temp = er32(XOFFRXC);
        temp = er32(XOFFTXC);
        temp = er32(FCRUC);
        temp = er32(GPRC);
        temp = er32(BPRC);
        temp = er32(MPRC);
        temp = er32(GPTC);
        temp = er32(GORCL);
        temp = er32(GORCH);
        temp = er32(GOTCL);
        temp = er32(GOTCH);
        temp = er32(RNBC);
        temp = er32(RUC);
        temp = er32(RFC);
        temp = er32(ROC);
        temp = er32(RJC);
        temp = er32(TORL);
        temp = er32(TORH);
        temp = er32(TOTL);
        temp = er32(TOTH);
        temp = er32(TPR);
        temp = er32(TPT);
        temp = er32(MPTC);
        temp = er32(BPTC);
}

/**
 * e1000e_check_for_copper_link - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see of the link status of the hardware has changed.  If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 *
 * Returns 0 when there is nothing to do (no pending link event, or link
 * is down), -E1000_ERR_CONFIG when speed/duplex is forced, otherwise the
 * result of the flow-control configuration.
 **/
s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
{
        struct e1000_mac_info *mac = &hw->mac;
        s32 ret_val;
        bool link;

        /*
         * We only want to go out to the PHY registers to see if Auto-Neg
         * has completed and/or if our link status has changed.  The
         * get_link_status flag is set upon receiving a Link Status
         * Change or Rx Sequence Error interrupt.
         */
        if (!mac->get_link_status)
                return 0;

        /*
         * First we want to see if the MII Status Register reports
         * link.  If so, then we want to get the current speed/duplex
         * of the PHY.
         */
        ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
        if (ret_val)
                return ret_val;

        if (!link)
                return ret_val; /* No link detected (ret_val is 0 here) */

        mac->get_link_status = 0;

        /*
         * Check if there was DownShift, must be checked
         * immediately after link-up
         */
        e1000e_check_downshift(hw);

        /*
         * If we are forcing speed/duplex, then we simply return since
         * we have already determined whether we have link or not.
         */
        if (!mac->autoneg) {
                ret_val = -E1000_ERR_CONFIG;
                return ret_val;
        }

        /*
         * Auto-Neg is enabled.  Auto Speed Detection takes care
         * of MAC speed/duplex configuration.  So we only need to
         * configure Collision Distance in the MAC.
         */
        e1000e_config_collision_dist(hw);

        /*
         * Configure Flow Control now that Auto-Neg has completed.
         * First, we need to restore the desired flow control
         * settings because we may have had to re-autoneg with a
         * different link partner.
         */
        ret_val = e1000e_config_fc_after_link_up(hw);
        if (ret_val) {
                hw_dbg(hw, "Error configuring flow control\n");
        }

        return ret_val;
}

/**
 * e1000e_check_for_fiber_link - Check for link (Fiber)
 * @hw: pointer to the HW structure
 *
 * Checks for link up on the hardware.  If link is not up and we have
 * a signal, then we need to force link up.
 **/
s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
{
        struct e1000_mac_info *mac = &hw->mac;
        u32 rxcw;
        u32 ctrl;
        u32 status;
        s32 ret_val;

        ctrl = er32(CTRL);
        status = er32(STATUS);
        rxcw = er32(RXCW);

        /*
         * If we don't have link (auto-negotiation failed or link partner
         * cannot auto-negotiate), the cable is plugged in (we have signal),
         * and our link partner is not trying to auto-negotiate with us (we
         * are receiving idles or data), we need to force link up.  We also
         * need to give auto-negotiation time to complete, in case the cable
         * was just plugged in.  The autoneg_failed flag does this.
         */
        /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
        if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
            (!(rxcw & E1000_RXCW_C))) {
                if (mac->autoneg_failed == 0) {
                        /* First failure: give autoneg one more pass. */
                        mac->autoneg_failed = 1;
                        return 0;
                }
                hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");

                /* Disable auto-negotiation in the TXCW register */
                ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

                /* Force link-up and also force full-duplex. */
                ctrl = er32(CTRL);
                ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
                ew32(CTRL, ctrl);

                /* Configure Flow Control after forcing link up. */
                ret_val = e1000e_config_fc_after_link_up(hw);
                if (ret_val) {
                        hw_dbg(hw, "Error configuring flow control\n");
                        return ret_val;
                }
        } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
                /*
                 * If we are forcing link and we are receiving /C/ ordered
                 * sets, re-enable auto-negotiation in the TXCW register
                 * and disable forced link in the Device Control register
                 * in an attempt to auto-negotiate with our link partner.
                 */
                hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
                ew32(TXCW, mac->txcw);
                ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

                mac->serdes_has_link = 1;
        }

        return 0;
}
516 **/ 517s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) 518{ 519 struct e1000_mac_info *mac = &hw->mac; 520 u32 rxcw; 521 u32 ctrl; 522 u32 status; 523 s32 ret_val; 524 525 ctrl = er32(CTRL); 526 status = er32(STATUS); 527 rxcw = er32(RXCW); 528 529 /* 530 * If we don't have link (auto-negotiation failed or link partner 531 * cannot auto-negotiate), and our link partner is not trying to 532 * auto-negotiate with us (we are receiving idles or data), 533 * we need to force link up. We also need to give auto-negotiation 534 * time to complete. 535 */ 536 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ 537 if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { 538 if (mac->autoneg_failed == 0) { 539 mac->autoneg_failed = 1; 540 return 0; 541 } 542 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); 543 544 /* Disable auto-negotiation in the TXCW register */ 545 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); 546 547 /* Force link-up and also force full-duplex. */ 548 ctrl = er32(CTRL); 549 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); 550 ew32(CTRL, ctrl); 551 552 /* Configure Flow Control after forcing link up. */ 553 ret_val = e1000e_config_fc_after_link_up(hw); 554 if (ret_val) { 555 hw_dbg(hw, "Error configuring flow control\n"); 556 return ret_val; 557 } 558 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 559 /* 560 * If we are forcing link and we are receiving /C/ ordered 561 * sets, re-enable auto-negotiation in the TXCW register 562 * and disable forced link in the Device Control register 563 * in an attempt to auto-negotiate with our link partner. 
564 */ 565 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); 566 ew32(TXCW, mac->txcw); 567 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 568 569 mac->serdes_has_link = 1; 570 } else if (!(E1000_TXCW_ANE & er32(TXCW))) { 571 /* 572 * If we force link for non-auto-negotiation switch, check 573 * link status based on MAC synchronization for internal 574 * serdes media type. 575 */ 576 /* SYNCH bit and IV bit are sticky. */ 577 udelay(10); 578 rxcw = er32(RXCW); 579 if (rxcw & E1000_RXCW_SYNCH) { 580 if (!(rxcw & E1000_RXCW_IV)) { 581 mac->serdes_has_link = true; 582 hw_dbg(hw, "SERDES: Link up - forced.\n"); 583 } 584 } else { 585 mac->serdes_has_link = false; 586 hw_dbg(hw, "SERDES: Link down - force failed.\n"); 587 } 588 } 589 590 if (E1000_TXCW_ANE & er32(TXCW)) { 591 status = er32(STATUS); 592 if (status & E1000_STATUS_LU) { 593 /* SYNCH bit and IV bit are sticky, so reread rxcw. */ 594 udelay(10); 595 rxcw = er32(RXCW); 596 if (rxcw & E1000_RXCW_SYNCH) { 597 if (!(rxcw & E1000_RXCW_IV)) { 598 mac->serdes_has_link = true; 599 hw_dbg(hw, "SERDES: Link up - autoneg " 600 "completed sucessfully.\n"); 601 } else { 602 mac->serdes_has_link = false; 603 hw_dbg(hw, "SERDES: Link down - invalid" 604 "codewords detected in autoneg.\n"); 605 } 606 } else { 607 mac->serdes_has_link = false; 608 hw_dbg(hw, "SERDES: Link down - no sync.\n"); 609 } 610 } else { 611 mac->serdes_has_link = false; 612 hw_dbg(hw, "SERDES: Link down - autoneg failed\n"); 613 } 614 } 615 616 return 0; 617} 618 619/** 620 * e1000_set_default_fc_generic - Set flow control default values 621 * @hw: pointer to the HW structure 622 * 623 * Read the EEPROM for the default values for flow control and store the 624 * values. 625 **/ 626static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) 627{ 628 s32 ret_val; 629 u16 nvm_data; 630 631 /* 632 * Read and store word 0x0F of the EEPROM. 
This word contains bits 633 * that determine the hardware's default PAUSE (flow control) mode, 634 * a bit that determines whether the HW defaults to enabling or 635 * disabling auto-negotiation, and the direction of the 636 * SW defined pins. If there is no SW over-ride of the flow 637 * control setting, then the variable hw->fc will 638 * be initialized based on a value in the EEPROM. 639 */ 640 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); 641 642 if (ret_val) { 643 hw_dbg(hw, "NVM Read Error\n"); 644 return ret_val; 645 } 646 647 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) 648 hw->fc.requested_mode = e1000_fc_none; 649 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 650 NVM_WORD0F_ASM_DIR) 651 hw->fc.requested_mode = e1000_fc_tx_pause; 652 else 653 hw->fc.requested_mode = e1000_fc_full; 654 655 return 0; 656} 657 658/** 659 * e1000e_setup_link - Setup flow control and link settings 660 * @hw: pointer to the HW structure 661 * 662 * Determines which flow control settings to use, then configures flow 663 * control. Calls the appropriate media-specific link configuration 664 * function. Assuming the adapter has a valid link partner, a valid link 665 * should be established. Assumes the hardware has previously been reset 666 * and the transmitter and receiver are not enabled. 667 **/ 668s32 e1000e_setup_link(struct e1000_hw *hw) 669{ 670 struct e1000_mac_info *mac = &hw->mac; 671 s32 ret_val; 672 673 /* 674 * In the case of the phy reset being blocked, we already have a link. 675 * We do not need to set it up again. 676 */ 677 if (e1000_check_reset_block(hw)) 678 return 0; 679 680 /* 681 * If requested flow control is set to default, set flow control 682 * based on the EEPROM flow control settings. 683 */ 684 if (hw->fc.requested_mode == e1000_fc_default) { 685 ret_val = e1000_set_default_fc_generic(hw); 686 if (ret_val) 687 return ret_val; 688 } 689 690 /* 691 * Save off the requested flow control mode for use later. 
Depending 692 * on the link partner's capabilities, we may or may not use this mode. 693 */ 694 hw->fc.current_mode = hw->fc.requested_mode; 695 696 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", 697 hw->fc.current_mode); 698 699 /* Call the necessary media_type subroutine to configure the link. */ 700 ret_val = mac->ops.setup_physical_interface(hw); 701 if (ret_val) 702 return ret_val; 703 704 /* 705 * Initialize the flow control address, type, and PAUSE timer 706 * registers to their default values. This is done even if flow 707 * control is disabled, because it does not hurt anything to 708 * initialize these registers. 709 */ 710 hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n"); 711 ew32(FCT, FLOW_CONTROL_TYPE); 712 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); 713 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); 714 715 ew32(FCTTV, hw->fc.pause_time); 716 717 return e1000e_set_fc_watermarks(hw); 718} 719 720/** 721 * e1000_commit_fc_settings_generic - Configure flow control 722 * @hw: pointer to the HW structure 723 * 724 * Write the flow control settings to the Transmit Config Word Register (TXCW) 725 * base on the flow control settings in e1000_mac_info. 726 **/ 727static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) 728{ 729 struct e1000_mac_info *mac = &hw->mac; 730 u32 txcw; 731 732 /* 733 * Check for a software override of the flow control settings, and 734 * setup the device accordingly. If auto-negotiation is enabled, then 735 * software will have to set the "PAUSE" bits to the correct value in 736 * the Transmit Config Word Register (TXCW) and re-start auto- 737 * negotiation. However, if auto-negotiation is disabled, then 738 * software will have to manually configure the two flow control enable 739 * bits in the CTRL register. 
740 * 741 * The possible values of the "fc" parameter are: 742 * 0: Flow control is completely disabled 743 * 1: Rx flow control is enabled (we can receive pause frames, 744 * but not send pause frames). 745 * 2: Tx flow control is enabled (we can send pause frames but we 746 * do not support receiving pause frames). 747 * 3: Both Rx and Tx flow control (symmetric) are enabled. 748 */ 749 switch (hw->fc.current_mode) { 750 case e1000_fc_none: 751 /* Flow control completely disabled by a software over-ride. */ 752 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); 753 break; 754 case e1000_fc_rx_pause: 755 /* 756 * Rx Flow control is enabled and Tx Flow control is disabled 757 * by a software over-ride. Since there really isn't a way to 758 * advertise that we are capable of Rx Pause ONLY, we will 759 * advertise that we support both symmetric and asymmetric Rx 760 * PAUSE. Later, we will disable the adapter's ability to send 761 * PAUSE frames. 762 */ 763 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 764 break; 765 case e1000_fc_tx_pause: 766 /* 767 * Tx Flow control is enabled, and Rx Flow control is disabled, 768 * by a software over-ride. 769 */ 770 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); 771 break; 772 case e1000_fc_full: 773 /* 774 * Flow control (both Rx and Tx) is enabled by a software 775 * over-ride. 776 */ 777 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 778 break; 779 default: 780 hw_dbg(hw, "Flow control param set incorrectly\n"); 781 return -E1000_ERR_CONFIG; 782 break; 783 } 784 785 ew32(TXCW, txcw); 786 mac->txcw = txcw; 787 788 return 0; 789} 790 791/** 792 * e1000_poll_fiber_serdes_link_generic - Poll for link up 793 * @hw: pointer to the HW structure 794 * 795 * Polls for link up by reading the status register, if link fails to come 796 * up with auto-negotiation, then the link is forced if a signal is detected. 
797 **/ 798static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) 799{ 800 struct e1000_mac_info *mac = &hw->mac; 801 u32 i, status; 802 s32 ret_val; 803 804 /* 805 * If we have a signal (the cable is plugged in, or assumed true for 806 * serdes media) then poll for a "Link-Up" indication in the Device 807 * Status Register. Time-out if a link isn't seen in 500 milliseconds 808 * seconds (Auto-negotiation should complete in less than 500 809 * milliseconds even if the other end is doing it in SW). 810 */ 811 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { 812 msleep(10); 813 status = er32(STATUS); 814 if (status & E1000_STATUS_LU) 815 break; 816 } 817 if (i == FIBER_LINK_UP_LIMIT) { 818 hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); 819 mac->autoneg_failed = 1; 820 /* 821 * AutoNeg failed to achieve a link, so we'll call 822 * mac->check_for_link. This routine will force the 823 * link up if we detect a signal. This will allow us to 824 * communicate with non-autonegotiating link partners. 825 */ 826 ret_val = mac->ops.check_for_link(hw); 827 if (ret_val) { 828 hw_dbg(hw, "Error while checking for link\n"); 829 return ret_val; 830 } 831 mac->autoneg_failed = 0; 832 } else { 833 mac->autoneg_failed = 0; 834 hw_dbg(hw, "Valid Link Found\n"); 835 } 836 837 return 0; 838} 839 840/** 841 * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes 842 * @hw: pointer to the HW structure 843 * 844 * Configures collision distance and flow control for fiber and serdes 845 * links. Upon successful setup, poll for link. 
846 **/ 847s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) 848{ 849 u32 ctrl; 850 s32 ret_val; 851 852 ctrl = er32(CTRL); 853 854 /* Take the link out of reset */ 855 ctrl &= ~E1000_CTRL_LRST; 856 857 e1000e_config_collision_dist(hw); 858 859 ret_val = e1000_commit_fc_settings_generic(hw); 860 if (ret_val) 861 return ret_val; 862 863 /* 864 * Since auto-negotiation is enabled, take the link out of reset (the 865 * link will be in reset, because we previously reset the chip). This 866 * will restart auto-negotiation. If auto-negotiation is successful 867 * then the link-up status bit will be set and the flow control enable 868 * bits (RFCE and TFCE) will be set according to their negotiated value. 869 */ 870 hw_dbg(hw, "Auto-negotiation enabled\n"); 871 872 ew32(CTRL, ctrl); 873 e1e_flush(); 874 msleep(1); 875 876 /* 877 * For these adapters, the SW definable pin 1 is set when the optics 878 * detect a signal. If we have a signal, then poll for a "Link-Up" 879 * indication. 880 */ 881 if (hw->phy.media_type == e1000_media_type_internal_serdes || 882 (er32(CTRL) & E1000_CTRL_SWDPIN1)) { 883 ret_val = e1000_poll_fiber_serdes_link_generic(hw); 884 } else { 885 hw_dbg(hw, "No signal detected\n"); 886 } 887 888 return 0; 889} 890 891/** 892 * e1000e_config_collision_dist - Configure collision distance 893 * @hw: pointer to the HW structure 894 * 895 * Configures the collision distance to the default value and is used 896 * during link setup. Currently no func pointer exists and all 897 * implementations are handled in the generic version of this function. 
898 **/ 899void e1000e_config_collision_dist(struct e1000_hw *hw) 900{ 901 u32 tctl; 902 903 tctl = er32(TCTL); 904 905 tctl &= ~E1000_TCTL_COLD; 906 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; 907 908 ew32(TCTL, tctl); 909 e1e_flush(); 910} 911 912/** 913 * e1000e_set_fc_watermarks - Set flow control high/low watermarks 914 * @hw: pointer to the HW structure 915 * 916 * Sets the flow control high/low threshold (watermark) registers. If 917 * flow control XON frame transmission is enabled, then set XON frame 918 * transmission as well. 919 **/ 920s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) 921{ 922 u32 fcrtl = 0, fcrth = 0; 923 924 /* 925 * Set the flow control receive threshold registers. Normally, 926 * these registers will be set to a default threshold that may be 927 * adjusted later by the driver's runtime code. However, if the 928 * ability to transmit pause frames is not enabled, then these 929 * registers will be set to 0. 930 */ 931 if (hw->fc.current_mode & e1000_fc_tx_pause) { 932 /* 933 * We need to set up the Receive Threshold high and low water 934 * marks as well as (optionally) enabling the transmission of 935 * XON frames. 936 */ 937 fcrtl = hw->fc.low_water; 938 fcrtl |= E1000_FCRTL_XONE; 939 fcrth = hw->fc.high_water; 940 } 941 ew32(FCRTL, fcrtl); 942 ew32(FCRTH, fcrth); 943 944 return 0; 945} 946 947/** 948 * e1000e_force_mac_fc - Force the MAC's flow control settings 949 * @hw: pointer to the HW structure 950 * 951 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the 952 * device control register to reflect the adapter settings. TFCE and RFCE 953 * need to be explicitly set by software when a copper PHY is used because 954 * autonegotiation is managed by the PHY rather than the MAC. Software must 955 * also configure these bits when link is forced on a fiber connection. 
956 **/ 957s32 e1000e_force_mac_fc(struct e1000_hw *hw) 958{ 959 u32 ctrl; 960 961 ctrl = er32(CTRL); 962 963 /* 964 * Because we didn't get link via the internal auto-negotiation 965 * mechanism (we either forced link or we got link via PHY 966 * auto-neg), we have to manually enable/disable transmit an 967 * receive flow control. 968 * 969 * The "Case" statement below enables/disable flow control 970 * according to the "hw->fc.current_mode" parameter. 971 * 972 * The possible values of the "fc" parameter are: 973 * 0: Flow control is completely disabled 974 * 1: Rx flow control is enabled (we can receive pause 975 * frames but not send pause frames). 976 * 2: Tx flow control is enabled (we can send pause frames 977 * frames but we do not receive pause frames). 978 * 3: Both Rx and Tx flow control (symmetric) is enabled. 979 * other: No other values should be possible at this point. 980 */ 981 hw_dbg(hw, "hw->fc.current_mode = %u\n", hw->fc.current_mode); 982 983 switch (hw->fc.current_mode) { 984 case e1000_fc_none: 985 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); 986 break; 987 case e1000_fc_rx_pause: 988 ctrl &= (~E1000_CTRL_TFCE); 989 ctrl |= E1000_CTRL_RFCE; 990 break; 991 case e1000_fc_tx_pause: 992 ctrl &= (~E1000_CTRL_RFCE); 993 ctrl |= E1000_CTRL_TFCE; 994 break; 995 case e1000_fc_full: 996 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); 997 break; 998 default: 999 hw_dbg(hw, "Flow control param set incorrectly\n"); 1000 return -E1000_ERR_CONFIG; 1001 } 1002 1003 ew32(CTRL, ctrl); 1004 1005 return 0; 1006} 1007 1008/** 1009 * e1000e_config_fc_after_link_up - Configures flow control after link 1010 * @hw: pointer to the HW structure 1011 * 1012 * Checks the status of auto-negotiation after link up to ensure that the 1013 * speed and duplex were not forced. If the link needed to be forced, then 1014 * flow control needs to be forced also. If auto-negotiation is enabled 1015 * and did not fail, then we configure flow control based on our link 1016 * partner. 
 **/
s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/*
	 * Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_fiber ||
		    hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = e1000e_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = e1000e_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg(hw, "Error forcing flow control settings\n");
		return ret_val;
	}

	/*
	 * Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/*
		 * Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
		if (ret_val)
			return ret_val;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			/* Not an error; caller simply has nothing to apply */
			hw_dbg(hw, "Copper PHY and Auto Neg "
				 "has not completed.\n");
			return ret_val;
		}

		/*
		 * The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
		if (ret_val)
			return ret_val;

		/*
		 * Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | E1000_fc_full
		 *
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/*
			 * Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg(hw, "Flow Control = FULL.\r\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg(hw, "Flow Control = "
					 "RX PAUSE frames only.\r\n");
			}
		}
		/*
		 * For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			  (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			  (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			  (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n");
		}
		/*
		 * For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n");
		} else {
			/*
			 * Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg(hw, "Flow Control = NONE.\r\n");
		}

		/*
		 * Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg(hw, "Error getting link speed and duplex\n");
			return ret_val;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/*
		 * Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = e1000e_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg(hw, "Error forcing flow control settings\n");
			return ret_val;
		}
	}

	return 0;
}

/**
 * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Read the status register for the current speed/duplex and store the current
 * speed and duplex for copper connections.
 **/
s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
{
	u32 status;

	/* Current speed and duplex are latched in the MAC STATUS register */
	status = er32(STATUS);
	if (status & E1000_STATUS_SPEED_1000) {
		*speed = SPEED_1000;
		hw_dbg(hw, "1000 Mbs, ");
	} else if (status & E1000_STATUS_SPEED_100) {
		*speed = SPEED_100;
		hw_dbg(hw, "100 Mbs, ");
	} else {
		*speed = SPEED_10;
		hw_dbg(hw, "10 Mbs, ");
	}

	if (status & E1000_STATUS_FD) {
		*duplex = FULL_DUPLEX;
		hw_dbg(hw, "Full Duplex\n");
	} else {
		*duplex = HALF_DUPLEX;
		hw_dbg(hw, "Half Duplex\n");
	}

	return 0;
}

/**
 * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Sets the speed and duplex to gigabit full duplex (the only possible option)
 * for fiber/serdes links.
 **/
s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
{
	*speed = SPEED_1000;
	*duplex = FULL_DUPLEX;

	return 0;
}

/**
 * e1000e_get_hw_semaphore - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 **/
s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore (SMBI clear means it is free) */
	while (i < timeout) {
		swsm = er32(SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
		return -E1000_ERR_NVM;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched (read back to verify) */
		if (er32(SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000e_put_hw_semaphore(hw);
		hw_dbg(hw, "Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}

/**
 * e1000e_put_hw_semaphore - Release hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Release hardware semaphore used to access the PHY or NVM
 **/
void e1000e_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	/* Clear both the SW (SMBI) and FW (SWESMBI) semaphore bits */
	swsm = er32(SWSM);
	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
	ew32(SWSM, swsm);
}

/**
 * e1000e_get_auto_rd_done - Check for auto read completion
 * @hw: pointer to the HW structure
 *
 * Check EEPROM for Auto Read done bit.
 **/
s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;

	/* Poll (~1 ms intervals) until HW signals the EEPROM auto-read done */
	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (er32(EECD) & E1000_EECD_AUTO_RD)
			break;
		msleep(1);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
		return -E1000_ERR_RESET;
	}

	return 0;
}

/**
 * e1000e_valid_led_default - Verify a valid default LED config
 * @hw: pointer to the HW structure
 * @data: pointer to the NVM (EEPROM)
 *
 * Read the EEPROM for the current default LED configuration.  If the
 * LED configuration is not valid, set to a valid LED configuration.
 **/
s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	/* All-zeros and all-ones words are reserved; use the default config */
	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
		*data = ID_LED_DEFAULT;

	return 0;
}

/**
 * e1000e_id_led_init - Initialize the LEDCTL "on"/"off" register images
 * @hw: pointer to the HW structure
 *
 * Reads the LED ID configuration word from the NVM and derives the
 * ledctl_mode1 ("off") and ledctl_mode2 ("on") register values from the
 * current LEDCTL contents, which are also saved as ledctl_default.
 **/
s32 e1000e_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	mac->ledctl_default = er32(LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	/*
	 * Each of the four LEDs occupies a 4-bit nibble in the NVM word
	 * (hence i << 2) and an 8-bit mode field in LEDCTL (hence i << 3).
	 */
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

	return 0;
}

/**
 * e1000e_cleanup_led_generic - Set LED config to default operation
 * @hw: pointer to the HW structure
 *
 * Remove the current LED configuration and set the LED configuration
 * to the default value, saved from the EEPROM.
 **/
s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
{
	ew32(LEDCTL, hw->mac.ledctl_default);
	return 0;
}

/**
 * e1000e_blink_led - Blink LED
 * @hw: pointer to the HW structure
 *
 * Blink the LEDs which are set to be on.
 **/
s32 e1000e_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/*
		 * set the blink bit for each LED that's "on" (0x0E)
		 * in ledctl_mode2
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 4; i++)
			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
			    E1000_LEDCTL_MODE_LED_ON)
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
						 (i * 8));
	}

	ew32(LEDCTL, ledctl_blink);

	return 0;
}

/**
 * e1000e_led_on_generic - Turn LED on
 * @hw: pointer to the HW structure
 *
 * Turn LED on.
1485 **/ 1486s32 e1000e_led_on_generic(struct e1000_hw *hw) 1487{ 1488 u32 ctrl; 1489 1490 switch (hw->phy.media_type) { 1491 case e1000_media_type_fiber: 1492 ctrl = er32(CTRL); 1493 ctrl &= ~E1000_CTRL_SWDPIN0; 1494 ctrl |= E1000_CTRL_SWDPIO0; 1495 ew32(CTRL, ctrl); 1496 break; 1497 case e1000_media_type_copper: 1498 ew32(LEDCTL, hw->mac.ledctl_mode2); 1499 break; 1500 default: 1501 break; 1502 } 1503 1504 return 0; 1505} 1506 1507/** 1508 * e1000e_led_off_generic - Turn LED off 1509 * @hw: pointer to the HW structure 1510 * 1511 * Turn LED off. 1512 **/ 1513s32 e1000e_led_off_generic(struct e1000_hw *hw) 1514{ 1515 u32 ctrl; 1516 1517 switch (hw->phy.media_type) { 1518 case e1000_media_type_fiber: 1519 ctrl = er32(CTRL); 1520 ctrl |= E1000_CTRL_SWDPIN0; 1521 ctrl |= E1000_CTRL_SWDPIO0; 1522 ew32(CTRL, ctrl); 1523 break; 1524 case e1000_media_type_copper: 1525 ew32(LEDCTL, hw->mac.ledctl_mode1); 1526 break; 1527 default: 1528 break; 1529 } 1530 1531 return 0; 1532} 1533 1534/** 1535 * e1000e_set_pcie_no_snoop - Set PCI-express capabilities 1536 * @hw: pointer to the HW structure 1537 * @no_snoop: bitmap of snoop events 1538 * 1539 * Set the PCI-express register to snoop for events enabled in 'no_snoop'. 1540 **/ 1541void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) 1542{ 1543 u32 gcr; 1544 1545 if (no_snoop) { 1546 gcr = er32(GCR); 1547 gcr &= ~(PCIE_NO_SNOOP_ALL); 1548 gcr |= no_snoop; 1549 ew32(GCR, gcr); 1550 } 1551} 1552 1553/** 1554 * e1000e_disable_pcie_master - Disables PCI-express master access 1555 * @hw: pointer to the HW structure 1556 * 1557 * Returns 0 if successful, else returns -10 1558 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused 1559 * the master requests to be disabled. 1560 * 1561 * Disables PCI-Express master access and verifies there are no pending 1562 * requests. 
 **/
s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;

	/* Ask the hardware to stop issuing new master (DMA) requests */
	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	ew32(CTRL, ctrl);

	/* Wait for any in-flight master requests to drain */
	while (timeout) {
		if (!(er32(STATUS) &
		      E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg(hw, "Master requests are pending.\n");
		return -E1000_ERR_MASTER_REQUESTS_PENDING;
	}

	return 0;
}

/**
 * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
 * @hw: pointer to the HW structure
 *
 * Reset the Adaptive Interframe Spacing throttle to default values.
 **/
void e1000e_reset_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	mac->current_ifs_val = 0;
	mac->ifs_min_val = IFS_MIN;
	mac->ifs_max_val = IFS_MAX;
	mac->ifs_step_size = IFS_STEP;
	mac->ifs_ratio = IFS_RATIO;

	/* Throttling off until e1000e_update_adaptive() re-enables it */
	mac->in_ifs_mode = 0;
	ew32(AIT, 0);
}

/**
 * e1000e_update_adaptive - Update Adaptive Interframe Spacing
 * @hw: pointer to the HW structure
 *
 * Update the Adaptive Interframe Spacing Throttle value based on the
 * time between transmitted packets and time between collisions.
 **/
void e1000e_update_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	/* Collision rate (scaled by ifs_ratio) exceeds the transmit rate */
	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
			mac->in_ifs_mode = 1;
			/* Widen the interframe spacing, up to ifs_max_val */
			if (mac->current_ifs_val < mac->ifs_max_val) {
				if (!mac->current_ifs_val)
					mac->current_ifs_val = mac->ifs_min_val;
				else
					mac->current_ifs_val +=
						mac->ifs_step_size;
				ew32(AIT, mac->current_ifs_val);
			}
		}
	} else {
		/* Collisions subsided - drop back to no throttling */
		if (mac->in_ifs_mode &&
		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
			mac->current_ifs_val = 0;
			mac->in_ifs_mode = 0;
			ew32(AIT, 0);
		}
	}
}

/**
 * e1000_raise_eec_clk - Raise EEPROM clock
 * @hw: pointer to the HW structure
 * @eecd: pointer to the EEPROM
 *
 * Enable/Raise the EEPROM clock bit.
 **/
static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd | E1000_EECD_SK;
	ew32(EECD, *eecd);
	e1e_flush();
	udelay(hw->nvm.delay_usec);
}

/**
 * e1000_lower_eec_clk - Lower EEPROM clock
 * @hw: pointer to the HW structure
 * @eecd: pointer to the EEPROM
 *
 * Clear/Lower the EEPROM clock bit.
 **/
static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd & ~E1000_EECD_SK;
	ew32(EECD, *eecd);
	e1e_flush();
	udelay(hw->nvm.delay_usec);
}

/**
 * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
 * @hw: pointer to the HW structure
 * @data: data to send to the EEPROM
 * @count: number of bits to shift out
 *
 * We need to shift 'count' bits out to the EEPROM.  So, the value in the
 * "data" parameter will be shifted out to the EEPROM one bit at a time.
 * In order to do this, "data" must be broken down into bits.
 **/
static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u32 mask;

	/* Shift out starting with the most significant of the 'count' bits */
	mask = 0x01 << (count - 1);
	if (nvm->type == e1000_nvm_eeprom_spi)
		eecd |= E1000_EECD_DO;

	do {
		eecd &= ~E1000_EECD_DI;

		if (data & mask)
			eecd |= E1000_EECD_DI;

		ew32(EECD, eecd);
		e1e_flush();

		udelay(nvm->delay_usec);

		/* Pulse the clock to latch the data bit into the EEPROM */
		e1000_raise_eec_clk(hw, &eecd);
		e1000_lower_eec_clk(hw, &eecd);

		mask >>= 1;
	} while (mask);

	/* Leave the data-in line low when done */
	eecd &= ~E1000_EECD_DI;
	ew32(EECD, eecd);
}

/**
 * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
 * @hw: pointer to the HW structure
 * @count: number of bits to shift in
 *
 * In order to read a register from the EEPROM, we need to shift 'count' bits
 * in from the EEPROM.  Bits are "shifted in" by raising the clock input to
 * the EEPROM (setting the SK bit), and then reading the value of the data out
 * "DO" bit.  During this "shifting in" process the data in "DI" bit should
 * always be clear.
 **/
static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
{
	u32 eecd;
	u32 i;
	u16 data;

	eecd = er32(EECD);

	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
	data = 0;

	/* Each clock pulse shifts one bit, MSB first */
	for (i = 0; i < count; i++) {
		data <<= 1;
		e1000_raise_eec_clk(hw, &eecd);

		eecd = er32(EECD);

		eecd &= ~E1000_EECD_DI;
		if (eecd & E1000_EECD_DO)
			data |= 1;

		e1000_lower_eec_clk(hw, &eecd);
	}

	return data;
}

/**
 * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
 * @hw: pointer to the HW structure
 * @ee_reg: EEPROM flag for polling
 *
 * Polls the EEPROM status bit for either read or write completion based
 * upon the value of 'ee_reg'.
 **/
s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
{
	u32 attempts = 100000;
	u32 i, reg = 0;

	/* Poll the DONE bit of either EERD (read) or EEWR (write) */
	for (i = 0; i < attempts; i++) {
		if (ee_reg == E1000_NVM_POLL_READ)
			reg = er32(EERD);
		else
			reg = er32(EEWR);

		if (reg & E1000_NVM_RW_REG_DONE)
			return 0;

		udelay(5);
	}

	return -E1000_ERR_NVM;
}

/**
 * e1000e_acquire_nvm - Generic request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -E1000_ERR_NVM (-1).
 **/
s32 e1000e_acquire_nvm(struct e1000_hw *hw)
{
	u32 eecd = er32(EECD);
	s32 timeout = E1000_NVM_GRANT_ATTEMPTS;

	/* Request access and wait for the hardware to grant it */
	ew32(EECD, eecd | E1000_EECD_REQ);
	eecd = er32(EECD);

	while (timeout) {
		if (eecd & E1000_EECD_GNT)
			break;
		udelay(5);
		eecd = er32(EECD);
		timeout--;
	}

	if (!timeout) {
		/* Grant never arrived - withdraw the request */
		eecd &= ~E1000_EECD_REQ;
		ew32(EECD, eecd);
		hw_dbg(hw, "Could not acquire NVM grant\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}

/**
 * e1000_standby_nvm - Return EEPROM to standby state
 * @hw: pointer to the HW structure
 *
 * Return the EEPROM to a standby state.
 **/
static void e1000_standby_nvm(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);

	if (nvm->type == e1000_nvm_eeprom_spi) {
		/* Toggle CS to flush commands */
		eecd |= E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		udelay(nvm->delay_usec);
		eecd &= ~E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		udelay(nvm->delay_usec);
	}
}

/**
 * e1000_stop_nvm - Terminate EEPROM command
 * @hw: pointer to the HW structure
 *
 * Terminates the current command by inverting the EEPROM's chip select pin.
 **/
static void e1000_stop_nvm(struct e1000_hw *hw)
{
	u32 eecd;

	eecd = er32(EECD);
	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
		/* Pull CS high */
		eecd |= E1000_EECD_CS;
		e1000_lower_eec_clk(hw, &eecd);
	}
}

/**
 * e1000e_release_nvm - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit.
 **/
void e1000e_release_nvm(struct e1000_hw *hw)
{
	u32 eecd;

	e1000_stop_nvm(hw);

	/* Dropping REQ surrenders the grant obtained in e1000e_acquire_nvm */
	eecd = er32(EECD);
	eecd &= ~E1000_EECD_REQ;
	ew32(EECD, eecd);
}

/**
 * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
 * @hw: pointer to the HW structure
 *
 * Setups the EEPROM for reading and writing.
 **/
static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u16 timeout = 0;
	u8 spi_stat_reg;

	if (nvm->type == e1000_nvm_eeprom_spi) {
		/* Clear SK and CS */
		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
		ew32(EECD, eecd);
		udelay(1);
		timeout = NVM_MAX_RETRY_SPI;

		/*
		 * Read "Status Register" repeatedly until the LSB is cleared.
		 * The EEPROM will signal that the command has been completed
		 * by clearing bit 0 of the internal status register.  If it's
		 * not cleared within 'timeout', then error out.
		 */
		while (timeout) {
			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
						 hw->nvm.opcode_bits);
			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
				break;

			udelay(5);
			e1000_standby_nvm(hw);
			timeout--;
		}

		if (!timeout) {
			hw_dbg(hw, "SPI NVM Status error\n");
			return -E1000_ERR_NVM;
		}
	}

	return 0;
}

/**
 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of words to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 **/
s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, eerd = 0;
	s32 ret_val = 0;

	/*
	 * A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	for (i = 0; i < words; i++) {
		/* Start a read of word (offset + i), then wait for DONE */
		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
		       E1000_NVM_RW_REG_START;

		ew32(EERD, eerd);
		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
		if (ret_val)
			break;

		data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
	}

	return ret_val;
}

/**
 * e1000e_write_nvm_spi - Write to EEPROM using SPI
 * @hw: pointer to the HW structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * Writes data to EEPROM at offset using SPI interface.
 *
 * If e1000e_update_nvm_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	s32 ret_val;
	u16 widx = 0;

	/*
	 * A check for invalid values:  offset too large, too many words,
	 * and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	ret_val = nvm->ops.acquire_nvm(hw);
	if (ret_val)
		return ret_val;

	msleep(10);

	while (widx < words) {
		u8 write_opcode = NVM_WRITE_OPCODE_SPI;

		ret_val = e1000_ready_nvm_eeprom(hw);
		if (ret_val) {
			nvm->ops.release_nvm(hw);
			return ret_val;
		}

		e1000_standby_nvm(hw);

		/* Send the WRITE ENABLE command (8 bit opcode) */
		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
					 nvm->opcode_bits);

		e1000_standby_nvm(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((nvm->address_bits == 8) && (offset >= 128))
			write_opcode |= NVM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
					 nvm->address_bits);

		/* Loop to allow for up to whole page write of eeprom */
		while (widx < words) {
			u16 word_out = data[widx];
			/* Byte-swap: the EEPROM stores the low byte first */
			word_out = (word_out >> 8) | (word_out << 8);
			e1000_shift_out_eec_bits(hw, word_out, 16);
			widx++;

			/* Page boundary reached - restart the write command */
			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
				e1000_standby_nvm(hw);
				break;
			}
		}
	}

	msleep(10);
	nvm->ops.release_nvm(hw);
	return 0;
}

/**
 * e1000e_read_mac_addr - Read device MAC address
 * @hw: pointer to the HW structure
 *
 * Reads the device MAC address from the EEPROM and stores the value.
 * Since devices with two ports use the same EEPROM, we increment the
 * last bit in the MAC address for the second port.
 **/
s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 offset, nvm_data, i;
	u16 mac_addr_offset = 0;

	if (hw->mac.type == e1000_82571) {
		/* Check for an alternate MAC address.  An alternate MAC
		 * address can be setup by pre-boot software and must be
		 * treated like a permanent address and must override the
		 * actual permanent MAC address.*/
		ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
					 &mac_addr_offset);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error\n");
			return ret_val;
		}
		/* 0xFFFF (erased NVM) means no alternate address present */
		if (mac_addr_offset == 0xFFFF)
			mac_addr_offset = 0;

		if (mac_addr_offset) {
			/* Each port's address occupies ETH_ALEN/2 NVM words */
			if (hw->bus.func == E1000_FUNC_1)
				mac_addr_offset += ETH_ALEN/sizeof(u16);

			/* make sure we have a valid mac address here
			 * before using it */
			ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
						 &nvm_data);
			if (ret_val) {
				hw_dbg(hw, "NVM Read Error\n");
				return ret_val;
			}
			/* low bit of the first octet set (multicast I/G bit)
			 * means the stored address is not a valid unicast
			 * address - ignore it */
			if (nvm_data & 0x0001)
				mac_addr_offset = 0;
		}

		if (mac_addr_offset)
			hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
	}

	/* Each NVM word holds two address bytes, low byte first */
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = mac_addr_offset + (i >> 1);
		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error\n");
			return ret_val;
		}
		hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
		hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
	}

	/* Flip last bit of mac address if we're on second port */
	if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1)
		hw->mac.perm_addr[5] ^= 1;

	for (i = 0; i < ETH_ALEN; i++)
		hw->mac.addr[i] = hw->mac.perm_addr[i];

	return 0;
}

/**
 * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies
that the sum of the EEPROM is equal to 0xBABA. 2116 **/ 2117s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) 2118{ 2119 s32 ret_val; 2120 u16 checksum = 0; 2121 u16 i, nvm_data; 2122 2123 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 2124 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); 2125 if (ret_val) { 2126 hw_dbg(hw, "NVM Read Error\n"); 2127 return ret_val; 2128 } 2129 checksum += nvm_data; 2130 } 2131 2132 if (checksum != (u16) NVM_SUM) { 2133 hw_dbg(hw, "NVM Checksum Invalid\n"); 2134 return -E1000_ERR_NVM; 2135 } 2136 2137 return 0; 2138} 2139 2140/** 2141 * e1000e_update_nvm_checksum_generic - Update EEPROM checksum 2142 * @hw: pointer to the HW structure 2143 * 2144 * Updates the EEPROM checksum by reading/adding each word of the EEPROM 2145 * up to the checksum. Then calculates the EEPROM checksum and writes the 2146 * value to the EEPROM. 2147 **/ 2148s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) 2149{ 2150 s32 ret_val; 2151 u16 checksum = 0; 2152 u16 i, nvm_data; 2153 2154 for (i = 0; i < NVM_CHECKSUM_REG; i++) { 2155 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); 2156 if (ret_val) { 2157 hw_dbg(hw, "NVM Read Error while updating checksum.\n"); 2158 return ret_val; 2159 } 2160 checksum += nvm_data; 2161 } 2162 checksum = (u16) NVM_SUM - checksum; 2163 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); 2164 if (ret_val) 2165 hw_dbg(hw, "NVM Write Error while updating checksum.\n"); 2166 2167 return ret_val; 2168} 2169 2170/** 2171 * e1000e_reload_nvm - Reloads EEPROM 2172 * @hw: pointer to the HW structure 2173 * 2174 * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the 2175 * extended control register. 
2176 **/ 2177void e1000e_reload_nvm(struct e1000_hw *hw) 2178{ 2179 u32 ctrl_ext; 2180 2181 udelay(10); 2182 ctrl_ext = er32(CTRL_EXT); 2183 ctrl_ext |= E1000_CTRL_EXT_EE_RST; 2184 ew32(CTRL_EXT, ctrl_ext); 2185 e1e_flush(); 2186} 2187 2188/** 2189 * e1000_calculate_checksum - Calculate checksum for buffer 2190 * @buffer: pointer to EEPROM 2191 * @length: size of EEPROM to calculate a checksum for 2192 * 2193 * Calculates the checksum for some buffer on a specified length. The 2194 * checksum calculated is returned. 2195 **/ 2196static u8 e1000_calculate_checksum(u8 *buffer, u32 length) 2197{ 2198 u32 i; 2199 u8 sum = 0; 2200 2201 if (!buffer) 2202 return 0; 2203 2204 for (i = 0; i < length; i++) 2205 sum += buffer[i]; 2206 2207 return (u8) (0 - sum); 2208} 2209 2210/** 2211 * e1000_mng_enable_host_if - Checks host interface is enabled 2212 * @hw: pointer to the HW structure 2213 * 2214 * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND 2215 * 2216 * This function checks whether the HOST IF is enabled for command operation 2217 * and also checks whether the previous command is completed. It busy waits 2218 * in case of previous command is not completed. 2219 **/ 2220static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) 2221{ 2222 u32 hicr; 2223 u8 i; 2224 2225 /* Check that the host interface is enabled. 
*/ 2226 hicr = er32(HICR); 2227 if ((hicr & E1000_HICR_EN) == 0) { 2228 hw_dbg(hw, "E1000_HOST_EN bit disabled.\n"); 2229 return -E1000_ERR_HOST_INTERFACE_COMMAND; 2230 } 2231 /* check the previous command is completed */ 2232 for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { 2233 hicr = er32(HICR); 2234 if (!(hicr & E1000_HICR_C)) 2235 break; 2236 mdelay(1); 2237 } 2238 2239 if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { 2240 hw_dbg(hw, "Previous command timeout failed .\n"); 2241 return -E1000_ERR_HOST_INTERFACE_COMMAND; 2242 } 2243 2244 return 0; 2245} 2246 2247/** 2248 * e1000e_check_mng_mode_generic - check management mode 2249 * @hw: pointer to the HW structure 2250 * 2251 * Reads the firmware semaphore register and returns true (>0) if 2252 * manageability is enabled, else false (0). 2253 **/ 2254bool e1000e_check_mng_mode_generic(struct e1000_hw *hw) 2255{ 2256 u32 fwsm = er32(FWSM); 2257 2258 return (fwsm & E1000_FWSM_MODE_MASK) == 2259 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); 2260} 2261 2262/** 2263 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx 2264 * @hw: pointer to the HW structure 2265 * 2266 * Enables packet filtering on transmit packets if manageability is enabled 2267 * and host interface is enabled. 2268 **/ 2269bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) 2270{ 2271 struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; 2272 u32 *buffer = (u32 *)&hw->mng_cookie; 2273 u32 offset; 2274 s32 ret_val, hdr_csum, csum; 2275 u8 i, len; 2276 2277 /* No manageability, no filtering */ 2278 if (!e1000e_check_mng_mode(hw)) { 2279 hw->mac.tx_pkt_filtering = 0; 2280 return 0; 2281 } 2282 2283 /* 2284 * If we can't read from the host interface for whatever 2285 * reason, disable filtering. 2286 */ 2287 ret_val = e1000_mng_enable_host_if(hw); 2288 if (ret_val != 0) { 2289 hw->mac.tx_pkt_filtering = 0; 2290 return ret_val; 2291 } 2292 2293 /* Read in the header. Length and offset are in dwords. 
 */
	len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
	offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
	for (i = 0; i < len; i++)
		*(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
	/* Recompute the cookie checksum with the stored checksum byte
	 * zeroed out, so it is not counted in its own sum. */
	hdr_csum = hdr->checksum;
	hdr->checksum = 0;
	csum = e1000_calculate_checksum((u8 *)hdr,
					E1000_MNG_DHCP_COOKIE_LENGTH);
	/*
	 * If either the checksums or signature don't match, then
	 * the cookie area isn't considered valid, in which case we
	 * take the safe route of assuming Tx filtering is enabled.
	 */
	if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
		hw->mac.tx_pkt_filtering = 1;
		return 1;
	}

	/* Cookie area is valid, make the final check for filtering. */
	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
		hw->mac.tx_pkt_filtering = 0;
		return 0;
	}

	hw->mac.tx_pkt_filtering = 1;
	return 1;
}

/**
 *  e1000_mng_write_cmd_header - Writes manageability command header
 *  @hw: pointer to the HW structure
 *  @hdr: pointer to the host interface command header
 *
 *  Computes the header checksum, then writes the complete command header
 *  out to the host interface RAM area one dword at a time.
 **/
static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
				      struct e1000_host_mng_command_header *hdr)
{
	u16 i, length = sizeof(struct e1000_host_mng_command_header);

	/* Write the whole command header structure with new checksum. */

	hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);

	/* Convert the byte count into dwords for the writes below. */
	length >>= 2;
	/* Write the relevant command block into the ram area.
 */
	for (i = 0; i < length; i++) {
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
				      *((u32 *) hdr + i));
		e1e_flush();
	}

	return 0;
}

/**
 *  e1000_mng_host_if_write - Writes to the manageability host interface
 *  @hw: pointer to the HW structure
 *  @buffer: pointer to the host interface buffer
 *  @length: size of the buffer
 *  @offset: location in the buffer to write to
 *  @sum: sum of the data (not checksum)
 *
 *  This function writes the buffer content at the offset given on the host if.
 *  It also does alignment considerations to do the writes in most efficient
 *  way.  Also fills up the sum of the buffer in *buffer parameter.
 **/
static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
				   u16 length, u16 offset, u8 *sum)
{
	u8 *tmp;
	u8 *bufptr = buffer;
	u32 data = 0;
	u16 remaining, i, j, prev_bytes;

	/* sum = only sum of the data and it is not checksum */

	if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
		return -E1000_ERR_PARAM;

	/* tmp gives byte-level access to the dword staging word 'data' */
	tmp = (u8 *)&data;
	/* Split offset into a dword index and a byte offset within it */
	prev_bytes = offset & 0x3;
	offset >>= 2;

	if (prev_bytes) {
		/* Unaligned start: read-modify-write the first dword so
		 * the bytes before 'offset' are preserved.
		 * NOTE(review): this path always consumes
		 * (sizeof(u32) - prev_bytes) input bytes; if length were
		 * smaller than that, bufptr would run past the caller's
		 * buffer and length would wrap — current callers appear to
		 * pass aligned offsets, so confirm before relying on
		 * unaligned writes. */
		data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
		for (j = prev_bytes; j < sizeof(u32); j++) {
			*(tmp + j) = *bufptr++;
			*sum += *(tmp + j);
		}
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
		/* j == sizeof(u32) here, so this subtracts exactly the
		 * number of bytes consumed above. */
		length -= j - prev_bytes;
		offset++;
	}

	/* Trailing bytes that do not fill a whole dword */
	remaining = length & 0x3;
	length -= remaining;

	/* Calculate length in DWORDs */
	length >>= 2;

	/*
	 * The device driver writes the relevant command block into the
	 * ram area.
 */
	for (i = 0; i < length; i++) {
		/* Pack four input bytes into 'data', accumulating the sum */
		for (j = 0; j < sizeof(u32); j++) {
			*(tmp + j) = *bufptr++;
			*sum += *(tmp + j);
		}

		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
	}
	if (remaining) {
		/* Final partial dword: zero-pad beyond the input's end so
		 * stale staging bytes are not written (or summed). */
		for (j = 0; j < sizeof(u32); j++) {
			if (j < remaining)
				*(tmp + j) = *bufptr++;
			else
				*(tmp + j) = 0;

			*sum += *(tmp + j);
		}
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
	}

	return 0;
}

/**
 *  e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
 *  @hw: pointer to the HW structure
 *  @buffer: pointer to the host interface
 *  @length: size of the buffer
 *
 *  Writes the DHCP information to the host interface.
 **/
s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
{
	struct e1000_host_mng_command_header hdr;
	s32 ret_val;
	u32 hicr;

	hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
	hdr.command_length = length;
	hdr.reserved1 = 0;
	hdr.reserved2 = 0;
	hdr.checksum = 0;	/* accumulated from the payload sum below */

	/* Enable the host interface */
	ret_val = e1000_mng_enable_host_if(hw);
	if (ret_val)
		return ret_val;

	/* Populate the host interface with the contents of "buffer":
	 * the payload goes after the header slot, and its byte sum is
	 * folded into hdr.checksum. */
	ret_val = e1000_mng_host_if_write(hw, buffer, length,
					  sizeof(hdr), &(hdr.checksum));
	if (ret_val)
		return ret_val;

	/* Write the manageability command header */
	ret_val = e1000_mng_write_cmd_header(hw, &hdr);
	if (ret_val)
		return ret_val;

	/* Tell the ARC a new command is pending. */
	hicr = er32(HICR);
	ew32(HICR, hicr | E1000_HICR_C);

	return 0;
}

/**
 *  e1000e_enable_mng_pass_thru - Enable processing of ARP's
 *  @hw: pointer to the HW structure
 *
 *  Verifies the hardware needs to allow ARPs to be processed by the host.
2470 **/ 2471bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) 2472{ 2473 u32 manc; 2474 u32 fwsm, factps; 2475 bool ret_val = 0; 2476 2477 manc = er32(MANC); 2478 2479 if (!(manc & E1000_MANC_RCV_TCO_EN) || 2480 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) 2481 return ret_val; 2482 2483 if (hw->mac.arc_subsystem_valid) { 2484 fwsm = er32(FWSM); 2485 factps = er32(FACTPS); 2486 2487 if (!(factps & E1000_FACTPS_MNGCG) && 2488 ((fwsm & E1000_FWSM_MODE_MASK) == 2489 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { 2490 ret_val = 1; 2491 return ret_val; 2492 } 2493 } else { 2494 if ((manc & E1000_MANC_SMBUS_EN) && 2495 !(manc & E1000_MANC_ASF_EN)) { 2496 ret_val = 1; 2497 return ret_val; 2498 } 2499 } 2500 2501 return ret_val; 2502} 2503 2504s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num) 2505{ 2506 s32 ret_val; 2507 u16 nvm_data; 2508 2509 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); 2510 if (ret_val) { 2511 hw_dbg(hw, "NVM Read Error\n"); 2512 return ret_val; 2513 } 2514 *pba_num = (u32)(nvm_data << 16); 2515 2516 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); 2517 if (ret_val) { 2518 hw_dbg(hw, "NVM Read Error\n"); 2519 return ret_val; 2520 } 2521 *pba_num |= nvm_data; 2522 2523 return 0; 2524}