Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

e1000e: reformat comment blocks, cosmetic changes only

Adjusting the comment blocks here to be code-style compliant. No
code changes.

Changed some copyright dates to 2008.

Indentation fixes.

Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>

Authored by Bruce Allan and committed by Jeff Garzik.
ad68076e 652f093f

+1009 -642
+67 -38
drivers/net/e1000e/82571.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel PRO/1000 Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 29 29 /* 30 30 * 82571EB Gigabit Ethernet Controller 31 31 * 82571EB Gigabit Ethernet Controller (Fiber) 32 + * 82571EB Dual Port Gigabit Mezzanine Adapter 33 + * 82571EB Quad Port Gigabit Mezzanine Adapter 34 + * 82571PT Gigabit PT Quad Port Server ExpressModule 32 35 * 82572EI Gigabit Ethernet Controller (Copper) 33 36 * 82572EI Gigabit Ethernet Controller (Fiber) 34 37 * 82572EI Gigabit Ethernet Controller ··· 153 150 if (((eecd >> 15) & 0x3) == 0x3) { 154 151 nvm->type = e1000_nvm_flash_hw; 155 152 nvm->word_size = 2048; 156 - /* Autonomous Flash update bit must be cleared due 153 + /* 154 + * Autonomous Flash update bit must be cleared due 157 155 * to Flash update issue. 158 156 */ 159 157 eecd &= ~E1000_EECD_AUPDEN; ··· 163 159 } 164 160 /* Fall Through */ 165 161 default: 166 - nvm->type = e1000_nvm_eeprom_spi; 162 + nvm->type = e1000_nvm_eeprom_spi; 167 163 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 168 164 E1000_EECD_SIZE_EX_SHIFT); 169 - /* Added to a constant, "size" becomes the left-shift value 165 + /* 166 + * Added to a constant, "size" becomes the left-shift value 170 167 * for setting word_size. 171 168 */ 172 169 size += NVM_WORD_SIZE_BASE_SHIFT; ··· 213 208 /* Set rar entry count */ 214 209 mac->rar_entry_count = E1000_RAR_ENTRIES; 215 210 /* Set if manageability features are enabled. */ 216 - mac->arc_subsystem_valid = 217 - (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0; 211 + mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 
1 : 0; 218 212 219 213 /* check for link */ 220 214 switch (hw->media_type) { ··· 223 219 func->get_link_up_info = e1000e_get_speed_and_duplex_copper; 224 220 break; 225 221 case e1000_media_type_fiber: 226 - func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571; 222 + func->setup_physical_interface = 223 + e1000_setup_fiber_serdes_link_82571; 227 224 func->check_for_link = e1000e_check_for_fiber_link; 228 - func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; 225 + func->get_link_up_info = 226 + e1000e_get_speed_and_duplex_fiber_serdes; 229 227 break; 230 228 case e1000_media_type_internal_serdes: 231 - func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571; 229 + func->setup_physical_interface = 230 + e1000_setup_fiber_serdes_link_82571; 232 231 func->check_for_link = e1000e_check_for_serdes_link; 233 - func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; 232 + func->get_link_up_info = 233 + e1000e_get_speed_and_duplex_fiber_serdes; 234 234 break; 235 235 default: 236 236 return -E1000_ERR_CONFIG; ··· 330 322 switch (hw->mac.type) { 331 323 case e1000_82571: 332 324 case e1000_82572: 333 - /* The 82571 firmware may still be configuring the PHY. 325 + /* 326 + * The 82571 firmware may still be configuring the PHY. 334 327 * In this case, we cannot access the PHY until the 335 328 * configuration is done. So we explicitly set the 336 - * PHY ID. */ 329 + * PHY ID. 330 + */ 337 331 phy->id = IGP01E1000_I_PHY_ID; 338 332 break; 339 333 case e1000_82573: ··· 489 479 if (ret_val) 490 480 return ret_val; 491 481 492 - /* If our nvm is an EEPROM, then we're done 493 - * otherwise, commit the checksum to the flash NVM. */ 482 + /* 483 + * If our nvm is an EEPROM, then we're done 484 + * otherwise, commit the checksum to the flash NVM. 485 + */ 494 486 if (hw->nvm.type != e1000_nvm_flash_hw) 495 487 return ret_val; 496 488 ··· 508 496 509 497 /* Reset the firmware if using STM opcode. 
*/ 510 498 if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { 511 - /* The enabling of and the actual reset must be done 499 + /* 500 + * The enabling of and the actual reset must be done 512 501 * in two write cycles. 513 502 */ 514 503 ew32(HICR, E1000_HICR_FW_RESET_ENABLE); ··· 570 557 u32 eewr = 0; 571 558 s32 ret_val = 0; 572 559 573 - /* A check for invalid values: offset too large, too many words, 574 - * and not enough words. */ 560 + /* 561 + * A check for invalid values: offset too large, too many words, 562 + * and not enough words. 563 + */ 575 564 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 576 565 (words == 0)) { 577 566 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); ··· 660 645 } else { 661 646 data &= ~IGP02E1000_PM_D0_LPLU; 662 647 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); 663 - /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 648 + /* 649 + * LPLU and SmartSpeed are mutually exclusive. LPLU is used 664 650 * during Dx states where the power conservation is most 665 651 * important. During driver activity we should enable 666 - * SmartSpeed, so performance is maintained. */ 652 + * SmartSpeed, so performance is maintained. 
653 + */ 667 654 if (phy->smart_speed == e1000_smart_speed_on) { 668 655 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 669 - &data); 656 + &data); 670 657 if (ret_val) 671 658 return ret_val; 672 659 673 660 data |= IGP01E1000_PSCFR_SMART_SPEED; 674 661 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 675 - data); 662 + data); 676 663 if (ret_val) 677 664 return ret_val; 678 665 } else if (phy->smart_speed == e1000_smart_speed_off) { 679 666 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 680 - &data); 667 + &data); 681 668 if (ret_val) 682 669 return ret_val; 683 670 684 671 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 685 672 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 686 - data); 673 + data); 687 674 if (ret_val) 688 675 return ret_val; 689 676 } ··· 710 693 s32 ret_val; 711 694 u16 i = 0; 712 695 713 - /* Prevent the PCI-E bus from sticking if there is no TLP connection 696 + /* 697 + * Prevent the PCI-E bus from sticking if there is no TLP connection 714 698 * on the last TLP read/write transaction when MAC is reset. 715 699 */ 716 700 ret_val = e1000e_disable_pcie_master(hw); ··· 727 709 728 710 msleep(10); 729 711 730 - /* Must acquire the MDIO ownership before MAC reset. 731 - * Ownership defaults to firmware after a reset. */ 712 + /* 713 + * Must acquire the MDIO ownership before MAC reset. 714 + * Ownership defaults to firmware after a reset. 715 + */ 732 716 if (hw->mac.type == e1000_82573) { 733 717 extcnf_ctrl = er32(EXTCNF_CTRL); 734 718 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; ··· 767 747 /* We don't want to continue accessing MAC registers. */ 768 748 return ret_val; 769 749 770 - /* Phy configuration from NVM just starts after EECD_AUTO_RD is set. 750 + /* 751 + * Phy configuration from NVM just starts after EECD_AUTO_RD is set. 771 752 * Need to wait for Phy configuration completion before accessing 772 753 * NVM and Phy. 773 754 */ ··· 814 793 e1000e_clear_vfta(hw); 815 794 816 795 /* Setup the receive address. 
*/ 817 - /* If, however, a locally administered address was assigned to the 796 + /* 797 + * If, however, a locally administered address was assigned to the 818 798 * 82571, we must reserve a RAR for it to work around an issue where 819 799 * resetting one port will reload the MAC on the other port. 820 800 */ ··· 852 830 ew32(GCR, reg_data); 853 831 } 854 832 855 - /* Clear all of the statistics registers (clear on read). It is 833 + /* 834 + * Clear all of the statistics registers (clear on read). It is 856 835 * important that we do this after we have tried to establish link 857 836 * because the symbol error count will increment wildly if there 858 837 * is no link. ··· 945 922 946 923 if (hw->mac.type == e1000_82573) { 947 924 if (hw->mng_cookie.vlan_id != 0) { 948 - /* The VFTA is a 4096b bit-field, each identifying 925 + /* 926 + * The VFTA is a 4096b bit-field, each identifying 949 927 * a single VLAN ID. The following operations 950 928 * determine which 32b entry (i.e. offset) into the 951 929 * array we want to set the VLAN ID (i.e. bit) of ··· 960 936 } 961 937 } 962 938 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { 963 - /* If the offset we want to clear is the same offset of the 939 + /* 940 + * If the offset we want to clear is the same offset of the 964 941 * manageability VLAN ID, then clear all bits except that of 965 942 * the manageability unit. 966 943 */ ··· 1009 984 **/ 1010 985 static s32 e1000_setup_link_82571(struct e1000_hw *hw) 1011 986 { 1012 - /* 82573 does not have a word in the NVM to determine 987 + /* 988 + * 82573 does not have a word in the NVM to determine 1013 989 * the default flow control setting, so we explicitly 1014 990 * set it to full. 
1015 991 */ ··· 1076 1050 switch (hw->mac.type) { 1077 1051 case e1000_82571: 1078 1052 case e1000_82572: 1079 - /* If SerDes loopback mode is entered, there is no form 1053 + /* 1054 + * If SerDes loopback mode is entered, there is no form 1080 1055 * of reset to take the adapter out of that mode. So we 1081 1056 * have to explicitly take the adapter out of loopback 1082 1057 * mode. This prevents drivers from twiddling their thumbs 1083 1058 * if another tool failed to take it out of loopback mode. 1084 1059 */ 1085 - ew32(SCTL, 1086 - E1000_SCTL_DISABLE_SERDES_LOOPBACK); 1060 + ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); 1087 1061 break; 1088 1062 default: 1089 1063 break; ··· 1150 1124 1151 1125 /* If workaround is activated... */ 1152 1126 if (state) 1153 - /* Hold a copy of the LAA in RAR[14] This is done so that 1127 + /* 1128 + * Hold a copy of the LAA in RAR[14] This is done so that 1154 1129 * between the time RAR[0] gets clobbered and the time it 1155 1130 * gets fixed, the actual LAA is in one of the RARs and no 1156 1131 * incoming packets directed to this port are dropped. ··· 1179 1152 if (nvm->type != e1000_nvm_flash_hw) 1180 1153 return 0; 1181 1154 1182 - /* Check bit 4 of word 10h. If it is 0, firmware is done updating 1155 + /* 1156 + * Check bit 4 of word 10h. If it is 0, firmware is done updating 1183 1157 * 10h-12h. Checksum may need to be fixed. 1184 1158 */ 1185 1159 ret_val = e1000_read_nvm(hw, 0x10, 1, &data); ··· 1188 1160 return ret_val; 1189 1161 1190 1162 if (!(data & 0x10)) { 1191 - /* Read 0x23 and check bit 15. This bit is a 1 1163 + /* 1164 + * Read 0x23 and check bit 15. This bit is a 1 1192 1165 * when the checksum has already been fixed. If 1193 1166 * the checksum is still wrong and this bit is a 1194 1167 * 1, we need to return bad checksum. Otherwise,
+1 -1
drivers/net/e1000e/Makefile
··· 1 1 ################################################################################ 2 2 # 3 3 # Intel PRO/1000 Linux driver 4 - # Copyright(c) 1999 - 2007 Intel Corporation. 4 + # Copyright(c) 1999 - 2008 Intel Corporation. 5 5 # 6 6 # This program is free software; you can redistribute it and/or modify it 7 7 # under the terms and conditions of the GNU General Public License,
+58 -51
drivers/net/e1000e/defines.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel PRO/1000 Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 120 120 #define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ 121 121 #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ 122 122 #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ 123 - #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address 124 - * filtering */ 125 - #define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host 126 - * memory */ 123 + /* Enable MAC address filtering */ 124 + #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 125 + /* Enable MNG packets to host memory */ 126 + #define E1000_MANC_EN_MNG2HOST 0x00200000 127 127 128 128 /* Receive Control */ 129 129 #define E1000_RCTL_EN 0x00000002 /* enable */ ··· 135 135 #define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ 136 136 #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ 137 137 #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ 138 - #define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ 138 + #define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ 139 139 #define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ 140 140 #define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ 141 141 /* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ 142 - #define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ 143 - #define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ 144 - #define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ 145 - #define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ 142 + #define E1000_RCTL_SZ_2048 0x00000000 /* 
Rx buffer size 2048 */ 143 + #define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ 144 + #define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ 145 + #define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ 146 146 /* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ 147 - #define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ 148 - #define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ 149 - #define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ 147 + #define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ 148 + #define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ 149 + #define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ 150 150 #define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ 151 151 #define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ 152 152 #define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ 153 153 #define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ 154 154 #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 155 155 156 - /* Use byte values for the following shift parameters 156 + /* 157 + * Use byte values for the following shift parameters 157 158 * Usage: 158 159 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & 159 160 * E1000_PSRCTL_BSIZE0_MASK) | ··· 207 206 #define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ 208 207 #define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ 209 208 210 - /* Bit definitions for the Management Data IO (MDIO) and Management Data 209 + /* 210 + * Bit definitions for the Management Data IO (MDIO) and Management Data 211 211 * Clock (MDC) pins in the Device Control Register. 
212 212 */ 213 213 ··· 281 279 #define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ 282 280 283 281 /* Transmit Control */ 284 - #define E1000_TCTL_EN 0x00000002 /* enable tx */ 282 + #define E1000_TCTL_EN 0x00000002 /* enable Tx */ 285 283 #define E1000_TCTL_PSP 0x00000008 /* pad short packets */ 286 284 #define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ 287 285 #define E1000_TCTL_COLD 0x003ff000 /* collision distance */ ··· 339 337 #define E1000_KABGTXD_BGSQLBIAS 0x00050000 340 338 341 339 /* PBA constants */ 342 - #define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ 343 - #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ 340 + #define E1000_PBA_8K 0x0008 /* 8KB */ 341 + #define E1000_PBA_16K 0x0010 /* 16KB */ 344 342 345 343 #define E1000_PBS_16K E1000_PBA_16K 346 344 ··· 358 356 /* Interrupt Cause Read */ 359 357 #define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ 360 358 #define E1000_ICR_LSC 0x00000004 /* Link Status Change */ 361 - #define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ 362 - #define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ 363 - #define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ 359 + #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ 360 + #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ 361 + #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ 364 362 #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 365 363 366 - /* This defines the bits that are set in the Interrupt Mask 364 + /* 365 + * This defines the bits that are set in the Interrupt Mask 367 366 * Set/Read Register. 
Each bit is documented below: 368 367 * o RXT0 = Receiver Timer Interrupt (ring 0) 369 368 * o TXDW = Transmit Descriptor Written Back ··· 382 379 /* Interrupt Mask Set */ 383 380 #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 384 381 #define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ 385 - #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 386 - #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 387 - #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 382 + #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ 383 + #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ 384 + #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ 388 385 389 386 /* Interrupt Cause Set */ 390 387 #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ 391 388 #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 389 + #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ 392 390 393 391 /* Transmit Descriptor Control */ 394 392 #define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ 395 393 #define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ 396 394 #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 397 395 #define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ 398 - #define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. 399 - still to be processed. */ 396 + /* Enable the counting of desc. still to be processed. */ 397 + #define E1000_TXDCTL_COUNT_DESC 0x00400000 400 398 401 399 /* Flow Control Constants */ 402 400 #define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 ··· 408 404 #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ 409 405 410 406 /* Receive Address */ 411 - /* Number of high/low register pairs in the RAR. The RAR (Receive Address 407 + /* 408 + * Number of high/low register pairs in the RAR. 
The RAR (Receive Address 412 409 * Registers) holds the directed and multicast addresses that we monitor. 413 410 * Technically, we have 16 spots. However, we reserve one of these spots 414 411 * (RAR[15]) for our directed address used by controllers with ··· 538 533 #define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ 539 534 #define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ 540 535 #define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ 541 - #define E1000_EECD_ADDR_BITS 0x00000400 /* NVM Addressing bits based on type 542 - * (0-small, 1-large) */ 536 + /* NVM Addressing bits based on type (0-small, 1-large) */ 537 + #define E1000_EECD_ADDR_BITS 0x00000400 543 538 #define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ 544 539 #define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ 545 540 #define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ ··· 631 626 #define MAX_PHY_MULTI_PAGE_REG 0xF 632 627 633 628 /* Bit definitions for valid PHY IDs. */ 634 - /* I = Integrated 629 + /* 630 + * I = Integrated 635 631 * E = External 636 632 */ 637 633 #define M88E1000_E_PHY_ID 0x01410C50 ··· 659 653 #define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ 660 654 /* Manual MDI configuration */ 661 655 #define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ 662 - #define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, 663 - * 100BASE-TX/10BASE-T: 664 - * MDI Mode 665 - */ 666 - #define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled 667 - * all speeds. 
668 - */ 669 - /* 1=Enable Extended 10BASE-T distance 670 - * (Lower 10BASE-T RX Threshold) 671 - * 0=Normal 10BASE-T RX Threshold */ 672 - /* 1=5-Bit interface in 100BASE-TX 673 - * 0=MII interface in 100BASE-TX */ 674 - #define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ 656 + /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ 657 + #define M88E1000_PSCR_AUTO_X_1000T 0x0040 658 + /* Auto crossover enabled all speeds */ 659 + #define M88E1000_PSCR_AUTO_X_MODE 0x0060 660 + /* 661 + * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold) 662 + * 0=Normal 10BASE-T Rx Threshold 663 + */ 664 + #define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ 675 665 676 666 /* M88E1000 PHY Specific Status Register */ 677 667 #define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ 678 668 #define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ 679 669 #define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ 680 - #define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; 681 - * 3=110-140M;4=>140M */ 670 + /* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */ 671 + #define M88E1000_PSSR_CABLE_LENGTH 0x0380 682 672 #define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ 683 673 #define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ 684 674 685 675 #define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 686 676 687 - /* Number of times we will attempt to autonegotiate before downshifting if we 688 - * are the master */ 677 + /* 678 + * Number of times we will attempt to autonegotiate before downshifting if we 679 + * are the master 680 + */ 689 681 #define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 690 682 #define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 691 - /* Number of times we will attempt to autonegotiate before downshifting if we 692 - * are the slave */ 683 + /* 684 + * Number of times we will attempt to autonegotiate before downshifting if we 685 + * are the slave 686 + */ 693 687 
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 694 688 #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 695 689 #define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ ··· 698 692 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 699 693 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 700 694 701 - /* Bits... 695 + /* 696 + * Bits... 702 697 * 15-5: page 703 698 * 4-0: register offset 704 699 */
+8 -8
drivers/net/e1000e/e1000.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel PRO/1000 Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 61 61 ndev_printk(KERN_NOTICE , netdev, format, ## arg) 62 62 63 63 64 - /* TX/RX descriptor defines */ 64 + /* Tx/Rx descriptor defines */ 65 65 #define E1000_DEFAULT_TXD 256 66 66 #define E1000_MAX_TXD 4096 67 67 #define E1000_MIN_TXD 80 ··· 114 114 dma_addr_t dma; 115 115 struct sk_buff *skb; 116 116 union { 117 - /* TX */ 117 + /* Tx */ 118 118 struct { 119 119 unsigned long time_stamp; 120 120 u16 length; 121 121 u16 next_to_watch; 122 122 }; 123 - /* RX */ 123 + /* Rx */ 124 124 /* arrays of page information for packet split */ 125 125 struct e1000_ps_page *ps_pages; 126 126 }; ··· 177 177 u16 rx_itr; 178 178 179 179 /* 180 - * TX 180 + * Tx 181 181 */ 182 182 struct e1000_ring *tx_ring /* One per active queue */ 183 183 ____cacheline_aligned_in_smp; ··· 199 199 unsigned int total_rx_bytes; 200 200 unsigned int total_rx_packets; 201 201 202 - /* TX stats */ 202 + /* Tx stats */ 203 203 u64 tpt_old; 204 204 u64 colc_old; 205 205 u64 gotcl_old; ··· 211 211 u32 tx_dma_failed; 212 212 213 213 /* 214 - * RX 214 + * Rx 215 215 */ 216 216 bool (*clean_rx) (struct e1000_adapter *adapter, 217 217 int *work_done, int work_to_do) ··· 223 223 u32 rx_int_delay; 224 224 u32 rx_abs_int_delay; 225 225 226 - /* RX stats */ 226 + /* Rx stats */ 227 227 u64 hw_csum_err; 228 228 u64 hw_csum_good; 229 229 u64 rx_hdr_split;
+52 -37
drivers/net/e1000e/es2lan.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel PRO/1000 Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 92 92 /* In-Band Control Register (Page 194, Register 18) */ 93 93 #define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ 94 94 95 - /* A table for the GG82563 cable length where the range is defined 95 + /* 96 + * A table for the GG82563 cable length where the range is defined 96 97 * with a lower bound at "index" and the upper bound at 97 98 * "index + 5". 98 99 */ ··· 168 167 break; 169 168 } 170 169 171 - nvm->type = e1000_nvm_eeprom_spi; 170 + nvm->type = e1000_nvm_eeprom_spi; 172 171 173 172 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 174 173 E1000_EECD_SIZE_EX_SHIFT); 175 174 176 - /* Added to a constant, "size" becomes the left-shift value 175 + /* 176 + * Added to a constant, "size" becomes the left-shift value 177 177 * for setting word_size. 178 178 */ 179 179 size += NVM_WORD_SIZE_BASE_SHIFT; ··· 210 208 /* Set rar entry count */ 211 209 mac->rar_entry_count = E1000_RAR_ENTRIES; 212 210 /* Set if manageability features are enabled. */ 213 - mac->arc_subsystem_valid = 214 - (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0; 211 + mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 
1 : 0; 215 212 216 213 /* check for link */ 217 214 switch (hw->media_type) { ··· 345 344 if (!(swfw_sync & (fwmask | swmask))) 346 345 break; 347 346 348 - /* Firmware currently using resource (fwmask) 349 - * or other software thread using resource (swmask) */ 347 + /* 348 + * Firmware currently using resource (fwmask) 349 + * or other software thread using resource (swmask) 350 + */ 350 351 e1000e_put_hw_semaphore(hw); 351 352 mdelay(5); 352 353 i++; ··· 410 407 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) 411 408 page_select = GG82563_PHY_PAGE_SELECT; 412 409 else 413 - /* Use Alternative Page Select register to access 410 + /* 411 + * Use Alternative Page Select register to access 414 412 * registers 30 and 31 415 413 */ 416 414 page_select = GG82563_PHY_PAGE_SELECT_ALT; ··· 421 417 if (ret_val) 422 418 return ret_val; 423 419 424 - /* The "ready" bit in the MDIC register may be incorrectly set 420 + /* 421 + * The "ready" bit in the MDIC register may be incorrectly set 425 422 * before the device has completed the "Page Select" MDI 426 423 * transaction. So we wait 200us after each MDI command... 427 424 */ ··· 467 462 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) 468 463 page_select = GG82563_PHY_PAGE_SELECT; 469 464 else 470 - /* Use Alternative Page Select register to access 465 + /* 466 + * Use Alternative Page Select register to access 471 467 * registers 30 and 31 472 468 */ 473 469 page_select = GG82563_PHY_PAGE_SELECT_ALT; ··· 479 473 return ret_val; 480 474 481 475 482 - /* The "ready" bit in the MDIC register may be incorrectly set 476 + /* 477 + * The "ready" bit in the MDIC register may be incorrectly set 483 478 * before the device has completed the "Page Select" MDI 484 479 * transaction. So we wait 200us after each MDI command... 485 480 */ ··· 561 554 u16 phy_data; 562 555 bool link; 563 556 564 - /* Clear Auto-Crossover to force MDI manually. 
M88E1000 requires MDI 557 + /* 558 + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI 565 559 * forced whenever speed and duplex are forced. 566 560 */ 567 561 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); ··· 601 593 return ret_val; 602 594 603 595 if (!link) { 604 - /* We didn't get link. 596 + /* 597 + * We didn't get link. 605 598 * Reset the DSP and cross our fingers. 606 599 */ 607 600 ret_val = e1000e_phy_reset_dsp(hw); ··· 621 612 if (ret_val) 622 613 return ret_val; 623 614 624 - /* Resetting the phy means we need to verify the TX_CLK corresponds 615 + /* 616 + * Resetting the phy means we need to verify the TX_CLK corresponds 625 617 * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. 626 618 */ 627 619 phy_data &= ~GG82563_MSCR_TX_CLK_MASK; ··· 631 621 else 632 622 phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; 633 623 634 - /* In addition, we must re-enable CRS on Tx for both half and full 624 + /* 625 + * In addition, we must re-enable CRS on Tx for both half and full 635 626 * duplex. 636 627 */ 637 628 phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; ··· 715 704 u32 icr; 716 705 s32 ret_val; 717 706 718 - /* Prevent the PCI-E bus from sticking if there is no TLP connection 707 + /* 708 + * Prevent the PCI-E bus from sticking if there is no TLP connection 719 709 * on the last TLP read/write transaction when MAC is reset. 720 710 */ 721 711 ret_val = e1000e_disable_pcie_master(hw); ··· 820 808 reg_data &= ~0x00100000; 821 809 E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); 822 810 823 - /* Clear all of the statistics registers (clear on read). It is 811 + /* 812 + * Clear all of the statistics registers (clear on read). It is 824 813 * important that we do this after we have tried to establish link 825 814 * because the symbol error count will increment wildly if there 826 815 * is no link. 
··· 894 881 if (ret_val) 895 882 return ret_val; 896 883 897 - /* Options: 884 + /* 885 + * Options: 898 886 * MDI/MDI-X = 0 (default) 899 887 * 0 - Auto for all speeds 900 888 * 1 - MDI mode ··· 921 907 break; 922 908 } 923 909 924 - /* Options: 910 + /* 911 + * Options: 925 912 * disable_polarity_correction = 0 (default) 926 913 * Automatic Correction for Reversed Cable Polarity 927 914 * 0 - Disabled ··· 943 928 return ret_val; 944 929 } 945 930 946 - /* Bypass RX and TX FIFO's */ 947 - ret_val = e1000e_write_kmrn_reg(hw, 948 - E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, 949 - E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | 931 + /* Bypass Rx and Tx FIFO's */ 932 + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, 933 + E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | 950 934 E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); 951 935 if (ret_val) 952 936 return ret_val; ··· 967 953 if (ret_val) 968 954 return ret_val; 969 955 970 - /* Do not init these registers when the HW is in IAMT mode, since the 956 + /* 957 + * Do not init these registers when the HW is in IAMT mode, since the 971 958 * firmware will have already initialized them. We only initialize 972 959 * them if the HW is not in IAMT mode. 973 960 */ ··· 989 974 return ret_val; 990 975 } 991 976 992 - /* Workaround: Disable padding in Kumeran interface in the MAC 977 + /* 978 + * Workaround: Disable padding in Kumeran interface in the MAC 993 979 * and in the PHY to avoid CRC errors. 994 980 */ 995 981 ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data); ··· 1023 1007 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1024 1008 ew32(CTRL, ctrl); 1025 1009 1026 - /* Set the mac to wait the maximum time between each 1010 + /* 1011 + * Set the mac to wait the maximum time between each 1027 1012 * iteration and increase the max iterations when 1028 - * polling the phy; this fixes erroneous timeouts at 10Mbps. */ 1013 + * polling the phy; this fixes erroneous timeouts at 10Mbps. 
1014 + */ 1029 1015 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); 1030 1016 if (ret_val) 1031 1017 return ret_val; ··· 1044 1026 if (ret_val) 1045 1027 return ret_val; 1046 1028 reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; 1047 - ret_val = e1000e_write_kmrn_reg(hw, 1048 - E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, 1049 - reg_data); 1029 + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, 1030 + reg_data); 1050 1031 if (ret_val) 1051 1032 return ret_val; 1052 1033 ··· 1073 1056 u16 reg_data; 1074 1057 1075 1058 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; 1076 - ret_val = e1000e_write_kmrn_reg(hw, 1077 - E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1078 - reg_data); 1059 + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1060 + reg_data); 1079 1061 if (ret_val) 1080 1062 return ret_val; 1081 1063 ··· 1112 1096 u32 tipg; 1113 1097 1114 1098 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; 1115 - ret_val = e1000e_write_kmrn_reg(hw, 1116 - E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1117 - reg_data); 1099 + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1100 + reg_data); 1118 1101 if (ret_val) 1119 1102 return ret_val; 1120 1103
+62 -35
drivers/net/e1000e/ethtool.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel PRO/1000 Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 102 102 "Interrupt test (offline)", "Loopback test (offline)", 103 103 "Link test (on/offline)" 104 104 }; 105 - #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) 105 + #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) 106 106 107 107 static int e1000_get_settings(struct net_device *netdev, 108 108 struct ethtool_cmd *ecmd) ··· 226 226 struct e1000_adapter *adapter = netdev_priv(netdev); 227 227 struct e1000_hw *hw = &adapter->hw; 228 228 229 - /* When SoL/IDER sessions are active, autoneg/speed/duplex 230 - * cannot be changed */ 229 + /* 230 + * When SoL/IDER sessions are active, autoneg/speed/duplex 231 + * cannot be changed 232 + */ 231 233 if (e1000_check_reset_block(hw)) { 232 234 ndev_err(netdev, "Cannot change link " 233 235 "characteristics when SoL/IDER is active.\n"); ··· 560 558 ret_val = e1000_write_nvm(hw, first_word, 561 559 last_word - first_word + 1, eeprom_buff); 562 560 563 - /* Update the checksum over the first part of the EEPROM if needed 564 - * and flush shadow RAM for 82573 controllers */ 561 + /* 562 + * Update the checksum over the first part of the EEPROM if needed 563 + * and flush shadow RAM for 82573 controllers 564 + */ 565 565 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) || 566 566 (hw->mac.type == e1000_82573))) 567 567 e1000e_update_nvm_checksum(hw); ··· 582 578 strncpy(drvinfo->driver, e1000e_driver_name, 32); 583 579 strncpy(drvinfo->version, e1000e_driver_version, 32); 584 580 585 - /* EEPROM image version # is reported as firmware version # for 586 - * PCI-E controllers */ 581 + /* 582 + * EEPROM image version # is reported as firmware 
version # for 583 + * PCI-E controllers 584 + */ 587 585 e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data); 588 586 sprintf(firmware_version, "%d.%d-%d", 589 587 (eeprom_data & 0xF000) >> 12, ··· 664 658 if (err) 665 659 goto err_setup_tx; 666 660 667 - /* save the new, restore the old in order to free it, 668 - * then restore the new back again */ 661 + /* 662 + * restore the old in order to free it, 663 + * then add in the new 664 + */ 669 665 adapter->rx_ring = rx_old; 670 666 adapter->tx_ring = tx_old; 671 667 e1000e_free_rx_resources(adapter); ··· 766 758 u32 i; 767 759 u32 toggle; 768 760 769 - /* The status register is Read Only, so a write should fail. 761 + /* 762 + * The status register is Read Only, so a write should fail. 770 763 * Some bits that get toggled are ignored. 771 764 */ 772 765 switch (mac->type) { ··· 917 908 mask = 1 << i; 918 909 919 910 if (!shared_int) { 920 - /* Disable the interrupt to be reported in 911 + /* 912 + * Disable the interrupt to be reported in 921 913 * the cause register and then force the same 922 914 * interrupt and see if one gets posted. If 923 915 * an interrupt was posted to the bus, the ··· 935 925 } 936 926 } 937 927 938 - /* Enable the interrupt to be reported in 928 + /* 929 + * Enable the interrupt to be reported in 939 930 * the cause register and then force the same 940 931 * interrupt and see if one gets posted. If 941 932 * an interrupt was not posted to the bus, the ··· 953 942 } 954 943 955 944 if (!shared_int) { 956 - /* Disable the other interrupts to be reported in 945 + /* 946 + * Disable the other interrupts to be reported in 957 947 * the cause register and then force the other 958 948 * interrupts and see if any get posted. If 959 949 * an interrupt was posted to the bus, the ··· 1228 1216 adapter->hw.phy.type == e1000_phy_m88) { 1229 1217 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1230 1218 } else { 1231 - /* Set the ILOS bit on the fiber Nic if half duplex link is 1232 - * detected. 
*/ 1219 + /* 1220 + * Set the ILOS bit on the fiber Nic if half duplex link is 1221 + * detected. 1222 + */ 1233 1223 stat_reg = er32(STATUS); 1234 1224 if ((stat_reg & E1000_STATUS_FD) == 0) 1235 1225 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); ··· 1239 1225 1240 1226 ew32(CTRL, ctrl_reg); 1241 1227 1242 - /* Disable the receiver on the PHY so when a cable is plugged in, the 1228 + /* 1229 + * Disable the receiver on the PHY so when a cable is plugged in, the 1243 1230 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 1244 1231 */ 1245 1232 if (adapter->hw.phy.type == e1000_phy_m88) ··· 1259 1244 1260 1245 /* special requirements for 82571/82572 fiber adapters */ 1261 1246 1262 - /* jump through hoops to make sure link is up because serdes 1263 - * link is hardwired up */ 1247 + /* 1248 + * jump through hoops to make sure link is up because serdes 1249 + * link is hardwired up 1250 + */ 1264 1251 ctrl |= E1000_CTRL_SLU; 1265 1252 ew32(CTRL, ctrl); 1266 1253 ··· 1280 1263 ew32(CTRL, ctrl); 1281 1264 } 1282 1265 1283 - /* special write to serdes control register to enable SerDes analog 1284 - * loopback */ 1266 + /* 1267 + * special write to serdes control register to enable SerDes analog 1268 + * loopback 1269 + */ 1285 1270 #define E1000_SERDES_LB_ON 0x410 1286 1271 ew32(SCTL, E1000_SERDES_LB_ON); 1287 1272 msleep(10); ··· 1298 1279 u32 ctrlext = er32(CTRL_EXT); 1299 1280 u32 ctrl = er32(CTRL); 1300 1281 1301 - /* save CTRL_EXT to restore later, reuse an empty variable (unused 1302 - on mac_type 80003es2lan) */ 1282 + /* 1283 + * save CTRL_EXT to restore later, reuse an empty variable (unused 1284 + * on mac_type 80003es2lan) 1285 + */ 1303 1286 adapter->tx_fifo_head = ctrlext; 1304 1287 1305 1288 /* clear the serdes mode bits, putting the device into mac loopback */ ··· 1371 1350 if (hw->media_type == e1000_media_type_fiber || 1372 1351 hw->media_type == e1000_media_type_internal_serdes) { 1373 1352 /* restore CTRL_EXT, stealing space 
from tx_fifo_head */ 1374 - ew32(CTRL_EXT, 1375 - adapter->tx_fifo_head); 1353 + ew32(CTRL_EXT, adapter->tx_fifo_head); 1376 1354 adapter->tx_fifo_head = 0; 1377 1355 } 1378 1356 /* fall through */ ··· 1434 1414 1435 1415 ew32(RDT, rx_ring->count - 1); 1436 1416 1437 - /* Calculate the loop count based on the largest descriptor ring 1417 + /* 1418 + * Calculate the loop count based on the largest descriptor ring 1438 1419 * The idea is to wrap the largest ring a number of times using 64 1439 1420 * send/receive pairs during each loop 1440 1421 */ ··· 1475 1454 l++; 1476 1455 if (l == rx_ring->count) 1477 1456 l = 0; 1478 - /* time + 20 msecs (200 msecs on 2.4) is more than 1457 + /* 1458 + * time + 20 msecs (200 msecs on 2.4) is more than 1479 1459 * enough time to complete the receives, if it's 1480 1460 * exceeded, break and error off 1481 1461 */ ··· 1495 1473 1496 1474 static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) 1497 1475 { 1498 - /* PHY loopback cannot be performed if SoL/IDER 1499 - * sessions are active */ 1476 + /* 1477 + * PHY loopback cannot be performed if SoL/IDER 1478 + * sessions are active 1479 + */ 1500 1480 if (e1000_check_reset_block(&adapter->hw)) { 1501 1481 ndev_err(adapter->netdev, "Cannot do PHY loopback test " 1502 1482 "when SoL/IDER is active.\n"); ··· 1532 1508 int i = 0; 1533 1509 hw->mac.serdes_has_link = 0; 1534 1510 1535 - /* On some blade server designs, link establishment 1536 - * could take as long as 2-3 minutes */ 1511 + /* 1512 + * On some blade server designs, link establishment 1513 + * could take as long as 2-3 minutes 1514 + */ 1537 1515 do { 1538 1516 hw->mac.ops.check_for_link(hw); 1539 1517 if (hw->mac.serdes_has_link) ··· 1588 1562 1589 1563 ndev_info(netdev, "offline testing starting\n"); 1590 1564 1591 - /* Link test performed before hardware reset so autoneg doesn't 1592 - * interfere with test result */ 1565 + /* 1566 + * Link test performed before hardware reset so autoneg doesn't 1567 + 
* interfere with test result 1568 + */ 1593 1569 if (e1000_link_test(adapter, &data[4])) 1594 1570 eth_test->flags |= ETH_TEST_FL_FAILED; 1595 1571 ··· 1796 1768 1797 1769 switch (stringset) { 1798 1770 case ETH_SS_TEST: 1799 - memcpy(data, *e1000_gstrings_test, 1800 - sizeof(e1000_gstrings_test)); 1771 + memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test)); 1801 1772 break; 1802 1773 case ETH_SS_STATS: 1803 1774 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+72 -73
drivers/net/e1000e/hw.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel PRO/1000 Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 66 66 E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ 67 67 E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ 68 68 E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ 69 - E1000_RCTL = 0x00100, /* RX Control - RW */ 69 + E1000_RCTL = 0x00100, /* Rx Control - RW */ 70 70 E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ 71 - E1000_TXCW = 0x00178, /* TX Configuration Word - RW */ 72 - E1000_RXCW = 0x00180, /* RX Configuration Word - RO */ 73 - E1000_TCTL = 0x00400, /* TX Control - RW */ 74 - E1000_TCTL_EXT = 0x00404, /* Extended TX Control - RW */ 75 - E1000_TIPG = 0x00410, /* TX Inter-packet gap -RW */ 76 - E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle - RW */ 71 + E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */ 72 + E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */ 73 + E1000_TCTL = 0x00400, /* Tx Control - RW */ 74 + E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */ 75 + E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */ 76 + E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */ 77 77 E1000_LEDCTL = 0x00E00, /* LED Control - RW */ 78 78 E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ 79 79 E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ ··· 87 87 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ 88 88 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ 89 89 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ 90 - E1000_RDBAL = 0x02800, /* RX Descriptor Base Address Low - RW */ 91 - E1000_RDBAH = 0x02804, /* RX Descriptor Base Address 
High - RW */ 92 - E1000_RDLEN = 0x02808, /* RX Descriptor Length - RW */ 93 - E1000_RDH = 0x02810, /* RX Descriptor Head - RW */ 94 - E1000_RDT = 0x02818, /* RX Descriptor Tail - RW */ 95 - E1000_RDTR = 0x02820, /* RX Delay Timer - RW */ 90 + E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */ 91 + E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */ 92 + E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */ 93 + E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */ 94 + E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */ 95 + E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ 96 96 E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ 97 97 98 98 /* Convenience macros ··· 105 105 */ 106 106 #define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) 107 107 E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ 108 - E1000_TDBAL = 0x03800, /* TX Descriptor Base Address Low - RW */ 109 - E1000_TDBAH = 0x03804, /* TX Descriptor Base Address High - RW */ 110 - E1000_TDLEN = 0x03808, /* TX Descriptor Length - RW */ 111 - E1000_TDH = 0x03810, /* TX Descriptor Head - RW */ 112 - E1000_TDT = 0x03818, /* TX Descriptor Tail - RW */ 113 - E1000_TIDV = 0x03820, /* TX Interrupt Delay Value - RW */ 114 - E1000_TXDCTL = 0x03828, /* TX Descriptor Control - RW */ 115 - E1000_TADV = 0x0382C, /* TX Interrupt Absolute Delay Val - RW */ 116 - E1000_TARC0 = 0x03840, /* TX Arbitration Count (0) */ 117 - E1000_TXDCTL1 = 0x03928, /* TX Descriptor Control (1) - RW */ 118 - E1000_TARC1 = 0x03940, /* TX Arbitration Count (1) */ 108 + E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */ 109 + E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */ 110 + E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */ 111 + E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ 112 + E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ 113 + E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ 114 + E1000_TXDCTL = 0x03828, /* Tx Descriptor Control 
- RW */ 115 + E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */ 116 + E1000_TARC0 = 0x03840, /* Tx Arbitration Count (0) */ 117 + E1000_TXDCTL1 = 0x03928, /* Tx Descriptor Control (1) - RW */ 118 + E1000_TARC1 = 0x03940, /* Tx Arbitration Count (1) */ 119 119 E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ 120 120 E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ 121 121 E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ ··· 127 127 E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */ 128 128 E1000_COLC = 0x04028, /* Collision Count - R/clr */ 129 129 E1000_DC = 0x04030, /* Defer Count - R/clr */ 130 - E1000_TNCRS = 0x04034, /* TX-No CRS - R/clr */ 130 + E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */ 131 131 E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */ 132 132 E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */ 133 133 E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */ 134 - E1000_XONRXC = 0x04048, /* XON RX Count - R/clr */ 135 - E1000_XONTXC = 0x0404C, /* XON TX Count - R/clr */ 136 - E1000_XOFFRXC = 0x04050, /* XOFF RX Count - R/clr */ 137 - E1000_XOFFTXC = 0x04054, /* XOFF TX Count - R/clr */ 138 - E1000_FCRUC = 0x04058, /* Flow Control RX Unsupported Count- R/clr */ 139 - E1000_PRC64 = 0x0405C, /* Packets RX (64 bytes) - R/clr */ 140 - E1000_PRC127 = 0x04060, /* Packets RX (65-127 bytes) - R/clr */ 141 - E1000_PRC255 = 0x04064, /* Packets RX (128-255 bytes) - R/clr */ 142 - E1000_PRC511 = 0x04068, /* Packets RX (255-511 bytes) - R/clr */ 143 - E1000_PRC1023 = 0x0406C, /* Packets RX (512-1023 bytes) - R/clr */ 144 - E1000_PRC1522 = 0x04070, /* Packets RX (1024-1522 bytes) - R/clr */ 145 - E1000_GPRC = 0x04074, /* Good Packets RX Count - R/clr */ 146 - E1000_BPRC = 0x04078, /* Broadcast Packets RX Count - R/clr */ 147 - E1000_MPRC = 0x0407C, /* Multicast Packets RX Count - R/clr */ 148 - E1000_GPTC = 0x04080, /* Good Packets TX Count - R/clr */ 149 - E1000_GORCL = 0x04088, /* 
Good Octets RX Count Low - R/clr */ 150 - E1000_GORCH = 0x0408C, /* Good Octets RX Count High - R/clr */ 151 - E1000_GOTCL = 0x04090, /* Good Octets TX Count Low - R/clr */ 152 - E1000_GOTCH = 0x04094, /* Good Octets TX Count High - R/clr */ 153 - E1000_RNBC = 0x040A0, /* RX No Buffers Count - R/clr */ 154 - E1000_RUC = 0x040A4, /* RX Undersize Count - R/clr */ 155 - E1000_RFC = 0x040A8, /* RX Fragment Count - R/clr */ 156 - E1000_ROC = 0x040AC, /* RX Oversize Count - R/clr */ 157 - E1000_RJC = 0x040B0, /* RX Jabber Count - R/clr */ 158 - E1000_MGTPRC = 0x040B4, /* Management Packets RX Count - R/clr */ 134 + E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */ 135 + E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */ 136 + E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */ 137 + E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */ 138 + E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */ 139 + E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */ 140 + E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */ 141 + E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */ 142 + E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */ 143 + E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */ 144 + E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */ 145 + E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */ 146 + E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */ 147 + E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */ 148 + E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */ 149 + E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */ 150 + E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */ 151 + E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */ 152 + E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */ 153 + E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */ 154 + E1000_RUC = 0x040A4, /* Rx Undersize Count - 
R/clr */ 155 + E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */ 156 + E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */ 157 + E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */ 158 + E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */ 159 159 E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */ 160 - E1000_MGTPTC = 0x040BC, /* Management Packets TX Count - R/clr */ 161 - E1000_TORL = 0x040C0, /* Total Octets RX Low - R/clr */ 162 - E1000_TORH = 0x040C4, /* Total Octets RX High - R/clr */ 163 - E1000_TOTL = 0x040C8, /* Total Octets TX Low - R/clr */ 164 - E1000_TOTH = 0x040CC, /* Total Octets TX High - R/clr */ 165 - E1000_TPR = 0x040D0, /* Total Packets RX - R/clr */ 166 - E1000_TPT = 0x040D4, /* Total Packets TX - R/clr */ 167 - E1000_PTC64 = 0x040D8, /* Packets TX (64 bytes) - R/clr */ 168 - E1000_PTC127 = 0x040DC, /* Packets TX (65-127 bytes) - R/clr */ 169 - E1000_PTC255 = 0x040E0, /* Packets TX (128-255 bytes) - R/clr */ 170 - E1000_PTC511 = 0x040E4, /* Packets TX (256-511 bytes) - R/clr */ 171 - E1000_PTC1023 = 0x040E8, /* Packets TX (512-1023 bytes) - R/clr */ 172 - E1000_PTC1522 = 0x040EC, /* Packets TX (1024-1522 Bytes) - R/clr */ 173 - E1000_MPTC = 0x040F0, /* Multicast Packets TX Count - R/clr */ 174 - E1000_BPTC = 0x040F4, /* Broadcast Packets TX Count - R/clr */ 175 - E1000_TSCTC = 0x040F8, /* TCP Segmentation Context TX - R/clr */ 176 - E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context TX Fail - R/clr */ 160 + E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */ 161 + E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */ 162 + E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */ 163 + E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */ 164 + E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */ 165 + E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */ 166 + E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */ 167 + E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */ 168 + 
E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */ 169 + E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */ 170 + E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */ 171 + E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */ 172 + E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */ 173 + E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */ 174 + E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */ 175 + E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */ 176 + E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */ 177 177 E1000_IAC = 0x04100, /* Interrupt Assertion Count */ 178 178 E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */ 179 179 E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */ ··· 183 183 E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */ 184 184 E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ 185 185 E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ 186 - E1000_RXCSUM = 0x05000, /* RX Checksum Control - RW */ 186 + E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */ 187 187 E1000_RFCTL = 0x05008, /* Receive Filter Control */ 188 188 E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ 189 189 E1000_RA = 0x05400, /* Receive Address - RW Array */ ··· 250 250 #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F 251 251 252 252 #define E1000_HICR_EN 0x01 /* Enable bit - RO */ 253 - #define E1000_HICR_C 0x02 /* Driver sets this bit when done 254 - * to put command in RAM */ 253 + /* Driver sets this bit when done to put command in RAM */ 254 + #define E1000_HICR_C 0x02 255 255 #define E1000_HICR_FW_RESET_ENABLE 0x40 256 256 #define E1000_HICR_FW_RESET 0x80 257 257 ··· 685 685 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); 686 686 s32 (*led_on)(struct e1000_hw *); 687 687 s32 (*led_off)(struct e1000_hw *); 688 - void 
(*mc_addr_list_update)(struct e1000_hw *, u8 *, u32, u32, 689 - u32); 688 + void (*mc_addr_list_update)(struct e1000_hw *, u8 *, u32, u32, u32); 690 689 s32 (*reset_hw)(struct e1000_hw *); 691 690 s32 (*init_hw)(struct e1000_hw *); 692 691 s32 (*setup_link)(struct e1000_hw *);
+158 -104
drivers/net/e1000e/ich8lan.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel PRO/1000 Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 243 243 u32 sector_end_addr; 244 244 u16 i; 245 245 246 - /* Can't read flash registers if the register set isn't mapped. 247 - */ 246 + /* Can't read flash registers if the register set isn't mapped. */ 248 247 if (!hw->flash_address) { 249 248 hw_dbg(hw, "ERROR: Flash registers not mapped\n"); 250 249 return -E1000_ERR_CONFIG; ··· 253 254 254 255 gfpreg = er32flash(ICH_FLASH_GFPREG); 255 256 256 - /* sector_X_addr is a "sector"-aligned address (4096 bytes) 257 + /* 258 + * sector_X_addr is a "sector"-aligned address (4096 bytes) 257 259 * Add 1 to sector_end_addr since this sector is included in 258 - * the overall size. */ 260 + * the overall size. 261 + */ 259 262 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; 260 263 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; 261 264 262 265 /* flash_base_addr is byte-aligned */ 263 266 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; 264 267 265 - /* find total size of the NVM, then cut in half since the total 266 - * size represents two separate NVM banks. */ 268 + /* 269 + * find total size of the NVM, then cut in half since the total 270 + * size represents two separate NVM banks. 271 + */ 267 272 nvm->flash_bank_size = (sector_end_addr - sector_base_addr) 268 273 << FLASH_SECTOR_ADDR_SHIFT; 269 274 nvm->flash_bank_size /= 2; ··· 499 496 if (ret_val) 500 497 return ret_val; 501 498 502 - /* Initialize the PHY from the NVM on ICH platforms. This 499 + /* 500 + * Initialize the PHY from the NVM on ICH platforms. 
This 503 501 * is needed due to an issue where the NVM configuration is 504 502 * not properly autoloaded after power transitions. 505 503 * Therefore, after each PHY reset, we will load the ··· 527 523 udelay(100); 528 524 } while ((!data) && --loop); 529 525 530 - /* If basic configuration is incomplete before the above loop 526 + /* 527 + * If basic configuration is incomplete before the above loop 531 528 * count reaches 0, loading the configuration from NVM will 532 529 * leave the PHY in a bad state possibly resulting in no link. 533 530 */ ··· 541 536 data &= ~E1000_STATUS_LAN_INIT_DONE; 542 537 ew32(STATUS, data); 543 538 544 - /* Make sure HW does not configure LCD from PHY 545 - * extended configuration before SW configuration */ 539 + /* 540 + * Make sure HW does not configure LCD from PHY 541 + * extended configuration before SW configuration 542 + */ 546 543 data = er32(EXTCNF_CTRL); 547 544 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) 548 545 return 0; ··· 558 551 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 559 552 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 560 553 561 - /* Configure LCD from extended configuration 562 - * region. */ 554 + /* Configure LCD from extended configuration region. */ 563 555 564 556 /* cnf_base_addr is in DWORD */ 565 557 word_addr = (u16)(cnf_base_addr << 1); ··· 687 681 s32 ret_val; 688 682 u16 phy_data, offset, mask; 689 683 690 - /* Polarity is determined based on the reversal feature 691 - * being enabled. 684 + /* 685 + * Polarity is determined based on the reversal feature being enabled. 
692 686 */ 693 687 if (phy->polarity_correction) { 694 688 offset = IFE_PHY_EXTENDED_STATUS_CONTROL; ··· 737 731 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; 738 732 ew32(PHY_CTRL, phy_ctrl); 739 733 740 - /* Call gig speed drop workaround on LPLU before accessing 741 - * any PHY registers */ 734 + /* 735 + * Call gig speed drop workaround on LPLU before accessing 736 + * any PHY registers 737 + */ 742 738 if ((hw->mac.type == e1000_ich8lan) && 743 739 (hw->phy.type == e1000_phy_igp_3)) 744 740 e1000e_gig_downshift_workaround_ich8lan(hw); ··· 755 747 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; 756 748 ew32(PHY_CTRL, phy_ctrl); 757 749 758 - /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 750 + /* 751 + * LPLU and SmartSpeed are mutually exclusive. LPLU is used 759 752 * during Dx states where the power conservation is most 760 753 * important. During driver activity we should enable 761 - * SmartSpeed, so performance is maintained. */ 754 + * SmartSpeed, so performance is maintained. 755 + */ 762 756 if (phy->smart_speed == e1000_smart_speed_on) { 763 757 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 764 - &data); 758 + &data); 765 759 if (ret_val) 766 760 return ret_val; 767 761 768 762 data |= IGP01E1000_PSCFR_SMART_SPEED; 769 763 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 770 - data); 764 + data); 771 765 if (ret_val) 772 766 return ret_val; 773 767 } else if (phy->smart_speed == e1000_smart_speed_off) { 774 768 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 775 - &data); 769 + &data); 776 770 if (ret_val) 777 771 return ret_val; 778 772 779 773 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 780 774 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 781 - data); 775 + data); 782 776 if (ret_val) 783 777 return ret_val; 784 778 } ··· 814 804 if (!active) { 815 805 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; 816 806 ew32(PHY_CTRL, phy_ctrl); 817 - /* LPLU and SmartSpeed are mutually exclusive. 
LPLU is used 807 + /* 808 + * LPLU and SmartSpeed are mutually exclusive. LPLU is used 818 809 * during Dx states where the power conservation is most 819 810 * important. During driver activity we should enable 820 - * SmartSpeed, so performance is maintained. */ 811 + * SmartSpeed, so performance is maintained. 812 + */ 821 813 if (phy->smart_speed == e1000_smart_speed_on) { 822 - ret_val = e1e_rphy(hw, 823 - IGP01E1000_PHY_PORT_CONFIG, 824 - &data); 814 + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 815 + &data); 825 816 if (ret_val) 826 817 return ret_val; 827 818 828 819 data |= IGP01E1000_PSCFR_SMART_SPEED; 829 - ret_val = e1e_wphy(hw, 830 - IGP01E1000_PHY_PORT_CONFIG, 831 - data); 820 + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 821 + data); 832 822 if (ret_val) 833 823 return ret_val; 834 824 } else if (phy->smart_speed == e1000_smart_speed_off) { 835 - ret_val = e1e_rphy(hw, 836 - IGP01E1000_PHY_PORT_CONFIG, 837 - &data); 825 + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 826 + &data); 838 827 if (ret_val) 839 828 return ret_val; 840 829 841 830 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 842 - ret_val = e1e_wphy(hw, 843 - IGP01E1000_PHY_PORT_CONFIG, 844 - data); 831 + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 832 + data); 845 833 if (ret_val) 846 834 return ret_val; 847 835 } ··· 849 841 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; 850 842 ew32(PHY_CTRL, phy_ctrl); 851 843 852 - /* Call gig speed drop workaround on LPLU before accessing 853 - * any PHY registers */ 844 + /* 845 + * Call gig speed drop workaround on LPLU before accessing 846 + * any PHY registers 847 + */ 854 848 if ((hw->mac.type == e1000_ich8lan) && 855 849 (hw->phy.type == e1000_phy_igp_3)) 856 850 e1000e_gig_downshift_workaround_ich8lan(hw); 857 851 858 852 /* When LPLU is enabled, we should disable SmartSpeed */ 859 - ret_val = e1e_rphy(hw, 860 - IGP01E1000_PHY_PORT_CONFIG, 861 - &data); 853 + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); 862 854 if 
(ret_val) 863 855 return ret_val; 864 856 865 857 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 866 - ret_val = e1e_wphy(hw, 867 - IGP01E1000_PHY_PORT_CONFIG, 868 - data); 858 + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); 869 859 } 870 860 871 861 return 0; ··· 950 944 951 945 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 952 946 953 - /* Either we should have a hardware SPI cycle in progress 947 + /* 948 + * Either we should have a hardware SPI cycle in progress 954 949 * bit to check against, in order to start a new cycle or 955 950 * FDONE bit should be changed in the hardware so that it 956 951 * is 1 after hardware reset, which can then be used as an ··· 960 953 */ 961 954 962 955 if (hsfsts.hsf_status.flcinprog == 0) { 963 - /* There is no cycle running at present, 964 - * so we can start a cycle */ 965 - /* Begin by setting Flash Cycle Done. */ 956 + /* 957 + * There is no cycle running at present, 958 + * so we can start a cycle 959 + * Begin by setting Flash Cycle Done. 960 + */ 966 961 hsfsts.hsf_status.flcdone = 1; 967 962 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 968 963 ret_val = 0; 969 964 } else { 970 - /* otherwise poll for sometime so the current 971 - * cycle has a chance to end before giving up. */ 965 + /* 966 + * otherwise poll for sometime so the current 967 + * cycle has a chance to end before giving up. 968 + */ 972 969 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { 973 970 hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); 974 971 if (hsfsts.hsf_status.flcinprog == 0) { ··· 982 971 udelay(1); 983 972 } 984 973 if (ret_val == 0) { 985 - /* Successful in waiting for previous cycle to timeout, 986 - * now set the Flash Cycle Done. */ 974 + /* 975 + * Successful in waiting for previous cycle to timeout, 976 + * now set the Flash Cycle Done. 
977 + */ 987 978 hsfsts.hsf_status.flcdone = 1; 988 979 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 989 980 } else { ··· 1090 1077 ret_val = e1000_flash_cycle_ich8lan(hw, 1091 1078 ICH_FLASH_READ_COMMAND_TIMEOUT); 1092 1079 1093 - /* Check if FCERR is set to 1, if set to 1, clear it 1080 + /* 1081 + * Check if FCERR is set to 1, if set to 1, clear it 1094 1082 * and try the whole sequence a few more times, else 1095 1083 * read in (shift in) the Flash Data0, the order is 1096 - * least significant byte first msb to lsb */ 1084 + * least significant byte first msb to lsb 1085 + */ 1097 1086 if (ret_val == 0) { 1098 1087 flash_data = er32flash(ICH_FLASH_FDATA0); 1099 1088 if (size == 1) { ··· 1105 1090 } 1106 1091 break; 1107 1092 } else { 1108 - /* If we've gotten here, then things are probably 1093 + /* 1094 + * If we've gotten here, then things are probably 1109 1095 * completely hosed, but if the error condition is 1110 1096 * detected, it won't hurt to give it another try... 1111 1097 * ICH_FLASH_CYCLE_REPEAT_COUNT times. ··· 1184 1168 1185 1169 ret_val = e1000e_update_nvm_checksum_generic(hw); 1186 1170 if (ret_val) 1187 - return ret_val;; 1171 + return ret_val; 1188 1172 1189 1173 if (nvm->type != e1000_nvm_flash_sw) 1190 - return ret_val;; 1174 + return ret_val; 1191 1175 1192 1176 ret_val = e1000_acquire_swflag_ich8lan(hw); 1193 1177 if (ret_val) 1194 - return ret_val;; 1178 + return ret_val; 1195 1179 1196 - /* We're writing to the opposite bank so if we're on bank 1, 1180 + /* 1181 + * We're writing to the opposite bank so if we're on bank 1, 1197 1182 * write to bank 0 etc. 
We also need to erase the segment that 1198 - * is going to be written */ 1183 + * is going to be written 1184 + */ 1199 1185 if (!(er32(EECD) & E1000_EECD_SEC1VAL)) { 1200 1186 new_bank_offset = nvm->flash_bank_size; 1201 1187 old_bank_offset = 0; ··· 1209 1191 } 1210 1192 1211 1193 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { 1212 - /* Determine whether to write the value stored 1194 + /* 1195 + * Determine whether to write the value stored 1213 1196 * in the other NVM bank or a modified value stored 1214 - * in the shadow RAM */ 1197 + * in the shadow RAM 1198 + */ 1215 1199 if (dev_spec->shadow_ram[i].modified) { 1216 1200 data = dev_spec->shadow_ram[i].value; 1217 1201 } else { ··· 1222 1202 &data); 1223 1203 } 1224 1204 1225 - /* If the word is 0x13, then make sure the signature bits 1205 + /* 1206 + * If the word is 0x13, then make sure the signature bits 1226 1207 * (15:14) are 11b until the commit has completed. 1227 1208 * This will allow us to write 10b which indicates the 1228 1209 * signature is valid. We want to do this after the write 1229 1210 * has completed so that we don't mark the segment valid 1230 - * while the write is still in progress */ 1211 + * while the write is still in progress 1212 + */ 1231 1213 if (i == E1000_ICH_NVM_SIG_WORD) 1232 1214 data |= E1000_ICH_NVM_SIG_MASK; 1233 1215 ··· 1252 1230 break; 1253 1231 } 1254 1232 1255 - /* Don't bother writing the segment valid bits if sector 1256 - * programming failed. */ 1233 + /* 1234 + * Don't bother writing the segment valid bits if sector 1235 + * programming failed. 
1236 + */ 1257 1237 if (ret_val) { 1258 1238 hw_dbg(hw, "Flash commit failed.\n"); 1259 1239 e1000_release_swflag_ich8lan(hw); 1260 1240 return ret_val; 1261 1241 } 1262 1242 1263 - /* Finally validate the new segment by setting bit 15:14 1243 + /* 1244 + * Finally validate the new segment by setting bit 15:14 1264 1245 * to 10b in word 0x13 , this can be done without an 1265 1246 * erase as well since these bits are 11 to start with 1266 - * and we need to change bit 14 to 0b */ 1247 + * and we need to change bit 14 to 0b 1248 + */ 1267 1249 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 1268 1250 e1000_read_flash_word_ich8lan(hw, act_offset, &data); 1269 1251 data &= 0xBFFF; ··· 1279 1253 return ret_val; 1280 1254 } 1281 1255 1282 - /* And invalidate the previously valid segment by setting 1256 + /* 1257 + * And invalidate the previously valid segment by setting 1283 1258 * its signature word (0x13) high_byte to 0b. This can be 1284 1259 * done without an erase because flash erase sets all bits 1285 - * to 1's. We can write 1's to 0's without an erase */ 1260 + * to 1's. We can write 1's to 0's without an erase 1261 + */ 1286 1262 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; 1287 1263 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); 1288 1264 if (ret_val) { ··· 1300 1272 1301 1273 e1000_release_swflag_ich8lan(hw); 1302 1274 1303 - /* Reload the EEPROM, or else modifications will not appear 1275 + /* 1276 + * Reload the EEPROM, or else modifications will not appear 1304 1277 * until after the next adapter reset. 1305 1278 */ 1306 1279 e1000e_reload_nvm(hw); ··· 1323 1294 s32 ret_val; 1324 1295 u16 data; 1325 1296 1326 - /* Read 0x19 and check bit 6. If this bit is 0, the checksum 1297 + /* 1298 + * Read 0x19 and check bit 6. If this bit is 0, the checksum 1327 1299 * needs to be fixed. 
This bit is an indication that the NVM 1328 1300 * was prepared by OEM software and did not calculate the 1329 1301 * checksum...a likely scenario. ··· 1394 1364 1395 1365 ew32flash(ICH_FLASH_FDATA0, flash_data); 1396 1366 1397 - /* check if FCERR is set to 1 , if set to 1, clear it 1398 - * and try the whole sequence a few more times else done */ 1367 + /* 1368 + * check if FCERR is set to 1 , if set to 1, clear it 1369 + * and try the whole sequence a few more times else done 1370 + */ 1399 1371 ret_val = e1000_flash_cycle_ich8lan(hw, 1400 1372 ICH_FLASH_WRITE_COMMAND_TIMEOUT); 1401 1373 if (!ret_val) 1402 1374 break; 1403 1375 1404 - /* If we're here, then things are most likely 1376 + /* 1377 + * If we're here, then things are most likely 1405 1378 * completely hosed, but if the error condition 1406 1379 * is detected, it won't hurt to give it another 1407 1380 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. ··· 1495 1462 1496 1463 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 1497 1464 1498 - /* Determine HW Sector size: Read BERASE bits of hw flash status 1499 - * register */ 1500 - /* 00: The Hw sector is 256 bytes, hence we need to erase 16 1465 + /* 1466 + * Determine HW Sector size: Read BERASE bits of hw flash status 1467 + * register 1468 + * 00: The Hw sector is 256 bytes, hence we need to erase 16 1501 1469 * consecutive sectors. The start index for the nth Hw sector 1502 1470 * can be calculated as = bank * 4096 + n * 256 1503 1471 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. 
··· 1545 1511 if (ret_val) 1546 1512 return ret_val; 1547 1513 1548 - /* Write a value 11 (block Erase) in Flash 1549 - * Cycle field in hw flash control */ 1514 + /* 1515 + * Write a value 11 (block Erase) in Flash 1516 + * Cycle field in hw flash control 1517 + */ 1550 1518 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); 1551 1519 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; 1552 1520 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); 1553 1521 1554 - /* Write the last 24 bits of an index within the 1522 + /* 1523 + * Write the last 24 bits of an index within the 1555 1524 * block into Flash Linear address field in Flash 1556 1525 * Address. 1557 1526 */ ··· 1566 1529 if (ret_val == 0) 1567 1530 break; 1568 1531 1569 - /* Check if FCERR is set to 1. If 1, 1532 + /* 1533 + * Check if FCERR is set to 1. If 1, 1570 1534 * clear it and try the whole sequence 1571 - * a few more times else Done */ 1535 + * a few more times else Done 1536 + */ 1572 1537 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 1573 1538 if (hsfsts.hsf_status.flcerr == 1) 1574 - /* repeat for some time before 1575 - * giving up */ 1539 + /* repeat for some time before giving up */ 1576 1540 continue; 1577 1541 else if (hsfsts.hsf_status.flcdone == 0) 1578 1542 return ret_val; ··· 1623 1585 1624 1586 ret_val = e1000e_get_bus_info_pcie(hw); 1625 1587 1626 - /* ICH devices are "PCI Express"-ish. They have 1588 + /* 1589 + * ICH devices are "PCI Express"-ish. They have 1627 1590 * a configuration space, but do not contain 1628 1591 * PCI Express Capability registers, so bus width 1629 1592 * must be hardcoded. ··· 1647 1608 u32 ctrl, icr, kab; 1648 1609 s32 ret_val; 1649 1610 1650 - /* Prevent the PCI-E bus from sticking if there is no TLP connection 1611 + /* 1612 + * Prevent the PCI-E bus from sticking if there is no TLP connection 1651 1613 * on the last TLP read/write transaction when MAC is reset. 
1652 1614 */ 1653 1615 ret_val = e1000e_disable_pcie_master(hw); ··· 1659 1619 hw_dbg(hw, "Masking off all interrupts\n"); 1660 1620 ew32(IMC, 0xffffffff); 1661 1621 1662 - /* Disable the Transmit and Receive units. Then delay to allow 1622 + /* 1623 + * Disable the Transmit and Receive units. Then delay to allow 1663 1624 * any pending transactions to complete before we hit the MAC 1664 1625 * with the global reset. 1665 1626 */ ··· 1681 1640 ctrl = er32(CTRL); 1682 1641 1683 1642 if (!e1000_check_reset_block(hw)) { 1684 - /* PHY HW reset requires MAC CORE reset at the same 1643 + /* 1644 + * PHY HW reset requires MAC CORE reset at the same 1685 1645 * time to make sure the interface between MAC and the 1686 1646 * external PHY is reset. 1687 1647 */ ··· 1766 1724 E1000_TXDCTL_MAX_TX_DESC_PREFETCH; 1767 1725 ew32(TXDCTL1, txdctl); 1768 1726 1769 - /* ICH8 has opposite polarity of no_snoop bits. 1770 - * By default, we should use snoop behavior. */ 1727 + /* 1728 + * ICH8 has opposite polarity of no_snoop bits. 1729 + * By default, we should use snoop behavior. 1730 + */ 1771 1731 if (mac->type == e1000_ich8lan) 1772 1732 snoop = PCIE_ICH8_SNOOP_ALL; 1773 1733 else ··· 1780 1736 ctrl_ext |= E1000_CTRL_EXT_RO_DIS; 1781 1737 ew32(CTRL_EXT, ctrl_ext); 1782 1738 1783 - /* Clear all of the statistics registers (clear on read). It is 1739 + /* 1740 + * Clear all of the statistics registers (clear on read). It is 1784 1741 * important that we do this after we have tried to establish link 1785 1742 * because the symbol error count will increment wildly if there 1786 1743 * is no link. ··· 1858 1813 if (e1000_check_reset_block(hw)) 1859 1814 return 0; 1860 1815 1861 - /* ICH parts do not have a word in the NVM to determine 1816 + /* 1817 + * ICH parts do not have a word in the NVM to determine 1862 1818 * the default flow control setting, so we explicitly 1863 1819 * set it to full. 
1864 1820 */ ··· 1899 1853 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1900 1854 ew32(CTRL, ctrl); 1901 1855 1902 - /* Set the mac to wait the maximum time between each iteration 1856 + /* 1857 + * Set the mac to wait the maximum time between each iteration 1903 1858 * and increase the max iterations when polling the phy; 1904 - * this fixes erroneous timeouts at 10Mbps. */ 1859 + * this fixes erroneous timeouts at 10Mbps. 1860 + */ 1905 1861 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); 1906 1862 if (ret_val) 1907 1863 return ret_val; ··· 1930 1882 * @speed: pointer to store current link speed 1931 1883 * @duplex: pointer to store the current link duplex 1932 1884 * 1933 - * Calls the generic get_speed_and_duplex to retreive the current link 1885 + * Calls the generic get_speed_and_duplex to retrieve the current link 1934 1886 * information and then calls the Kumeran lock loss workaround for links at 1935 1887 * gigabit speeds. 1936 1888 **/ ··· 1978 1930 if (!dev_spec->kmrn_lock_loss_workaround_enabled) 1979 1931 return 0; 1980 1932 1981 - /* Make sure link is up before proceeding. If not just return. 1933 + /* 1934 + * Make sure link is up before proceeding. If not just return. 
1982 1935 * Attempting this while link is negotiating fouled up link 1983 - * stability */ 1936 + * stability 1937 + */ 1984 1938 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); 1985 1939 if (!link) 1986 1940 return 0; ··· 2011 1961 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 2012 1962 ew32(PHY_CTRL, phy_ctrl); 2013 1963 2014 - /* Call gig speed drop workaround on Gig disable before accessing 2015 - * any PHY registers */ 1964 + /* 1965 + * Call gig speed drop workaround on Gig disable before accessing 1966 + * any PHY registers 1967 + */ 2016 1968 e1000e_gig_downshift_workaround_ich8lan(hw); 2017 1969 2018 1970 /* unable to acquire PCS lock */ ··· 2022 1970 } 2023 1971 2024 1972 /** 2025 - * e1000_set_kmrn_lock_loss_workaound_ich8lan - Set Kumeran workaround state 1973 + * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state 2026 1974 * @hw: pointer to the HW structure 2027 1975 * @state: boolean value used to set the current Kumeran workaround state 2028 1976 * ··· 2069 2017 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 2070 2018 ew32(PHY_CTRL, reg); 2071 2019 2072 - /* Call gig speed drop workaround on Gig disable before 2073 - * accessing any PHY registers */ 2020 + /* 2021 + * Call gig speed drop workaround on Gig disable before 2022 + * accessing any PHY registers 2023 + */ 2074 2024 if (hw->mac.type == e1000_ich8lan) 2075 2025 e1000e_gig_downshift_workaround_ich8lan(hw); 2076 2026
+134 -79
drivers/net/e1000e/lib.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel PRO/1000 Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 43 43 44 44 #define E1000_FACTPS_MNGCG 0x20000000 45 45 46 - #define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management 47 - * Technology signature */ 46 + /* Intel(R) Active Management Technology signature */ 47 + #define E1000_IAMT_SIGNATURE 0x544D4149 48 48 49 49 /** 50 50 * e1000e_get_bus_info_pcie - Get PCIe bus information ··· 142 142 { 143 143 u32 rar_low, rar_high; 144 144 145 - /* HW expects these in little endian so we reverse the byte order 145 + /* 146 + * HW expects these in little endian so we reverse the byte order 146 147 * from network order (big endian) to little endian 147 148 */ 148 149 rar_low = ((u32) addr[0] | ··· 172 171 { 173 172 u32 hash_bit, hash_reg, mta; 174 173 175 - /* The MTA is a register array of 32-bit registers. It is 174 + /* 175 + * The MTA is a register array of 32-bit registers. It is 176 176 * treated like an array of (32*mta_reg_count) bits. We want to 177 177 * set bit BitArray[hash_value]. So we figure out what register 178 178 * the bit is in, read it, OR in the new bit, then write ··· 210 208 /* Register count multiplied by bits per register */ 211 209 hash_mask = (hw->mac.mta_reg_count * 32) - 1; 212 210 213 - /* For a mc_filter_type of 0, bit_shift is the number of left-shifts 214 - * where 0xFF would still fall within the hash mask. */ 211 + /* 212 + * For a mc_filter_type of 0, bit_shift is the number of left-shifts 213 + * where 0xFF would still fall within the hash mask. 
214 + */ 215 215 while (hash_mask >> bit_shift != 0xFF) 216 216 bit_shift++; 217 217 218 - /* The portion of the address that is used for the hash table 218 + /* 219 + * The portion of the address that is used for the hash table 219 220 * is determined by the mc_filter_type setting. 220 221 * The algorithm is such that there is a total of 8 bits of shifting. 221 222 * The bit_shift for a mc_filter_type of 0 represents the number of ··· 229 224 * cases are a variation of this algorithm...essentially raising the 230 225 * number of bits to shift mc_addr[5] left, while still keeping the 231 226 * 8-bit shifting total. 232 - */ 233 - /* For example, given the following Destination MAC Address and an 227 + * 228 + * For example, given the following Destination MAC Address and an 234 229 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), 235 230 * we can see that the bit_shift for case 0 is 4. These are the hash 236 231 * values resulting from each mc_filter_type... ··· 284 279 u32 hash_value; 285 280 u32 i; 286 281 287 - /* Load the first set of multicast addresses into the exact 282 + /* 283 + * Load the first set of multicast addresses into the exact 288 284 * filters (RAR). If there are not enough to fill the RAR 289 285 * array, clear the filters. 290 286 */ ··· 381 375 s32 ret_val; 382 376 bool link; 383 377 384 - /* We only want to go out to the PHY registers to see if Auto-Neg 378 + /* 379 + * We only want to go out to the PHY registers to see if Auto-Neg 385 380 * has completed and/or if our link status has changed. The 386 381 * get_link_status flag is set upon receiving a Link Status 387 382 * Change or Rx Sequence Error interrupt. ··· 390 383 if (!mac->get_link_status) 391 384 return 0; 392 385 393 - /* First we want to see if the MII Status Register reports 386 + /* 387 + * First we want to see if the MII Status Register reports 394 388 * link. If so, then we want to get the current speed/duplex 395 389 * of the PHY. 
396 390 */ ··· 404 396 405 397 mac->get_link_status = 0; 406 398 407 - /* Check if there was DownShift, must be checked 408 - * immediately after link-up */ 399 + /* 400 + * Check if there was DownShift, must be checked 401 + * immediately after link-up 402 + */ 409 403 e1000e_check_downshift(hw); 410 404 411 - /* If we are forcing speed/duplex, then we simply return since 405 + /* 406 + * If we are forcing speed/duplex, then we simply return since 412 407 * we have already determined whether we have link or not. 413 408 */ 414 409 if (!mac->autoneg) { ··· 419 408 return ret_val; 420 409 } 421 410 422 - /* Auto-Neg is enabled. Auto Speed Detection takes care 411 + /* 412 + * Auto-Neg is enabled. Auto Speed Detection takes care 423 413 * of MAC speed/duplex configuration. So we only need to 424 414 * configure Collision Distance in the MAC. 425 415 */ 426 416 e1000e_config_collision_dist(hw); 427 417 428 - /* Configure Flow Control now that Auto-Neg has completed. 418 + /* 419 + * Configure Flow Control now that Auto-Neg has completed. 429 420 * First, we need to restore the desired flow control 430 421 * settings because we may have had to re-autoneg with a 431 422 * different link partner. ··· 459 446 status = er32(STATUS); 460 447 rxcw = er32(RXCW); 461 448 462 - /* If we don't have link (auto-negotiation failed or link partner 449 + /* 450 + * If we don't have link (auto-negotiation failed or link partner 463 451 * cannot auto-negotiate), the cable is plugged in (we have signal), 464 452 * and our link partner is not trying to auto-negotiate with us (we 465 453 * are receiving idles or data), we need to force link up. 
We also ··· 491 477 return ret_val; 492 478 } 493 479 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 494 - /* If we are forcing link and we are receiving /C/ ordered 480 + /* 481 + * If we are forcing link and we are receiving /C/ ordered 495 482 * sets, re-enable auto-negotiation in the TXCW register 496 483 * and disable forced link in the Device Control register 497 484 * in an attempt to auto-negotiate with our link partner. ··· 526 511 status = er32(STATUS); 527 512 rxcw = er32(RXCW); 528 513 529 - /* If we don't have link (auto-negotiation failed or link partner 514 + /* 515 + * If we don't have link (auto-negotiation failed or link partner 530 516 * cannot auto-negotiate), and our link partner is not trying to 531 517 * auto-negotiate with us (we are receiving idles or data), 532 518 * we need to force link up. We also need to give auto-negotiation ··· 556 540 return ret_val; 557 541 } 558 542 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 559 - /* If we are forcing link and we are receiving /C/ ordered 543 + /* 544 + * If we are forcing link and we are receiving /C/ ordered 560 545 * sets, re-enable auto-negotiation in the TXCW register 561 546 * and disable forced link in the Device Control register 562 547 * in an attempt to auto-negotiate with our link partner. ··· 568 551 569 552 mac->serdes_has_link = 1; 570 553 } else if (!(E1000_TXCW_ANE & er32(TXCW))) { 571 - /* If we force link for non-auto-negotiation switch, check 554 + /* 555 + * If we force link for non-auto-negotiation switch, check 572 556 * link status based on MAC synchronization for internal 573 557 * serdes media type. 574 558 */ ··· 607 589 s32 ret_val; 608 590 u16 nvm_data; 609 591 610 - /* Read and store word 0x0F of the EEPROM. This word contains bits 592 + /* 593 + * Read and store word 0x0F of the EEPROM. 
This word contains bits 611 594 * that determine the hardware's default PAUSE (flow control) mode, 612 595 * a bit that determines whether the HW defaults to enabling or 613 596 * disabling auto-negotiation, and the direction of the ··· 649 630 struct e1000_mac_info *mac = &hw->mac; 650 631 s32 ret_val; 651 632 652 - /* In the case of the phy reset being blocked, we already have a link. 633 + /* 634 + * In the case of the phy reset being blocked, we already have a link. 653 635 * We do not need to set it up again. 654 636 */ 655 637 if (e1000_check_reset_block(hw)) ··· 666 646 return ret_val; 667 647 } 668 648 669 - /* We want to save off the original Flow Control configuration just 649 + /* 650 + * We want to save off the original Flow Control configuration just 670 651 * in case we get disconnected and then reconnected into a different 671 652 * hub or switch with different Flow Control capabilities. 672 653 */ ··· 680 659 if (ret_val) 681 660 return ret_val; 682 661 683 - /* Initialize the flow control address, type, and PAUSE timer 662 + /* 663 + * Initialize the flow control address, type, and PAUSE timer 684 664 * registers to their default values. This is done even if flow 685 665 * control is disabled, because it does not hurt anything to 686 666 * initialize these registers. ··· 708 686 struct e1000_mac_info *mac = &hw->mac; 709 687 u32 txcw; 710 688 711 - /* Check for a software override of the flow control settings, and 689 + /* 690 + * Check for a software override of the flow control settings, and 712 691 * setup the device accordingly. If auto-negotiation is enabled, then 713 692 * software will have to set the "PAUSE" bits to the correct value in 714 693 * the Transmit Config Word Register (TXCW) and re-start auto- ··· 723 700 * but not send pause frames). 724 701 * 2: Tx flow control is enabled (we can send pause frames but we 725 702 * do not support receiving pause frames). 726 - * 3: Both Rx and TX flow control (symmetric) are enabled. 
703 + * 3: Both Rx and Tx flow control (symmetric) are enabled. 727 704 */ 728 705 switch (mac->fc) { 729 706 case e1000_fc_none: ··· 731 708 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); 732 709 break; 733 710 case e1000_fc_rx_pause: 734 - /* RX Flow control is enabled and TX Flow control is disabled 711 + /* 712 + * Rx Flow control is enabled and Tx Flow control is disabled 735 713 * by a software over-ride. Since there really isn't a way to 736 - * advertise that we are capable of RX Pause ONLY, we will 737 - * advertise that we support both symmetric and asymmetric RX 714 + * advertise that we are capable of Rx Pause ONLY, we will 715 + * advertise that we support both symmetric and asymmetric Rx 738 716 * PAUSE. Later, we will disable the adapter's ability to send 739 717 * PAUSE frames. 740 718 */ 741 719 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 742 720 break; 743 721 case e1000_fc_tx_pause: 744 - /* TX Flow control is enabled, and RX Flow control is disabled, 722 + /* 723 + * Tx Flow control is enabled, and Rx Flow control is disabled, 745 724 * by a software over-ride. 746 725 */ 747 726 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); 748 727 break; 749 728 case e1000_fc_full: 750 - /* Flow control (both RX and TX) is enabled by a software 729 + /* 730 + * Flow control (both Rx and Tx) is enabled by a software 751 731 * over-ride. 752 732 */ 753 733 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); ··· 780 754 u32 i, status; 781 755 s32 ret_val; 782 756 783 - /* If we have a signal (the cable is plugged in, or assumed true for 757 + /* 758 + * If we have a signal (the cable is plugged in, or assumed true for 784 759 * serdes media) then poll for a "Link-Up" indication in the Device 785 760 * Status Register. 
Time-out if a link isn't seen in 500 milliseconds 786 761 * seconds (Auto-negotiation should complete in less than 500 ··· 796 769 if (i == FIBER_LINK_UP_LIMIT) { 797 770 hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); 798 771 mac->autoneg_failed = 1; 799 - /* AutoNeg failed to achieve a link, so we'll call 772 + /* 773 + * AutoNeg failed to achieve a link, so we'll call 800 774 * mac->check_for_link. This routine will force the 801 775 * link up if we detect a signal. This will allow us to 802 776 * communicate with non-autonegotiating link partners. ··· 839 811 if (ret_val) 840 812 return ret_val; 841 813 842 - /* Since auto-negotiation is enabled, take the link out of reset (the 814 + /* 815 + * Since auto-negotiation is enabled, take the link out of reset (the 843 816 * link will be in reset, because we previously reset the chip). This 844 817 * will restart auto-negotiation. If auto-negotiation is successful 845 818 * then the link-up status bit will be set and the flow control enable ··· 852 823 e1e_flush(); 853 824 msleep(1); 854 825 855 - /* For these adapters, the SW defineable pin 1 is set when the optics 826 + /* 827 + * For these adapters, the SW definable pin 1 is set when the optics 856 828 * detect a signal. If we have a signal, then poll for a "Link-Up" 857 829 * indication. 858 830 */ ··· 894 864 * 895 865 * Sets the flow control high/low threshold (watermark) registers. If 896 866 * flow control XON frame transmission is enabled, then set XON frame 897 - * tansmission as well. 867 + * transmission as well. 898 868 **/ 899 869 s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) 900 870 { 901 871 struct e1000_mac_info *mac = &hw->mac; 902 872 u32 fcrtl = 0, fcrth = 0; 903 873 904 - /* Set the flow control receive threshold registers. Normally, 874 + /* 875 + * Set the flow control receive threshold registers. 
Normally, 905 876 * these registers will be set to a default threshold that may be 906 877 * adjusted later by the driver's runtime code. However, if the 907 878 * ability to transmit pause frames is not enabled, then these 908 879 * registers will be set to 0. 909 880 */ 910 881 if (mac->fc & e1000_fc_tx_pause) { 911 - /* We need to set up the Receive Threshold high and low water 882 + /* 883 + * We need to set up the Receive Threshold high and low water 912 884 * marks as well as (optionally) enabling the transmission of 913 885 * XON frames. 914 886 */ ··· 941 909 942 910 ctrl = er32(CTRL); 943 911 944 - /* Because we didn't get link via the internal auto-negotiation 912 + /* 913 + * Because we didn't get link via the internal auto-negotiation 945 914 * mechanism (we either forced link or we got link via PHY 946 915 * auto-neg), we have to manually enable/disable transmit an 947 916 * receive flow control. ··· 956 923 * frames but not send pause frames). 957 924 * 2: Tx flow control is enabled (we can send pause frames 958 925 * frames but we do not receive pause frames). 959 - * 3: Both Rx and TX flow control (symmetric) is enabled. 926 + * 3: Both Rx and Tx flow control (symmetric) is enabled. 960 927 * other: No other values should be possible at this point. 961 928 */ 962 929 hw_dbg(hw, "mac->fc = %u\n", mac->fc); ··· 1003 970 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; 1004 971 u16 speed, duplex; 1005 972 1006 - /* Check for the case where we have fiber media and auto-neg failed 973 + /* 974 + * Check for the case where we have fiber media and auto-neg failed 1007 975 * so we had to force link. In this case, we need to force the 1008 976 * configuration of the MAC to match the "fc" parameter. 1009 977 */ ··· 1022 988 return ret_val; 1023 989 } 1024 990 1025 - /* Check for the case where we have copper media and auto-neg is 991 + /* 992 + * Check for the case where we have copper media and auto-neg is 1026 993 * enabled. 
In this case, we need to check and see if Auto-Neg 1027 994 * has completed, and if so, how the PHY and link partner has 1028 995 * flow control configured. 1029 996 */ 1030 997 if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) { 1031 - /* Read the MII Status Register and check to see if AutoNeg 998 + /* 999 + * Read the MII Status Register and check to see if AutoNeg 1032 1000 * has completed. We read this twice because this reg has 1033 1001 * some "sticky" (latched) bits. 1034 1002 */ ··· 1047 1011 return ret_val; 1048 1012 } 1049 1013 1050 - /* The AutoNeg process has completed, so we now need to 1014 + /* 1015 + * The AutoNeg process has completed, so we now need to 1051 1016 * read both the Auto Negotiation Advertisement 1052 1017 * Register (Address 4) and the Auto_Negotiation Base 1053 1018 * Page Ability Register (Address 5) to determine how ··· 1061 1024 if (ret_val) 1062 1025 return ret_val; 1063 1026 1064 - /* Two bits in the Auto Negotiation Advertisement Register 1027 + /* 1028 + * Two bits in the Auto Negotiation Advertisement Register 1065 1029 * (Address 4) and two bits in the Auto Negotiation Base 1066 1030 * Page Ability Register (Address 5) determine flow control 1067 1031 * for both the PHY and the link partner. The following ··· 1083 1045 * 1 | 1 | 0 | 0 | e1000_fc_none 1084 1046 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause 1085 1047 * 1086 - */ 1087 - /* Are both PAUSE bits set to 1? If so, this implies 1048 + * 1049 + * Are both PAUSE bits set to 1? If so, this implies 1088 1050 * Symmetric Flow Control is enabled at both ends. The 1089 1051 * ASM_DIR bits are irrelevant per the spec. 1090 1052 * ··· 1098 1060 */ 1099 1061 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && 1100 1062 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { 1101 - /* Now we need to check if the user selected RX ONLY 1063 + /* 1064 + * Now we need to check if the user selected Rx ONLY 1102 1065 * of pause frames. 
In this case, we had to advertise 1103 - * FULL flow control because we could not advertise RX 1066 + * FULL flow control because we could not advertise Rx 1104 1067 * ONLY. Hence, we must now check to see if we need to 1105 1068 * turn OFF the TRANSMISSION of PAUSE frames. 1106 1069 */ ··· 1114 1075 "RX PAUSE frames only.\r\n"); 1115 1076 } 1116 1077 } 1117 - /* For receiving PAUSE frames ONLY. 1078 + /* 1079 + * For receiving PAUSE frames ONLY. 1118 1080 * 1119 1081 * LOCAL DEVICE | LINK PARTNER 1120 1082 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result ··· 1130 1090 mac->fc = e1000_fc_tx_pause; 1131 1091 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n"); 1132 1092 } 1133 - /* For transmitting PAUSE frames ONLY. 1093 + /* 1094 + * For transmitting PAUSE frames ONLY. 1134 1095 * 1135 1096 * LOCAL DEVICE | LINK PARTNER 1136 1097 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result ··· 1154 1113 hw_dbg(hw, "Flow Control = NONE.\r\n"); 1155 1114 } 1156 1115 1157 - /* Now we need to do one last check... If we auto- 1116 + /* 1117 + * Now we need to do one last check... If we auto- 1158 1118 * negotiated to HALF DUPLEX, flow control should not be 1159 1119 * enabled per IEEE 802.3 spec. 1160 1120 */ ··· 1168 1126 if (duplex == HALF_DUPLEX) 1169 1127 mac->fc = e1000_fc_none; 1170 1128 1171 - /* Now we call a subroutine to actually force the MAC 1129 + /* 1130 + * Now we call a subroutine to actually force the MAC 1172 1131 * controller to use the correct flow control settings. 
1173 1132 */ 1174 1133 ret_val = e1000e_force_mac_fc(hw); ··· 1441 1398 ledctl_blink = E1000_LEDCTL_LED0_BLINK | 1442 1399 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); 1443 1400 } else { 1444 - /* set the blink bit for each LED that's "on" (0x0E) 1445 - * in ledctl_mode2 */ 1401 + /* 1402 + * set the blink bit for each LED that's "on" (0x0E) 1403 + * in ledctl_mode2 1404 + */ 1446 1405 ledctl_blink = hw->mac.ledctl_mode2; 1447 1406 for (i = 0; i < 4; i++) 1448 1407 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == ··· 1607 1562 else 1608 1563 mac->current_ifs_val += 1609 1564 mac->ifs_step_size; 1610 - ew32(AIT, 1611 - mac->current_ifs_val); 1565 + ew32(AIT, mac->current_ifs_val); 1612 1566 } 1613 1567 } 1614 1568 } else { ··· 1870 1826 udelay(1); 1871 1827 timeout = NVM_MAX_RETRY_SPI; 1872 1828 1873 - /* Read "Status Register" repeatedly until the LSB is cleared. 1829 + /* 1830 + * Read "Status Register" repeatedly until the LSB is cleared. 1874 1831 * The EEPROM will signal that the command has been completed 1875 1832 * by clearing bit 0 of the internal status register. If it's 1876 - * not cleared within 'timeout', then error out. */ 1833 + * not cleared within 'timeout', then error out. 1834 + */ 1877 1835 while (timeout) { 1878 1836 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, 1879 1837 hw->nvm.opcode_bits); ··· 1912 1866 u32 i, eerd = 0; 1913 1867 s32 ret_val = 0; 1914 1868 1915 - /* A check for invalid values: offset too large, too many words, 1916 - * and not enough words. */ 1869 + /* 1870 + * A check for invalid values: offset too large, too many words, 1871 + * too many words for the offset, and not enough words. 
1872 + */ 1917 1873 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 1918 1874 (words == 0)) { 1919 1875 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); ··· 1931 1883 if (ret_val) 1932 1884 break; 1933 1885 1934 - data[i] = (er32(EERD) >> 1935 - E1000_NVM_RW_REG_DATA); 1886 + data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA); 1936 1887 } 1937 1888 1938 1889 return ret_val; ··· 1955 1908 s32 ret_val; 1956 1909 u16 widx = 0; 1957 1910 1958 - /* A check for invalid values: offset too large, too many words, 1959 - * and not enough words. */ 1911 + /* 1912 + * A check for invalid values: offset too large, too many words, 1913 + * and not enough words. 1914 + */ 1960 1915 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 1961 1916 (words == 0)) { 1962 1917 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); ··· 1988 1939 1989 1940 e1000_standby_nvm(hw); 1990 1941 1991 - /* Some SPI eeproms use the 8th address bit embedded in the 1992 - * opcode */ 1942 + /* 1943 + * Some SPI eeproms use the 8th address bit embedded in the 1944 + * opcode 1945 + */ 1993 1946 if ((nvm->address_bits == 8) && (offset >= 128)) 1994 1947 write_opcode |= NVM_A8_OPCODE_SPI; 1995 1948 ··· 2036 1985 /* Check for an alternate MAC address. An alternate MAC 2037 1986 * address can be setup by pre-boot software and must be 2038 1987 * treated like a permanent address and must override the 2039 - * actual permanent MAC address. 
*/ 1988 + * actual permanent MAC address.*/ 2040 1989 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, 2041 - &mac_addr_offset); 1990 + &mac_addr_offset); 2042 1991 if (ret_val) { 2043 1992 hw_dbg(hw, "NVM Read Error\n"); 2044 1993 return ret_val; ··· 2051 2000 mac_addr_offset += ETH_ALEN/sizeof(u16); 2052 2001 2053 2002 /* make sure we have a valid mac address here 2054 - * before using it */ 2003 + * before using it */ 2055 2004 ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, 2056 2005 &nvm_data); 2057 2006 if (ret_val) { ··· 2063 2012 } 2064 2013 2065 2014 if (mac_addr_offset) 2066 - hw->dev_spec.e82571.alt_mac_addr_is_present = 1; 2015 + hw->dev_spec.e82571.alt_mac_addr_is_present = 1; 2067 2016 } 2068 2017 2069 2018 for (i = 0; i < ETH_ALEN; i += 2) { ··· 2239 2188 } 2240 2189 2241 2190 /** 2242 - * e1000e_enable_tx_pkt_filtering - Enable packet filtering on TX 2191 + * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx 2243 2192 * @hw: pointer to the HW structure 2244 2193 * 2245 2194 * Enables packet filtering on transmit packets if manageability is enabled ··· 2259 2208 return 0; 2260 2209 } 2261 2210 2262 - /* If we can't read from the host interface for whatever 2211 + /* 2212 + * If we can't read from the host interface for whatever 2263 2213 * reason, disable filtering. 2264 2214 */ 2265 2215 ret_val = e1000_mng_enable_host_if(hw); ··· 2278 2226 hdr->checksum = 0; 2279 2227 csum = e1000_calculate_checksum((u8 *)hdr, 2280 2228 E1000_MNG_DHCP_COOKIE_LENGTH); 2281 - /* If either the checksums or signature don't match, then 2229 + /* 2230 + * If either the checksums or signature don't match, then 2282 2231 * the cookie area isn't considered valid, in which case we 2283 2232 * take the safe route of assuming Tx filtering is enabled. 2284 2233 */ ··· 2371 2318 /* Calculate length in DWORDs */ 2372 2319 length >>= 2; 2373 2320 2374 - /* The device driver writes the relevant command block into the 2375 - * ram area. 
*/ 2321 + /* 2322 + * The device driver writes the relevant command block into the 2323 + * ram area. 2324 + */ 2376 2325 for (i = 0; i < length; i++) { 2377 2326 for (j = 0; j < sizeof(u32); j++) { 2378 2327 *(tmp + j) = *bufptr++;
+279 -149
drivers/net/e1000e/netdev.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel PRO/1000 Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 82 82 } 83 83 84 84 /** 85 - * e1000_receive_skb - helper function to handle rx indications 85 + * e1000_receive_skb - helper function to handle Rx indications 86 86 * @adapter: board private structure 87 87 * @status: descriptor status field as written by hardware 88 88 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) ··· 138 138 /* TCP checksum is good */ 139 139 skb->ip_summed = CHECKSUM_UNNECESSARY; 140 140 } else { 141 - /* IP fragment with UDP payload */ 142 - /* Hardware complements the payload checksum, so we undo it 141 + /* 142 + * IP fragment with UDP payload 143 + * Hardware complements the payload checksum, so we undo it 143 144 * and then put the value in host order for further stack use. 144 145 */ 145 146 __sum16 sum = (__force __sum16)htons(csum); ··· 183 182 break; 184 183 } 185 184 186 - /* Make buffer alignment 2 beyond a 16 byte boundary 185 + /* 186 + * Make buffer alignment 2 beyond a 16 byte boundary 187 187 * this will result in a 16 byte aligned IP header after 188 188 * the 14 byte MAC header is removed 189 189 */ ··· 215 213 if (i-- == 0) 216 214 i = (rx_ring->count - 1); 217 215 218 - /* Force memory writes to complete before letting h/w 216 + /* 217 + * Force memory writes to complete before letting h/w 219 218 * know there are new descriptors to fetch. (Only 220 219 * applicable for weak-ordered memory model archs, 221 - * such as IA-64). */ 220 + * such as IA-64). 
221 + */ 222 222 wmb(); 223 223 writel(i, adapter->hw.hw_addr + rx_ring->tail); 224 224 } ··· 289 285 break; 290 286 } 291 287 292 - /* Make buffer alignment 2 beyond a 16 byte boundary 288 + /* 289 + * Make buffer alignment 2 beyond a 16 byte boundary 293 290 * this will result in a 16 byte aligned IP header after 294 291 * the 14 byte MAC header is removed 295 292 */ ··· 324 319 if (!(i--)) 325 320 i = (rx_ring->count - 1); 326 321 327 - /* Force memory writes to complete before letting h/w 322 + /* 323 + * Force memory writes to complete before letting h/w 328 324 * know there are new descriptors to fetch. (Only 329 325 * applicable for weak-ordered memory model archs, 330 - * such as IA-64). */ 326 + * such as IA-64). 327 + */ 331 328 wmb(); 332 - /* Hardware increments by 16 bytes, but packet split 329 + /* 330 + * Hardware increments by 16 bytes, but packet split 333 331 * descriptors are 32 bytes...so we increment tail 334 332 * twice as much. 335 333 */ ··· 417 409 total_rx_bytes += length; 418 410 total_rx_packets++; 419 411 420 - /* code added for copybreak, this should improve 412 + /* 413 + * code added for copybreak, this should improve 421 414 * performance for small packets with large amounts 422 - * of reassembly being done in the stack */ 415 + * of reassembly being done in the stack 416 + */ 423 417 if (length < copybreak) { 424 418 struct sk_buff *new_skb = 425 419 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); ··· 591 581 } 592 582 593 583 if (adapter->detect_tx_hung) { 594 - /* Detect a transmit hang in hardware, this serializes the 595 - * check with the clearing of time_stamp and movement of i */ 584 + /* 585 + * Detect a transmit hang in hardware, this serializes the 586 + * check with the clearing of time_stamp and movement of i 587 + */ 596 588 adapter->detect_tx_hung = 0; 597 589 if (tx_ring->buffer_info[eop].dma && 598 590 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp 599 591 + (adapter->tx_timeout_factor * HZ)) 600 - && 
!(er32(STATUS) & 601 - E1000_STATUS_TXOFF)) { 592 + && !(er32(STATUS) & E1000_STATUS_TXOFF)) { 602 593 e1000_print_tx_hang(adapter); 603 594 netif_stop_queue(netdev); 604 595 } ··· 688 677 skb_put(skb, length); 689 678 690 679 { 691 - /* this looks ugly, but it seems compiler issues make it 692 - more efficient than reusing j */ 680 + /* 681 + * this looks ugly, but it seems compiler issues make it 682 + * more efficient than reusing j 683 + */ 693 684 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); 694 685 695 - /* page alloc/put takes too long and effects small packet 696 - * throughput, so unsplit small packets and save the alloc/put*/ 686 + /* 687 + * page alloc/put takes too long and effects small packet 688 + * throughput, so unsplit small packets and save the alloc/put 689 + * only valid in softirq (napi) context to call kmap_* 690 + */ 697 691 if (l1 && (l1 <= copybreak) && 698 692 ((length + l1) <= adapter->rx_ps_bsize0)) { 699 693 u8 *vaddr; 700 694 701 695 ps_page = &buffer_info->ps_pages[0]; 702 696 703 - /* there is no documentation about how to call 697 + /* 698 + * there is no documentation about how to call 704 699 * kmap_atomic, so we can't hold the mapping 705 - * very long */ 700 + * very long 701 + */ 706 702 pci_dma_sync_single_for_cpu(pdev, ps_page->dma, 707 703 PAGE_SIZE, PCI_DMA_FROMDEVICE); 708 704 vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); ··· 854 836 struct e1000_hw *hw = &adapter->hw; 855 837 u32 icr = er32(ICR); 856 838 857 - /* read ICR disables interrupts using IAM */ 839 + /* 840 + * read ICR disables interrupts using IAM 841 + */ 858 842 859 843 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 860 844 hw->mac.get_link_status = 1; 861 - /* ICH8 workaround-- Call gig speed drop workaround on cable 862 - * disconnect (LSC) before accessing any PHY registers */ 845 + /* 846 + * ICH8 workaround-- Call gig speed drop workaround on cable 847 + * disconnect (LSC) before accessing any PHY registers 848 + */ 863 849 if 
((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 864 850 (!(er32(STATUS) & E1000_STATUS_LU))) 865 851 e1000e_gig_downshift_workaround_ich8lan(hw); 866 852 867 - /* 80003ES2LAN workaround-- For packet buffer work-around on 853 + /* 854 + * 80003ES2LAN workaround-- For packet buffer work-around on 868 855 * link down event; disable receives here in the ISR and reset 869 - * adapter in watchdog */ 856 + * adapter in watchdog 857 + */ 870 858 if (netif_carrier_ok(netdev) && 871 859 adapter->flags & FLAG_RX_NEEDS_RESTART) { 872 860 /* disable receives */ ··· 910 886 if (!icr) 911 887 return IRQ_NONE; /* Not our interrupt */ 912 888 913 - /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 914 - * not set, then the adapter didn't send an interrupt */ 889 + /* 890 + * IMS will not auto-mask if INT_ASSERTED is not set, and if it is 891 + * not set, then the adapter didn't send an interrupt 892 + */ 915 893 if (!(icr & E1000_ICR_INT_ASSERTED)) 916 894 return IRQ_NONE; 917 895 918 - /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 919 - * need for the IMC write */ 896 + /* 897 + * Interrupt Auto-Mask...upon reading ICR, 898 + * interrupts are masked. 
No need for the 899 + * IMC write 900 + */ 920 901 921 902 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 922 903 hw->mac.get_link_status = 1; 923 - /* ICH8 workaround-- Call gig speed drop workaround on cable 924 - * disconnect (LSC) before accessing any PHY registers */ 904 + /* 905 + * ICH8 workaround-- Call gig speed drop workaround on cable 906 + * disconnect (LSC) before accessing any PHY registers 907 + */ 925 908 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 926 909 (!(er32(STATUS) & E1000_STATUS_LU))) 927 910 e1000e_gig_downshift_workaround_ich8lan(hw); 928 911 929 - /* 80003ES2LAN workaround-- 912 + /* 913 + * 80003ES2LAN workaround-- 930 914 * For packet buffer work-around on link down event; 931 915 * disable receives here in the ISR and 932 916 * reset adapter in watchdog ··· 1043 1011 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); 1044 1012 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { 1045 1013 ctrl_ext = er32(CTRL_EXT); 1046 - ew32(CTRL_EXT, 1047 - ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 1014 + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 1048 1015 } 1049 1016 } 1050 1017 ··· 1069 1038 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); 1070 1039 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { 1071 1040 ctrl_ext = er32(CTRL_EXT); 1072 - ew32(CTRL_EXT, 1073 - ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 1041 + ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 1074 1042 } 1075 1043 } 1076 1044 ··· 1371 1341 1372 1342 set_itr_now: 1373 1343 if (new_itr != adapter->itr) { 1374 - /* this attempts to bias the interrupt rate towards Bulk 1344 + /* 1345 + * this attempts to bias the interrupt rate towards Bulk 1375 1346 * by adding intermediate steps when interrupt rate is 1376 - * increasing */ 1347 + * increasing 1348 + */ 1377 1349 new_itr = new_itr > adapter->itr ? 
1378 1350 min(adapter->itr + (new_itr >> 2), new_itr) : 1379 1351 new_itr; ··· 1386 1354 1387 1355 /** 1388 1356 * e1000_clean - NAPI Rx polling callback 1389 - * @adapter: board private structure 1357 + * @napi: struct associated with this polling callback 1390 1358 * @budget: amount of packets driver is allowed to process this poll 1391 1359 **/ 1392 1360 static int e1000_clean(struct napi_struct *napi, int budget) ··· 1398 1366 /* Must NOT use netdev_priv macro here. */ 1399 1367 adapter = poll_dev->priv; 1400 1368 1401 - /* e1000_clean is called per-cpu. This lock protects 1369 + /* 1370 + * e1000_clean is called per-cpu. This lock protects 1402 1371 * tx_ring from being cleaned by multiple cpus 1403 1372 * simultaneously. A failure obtaining the lock means 1404 - * tx_ring is currently being cleaned anyway. */ 1373 + * tx_ring is currently being cleaned anyway. 1374 + */ 1405 1375 if (spin_trylock(&adapter->tx_queue_lock)) { 1406 1376 tx_cleaned = e1000_clean_tx_irq(adapter); 1407 1377 spin_unlock(&adapter->tx_queue_lock); ··· 1573 1539 1574 1540 manc = er32(MANC); 1575 1541 1576 - /* enable receiving management packets to the host. this will probably 1542 + /* 1543 + * enable receiving management packets to the host. 
this will probably 1577 1544 * generate destination unreachable messages from the host OS, but 1578 - * the packets will be handled on SMBUS */ 1545 + * the packets will be handled on SMBUS 1546 + */ 1579 1547 manc |= E1000_MANC_EN_MNG2HOST; 1580 1548 manc2h = er32(MANC2H); 1581 1549 #define E1000_MNG2HOST_PORT_623 (1 << 5) ··· 1627 1591 1628 1592 /* Set the Tx Interrupt Delay register */ 1629 1593 ew32(TIDV, adapter->tx_int_delay); 1630 - /* tx irq moderation */ 1594 + /* Tx irq moderation */ 1631 1595 ew32(TADV, adapter->tx_abs_int_delay); 1632 1596 1633 1597 /* Program the Transmit Control Register */ ··· 1638 1602 1639 1603 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { 1640 1604 tarc = er32(TARC0); 1641 - /* set the speed mode bit, we'll clear it if we're not at 1642 - * gigabit link later */ 1605 + /* 1606 + * set the speed mode bit, we'll clear it if we're not at 1607 + * gigabit link later 1608 + */ 1643 1609 #define SPEED_MODE_BIT (1 << 21) 1644 1610 tarc |= SPEED_MODE_BIT; 1645 1611 ew32(TARC0, tarc); ··· 1762 1724 /* Configure extra packet-split registers */ 1763 1725 rfctl = er32(RFCTL); 1764 1726 rfctl |= E1000_RFCTL_EXTEN; 1765 - /* disable packet split support for IPv6 extension headers, 1766 - * because some malformed IPv6 headers can hang the RX */ 1727 + /* 1728 + * disable packet split support for IPv6 extension headers, 1729 + * because some malformed IPv6 headers can hang the Rx 1730 + */ 1767 1731 rfctl |= (E1000_RFCTL_IPV6_EX_DIS | 1768 1732 E1000_RFCTL_NEW_IPV6_EXT_DIS); 1769 1733 ··· 1834 1794 /* irq moderation */ 1835 1795 ew32(RADV, adapter->rx_abs_int_delay); 1836 1796 if (adapter->itr_setting != 0) 1837 - ew32(ITR, 1838 - 1000000000 / (adapter->itr * 256)); 1797 + ew32(ITR, 1000000000 / (adapter->itr * 256)); 1839 1798 1840 1799 ctrl_ext = er32(CTRL_EXT); 1841 1800 /* Reset delay timers after every interrupt */ ··· 1845 1806 ew32(CTRL_EXT, ctrl_ext); 1846 1807 e1e_flush(); 1847 1808 1848 - /* Setup the HW Rx Head and Tail Descriptor 
Pointers and 1849 - * the Base and Length of the Rx Descriptor Ring */ 1809 + /* 1810 + * Setup the HW Rx Head and Tail Descriptor Pointers and 1811 + * the Base and Length of the Rx Descriptor Ring 1812 + */ 1850 1813 rdba = rx_ring->dma; 1851 1814 ew32(RDBAL, (rdba & DMA_32BIT_MASK)); 1852 1815 ew32(RDBAH, (rdba >> 32)); ··· 1863 1822 if (adapter->flags & FLAG_RX_CSUM_ENABLED) { 1864 1823 rxcsum |= E1000_RXCSUM_TUOFL; 1865 1824 1866 - /* IPv4 payload checksum for UDP fragments must be 1867 - * used in conjunction with packet-split. */ 1825 + /* 1826 + * IPv4 payload checksum for UDP fragments must be 1827 + * used in conjunction with packet-split. 1828 + */ 1868 1829 if (adapter->rx_ps_pages) 1869 1830 rxcsum |= E1000_RXCSUM_IPPCSE; 1870 1831 } else { ··· 1875 1832 } 1876 1833 ew32(RXCSUM, rxcsum); 1877 1834 1878 - /* Enable early receives on supported devices, only takes effect when 1835 + /* 1836 + * Enable early receives on supported devices, only takes effect when 1879 1837 * packet size is equal or larger than the specified value (in 8 byte 1880 - * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */ 1838 + * units), e.g. 
using jumbo frames when setting to E1000_ERT_2048 1839 + */ 1881 1840 if ((adapter->flags & FLAG_HAS_ERT) && 1882 1841 (adapter->netdev->mtu > ETH_DATA_LEN)) 1883 1842 ew32(ERT, E1000_ERT_2048); ··· 1975 1930 } 1976 1931 1977 1932 /** 1978 - * e1000_configure - configure the hardware for RX and TX 1933 + * e1000_configure - configure the hardware for Rx and Tx 1979 1934 * @adapter: private board structure 1980 1935 **/ 1981 1936 static void e1000_configure(struct e1000_adapter *adapter) ··· 1988 1943 e1000_configure_tx(adapter); 1989 1944 e1000_setup_rctl(adapter); 1990 1945 e1000_configure_rx(adapter); 1991 - adapter->alloc_rx_buf(adapter, 1992 - e1000_desc_unused(adapter->rx_ring)); 1946 + adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring)); 1993 1947 } 1994 1948 1995 1949 /** ··· 2005 1961 2006 1962 /* Just clear the power down bit to wake the phy back up */ 2007 1963 if (adapter->hw.media_type == e1000_media_type_copper) { 2008 - /* according to the manual, the phy will retain its 2009 - * settings across a power-down/up cycle */ 1964 + /* 1965 + * According to the manual, the phy will retain its 1966 + * settings across a power-down/up cycle 1967 + */ 2010 1968 e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg); 2011 1969 mii_reg &= ~MII_CR_POWER_DOWN; 2012 1970 e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg); ··· 2037 1991 return; 2038 1992 2039 1993 /* reset is blocked because of a SoL/IDER session */ 2040 - if (e1000e_check_mng_mode(hw) || 2041 - e1000_check_reset_block(hw)) 1994 + if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw)) 2042 1995 return; 2043 1996 2044 1997 /* manageability (AMT) is enabled */ ··· 2057 2012 * This function boots the hardware and enables some settings that 2058 2013 * require a configuration cycle of the hardware - those cannot be 2059 2014 * set/changed during runtime. After reset the device needs to be 2060 - * properly configured for rx, tx etc. 2015 + * properly configured for Rx, Tx etc. 
2061 2016 */ 2062 2017 void e1000e_reset(struct e1000_adapter *adapter) 2063 2018 { ··· 2067 2022 u32 pba; 2068 2023 u16 hwm; 2069 2024 2025 + /* reset Packet Buffer Allocation to default */ 2070 2026 ew32(PBA, adapter->pba); 2071 2027 2072 2028 if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) { 2073 - /* To maintain wire speed transmits, the Tx FIFO should be 2029 + /* 2030 + * To maintain wire speed transmits, the Tx FIFO should be 2074 2031 * large enough to accommodate two full transmit packets, 2075 2032 * rounded up to the next 1KB and expressed in KB. Likewise, 2076 2033 * the Rx FIFO should be large enough to accommodate at least 2077 2034 * one full receive packet and is similarly rounded up and 2078 - * expressed in KB. */ 2035 + * expressed in KB. 2036 + */ 2079 2037 pba = er32(PBA); 2080 2038 /* upper 16 bits has Tx packet buffer allocation size in KB */ 2081 2039 tx_space = pba >> 16; 2082 2040 /* lower 16 bits has Rx packet buffer allocation size in KB */ 2083 2041 pba &= 0xffff; 2084 - /* the tx fifo also stores 16 bytes of information about the tx 2085 - * but don't include ethernet FCS because hardware appends it */ 2086 - min_tx_space = (mac->max_frame_size + 2042 + /* 2043 + * the Tx fifo also stores 16 bytes of information about the tx 2044 + * but don't include ethernet FCS because hardware appends it 2045 + */ min_tx_space = (mac->max_frame_size + 2087 2046 sizeof(struct e1000_tx_desc) - 2088 2047 ETH_FCS_LEN) * 2; 2089 2048 min_tx_space = ALIGN(min_tx_space, 1024); ··· 2097 2048 min_rx_space = ALIGN(min_rx_space, 1024); 2098 2049 min_rx_space >>= 10; 2099 2050 2100 - /* If current Tx allocation is less than the min Tx FIFO size, 2051 + /* 2052 + * If current Tx allocation is less than the min Tx FIFO size, 2101 2053 * and the min Tx FIFO size is less than the current Rx FIFO 2102 - * allocation, take space away from current Rx allocation */ 2054 + * allocation, take space away from current Rx allocation 2055 + */ 2103 2056 if 
((tx_space < min_tx_space) && 2104 2057 ((min_tx_space - tx_space) < pba)) { 2105 2058 pba -= min_tx_space - tx_space; 2106 2059 2107 - /* if short on rx space, rx wins and must trump tx 2108 - * adjustment or use Early Receive if available */ 2060 + /* 2061 + * if short on Rx space, Rx wins and must trump tx 2062 + * adjustment or use Early Receive if available 2063 + */ 2109 2064 if ((pba < min_rx_space) && 2110 2065 (!(adapter->flags & FLAG_HAS_ERT))) 2111 2066 /* ERT enabled in e1000_configure_rx */ ··· 2120 2067 } 2121 2068 2122 2069 2123 - /* flow control settings */ 2124 - /* The high water mark must be low enough to fit one full frame 2070 + /* 2071 + * flow control settings 2072 + * 2073 + * The high water mark must be low enough to fit one full frame 2125 2074 * (or the size used for early receive) above it in the Rx FIFO. 2126 2075 * Set it to the lower of: 2127 2076 * - 90% of the Rx FIFO size, and 2128 2077 * - the full Rx FIFO size minus the early receive size (for parts 2129 2078 * with ERT support assuming ERT set to E1000_ERT_2048), or 2130 - * - the full Rx FIFO size minus one full frame */ 2079 + * - the full Rx FIFO size minus one full frame 2080 + */ 2131 2081 if (adapter->flags & FLAG_HAS_ERT) 2132 2082 hwm = min(((adapter->pba << 10) * 9 / 10), 2133 2083 ((adapter->pba << 10) - (E1000_ERT_2048 << 3))); ··· 2164 2108 2165 2109 if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) { 2166 2110 u16 phy_data = 0; 2167 - /* speed up time to link by disabling smart power down, ignore 2111 + /* 2112 + * speed up time to link by disabling smart power down, ignore 2168 2113 * the return value of this function because there is nothing 2169 - * different we would do if it failed */ 2114 + * different we would do if it failed 2115 + */ 2170 2116 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); 2171 2117 phy_data &= ~IGP02E1000_PM_SPD; 2172 2118 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); ··· 2198 2140 struct e1000_hw *hw = &adapter->hw; 2199 2141 u32 
tctl, rctl; 2200 2142 2201 - /* signal that we're down so the interrupt handler does not 2202 - * reschedule our watchdog timer */ 2143 + /* 2144 + * signal that we're down so the interrupt handler does not 2145 + * reschedule our watchdog timer 2146 + */ 2203 2147 set_bit(__E1000_DOWN, &adapter->state); 2204 2148 2205 2149 /* disable receives in the hardware */ ··· 2332 2272 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) 2333 2273 e1000_update_mng_vlan(adapter); 2334 2274 2335 - /* If AMT is enabled, let the firmware know that the network 2336 - * interface is now open */ 2275 + /* 2276 + * If AMT is enabled, let the firmware know that the network 2277 + * interface is now open 2278 + */ 2337 2279 if ((adapter->flags & FLAG_HAS_AMT) && 2338 2280 e1000e_check_mng_mode(&adapter->hw)) 2339 2281 e1000_get_hw_control(adapter); 2340 2282 2341 - /* before we allocate an interrupt, we must be ready to handle it. 2283 + /* 2284 + * before we allocate an interrupt, we must be ready to handle it. 2342 2285 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 2343 2286 * as soon as we call pci_request_irq, so we have to setup our 2344 - * clean_rx handler before we do so. */ 2287 + * clean_rx handler before we do so. 
2288 + */ 2345 2289 e1000_configure(adapter); 2346 2290 2347 2291 err = e1000_request_irq(adapter); ··· 2399 2335 e1000e_free_tx_resources(adapter); 2400 2336 e1000e_free_rx_resources(adapter); 2401 2337 2402 - /* kill manageability vlan ID if supported, but not if a vlan with 2403 - * the same ID is registered on the host OS (let 8021q kill it) */ 2338 + /* 2339 + * kill manageability vlan ID if supported, but not if a vlan with 2340 + * the same ID is registered on the host OS (let 8021q kill it) 2341 + */ 2404 2342 if ((adapter->hw.mng_cookie.status & 2405 2343 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2406 2344 !(adapter->vlgrp && 2407 2345 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) 2408 2346 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 2409 2347 2410 - /* If AMT is enabled, let the firmware know that the network 2411 - * interface is now closed */ 2348 + /* 2349 + * If AMT is enabled, let the firmware know that the network 2350 + * interface is now closed 2351 + */ 2412 2352 if ((adapter->flags & FLAG_HAS_AMT) && 2413 2353 e1000e_check_mng_mode(&adapter->hw)) 2414 2354 e1000_release_hw_control(adapter); ··· 2443 2375 /* activate the work around */ 2444 2376 e1000e_set_laa_state_82571(&adapter->hw, 1); 2445 2377 2446 - /* Hold a copy of the LAA in RAR[14] This is done so that 2378 + /* 2379 + * Hold a copy of the LAA in RAR[14] This is done so that 2447 2380 * between the time RAR[0] gets clobbered and the time it 2448 2381 * gets fixed (in e1000_watchdog), the actual LAA is in one 2449 2382 * of the RARs and no incoming packets directed to this port 2450 2383 * are dropped. 
Eventually the LAA will be in RAR[0] and 2451 - * RAR[14] */ 2384 + * RAR[14] 2385 + */ 2452 2386 e1000e_rar_set(&adapter->hw, 2453 2387 adapter->hw.mac.addr, 2454 2388 adapter->hw.mac.rar_entry_count - 1); ··· 2459 2389 return 0; 2460 2390 } 2461 2391 2462 - /* Need to wait a few seconds after link up to get diagnostic information from 2463 - * the phy */ 2392 + /* 2393 + * Need to wait a few seconds after link up to get diagnostic information from 2394 + * the phy 2395 + */ 2464 2396 static void e1000_update_phy_info(unsigned long data) 2465 2397 { 2466 2398 struct e1000_adapter *adapter = (struct e1000_adapter *) data; ··· 2493 2421 2494 2422 spin_lock_irqsave(&adapter->stats_lock, irq_flags); 2495 2423 2496 - /* these counters are modified from e1000_adjust_tbi_stats, 2424 + /* 2425 + * these counters are modified from e1000_adjust_tbi_stats, 2497 2426 * called from the interrupt context, so they must only 2498 2427 * be written while holding adapter->stats_lock 2499 2428 */ ··· 2588 2515 2589 2516 /* Rx Errors */ 2590 2517 2591 - /* RLEC on some newer hardware can be incorrect so build 2592 - * our own version based on RUC and ROC */ 2518 + /* 2519 + * RLEC on some newer hardware can be incorrect so build 2520 + * our own version based on RUC and ROC 2521 + */ 2593 2522 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 2594 2523 adapter->stats.crcerrs + adapter->stats.algnerrc + 2595 2524 adapter->stats.ruc + adapter->stats.roc + ··· 2703 2628 &adapter->link_speed, 2704 2629 &adapter->link_duplex); 2705 2630 e1000_print_link_info(adapter); 2706 - /* tweak tx_queue_len according to speed/duplex 2707 - * and adjust the timeout factor */ 2631 + /* 2632 + * tweak tx_queue_len according to speed/duplex 2633 + * and adjust the timeout factor 2634 + */ 2708 2635 netdev->tx_queue_len = adapter->tx_queue_len; 2709 2636 adapter->tx_timeout_factor = 1; 2710 2637 switch (adapter->link_speed) { ··· 2722 2645 break; 2723 2646 } 2724 2647 2725 - /* workaround: 
re-program speed mode bit after 2726 - * link-up event */ 2648 + /* 2649 + * workaround: re-program speed mode bit after 2650 + * link-up event 2651 + */ 2727 2652 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 2728 2653 !txb2b) { 2729 2654 u32 tarc0; ··· 2734 2655 ew32(TARC0, tarc0); 2735 2656 } 2736 2657 2737 - /* disable TSO for pcie and 10/100 speeds, to avoid 2738 - * some hardware issues */ 2658 + /* 2659 + * disable TSO for pcie and 10/100 speeds, to avoid 2660 + * some hardware issues 2661 + */ 2739 2662 if (!(adapter->flags & FLAG_TSO_FORCE)) { 2740 2663 switch (adapter->link_speed) { 2741 2664 case SPEED_10: ··· 2757 2676 } 2758 2677 } 2759 2678 2760 - /* enable transmits in the hardware, need to do this 2761 - * after setting TARC0 */ 2679 + /* 2680 + * enable transmits in the hardware, need to do this 2681 + * after setting TARC(0) 2682 + */ 2762 2683 tctl = er32(TCTL); 2763 2684 tctl |= E1000_TCTL_EN; 2764 2685 ew32(TCTL, tctl); ··· 2814 2731 tx_pending = (e1000_desc_unused(tx_ring) + 1 < 2815 2732 tx_ring->count); 2816 2733 if (tx_pending) { 2817 - /* We've lost link, so the controller stops DMA, 2734 + /* 2735 + * We've lost link, so the controller stops DMA, 2818 2736 * but we've got queued Tx work that's never going 2819 2737 * to get done, so reset controller to flush Tx. 2820 - * (Do the reset outside of interrupt context). */ 2738 + * (Do the reset outside of interrupt context). 2739 + */ 2821 2740 adapter->tx_timeout_count++; 2822 2741 schedule_work(&adapter->reset_task); 2823 2742 } 2824 2743 } 2825 2744 2826 - /* Cause software interrupt to ensure rx ring is cleaned */ 2745 + /* Cause software interrupt to ensure Rx ring is cleaned */ 2827 2746 ew32(ICS, E1000_ICS_RXDMT0); 2828 2747 2829 2748 /* Force detection of hung controller every watchdog period */ 2830 2749 adapter->detect_tx_hung = 1; 2831 2750 2832 - /* With 82571 controllers, LAA may be overwritten due to controller 2833 - * reset from the other port. 
Set the appropriate LAA in RAR[0] */ 2751 + /* 2752 + * With 82571 controllers, LAA may be overwritten due to controller 2753 + * reset from the other port. Set the appropriate LAA in RAR[0] 2754 + */ 2834 2755 if (e1000e_get_laa_state_82571(hw)) 2835 2756 e1000e_rar_set(hw, adapter->hw.mac.addr, 0); 2836 2757 ··· 3110 3023 3111 3024 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 3112 3025 3113 - /* Force memory writes to complete before letting h/w 3026 + /* 3027 + * Force memory writes to complete before letting h/w 3114 3028 * know there are new descriptors to fetch. (Only 3115 3029 * applicable for weak-ordered memory model archs, 3116 - * such as IA-64). */ 3030 + * such as IA-64). 3031 + */ 3117 3032 wmb(); 3118 3033 3119 3034 tx_ring->next_to_use = i; 3120 3035 writel(i, adapter->hw.hw_addr + tx_ring->tail); 3121 - /* we need this if more than one processor can write to our tail 3122 - * at a time, it synchronizes IO on IA64/Altix systems */ 3036 + /* 3037 + * we need this if more than one processor can write to our tail 3038 + * at a time, it synchronizes IO on IA64/Altix systems 3039 + */ 3123 3040 mmiowb(); 3124 3041 } 3125 3042 ··· 3171 3080 struct e1000_adapter *adapter = netdev_priv(netdev); 3172 3081 3173 3082 netif_stop_queue(netdev); 3174 - /* Herbert's original patch had: 3083 + /* 3084 + * Herbert's original patch had: 3175 3085 * smp_mb__after_netif_stop_queue(); 3176 - * but since that doesn't exist yet, just open code it. */ 3086 + * but since that doesn't exist yet, just open code it. 3087 + */ 3177 3088 smp_mb(); 3178 3089 3179 - /* We need to check again in a case another CPU has just 3180 - * made room available. */ 3090 + /* 3091 + * We need to check again in a case another CPU has just 3092 + * made room available. 
3093 + */ 3181 3094 if (e1000_desc_unused(adapter->tx_ring) < size) 3182 3095 return -EBUSY; 3183 3096 ··· 3228 3133 } 3229 3134 3230 3135 mss = skb_shinfo(skb)->gso_size; 3231 - /* The controller does a simple calculation to 3136 + /* 3137 + * The controller does a simple calculation to 3232 3138 * make sure there is enough room in the FIFO before 3233 3139 * initiating the DMA for each buffer. The calc is: 3234 3140 * 4 = ceil(buffer len/mss). To make sure we don't 3235 3141 * overrun the FIFO, adjust the max buffer len if mss 3236 - * drops. */ 3142 + * drops. 3143 + */ 3237 3144 if (mss) { 3238 3145 u8 hdr_len; 3239 3146 max_per_txd = min(mss << 2, max_per_txd); 3240 3147 max_txd_pwr = fls(max_per_txd) - 1; 3241 3148 3242 - /* TSO Workaround for 82571/2/3 Controllers -- if skb->data 3243 - * points to just header, pull a few bytes of payload from 3244 - * frags into skb->data */ 3149 + /* 3150 + * TSO Workaround for 82571/2/3 Controllers -- if skb->data 3151 + * points to just header, pull a few bytes of payload from 3152 + * frags into skb->data 3153 + */ 3245 3154 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 3155 + /* 3156 + * we do this workaround for ES2LAN, but it is un-necessary, 3157 + * avoiding it could save a lot of cycles 3158 + */ 3246 3159 if (skb->data_len && (hdr_len == len)) { 3247 3160 unsigned int pull_size; 3248 3161 ··· 3284 3181 /* Collision - tell upper layer to requeue */ 3285 3182 return NETDEV_TX_LOCKED; 3286 3183 3287 - /* need: count + 2 desc gap to keep tail from touching 3288 - * head, otherwise try next time */ 3184 + /* 3185 + * need: count + 2 desc gap to keep tail from touching 3186 + * head, otherwise try next time 3187 + */ 3289 3188 if (e1000_maybe_stop_tx(netdev, count + 2)) { 3290 3189 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); 3291 3190 return NETDEV_TX_BUSY; ··· 3312 3207 else if (e1000_tx_csum(adapter, skb)) 3313 3208 tx_flags |= E1000_TX_FLAGS_CSUM; 3314 3209 3315 - /* Old method was to assume 
IPv4 packet by default if TSO was enabled. 3210 + /* 3211 + * Old method was to assume IPv4 packet by default if TSO was enabled. 3316 3212 * 82571 hardware supports TSO capabilities for IPv6 as well... 3317 - * no longer assume, we must. */ 3213 + * no longer assume, we must. 3214 + */ 3318 3215 if (skb->protocol == htons(ETH_P_IP)) 3319 3216 tx_flags |= E1000_TX_FLAGS_IPV4; 3320 3217 ··· 3418 3311 if (netif_running(netdev)) 3419 3312 e1000e_down(adapter); 3420 3313 3421 - /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3314 + /* 3315 + * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3422 3316 * means we reserve 2 more, this pushes us to allocate from the next 3423 3317 * larger slab size. 3424 - * i.e. RXBUFFER_2048 --> size-4096 slab */ 3318 + * i.e. RXBUFFER_2048 --> size-4096 slab 3319 + */ 3425 3320 3426 3321 if (max_frame <= 256) 3427 3322 adapter->rx_buffer_len = 256; ··· 3440 3331 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 3441 3332 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) 3442 3333 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 3443 - + ETH_FCS_LEN ; 3334 + + ETH_FCS_LEN; 3444 3335 3445 3336 ndev_info(netdev, "changing MTU from %d to %d\n", 3446 3337 netdev->mtu, new_mtu); ··· 3576 3467 if (adapter->hw.phy.type == e1000_phy_igp_3) 3577 3468 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 3578 3469 3579 - /* Release control of h/w to f/w. If f/w is AMT enabled, this 3580 - * would have already happened in close and is redundant. */ 3470 + /* 3471 + * Release control of h/w to f/w. If f/w is AMT enabled, this 3472 + * would have already happened in close and is redundant. 
3473 + */ 3581 3474 e1000_release_hw_control(adapter); 3582 3475 3583 3476 pci_disable_device(pdev); ··· 3654 3543 3655 3544 netif_device_attach(netdev); 3656 3545 3657 - /* If the controller has AMT, do not set DRV_LOAD until the interface 3546 + /* 3547 + * If the controller has AMT, do not set DRV_LOAD until the interface 3658 3548 * is up. For all other cases, let the f/w know that the h/w is now 3659 - * under the control of the driver. */ 3549 + * under the control of the driver. 3550 + */ 3660 3551 if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw)) 3661 3552 e1000_get_hw_control(adapter); 3662 3553 ··· 3769 3656 3770 3657 netif_device_attach(netdev); 3771 3658 3772 - /* If the controller has AMT, do not set DRV_LOAD until the interface 3659 + /* 3660 + * If the controller has AMT, do not set DRV_LOAD until the interface 3773 3661 * is up. For all other cases, let the f/w know that the h/w is now 3774 - * under the control of the driver. */ 3662 + * under the control of the driver. 3663 + */ 3775 3664 if (!(adapter->flags & FLAG_HAS_AMT) || 3776 3665 !e1000e_check_mng_mode(&adapter->hw)) 3777 3666 e1000_get_hw_control(adapter); ··· 3967 3852 if (pci_using_dac) 3968 3853 netdev->features |= NETIF_F_HIGHDMA; 3969 3854 3970 - /* We should not be using LLTX anymore, but we are still TX faster with 3971 - * it. */ 3855 + /* 3856 + * We should not be using LLTX anymore, but we are still Tx faster with 3857 + * it. 
3858 + */ 3972 3859 netdev->features |= NETIF_F_LLTX; 3973 3860 3974 3861 if (e1000e_enable_mng_pass_thru(&adapter->hw)) 3975 3862 adapter->flags |= FLAG_MNG_PT_ENABLED; 3976 3863 3977 - /* before reading the NVM, reset the controller to 3978 - * put the device in a known good starting state */ 3864 + /* 3865 + * before reading the NVM, reset the controller to 3866 + * put the device in a known good starting state 3867 + */ 3979 3868 adapter->hw.mac.ops.reset_hw(&adapter->hw); 3980 3869 3981 3870 /* ··· 4073 3954 /* reset the hardware with the new settings */ 4074 3955 e1000e_reset(adapter); 4075 3956 4076 - /* If the controller has AMT, do not set DRV_LOAD until the interface 3957 + /* 3958 + * If the controller has AMT, do not set DRV_LOAD until the interface 4077 3959 * is up. For all other cases, let the f/w know that the h/w is now 4078 - * under the control of the driver. */ 3960 + * under the control of the driver. 3961 + */ 4079 3962 if (!(adapter->flags & FLAG_HAS_AMT) || 4080 3963 !e1000e_check_mng_mode(&adapter->hw)) 4081 3964 e1000_get_hw_control(adapter); ··· 4134 4013 struct net_device *netdev = pci_get_drvdata(pdev); 4135 4014 struct e1000_adapter *adapter = netdev_priv(netdev); 4136 4015 4137 - /* flush_scheduled work may reschedule our watchdog task, so 4138 - * explicitly disable watchdog tasks from being rescheduled */ 4016 + /* 4017 + * flush_scheduled work may reschedule our watchdog task, so 4018 + * explicitly disable watchdog tasks from being rescheduled 4019 + */ 4139 4020 set_bit(__E1000_DOWN, &adapter->state); 4140 4021 del_timer_sync(&adapter->watchdog_timer); 4141 4022 del_timer_sync(&adapter->phy_info_timer); 4142 4023 4143 4024 flush_scheduled_work(); 4144 4025 4145 - /* Release control of h/w to f/w. If f/w is AMT enabled, this 4146 - * would have already happened in close and is redundant. */ 4026 + /* 4027 + * Release control of h/w to f/w. If f/w is AMT enabled, this 4028 + * would have already happened in close and is redundant. 
4029 + */ 4147 4030 e1000_release_hw_control(adapter); 4148 4031 4149 4032 unregister_netdev(netdev); ··· 4185 4060 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, 4186 4061 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, 4187 4062 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, 4063 + 4188 4064 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, 4189 4065 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, 4190 4066 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, 4191 4067 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, 4068 + 4192 4069 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, 4193 4070 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, 4194 4071 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, 4072 + 4195 4073 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), 4196 4074 board_80003es2lan }, 4197 4075 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), ··· 4203 4075 board_80003es2lan }, 4204 4076 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), 4205 4077 board_80003es2lan }, 4078 + 4206 4079 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, 4207 4080 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, 4208 4081 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, ··· 4211 4082 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, 4212 4083 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, 4213 4084 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, 4085 + 4214 4086 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, 4215 4087 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, 4216 4088 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, ··· 4229 4099 .probe = e1000_probe, 4230 4100 .remove = __devexit_p(e1000_remove), 4231 4101 #ifdef CONFIG_PM 4232 - /* Power 
Managment Hooks */ 4102 + /* Power Management Hooks */ 4233 4103 .suspend = e1000_suspend, 4234 4104 .resume = e1000_resume, 4235 4105 #endif ··· 4248 4118 int ret; 4249 4119 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", 4250 4120 e1000e_driver_name, e1000e_driver_version); 4251 - printk(KERN_INFO "%s: Copyright (c) 1999-2007 Intel Corporation.\n", 4121 + printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", 4252 4122 e1000e_driver_name); 4253 4123 ret = pci_register_driver(&e1000_driver); 4254 4124
+21 -12
drivers/net/e1000e/param.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel PRO/1000 Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 30 30 31 31 #include "e1000.h" 32 32 33 - /* This is the only thing that needs to be changed to adjust the 33 + /* 34 + * This is the only thing that needs to be changed to adjust the 34 35 * maximum number of ports that the driver can manage. 35 36 */ 36 37 ··· 47 46 MODULE_PARM_DESC(copybreak, 48 47 "Maximum size of packet that is copied to a new buffer on receive"); 49 48 50 - /* All parameters are treated the same, as an integer array of values. 49 + /* 50 + * All parameters are treated the same, as an integer array of values. 51 51 * This macro just reduces the need to repeat the same declaration code 52 52 * over and over (plus this helps to avoid typo bugs). 53 53 */ ··· 62 60 MODULE_PARM_DESC(X, desc); 63 61 64 62 65 - /* Transmit Interrupt Delay in units of 1.024 microseconds 66 - * Tx interrupt delay needs to typically be set to something non zero 63 + /* 64 + * Transmit Interrupt Delay in units of 1.024 microseconds 65 + * Tx interrupt delay needs to typically be set to something non zero 67 66 * 68 67 * Valid Range: 0-65535 69 68 */ ··· 73 70 #define MAX_TXDELAY 0xFFFF 74 71 #define MIN_TXDELAY 0 75 72 76 - /* Transmit Absolute Interrupt Delay in units of 1.024 microseconds 73 + /* 74 + * Transmit Absolute Interrupt Delay in units of 1.024 microseconds 77 75 * 78 76 * Valid Range: 0-65535 79 77 */ ··· 83 79 #define MAX_TXABSDELAY 0xFFFF 84 80 #define MIN_TXABSDELAY 0 85 81 86 - /* Receive Interrupt Delay in units of 1.024 microseconds 87 - * hardware will likely hang if you set this to anything but zero. 
82 + /* 83 + * Receive Interrupt Delay in units of 1.024 microseconds 84 + * hardware will likely hang if you set this to anything but zero. 88 85 * 89 86 * Valid Range: 0-65535 90 87 */ ··· 94 89 #define MAX_RXDELAY 0xFFFF 95 90 #define MIN_RXDELAY 0 96 91 97 - /* Receive Absolute Interrupt Delay in units of 1.024 microseconds 92 + /* 93 + * Receive Absolute Interrupt Delay in units of 1.024 microseconds 98 94 * 99 95 * Valid Range: 0-65535 100 96 */ ··· 104 98 #define MAX_RXABSDELAY 0xFFFF 105 99 #define MIN_RXABSDELAY 0 106 100 107 - /* Interrupt Throttle Rate (interrupts/sec) 101 + /* 102 + * Interrupt Throttle Rate (interrupts/sec) 108 103 * 109 104 * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) 110 105 */ ··· 114 107 #define MAX_ITR 100000 115 108 #define MIN_ITR 100 116 109 117 - /* Enable Smart Power Down of the PHY 110 + /* 111 + * Enable Smart Power Down of the PHY 118 112 * 119 113 * Valid Range: 0, 1 120 114 * ··· 123 115 */ 124 116 E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); 125 117 126 - /* Enable Kumeran Lock Loss workaround 118 + /* 119 + * Enable Kumeran Lock Loss workaround 127 120 * 128 121 * Valid Range: 0, 1 129 122 *
+97 -55
drivers/net/e1000e/phy.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel PRO/1000 Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 134 134 return -E1000_ERR_PARAM; 135 135 } 136 136 137 - /* Set up Op-code, Phy Address, and register offset in the MDI 137 + /* 138 + * Set up Op-code, Phy Address, and register offset in the MDI 138 139 * Control register. The MAC will take care of interfacing with the 139 140 * PHY to retrieve the desired data. 140 141 */ ··· 145 144 146 145 ew32(MDIC, mdic); 147 146 148 - /* Poll the ready bit to see if the MDI read completed */ 147 + /* 148 + * Poll the ready bit to see if the MDI read completed 149 + * Increasing the time out as testing showed failures with 150 + * the lower time out 151 + */ 149 152 for (i = 0; i < 64; i++) { 150 153 udelay(50); 151 154 mdic = er32(MDIC); ··· 187 182 return -E1000_ERR_PARAM; 188 183 } 189 184 190 - /* Set up Op-code, Phy Address, and register offset in the MDI 185 + /* 186 + * Set up Op-code, Phy Address, and register offset in the MDI 191 187 * Control register. The MAC will take care of interfacing with the 192 188 * PHY to retrieve the desired data. 193 189 */ ··· 415 409 s32 ret_val; 416 410 u16 phy_data; 417 411 418 - /* Enable CRS on TX. This must be set for half-duplex operation. */ 412 + /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ 419 413 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 420 414 if (ret_val) 421 415 return ret_val; 422 416 423 417 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 424 418 425 - /* Options: 419 + /* 420 + * Options: 426 421 * MDI/MDI-X = 0 (default) 427 422 * 0 - Auto for all speeds 428 423 * 1 - MDI mode ··· 448 441 break; 449 442 } 450 443 451 - /* Options: 444 + /* 445 + * Options: 452 446 * disable_polarity_correction = 0 (default) 453 447 * Automatic Correction for Reversed Cable Polarity 454 448 * 0 - Disabled ··· 464 456 return ret_val; 465 457 466 458 if (phy->revision < 4) { 467 - /* Force TX_CLK in the Extended PHY Specific Control Register 459 + /* 460 + * Force TX_CLK in the Extended PHY Specific Control Register 468 461 * to 25MHz clock. 469 462 */ 470 463 ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); ··· 552 543 553 544 /* set auto-master slave resolution settings */ 554 545 if (hw->mac.autoneg) { 555 - /* when autonegotiation advertisement is only 1000Mbps then we 546 + /* 547 + * when autonegotiation advertisement is only 1000Mbps then we 556 548 * should disable SmartSpeed and enable Auto MasterSlave 557 - * resolution as hardware default. */ 549 + * resolution as hardware default. 550 + */ 558 551 if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { 559 552 /* Disable SmartSpeed */ 560 553 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 561 - &data); 554 + &data); 562 555 if (ret_val) 563 556 return ret_val; 564 557 565 558 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 566 559 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 567 - data); 560 + data); 568 561 if (ret_val) 569 562 return ret_val; 570 563 ··· 641 630 return ret_val; 642 631 } 643 632 644 - /* Need to parse both autoneg_advertised and fc and set up 633 + /* 634 + * Need to parse both autoneg_advertised and fc and set up 645 635 * the appropriate PHY registers. First we will parse for 646 636 * autoneg_advertised software override. 
Since we can advertise 647 637 * a plethora of combinations, we need to check each bit 648 638 * individually. 649 639 */ 650 640 651 - /* First we clear all the 10/100 mb speed bits in the Auto-Neg 641 + /* 642 + * First we clear all the 10/100 mb speed bits in the Auto-Neg 652 643 * Advertisement Register (Address 4) and the 1000 mb speed bits in 653 644 * the 1000Base-T Control Register (Address 9). 654 645 */ ··· 696 683 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; 697 684 } 698 685 699 - /* Check for a software override of the flow control settings, and 686 + /* 687 + * Check for a software override of the flow control settings, and 700 688 * setup the PHY advertisement registers accordingly. If 701 689 * auto-negotiation is enabled, then software will have to set the 702 690 * "PAUSE" bits to the correct value in the Auto-Negotiation ··· 710 696 * but not send pause frames). 711 697 * 2: Tx flow control is enabled (we can send pause frames 712 698 * but we do not support receiving pause frames). 713 - * 3: Both Rx and TX flow control (symmetric) are enabled. 699 + * 3: Both Rx and Tx flow control (symmetric) are enabled. 714 700 * other: No software override. The flow control configuration 715 701 * in the EEPROM is used. 716 702 */ 717 703 switch (hw->mac.fc) { 718 704 case e1000_fc_none: 719 - /* Flow control (RX & TX) is completely disabled by a 705 + /* 706 + * Flow control (Rx & Tx) is completely disabled by a 720 707 * software over-ride. 721 708 */ 722 709 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 723 710 break; 724 711 case e1000_fc_rx_pause: 725 - /* RX Flow control is enabled, and TX Flow control is 712 + /* 713 + * Rx Flow control is enabled, and Tx Flow control is 726 714 * disabled, by a software over-ride. 727 - */ 728 - /* Since there really isn't a way to advertise that we are 729 - * capable of RX Pause ONLY, we will advertise that we 730 - * support both symmetric and asymmetric RX PAUSE. 
Later 715 + * 716 + * Since there really isn't a way to advertise that we are 717 + * capable of Rx Pause ONLY, we will advertise that we 718 + * support both symmetric and asymmetric Rx PAUSE. Later 731 719 * (in e1000e_config_fc_after_link_up) we will disable the 732 720 * hw's ability to send PAUSE frames. 733 721 */ 734 722 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 735 723 break; 736 724 case e1000_fc_tx_pause: 737 - /* TX Flow control is enabled, and RX Flow control is 725 + /* 726 + * Tx Flow control is enabled, and Rx Flow control is 738 727 * disabled, by a software over-ride. 739 728 */ 740 729 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; 741 730 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; 742 731 break; 743 732 case e1000_fc_full: 744 - /* Flow control (both RX and TX) is enabled by a software 733 + /* 734 + * Flow control (both Rx and Tx) is enabled by a software 745 735 * over-ride. 746 736 */ 747 737 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); ··· 776 758 * Performs initial bounds checking on autoneg advertisement parameter, then 777 759 * configure to advertise the full capability. Setup the PHY to autoneg 778 760 * and restart the negotiation process between the link partner. If 779 - * wait_for_link, then wait for autoneg to complete before exiting. 761 + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. 780 762 **/ 781 763 static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) 782 764 { ··· 784 766 s32 ret_val; 785 767 u16 phy_ctrl; 786 768 787 - /* Perform some bounds checking on the autoneg advertisement 769 + /* 770 + * Perform some bounds checking on the autoneg advertisement 788 771 * parameter. 789 772 */ 790 773 phy->autoneg_advertised &= phy->autoneg_mask; 791 774 792 - /* If autoneg_advertised is zero, we assume it was not defaulted 775 + /* 776 + * If autoneg_advertised is zero, we assume it was not defaulted 793 777 * by the calling code so we set to advertise full capability. 
794 778 */ 795 779 if (phy->autoneg_advertised == 0) ··· 805 785 } 806 786 hw_dbg(hw, "Restarting Auto-Neg\n"); 807 787 808 - /* Restart auto-negotiation by setting the Auto Neg Enable bit and 788 + /* 789 + * Restart auto-negotiation by setting the Auto Neg Enable bit and 809 790 * the Auto Neg Restart bit in the PHY control register. 810 791 */ 811 792 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); ··· 818 797 if (ret_val) 819 798 return ret_val; 820 799 821 - /* Does the user want to wait for Auto-Neg to complete here, or 800 + /* 801 + * Does the user want to wait for Auto-Neg to complete here, or 822 802 * check at a later time (for example, callback routine). 823 803 */ 824 804 if (phy->wait_for_link) { ··· 851 829 bool link; 852 830 853 831 if (hw->mac.autoneg) { 854 - /* Setup autoneg and flow control advertisement and perform 855 - * autonegotiation. */ 832 + /* 833 + * Setup autoneg and flow control advertisement and perform 834 + * autonegotiation. 835 + */ 856 836 ret_val = e1000_copper_link_autoneg(hw); 857 837 if (ret_val) 858 838 return ret_val; 859 839 } else { 860 - /* PHY will be set to 10H, 10F, 100H or 100F 861 - * depending on user settings. */ 840 + /* 841 + * PHY will be set to 10H, 10F, 100H or 100F 842 + * depending on user settings. 843 + */ 862 844 hw_dbg(hw, "Forcing Speed and Duplex\n"); 863 845 ret_val = e1000_phy_force_speed_duplex(hw); 864 846 if (ret_val) { ··· 871 845 } 872 846 } 873 847 874 - /* Check link status. Wait up to 100 microseconds for link to become 848 + /* 849 + * Check link status. Wait up to 100 microseconds for link to become 875 850 * valid. 876 851 */ 877 852 ret_val = e1000e_phy_has_link_generic(hw, ··· 918 891 if (ret_val) 919 892 return ret_val; 920 893 921 - /* Clear Auto-Crossover to force MDI manually. IGP requires MDI 894 + /* 895 + * Clear Auto-Crossover to force MDI manually. IGP requires MDI 922 896 * forced whenever speed and duplex are forced. 
923 897 */ 924 898 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); ··· 969 941 * Calls the PHY setup function to force speed and duplex. Clears the 970 942 * auto-crossover to force MDI manually. Resets the PHY to commit the 971 943 * changes. If time expires while waiting for link up, we reset the DSP. 972 - * After reset, TX_CLK and CRS on TX must be set. Return successful upon 944 + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon 973 945 * successful completion, else return corresponding error code. 974 946 **/ 975 947 s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) ··· 979 951 u16 phy_data; 980 952 bool link; 981 953 982 - /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI 954 + /* 955 + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI 983 956 * forced whenever speed and duplex are forced. 984 957 */ 985 958 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); ··· 1018 989 return ret_val; 1019 990 1020 991 if (!link) { 1021 - /* We didn't get link. 992 + /* 993 + * We didn't get link. 1022 994 * Reset the DSP and cross our fingers. 1023 995 */ 1024 - ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 0x001d); 996 + ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 997 + 0x001d); 1025 998 if (ret_val) 1026 999 return ret_val; 1027 1000 ret_val = e1000e_phy_reset_dsp(hw); ··· 1042 1011 if (ret_val) 1043 1012 return ret_val; 1044 1013 1045 - /* Resetting the phy means we need to re-force TX_CLK in the 1014 + /* 1015 + * Resetting the phy means we need to re-force TX_CLK in the 1046 1016 * Extended PHY Specific Control Register to 25MHz clock from 1047 1017 * the reset value of 2.5MHz. 1048 1018 */ ··· 1052 1020 if (ret_val) 1053 1021 return ret_val; 1054 1022 1055 - /* In addition, we must re-enable CRS on Tx for both half and full 1023 + /* 1024 + * In addition, we must re-enable CRS on Tx for both half and full 1056 1025 * duplex. 
1057 1026 */ 1058 1027 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); ··· 1157 1124 data); 1158 1125 if (ret_val) 1159 1126 return ret_val; 1160 - /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 1127 + /* 1128 + * LPLU and SmartSpeed are mutually exclusive. LPLU is used 1161 1129 * during Dx states where the power conservation is most 1162 1130 * important. During driver activity we should enable 1163 - * SmartSpeed, so performance is maintained. */ 1131 + * SmartSpeed, so performance is maintained. 1132 + */ 1164 1133 if (phy->smart_speed == e1000_smart_speed_on) { 1165 1134 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 1166 - &data); 1135 + &data); 1167 1136 if (ret_val) 1168 1137 return ret_val; 1169 1138 1170 1139 data |= IGP01E1000_PSCFR_SMART_SPEED; 1171 1140 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 1172 - data); 1141 + data); 1173 1142 if (ret_val) 1174 1143 return ret_val; 1175 1144 } else if (phy->smart_speed == e1000_smart_speed_off) { 1176 1145 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 1177 - &data); 1146 + &data); 1178 1147 if (ret_val) 1179 1148 return ret_val; 1180 1149 1181 1150 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1182 1151 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 1183 - data); 1152 + data); 1184 1153 if (ret_val) 1185 1154 return ret_val; 1186 1155 } ··· 1284 1249 s32 ret_val; 1285 1250 u16 data, offset, mask; 1286 1251 1287 - /* Polarity is determined based on the speed of 1288 - * our connection. */ 1252 + /* 1253 + * Polarity is determined based on the speed of 1254 + * our connection. 
1255 + */ 1289 1256 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); 1290 1257 if (ret_val) 1291 1258 return ret_val; ··· 1297 1260 offset = IGP01E1000_PHY_PCS_INIT_REG; 1298 1261 mask = IGP01E1000_PHY_POLARITY_MASK; 1299 1262 } else { 1300 - /* This really only applies to 10Mbps since 1263 + /* 1264 + * This really only applies to 10Mbps since 1301 1265 * there is no polarity for 100Mbps (always 0). 1302 1266 */ 1303 1267 offset = IGP01E1000_PHY_PORT_STATUS; ··· 1316 1278 } 1317 1279 1318 1280 /** 1319 - * e1000_wait_autoneg - Wait for auto-neg compeletion 1281 + * e1000_wait_autoneg - Wait for auto-neg completion 1320 1282 * @hw: pointer to the HW structure 1321 1283 * 1322 1284 * Waits for auto-negotiation to complete or for the auto-negotiation time ··· 1340 1302 msleep(100); 1341 1303 } 1342 1304 1343 - /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation 1305 + /* 1306 + * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation 1344 1307 * has completed. 1345 1308 */ 1346 1309 return ret_val; ··· 1363 1324 u16 i, phy_status; 1364 1325 1365 1326 for (i = 0; i < iterations; i++) { 1366 - /* Some PHYs require the PHY_STATUS register to be read 1327 + /* 1328 + * Some PHYs require the PHY_STATUS register to be read 1367 1329 * twice due to the link bit being sticky. No harm doing 1368 1330 * it across the board. 1369 1331 */ ··· 1452 1412 if (ret_val) 1453 1413 return ret_val; 1454 1414 1455 - /* Getting bits 15:9, which represent the combination of 1415 + /* 1416 + * Getting bits 15:9, which represent the combination of 1456 1417 * course and fine gain values. The result is a number 1457 1418 * that can be put into the lookup table to obtain the 1458 - * approximate cable length. */ 1419 + * approximate cable length. 1420 + */ 1459 1421 cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & 1460 1422 IGP02E1000_AGC_LENGTH_MASK; 1461 1423