[PATCH] e1000: 82573 specific code & packet split code

82573 specific code & packet split code

Signed-off-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
Signed-off-by: Ganesh Venkatesan <ganesh.venkatesan@intel.com>
Signed-off-by: John Ronciak <john.ronciak@intel.com>
diff -up net-drivers-2.6/drivers/net/e1000/e1000.h net-drivers-2.6/drivers/net/e1000.new/e1000.h

Authored by Malli Chilakala and committed by Jeff Garzik (2d7edb92, f0d11ed0)

+2891 -532 total
+26 drivers/net/e1000/e1000.h
··· 112 #define E1000_MAX_82544_RXD 4096 113 114 /* Supported Rx Buffer Sizes */ 115 #define E1000_RXBUFFER_2048 2048 116 #define E1000_RXBUFFER_4096 4096 117 #define E1000_RXBUFFER_8192 8192 ··· 148 #define E1000_MASTER_SLAVE e1000_ms_hw_default 149 #endif 150 151 /* only works for sizes that are powers of 2 */ 152 #define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1))) 153 ··· 164 uint16_t length; 165 uint16_t next_to_watch; 166 }; 167 168 struct e1000_desc_ring { 169 /* pointer to the descriptor ring memory */ ··· 183 unsigned int next_to_clean; 184 /* array of buffer information structs */ 185 struct e1000_buffer *buffer_info; 186 }; 187 188 #define E1000_DESC_UNUSED(R) \ 189 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ 190 (R)->next_to_clean - (R)->next_to_use - 1) 191 192 #define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) 193 #define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) 194 #define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) ··· 208 struct timer_list watchdog_timer; 209 struct timer_list phy_info_timer; 210 struct vlan_group *vlgrp; 211 uint32_t bd_number; 212 uint32_t rx_buffer_len; 213 uint32_t part_num; ··· 245 boolean_t detect_tx_hung; 246 247 /* RX */ 248 struct e1000_desc_ring rx_ring; 249 uint64_t hw_csum_err; 250 uint64_t hw_csum_good; 251 uint32_t rx_int_delay; 252 uint32_t rx_abs_int_delay; 253 boolean_t rx_csum; 254 uint32_t gorcl; 255 uint64_t gorcl_old; 256 257 /* Interrupt Throttle Rate */ 258 uint32_t itr;
··· 112 #define E1000_MAX_82544_RXD 4096 113 114 /* Supported Rx Buffer Sizes */ 115 + #define E1000_RXBUFFER_128 128 /* Used for packet split */ 116 + #define E1000_RXBUFFER_256 256 /* Used for packet split */ 117 #define E1000_RXBUFFER_2048 2048 118 #define E1000_RXBUFFER_4096 4096 119 #define E1000_RXBUFFER_8192 8192 ··· 146 #define E1000_MASTER_SLAVE e1000_ms_hw_default 147 #endif 148 149 + #define E1000_MNG_VLAN_NONE -1 150 + /* Number of packet split data buffers (not including the header buffer) */ 151 + #define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1 152 + 153 /* only works for sizes that are powers of 2 */ 154 #define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1))) 155 ··· 158 uint16_t length; 159 uint16_t next_to_watch; 160 }; 161 + 162 + struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; }; 163 + struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; }; 164 165 struct e1000_desc_ring { 166 /* pointer to the descriptor ring memory */ ··· 174 unsigned int next_to_clean; 175 /* array of buffer information structs */ 176 struct e1000_buffer *buffer_info; 177 + /* arrays of page information for packet split */ 178 + struct e1000_ps_page *ps_page; 179 + struct e1000_ps_page_dma *ps_page_dma; 180 }; 181 182 #define E1000_DESC_UNUSED(R) \ 183 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ 184 (R)->next_to_clean - (R)->next_to_use - 1) 185 186 + #define E1000_RX_DESC_PS(R, i) \ 187 + (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 188 + #define E1000_RX_DESC_EXT(R, i) \ 189 + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) 190 #define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) 191 #define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) 192 #define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) ··· 192 struct timer_list watchdog_timer; 193 struct timer_list phy_info_timer; 194 struct vlan_group *vlgrp; 195 + uint16_t mng_vlan_id; 196 uint32_t bd_number; 197 uint32_t rx_buffer_len; 198 uint32_t part_num; ··· 228 boolean_t detect_tx_hung; 229 230 /* RX */ 231 + #ifdef CONFIG_E1000_NAPI 232 + boolean_t (*clean_rx) (struct e1000_adapter *adapter, int *work_done, 233 + int work_to_do); 234 + #else 235 + boolean_t (*clean_rx) (struct e1000_adapter *adapter); 236 + #endif 237 + void (*alloc_rx_buf) (struct e1000_adapter *adapter); 238 struct e1000_desc_ring rx_ring; 239 uint64_t hw_csum_err; 240 uint64_t hw_csum_good; 241 uint32_t rx_int_delay; 242 uint32_t rx_abs_int_delay; 243 boolean_t rx_csum; 244 + boolean_t rx_ps; 245 uint32_t gorcl; 246 uint64_t gorcl_old; 247 + uint16_t rx_ps_bsize0; 248 249 /* Interrupt Throttle Rate */ 250 uint32_t itr;
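The clean_rx/alloc_rx_buf hooks and the rx_ps flag added to struct e1000_adapter above let the driver select a receive path once, at setup time, instead of branching per packet. The sketch below only illustrates that dispatch; the wrapper name e1000_setup_rx_path and the handler names (e1000_clean_rx_irq_ps and friends) are assumptions, since the e1000_main.c part of the patch is not shown in this excerpt.

/* Illustrative sketch only: how the new function-pointer hooks might be
 * wired up.  Handler names are assumed; the real assignments live in
 * e1000_main.c, which this excerpt does not include. */
static void e1000_setup_rx_path(struct e1000_adapter *adapter)
{
	if (adapter->rx_ps) {
		/* Packet split: hardware DMAs the protocol headers into a
		 * small buffer and the payload into page-sized buffers. */
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
		adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
	} else {
		/* Legacy path: one contiguous buffer per descriptor. */
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}
}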
+1602 -394 drivers/net/e1000/e1000_hw.c
··· 63 static int32_t e1000_acquire_eeprom(struct e1000_hw *hw); 64 static void e1000_release_eeprom(struct e1000_hw *hw); 65 static void e1000_standby_eeprom(struct e1000_hw *hw); 66 - static int32_t e1000_id_led_init(struct e1000_hw * hw); 67 static int32_t e1000_set_vco_speed(struct e1000_hw *hw); 68 static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw); 69 static int32_t e1000_set_phy_mode(struct e1000_hw *hw); 70 71 /* IGP cable length table */ 72 static const ··· 81 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 82 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120}; 83 84 85 /****************************************************************************** 86 * Set the phy type member in the hw struct. ··· 103 { 104 DEBUGFUNC("e1000_set_phy_type"); 105 106 switch(hw->phy_id) { 107 case M88E1000_E_PHY_ID: 108 case M88E1000_I_PHY_ID: 109 case M88E1011_I_PHY_ID: 110 hw->phy_type = e1000_phy_m88; 111 break; 112 case IGP01E1000_I_PHY_ID: ··· 287 case E1000_DEV_ID_82546GB_FIBER: 288 case E1000_DEV_ID_82546GB_SERDES: 289 case E1000_DEV_ID_82546GB_PCIE: 290 hw->mac_type = e1000_82546_rev_3; 291 break; 292 case E1000_DEV_ID_82541EI: ··· 306 case E1000_DEV_ID_82547GI: 307 hw->mac_type = e1000_82547_rev_2; 308 break; 309 default: 310 /* Should never have loaded on this device */ 311 return -E1000_ERR_MAC_TYPE; 312 } 313 314 switch(hw->mac_type) { 315 case e1000_82541: 316 case e1000_82547: 317 case e1000_82541_rev_2: ··· 384 uint32_t icr; 385 uint32_t manc; 386 uint32_t led_ctrl; 387 388 DEBUGFUNC("e1000_reset_hw"); 389 ··· 394 if(hw->mac_type == e1000_82542_rev2_0) { 395 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); 396 e1000_pci_clear_mwi(hw); 397 } 398 399 /* Clear interrupt mask to stop board from generating interrupts */ ··· 429 430 /* Must reset the PHY before resetting the MAC */ 431 if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { 432 - E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST)); 433 msec_delay(5); 434 } 435 436 /* Issue a global reset to the MAC. This will reset the chip's ··· 508 /* Wait for EEPROM reload */ 509 msec_delay(20); 510 break; 511 default: 512 /* Wait for EEPROM reload (it happens automatically) */ 513 msec_delay(5); ··· 527 } 528 529 /* Disable HW ARPs on ASF enabled adapters */ 530 - if(hw->mac_type >= e1000_82540) { 531 manc = E1000_READ_REG(hw, MANC); 532 manc &= ~(E1000_MANC_ARP_EN); 533 E1000_WRITE_REG(hw, MANC, manc); ··· 580 uint16_t pcix_stat_hi_word; 581 uint16_t cmd_mmrbc; 582 uint16_t stat_mmrbc; 583 DEBUGFUNC("e1000_init_hw"); 584 585 /* Initialize Identification LED */ ··· 596 597 /* Disabling VLAN filtering. */ 598 DEBUGOUT("Initializing the IEEE VLAN\n"); 599 - E1000_WRITE_REG(hw, VET, 0); 600 - 601 e1000_clear_vfta(hw); 602 603 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ ··· 625 626 /* Zero out the Multicast HASH table */ 627 DEBUGOUT("Zeroing the MTA\n"); 628 - for(i = 0; i < E1000_MC_TBL_SIZE; i++) 629 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 630 631 /* Set the PCI priority bit correctly in the CTRL register. This 632 * determines if the adapter gives priority to receives, or if it 633 - * gives equal priority to transmits and receives. 
634 */ 635 - if(hw->dma_fairness) { 636 ctrl = E1000_READ_REG(hw, CTRL); 637 E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR); 638 } ··· 672 if(hw->mac_type > e1000_82544) { 673 ctrl = E1000_READ_REG(hw, TXDCTL); 674 ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; 675 E1000_WRITE_REG(hw, TXDCTL, ctrl); 676 } 677 678 /* Clear all of the statistics registers (clear on read). It is 679 * important that we do this after we have tried to establish link ··· 765 * control setting, then the variable hw->fc will 766 * be initialized based on a value in the EEPROM. 767 */ 768 - if(e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data) < 0) { 769 DEBUGOUT("EEPROM Read Error\n"); 770 return -E1000_ERR_EEPROM; 771 } ··· 822 E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW); 823 E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH); 824 E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE); 825 E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time); 826 827 /* Set the flow control receive threshold registers. Normally, ··· 993 } 994 995 /****************************************************************************** 996 - * Detects which PHY is present and the speed and duplex 997 * 998 * hw - Struct containing variables accessed by shared code 999 ******************************************************************************/ 1000 static int32_t 1001 - e1000_setup_copper_link(struct e1000_hw *hw) 1002 { 1003 uint32_t ctrl; 1004 - uint32_t led_ctrl; 1005 int32_t ret_val; 1006 - uint16_t i; 1007 uint16_t phy_data; 1008 1009 - DEBUGFUNC("e1000_setup_copper_link"); 1010 1011 ctrl = E1000_READ_REG(hw, CTRL); 1012 /* With 82543, we need to force speed and duplex on the MAC equal to what ··· 1018 } else { 1019 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); 1020 E1000_WRITE_REG(hw, CTRL, ctrl); 1021 - e1000_phy_hw_reset(hw); 1022 } 1023 1024 /* Make sure we have a valid PHY */ ··· 1048 hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) 1049 hw->phy_reset_disable = FALSE; 1050 1051 - if(!hw->phy_reset_disable) { 1052 - if (hw->phy_type == e1000_phy_igp) { 1053 1054 - ret_val = e1000_phy_reset(hw); 1055 - if(ret_val) { 1056 - DEBUGOUT("Error Resetting the PHY\n"); 1057 - return ret_val; 1058 - } 1059 1060 - /* Wait 10ms for MAC to configure PHY from eeprom settings */ 1061 - msec_delay(15); 1062 1063 - /* Configure activity LED after PHY reset */ 1064 - led_ctrl = E1000_READ_REG(hw, LEDCTL); 1065 - led_ctrl &= IGP_ACTIVITY_LED_MASK; 1066 - led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); 1067 - E1000_WRITE_REG(hw, LEDCTL, led_ctrl); 1068 1069 - /* disable lplu d3 during driver init */ 1070 - ret_val = e1000_set_d3_lplu_state(hw, FALSE); 1071 - if(ret_val) { 1072 - DEBUGOUT("Error Disabling LPLU D3\n"); 1073 - return ret_val; 1074 - } 1075 1076 - /* Configure mdi-mdix settings */ 1077 - ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, 1078 - &phy_data); 1079 if(ret_val) 1080 return ret_val; 1081 - 1082 - if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { 1083 - hw->dsp_config_state = e1000_dsp_config_disabled; 1084 - /* Force MDI for earlier revs of the IGP PHY */ 1085 - phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | 1086 - IGP01E1000_PSCR_FORCE_MDI_MDIX); 1087 - hw->mdix = 1; 1088 - 1089 - } else { 1090 - hw->dsp_config_state = e1000_dsp_config_enabled; 1091 - phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; 1092 - 1093 - switch (hw->mdix) { 1094 - case 1: 1095 - phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; 1096 - break; 1097 - case 2: 
1098 - phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; 1099 - break; 1100 - case 0: 1101 - default: 1102 - phy_data |= IGP01E1000_PSCR_AUTO_MDIX; 1103 - break; 1104 - } 1105 - } 1106 - ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, 1107 - phy_data); 1108 - if(ret_val) 1109 - return ret_val; 1110 - 1111 - /* set auto-master slave resolution settings */ 1112 - if(hw->autoneg) { 1113 - e1000_ms_type phy_ms_setting = hw->master_slave; 1114 - 1115 - if(hw->ffe_config_state == e1000_ffe_config_active) 1116 - hw->ffe_config_state = e1000_ffe_config_enabled; 1117 - 1118 - if(hw->dsp_config_state == e1000_dsp_config_activated) 1119 - hw->dsp_config_state = e1000_dsp_config_enabled; 1120 - 1121 - /* when autonegotiation advertisment is only 1000Mbps then we 1122 - * should disable SmartSpeed and enable Auto MasterSlave 1123 - * resolution as hardware default. */ 1124 - if(hw->autoneg_advertised == ADVERTISE_1000_FULL) { 1125 - /* Disable SmartSpeed */ 1126 - ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 1127 - &phy_data); 1128 - if(ret_val) 1129 - return ret_val; 1130 - phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1131 - ret_val = e1000_write_phy_reg(hw, 1132 IGP01E1000_PHY_PORT_CONFIG, 1133 phy_data); 1134 - if(ret_val) 1135 - return ret_val; 1136 - /* Set auto Master/Slave resolution process */ 1137 - ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); 1138 - if(ret_val) 1139 - return ret_val; 1140 - phy_data &= ~CR_1000T_MS_ENABLE; 1141 - ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); 1142 - if(ret_val) 1143 - return ret_val; 1144 - } 1145 - 1146 - ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); 1147 - if(ret_val) 1148 - return ret_val; 1149 - 1150 - /* load defaults for future use */ 1151 - hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? 1152 - ((phy_data & CR_1000T_MS_VALUE) ? 1153 - e1000_ms_force_master : 1154 - e1000_ms_force_slave) : 1155 - e1000_ms_auto; 1156 - 1157 - switch (phy_ms_setting) { 1158 - case e1000_ms_force_master: 1159 - phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); 1160 - break; 1161 - case e1000_ms_force_slave: 1162 - phy_data |= CR_1000T_MS_ENABLE; 1163 - phy_data &= ~(CR_1000T_MS_VALUE); 1164 - break; 1165 - case e1000_ms_auto: 1166 - phy_data &= ~CR_1000T_MS_ENABLE; 1167 - default: 1168 - break; 1169 - } 1170 - ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); 1171 - if(ret_val) 1172 - return ret_val; 1173 - } 1174 - } else { 1175 - /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ 1176 - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 1177 - &phy_data); 1178 if(ret_val) 1179 return ret_val; 1180 1181 - phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 1182 1183 - /* Options: 1184 - * MDI/MDI-X = 0 (default) 1185 - * 0 - Auto for all speeds 1186 - * 1 - MDI mode 1187 - * 2 - MDI-X mode 1188 - * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) 1189 - */ 1190 - phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; 1191 1192 - switch (hw->mdix) { 1193 - case 1: 1194 - phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; 1195 - break; 1196 - case 2: 1197 - phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; 1198 - break; 1199 - case 3: 1200 - phy_data |= M88E1000_PSCR_AUTO_X_1000T; 1201 - break; 1202 - case 0: 1203 default: 1204 - phy_data |= M88E1000_PSCR_AUTO_X_MODE; 1205 - break; 1206 - } 1207 1208 - /* Options: 1209 - * disable_polarity_correction = 0 (default) 1210 - * Automatic Correction for Reversed Cable Polarity 1211 - * 0 - Disabled 1212 - * 1 - Enabled 1213 - */ 1214 - phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; 1215 - if(hw->disable_polarity_correction == 1) 1216 - phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 1217 - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 1218 - phy_data); 1219 - if(ret_val) 1220 - return ret_val; 1221 1222 - /* Force TX_CLK in the Extended PHY Specific Control Register 1223 - * to 25MHz clock. 1224 - */ 1225 - ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 1226 - &phy_data); 1227 - if(ret_val) 1228 - return ret_val; 1229 1230 - phy_data |= M88E1000_EPSCR_TX_CLK_25; 1231 1232 - if (hw->phy_revision < M88E1011_I_REV_4) { 1233 - /* Configure Master and Slave downshift values */ 1234 - phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | 1235 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); 1236 - phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | 1237 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); 1238 - ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 1239 - phy_data); 1240 - if(ret_val) 1241 - return ret_val; 1242 - } 1243 1244 - /* SW Reset the PHY so all changes take effect */ 1245 - ret_val = e1000_phy_reset(hw); 1246 - if(ret_val) { 1247 - DEBUGOUT("Error Resetting the PHY\n"); 1248 - return ret_val; 1249 - } 1250 } 1251 1252 - /* Options: 1253 - * autoneg = 1 (default) 1254 - * PHY will advertise value(s) parsed from 1255 - * autoneg_advertised and fc 1256 - * autoneg = 0 1257 - * PHY will be set to 10H, 10F, 100H, or 100F 1258 - * depending on value parsed from forced_speed_duplex. 1259 - */ 1260 1261 - /* Is autoneg enabled? This is enabled by default or by software 1262 - * override. If so, call e1000_phy_setup_autoneg routine to parse the 1263 - * autoneg_advertised and fc options. If autoneg is NOT enabled, then 1264 - * the user should have provided a speed/duplex override. If so, then 1265 - * call e1000_phy_force_speed_duplex to parse and set this up. 1266 - */ 1267 - if(hw->autoneg) { 1268 - /* Perform some bounds checking on the hw->autoneg_advertised 1269 - * parameter. If this variable is zero, then set it to the default. 1270 - */ 1271 - hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; 1272 1273 - /* If autoneg_advertised is zero, we assume it was not defaulted 1274 - * by the calling code so we set to advertise full capability. 
1275 - */ 1276 - if(hw->autoneg_advertised == 0) 1277 - hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 1278 1279 - DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); 1280 - ret_val = e1000_phy_setup_autoneg(hw); 1281 - if(ret_val) { 1282 - DEBUGOUT("Error Setting up Auto-Negotiation\n"); 1283 - return ret_val; 1284 - } 1285 - DEBUGOUT("Restarting Auto-Neg\n"); 1286 - 1287 - /* Restart auto-negotiation by setting the Auto Neg Enable bit and 1288 - * the Auto Neg Restart bit in the PHY control register. 1289 - */ 1290 - ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); 1291 - if(ret_val) 1292 - return ret_val; 1293 - 1294 - phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 1295 - ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); 1296 - if(ret_val) 1297 - return ret_val; 1298 - 1299 - /* Does the user want to wait for Auto-Neg to complete here, or 1300 - * check at a later time (for example, callback routine). 1301 - */ 1302 - if(hw->wait_autoneg_complete) { 1303 - ret_val = e1000_wait_autoneg(hw); 1304 - if(ret_val) { 1305 - DEBUGOUT("Error while waiting for autoneg to complete\n"); 1306 - return ret_val; 1307 - } 1308 - } 1309 - hw->get_link_status = TRUE; 1310 - } else { 1311 - DEBUGOUT("Forcing speed and duplex\n"); 1312 - ret_val = e1000_phy_force_speed_duplex(hw); 1313 - if(ret_val) { 1314 - DEBUGOUT("Error Forcing Speed and Duplex\n"); 1315 - return ret_val; 1316 - } 1317 } 1318 - } /* !hw->phy_reset_disable */ 1319 1320 /* Check link status. Wait up to 100 microseconds for link to become 1321 * valid. ··· 1453 return ret_val; 1454 1455 if(phy_data & MII_SR_LINK_STATUS) { 1456 - /* We have link, so we need to finish the config process: 1457 - * 1) Set up the MAC to the current PHY speed/duplex 1458 - * if we are on 82543. If we 1459 - * are on newer silicon, we only need to configure 1460 - * collision distance in the Transmit Control Register. 1461 - * 2) Set up flow control on the MAC to that established with 1462 - * the link partner. 1463 - */ 1464 - if(hw->mac_type >= e1000_82544) { 1465 - e1000_config_collision_dist(hw); 1466 - } else { 1467 - ret_val = e1000_config_mac_to_phy(hw); 1468 - if(ret_val) { 1469 - DEBUGOUT("Error configuring MAC to PHY settings\n"); 1470 - return ret_val; 1471 - } 1472 - } 1473 - ret_val = e1000_config_fc_after_link_up(hw); 1474 - if(ret_val) { 1475 - DEBUGOUT("Error Configuring Flow Control\n"); 1476 return ret_val; 1477 - } 1478 - DEBUGOUT("Valid link established!!!\n"); 1479 - 1480 - if(hw->phy_type == e1000_phy_igp) { 1481 - ret_val = e1000_config_dsp_after_link_change(hw, TRUE); 1482 - if(ret_val) { 1483 - DEBUGOUT("Error Configuring DSP after link up\n"); 1484 - return ret_val; 1485 - } 1486 - } 1487 DEBUGOUT("Valid link established!!!\n"); 1488 return E1000_SUCCESS; 1489 } ··· 1487 if(ret_val) 1488 return ret_val; 1489 1490 - /* Read the MII 1000Base-T Control Register (Address 9). */ 1491 - ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); 1492 - if(ret_val) 1493 - return ret_val; 1494 1495 /* Need to parse both autoneg_advertised and fc and set up 1496 * the appropriate PHY registers. First we will parse for ··· 1602 1603 DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 1604 1605 - ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); 1606 if(ret_val) 1607 return ret_val; 1608 ··· 1863 1864 DEBUGFUNC("e1000_config_mac_to_phy"); 1865 1866 /* Read the Device Control Register and set the bits to Force Speed 1867 * and Duplex. 
1868 */ ··· 1878 /* Set up duplex in the Device Control and Transmit Control 1879 * registers depending on negotiated values. 1880 */ 1881 - if (hw->phy_type == e1000_phy_igp) { 1882 - ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 1883 - &phy_data); 1884 - if(ret_val) 1885 - return ret_val; 1886 1887 - if(phy_data & IGP01E1000_PSSR_FULL_DUPLEX) ctrl |= E1000_CTRL_FD; 1888 - else ctrl &= ~E1000_CTRL_FD; 1889 1890 - e1000_config_collision_dist(hw); 1891 1892 - /* Set up speed in the Device Control register depending on 1893 - * negotiated values. 1894 - */ 1895 - if((phy_data & IGP01E1000_PSSR_SPEED_MASK) == 1896 - IGP01E1000_PSSR_SPEED_1000MBPS) 1897 - ctrl |= E1000_CTRL_SPD_1000; 1898 - else if((phy_data & IGP01E1000_PSSR_SPEED_MASK) == 1899 - IGP01E1000_PSSR_SPEED_100MBPS) 1900 - ctrl |= E1000_CTRL_SPD_100; 1901 - } else { 1902 - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 1903 - &phy_data); 1904 - if(ret_val) 1905 - return ret_val; 1906 1907 - if(phy_data & M88E1000_PSSR_DPLX) ctrl |= E1000_CTRL_FD; 1908 - else ctrl &= ~E1000_CTRL_FD; 1909 - 1910 - e1000_config_collision_dist(hw); 1911 - 1912 - /* Set up speed in the Device Control register depending on 1913 - * negotiated values. 1914 - */ 1915 - if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) 1916 - ctrl |= E1000_CTRL_SPD_1000; 1917 - else if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) 1918 - ctrl |= E1000_CTRL_SPD_100; 1919 - } 1920 /* Write the configured values back to the Device Control Reg. */ 1921 E1000_WRITE_REG(hw, CTRL, ctrl); 1922 return E1000_SUCCESS; ··· 2664 2665 DEBUGFUNC("e1000_read_phy_reg"); 2666 2667 - 2668 - if(hw->phy_type == e1000_phy_igp && 2669 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2670 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 2671 (uint16_t)reg_addr); ··· 2770 2771 DEBUGFUNC("e1000_write_phy_reg"); 2772 2773 - 2774 - if(hw->phy_type == e1000_phy_igp && 2775 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2776 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 2777 (uint16_t)reg_addr); ··· 2849 return E1000_SUCCESS; 2850 } 2851 2852 /****************************************************************************** 2853 * Returns the PHY to the power-on reset state 2854 * 2855 * hw - Struct containing variables accessed by shared code 2856 ******************************************************************************/ 2857 - void 2858 e1000_phy_hw_reset(struct e1000_hw *hw) 2859 { 2860 uint32_t ctrl, ctrl_ext; 2861 uint32_t led_ctrl; 2862 2863 DEBUGFUNC("e1000_phy_hw_reset"); 2864 2865 DEBUGOUT("Resetting Phy...\n"); 2866 ··· 2905 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); 2906 E1000_WRITE_REG(hw, LEDCTL, led_ctrl); 2907 } 2908 } 2909 2910 /****************************************************************************** ··· 2927 2928 DEBUGFUNC("e1000_phy_reset"); 2929 2930 - if(hw->mac_type != e1000_82541_rev_2) { 2931 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); 2932 if(ret_val) 2933 return ret_val; ··· 2950 return ret_val; 2951 2952 udelay(1); 2953 - } else e1000_phy_hw_reset(hw); 2954 2955 - if(hw->phy_type == e1000_phy_igp) 2956 e1000_phy_init_script(hw); 2957 2958 return E1000_SUCCESS; ··· 3006 case e1000_82547: 3007 case e1000_82547_rev_2: 3008 if(hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE; 3009 break; 3010 default: 3011 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); ··· 3065 3066 /* The downshift status is checked only once, after link is established, 3067 * and it stored in the 
hw->speed_downgraded parameter. */ 3068 - phy_info->downshift = hw->speed_downgraded; 3069 3070 /* IGP01E1000 does not need to support it. */ 3071 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; ··· 3104 if(ret_val) 3105 return ret_val; 3106 3107 - /* transalte to old method */ 3108 average = (max_length + min_length) / 2; 3109 3110 if(average <= e1000_igp_cable_length_50) ··· 3139 3140 /* The downshift status is checked only once, after link is established, 3141 * and it stored in the hw->speed_downgraded parameter. */ 3142 - phy_info->downshift = hw->speed_downgraded; 3143 3144 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 3145 if(ret_val) ··· 3228 return -E1000_ERR_CONFIG; 3229 } 3230 3231 - if(hw->phy_type == e1000_phy_igp) 3232 return e1000_phy_igp_get_info(hw, phy_info); 3233 else 3234 return e1000_phy_m88_get_info(hw, phy_info); ··· 3255 * 3256 * hw - Struct containing variables accessed by shared code 3257 *****************************************************************************/ 3258 - void 3259 e1000_init_eeprom_params(struct e1000_hw *hw) 3260 { 3261 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3262 uint32_t eecd = E1000_READ_REG(hw, EECD); 3263 uint16_t eeprom_size; 3264 3265 DEBUGFUNC("e1000_init_eeprom_params"); ··· 3275 eeprom->opcode_bits = 3; 3276 eeprom->address_bits = 6; 3277 eeprom->delay_usec = 50; 3278 break; 3279 case e1000_82540: 3280 case e1000_82545: ··· 3293 eeprom->word_size = 64; 3294 eeprom->address_bits = 6; 3295 } 3296 break; 3297 case e1000_82541: 3298 case e1000_82541_rev_2: ··· 3323 eeprom->address_bits = 6; 3324 } 3325 } 3326 break; 3327 default: 3328 break; 3329 } 3330 3331 if (eeprom->type == e1000_eeprom_spi) { 3332 - eeprom->word_size = 64; 3333 - if (e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size) == 0) { 3334 - eeprom_size &= EEPROM_SIZE_MASK; 3335 - 3336 - switch (eeprom_size) { 3337 - case EEPROM_SIZE_16KB: 3338 - eeprom->word_size = 8192; 3339 - break; 3340 - case EEPROM_SIZE_8KB: 3341 - eeprom->word_size = 4096; 3342 - break; 3343 - case EEPROM_SIZE_4KB: 3344 - eeprom->word_size = 2048; 3345 - break; 3346 - case EEPROM_SIZE_2KB: 3347 - eeprom->word_size = 1024; 3348 - break; 3349 - case EEPROM_SIZE_1KB: 3350 - eeprom->word_size = 512; 3351 - break; 3352 - case EEPROM_SIZE_512B: 3353 - eeprom->word_size = 256; 3354 - break; 3355 - case EEPROM_SIZE_128B: 3356 - default: 3357 - eeprom->word_size = 64; 3358 - break; 3359 - } 3360 } 3361 } 3362 } 3363 3364 /****************************************************************************** ··· 3529 3530 DEBUGFUNC("e1000_acquire_eeprom"); 3531 3532 eecd = E1000_READ_REG(hw, EECD); 3533 3534 /* Request EEPROM Access */ 3535 if(hw->mac_type > e1000_82544) { 3536 eecd |= E1000_EECD_REQ; ··· 3552 DEBUGOUT("Could not acquire EEPROM grant\n"); 3553 return -E1000_ERR_EEPROM; 3554 } 3555 } 3556 3557 /* Setup EEPROM for Read/Write */ ··· 3671 eecd &= ~E1000_EECD_REQ; 3672 E1000_WRITE_REG(hw, EECD, eecd); 3673 } 3674 } 3675 3676 /****************************************************************************** ··· 3734 { 3735 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3736 uint32_t i = 0; 3737 3738 DEBUGFUNC("e1000_read_eeprom"); 3739 /* A check for invalid values: offset too large, too many words, and not 3740 * enough words. 
3741 */ ··· 3747 return -E1000_ERR_EEPROM; 3748 } 3749 3750 - /* Prepare the EEPROM for reading */ 3751 - if(e1000_acquire_eeprom(hw) != E1000_SUCCESS) 3752 - return -E1000_ERR_EEPROM; 3753 3754 if(eeprom->type == e1000_eeprom_spi) { 3755 uint16_t word_in; ··· 3815 } 3816 3817 /****************************************************************************** 3818 * Verifies that the EEPROM has a valid checksum 3819 * 3820 * hw - Struct containing variables accessed by shared code ··· 3956 uint16_t i, eeprom_data; 3957 3958 DEBUGFUNC("e1000_validate_eeprom_checksum"); 3959 3960 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 3961 if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { ··· 4019 if(e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { 4020 DEBUGOUT("EEPROM Write Error\n"); 4021 return -E1000_ERR_EEPROM; 4022 } 4023 return E1000_SUCCESS; 4024 } ··· 4055 DEBUGOUT("\"words\" parameter out of bounds\n"); 4056 return -E1000_ERR_EEPROM; 4057 } 4058 4059 /* Prepare the EEPROM for writing */ 4060 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) ··· 4230 } 4231 4232 /****************************************************************************** 4233 * Reads the adapter's part number from the EEPROM 4234 * 4235 * hw - Struct containing variables accessed by shared code ··· 4367 e1000_init_rx_addrs(struct e1000_hw *hw) 4368 { 4369 uint32_t i; 4370 4371 DEBUGFUNC("e1000_init_rx_addrs"); 4372 ··· 4376 4377 e1000_rar_set(hw, hw->mac_addr, 0); 4378 4379 /* Zero out the other 15 receive addresses. */ 4380 DEBUGOUT("Clearing RAR[1-15]\n"); 4381 - for(i = 1; i < E1000_RAR_ENTRIES; i++) { 4382 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 4383 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 4384 } ··· 4408 { 4409 uint32_t hash_value; 4410 uint32_t i; 4411 - 4412 DEBUGFUNC("e1000_mc_addr_list_update"); 4413 4414 /* Set the new number of MC addresses that we are being requested to use. */ ··· 4418 4419 /* Clear RAR[1-15] */ 4420 DEBUGOUT(" Clearing RAR[1-15]\n"); 4421 - for(i = rar_used_count; i < E1000_RAR_ENTRIES; i++) { 4422 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 4423 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 4424 } 4425 4426 /* Clear the MTA */ 4427 DEBUGOUT(" Clearing MTA\n"); 4428 - for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) { 4429 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 4430 } 4431 ··· 4451 /* Place this multicast address in the RAR if there is room, * 4452 * else put it in the MTA 4453 */ 4454 - if(rar_used_count < E1000_RAR_ENTRIES) { 4455 e1000_rar_set(hw, 4456 mc_addr_list + (i * (ETH_LENGTH_OF_ADDRESS + pad)), 4457 rar_used_count); ··· 4502 } 4503 4504 hash_value &= 0xFFF; 4505 return hash_value; 4506 } 4507 ··· 4607 e1000_clear_vfta(struct e1000_hw *hw) 4608 { 4609 uint32_t offset; 4610 4611 - for(offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) 4612 - E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0); 4613 } 4614 4615 - static int32_t 4616 e1000_id_led_init(struct e1000_hw * hw) 4617 { 4618 uint32_t ledctl; ··· 4964 temp = E1000_READ_REG(hw, MGTPRC); 4965 temp = E1000_READ_REG(hw, MGTPDC); 4966 temp = E1000_READ_REG(hw, MGTPTC); 4967 } 4968 4969 /****************************************************************************** ··· 5143 hw->bus_speed = e1000_bus_speed_unknown; 5144 hw->bus_width = e1000_bus_width_unknown; 5145 break; 5146 default: 5147 status = E1000_READ_REG(hw, STATUS); 5148 hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? 
··· 5251 5252 /* Use old method for Phy older than IGP */ 5253 if(hw->phy_type == e1000_phy_m88) { 5254 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 5255 &phy_data); 5256 if(ret_val) ··· 5368 return ret_val; 5369 *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >> 5370 M88E1000_PSSR_REV_POLARITY_SHIFT; 5371 - } else if(hw->phy_type == e1000_phy_igp) { 5372 /* Read the Status register to check the speed */ 5373 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 5374 &phy_data); ··· 5421 5422 DEBUGFUNC("e1000_check_downshift"); 5423 5424 - if(hw->phy_type == e1000_phy_igp) { 5425 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, 5426 &phy_data); 5427 if(ret_val) ··· 5438 hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> 5439 M88E1000_PSSR_DOWNSHIFT_SHIFT; 5440 } 5441 return E1000_SUCCESS; 5442 } 5443 ··· 5553 if(ret_val) 5554 return ret_val; 5555 5556 - msec_delay(20); 5557 5558 ret_val = e1000_write_phy_reg(hw, 0x0000, 5559 IGP01E1000_IEEE_FORCE_GIGA); ··· 5577 if(ret_val) 5578 return ret_val; 5579 5580 - msec_delay(20); 5581 5582 /* Now enable the transmitter */ 5583 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); ··· 5602 if(ret_val) 5603 return ret_val; 5604 5605 - msec_delay(20); 5606 5607 ret_val = e1000_write_phy_reg(hw, 0x0000, 5608 IGP01E1000_IEEE_FORCE_GIGA); ··· 5618 if(ret_val) 5619 return ret_val; 5620 5621 - msec_delay(20); 5622 5623 /* Now enable the transmitter */ 5624 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); ··· 5693 uint16_t phy_data; 5694 DEBUGFUNC("e1000_set_d3_lplu_state"); 5695 5696 - if(!((hw->mac_type == e1000_82541_rev_2) || 5697 - (hw->mac_type == e1000_82547_rev_2))) 5698 return E1000_SUCCESS; 5699 5700 /* During driver activity LPLU should not be used or it will attain link 5701 * from the lowest speeds starting from 10Mbps. The capability is used for 5702 * Dx transitions and states */ 5703 - ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); 5704 - if(ret_val) 5705 - return ret_val; 5706 - 5707 - if(!active) { 5708 - phy_data &= ~IGP01E1000_GMII_FLEX_SPD; 5709 - ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); 5710 if(ret_val) 5711 return ret_val; 5712 5713 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during 5714 * Dx states where the power conservation is most important. During ··· 5756 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) || 5757 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { 5758 5759 - phy_data |= IGP01E1000_GMII_FLEX_SPD; 5760 - ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); 5761 if(ret_val) 5762 return ret_val; 5763 5764 /* When LPLU is enabled we should disable SmartSpeed */ 5765 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); ··· 5930 return ret_val; 5931 5932 return E1000_SUCCESS; 5933 } 5934 5935 static int32_t ··· 6348 } 6349 return E1000_SUCCESS; 6350 } 6351
··· 63 static int32_t e1000_acquire_eeprom(struct e1000_hw *hw); 64 static void e1000_release_eeprom(struct e1000_hw *hw); 65 static void e1000_standby_eeprom(struct e1000_hw *hw); 66 static int32_t e1000_set_vco_speed(struct e1000_hw *hw); 67 static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw); 68 static int32_t e1000_set_phy_mode(struct e1000_hw *hw); 69 + static int32_t e1000_host_if_read_cookie(struct e1000_hw *hw, uint8_t *buffer); 70 + static uint8_t e1000_calculate_mng_checksum(char *buffer, uint32_t length); 71 72 /* IGP cable length table */ 73 static const ··· 80 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 81 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120}; 82 83 + static const 84 + uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = 85 + { 8, 13, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 86 + 22, 24, 27, 30, 32, 35, 37, 40, 42, 44, 47, 49, 51, 54, 56, 58, 87 + 32, 35, 38, 41, 44, 47, 50, 53, 55, 58, 61, 63, 66, 69, 71, 74, 88 + 43, 47, 51, 54, 58, 61, 64, 67, 71, 74, 77, 80, 82, 85, 88, 90, 89 + 57, 62, 66, 70, 74, 77, 81, 85, 88, 91, 94, 97, 100, 103, 106, 108, 90 + 73, 78, 82, 87, 91, 95, 98, 102, 105, 109, 112, 114, 117, 119, 122, 124, 91 + 91, 96, 101, 105, 109, 113, 116, 119, 122, 125, 127, 128, 128, 128, 128, 128, 92 + 108, 113, 117, 121, 124, 127, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}; 93 + 94 95 /****************************************************************************** 96 * Set the phy type member in the hw struct. ··· 91 { 92 DEBUGFUNC("e1000_set_phy_type"); 93 94 + if(hw->mac_type == e1000_undefined) 95 + return -E1000_ERR_PHY_TYPE; 96 + 97 switch(hw->phy_id) { 98 case M88E1000_E_PHY_ID: 99 case M88E1000_I_PHY_ID: 100 case M88E1011_I_PHY_ID: 101 + case M88E1111_I_PHY_ID: 102 hw->phy_type = e1000_phy_m88; 103 break; 104 case IGP01E1000_I_PHY_ID: ··· 271 case E1000_DEV_ID_82546GB_FIBER: 272 case E1000_DEV_ID_82546GB_SERDES: 273 case E1000_DEV_ID_82546GB_PCIE: 274 + case E1000_DEV_ID_82546GB_QUAD_COPPER: 275 hw->mac_type = e1000_82546_rev_3; 276 break; 277 case E1000_DEV_ID_82541EI: ··· 289 case E1000_DEV_ID_82547GI: 290 hw->mac_type = e1000_82547_rev_2; 291 break; 292 + case E1000_DEV_ID_82573E: 293 + case E1000_DEV_ID_82573E_IAMT: 294 + hw->mac_type = e1000_82573; 295 + break; 296 default: 297 /* Should never have loaded on this device */ 298 return -E1000_ERR_MAC_TYPE; 299 } 300 301 switch(hw->mac_type) { 302 + case e1000_82573: 303 + hw->eeprom_semaphore_present = TRUE; 304 + /* fall through */ 305 case e1000_82541: 306 case e1000_82547: 307 case e1000_82541_rev_2: ··· 360 uint32_t icr; 361 uint32_t manc; 362 uint32_t led_ctrl; 363 + uint32_t timeout; 364 + uint32_t extcnf_ctrl; 365 + int32_t ret_val; 366 367 DEBUGFUNC("e1000_reset_hw"); 368 ··· 367 if(hw->mac_type == e1000_82542_rev2_0) { 368 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); 369 e1000_pci_clear_mwi(hw); 370 + } 371 + 372 + if(hw->bus_type == e1000_bus_type_pci_express) { 373 + /* Prevent the PCI-E bus from sticking if there is no TLP connection 374 + * on the last TLP read/write transaction when MAC is reset. 
375 + */ 376 + if(e1000_disable_pciex_master(hw) != E1000_SUCCESS) { 377 + DEBUGOUT("PCI-E Master disable polling has failed.\n"); 378 + } 379 } 380 381 /* Clear interrupt mask to stop board from generating interrupts */ ··· 393 394 /* Must reset the PHY before resetting the MAC */ 395 if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { 396 + E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST)); 397 msec_delay(5); 398 + } 399 + 400 + /* Must acquire the MDIO ownership before MAC reset. 401 + * Ownership defaults to firmware after a reset. */ 402 + if(hw->mac_type == e1000_82573) { 403 + timeout = 10; 404 + 405 + extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); 406 + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; 407 + 408 + do { 409 + E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); 410 + extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); 411 + 412 + if(extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) 413 + break; 414 + else 415 + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; 416 + 417 + msec_delay(2); 418 + timeout--; 419 + } while(timeout); 420 } 421 422 /* Issue a global reset to the MAC. This will reset the chip's ··· 450 /* Wait for EEPROM reload */ 451 msec_delay(20); 452 break; 453 + case e1000_82573: 454 + udelay(10); 455 + ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); 456 + ctrl_ext |= E1000_CTRL_EXT_EE_RST; 457 + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); 458 + E1000_WRITE_FLUSH(hw); 459 + /* fall through */ 460 + ret_val = e1000_get_auto_rd_done(hw); 461 + if(ret_val) 462 + /* We don't want to continue accessing MAC registers. */ 463 + return ret_val; 464 + break; 465 default: 466 /* Wait for EEPROM reload (it happens automatically) */ 467 msec_delay(5); ··· 457 } 458 459 /* Disable HW ARPs on ASF enabled adapters */ 460 + if(hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) { 461 manc = E1000_READ_REG(hw, MANC); 462 manc &= ~(E1000_MANC_ARP_EN); 463 E1000_WRITE_REG(hw, MANC, manc); ··· 510 uint16_t pcix_stat_hi_word; 511 uint16_t cmd_mmrbc; 512 uint16_t stat_mmrbc; 513 + uint32_t mta_size; 514 + 515 DEBUGFUNC("e1000_init_hw"); 516 517 /* Initialize Identification LED */ ··· 524 525 /* Disabling VLAN filtering. */ 526 DEBUGOUT("Initializing the IEEE VLAN\n"); 527 + if (hw->mac_type < e1000_82545_rev_3) 528 + E1000_WRITE_REG(hw, VET, 0); 529 e1000_clear_vfta(hw); 530 531 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ ··· 553 554 /* Zero out the Multicast HASH table */ 555 DEBUGOUT("Zeroing the MTA\n"); 556 + mta_size = E1000_MC_TBL_SIZE; 557 + for(i = 0; i < mta_size; i++) 558 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 559 560 /* Set the PCI priority bit correctly in the CTRL register. This 561 * determines if the adapter gives priority to receives, or if it 562 + * gives equal priority to transmits and receives. Valid only on 563 + * 82542 and 82543 silicon. 564 */ 565 + if(hw->dma_fairness && hw->mac_type <= e1000_82543) { 566 ctrl = E1000_READ_REG(hw, CTRL); 567 E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR); 568 } ··· 598 if(hw->mac_type > e1000_82544) { 599 ctrl = E1000_READ_REG(hw, TXDCTL); 600 ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; 601 + switch (hw->mac_type) { 602 + default: 603 + break; 604 + case e1000_82573: 605 + ctrl |= E1000_TXDCTL_COUNT_DESC; 606 + break; 607 + } 608 E1000_WRITE_REG(hw, TXDCTL, ctrl); 609 } 610 + 611 + if (hw->mac_type == e1000_82573) { 612 + e1000_enable_tx_pkt_filtering(hw); 613 + } 614 + 615 616 /* Clear all of the statistics registers (clear on read). 
It is 617 * important that we do this after we have tried to establish link ··· 679 * control setting, then the variable hw->fc will 680 * be initialized based on a value in the EEPROM. 681 */ 682 + if(e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data)) { 683 DEBUGOUT("EEPROM Read Error\n"); 684 return -E1000_ERR_EEPROM; 685 } ··· 736 E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW); 737 E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH); 738 E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE); 739 + 740 E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time); 741 742 /* Set the flow control receive threshold registers. Normally, ··· 906 } 907 908 /****************************************************************************** 909 + * Make sure we have a valid PHY and change PHY mode before link setup. 910 * 911 * hw - Struct containing variables accessed by shared code 912 ******************************************************************************/ 913 static int32_t 914 + e1000_copper_link_preconfig(struct e1000_hw *hw) 915 { 916 uint32_t ctrl; 917 int32_t ret_val; 918 uint16_t phy_data; 919 920 + DEBUGFUNC("e1000_copper_link_preconfig"); 921 922 ctrl = E1000_READ_REG(hw, CTRL); 923 /* With 82543, we need to force speed and duplex on the MAC equal to what ··· 933 } else { 934 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); 935 E1000_WRITE_REG(hw, CTRL, ctrl); 936 + ret_val = e1000_phy_hw_reset(hw); 937 + if(ret_val) 938 + return ret_val; 939 } 940 941 /* Make sure we have a valid PHY */ ··· 961 hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) 962 hw->phy_reset_disable = FALSE; 963 964 + return E1000_SUCCESS; 965 + } 966 967 968 + /******************************************************************** 969 + * Copper link setup for e1000_phy_igp series. 
970 + * 971 + * hw - Struct containing variables accessed by shared code 972 + *********************************************************************/ 973 + static int32_t 974 + e1000_copper_link_igp_setup(struct e1000_hw *hw) 975 + { 976 + uint32_t led_ctrl; 977 + int32_t ret_val; 978 + uint16_t phy_data; 979 980 + DEBUGFUNC("e1000_copper_link_igp_setup"); 981 982 + if (hw->phy_reset_disable) 983 + return E1000_SUCCESS; 984 + 985 + ret_val = e1000_phy_reset(hw); 986 + if (ret_val) { 987 + DEBUGOUT("Error Resetting the PHY\n"); 988 + return ret_val; 989 + } 990 991 + /* Wait 10ms for MAC to configure PHY from eeprom settings */ 992 + msec_delay(15); 993 + 994 + /* Configure activity LED after PHY reset */ 995 + led_ctrl = E1000_READ_REG(hw, LEDCTL); 996 + led_ctrl &= IGP_ACTIVITY_LED_MASK; 997 + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); 998 + E1000_WRITE_REG(hw, LEDCTL, led_ctrl); 999 + 1000 + /* disable lplu d3 during driver init */ 1001 + ret_val = e1000_set_d3_lplu_state(hw, FALSE); 1002 + if (ret_val) { 1003 + DEBUGOUT("Error Disabling LPLU D3\n"); 1004 + return ret_val; 1005 + } 1006 + 1007 + /* disable lplu d0 during driver init */ 1008 + ret_val = e1000_set_d0_lplu_state(hw, FALSE); 1009 + if (ret_val) { 1010 + DEBUGOUT("Error Disabling LPLU D0\n"); 1011 + return ret_val; 1012 + } 1013 + /* Configure mdi-mdix settings */ 1014 + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); 1015 + if (ret_val) 1016 + return ret_val; 1017 + 1018 + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { 1019 + hw->dsp_config_state = e1000_dsp_config_disabled; 1020 + /* Force MDI for earlier revs of the IGP PHY */ 1021 + phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX); 1022 + hw->mdix = 1; 1023 + 1024 + } else { 1025 + hw->dsp_config_state = e1000_dsp_config_enabled; 1026 + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; 1027 + 1028 + switch (hw->mdix) { 1029 + case 1: 1030 + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; 1031 + break; 1032 + case 2: 1033 + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; 1034 + break; 1035 + case 0: 1036 + default: 1037 + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; 1038 + break; 1039 + } 1040 + } 1041 + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); 1042 + if(ret_val) 1043 + return ret_val; 1044 + 1045 + /* set auto-master slave resolution settings */ 1046 + if(hw->autoneg) { 1047 + e1000_ms_type phy_ms_setting = hw->master_slave; 1048 + 1049 + if(hw->ffe_config_state == e1000_ffe_config_active) 1050 + hw->ffe_config_state = e1000_ffe_config_enabled; 1051 + 1052 + if(hw->dsp_config_state == e1000_dsp_config_activated) 1053 + hw->dsp_config_state = e1000_dsp_config_enabled; 1054 + 1055 + /* when autonegotiation advertisment is only 1000Mbps then we 1056 + * should disable SmartSpeed and enable Auto MasterSlave 1057 + * resolution as hardware default. 
*/ 1058 + if(hw->autoneg_advertised == ADVERTISE_1000_FULL) { 1059 + /* Disable SmartSpeed */ 1060 + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); 1061 if(ret_val) 1062 return ret_val; 1063 + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1064 + ret_val = e1000_write_phy_reg(hw, 1065 IGP01E1000_PHY_PORT_CONFIG, 1066 phy_data); 1067 if(ret_val) 1068 return ret_val; 1069 + /* Set auto Master/Slave resolution process */ 1070 + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); 1071 + if(ret_val) 1072 + return ret_val; 1073 + phy_data &= ~CR_1000T_MS_ENABLE; 1074 + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); 1075 + if(ret_val) 1076 + return ret_val; 1077 + } 1078 1079 + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); 1080 + if(ret_val) 1081 + return ret_val; 1082 1083 + /* load defaults for future use */ 1084 + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? 1085 + ((phy_data & CR_1000T_MS_VALUE) ? 1086 + e1000_ms_force_master : 1087 + e1000_ms_force_slave) : 1088 + e1000_ms_auto; 1089 1090 + switch (phy_ms_setting) { 1091 + case e1000_ms_force_master: 1092 + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); 1093 + break; 1094 + case e1000_ms_force_slave: 1095 + phy_data |= CR_1000T_MS_ENABLE; 1096 + phy_data &= ~(CR_1000T_MS_VALUE); 1097 + break; 1098 + case e1000_ms_auto: 1099 + phy_data &= ~CR_1000T_MS_ENABLE; 1100 default: 1101 + break; 1102 + } 1103 + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); 1104 + if(ret_val) 1105 + return ret_val; 1106 + } 1107 1108 + return E1000_SUCCESS; 1109 + } 1110 1111 1112 + /******************************************************************** 1113 + * Copper link setup for e1000_phy_m88 series. 1114 + * 1115 + * hw - Struct containing variables accessed by shared code 1116 + *********************************************************************/ 1117 + static int32_t 1118 + e1000_copper_link_mgp_setup(struct e1000_hw *hw) 1119 + { 1120 + int32_t ret_val; 1121 + uint16_t phy_data; 1122 1123 + DEBUGFUNC("e1000_copper_link_mgp_setup"); 1124 + 1125 + if(hw->phy_reset_disable) 1126 + return E1000_SUCCESS; 1127 + 1128 + /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ 1129 + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1130 + if(ret_val) 1131 + return ret_val; 1132 + 1133 + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 1134 + 1135 + /* Options: 1136 + * MDI/MDI-X = 0 (default) 1137 + * 0 - Auto for all speeds 1138 + * 1 - MDI mode 1139 + * 2 - MDI-X mode 1140 + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) 1141 + */ 1142 + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; 1143 + 1144 + switch (hw->mdix) { 1145 + case 1: 1146 + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; 1147 + break; 1148 + case 2: 1149 + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; 1150 + break; 1151 + case 3: 1152 + phy_data |= M88E1000_PSCR_AUTO_X_1000T; 1153 + break; 1154 + case 0: 1155 + default: 1156 + phy_data |= M88E1000_PSCR_AUTO_X_MODE; 1157 + break; 1158 + } 1159 + 1160 + /* Options: 1161 + * disable_polarity_correction = 0 (default) 1162 + * Automatic Correction for Reversed Cable Polarity 1163 + * 0 - Disabled 1164 + * 1 - Enabled 1165 + */ 1166 + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; 1167 + if(hw->disable_polarity_correction == 1) 1168 + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 1169 + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 1170 + if(ret_val) 1171 + return ret_val; 1172 + 1173 + /* Force TX_CLK in the Extended PHY Specific Control Register 1174 + * to 25MHz clock. 1175 + */ 1176 + ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); 1177 + if(ret_val) 1178 + return ret_val; 1179 + 1180 + phy_data |= M88E1000_EPSCR_TX_CLK_25; 1181 + 1182 + if (hw->phy_revision < M88E1011_I_REV_4) { 1183 + /* Configure Master and Slave downshift values */ 1184 + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | 1185 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); 1186 + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | 1187 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); 1188 + ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); 1189 + if(ret_val) 1190 + return ret_val; 1191 + } 1192 1193 + /* SW Reset the PHY so all changes take effect */ 1194 + ret_val = e1000_phy_reset(hw); 1195 + if(ret_val) { 1196 + DEBUGOUT("Error Resetting the PHY\n"); 1197 + return ret_val; 1198 + } 1199 + 1200 + return E1000_SUCCESS; 1201 + } 1202 + 1203 + /******************************************************************** 1204 + * Setup auto-negotiation and flow control advertisements, 1205 + * and then perform auto-negotiation. 1206 + * 1207 + * hw - Struct containing variables accessed by shared code 1208 + *********************************************************************/ 1209 + static int32_t 1210 + e1000_copper_link_autoneg(struct e1000_hw *hw) 1211 + { 1212 + int32_t ret_val; 1213 + uint16_t phy_data; 1214 + 1215 + DEBUGFUNC("e1000_copper_link_autoneg"); 1216 + 1217 + /* Perform some bounds checking on the hw->autoneg_advertised 1218 + * parameter. If this variable is zero, then set it to the default. 1219 + */ 1220 + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; 1221 + 1222 + /* If autoneg_advertised is zero, we assume it was not defaulted 1223 + * by the calling code so we set to advertise full capability. 
1224 + */ 1225 + if(hw->autoneg_advertised == 0) 1226 + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 1227 + 1228 + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); 1229 + ret_val = e1000_phy_setup_autoneg(hw); 1230 + if(ret_val) { 1231 + DEBUGOUT("Error Setting up Auto-Negotiation\n"); 1232 + return ret_val; 1233 + } 1234 + DEBUGOUT("Restarting Auto-Neg\n"); 1235 + 1236 + /* Restart auto-negotiation by setting the Auto Neg Enable bit and 1237 + * the Auto Neg Restart bit in the PHY control register. 1238 + */ 1239 + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); 1240 + if(ret_val) 1241 + return ret_val; 1242 + 1243 + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 1244 + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); 1245 + if(ret_val) 1246 + return ret_val; 1247 + 1248 + /* Does the user want to wait for Auto-Neg to complete here, or 1249 + * check at a later time (for example, callback routine). 1250 + */ 1251 + if(hw->wait_autoneg_complete) { 1252 + ret_val = e1000_wait_autoneg(hw); 1253 + if(ret_val) { 1254 + DEBUGOUT("Error while waiting for autoneg to complete\n"); 1255 + return ret_val; 1256 } 1257 + } 1258 1259 + hw->get_link_status = TRUE; 1260 1261 + return E1000_SUCCESS; 1262 + } 1263 1264 1265 + /****************************************************************************** 1266 + * Config the MAC and the PHY after link is up. 1267 + * 1) Set up the MAC to the current PHY speed/duplex 1268 + * if we are on 82543. If we 1269 + * are on newer silicon, we only need to configure 1270 + * collision distance in the Transmit Control Register. 1271 + * 2) Set up flow control on the MAC to that established with 1272 + * the link partner. 1273 + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 1274 + * 1275 + * hw - Struct containing variables accessed by shared code 1276 + ******************************************************************************/ 1277 + static int32_t 1278 + e1000_copper_link_postconfig(struct e1000_hw *hw) 1279 + { 1280 + int32_t ret_val; 1281 + DEBUGFUNC("e1000_copper_link_postconfig"); 1282 + 1283 + if(hw->mac_type >= e1000_82544) { 1284 + e1000_config_collision_dist(hw); 1285 + } else { 1286 + ret_val = e1000_config_mac_to_phy(hw); 1287 + if(ret_val) { 1288 + DEBUGOUT("Error configuring MAC to PHY settings\n"); 1289 + return ret_val; 1290 } 1291 + } 1292 + ret_val = e1000_config_fc_after_link_up(hw); 1293 + if(ret_val) { 1294 + DEBUGOUT("Error Configuring Flow Control\n"); 1295 + return ret_val; 1296 + } 1297 + 1298 + /* Config DSP to improve Giga link quality */ 1299 + if(hw->phy_type == e1000_phy_igp) { 1300 + ret_val = e1000_config_dsp_after_link_change(hw, TRUE); 1301 + if(ret_val) { 1302 + DEBUGOUT("Error Configuring DSP after link up\n"); 1303 + return ret_val; 1304 + } 1305 + } 1306 + 1307 + return E1000_SUCCESS; 1308 + } 1309 + 1310 + /****************************************************************************** 1311 + * Detects which PHY is present and setup the speed and duplex 1312 + * 1313 + * hw - Struct containing variables accessed by shared code 1314 + ******************************************************************************/ 1315 + static int32_t 1316 + e1000_setup_copper_link(struct e1000_hw *hw) 1317 + { 1318 + int32_t ret_val; 1319 + uint16_t i; 1320 + uint16_t phy_data; 1321 + 1322 + DEBUGFUNC("e1000_setup_copper_link"); 1323 + 1324 + /* Check if it is a valid PHY and set PHY mode if necessary. 
*/ 1325 + ret_val = e1000_copper_link_preconfig(hw); 1326 + if(ret_val) 1327 + return ret_val; 1328 + 1329 + if (hw->phy_type == e1000_phy_igp || 1330 + hw->phy_type == e1000_phy_igp_2) { 1331 + ret_val = e1000_copper_link_igp_setup(hw); 1332 + if(ret_val) 1333 + return ret_val; 1334 + } else if (hw->phy_type == e1000_phy_m88) { 1335 + ret_val = e1000_copper_link_mgp_setup(hw); 1336 + if(ret_val) 1337 + return ret_val; 1338 + } 1339 + 1340 + if(hw->autoneg) { 1341 + /* Setup autoneg and flow control advertisement 1342 + * and perform autonegotiation */ 1343 + ret_val = e1000_copper_link_autoneg(hw); 1344 + if(ret_val) 1345 + return ret_val; 1346 + } else { 1347 + /* PHY will be set to 10H, 10F, 100H,or 100F 1348 + * depending on value from forced_speed_duplex. */ 1349 + DEBUGOUT("Forcing speed and duplex\n"); 1350 + ret_val = e1000_phy_force_speed_duplex(hw); 1351 + if(ret_val) { 1352 + DEBUGOUT("Error Forcing Speed and Duplex\n"); 1353 + return ret_val; 1354 + } 1355 + } 1356 1357 /* Check link status. Wait up to 100 microseconds for link to become 1358 * valid. ··· 1242 return ret_val; 1243 1244 if(phy_data & MII_SR_LINK_STATUS) { 1245 + /* Config the MAC and PHY after link is up */ 1246 + ret_val = e1000_copper_link_postconfig(hw); 1247 + if(ret_val) 1248 return ret_val; 1249 + 1250 DEBUGOUT("Valid link established!!!\n"); 1251 return E1000_SUCCESS; 1252 } ··· 1302 if(ret_val) 1303 return ret_val; 1304 1305 + /* Read the MII 1000Base-T Control Register (Address 9). */ 1306 + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); 1307 + if(ret_val) 1308 + return ret_val; 1309 1310 /* Need to parse both autoneg_advertised and fc and set up 1311 * the appropriate PHY registers. First we will parse for ··· 1417 1418 DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 1419 1420 + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); 1421 if(ret_val) 1422 return ret_val; 1423 ··· 1678 1679 DEBUGFUNC("e1000_config_mac_to_phy"); 1680 1681 + /* 82544 or newer MAC, Auto Speed Detection takes care of 1682 + * MAC speed/duplex configuration.*/ 1683 + if (hw->mac_type >= e1000_82544) 1684 + return E1000_SUCCESS; 1685 + 1686 /* Read the Device Control Register and set the bits to Force Speed 1687 * and Duplex. 1688 */ ··· 1688 /* Set up duplex in the Device Control and Transmit Control 1689 * registers depending on negotiated values. 1690 */ 1691 + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); 1692 + if(ret_val) 1693 + return ret_val; 1694 1695 + if(phy_data & M88E1000_PSSR_DPLX) 1696 + ctrl |= E1000_CTRL_FD; 1697 + else 1698 + ctrl &= ~E1000_CTRL_FD; 1699 1700 + e1000_config_collision_dist(hw); 1701 1702 + /* Set up speed in the Device Control register depending on 1703 + * negotiated values. 1704 + */ 1705 + if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) 1706 + ctrl |= E1000_CTRL_SPD_1000; 1707 + else if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) 1708 + ctrl |= E1000_CTRL_SPD_100; 1709 1710 /* Write the configured values back to the Device Control Reg. 
*/ 1711 E1000_WRITE_REG(hw, CTRL, ctrl); 1712 return E1000_SUCCESS; ··· 2494 2495 DEBUGFUNC("e1000_read_phy_reg"); 2496 2497 + if((hw->phy_type == e1000_phy_igp || 2498 + hw->phy_type == e1000_phy_igp_2) && 2499 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2500 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 2501 (uint16_t)reg_addr); ··· 2600 2601 DEBUGFUNC("e1000_write_phy_reg"); 2602 2603 + if((hw->phy_type == e1000_phy_igp || 2604 + hw->phy_type == e1000_phy_igp_2) && 2605 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2606 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 2607 (uint16_t)reg_addr); ··· 2679 return E1000_SUCCESS; 2680 } 2681 2682 + 2683 /****************************************************************************** 2684 * Returns the PHY to the power-on reset state 2685 * 2686 * hw - Struct containing variables accessed by shared code 2687 ******************************************************************************/ 2688 + int32_t 2689 e1000_phy_hw_reset(struct e1000_hw *hw) 2690 { 2691 uint32_t ctrl, ctrl_ext; 2692 uint32_t led_ctrl; 2693 + int32_t ret_val; 2694 2695 DEBUGFUNC("e1000_phy_hw_reset"); 2696 + 2697 + /* In the case of the phy reset being blocked, it's not an error, we 2698 + * simply return success without performing the reset. */ 2699 + ret_val = e1000_check_phy_reset_block(hw); 2700 + if (ret_val) 2701 + return E1000_SUCCESS; 2702 2703 DEBUGOUT("Resetting Phy...\n"); 2704 ··· 2727 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); 2728 E1000_WRITE_REG(hw, LEDCTL, led_ctrl); 2729 } 2730 + 2731 + /* Wait for FW to finish PHY configuration. */ 2732 + ret_val = e1000_get_phy_cfg_done(hw); 2733 + 2734 + return ret_val; 2735 } 2736 2737 /****************************************************************************** ··· 2744 2745 DEBUGFUNC("e1000_phy_reset"); 2746 2747 + /* In the case of the phy reset being blocked, it's not an error, we 2748 + * simply return success without performing the reset. */ 2749 + ret_val = e1000_check_phy_reset_block(hw); 2750 + if (ret_val) 2751 + return E1000_SUCCESS; 2752 + 2753 + switch (hw->mac_type) { 2754 + case e1000_82541_rev_2: 2755 + ret_val = e1000_phy_hw_reset(hw); 2756 + if(ret_val) 2757 + return ret_val; 2758 + break; 2759 + default: 2760 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); 2761 if(ret_val) 2762 return ret_val; ··· 2755 return ret_val; 2756 2757 udelay(1); 2758 + break; 2759 + } 2760 2761 + if(hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2) 2762 e1000_phy_init_script(hw); 2763 2764 return E1000_SUCCESS; ··· 2810 case e1000_82547: 2811 case e1000_82547_rev_2: 2812 if(hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE; 2813 + break; 2814 + case e1000_82573: 2815 + if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE; 2816 break; 2817 default: 2818 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); ··· 2866 2867 /* The downshift status is checked only once, after link is established, 2868 * and it stored in the hw->speed_downgraded parameter. */ 2869 + phy_info->downshift = (e1000_downshift)hw->speed_downgraded; 2870 2871 /* IGP01E1000 does not need to support it. */ 2872 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; ··· 2905 if(ret_val) 2906 return ret_val; 2907 2908 + /* Translate to old method */ 2909 average = (max_length + min_length) / 2; 2910 2911 if(average <= e1000_igp_cable_length_50) ··· 2940 2941 /* The downshift status is checked only once, after link is established, 2942 * and it stored in the hw->speed_downgraded parameter. 
*/ 2943 + phy_info->downshift = (e1000_downshift)hw->speed_downgraded; 2944 2945 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 2946 if(ret_val) ··· 3029 return -E1000_ERR_CONFIG; 3030 } 3031 3032 + if(hw->phy_type == e1000_phy_igp || 3033 + hw->phy_type == e1000_phy_igp_2) 3034 return e1000_phy_igp_get_info(hw, phy_info); 3035 else 3036 return e1000_phy_m88_get_info(hw, phy_info); ··· 3055 * 3056 * hw - Struct containing variables accessed by shared code 3057 *****************************************************************************/ 3058 + int32_t 3059 e1000_init_eeprom_params(struct e1000_hw *hw) 3060 { 3061 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3062 uint32_t eecd = E1000_READ_REG(hw, EECD); 3063 + int32_t ret_val = E1000_SUCCESS; 3064 uint16_t eeprom_size; 3065 3066 DEBUGFUNC("e1000_init_eeprom_params"); ··· 3074 eeprom->opcode_bits = 3; 3075 eeprom->address_bits = 6; 3076 eeprom->delay_usec = 50; 3077 + eeprom->use_eerd = FALSE; 3078 + eeprom->use_eewr = FALSE; 3079 break; 3080 case e1000_82540: 3081 case e1000_82545: ··· 3090 eeprom->word_size = 64; 3091 eeprom->address_bits = 6; 3092 } 3093 + eeprom->use_eerd = FALSE; 3094 + eeprom->use_eewr = FALSE; 3095 break; 3096 case e1000_82541: 3097 case e1000_82541_rev_2: ··· 3118 eeprom->address_bits = 6; 3119 } 3120 } 3121 + eeprom->use_eerd = FALSE; 3122 + eeprom->use_eewr = FALSE; 3123 + break; 3124 + case e1000_82573: 3125 + eeprom->type = e1000_eeprom_spi; 3126 + eeprom->opcode_bits = 8; 3127 + eeprom->delay_usec = 1; 3128 + if (eecd & E1000_EECD_ADDR_BITS) { 3129 + eeprom->page_size = 32; 3130 + eeprom->address_bits = 16; 3131 + } else { 3132 + eeprom->page_size = 8; 3133 + eeprom->address_bits = 8; 3134 + } 3135 + eeprom->use_eerd = TRUE; 3136 + eeprom->use_eewr = TRUE; 3137 + if(e1000_is_onboard_nvm_eeprom(hw) == FALSE) { 3138 + eeprom->type = e1000_eeprom_flash; 3139 + eeprom->word_size = 2048; 3140 + 3141 + /* Ensure that the Autonomous FLASH update bit is cleared due to 3142 + * Flash update issue on parts which use a FLASH for NVM. */ 3143 + eecd &= ~E1000_EECD_AUPDEN; 3144 + E1000_WRITE_REG(hw, EECD, eecd); 3145 + } 3146 break; 3147 default: 3148 break; 3149 } 3150 3151 if (eeprom->type == e1000_eeprom_spi) { 3152 + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to 3153 + * 32KB (incremented by powers of 2). 3154 + */ 3155 + if(hw->mac_type <= e1000_82547_rev_2) { 3156 + /* Set to default value for initial eeprom read. */ 3157 + eeprom->word_size = 64; 3158 + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); 3159 + if(ret_val) 3160 + return ret_val; 3161 + eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; 3162 + /* 256B eeprom size was not supported in earlier hardware, so we 3163 + * bump eeprom_size up one to ensure that "1" (which maps to 256B) 3164 + * is never the result used in the shifting logic below. 
*/ 3165 + if(eeprom_size) 3166 + eeprom_size++; 3167 + } else { 3168 + eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >> 3169 + E1000_EECD_SIZE_EX_SHIFT); 3170 } 3171 + 3172 + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); 3173 } 3174 + return ret_val; 3175 } 3176 3177 /****************************************************************************** ··· 3306 3307 DEBUGFUNC("e1000_acquire_eeprom"); 3308 3309 + if(e1000_get_hw_eeprom_semaphore(hw)) 3310 + return -E1000_ERR_EEPROM; 3311 + 3312 eecd = E1000_READ_REG(hw, EECD); 3313 3314 + if (hw->mac_type != e1000_82573) { 3315 /* Request EEPROM Access */ 3316 if(hw->mac_type > e1000_82544) { 3317 eecd |= E1000_EECD_REQ; ··· 3325 DEBUGOUT("Could not acquire EEPROM grant\n"); 3326 return -E1000_ERR_EEPROM; 3327 } 3328 + } 3329 } 3330 3331 /* Setup EEPROM for Read/Write */ ··· 3443 eecd &= ~E1000_EECD_REQ; 3444 E1000_WRITE_REG(hw, EECD, eecd); 3445 } 3446 + 3447 + e1000_put_hw_eeprom_semaphore(hw); 3448 } 3449 3450 /****************************************************************************** ··· 3504 { 3505 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3506 uint32_t i = 0; 3507 + int32_t ret_val; 3508 3509 DEBUGFUNC("e1000_read_eeprom"); 3510 + 3511 /* A check for invalid values: offset too large, too many words, and not 3512 * enough words. 3513 */ ··· 3515 return -E1000_ERR_EEPROM; 3516 } 3517 3518 + /* FLASH reads without acquiring the semaphore are safe in 82573-based 3519 + * controllers. 3520 + */ 3521 + if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || 3522 + (hw->mac_type != e1000_82573)) { 3523 + /* Prepare the EEPROM for reading */ 3524 + if(e1000_acquire_eeprom(hw) != E1000_SUCCESS) 3525 + return -E1000_ERR_EEPROM; 3526 + } 3527 + 3528 + if(eeprom->use_eerd == TRUE) { 3529 + ret_val = e1000_read_eeprom_eerd(hw, offset, words, data); 3530 + if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || 3531 + (hw->mac_type != e1000_82573)) 3532 + e1000_release_eeprom(hw); 3533 + return ret_val; 3534 + } 3535 3536 if(eeprom->type == e1000_eeprom_spi) { 3537 uint16_t word_in; ··· 3569 } 3570 3571 /****************************************************************************** 3572 + * Reads a 16 bit word from the EEPROM using the EERD register. 3573 + * 3574 + * hw - Struct containing variables accessed by shared code 3575 + * offset - offset of word in the EEPROM to read 3576 + * data - word read from the EEPROM 3577 + * words - number of words to read 3578 + *****************************************************************************/ 3579 + int32_t 3580 + e1000_read_eeprom_eerd(struct e1000_hw *hw, 3581 + uint16_t offset, 3582 + uint16_t words, 3583 + uint16_t *data) 3584 + { 3585 + uint32_t i, eerd = 0; 3586 + int32_t error = 0; 3587 + 3588 + for (i = 0; i < words; i++) { 3589 + eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + 3590 + E1000_EEPROM_RW_REG_START; 3591 + 3592 + E1000_WRITE_REG(hw, EERD, eerd); 3593 + error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ); 3594 + 3595 + if(error) { 3596 + break; 3597 + } 3598 + data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA); 3599 + 3600 + } 3601 + 3602 + return error; 3603 + } 3604 + 3605 + /****************************************************************************** 3606 + * Writes a 16 bit word from the EEPROM using the EEWR register. 
3607 + * 3608 + * hw - Struct containing variables accessed by shared code 3609 + * offset - offset of word in the EEPROM to read 3610 + * data - word read from the EEPROM 3611 + * words - number of words to read 3612 + *****************************************************************************/ 3613 + int32_t 3614 + e1000_write_eeprom_eewr(struct e1000_hw *hw, 3615 + uint16_t offset, 3616 + uint16_t words, 3617 + uint16_t *data) 3618 + { 3619 + uint32_t register_value = 0; 3620 + uint32_t i = 0; 3621 + int32_t error = 0; 3622 + 3623 + for (i = 0; i < words; i++) { 3624 + register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) | 3625 + ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) | 3626 + E1000_EEPROM_RW_REG_START; 3627 + 3628 + error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); 3629 + if(error) { 3630 + break; 3631 + } 3632 + 3633 + E1000_WRITE_REG(hw, EEWR, register_value); 3634 + 3635 + error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); 3636 + 3637 + if(error) { 3638 + break; 3639 + } 3640 + } 3641 + 3642 + return error; 3643 + } 3644 + 3645 + /****************************************************************************** 3646 + * Polls the status bit (bit 1) of the EERD to determine when the read is done. 3647 + * 3648 + * hw - Struct containing variables accessed by shared code 3649 + *****************************************************************************/ 3650 + int32_t 3651 + e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) 3652 + { 3653 + uint32_t attempts = 100000; 3654 + uint32_t i, reg = 0; 3655 + int32_t done = E1000_ERR_EEPROM; 3656 + 3657 + for(i = 0; i < attempts; i++) { 3658 + if(eerd == E1000_EEPROM_POLL_READ) 3659 + reg = E1000_READ_REG(hw, EERD); 3660 + else 3661 + reg = E1000_READ_REG(hw, EEWR); 3662 + 3663 + if(reg & E1000_EEPROM_RW_REG_DONE) { 3664 + done = E1000_SUCCESS; 3665 + break; 3666 + } 3667 + udelay(5); 3668 + } 3669 + 3670 + return done; 3671 + } 3672 + 3673 + /*************************************************************************** 3674 + * Description: Determines if the onboard NVM is FLASH or EEPROM. 3675 + * 3676 + * hw - Struct containing variables accessed by shared code 3677 + ****************************************************************************/ 3678 + boolean_t 3679 + e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) 3680 + { 3681 + uint32_t eecd = 0; 3682 + 3683 + if(hw->mac_type == e1000_82573) { 3684 + eecd = E1000_READ_REG(hw, EECD); 3685 + 3686 + /* Isolate bits 15 & 16 */ 3687 + eecd = ((eecd >> 15) & 0x03); 3688 + 3689 + /* If both bits are set, device is Flash type */ 3690 + if(eecd == 0x03) { 3691 + return FALSE; 3692 + } 3693 + } 3694 + return TRUE; 3695 + } 3696 + 3697 + /****************************************************************************** 3698 * Verifies that the EEPROM has a valid checksum 3699 * 3700 * hw - Struct containing variables accessed by shared code ··· 3584 uint16_t i, eeprom_data; 3585 3586 DEBUGFUNC("e1000_validate_eeprom_checksum"); 3587 + 3588 + if ((hw->mac_type == e1000_82573) && 3589 + (e1000_is_onboard_nvm_eeprom(hw) == FALSE)) { 3590 + /* Check bit 4 of word 10h. If it is 0, firmware is done updating 3591 + * 10h-12h. Checksum may need to be fixed. */ 3592 + e1000_read_eeprom(hw, 0x10, 1, &eeprom_data); 3593 + if ((eeprom_data & 0x10) == 0) { 3594 + /* Read 0x23 and check bit 15. This bit is a 1 when the checksum 3595 + * has already been fixed. If the checksum is still wrong and this 3596 + * bit is a 1, we need to return bad checksum. 
Otherwise, we need 3597 + * to set this bit to a 1 and update the checksum. */ 3598 + e1000_read_eeprom(hw, 0x23, 1, &eeprom_data); 3599 + if ((eeprom_data & 0x8000) == 0) { 3600 + eeprom_data |= 0x8000; 3601 + e1000_write_eeprom(hw, 0x23, 1, &eeprom_data); 3602 + e1000_update_eeprom_checksum(hw); 3603 + } 3604 + } 3605 + } 3606 3607 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 3608 if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { ··· 3628 if(e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { 3629 DEBUGOUT("EEPROM Write Error\n"); 3630 return -E1000_ERR_EEPROM; 3631 + } else if (hw->eeprom.type == e1000_eeprom_flash) { 3632 + e1000_commit_shadow_ram(hw); 3633 } 3634 return E1000_SUCCESS; 3635 } ··· 3662 DEBUGOUT("\"words\" parameter out of bounds\n"); 3663 return -E1000_ERR_EEPROM; 3664 } 3665 + 3666 + /* 82573 reads only through eerd */ 3667 + if(eeprom->use_eewr == TRUE) 3668 + return e1000_write_eeprom_eewr(hw, offset, words, data); 3669 3670 /* Prepare the EEPROM for writing */ 3671 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) ··· 3833 } 3834 3835 /****************************************************************************** 3836 + * Flushes the cached eeprom to NVM. This is done by saving the modified values 3837 + * in the eeprom cache and the non modified values in the currently active bank 3838 + * to the new bank. 3839 + * 3840 + * hw - Struct containing variables accessed by shared code 3841 + * offset - offset of word in the EEPROM to read 3842 + * data - word read from the EEPROM 3843 + * words - number of words to read 3844 + *****************************************************************************/ 3845 + int32_t 3846 + e1000_commit_shadow_ram(struct e1000_hw *hw) 3847 + { 3848 + uint32_t attempts = 100000; 3849 + uint32_t eecd = 0; 3850 + uint32_t flop = 0; 3851 + uint32_t i = 0; 3852 + int32_t error = E1000_SUCCESS; 3853 + 3854 + /* The flop register will be used to determine if flash type is STM */ 3855 + flop = E1000_READ_REG(hw, FLOP); 3856 + 3857 + if (hw->mac_type == e1000_82573) { 3858 + for (i=0; i < attempts; i++) { 3859 + eecd = E1000_READ_REG(hw, EECD); 3860 + if ((eecd & E1000_EECD_FLUPD) == 0) { 3861 + break; 3862 + } 3863 + udelay(5); 3864 + } 3865 + 3866 + if (i == attempts) { 3867 + return -E1000_ERR_EEPROM; 3868 + } 3869 + 3870 + /* If STM opcode located in bits 15:8 of flop, reset firmware */ 3871 + if ((flop & 0xFF00) == E1000_STM_OPCODE) { 3872 + E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET); 3873 + } 3874 + 3875 + /* Perform the flash update */ 3876 + E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD); 3877 + 3878 + for (i=0; i < attempts; i++) { 3879 + eecd = E1000_READ_REG(hw, EECD); 3880 + if ((eecd & E1000_EECD_FLUPD) == 0) { 3881 + break; 3882 + } 3883 + udelay(5); 3884 + } 3885 + 3886 + if (i == attempts) { 3887 + return -E1000_ERR_EEPROM; 3888 + } 3889 + } 3890 + 3891 + return error; 3892 + } 3893 + 3894 + /****************************************************************************** 3895 * Reads the adapter's part number from the EEPROM 3896 * 3897 * hw - Struct containing variables accessed by shared code ··· 3911 e1000_init_rx_addrs(struct e1000_hw *hw) 3912 { 3913 uint32_t i; 3914 + uint32_t rar_num; 3915 3916 DEBUGFUNC("e1000_init_rx_addrs"); 3917 ··· 3919 3920 e1000_rar_set(hw, hw->mac_addr, 0); 3921 3922 + rar_num = E1000_RAR_ENTRIES; 3923 /* Zero out the other 15 receive addresses. 
*/ 3924 DEBUGOUT("Clearing RAR[1-15]\n"); 3925 + for(i = 1; i < rar_num; i++) { 3926 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 3927 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 3928 } ··· 3950 { 3951 uint32_t hash_value; 3952 uint32_t i; 3953 + uint32_t num_rar_entry; 3954 + uint32_t num_mta_entry; 3955 + 3956 DEBUGFUNC("e1000_mc_addr_list_update"); 3957 3958 /* Set the new number of MC addresses that we are being requested to use. */ ··· 3958 3959 /* Clear RAR[1-15] */ 3960 DEBUGOUT(" Clearing RAR[1-15]\n"); 3961 + num_rar_entry = E1000_RAR_ENTRIES; 3962 + for(i = rar_used_count; i < num_rar_entry; i++) { 3963 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 3964 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 3965 } 3966 3967 /* Clear the MTA */ 3968 DEBUGOUT(" Clearing MTA\n"); 3969 + num_mta_entry = E1000_NUM_MTA_REGISTERS; 3970 + for(i = 0; i < num_mta_entry; i++) { 3971 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 3972 } 3973 ··· 3989 /* Place this multicast address in the RAR if there is room, * 3990 * else put it in the MTA 3991 */ 3992 + if (rar_used_count < num_rar_entry) { 3993 e1000_rar_set(hw, 3994 mc_addr_list + (i * (ETH_LENGTH_OF_ADDRESS + pad)), 3995 rar_used_count); ··· 4040 } 4041 4042 hash_value &= 0xFFF; 4043 + 4044 return hash_value; 4045 } 4046 ··· 4144 e1000_clear_vfta(struct e1000_hw *hw) 4145 { 4146 uint32_t offset; 4147 + uint32_t vfta_value = 0; 4148 + uint32_t vfta_offset = 0; 4149 + uint32_t vfta_bit_in_reg = 0; 4150 4151 + if (hw->mac_type == e1000_82573) { 4152 + if (hw->mng_cookie.vlan_id != 0) { 4153 + /* The VFTA is a 4096b bit-field, each identifying a single VLAN 4154 + * ID. The following operations determine which 32b entry 4155 + * (i.e. offset) into the array we want to set the VLAN ID 4156 + * (i.e. bit) of the manageability unit. */ 4157 + vfta_offset = (hw->mng_cookie.vlan_id >> 4158 + E1000_VFTA_ENTRY_SHIFT) & 4159 + E1000_VFTA_ENTRY_MASK; 4160 + vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id & 4161 + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); 4162 + } 4163 + } 4164 + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { 4165 + /* If the offset we want to clear is the same offset of the 4166 + * manageability VLAN ID, then clear all bits except that of the 4167 + * manageability unit */ 4168 + vfta_value = (offset == vfta_offset) ? 
vfta_bit_in_reg : 0; 4169 + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); 4170 + } 4171 } 4172 4173 + int32_t 4174 e1000_id_led_init(struct e1000_hw * hw) 4175 { 4176 uint32_t ledctl; ··· 4480 temp = E1000_READ_REG(hw, MGTPRC); 4481 temp = E1000_READ_REG(hw, MGTPDC); 4482 temp = E1000_READ_REG(hw, MGTPTC); 4483 + 4484 + if(hw->mac_type <= e1000_82547_rev_2) return; 4485 + 4486 + temp = E1000_READ_REG(hw, IAC); 4487 + temp = E1000_READ_REG(hw, ICRXOC); 4488 + temp = E1000_READ_REG(hw, ICRXPTC); 4489 + temp = E1000_READ_REG(hw, ICRXATC); 4490 + temp = E1000_READ_REG(hw, ICTXPTC); 4491 + temp = E1000_READ_REG(hw, ICTXATC); 4492 + temp = E1000_READ_REG(hw, ICTXQEC); 4493 + temp = E1000_READ_REG(hw, ICTXQMTC); 4494 + temp = E1000_READ_REG(hw, ICRXDMTC); 4495 + 4496 } 4497 4498 /****************************************************************************** ··· 4646 hw->bus_speed = e1000_bus_speed_unknown; 4647 hw->bus_width = e1000_bus_width_unknown; 4648 break; 4649 + case e1000_82573: 4650 + hw->bus_type = e1000_bus_type_pci_express; 4651 + hw->bus_speed = e1000_bus_speed_2500; 4652 + hw->bus_width = e1000_bus_width_pciex_4; 4653 + break; 4654 default: 4655 status = E1000_READ_REG(hw, STATUS); 4656 hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? ··· 4749 4750 /* Use old method for Phy older than IGP */ 4751 if(hw->phy_type == e1000_phy_m88) { 4752 + 4753 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 4754 &phy_data); 4755 if(ret_val) ··· 4865 return ret_val; 4866 *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >> 4867 M88E1000_PSSR_REV_POLARITY_SHIFT; 4868 + } else if(hw->phy_type == e1000_phy_igp || 4869 + hw->phy_type == e1000_phy_igp_2) { 4870 /* Read the Status register to check the speed */ 4871 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 4872 &phy_data); ··· 4917 4918 DEBUGFUNC("e1000_check_downshift"); 4919 4920 + if(hw->phy_type == e1000_phy_igp || 4921 + hw->phy_type == e1000_phy_igp_2) { 4922 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, 4923 &phy_data); 4924 if(ret_val) ··· 4933 hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> 4934 M88E1000_PSSR_DOWNSHIFT_SHIFT; 4935 } 4936 + 4937 return E1000_SUCCESS; 4938 } 4939 ··· 5047 if(ret_val) 5048 return ret_val; 5049 5050 + msec_delay_irq(20); 5051 5052 ret_val = e1000_write_phy_reg(hw, 0x0000, 5053 IGP01E1000_IEEE_FORCE_GIGA); ··· 5071 if(ret_val) 5072 return ret_val; 5073 5074 + msec_delay_irq(20); 5075 5076 /* Now enable the transmitter */ 5077 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); ··· 5096 if(ret_val) 5097 return ret_val; 5098 5099 + msec_delay_irq(20); 5100 5101 ret_val = e1000_write_phy_reg(hw, 0x0000, 5102 IGP01E1000_IEEE_FORCE_GIGA); ··· 5112 if(ret_val) 5113 return ret_val; 5114 5115 + msec_delay_irq(20); 5116 5117 /* Now enable the transmitter */ 5118 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); ··· 5187 uint16_t phy_data; 5188 DEBUGFUNC("e1000_set_d3_lplu_state"); 5189 5190 + if(hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2) 5191 return E1000_SUCCESS; 5192 5193 /* During driver activity LPLU should not be used or it will attain link 5194 * from the lowest speeds starting from 10Mbps. 
The capability is used for 5195 * Dx transitions and states */ 5196 + if(hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) { 5197 + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); 5198 if(ret_val) 5199 return ret_val; 5200 + } else { 5201 + ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); 5202 + if(ret_val) 5203 + return ret_val; 5204 + } 5205 + 5206 + if(!active) { 5207 + if(hw->mac_type == e1000_82541_rev_2 || 5208 + hw->mac_type == e1000_82547_rev_2) { 5209 + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; 5210 + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); 5211 + if(ret_val) 5212 + return ret_val; 5213 + } else { 5214 + phy_data &= ~IGP02E1000_PM_D3_LPLU; 5215 + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, 5216 + phy_data); 5217 + if (ret_val) 5218 + return ret_val; 5219 + } 5220 5221 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during 5222 * Dx states where the power conservation is most important. During ··· 5236 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) || 5237 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { 5238 5239 + if(hw->mac_type == e1000_82541_rev_2 || 5240 + hw->mac_type == e1000_82547_rev_2) { 5241 + phy_data |= IGP01E1000_GMII_FLEX_SPD; 5242 + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); 5243 + if(ret_val) 5244 + return ret_val; 5245 + } else { 5246 + phy_data |= IGP02E1000_PM_D3_LPLU; 5247 + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, 5248 + phy_data); 5249 + if (ret_val) 5250 + return ret_val; 5251 + } 5252 + 5253 + /* When LPLU is enabled we should disable SmartSpeed */ 5254 + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); 5255 if(ret_val) 5256 return ret_val; 5257 + 5258 + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; 5259 + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data); 5260 + if(ret_val) 5261 + return ret_val; 5262 + 5263 + } 5264 + return E1000_SUCCESS; 5265 + } 5266 + 5267 + /***************************************************************************** 5268 + * 5269 + * This function sets the lplu d0 state according to the active flag. When 5270 + * activating lplu this function also disables smart speed and vise versa. 5271 + * lplu will not be activated unless the device autonegotiation advertisment 5272 + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. 5273 + * hw: Struct containing variables accessed by shared code 5274 + * active - true to enable lplu false to disable lplu. 5275 + * 5276 + * returns: - E1000_ERR_PHY if fail to read/write the PHY 5277 + * E1000_SUCCESS at any other case. 5278 + * 5279 + ****************************************************************************/ 5280 + 5281 + int32_t 5282 + e1000_set_d0_lplu_state(struct e1000_hw *hw, 5283 + boolean_t active) 5284 + { 5285 + int32_t ret_val; 5286 + uint16_t phy_data; 5287 + DEBUGFUNC("e1000_set_d0_lplu_state"); 5288 + 5289 + if(hw->mac_type <= e1000_82547_rev_2) 5290 + return E1000_SUCCESS; 5291 + 5292 + ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); 5293 + if(ret_val) 5294 + return ret_val; 5295 + 5296 + if (!active) { 5297 + phy_data &= ~IGP02E1000_PM_D0_LPLU; 5298 + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); 5299 + if (ret_val) 5300 + return ret_val; 5301 + 5302 + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during 5303 + * Dx states where the power conservation is most important. 
During 5304 + * driver activity we should enable SmartSpeed, so performance is 5305 + * maintained. */ 5306 + if (hw->smart_speed == e1000_smart_speed_on) { 5307 + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 5308 + &phy_data); 5309 + if(ret_val) 5310 + return ret_val; 5311 + 5312 + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; 5313 + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 5314 + phy_data); 5315 + if(ret_val) 5316 + return ret_val; 5317 + } else if (hw->smart_speed == e1000_smart_speed_off) { 5318 + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 5319 + &phy_data); 5320 + if (ret_val) 5321 + return ret_val; 5322 + 5323 + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; 5324 + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 5325 + phy_data); 5326 + if(ret_val) 5327 + return ret_val; 5328 + } 5329 + 5330 + 5331 + } else { 5332 + 5333 + phy_data |= IGP02E1000_PM_D0_LPLU; 5334 + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); 5335 + if (ret_val) 5336 + return ret_val; 5337 5338 /* When LPLU is enabled we should disable SmartSpeed */ 5339 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); ··· 5316 return ret_val; 5317 5318 return E1000_SUCCESS; 5319 + } 5320 + 5321 + 5322 + /***************************************************************************** 5323 + * This function reads the cookie from ARC ram. 5324 + * 5325 + * returns: - E1000_SUCCESS . 5326 + ****************************************************************************/ 5327 + int32_t 5328 + e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer) 5329 + { 5330 + uint8_t i; 5331 + uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET; 5332 + uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH; 5333 + 5334 + length = (length >> 2); 5335 + offset = (offset >> 2); 5336 + 5337 + for (i = 0; i < length; i++) { 5338 + *((uint32_t *) buffer + i) = 5339 + E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i); 5340 + } 5341 + return E1000_SUCCESS; 5342 + } 5343 + 5344 + 5345 + /***************************************************************************** 5346 + * This function checks whether the HOST IF is enabled for command operaton 5347 + * and also checks whether the previous command is completed. 5348 + * It busy waits in case of previous command is not completed. 5349 + * 5350 + * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or 5351 + * timeout 5352 + * - E1000_SUCCESS for success. 5353 + ****************************************************************************/ 5354 + int32_t 5355 + e1000_mng_enable_host_if(struct e1000_hw * hw) 5356 + { 5357 + uint32_t hicr; 5358 + uint8_t i; 5359 + 5360 + /* Check that the host interface is enabled. 
*/ 5361 + hicr = E1000_READ_REG(hw, HICR); 5362 + if ((hicr & E1000_HICR_EN) == 0) { 5363 + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); 5364 + return -E1000_ERR_HOST_INTERFACE_COMMAND; 5365 + } 5366 + /* check the previous command is completed */ 5367 + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { 5368 + hicr = E1000_READ_REG(hw, HICR); 5369 + if (!(hicr & E1000_HICR_C)) 5370 + break; 5371 + msec_delay_irq(1); 5372 + } 5373 + 5374 + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { 5375 + DEBUGOUT("Previous command timeout failed .\n"); 5376 + return -E1000_ERR_HOST_INTERFACE_COMMAND; 5377 + } 5378 + return E1000_SUCCESS; 5379 + } 5380 + 5381 + /***************************************************************************** 5382 + * This function writes the buffer content at the offset given on the host if. 5383 + * It also does alignment considerations to do the writes in most efficient way. 5384 + * Also fills up the sum of the buffer in *buffer parameter. 5385 + * 5386 + * returns - E1000_SUCCESS for success. 5387 + ****************************************************************************/ 5388 + int32_t 5389 + e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer, 5390 + uint16_t length, uint16_t offset, uint8_t *sum) 5391 + { 5392 + uint8_t *tmp; 5393 + uint8_t *bufptr = buffer; 5394 + uint32_t data; 5395 + uint16_t remaining, i, j, prev_bytes; 5396 + 5397 + /* sum = only sum of the data and it is not checksum */ 5398 + 5399 + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) { 5400 + return -E1000_ERR_PARAM; 5401 + } 5402 + 5403 + tmp = (uint8_t *)&data; 5404 + prev_bytes = offset & 0x3; 5405 + offset &= 0xFFFC; 5406 + offset >>= 2; 5407 + 5408 + if (prev_bytes) { 5409 + data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset); 5410 + for (j = prev_bytes; j < sizeof(uint32_t); j++) { 5411 + *(tmp + j) = *bufptr++; 5412 + *sum += *(tmp + j); 5413 + } 5414 + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data); 5415 + length -= j - prev_bytes; 5416 + offset++; 5417 + } 5418 + 5419 + remaining = length & 0x3; 5420 + length -= remaining; 5421 + 5422 + /* Calculate length in DWORDs */ 5423 + length >>= 2; 5424 + 5425 + /* The device driver writes the relevant command block into the 5426 + * ram area. */ 5427 + for (i = 0; i < length; i++) { 5428 + for (j = 0; j < sizeof(uint32_t); j++) { 5429 + *(tmp + j) = *bufptr++; 5430 + *sum += *(tmp + j); 5431 + } 5432 + 5433 + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data); 5434 + } 5435 + if (remaining) { 5436 + for (j = 0; j < sizeof(uint32_t); j++) { 5437 + if (j < remaining) 5438 + *(tmp + j) = *bufptr++; 5439 + else 5440 + *(tmp + j) = 0; 5441 + 5442 + *sum += *(tmp + j); 5443 + } 5444 + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data); 5445 + } 5446 + 5447 + return E1000_SUCCESS; 5448 + } 5449 + 5450 + 5451 + /***************************************************************************** 5452 + * This function writes the command header after does the checksum calculation. 5453 + * 5454 + * returns - E1000_SUCCESS for success. 
5455 + ****************************************************************************/ 5456 + int32_t 5457 + e1000_mng_write_cmd_header(struct e1000_hw * hw, 5458 + struct e1000_host_mng_command_header * hdr) 5459 + { 5460 + uint16_t i; 5461 + uint8_t sum; 5462 + uint8_t *buffer; 5463 + 5464 + /* Write the whole command header structure which includes sum of 5465 + * the buffer */ 5466 + 5467 + uint16_t length = sizeof(struct e1000_host_mng_command_header); 5468 + 5469 + sum = hdr->checksum; 5470 + hdr->checksum = 0; 5471 + 5472 + buffer = (uint8_t *) hdr; 5473 + i = length; 5474 + while(i--) 5475 + sum += buffer[i]; 5476 + 5477 + hdr->checksum = 0 - sum; 5478 + 5479 + length >>= 2; 5480 + /* The device driver writes the relevant command block into the ram area. */ 5481 + for (i = 0; i < length; i++) 5482 + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i)); 5483 + 5484 + return E1000_SUCCESS; 5485 + } 5486 + 5487 + 5488 + /***************************************************************************** 5489 + * This function indicates to ARC that a new command is pending which completes 5490 + * one write operation by the driver. 5491 + * 5492 + * returns - E1000_SUCCESS for success. 5493 + ****************************************************************************/ 5494 + int32_t 5495 + e1000_mng_write_commit( 5496 + struct e1000_hw * hw) 5497 + { 5498 + uint32_t hicr; 5499 + 5500 + hicr = E1000_READ_REG(hw, HICR); 5501 + /* Setting this bit tells the ARC that a new command is pending. */ 5502 + E1000_WRITE_REG(hw, HICR, hicr | E1000_HICR_C); 5503 + 5504 + return E1000_SUCCESS; 5505 + } 5506 + 5507 + 5508 + /***************************************************************************** 5509 + * This function checks the mode of the firmware. 5510 + * 5511 + * returns - TRUE when the mode is IAMT or FALSE. 5512 + ****************************************************************************/ 5513 + boolean_t 5514 + e1000_check_mng_mode( 5515 + struct e1000_hw *hw) 5516 + { 5517 + uint32_t fwsm; 5518 + 5519 + fwsm = E1000_READ_REG(hw, FWSM); 5520 + 5521 + if((fwsm & E1000_FWSM_MODE_MASK) == 5522 + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) 5523 + return TRUE; 5524 + 5525 + return FALSE; 5526 + } 5527 + 5528 + 5529 + /***************************************************************************** 5530 + * This function writes the dhcp info . 5531 + ****************************************************************************/ 5532 + int32_t 5533 + e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer, 5534 + uint16_t length) 5535 + { 5536 + int32_t ret_val; 5537 + struct e1000_host_mng_command_header hdr; 5538 + 5539 + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; 5540 + hdr.command_length = length; 5541 + hdr.reserved1 = 0; 5542 + hdr.reserved2 = 0; 5543 + hdr.checksum = 0; 5544 + 5545 + ret_val = e1000_mng_enable_host_if(hw); 5546 + if (ret_val == E1000_SUCCESS) { 5547 + ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr), 5548 + &(hdr.checksum)); 5549 + if (ret_val == E1000_SUCCESS) { 5550 + ret_val = e1000_mng_write_cmd_header(hw, &hdr); 5551 + if (ret_val == E1000_SUCCESS) 5552 + ret_val = e1000_mng_write_commit(hw); 5553 + } 5554 + } 5555 + return ret_val; 5556 + } 5557 + 5558 + 5559 + /***************************************************************************** 5560 + * This function calculates the checksum. 5561 + * 5562 + * returns - checksum of buffer contents. 
5563 + ****************************************************************************/ 5564 + uint8_t 5565 + e1000_calculate_mng_checksum(char *buffer, uint32_t length) 5566 + { 5567 + uint8_t sum = 0; 5568 + uint32_t i; 5569 + 5570 + if (!buffer) 5571 + return 0; 5572 + 5573 + for (i=0; i < length; i++) 5574 + sum += buffer[i]; 5575 + 5576 + return (uint8_t) (0 - sum); 5577 + } 5578 + 5579 + /***************************************************************************** 5580 + * This function checks whether tx pkt filtering needs to be enabled or not. 5581 + * 5582 + * returns - TRUE for packet filtering or FALSE. 5583 + ****************************************************************************/ 5584 + boolean_t 5585 + e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) 5586 + { 5587 + /* called in init as well as watchdog timer functions */ 5588 + 5589 + int32_t ret_val, checksum; 5590 + boolean_t tx_filter = FALSE; 5591 + struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie); 5592 + uint8_t *buffer = (uint8_t *) &(hw->mng_cookie); 5593 + 5594 + if (e1000_check_mng_mode(hw)) { 5595 + ret_val = e1000_mng_enable_host_if(hw); 5596 + if (ret_val == E1000_SUCCESS) { 5597 + ret_val = e1000_host_if_read_cookie(hw, buffer); 5598 + if (ret_val == E1000_SUCCESS) { 5599 + checksum = hdr->checksum; 5600 + hdr->checksum = 0; 5601 + if ((hdr->signature == E1000_IAMT_SIGNATURE) && 5602 + checksum == e1000_calculate_mng_checksum((char *)buffer, 5603 + E1000_MNG_DHCP_COOKIE_LENGTH)) { 5604 + if (hdr->status & 5605 + E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT) 5606 + tx_filter = TRUE; 5607 + } else 5608 + tx_filter = TRUE; 5609 + } else 5610 + tx_filter = TRUE; 5611 + } 5612 + } 5613 + 5614 + hw->tx_pkt_filtering = tx_filter; 5615 + return tx_filter; 5616 + } 5617 + 5618 + /****************************************************************************** 5619 + * Verifies the hardware needs to allow ARPs to be processed by the host 5620 + * 5621 + * hw - Struct containing variables accessed by shared code 5622 + * 5623 + * returns: - TRUE/FALSE 5624 + * 5625 + *****************************************************************************/ 5626 + uint32_t 5627 + e1000_enable_mng_pass_thru(struct e1000_hw *hw) 5628 + { 5629 + uint32_t manc; 5630 + uint32_t fwsm, factps; 5631 + 5632 + if (hw->asf_firmware_present) { 5633 + manc = E1000_READ_REG(hw, MANC); 5634 + 5635 + if (!(manc & E1000_MANC_RCV_TCO_EN) || 5636 + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) 5637 + return FALSE; 5638 + if (e1000_arc_subsystem_valid(hw) == TRUE) { 5639 + fwsm = E1000_READ_REG(hw, FWSM); 5640 + factps = E1000_READ_REG(hw, FACTPS); 5641 + 5642 + if (((fwsm & E1000_FWSM_MODE_MASK) == 5643 + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)) && 5644 + (factps & E1000_FACTPS_MNGCG)) 5645 + return TRUE; 5646 + } else 5647 + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) 5648 + return TRUE; 5649 + } 5650 + return FALSE; 5651 } 5652 5653 static int32_t ··· 5402 } 5403 return E1000_SUCCESS; 5404 } 5405 + 5406 + /*************************************************************************** 5407 + * 5408 + * Disables PCI-Express master access. 5409 + * 5410 + * hw: Struct containing variables accessed by shared code 5411 + * 5412 + * returns: - none. 
5413 + * 5414 + ***************************************************************************/ 5415 + void 5416 + e1000_set_pci_express_master_disable(struct e1000_hw *hw) 5417 + { 5418 + uint32_t ctrl; 5419 + 5420 + DEBUGFUNC("e1000_set_pci_express_master_disable"); 5421 + 5422 + if (hw->bus_type != e1000_bus_type_pci_express) 5423 + return; 5424 + 5425 + ctrl = E1000_READ_REG(hw, CTRL); 5426 + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; 5427 + E1000_WRITE_REG(hw, CTRL, ctrl); 5428 + } 5429 + 5430 + /*************************************************************************** 5431 + * 5432 + * Enables PCI-Express master access. 5433 + * 5434 + * hw: Struct containing variables accessed by shared code 5435 + * 5436 + * returns: - none. 5437 + * 5438 + ***************************************************************************/ 5439 + void 5440 + e1000_enable_pciex_master(struct e1000_hw *hw) 5441 + { 5442 + uint32_t ctrl; 5443 + 5444 + DEBUGFUNC("e1000_enable_pciex_master"); 5445 + 5446 + if (hw->bus_type != e1000_bus_type_pci_express) 5447 + return; 5448 + 5449 + ctrl = E1000_READ_REG(hw, CTRL); 5450 + ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE; 5451 + E1000_WRITE_REG(hw, CTRL, ctrl); 5452 + } 5453 + 5454 + /******************************************************************************* 5455 + * 5456 + * Disables PCI-Express master access and verifies there are no pending requests 5457 + * 5458 + * hw: Struct containing variables accessed by shared code 5459 + * 5460 + * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if master disable bit hasn't 5461 + * caused the master requests to be disabled. 5462 + * E1000_SUCCESS master requests disabled. 5463 + * 5464 + ******************************************************************************/ 5465 + int32_t 5466 + e1000_disable_pciex_master(struct e1000_hw *hw) 5467 + { 5468 + int32_t timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */ 5469 + 5470 + DEBUGFUNC("e1000_disable_pciex_master"); 5471 + 5472 + if (hw->bus_type != e1000_bus_type_pci_express) 5473 + return E1000_SUCCESS; 5474 + 5475 + e1000_set_pci_express_master_disable(hw); 5476 + 5477 + while(timeout) { 5478 + if(!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) 5479 + break; 5480 + else 5481 + udelay(100); 5482 + timeout--; 5483 + } 5484 + 5485 + if(!timeout) { 5486 + DEBUGOUT("Master requests are pending.\n"); 5487 + return -E1000_ERR_MASTER_REQUESTS_PENDING; 5488 + } 5489 + 5490 + return E1000_SUCCESS; 5491 + } 5492 + 5493 + /******************************************************************************* 5494 + * 5495 + * Check for EEPROM Auto Read bit done. 5496 + * 5497 + * hw: Struct containing variables accessed by shared code 5498 + * 5499 + * returns: - E1000_ERR_RESET if fail to reset MAC 5500 + * E1000_SUCCESS at any other case. 
5501 + * 5502 + ******************************************************************************/ 5503 + int32_t 5504 + e1000_get_auto_rd_done(struct e1000_hw *hw) 5505 + { 5506 + int32_t timeout = AUTO_READ_DONE_TIMEOUT; 5507 + 5508 + DEBUGFUNC("e1000_get_auto_rd_done"); 5509 + 5510 + switch (hw->mac_type) { 5511 + default: 5512 + msec_delay(5); 5513 + break; 5514 + case e1000_82573: 5515 + while(timeout) { 5516 + if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break; 5517 + else msec_delay(1); 5518 + timeout--; 5519 + } 5520 + 5521 + if(!timeout) { 5522 + DEBUGOUT("Auto read by HW from EEPROM has not completed.\n"); 5523 + return -E1000_ERR_RESET; 5524 + } 5525 + break; 5526 + } 5527 + 5528 + return E1000_SUCCESS; 5529 + } 5530 + 5531 + /*************************************************************************** 5532 + * Checks if the PHY configuration is done 5533 + * 5534 + * hw: Struct containing variables accessed by shared code 5535 + * 5536 + * returns: - E1000_ERR_RESET if fail to reset MAC 5537 + * E1000_SUCCESS at any other case. 5538 + * 5539 + ***************************************************************************/ 5540 + int32_t 5541 + e1000_get_phy_cfg_done(struct e1000_hw *hw) 5542 + { 5543 + DEBUGFUNC("e1000_get_phy_cfg_done"); 5544 + 5545 + /* Simply wait for 10ms */ 5546 + msec_delay(10); 5547 + 5548 + return E1000_SUCCESS; 5549 + } 5550 + 5551 + /*************************************************************************** 5552 + * 5553 + * Using the combination of SMBI and SWESMBI semaphore bits when resetting 5554 + * adapter or Eeprom access. 5555 + * 5556 + * hw: Struct containing variables accessed by shared code 5557 + * 5558 + * returns: - E1000_ERR_EEPROM if fail to access EEPROM. 5559 + * E1000_SUCCESS at any other case. 5560 + * 5561 + ***************************************************************************/ 5562 + int32_t 5563 + e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) 5564 + { 5565 + int32_t timeout; 5566 + uint32_t swsm; 5567 + 5568 + DEBUGFUNC("e1000_get_hw_eeprom_semaphore"); 5569 + 5570 + if(!hw->eeprom_semaphore_present) 5571 + return E1000_SUCCESS; 5572 + 5573 + 5574 + /* Get the FW semaphore. */ 5575 + timeout = hw->eeprom.word_size + 1; 5576 + while(timeout) { 5577 + swsm = E1000_READ_REG(hw, SWSM); 5578 + swsm |= E1000_SWSM_SWESMBI; 5579 + E1000_WRITE_REG(hw, SWSM, swsm); 5580 + /* if we managed to set the bit we got the semaphore. */ 5581 + swsm = E1000_READ_REG(hw, SWSM); 5582 + if(swsm & E1000_SWSM_SWESMBI) 5583 + break; 5584 + 5585 + udelay(50); 5586 + timeout--; 5587 + } 5588 + 5589 + if(!timeout) { 5590 + /* Release semaphores */ 5591 + e1000_put_hw_eeprom_semaphore(hw); 5592 + DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n"); 5593 + return -E1000_ERR_EEPROM; 5594 + } 5595 + 5596 + return E1000_SUCCESS; 5597 + } 5598 + 5599 + /*************************************************************************** 5600 + * This function clears HW semaphore bits. 5601 + * 5602 + * hw: Struct containing variables accessed by shared code 5603 + * 5604 + * returns: - None. 5605 + * 5606 + ***************************************************************************/ 5607 + void 5608 + e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) 5609 + { 5610 + uint32_t swsm; 5611 + 5612 + DEBUGFUNC("e1000_put_hw_eeprom_semaphore"); 5613 + 5614 + if(!hw->eeprom_semaphore_present) 5615 + return; 5616 + 5617 + swsm = E1000_READ_REG(hw, SWSM); 5618 + /* Release both semaphores. 
*/ 5619 + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); 5620 + E1000_WRITE_REG(hw, SWSM, swsm); 5621 + } 5622 + 5623 + /****************************************************************************** 5624 + * Checks if PHY reset is blocked due to SOL/IDER session, for example. 5625 + * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to 5626 + * the caller to figure out how to deal with it. 5627 + * 5628 + * hw - Struct containing variables accessed by shared code 5629 + * 5630 + * returns: - E1000_BLK_PHY_RESET 5631 + * E1000_SUCCESS 5632 + * 5633 + *****************************************************************************/ 5634 + int32_t 5635 + e1000_check_phy_reset_block(struct e1000_hw *hw) 5636 + { 5637 + uint32_t manc = 0; 5638 + if(hw->mac_type > e1000_82547_rev_2) 5639 + manc = E1000_READ_REG(hw, MANC); 5640 + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? 5641 + E1000_BLK_PHY_RESET : E1000_SUCCESS; 5642 + } 5643 + 5644 + uint8_t 5645 + e1000_arc_subsystem_valid(struct e1000_hw *hw) 5646 + { 5647 + uint32_t fwsm; 5648 + 5649 + /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC 5650 + * may not be provided a DMA clock when no manageability features are 5651 + * enabled. We do not want to perform any reads/writes to these registers 5652 + * if this is the case. We read FWSM to determine the manageability mode. 5653 + */ 5654 + switch (hw->mac_type) { 5655 + case e1000_82573: 5656 + fwsm = E1000_READ_REG(hw, FWSM); 5657 + if((fwsm & E1000_FWSM_MODE_MASK) != 0) 5658 + return TRUE; 5659 + break; 5660 + default: 5661 + break; 5662 + } 5663 + return FALSE; 5664 + } 5665 + 5666 + 5667
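For reference, the manageability paths added above (e1000_mng_write_cmd_header, e1000_host_if_read_cookie, e1000_enable_tx_pkt_filtering) all rely on the same convention: the checksum byte is the two's complement of the byte sum of the rest of the block, so a valid block sums to zero modulo 256. Below is a minimal standalone sketch of that convention, not driver code; the mng_checksum helper mirrors e1000_calculate_mng_checksum from the patch, while the 16-byte buffer, its contents and the main() harness are purely illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same byte-sum rule as e1000_calculate_mng_checksum(): return the value
 * that makes the byte sum of (buffer + checksum) zero modulo 256. */
static uint8_t mng_checksum(const uint8_t *buffer, uint32_t length)
{
	uint8_t sum = 0;
	uint32_t i;

	if (!buffer)
		return 0;
	for (i = 0; i < length; i++)
		sum += buffer[i];
	return (uint8_t)(0 - sum);
}

int main(void)
{
	uint8_t block[16];	/* illustrative size, not the real cookie length */
	uint8_t sum = 0;
	uint32_t i;

	memset(block, 0xA5, sizeof(block));
	block[15] = 0;		/* checksum byte cleared before summing */
	block[15] = mng_checksum(block, sizeof(block));

	for (i = 0; i < sizeof(block); i++)	/* verify: a valid block sums to zero */
		sum += block[i];
	printf("byte sum after fix-up: %u\n", (unsigned)sum);	/* prints 0 */
	return 0;
}

This is effectively the validation e1000_enable_tx_pkt_filtering() performs on the DHCP cookie (with the checksum field zeroed) before deciding whether tx packet filtering is required.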
+546 -22
drivers/net/e1000/e1000_hw.h
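Among the prototype changes in this header, note the removed lines below: e1000_phy_hw_reset() and e1000_init_eeprom_params() used to be declared as returning void and now return an int32_t status, matching their definitions in e1000_hw.c above. A minimal sketch of how a caller might propagate the new status codes follows; the example_reset_path() wrapper is purely illustrative, assumes the driver's e1000_hw.h context, and is not part of the patch.

/* Illustrative only: callers can no longer ignore the results of
 * e1000_init_eeprom_params() and e1000_phy_hw_reset() now that both
 * return an int32_t status instead of void. */
static int32_t example_reset_path(struct e1000_hw *hw)
{
	int32_t ret_val;

	ret_val = e1000_init_eeprom_params(hw);
	if (ret_val) {
		DEBUGOUT("EEPROM initialization failed\n");
		return ret_val;
	}

	ret_val = e1000_phy_hw_reset(hw);
	if (ret_val) {
		DEBUGOUT("PHY hard reset failed\n");
		return ret_val;
	}

	return E1000_SUCCESS;
}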
··· 57 e1000_82541_rev_2, 58 e1000_82547, 59 e1000_82547_rev_2, 60 e1000_num_macs 61 } e1000_mac_type; 62 ··· 65 e1000_eeprom_uninitialized = 0, 66 e1000_eeprom_spi, 67 e1000_eeprom_microwire, 68 e1000_num_eeprom_types 69 } e1000_eeprom_type; 70 ··· 98 e1000_bus_type_unknown = 0, 99 e1000_bus_type_pci, 100 e1000_bus_type_pcix, 101 e1000_bus_type_reserved 102 } e1000_bus_type; 103 ··· 110 e1000_bus_speed_100, 111 e1000_bus_speed_120, 112 e1000_bus_speed_133, 113 e1000_bus_speed_reserved 114 } e1000_bus_speed; 115 ··· 119 e1000_bus_width_unknown = 0, 120 e1000_bus_width_32, 121 e1000_bus_width_64, 122 e1000_bus_width_reserved 123 } e1000_bus_width; 124 ··· 202 typedef enum { 203 e1000_phy_m88 = 0, 204 e1000_phy_igp, 205 e1000_phy_undefined = 0xFF 206 } e1000_phy_type; 207 ··· 249 uint16_t address_bits; 250 uint16_t delay_usec; 251 uint16_t page_size; 252 }; 253 254 255 ··· 272 #define E1000_ERR_PARAM 4 273 #define E1000_ERR_MAC_TYPE 5 274 #define E1000_ERR_PHY_TYPE 6 275 276 /* Function prototypes */ 277 /* Initialization */ 278 int32_t e1000_reset_hw(struct e1000_hw *hw); 279 int32_t e1000_init_hw(struct e1000_hw *hw); 280 int32_t e1000_set_mac_type(struct e1000_hw *hw); 281 void e1000_set_media_type(struct e1000_hw *hw); 282 ··· 298 /* PHY */ 299 int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data); 300 int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); 301 - void e1000_phy_hw_reset(struct e1000_hw *hw); 302 int32_t e1000_phy_reset(struct e1000_hw *hw); 303 int32_t e1000_detect_gig_phy(struct e1000_hw *hw); 304 int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); ··· 310 int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); 311 312 /* EEPROM Functions */ 313 - void e1000_init_eeprom_params(struct e1000_hw *hw); 314 int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 315 int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw); 316 int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); 317 int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 318 int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); 319 int32_t e1000_read_mac_addr(struct e1000_hw * hw); 320 321 /* Filters (multicast, vlan, receive) */ 322 void e1000_init_rx_addrs(struct e1000_hw *hw); ··· 409 /* Adaptive IFS Functions */ 410 411 /* Everything else */ 412 - uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); 413 void e1000_clear_hw_cntrs(struct e1000_hw *hw); 414 void e1000_reset_adaptive(struct e1000_hw *hw); 415 void e1000_update_adaptive(struct e1000_hw *hw); ··· 425 void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value); 426 int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up); 427 int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active); 428 429 #define E1000_READ_REG_IO(a, reg) \ 430 e1000_read_reg_io((a), E1000_##reg) ··· 477 #define E1000_DEV_ID_82546GB_SERDES 0x107B 478 #define E1000_DEV_ID_82546GB_PCIE 0x108A 479 #define E1000_DEV_ID_82547EI 0x1019 480 481 #define NODE_ADDRESS_SIZE 6 482 #define ETH_LENGTH_OF_ADDRESS 6 ··· 493 #define E1000_REVISION_0 0 494 #define E1000_REVISION_1 1 495 #define E1000_REVISION_2 2 496 497 #define SPEED_10 10 498 #define SPEED_100 100 ··· 550 E1000_IMS_RXSEQ | \ 551 E1000_IMS_LSC) 552 553 /* Number of high/low register pairs in the RAR. 
The RAR (Receive Address 554 * Registers) holds the directed and multicast addresses that we monitor. We 555 * reserve one of these spots for our directed address, allowing us room for ··· 571 uint16_t special; 572 }; 573 574 /* Receive Decriptor bit definitions */ 575 #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 576 #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ 577 #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ 578 #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 579 #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ 580 #define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ 581 #define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ 582 #define E1000_RXD_ERR_CE 0x01 /* CRC Error */ 583 #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ 584 #define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ ··· 648 #define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ 649 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ 650 #define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ 651 - #define E1000_RXD_SPC_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */ 652 #define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ 653 - #define E1000_RXD_SPC_CFI_SHIFT 0x000C /* CFI is bit 12 */ 654 655 /* mask to determine if packets should be dropped due to frame errors */ 656 #define E1000_RXD_ERR_FRAME_ERR_MASK ( \ ··· 670 E1000_RXD_ERR_SEQ | \ 671 E1000_RXD_ERR_CXE | \ 672 E1000_RXD_ERR_RXE) 673 674 /* Transmit Descriptor */ 675 struct e1000_tx_desc { ··· 861 #define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ 862 #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ 863 #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ 864 #define E1000_RCTL 0x00100 /* RX Control - RW */ 865 #define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ 866 #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ ··· 871 #define E1000_TBT 0x00448 /* TX Burst Timer - RW */ 872 #define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ 873 #define E1000_LEDCTL 0x00E00 /* LED Control - RW */ 874 #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 875 #define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ 876 #define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ 877 #define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ 878 #define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ 879 #define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ ··· 897 #define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */ 898 #define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ 899 #define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ 900 #define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ 901 #define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ 902 #define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ ··· 913 #define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ 914 #define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ 915 #define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ 916 #define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ 917 #define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ 918 #define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ ··· 979 #define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ 980 #define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ 981 #define E1000_TSCTFC 0x040FC /* TCP 
Segmentation Context TX Fail - R/clr */ 982 #define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ 983 #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 984 #define E1000_RA 0x05400 /* Receive Address - RW Array */ 985 #define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ ··· 1007 #define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ 1008 #define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ 1009 1010 /* Register Set (82542) 1011 * 1012 * Some of the 82542 registers are located at different offsets than they are ··· 1067 #define E1000_82542_VFTA 0x00600 1068 #define E1000_82542_LEDCTL E1000_LEDCTL 1069 #define E1000_82542_PBA E1000_PBA 1070 #define E1000_82542_RXDCTL E1000_RXDCTL 1071 #define E1000_82542_RADV E1000_RADV 1072 #define E1000_82542_RSRPD E1000_RSRPD ··· 1163 #define E1000_82542_FFMT E1000_FFMT 1164 #define E1000_82542_FFVT E1000_FFVT 1165 #define E1000_82542_HOST_IF E1000_HOST_IF 1166 1167 /* Statistics counters collected by the MAC */ 1168 struct e1000_hw_stats { ··· 1256 uint64_t bptc; 1257 uint64_t tsctc; 1258 uint64_t tsctfc; 1259 }; 1260 1261 /* Structure containing variables used by the shared code (e1000_hw.c) */ 1262 struct e1000_hw { 1263 - uint8_t __iomem *hw_addr; 1264 e1000_mac_type mac_type; 1265 e1000_phy_type phy_type; 1266 uint32_t phy_init_script; ··· 1285 e1000_ms_type original_master_slave; 1286 e1000_ffe_config ffe_config_state; 1287 uint32_t asf_firmware_present; 1288 unsigned long io_base; 1289 uint32_t phy_id; 1290 uint32_t phy_revision; ··· 1302 uint32_t ledctl_default; 1303 uint32_t ledctl_mode1; 1304 uint32_t ledctl_mode2; 1305 uint16_t phy_spd_default; 1306 uint16_t autoneg_advertised; 1307 uint16_t pci_cmd_word; ··· 1342 boolean_t adaptive_ifs; 1343 boolean_t ifs_params_forced; 1344 boolean_t in_ifs_mode; 1345 }; 1346 1347 1348 #define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ 1349 #define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ 1350 /* Register Bit Masks */ 1351 /* Device Control */ 1352 #define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ 1353 #define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ 1354 #define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ 1355 #define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ 1356 #define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ 1357 #define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ ··· 1373 #define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ 1374 #define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ 1375 #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 1376 #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 1377 #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 1378 #define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ ··· 1393 #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ 1394 #define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ 1395 #define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ 1396 #define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ 1397 #define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ 1398 #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ ··· 1403 #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ 1404 #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ 1405 #define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ 1406 #define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ 1407 #define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ 1408 #define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ ··· 1435 #ifndef E1000_EEPROM_GRANT_ATTEMPTS 1436 #define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ 1437 #endif 1438 1439 /* EEPROM Read */ 1440 #define E1000_EERD_START 0x00000001 /* Start Read */ ··· 1490 #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 1491 #define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 1492 #define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 1493 1494 /* MDI Control */ 1495 #define E1000_MDIC_DATA_MASK 0x0000FFFF ··· 1508 /* LED Control */ 1509 #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F 1510 #define E1000_LEDCTL_LED0_MODE_SHIFT 0 1511 #define E1000_LEDCTL_LED0_IVRT 0x00000040 1512 #define E1000_LEDCTL_LED0_BLINK 0x00000080 1513 #define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 1514 #define E1000_LEDCTL_LED1_MODE_SHIFT 8 1515 #define E1000_LEDCTL_LED1_IVRT 0x00004000 1516 #define E1000_LEDCTL_LED1_BLINK 0x00008000 1517 #define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 1518 #define E1000_LEDCTL_LED2_MODE_SHIFT 16 1519 #define E1000_LEDCTL_LED2_IVRT 0x00400000 1520 #define E1000_LEDCTL_LED2_BLINK 0x00800000 1521 #define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 ··· 1562 #define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ 1563 #define E1000_ICR_TXD_LOW 0x00008000 1564 #define E1000_ICR_SRPD 0x00010000 1565 1566 /* Interrupt Cause Set */ 1567 #define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ ··· 1583 #define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1584 #define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW 1585 #define E1000_ICS_SRPD E1000_ICR_SRPD 1586 1587 /* Interrupt Mask Set */ 1588 #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ ··· 1603 #define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1604 #define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW 1605 #define E1000_IMS_SRPD E1000_ICR_SRPD 1606 1607 /* Interrupt Mask Clear */ 1608 #define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ ··· 1623 #define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1624 #define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW 1625 #define E1000_IMC_SRPD E1000_ICR_SRPD 1626 1627 /* Receive Control */ 1628 #define E1000_RCTL_RST 0x00000001 /* Software reset */ ··· 1638 #define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ 1639 #define E1000_RCTL_LBM_SLP 
0x00000080 /* serial link loopback mode */ 1640 #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ 1641 #define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ 1642 #define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ 1643 #define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ ··· 1666 #define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ 1667 #define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ 1668 #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 1669 1670 /* Receive Descriptor */ 1671 #define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ ··· 1708 #define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ 1709 #define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ 1710 1711 /* Receive Descriptor Control */ 1712 #define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ 1713 #define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ ··· 1738 #define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ 1739 #define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ 1740 #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 1741 1742 /* Transmit Configuration Word */ 1743 #define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ ··· 1773 #define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ 1774 #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ 1775 #define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ 1776 1777 /* Receive Checksum Control */ 1778 #define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ 1779 #define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ 1780 #define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ 1781 #define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ 1782 1783 /* Definitions for power management and wakeup registers */ 1784 /* Wake Up Control */ ··· 1801 #define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ 1802 #define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ 1803 #define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ 1804 #define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ 1805 #define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ 1806 #define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ ··· 1837 #define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ 1838 #define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery 1839 * Filtering */ 1840 #define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ 1841 #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ 1842 #define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ 1843 #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address 1844 * filtering */ 1845 #define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host 1846 * memory */ 1847 #define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ 1848 #define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ 1849 #define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ ··· 1860 #define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ 1861 #define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ 1862 1863 /* Wake Up Packet Length */ 1864 #define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ 1865 1866 #define E1000_MDALIGN 4096 1867 1868 /* EEPROM 
Commands - Microwire */ 1869 #define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ ··· 1960 1961 /* EEPROM Commands - SPI */ 1962 #define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ 1963 - #define EEPROM_READ_OPCODE_SPI 0x3 /* EEPROM read opcode */ 1964 - #define EEPROM_WRITE_OPCODE_SPI 0x2 /* EEPROM write opcode */ 1965 - #define EEPROM_A8_OPCODE_SPI 0x8 /* opcode bit-3 = address bit-8 */ 1966 - #define EEPROM_WREN_OPCODE_SPI 0x6 /* EEPROM set Write Enable latch */ 1967 - #define EEPROM_WRDI_OPCODE_SPI 0x4 /* EEPROM reset Write Enable latch */ 1968 - #define EEPROM_RDSR_OPCODE_SPI 0x5 /* EEPROM read Status register */ 1969 - #define EEPROM_WRSR_OPCODE_SPI 0x1 /* EEPROM write Status register */ 1970 1971 /* EEPROM Size definitions */ 1972 - #define EEPROM_SIZE_16KB 0x1800 1973 - #define EEPROM_SIZE_8KB 0x1400 1974 - #define EEPROM_SIZE_4KB 0x1000 1975 - #define EEPROM_SIZE_2KB 0x0C00 1976 - #define EEPROM_SIZE_1KB 0x0800 1977 - #define EEPROM_SIZE_512B 0x0400 1978 - #define EEPROM_SIZE_128B 0x0000 1979 #define EEPROM_SIZE_MASK 0x1C00 1980 1981 /* EEPROM Word Offsets */ ··· 2087 #define IFS_MIN 40 2088 #define IFS_RATIO 4 2089 2090 /* PBA constants */ 2091 #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ 2092 #define E1000_PBA_22K 0x0016 2093 #define E1000_PBA_24K 0x0018 ··· 2158 2159 /* Number of milliseconds we wait for auto-negotiation to complete */ 2160 #define LINK_UP_TIMEOUT 500 2161 2162 #define E1000_TX_BUFFER_SIZE ((uint32_t)1514) 2163 ··· 2266 #define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ 2267 #define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ 2268 #define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ 2269 #define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ 2270 2271 /* IGP01E1000 AGC Registers - stores the cable length values*/ ··· 2275 #define IGP01E1000_PHY_AGC_C 0x1472 2276 #define IGP01E1000_PHY_AGC_D 0x1872 2277 2278 /* IGP01E1000 DSP Reset Register */ 2279 #define IGP01E1000_PHY_DSP_RESET 0x1F33 2280 #define IGP01E1000_PHY_DSP_SET 0x1F71 2281 #define IGP01E1000_PHY_DSP_FFE 0x1F35 2282 2283 #define IGP01E1000_PHY_CHANNEL_NUM 4 2284 #define IGP01E1000_PHY_AGC_PARAM_A 0x1171 2285 #define IGP01E1000_PHY_AGC_PARAM_B 0x1271 2286 #define IGP01E1000_PHY_AGC_PARAM_C 0x1471 ··· 2572 #define IGP01E1000_MSE_CHANNEL_B 0x0F00 2573 #define IGP01E1000_MSE_CHANNEL_A 0xF000 2574 2575 /* IGP01E1000 DSP reset macros */ 2576 #define DSP_RESET_ENABLE 0x0 2577 #define DSP_RESET_DISABLE 0x2 2578 #define E1000_MAX_DSP_RESETS 10 2579 2580 - /* IGP01E1000 AGC Registers */ 2581 2582 #define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ 2583 2584 /* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ 2585 #define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 2586 2587 - /* The precision of the length is +/- 10 meters */ 2588 #define IGP01E1000_AGC_RANGE 10 2589 2590 /* IGP01E1000 PCS Initialization register */ 2591 /* bits 3:6 in the PCS registers stores the channels polarity */ ··· 2635 #define M88E1000_12_PHY_ID M88E1000_E_PHY_ID 2636 #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID 2637 #define M88E1011_I_REV_4 0x04 2638 2639 /* Miscellaneous PHY bit definitions. */ 2640 #define PHY_PREAMBLE 0xFFFFFFFF
··· 57 e1000_82541_rev_2, 58 e1000_82547, 59 e1000_82547_rev_2, 60 + e1000_82573, 61 e1000_num_macs 62 } e1000_mac_type; 63 ··· 64 e1000_eeprom_uninitialized = 0, 65 e1000_eeprom_spi, 66 e1000_eeprom_microwire, 67 + e1000_eeprom_flash, 68 e1000_num_eeprom_types 69 } e1000_eeprom_type; 70 ··· 96 e1000_bus_type_unknown = 0, 97 e1000_bus_type_pci, 98 e1000_bus_type_pcix, 99 + e1000_bus_type_pci_express, 100 e1000_bus_type_reserved 101 } e1000_bus_type; 102 ··· 107 e1000_bus_speed_100, 108 e1000_bus_speed_120, 109 e1000_bus_speed_133, 110 + e1000_bus_speed_2500, 111 e1000_bus_speed_reserved 112 } e1000_bus_speed; 113 ··· 115 e1000_bus_width_unknown = 0, 116 e1000_bus_width_32, 117 e1000_bus_width_64, 118 + e1000_bus_width_pciex_1, 119 + e1000_bus_width_pciex_4, 120 e1000_bus_width_reserved 121 } e1000_bus_width; 122 ··· 196 typedef enum { 197 e1000_phy_m88 = 0, 198 e1000_phy_igp, 199 + e1000_phy_igp_2, 200 e1000_phy_undefined = 0xFF 201 } e1000_phy_type; 202 ··· 242 uint16_t address_bits; 243 uint16_t delay_usec; 244 uint16_t page_size; 245 + boolean_t use_eerd; 246 + boolean_t use_eewr; 247 }; 248 + 249 + /* Flex ASF Information */ 250 + #define E1000_HOST_IF_MAX_SIZE 2048 251 + 252 + typedef enum { 253 + e1000_byte_align = 0, 254 + e1000_word_align = 1, 255 + e1000_dword_align = 2 256 + } e1000_align_type; 257 258 259 ··· 254 #define E1000_ERR_PARAM 4 255 #define E1000_ERR_MAC_TYPE 5 256 #define E1000_ERR_PHY_TYPE 6 257 + #define E1000_ERR_RESET 9 258 + #define E1000_ERR_MASTER_REQUESTS_PENDING 10 259 + #define E1000_ERR_HOST_INTERFACE_COMMAND 11 260 + #define E1000_BLK_PHY_RESET 12 261 262 /* Function prototypes */ 263 /* Initialization */ 264 int32_t e1000_reset_hw(struct e1000_hw *hw); 265 int32_t e1000_init_hw(struct e1000_hw *hw); 266 + int32_t e1000_id_led_init(struct e1000_hw * hw); 267 int32_t e1000_set_mac_type(struct e1000_hw *hw); 268 void e1000_set_media_type(struct e1000_hw *hw); 269 ··· 275 /* PHY */ 276 int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data); 277 int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); 278 + int32_t e1000_phy_hw_reset(struct e1000_hw *hw); 279 int32_t e1000_phy_reset(struct e1000_hw *hw); 280 int32_t e1000_detect_gig_phy(struct e1000_hw *hw); 281 int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); ··· 287 int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); 288 289 /* EEPROM Functions */ 290 + int32_t e1000_init_eeprom_params(struct e1000_hw *hw); 291 + boolean_t e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw); 292 + int32_t e1000_read_eeprom_eerd(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); 293 + int32_t e1000_write_eeprom_eewr(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); 294 + int32_t e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd); 295 + 296 + /* MNG HOST IF functions */ 297 + uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); 298 + 299 + #define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 300 + #define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ 301 + 302 + #define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ 303 + #define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ 304 + #define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ 305 + #define E1000_MNG_IAMT_MODE 0x3 306 + #define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ 307 + 308 + #define 
E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ 309 + #define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ 310 + #define E1000_VFTA_ENTRY_SHIFT 0x5 311 + #define E1000_VFTA_ENTRY_MASK 0x7F 312 + #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F 313 + 314 + struct e1000_host_mng_command_header { 315 + uint8_t command_id; 316 + uint8_t checksum; 317 + uint16_t reserved1; 318 + uint16_t reserved2; 319 + uint16_t command_length; 320 + }; 321 + 322 + struct e1000_host_mng_command_info { 323 + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ 324 + uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/ 325 + }; 326 + #ifdef __BIG_ENDIAN 327 + struct e1000_host_mng_dhcp_cookie{ 328 + uint32_t signature; 329 + uint16_t vlan_id; 330 + uint8_t reserved0; 331 + uint8_t status; 332 + uint32_t reserved1; 333 + uint8_t checksum; 334 + uint8_t reserved3; 335 + uint16_t reserved2; 336 + }; 337 + #else 338 + struct e1000_host_mng_dhcp_cookie{ 339 + uint32_t signature; 340 + uint8_t status; 341 + uint8_t reserved0; 342 + uint16_t vlan_id; 343 + uint32_t reserved1; 344 + uint16_t reserved2; 345 + uint8_t reserved3; 346 + uint8_t checksum; 347 + }; 348 + #endif 349 + 350 + int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, 351 + uint16_t length); 352 + boolean_t e1000_check_mng_mode(struct e1000_hw *hw); 353 + boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); 354 + int32_t e1000_mng_enable_host_if(struct e1000_hw *hw); 355 + int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer, 356 + uint16_t length, uint16_t offset, uint8_t *sum); 357 + int32_t e1000_mng_write_cmd_header(struct e1000_hw* hw, 358 + struct e1000_host_mng_command_header* hdr); 359 + 360 + int32_t e1000_mng_write_commit(struct e1000_hw *hw); 361 + 362 int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 363 int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw); 364 int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); 365 int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 366 int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); 367 int32_t e1000_read_mac_addr(struct e1000_hw * hw); 368 + int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask); 369 + void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); 370 371 /* Filters (multicast, vlan, receive) */ 372 void e1000_init_rx_addrs(struct e1000_hw *hw); ··· 313 /* Adaptive IFS Functions */ 314 315 /* Everything else */ 316 void e1000_clear_hw_cntrs(struct e1000_hw *hw); 317 void e1000_reset_adaptive(struct e1000_hw *hw); 318 void e1000_update_adaptive(struct e1000_hw *hw); ··· 330 void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value); 331 int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up); 332 int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active); 333 + int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, boolean_t active); 334 + void e1000_set_pci_express_master_disable(struct e1000_hw *hw); 335 + void e1000_enable_pciex_master(struct e1000_hw *hw); 336 + int32_t e1000_disable_pciex_master(struct e1000_hw *hw); 337 + int32_t e1000_get_auto_rd_done(struct e1000_hw *hw); 338 + int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw); 339 + int32_t e1000_get_software_semaphore(struct e1000_hw *hw); 340 + void 
e1000_release_software_semaphore(struct e1000_hw *hw); 341 + int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); 342 + int32_t e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw); 343 + void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw); 344 + int32_t e1000_commit_shadow_ram(struct e1000_hw *hw); 345 + uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw); 346 347 #define E1000_READ_REG_IO(a, reg) \ 348 e1000_read_reg_io((a), E1000_##reg) ··· 369 #define E1000_DEV_ID_82546GB_SERDES 0x107B 370 #define E1000_DEV_ID_82546GB_PCIE 0x108A 371 #define E1000_DEV_ID_82547EI 0x1019 372 + #define E1000_DEV_ID_82573E 0x108B 373 + #define E1000_DEV_ID_82573E_IAMT 0x108C 374 + 375 + #define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 376 377 #define NODE_ADDRESS_SIZE 6 378 #define ETH_LENGTH_OF_ADDRESS 6 ··· 381 #define E1000_REVISION_0 0 382 #define E1000_REVISION_1 1 383 #define E1000_REVISION_2 2 384 + #define E1000_REVISION_3 3 385 386 #define SPEED_10 10 387 #define SPEED_100 100 ··· 437 E1000_IMS_RXSEQ | \ 438 E1000_IMS_LSC) 439 440 + 441 /* Number of high/low register pairs in the RAR. The RAR (Receive Address 442 * Registers) holds the directed and multicast addresses that we monitor. We 443 * reserve one of these spots for our directed address, allowing us room for ··· 457 uint16_t special; 458 }; 459 460 + /* Receive Descriptor - Extended */ 461 + union e1000_rx_desc_extended { 462 + struct { 463 + uint64_t buffer_addr; 464 + uint64_t reserved; 465 + } read; 466 + struct { 467 + struct { 468 + uint32_t mrq; /* Multiple Rx Queues */ 469 + union { 470 + uint32_t rss; /* RSS Hash */ 471 + struct { 472 + uint16_t ip_id; /* IP id */ 473 + uint16_t csum; /* Packet Checksum */ 474 + } csum_ip; 475 + } hi_dword; 476 + } lower; 477 + struct { 478 + uint32_t status_error; /* ext status/error */ 479 + uint16_t length; 480 + uint16_t vlan; /* VLAN tag */ 481 + } upper; 482 + } wb; /* writeback */ 483 + }; 484 + 485 + #define MAX_PS_BUFFERS 4 486 + /* Receive Descriptor - Packet Split */ 487 + union e1000_rx_desc_packet_split { 488 + struct { 489 + /* one buffer for protocol header(s), three data buffers */ 490 + uint64_t buffer_addr[MAX_PS_BUFFERS]; 491 + } read; 492 + struct { 493 + struct { 494 + uint32_t mrq; /* Multiple Rx Queues */ 495 + union { 496 + uint32_t rss; /* RSS Hash */ 497 + struct { 498 + uint16_t ip_id; /* IP id */ 499 + uint16_t csum; /* Packet Checksum */ 500 + } csum_ip; 501 + } hi_dword; 502 + } lower; 503 + struct { 504 + uint32_t status_error; /* ext status/error */ 505 + uint16_t length0; /* length of buffer 0 */ 506 + uint16_t vlan; /* VLAN tag */ 507 + } middle; 508 + struct { 509 + uint16_t header_status; 510 + uint16_t length[3]; /* length of buffers 1-3 */ 511 + } upper; 512 + uint64_t reserved; 513 + } wb; /* writeback */ 514 + }; 515 + 516 /* Receive Decriptor bit definitions */ 517 #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 518 #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ 519 #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ 520 #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 521 + #define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ 522 #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ 523 #define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ 524 #define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ 525 + #define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ 526 + #define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ 527 + #define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ 528 #define 
E1000_RXD_ERR_CE 0x01 /* CRC Error */ 529 #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ 530 #define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ ··· 474 #define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ 475 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ 476 #define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ 477 + #define E1000_RXD_SPC_PRI_SHIFT 13 478 #define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ 479 + #define E1000_RXD_SPC_CFI_SHIFT 12 480 + 481 + #define E1000_RXDEXT_STATERR_CE 0x01000000 482 + #define E1000_RXDEXT_STATERR_SE 0x02000000 483 + #define E1000_RXDEXT_STATERR_SEQ 0x04000000 484 + #define E1000_RXDEXT_STATERR_CXE 0x10000000 485 + #define E1000_RXDEXT_STATERR_TCPE 0x20000000 486 + #define E1000_RXDEXT_STATERR_IPE 0x40000000 487 + #define E1000_RXDEXT_STATERR_RXE 0x80000000 488 + 489 + #define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 490 + #define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF 491 492 /* mask to determine if packets should be dropped due to frame errors */ 493 #define E1000_RXD_ERR_FRAME_ERR_MASK ( \ ··· 485 E1000_RXD_ERR_SEQ | \ 486 E1000_RXD_ERR_CXE | \ 487 E1000_RXD_ERR_RXE) 488 + 489 + 490 + /* Same mask, but for extended and packet split descriptors */ 491 + #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ 492 + E1000_RXDEXT_STATERR_CE | \ 493 + E1000_RXDEXT_STATERR_SE | \ 494 + E1000_RXDEXT_STATERR_SEQ | \ 495 + E1000_RXDEXT_STATERR_CXE | \ 496 + E1000_RXDEXT_STATERR_RXE) 497 498 /* Transmit Descriptor */ 499 struct e1000_tx_desc { ··· 667 #define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ 668 #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ 669 #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ 670 + #define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ 671 #define E1000_RCTL 0x00100 /* RX Control - RW */ 672 #define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ 673 #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ ··· 676 #define E1000_TBT 0x00448 /* TX Burst Timer - RW */ 677 #define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ 678 #define E1000_LEDCTL 0x00E00 /* LED Control - RW */ 679 + #define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ 680 + #define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ 681 #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 682 + #define E1000_PBS 0x01008 /* Packet Buffer Size */ 683 + #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ 684 + #define E1000_FLASH_UPDATES 1000 685 + #define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ 686 + #define E1000_FLASHT 0x01028 /* FLASH Timer Register */ 687 + #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ 688 + #define E1000_FLSWCTL 0x01030 /* FLASH control register */ 689 + #define E1000_FLSWDATA 0x01034 /* FLASH data register */ 690 + #define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ 691 + #define E1000_FLOP 0x0103C /* FLASH Opcode Register */ 692 + #define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ 693 #define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ 694 #define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ 695 + #define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ 696 #define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ 697 #define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ 698 #define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ ··· 688 #define E1000_RXDCTL 0x02828 
/* RX Descriptor Control - RW */ 689 #define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ 690 #define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ 691 + #define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ 692 #define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ 693 #define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ 694 #define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ ··· 703 #define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ 704 #define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ 705 #define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ 706 + #define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ 707 + #define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ 708 + #define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ 709 + #define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ 710 + #define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ 711 + #define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ 712 + #define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ 713 + #define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ 714 #define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ 715 #define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ 716 #define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ ··· 761 #define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ 762 #define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ 763 #define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ 764 + #define E1000_IAC 0x4100 /* Interrupt Assertion Count */ 765 + #define E1000_ICRXPTC 0x4104 /* Interrupt Cause Rx Packet Timer Expire Count */ 766 + #define E1000_ICRXATC 0x4108 /* Interrupt Cause Rx Absolute Timer Expire Count */ 767 + #define E1000_ICTXPTC 0x410C /* Interrupt Cause Tx Packet Timer Expire Count */ 768 + #define E1000_ICTXATC 0x4110 /* Interrupt Cause Tx Absolute Timer Expire Count */ 769 + #define E1000_ICTXQEC 0x4118 /* Interrupt Cause Tx Queue Empty Count */ 770 + #define E1000_ICTXQMTC 0x411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ 771 + #define E1000_ICRXDMTC 0x4120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ 772 + #define E1000_ICRXOC 0x4124 /* Interrupt Cause Receiver Overrun Count */ 773 #define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ 774 + #define E1000_RFCTL 0x05008 /* Receive Filter Control*/ 775 #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 776 #define E1000_RA 0x05400 /* Receive Address - RW Array */ 777 #define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ ··· 779 #define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ 780 #define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ 781 782 + #define E1000_GCR 0x05B00 /* PCI-Ex Control */ 783 + #define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ 784 + #define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ 785 + #define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ 786 + #define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ 787 + #define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ 788 + #define E1000_SWSM 0x05B50 /* SW Semaphore */ 789 + #define E1000_FWSM 0x05B54 /* FW Semaphore */ 790 + #define E1000_FFLT_DBG 0x05F04 /* Debug Register */ 791 + #define E1000_HICR 0x08F00 /* Host Inteface Control */ 792 /* Register Set (82542) 793 * 794 * Some of the 
82542 registers are located at different offsets than they are ··· 829 #define E1000_82542_VFTA 0x00600 830 #define E1000_82542_LEDCTL E1000_LEDCTL 831 #define E1000_82542_PBA E1000_PBA 832 + #define E1000_82542_PBS E1000_PBS 833 + #define E1000_82542_EEMNGCTL E1000_EEMNGCTL 834 + #define E1000_82542_EEARBC E1000_EEARBC 835 + #define E1000_82542_FLASHT E1000_FLASHT 836 + #define E1000_82542_EEWR E1000_EEWR 837 + #define E1000_82542_FLSWCTL E1000_FLSWCTL 838 + #define E1000_82542_FLSWDATA E1000_FLSWDATA 839 + #define E1000_82542_FLSWCNT E1000_FLSWCNT 840 + #define E1000_82542_FLOP E1000_FLOP 841 + #define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL 842 + #define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE 843 + #define E1000_82542_ERT E1000_ERT 844 #define E1000_82542_RXDCTL E1000_RXDCTL 845 #define E1000_82542_RADV E1000_RADV 846 #define E1000_82542_RSRPD E1000_RSRPD ··· 913 #define E1000_82542_FFMT E1000_FFMT 914 #define E1000_82542_FFVT E1000_FFVT 915 #define E1000_82542_HOST_IF E1000_HOST_IF 916 + #define E1000_82542_IAM E1000_IAM 917 + #define E1000_82542_EEMNGCTL E1000_EEMNGCTL 918 + #define E1000_82542_PSRCTL E1000_PSRCTL 919 + #define E1000_82542_RAID E1000_RAID 920 + #define E1000_82542_TARC0 E1000_TARC0 921 + #define E1000_82542_TDBAL1 E1000_TDBAL1 922 + #define E1000_82542_TDBAH1 E1000_TDBAH1 923 + #define E1000_82542_TDLEN1 E1000_TDLEN1 924 + #define E1000_82542_TDH1 E1000_TDH1 925 + #define E1000_82542_TDT1 E1000_TDT1 926 + #define E1000_82542_TXDCTL1 E1000_TXDCTL1 927 + #define E1000_82542_TARC1 E1000_TARC1 928 + #define E1000_82542_RFCTL E1000_RFCTL 929 + #define E1000_82542_GCR E1000_GCR 930 + #define E1000_82542_GSCL_1 E1000_GSCL_1 931 + #define E1000_82542_GSCL_2 E1000_GSCL_2 932 + #define E1000_82542_GSCL_3 E1000_GSCL_3 933 + #define E1000_82542_GSCL_4 E1000_GSCL_4 934 + #define E1000_82542_FACTPS E1000_FACTPS 935 + #define E1000_82542_SWSM E1000_SWSM 936 + #define E1000_82542_FWSM E1000_FWSM 937 + #define E1000_82542_FFLT_DBG E1000_FFLT_DBG 938 + #define E1000_82542_IAC E1000_IAC 939 + #define E1000_82542_ICRXPTC E1000_ICRXPTC 940 + #define E1000_82542_ICRXATC E1000_ICRXATC 941 + #define E1000_82542_ICTXPTC E1000_ICTXPTC 942 + #define E1000_82542_ICTXATC E1000_ICTXATC 943 + #define E1000_82542_ICTXQEC E1000_ICTXQEC 944 + #define E1000_82542_ICTXQMTC E1000_ICTXQMTC 945 + #define E1000_82542_ICRXDMTC E1000_ICRXDMTC 946 + #define E1000_82542_ICRXOC E1000_ICRXOC 947 + #define E1000_82542_HICR E1000_HICR 948 949 /* Statistics counters collected by the MAC */ 950 struct e1000_hw_stats { ··· 974 uint64_t bptc; 975 uint64_t tsctc; 976 uint64_t tsctfc; 977 + uint64_t iac; 978 + uint64_t icrxptc; 979 + uint64_t icrxatc; 980 + uint64_t ictxptc; 981 + uint64_t ictxatc; 982 + uint64_t ictxqec; 983 + uint64_t ictxqmtc; 984 + uint64_t icrxdmtc; 985 + uint64_t icrxoc; 986 }; 987 988 /* Structure containing variables used by the shared code (e1000_hw.c) */ 989 struct e1000_hw { 990 + uint8_t *hw_addr; 991 + uint8_t *flash_address; 992 e1000_mac_type mac_type; 993 e1000_phy_type phy_type; 994 uint32_t phy_init_script; ··· 993 e1000_ms_type original_master_slave; 994 e1000_ffe_config ffe_config_state; 995 uint32_t asf_firmware_present; 996 + uint32_t eeprom_semaphore_present; 997 unsigned long io_base; 998 uint32_t phy_id; 999 uint32_t phy_revision; ··· 1009 uint32_t ledctl_default; 1010 uint32_t ledctl_mode1; 1011 uint32_t ledctl_mode2; 1012 + boolean_t tx_pkt_filtering; 1013 + struct e1000_host_mng_dhcp_cookie mng_cookie; 1014 uint16_t phy_spd_default; 1015 uint16_t autoneg_advertised; 1016 
uint16_t pci_cmd_word; ··· 1047 boolean_t adaptive_ifs; 1048 boolean_t ifs_params_forced; 1049 boolean_t in_ifs_mode; 1050 + boolean_t mng_reg_access_disabled; 1051 }; 1052 1053 1054 #define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ 1055 #define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ 1056 + #define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ 1057 + #define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ 1058 + #define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ 1059 + #define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ 1060 + #define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ 1061 + #define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ 1062 /* Register Bit Masks */ 1063 /* Device Control */ 1064 #define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ 1065 #define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ 1066 #define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ 1067 + #define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ 1068 #define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ 1069 #define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */ 1070 #define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ ··· 1070 #define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ 1071 #define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ 1072 #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 1073 + #define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ 1074 #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 1075 #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 1076 #define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ ··· 1089 #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ 1090 #define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ 1091 #define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ 1092 + #define E1000_STATUS_FUNC_SHIFT 2 1093 #define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ 1094 #define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ 1095 #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ ··· 1098 #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ 1099 #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ 1100 #define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ 1101 + #define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ 1102 + #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ 1103 #define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ 1104 #define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ 1105 #define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ ··· 1128 #ifndef E1000_EEPROM_GRANT_ATTEMPTS 1129 #define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ 1130 #endif 1131 + #define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ 1132 + #define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ 1133 + #define E1000_EECD_SIZE_EX_SHIFT 11 1134 + #define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ 1135 + #define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ 1136 + #define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ 1137 + #define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ 1138 + #define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ 1139 + #define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ 1140 + #define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ 1141 + #define E1000_STM_OPCODE 0xDB00 1142 + #define E1000_HICR_FW_RESET 0xC0 1143 1144 /* EEPROM Read */ 1145 #define E1000_EERD_START 0x00000001 /* Start Read */ ··· 1171 #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 1172 #define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 1173 #define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 1174 + #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 1175 + #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ 1176 1177 /* MDI Control */ 1178 #define E1000_MDIC_DATA_MASK 0x0000FFFF ··· 1187 /* LED Control */ 1188 #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F 1189 #define E1000_LEDCTL_LED0_MODE_SHIFT 0 1190 + #define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 1191 #define E1000_LEDCTL_LED0_IVRT 0x00000040 1192 #define E1000_LEDCTL_LED0_BLINK 0x00000080 1193 #define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 1194 #define E1000_LEDCTL_LED1_MODE_SHIFT 8 1195 + #define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 1196 #define E1000_LEDCTL_LED1_IVRT 0x00004000 1197 #define E1000_LEDCTL_LED1_BLINK 0x00008000 1198 #define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 1199 #define E1000_LEDCTL_LED2_MODE_SHIFT 16 1200 + #define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 1201 #define E1000_LEDCTL_LED2_IVRT 0x00400000 1202 #define E1000_LEDCTL_LED2_BLINK 0x00800000 1203 #define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 ··· 1238 #define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ 1239 #define E1000_ICR_TXD_LOW 0x00008000 1240 #define E1000_ICR_SRPD 0x00010000 1241 + #define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ 1242 + #define E1000_ICR_MNG 0x00040000 /* Manageability event */ 1243 + #define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ 1244 + #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 1245 1246 /* Interrupt Cause Set */ 1247 #define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ ··· 1255 #define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1256 #define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW 1257 #define E1000_ICS_SRPD E1000_ICR_SRPD 1258 + #define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ 1259 + #define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ 1260 + #define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1261 1262 /* Interrupt Mask Set */ 1263 #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ ··· 1272 #define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1273 #define E1000_IMS_TXD_LOW 
E1000_ICR_TXD_LOW 1274 #define E1000_IMS_SRPD E1000_ICR_SRPD 1275 + #define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ 1276 + #define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ 1277 + #define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1278 1279 /* Interrupt Mask Clear */ 1280 #define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ ··· 1289 #define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1290 #define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW 1291 #define E1000_IMC_SRPD E1000_ICR_SRPD 1292 + #define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ 1293 + #define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ 1294 + #define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1295 1296 /* Receive Control */ 1297 #define E1000_RCTL_RST 0x00000001 /* Software reset */ ··· 1301 #define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ 1302 #define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ 1303 #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ 1304 + #define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ 1305 + #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ 1306 #define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ 1307 #define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ 1308 #define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ ··· 1327 #define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ 1328 #define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ 1329 #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 1330 + #define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ 1331 + #define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ 1332 + 1333 + /* Use byte values for the following shift parameters 1334 + * Usage: 1335 + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & 1336 + * E1000_PSRCTL_BSIZE0_MASK) | 1337 + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & 1338 + * E1000_PSRCTL_BSIZE1_MASK) | 1339 + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & 1340 + * E1000_PSRCTL_BSIZE2_MASK) | 1341 + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; 1342 + * E1000_PSRCTL_BSIZE3_MASK)) 1343 + * where value0 = [128..16256], default=256 1344 + * value1 = [1024..64512], default=4096 1345 + * value2 = [0..64512], default=4096 1346 + * value3 = [0..64512], default=0 1347 + */ 1348 + 1349 + #define E1000_PSRCTL_BSIZE0_MASK 0x0000007F 1350 + #define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 1351 + #define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 1352 + #define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 1353 + 1354 + #define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ 1355 + #define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ 1356 + #define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ 1357 + #define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ 1358 1359 /* Receive Descriptor */ 1360 #define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ ··· 1341 #define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ 1342 #define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ 1343 1344 + /* Header split receive */ 1345 + #define E1000_RFCTL_ISCSI_DIS 0x00000001 1346 + #define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E 1347 + #define E1000_RFCTL_ISCSI_DWC_SHIFT 1 1348 + #define E1000_RFCTL_NFSW_DIS 0x00000040 1349 + #define E1000_RFCTL_NFSR_DIS 0x00000080 1350 + #define E1000_RFCTL_NFS_VER_MASK 0x00000300 1351 + #define 
E1000_RFCTL_NFS_VER_SHIFT 8 1352 + #define E1000_RFCTL_IPV6_DIS 0x00000400 1353 + #define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 1354 + #define E1000_RFCTL_ACK_DIS 0x00001000 1355 + #define E1000_RFCTL_ACKD_DIS 0x00002000 1356 + #define E1000_RFCTL_IPFRSP_DIS 0x00004000 1357 + #define E1000_RFCTL_EXTEN 0x00008000 1358 + #define E1000_RFCTL_IPV6_EX_DIS 0x00010000 1359 + #define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 1360 + 1361 /* Receive Descriptor Control */ 1362 #define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ 1363 #define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ ··· 1354 #define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ 1355 #define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ 1356 #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 1357 + #define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. 1358 + still to be processed. */ 1359 1360 /* Transmit Configuration Word */ 1361 #define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ ··· 1387 #define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ 1388 #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ 1389 #define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ 1390 + #define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ 1391 1392 /* Receive Checksum Control */ 1393 #define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ 1394 #define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ 1395 #define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ 1396 #define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ 1397 + #define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ 1398 + #define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ 1399 + 1400 1401 /* Definitions for power management and wakeup registers */ 1402 /* Wake Up Control */ ··· 1411 #define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ 1412 #define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ 1413 #define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ 1414 + #define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ 1415 #define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ 1416 #define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ 1417 #define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ ··· 1446 #define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ 1447 #define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery 1448 * Filtering */ 1449 + #define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ 1450 #define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ 1451 #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ 1452 #define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ 1453 + #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ 1454 #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address 1455 * filtering */ 1456 #define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host 1457 * memory */ 1458 + #define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address 1459 + * filtering */ 1460 + #define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ 1461 + #define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ 1462 #define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request 
*/ 1463 #define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ 1464 #define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ ··· 1463 #define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ 1464 #define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ 1465 1466 + /* SW Semaphore Register */ 1467 + #define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1468 + #define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 1469 + #define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ 1470 + #define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ 1471 + 1472 + /* FW Semaphore Register */ 1473 + #define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ 1474 + #define E1000_FWSM_MODE_SHIFT 1 1475 + #define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ 1476 + 1477 + /* FFLT Debug Register */ 1478 + #define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ 1479 + 1480 + typedef enum { 1481 + e1000_mng_mode_none = 0, 1482 + e1000_mng_mode_asf, 1483 + e1000_mng_mode_pt, 1484 + e1000_mng_mode_ipmi, 1485 + e1000_mng_mode_host_interface_only 1486 + } e1000_mng_mode; 1487 + 1488 + /* Host Inteface Control Register */ 1489 + #define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ 1490 + #define E1000_HICR_C 0x00000002 /* Driver sets this bit when done 1491 + * to put command in RAM */ 1492 + #define E1000_HICR_SV 0x00000004 /* Status Validity */ 1493 + #define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */ 1494 + 1495 + /* Host Interface Command Interface - Address range 0x8800-0x8EFF */ 1496 + #define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ 1497 + #define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ 1498 + #define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ 1499 + #define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ 1500 + 1501 + struct e1000_host_command_header { 1502 + uint8_t command_id; 1503 + uint8_t command_length; 1504 + uint8_t command_options; /* I/F bits for command, status for return */ 1505 + uint8_t checksum; 1506 + }; 1507 + struct e1000_host_command_info { 1508 + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ 1509 + uint8_t command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ 1510 + }; 1511 + 1512 + /* Host SMB register #0 */ 1513 + #define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ 1514 + #define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ 1515 + #define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ 1516 + #define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ 1517 + 1518 + /* Host SMB register #1 */ 1519 + #define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN 1520 + #define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN 1521 + #define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT 1522 + #define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT 1523 + 1524 + /* FW Status Register */ 1525 + #define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ 1526 + 1527 /* Wake Up Packet Length */ 1528 #define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ 1529 1530 #define E1000_MDALIGN 4096 1531 + 1532 + #define E1000_GCR_BEM32 0x00400000 1533 + /* Function Active and Power State to MNG */ 1534 + #define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 1535 + #define E1000_FACTPS_LAN0_VALID 0x00000004 1536 + #define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 1537 + #define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 1538 + #define 
E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 1539 + #define E1000_FACTPS_LAN1_VALID 0x00000100 1540 + #define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 1541 + #define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 1542 + #define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 1543 + #define E1000_FACTPS_IDE_ENABLE 0x00004000 1544 + #define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 1545 + #define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 1546 + #define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 1547 + #define E1000_FACTPS_SP_ENABLE 0x00100000 1548 + #define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 1549 + #define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 1550 + #define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 1551 + #define E1000_FACTPS_IPMI_ENABLE 0x04000000 1552 + #define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 1553 + #define E1000_FACTPS_MNGCG 0x20000000 1554 + #define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 1555 + #define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 1556 1557 /* EEPROM Commands - Microwire */ 1558 #define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ ··· 1477 1478 /* EEPROM Commands - SPI */ 1479 #define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ 1480 + #define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ 1481 + #define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ 1482 + #define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ 1483 + #define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ 1484 + #define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ 1485 + #define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ 1486 + #define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ 1487 + #define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ 1488 + #define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ 1489 + #define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ 1490 1491 /* EEPROM Size definitions */ 1492 + #define EEPROM_WORD_SIZE_SHIFT 6 1493 + #define EEPROM_SIZE_SHIFT 10 1494 #define EEPROM_SIZE_MASK 0x1C00 1495 1496 /* EEPROM Word Offsets */ ··· 1606 #define IFS_MIN 40 1607 #define IFS_RATIO 4 1608 1609 + /* Extended Configuration Control and Size */ 1610 + #define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 1611 + #define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 1612 + #define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 1613 + #define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 1614 + #define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 1615 + #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 1616 + #define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 1617 + #define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x1FFF0000 1618 + 1619 + #define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF 1620 + #define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 1621 + #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 1622 + 1623 /* PBA constants */ 1624 + #define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ 1625 #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ 1626 #define E1000_PBA_22K 0x0016 1627 #define E1000_PBA_24K 0x0018 ··· 1662 1663 /* Number of milliseconds we wait for auto-negotiation to complete */ 1664 #define LINK_UP_TIMEOUT 500 1665 + 1666 + /* Number of 100 microseconds we wait for PCI Express master disable */ 1667 + #define MASTER_DISABLE_TIMEOUT 800 1668 + /* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ 1669 + #define AUTO_READ_DONE_TIMEOUT 10 1670 + /* Number of milliseconds we wait for PHY 
configuration done after MAC reset */ 1671 + #define PHY_CFG_TIMEOUT 40 1672 1673 #define E1000_TX_BUFFER_SIZE ((uint32_t)1514) 1674 ··· 1763 #define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ 1764 #define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ 1765 #define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ 1766 + #define IGP02E1000_PHY_POWER_MGMT 0x19 1767 #define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ 1768 1769 /* IGP01E1000 AGC Registers - stores the cable length values*/ ··· 1771 #define IGP01E1000_PHY_AGC_C 0x1472 1772 #define IGP01E1000_PHY_AGC_D 0x1872 1773 1774 + /* IGP02E1000 AGC Registers for cable length values */ 1775 + #define IGP02E1000_PHY_AGC_A 0x11B1 1776 + #define IGP02E1000_PHY_AGC_B 0x12B1 1777 + #define IGP02E1000_PHY_AGC_C 0x14B1 1778 + #define IGP02E1000_PHY_AGC_D 0x18B1 1779 + 1780 /* IGP01E1000 DSP Reset Register */ 1781 #define IGP01E1000_PHY_DSP_RESET 0x1F33 1782 #define IGP01E1000_PHY_DSP_SET 0x1F71 1783 #define IGP01E1000_PHY_DSP_FFE 0x1F35 1784 1785 #define IGP01E1000_PHY_CHANNEL_NUM 4 1786 + #define IGP02E1000_PHY_CHANNEL_NUM 4 1787 + 1788 #define IGP01E1000_PHY_AGC_PARAM_A 0x1171 1789 #define IGP01E1000_PHY_AGC_PARAM_B 0x1271 1790 #define IGP01E1000_PHY_AGC_PARAM_C 0x1471 ··· 2060 #define IGP01E1000_MSE_CHANNEL_B 0x0F00 2061 #define IGP01E1000_MSE_CHANNEL_A 0xF000 2062 2063 + #define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ 2064 + #define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ 2065 + #define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ 2066 + 2067 /* IGP01E1000 DSP reset macros */ 2068 #define DSP_RESET_ENABLE 0x0 2069 #define DSP_RESET_DISABLE 0x2 2070 #define E1000_MAX_DSP_RESETS 10 2071 2072 + /* IGP01E1000 & IGP02E1000 AGC Registers */ 2073 2074 #define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ 2075 + #define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ 2076 + 2077 + /* IGP02E1000 AGC Register Length 9-bit mask */ 2078 + #define IGP02E1000_AGC_LENGTH_MASK 0x7F 2079 2080 /* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ 2081 #define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 2082 + #define IGP02E1000_AGC_LENGTH_TABLE_SIZE 128 2083 2084 + /* The precision error of the cable length is +/- 10 meters */ 2085 #define IGP01E1000_AGC_RANGE 10 2086 + #define IGP02E1000_AGC_RANGE 10 2087 2088 /* IGP01E1000 PCS Initialization register */ 2089 /* bits 3:6 in the PCS registers stores the channels polarity */ ··· 2113 #define M88E1000_12_PHY_ID M88E1000_E_PHY_ID 2114 #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID 2115 #define M88E1011_I_REV_4 0x04 2116 + #define M88E1111_I_PHY_ID 0x01410CC0 2117 + #define L1LXT971A_PHY_ID 0x001378E0 2118 2119 /* Miscellaneous PHY bit definitions. */ 2120 #define PHY_PREAMBLE 0xFFFFFFFF
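The PSRCTL usage comment added above packs four packet-split buffer sizes into one register with a mix of right and left shifts, which is easy to misread. Below is a minimal standalone C sketch of that encoding, using the E1000_PSRCTL_BSIZE* constants copied from the hunk above; ps_roundup() and ps_compose_psrctl() are illustrative helper names, not functions from this patch.

/*
 * Illustrative sketch only -- not part of this patch.  It implements the
 * PSRCTL encoding described by the usage comment above.  The
 * E1000_PSRCTL_BSIZE* values are copied from e1000_hw.h as added here;
 * ps_roundup() and ps_compose_psrctl() are made-up helper names.
 */
#include <stdint.h>

#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000

#define E1000_PSRCTL_BSIZE0_SHIFT  7    /* shift right:  128-byte units, bits  6:0  */
#define E1000_PSRCTL_BSIZE1_SHIFT  2    /* shift right: 1024-byte units, bits 13:8  */
#define E1000_PSRCTL_BSIZE2_SHIFT  6    /* shift left:  1024-byte units, bits 21:16 */
#define E1000_PSRCTL_BSIZE3_SHIFT  14   /* shift left:  1024-byte units, bits 29:24 */

/* Round len up to a multiple of gran (gran must be a power of two). */
static uint32_t ps_roundup(uint32_t len, uint32_t gran)
{
	return (len + gran - 1) & ~(gran - 1);
}

/* bsize0 = buffer 0 in bytes (128..16256); bsize1..bsize3 = the other
 * buffers in bytes (1024-byte granularity), per the ranges documented
 * in the usage comment above. */
static uint32_t ps_compose_psrctl(uint32_t bsize0, uint32_t bsize1,
				  uint32_t bsize2, uint32_t bsize3)
{
	uint32_t psrctl = 0;

	psrctl |= (ps_roundup(bsize0,  128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
		  E1000_PSRCTL_BSIZE0_MASK;
	psrctl |= (ps_roundup(bsize1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
		  E1000_PSRCTL_BSIZE1_MASK;
	psrctl |= (ps_roundup(bsize2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
		  E1000_PSRCTL_BSIZE2_MASK;
	psrctl |= (ps_roundup(bsize3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
		  E1000_PSRCTL_BSIZE3_MASK;

	return psrctl;
}

/* With the documented defaults (256, 4096, 4096, 0 bytes) this yields
 * 0x00040402: BSIZE0=2 (2*128), BSIZE1=4 (4*1024), BSIZE2=4, BSIZE3=0. */

Buffer 0 is the protocol-header buffer of the split ("one buffer for protocol header(s), three data buffers" in the descriptor comment above), which is why it alone is sized in 128-byte steps while the remaining buffers use 1024-byte steps.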
+694 -116
drivers/net/e1000/e1000_main.c
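The e1000_main.c hunks below wire the packet-split receive path in alongside the legacy one. As a rough guide to how the new writeback layout is meant to be read, here is a hedged sketch that decodes one completed packet-split descriptor using the union and status/error bits defined in e1000_hw.h above; it is an illustration only, and ps_desc_payload_len() is a hypothetical name, not a function added by this patch.

/*
 * Illustrative sketch only -- not code from this patch.  It reads one
 * packet-split writeback descriptor via union e1000_rx_desc_packet_split
 * and the status/error bits added to e1000_hw.h above.  Endian conversion
 * and the driver's real ring/page bookkeeping are omitted.
 */
#include "e1000_hw.h"

/*
 * Return the total data length of a completed, error-free packet-split
 * descriptor, or -1 if it has not been written back yet or reports a
 * frame error.  *hdr_len receives the split-off header length when the
 * hardware actually performed the header split (HDRSP set).
 */
static int ps_desc_payload_len(union e1000_rx_desc_packet_split *rx_desc,
			       uint16_t *hdr_len)
{
	uint32_t staterr = rx_desc->wb.middle.status_error;
	int length, i;

	if (!(staterr & E1000_RXD_STAT_DD))
		return -1;		/* descriptor not done yet */
	if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)
		return -1;		/* CRC/symbol/sequence/carrier/RX error */

	*hdr_len = 0;
	if (rx_desc->wb.upper.header_status & E1000_RXDPS_HDRSTAT_HDRSP)
		*hdr_len = rx_desc->wb.upper.header_status &
			   E1000_RXDPS_HDRSTAT_HDRLEN_MASK;

	/* buffer 0 plus the lengths reported for buffers 1-3 */
	length = rx_desc->wb.middle.length0;
	for (i = 0; i < 3; i++)
		length += rx_desc->wb.upper.length[i];

	return length;
}

The driver's actual cleanup path naturally does more (DMA unmapping, skb handling, ring advancement); the sketch is only meant to show how the new writeback fields fit together.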
··· 155 static int e1000_clean(struct net_device *netdev, int *budget); 156 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, 157 int *work_done, int work_to_do); 158 #else 159 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter); 160 #endif 161 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter); 162 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 163 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 164 int cmd); ··· 290 E1000_WRITE_FLUSH(&adapter->hw); 291 } 292 } 293 - 294 int 295 e1000_up(struct e1000_adapter *adapter) 296 { ··· 336 e1000_configure_tx(adapter); 337 e1000_setup_rctl(adapter); 338 e1000_configure_rx(adapter); 339 - e1000_alloc_rx_buffers(adapter); 340 341 #ifdef CONFIG_PCI_MSI 342 if(adapter->hw.mac_type > e1000_82547_rev_2) { ··· 392 e1000_clean_rx_ring(adapter); 393 394 /* If WoL is not enabled 395 * Power down the PHY so no link is implied when interface is down */ 396 - if(!adapter->wol && adapter->hw.media_type == e1000_media_type_copper) { 397 uint16_t mii_reg; 398 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 399 mii_reg |= MII_CR_POWER_DOWN; ··· 409 void 410 e1000_reset(struct e1000_adapter *adapter) 411 { 412 - uint32_t pba; 413 414 /* Repartition Pba for greater than 9k mtu 415 * To take effect CTRL.RST is required. 416 */ 417 418 - if(adapter->hw.mac_type < e1000_82547) { 419 - if(adapter->rx_buffer_len > E1000_RXBUFFER_8192) 420 - pba = E1000_PBA_40K; 421 - else 422 - pba = E1000_PBA_48K; 423 - } else { 424 - if(adapter->rx_buffer_len > E1000_RXBUFFER_8192) 425 - pba = E1000_PBA_22K; 426 - else 427 - pba = E1000_PBA_30K; 428 adapter->tx_fifo_head = 0; 429 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 430 adapter->tx_fifo_size = 431 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; 432 atomic_set(&adapter->tx_fifo_stall, 0); 433 } 434 E1000_WRITE_REG(&adapter->hw, PBA, pba); 435 436 /* flow control settings */ ··· 448 adapter->hw.fc_send_xon = 1; 449 adapter->hw.fc = adapter->hw.original_fc; 450 451 e1000_reset_hw(&adapter->hw); 452 if(adapter->hw.mac_type >= e1000_82544) 453 E1000_WRITE_REG(&adapter->hw, WUC, 0); 454 if(e1000_init_hw(&adapter->hw)) 455 DPRINTK(PROBE, ERR, "Hardware Error\n"); 456 - 457 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 458 E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE); 459 460 e1000_reset_adaptive(&adapter->hw); 461 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 462 } 463 464 /** ··· 485 { 486 struct net_device *netdev; 487 struct e1000_adapter *adapter; 488 static int cards_found = 0; 489 - unsigned long mmio_start; 490 - int mmio_len; 491 - int pci_using_dac; 492 - int i; 493 - int err; 494 uint16_t eeprom_data; 495 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 496 - 497 if((err = pci_enable_device(pdev))) 498 return err; 499 ··· 578 if((err = e1000_sw_init(adapter))) 579 goto err_sw_init; 580 581 if(adapter->hw.mac_type >= e1000_82543) { 582 netdev->features = NETIF_F_SG | 583 NETIF_F_HW_CSUM | ··· 593 if((adapter->hw.mac_type >= e1000_82544) && 594 (adapter->hw.mac_type != e1000_82547)) 595 netdev->features |= NETIF_F_TSO; 596 #endif 597 if(pci_using_dac) 598 netdev->features |= NETIF_F_HIGHDMA; ··· 605 /* hard_start_xmit is safe against parallel locking */ 606 netdev->features |= NETIF_F_LLTX; 607 608 /* before reading the EEPROM, reset the controller to 609 * put the device in a known good starting state */ 610 ··· 696 /* reset the hardware with the new settings */ 697 
e1000_reset(adapter); 698 699 strcpy(netdev->name, "eth%d"); 700 if((err = register_netdev(netdev))) 701 goto err_register; ··· 742 { 743 struct net_device *netdev = pci_get_drvdata(pdev); 744 struct e1000_adapter *adapter = netdev->priv; 745 - uint32_t manc; 746 747 flush_scheduled_work(); 748 ··· 755 } 756 } 757 758 unregister_netdev(netdev); 759 760 - e1000_phy_hw_reset(&adapter->hw); 761 762 iounmap(adapter->hw.hw_addr); 763 pci_release_regions(pdev); ··· 807 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 808 809 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 810 hw->max_frame_size = netdev->mtu + 811 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 812 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; ··· 821 822 /* initialize eeprom parameters */ 823 824 - e1000_init_eeprom_params(hw); 825 826 switch(hw->mac_type) { 827 default: ··· 889 890 if((err = e1000_up(adapter))) 891 goto err_up; 892 893 return E1000_SUCCESS; 894 ··· 929 e1000_free_tx_resources(adapter); 930 e1000_free_rx_resources(adapter); 931 932 return 0; 933 } 934 935 /** 936 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary 937 * @adapter: address of board private structure 938 - * @begin: address of beginning of memory 939 - * @end: address of end of memory 940 **/ 941 static inline boolean_t 942 e1000_check_64k_bound(struct e1000_adapter *adapter, ··· 1125 { 1126 struct e1000_desc_ring *rxdr = &adapter->rx_ring; 1127 struct pci_dev *pdev = adapter->pdev; 1128 - int size; 1129 1130 size = sizeof(struct e1000_buffer) * rxdr->count; 1131 rxdr->buffer_info = vmalloc(size); ··· 1136 } 1137 memset(rxdr->buffer_info, 0, size); 1138 1139 /* Round up to nearest 4K */ 1140 1141 - rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1142 E1000_ROUNDUP(rxdr->size, 4096); 1143 1144 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); ··· 1174 DPRINTK(PROBE, ERR, 1175 "Unble to Allocate Memory for the Recieve descriptor ring\n"); 1176 vfree(rxdr->buffer_info); 1177 return -ENOMEM; 1178 } 1179 ··· 1203 "Unable to Allocate aligned Memory for the" 1204 " Receive descriptor ring\n"); 1205 vfree(rxdr->buffer_info); 1206 return -ENOMEM; 1207 } else { 1208 /* free old, move on with the new one since its okay */ ··· 1227 static void 1228 e1000_setup_rctl(struct e1000_adapter *adapter) 1229 { 1230 - uint32_t rctl; 1231 1232 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1233 ··· 1243 else 1244 rctl &= ~E1000_RCTL_SBP; 1245 1246 /* Setup buffer sizes */ 1247 - rctl &= ~(E1000_RCTL_SZ_4096); 1248 - rctl |= (E1000_RCTL_BSEX | E1000_RCTL_LPE); 1249 - switch (adapter->rx_buffer_len) { 1250 - case E1000_RXBUFFER_2048: 1251 - default: 1252 - rctl |= E1000_RCTL_SZ_2048; 1253 - rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE); 1254 - break; 1255 - case E1000_RXBUFFER_4096: 1256 - rctl |= E1000_RCTL_SZ_4096; 1257 - break; 1258 - case E1000_RXBUFFER_8192: 1259 - rctl |= E1000_RCTL_SZ_8192; 1260 - break; 1261 - case E1000_RXBUFFER_16384: 1262 - rctl |= E1000_RCTL_SZ_16384; 1263 - break; 1264 } 1265 1266 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); ··· 1322 e1000_configure_rx(struct e1000_adapter *adapter) 1323 { 1324 uint64_t rdba = adapter->rx_ring.dma; 1325 - uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc); 1326 - uint32_t rctl; 1327 - uint32_t rxcsum; 1328 1329 /* disable receives while setting up the descriptors */ 1330 rctl = E1000_READ_REG(&adapter->hw, RCTL); ··· 1360 E1000_WRITE_REG(&adapter->hw, RDT, 0); 1361 1362 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1363 - 
if((adapter->hw.mac_type >= e1000_82543) && 1364 - (adapter->rx_csum == TRUE)) { 1365 rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM); 1366 - rxcsum |= E1000_RXCSUM_TUOFL; 1367 E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum); 1368 } 1369 1370 /* Enable Receives */ 1371 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); ··· 1483 1484 vfree(rx_ring->buffer_info); 1485 rx_ring->buffer_info = NULL; 1486 1487 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 1488 ··· 1503 { 1504 struct e1000_desc_ring *rx_ring = &adapter->rx_ring; 1505 struct e1000_buffer *buffer_info; 1506 struct pci_dev *pdev = adapter->pdev; 1507 unsigned long size; 1508 - unsigned int i; 1509 1510 /* Free all the Rx ring sk_buffs */ 1511 1512 for(i = 0; i < rx_ring->count; i++) { 1513 buffer_info = &rx_ring->buffer_info[i]; 1514 if(buffer_info->skb) { 1515 - 1516 pci_unmap_single(pdev, 1517 buffer_info->dma, 1518 buffer_info->length, ··· 1523 1524 dev_kfree_skb(buffer_info->skb); 1525 buffer_info->skb = NULL; 1526 } 1527 } 1528 1529 size = sizeof(struct e1000_buffer) * rx_ring->count; 1530 memset(rx_ring->buffer_info, 0, size); 1531 1532 /* Zero out the descriptor ring */ 1533 ··· 1779 uint32_t link; 1780 1781 e1000_check_for_link(&adapter->hw); 1782 1783 if((adapter->hw.media_type == e1000_media_type_internal_serdes) && 1784 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) ··· 1870 #define E1000_TX_FLAGS_CSUM 0x00000001 1871 #define E1000_TX_FLAGS_VLAN 0x00000002 1872 #define E1000_TX_FLAGS_TSO 0x00000004 1873 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 1874 #define E1000_TX_FLAGS_VLAN_SHIFT 16 1875 ··· 1881 struct e1000_context_desc *context_desc; 1882 unsigned int i; 1883 uint32_t cmd_length = 0; 1884 - uint16_t ipcse, tucse, mss; 1885 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 1886 int err; 1887 ··· 1894 1895 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 1896 mss = skb_shinfo(skb)->tso_size; 1897 - skb->nh.iph->tot_len = 0; 1898 - skb->nh.iph->check = 0; 1899 - skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr, 1900 - skb->nh.iph->daddr, 1901 - 0, 1902 - IPPROTO_TCP, 1903 - 0); 1904 ipcss = skb->nh.raw - skb->data; 1905 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; 1906 - ipcse = skb->h.raw - skb->data - 1; 1907 tucss = skb->h.raw - skb->data; 1908 tucso = (void *)&(skb->h.th->check) - (void *)skb->data; 1909 tucse = 0; 1910 1911 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 1912 - E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP | 1913 - (skb->len - (hdr_len))); 1914 1915 i = adapter->tx_ring.next_to_use; 1916 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i); ··· 2092 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2093 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2094 E1000_TXD_CMD_TSE; 2095 - txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; 2096 } 2097 2098 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { ··· 2170 return 0; 2171 } 2172 2173 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) 2174 static int 2175 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ··· 2284 local_irq_restore(flags); 2285 return NETDEV_TX_LOCKED; 2286 } 2287 2288 /* need: count + 2 desc gap to keep tail from touching 2289 * head, otherwise try next time */ ··· 2322 tx_flags |= E1000_TX_FLAGS_TSO; 2323 else if(likely(e1000_tx_csum(adapter, skb))) 2324 tx_flags |= E1000_TX_FLAGS_CSUM; 2325 2326 e1000_tx_queue(adapter, 2327 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss), ··· 2395 e1000_change_mtu(struct net_device *netdev, int new_mtu) 2396 { 
2397 struct e1000_adapter *adapter = netdev->priv; 2398 - int old_mtu = adapter->rx_buffer_len; 2399 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 2400 2401 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || ··· 2403 return -EINVAL; 2404 } 2405 2406 - if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) { 2407 - adapter->rx_buffer_len = E1000_RXBUFFER_2048; 2408 - 2409 - } else if(adapter->hw.mac_type < e1000_82543) { 2410 - DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n"); 2411 return -EINVAL; 2412 - 2413 - } else if(max_frame <= E1000_RXBUFFER_4096) { 2414 - adapter->rx_buffer_len = E1000_RXBUFFER_4096; 2415 - 2416 - } else if(max_frame <= E1000_RXBUFFER_8192) { 2417 - adapter->rx_buffer_len = E1000_RXBUFFER_8192; 2418 - 2419 - } else { 2420 - adapter->rx_buffer_len = E1000_RXBUFFER_16384; 2421 } 2422 2423 - if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) { 2424 e1000_down(adapter); 2425 e1000_up(adapter); 2426 } 2427 2428 - netdev->mtu = new_mtu; 2429 adapter->hw.max_frame_size = max_frame; 2430 2431 return 0; ··· 2531 adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR); 2532 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); 2533 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); 2534 } 2535 2536 /* Fill out the OS statistics structure */ ··· 2648 } 2649 2650 for(i = 0; i < E1000_MAX_INTR; i++) 2651 - if(unlikely(!e1000_clean_rx_irq(adapter) & 2652 !e1000_clean_tx_irq(adapter))) 2653 break; 2654 ··· 2674 int work_done = 0; 2675 2676 tx_cleaned = e1000_clean_tx_irq(adapter); 2677 - e1000_clean_rx_irq(adapter, &work_done, work_to_do); 2678 2679 *budget -= work_done; 2680 netdev->quota -= work_done; ··· 2812 2813 /** 2814 * e1000_rx_checksum - Receive Checksum Offload for 82543 2815 - * @adapter: board private structure 2816 - * @rx_desc: receive descriptor 2817 - * @sk_buff: socket buffer with received data 2818 **/ 2819 2820 static inline void 2821 e1000_rx_checksum(struct e1000_adapter *adapter, 2822 - struct e1000_rx_desc *rx_desc, 2823 - struct sk_buff *skb) 2824 { 2825 /* 82543 or newer only */ 2826 - if(unlikely((adapter->hw.mac_type < e1000_82543) || 2827 /* Ignore Checksum bit is set */ 2828 - (rx_desc->status & E1000_RXD_STAT_IXSM) || 2829 - /* TCP Checksum has not been calculated */ 2830 - (!(rx_desc->status & E1000_RXD_STAT_TCPCS)))) { 2831 - skb->ip_summed = CHECKSUM_NONE; 2832 return; 2833 } 2834 - 2835 - /* At this point we know the hardware did the TCP checksum */ 2836 - /* now look at the TCP checksum error bit */ 2837 - if(rx_desc->errors & E1000_RXD_ERR_TCPE) { 2838 - /* let the stack verify checksum errors */ 2839 - skb->ip_summed = CHECKSUM_NONE; 2840 - adapter->hw_csum_err++; 2841 } else { 2842 /* TCP checksum is good */ 2843 skb->ip_summed = CHECKSUM_UNNECESSARY; 2844 - adapter->hw_csum_good++; 2845 } 2846 } 2847 2848 /** 2849 - * e1000_clean_rx_irq - Send received data up the network stack 2850 * @adapter: board private structure 2851 **/ 2852 ··· 2935 skb_put(skb, length - ETHERNET_FCS_SIZE); 2936 2937 /* Receive Checksum Offload */ 2938 - e1000_rx_checksum(adapter, rx_desc, skb); 2939 - 2940 skb->protocol = eth_type_trans(skb, netdev); 2941 #ifdef CONFIG_E1000_NAPI 2942 if(unlikely(adapter->vlgrp && 2943 (rx_desc->status & E1000_RXD_STAT_VP))) { 2944 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 2945 - le16_to_cpu(rx_desc->special) & 2946 - E1000_RXD_SPC_VLAN_MASK); 2947 } else { 2948 netif_receive_skb(skb); 2949 } ··· 2968 2969 rx_desc = E1000_RX_DESC(*rx_ring, i); 2970 } 2971 - 2972 rx_ring->next_to_clean = i; 2973 - 2974 - 
e1000_alloc_rx_buffers(adapter); 2975 2976 return cleaned; 2977 } 2978 2979 /** 2980 - * e1000_alloc_rx_buffers - Replace used receive buffers 2981 * @adapter: address of board private structure 2982 **/ 2983 ··· 3204 buffer_info = &rx_ring->buffer_info[i]; 3205 } 3206 3207 rx_ring->next_to_use = i; 3208 } 3209 ··· 3530 rctl |= E1000_RCTL_VFE; 3531 rctl &= ~E1000_RCTL_CFIEN; 3532 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 3533 } else { 3534 /* disable VLAN tag insert/strip */ 3535 ctrl = E1000_READ_REG(&adapter->hw, CTRL); ··· 3541 rctl = E1000_READ_REG(&adapter->hw, RCTL); 3542 rctl &= ~E1000_RCTL_VFE; 3543 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 3544 } 3545 3546 e1000_irq_enable(adapter); ··· 3555 { 3556 struct e1000_adapter *adapter = netdev->priv; 3557 uint32_t vfta, index; 3558 - 3559 /* add VID to filter table */ 3560 index = (vid >> 5) & 0x7F; 3561 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); ··· 3579 3580 e1000_irq_enable(adapter); 3581 3582 /* remove VID from filter table */ 3583 index = (vid >> 5) & 0x7F; 3584 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); ··· 3658 { 3659 struct net_device *netdev = pci_get_drvdata(pdev); 3660 struct e1000_adapter *adapter = netdev->priv; 3661 - uint32_t ctrl, ctrl_ext, rctl, manc, status; 3662 uint32_t wufc = adapter->wol; 3663 3664 netif_device_detach(netdev); ··· 3700 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext); 3701 } 3702 3703 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); 3704 E1000_WRITE_REG(&adapter->hw, WUFC, wufc); 3705 pci_enable_wake(pdev, 3, 1); ··· 3727 } 3728 } 3729 3730 pci_disable_device(pdev); 3731 3732 state = (state > 0) ? 3 : 0; ··· 3751 { 3752 struct net_device *netdev = pci_get_drvdata(pdev); 3753 struct e1000_adapter *adapter = netdev->priv; 3754 - uint32_t manc, ret; 3755 3756 pci_set_power_state(pdev, 0); 3757 pci_restore_state(pdev); ··· 3776 E1000_WRITE_REG(&adapter->hw, MANC, manc); 3777 } 3778 3779 return 0; 3780 } 3781 #endif 3782 - 3783 #ifdef CONFIG_NET_POLL_CONTROLLER 3784 /* 3785 * Polling 'interrupt' - used by things like netconsole to send skbs
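The updated e1000_main.c listing follows. One change that is easier to see in isolation is the receive-buffer sizing in e1000_change_mtu: the code above always picks the smallest of four fixed buffer sizes (2048/4096/8192/16384 bytes), while in the new code MAC types newer than 82547 rev 2 simply round the maximum frame length up to the next 1 KB, matching the 82573's ability to specify buffers in 1 KB increments. The snippet below is a minimal user-space sketch of the two strategies, not driver code; it assumes the usual 14-byte Ethernet header and 4-byte FCS when turning an MTU into a frame size.

/* illustration only: build with  cc -std=c99 -o bufsize bufsize.c */
#include <stdio.h>

/* round up to the next multiple of a power-of-two size (same idiom the
 * driver uses for its E1000_ROUNDUP of rx_buffer_len to 1024) */
#define ROUNDUP(i, size) (((i) + (size) - 1) & ~((size) - 1))

/* legacy MACs: smallest fixed bucket that holds the frame */
static unsigned int legacy_rx_buffer_len(unsigned int max_frame)
{
        if (max_frame <= 2048)
                return 2048;
        else if (max_frame <= 4096)
                return 4096;
        else if (max_frame <= 8192)
                return 8192;
        else
                return 16384;
}

/* MACs newer than 82547 rev 2: round the frame size up to the next 1 KB */
static unsigned int new_rx_buffer_len(unsigned int max_frame)
{
        return ROUNDUP(max_frame, 1024);
}

int main(void)
{
        unsigned int mtus[] = { 1500, 4000, 9000 };

        for (int i = 0; i < 3; i++) {
                /* assumed: 14-byte Ethernet header + 4-byte FCS */
                unsigned int max_frame = mtus[i] + 14 + 4;

                printf("mtu %u: legacy buffer %u, 1K-rounded buffer %u\n",
                       mtus[i], legacy_rx_buffer_len(max_frame),
                       new_rx_buffer_len(max_frame));
        }
        return 0;
}

For a 9000-byte MTU, for example, the rounded size is 9216 bytes instead of the 16384-byte bucket, so the finer-grained sizing wastes less receive memory on the newer parts.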
··· 155 static int e1000_clean(struct net_device *netdev, int *budget); 156 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, 157 int *work_done, int work_to_do); 158 + static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 159 + int *work_done, int work_to_do); 160 #else 161 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter); 162 + static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter); 163 #endif 164 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter); 165 + static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter); 166 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 167 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 168 int cmd); ··· 286 E1000_WRITE_FLUSH(&adapter->hw); 287 } 288 } 289 + void 290 + e1000_update_mng_vlan(struct e1000_adapter *adapter) 291 + { 292 + struct net_device *netdev = adapter->netdev; 293 + uint16_t vid = adapter->hw.mng_cookie.vlan_id; 294 + uint16_t old_vid = adapter->mng_vlan_id; 295 + if(adapter->vlgrp) { 296 + if(!adapter->vlgrp->vlan_devices[vid]) { 297 + if(adapter->hw.mng_cookie.status & 298 + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 299 + e1000_vlan_rx_add_vid(netdev, vid); 300 + adapter->mng_vlan_id = vid; 301 + } else 302 + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 303 + 304 + if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 305 + (vid != old_vid) && 306 + !adapter->vlgrp->vlan_devices[old_vid]) 307 + e1000_vlan_rx_kill_vid(netdev, old_vid); 308 + } 309 + } 310 + } 311 + 312 int 313 e1000_up(struct e1000_adapter *adapter) 314 { ··· 310 e1000_configure_tx(adapter); 311 e1000_setup_rctl(adapter); 312 e1000_configure_rx(adapter); 313 + adapter->alloc_rx_buf(adapter); 314 315 #ifdef CONFIG_PCI_MSI 316 if(adapter->hw.mac_type > e1000_82547_rev_2) { ··· 366 e1000_clean_rx_ring(adapter); 367 368 /* If WoL is not enabled 369 + * and management mode is not IAMT 370 * Power down the PHY so no link is implied when interface is down */ 371 + if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 && 372 + adapter->hw.media_type == e1000_media_type_copper && 373 + !e1000_check_mng_mode(&adapter->hw) && 374 + !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) { 375 uint16_t mii_reg; 376 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 377 mii_reg |= MII_CR_POWER_DOWN; ··· 379 void 380 e1000_reset(struct e1000_adapter *adapter) 381 { 382 + uint32_t pba, manc; 383 384 /* Repartition Pba for greater than 9k mtu 385 * To take effect CTRL.RST is required. 
386 */ 387 388 + switch (adapter->hw.mac_type) { 389 + case e1000_82547: 390 + pba = E1000_PBA_30K; 391 + break; 392 + case e1000_82573: 393 + pba = E1000_PBA_12K; 394 + break; 395 + default: 396 + pba = E1000_PBA_48K; 397 + break; 398 + } 399 + 400 + 401 + 402 + if(adapter->hw.mac_type == e1000_82547) { 403 adapter->tx_fifo_head = 0; 404 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 405 adapter->tx_fifo_size = 406 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; 407 atomic_set(&adapter->tx_fifo_stall, 0); 408 } 409 + 410 E1000_WRITE_REG(&adapter->hw, PBA, pba); 411 412 /* flow control settings */ ··· 412 adapter->hw.fc_send_xon = 1; 413 adapter->hw.fc = adapter->hw.original_fc; 414 415 + /* Allow time for pending master requests to run */ 416 e1000_reset_hw(&adapter->hw); 417 if(adapter->hw.mac_type >= e1000_82544) 418 E1000_WRITE_REG(&adapter->hw, WUC, 0); 419 if(e1000_init_hw(&adapter->hw)) 420 DPRINTK(PROBE, ERR, "Hardware Error\n"); 421 + e1000_update_mng_vlan(adapter); 422 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 423 E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE); 424 425 e1000_reset_adaptive(&adapter->hw); 426 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 427 + if (adapter->en_mng_pt) { 428 + manc = E1000_READ_REG(&adapter->hw, MANC); 429 + manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST); 430 + E1000_WRITE_REG(&adapter->hw, MANC, manc); 431 + } 432 } 433 434 /** ··· 443 { 444 struct net_device *netdev; 445 struct e1000_adapter *adapter; 446 + unsigned long mmio_start, mmio_len; 447 + uint32_t swsm; 448 + 449 static int cards_found = 0; 450 + int i, err, pci_using_dac; 451 uint16_t eeprom_data; 452 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 453 if((err = pci_enable_device(pdev))) 454 return err; 455 ··· 538 if((err = e1000_sw_init(adapter))) 539 goto err_sw_init; 540 541 + if((err = e1000_check_phy_reset_block(&adapter->hw))) 542 + DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); 543 + 544 if(adapter->hw.mac_type >= e1000_82543) { 545 netdev->features = NETIF_F_SG | 546 NETIF_F_HW_CSUM | ··· 550 if((adapter->hw.mac_type >= e1000_82544) && 551 (adapter->hw.mac_type != e1000_82547)) 552 netdev->features |= NETIF_F_TSO; 553 + 554 + #ifdef NETIF_F_TSO_IPV6 555 + if(adapter->hw.mac_type > e1000_82547_rev_2) 556 + netdev->features |= NETIF_F_TSO_IPV6; 557 + #endif 558 #endif 559 if(pci_using_dac) 560 netdev->features |= NETIF_F_HIGHDMA; ··· 557 /* hard_start_xmit is safe against parallel locking */ 558 netdev->features |= NETIF_F_LLTX; 559 560 + adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); 561 + 562 /* before reading the EEPROM, reset the controller to 563 * put the device in a known good starting state */ 564 ··· 646 /* reset the hardware with the new settings */ 647 e1000_reset(adapter); 648 649 + /* Let firmware know the driver has taken over */ 650 + switch(adapter->hw.mac_type) { 651 + case e1000_82573: 652 + swsm = E1000_READ_REG(&adapter->hw, SWSM); 653 + E1000_WRITE_REG(&adapter->hw, SWSM, 654 + swsm | E1000_SWSM_DRV_LOAD); 655 + break; 656 + default: 657 + break; 658 + } 659 + 660 strcpy(netdev->name, "eth%d"); 661 if((err = register_netdev(netdev))) 662 goto err_register; ··· 681 { 682 struct net_device *netdev = pci_get_drvdata(pdev); 683 struct e1000_adapter *adapter = netdev->priv; 684 + uint32_t manc, swsm; 685 686 flush_scheduled_work(); 687 ··· 694 } 695 } 696 697 + switch(adapter->hw.mac_type) { 698 + case e1000_82573: 699 + swsm = E1000_READ_REG(&adapter->hw, SWSM); 700 + 
E1000_WRITE_REG(&adapter->hw, SWSM, 701 + swsm & ~E1000_SWSM_DRV_LOAD); 702 + break; 703 + 704 + default: 705 + break; 706 + } 707 + 708 unregister_netdev(netdev); 709 710 + if(!e1000_check_phy_reset_block(&adapter->hw)) 711 + e1000_phy_hw_reset(&adapter->hw); 712 713 iounmap(adapter->hw.hw_addr); 714 pci_release_regions(pdev); ··· 734 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 735 736 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 737 + adapter->rx_ps_bsize0 = E1000_RXBUFFER_256; 738 hw->max_frame_size = netdev->mtu + 739 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 740 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; ··· 747 748 /* initialize eeprom parameters */ 749 750 + if(e1000_init_eeprom_params(hw)) { 751 + E1000_ERR("EEPROM initialization failed\n"); 752 + return -EIO; 753 + } 754 755 switch(hw->mac_type) { 756 default: ··· 812 813 if((err = e1000_up(adapter))) 814 goto err_up; 815 + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 816 + if((adapter->hw.mng_cookie.status & 817 + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 818 + e1000_update_mng_vlan(adapter); 819 + } 820 821 return E1000_SUCCESS; 822 ··· 847 e1000_free_tx_resources(adapter); 848 e1000_free_rx_resources(adapter); 849 850 + if((adapter->hw.mng_cookie.status & 851 + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 852 + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 853 + } 854 return 0; 855 } 856 857 /** 858 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary 859 * @adapter: address of board private structure 860 + * @start: address of beginning of memory 861 + * @len: length of memory 862 **/ 863 static inline boolean_t 864 e1000_check_64k_bound(struct e1000_adapter *adapter, ··· 1039 { 1040 struct e1000_desc_ring *rxdr = &adapter->rx_ring; 1041 struct pci_dev *pdev = adapter->pdev; 1042 + int size, desc_len; 1043 1044 size = sizeof(struct e1000_buffer) * rxdr->count; 1045 rxdr->buffer_info = vmalloc(size); ··· 1050 } 1051 memset(rxdr->buffer_info, 0, size); 1052 1053 + size = sizeof(struct e1000_ps_page) * rxdr->count; 1054 + rxdr->ps_page = kmalloc(size, GFP_KERNEL); 1055 + if(!rxdr->ps_page) { 1056 + vfree(rxdr->buffer_info); 1057 + DPRINTK(PROBE, ERR, 1058 + "Unable to allocate memory for the receive descriptor ring\n"); 1059 + return -ENOMEM; 1060 + } 1061 + memset(rxdr->ps_page, 0, size); 1062 + 1063 + size = sizeof(struct e1000_ps_page_dma) * rxdr->count; 1064 + rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); 1065 + if(!rxdr->ps_page_dma) { 1066 + vfree(rxdr->buffer_info); 1067 + kfree(rxdr->ps_page); 1068 + DPRINTK(PROBE, ERR, 1069 + "Unable to allocate memory for the receive descriptor ring\n"); 1070 + return -ENOMEM; 1071 + } 1072 + memset(rxdr->ps_page_dma, 0, size); 1073 + 1074 + if(adapter->hw.mac_type <= e1000_82547_rev_2) 1075 + desc_len = sizeof(struct e1000_rx_desc); 1076 + else 1077 + desc_len = sizeof(union e1000_rx_desc_packet_split); 1078 + 1079 /* Round up to nearest 4K */ 1080 1081 + rxdr->size = rxdr->count * desc_len; 1082 E1000_ROUNDUP(rxdr->size, 4096); 1083 1084 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); ··· 1062 DPRINTK(PROBE, ERR, 1063 "Unble to Allocate Memory for the Recieve descriptor ring\n"); 1064 vfree(rxdr->buffer_info); 1065 + kfree(rxdr->ps_page); 1066 + kfree(rxdr->ps_page_dma); 1067 return -ENOMEM; 1068 } 1069 ··· 1089 "Unable to Allocate aligned Memory for the" 1090 " Receive descriptor ring\n"); 1091 vfree(rxdr->buffer_info); 1092 + kfree(rxdr->ps_page); 1093 + kfree(rxdr->ps_page_dma); 1094 return -ENOMEM; 1095 } else { 
1096 /* free old, move on with the new one since its okay */ ··· 1111 static void 1112 e1000_setup_rctl(struct e1000_adapter *adapter) 1113 { 1114 + uint32_t rctl, rfctl; 1115 + uint32_t psrctl = 0; 1116 1117 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1118 ··· 1126 else 1127 rctl &= ~E1000_RCTL_SBP; 1128 1129 + if (adapter->netdev->mtu <= ETH_DATA_LEN) 1130 + rctl &= ~E1000_RCTL_LPE; 1131 + else 1132 + rctl |= E1000_RCTL_LPE; 1133 + 1134 /* Setup buffer sizes */ 1135 + if(adapter->hw.mac_type == e1000_82573) { 1136 + /* We can now specify buffers in 1K increments. 1137 + * BSIZE and BSEX are ignored in this case. */ 1138 + rctl |= adapter->rx_buffer_len << 0x11; 1139 + } else { 1140 + rctl &= ~E1000_RCTL_SZ_4096; 1141 + rctl |= E1000_RCTL_BSEX; 1142 + switch (adapter->rx_buffer_len) { 1143 + case E1000_RXBUFFER_2048: 1144 + default: 1145 + rctl |= E1000_RCTL_SZ_2048; 1146 + rctl &= ~E1000_RCTL_BSEX; 1147 + break; 1148 + case E1000_RXBUFFER_4096: 1149 + rctl |= E1000_RCTL_SZ_4096; 1150 + break; 1151 + case E1000_RXBUFFER_8192: 1152 + rctl |= E1000_RCTL_SZ_8192; 1153 + break; 1154 + case E1000_RXBUFFER_16384: 1155 + rctl |= E1000_RCTL_SZ_16384; 1156 + break; 1157 + } 1158 + } 1159 + 1160 + #ifdef CONFIG_E1000_PACKET_SPLIT 1161 + /* 82571 and greater support packet-split where the protocol 1162 + * header is placed in skb->data and the packet data is 1163 + * placed in pages hanging off of skb_shinfo(skb)->nr_frags. 1164 + * In the case of a non-split, skb->data is linearly filled, 1165 + * followed by the page buffers. Therefore, skb->data is 1166 + * sized to hold the largest protocol header. 1167 + */ 1168 + adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2) 1169 + && (adapter->netdev->mtu 1170 + < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0)); 1171 + #endif 1172 + if(adapter->rx_ps) { 1173 + /* Configure extra packet-split registers */ 1174 + rfctl = E1000_READ_REG(&adapter->hw, RFCTL); 1175 + rfctl |= E1000_RFCTL_EXTEN; 1176 + /* disable IPv6 packet split support */ 1177 + rfctl |= E1000_RFCTL_IPV6_DIS; 1178 + E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); 1179 + 1180 + rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; 1181 + 1182 + psrctl |= adapter->rx_ps_bsize0 >> 1183 + E1000_PSRCTL_BSIZE0_SHIFT; 1184 + psrctl |= PAGE_SIZE >> 1185 + E1000_PSRCTL_BSIZE1_SHIFT; 1186 + psrctl |= PAGE_SIZE << 1187 + E1000_PSRCTL_BSIZE2_SHIFT; 1188 + psrctl |= PAGE_SIZE << 1189 + E1000_PSRCTL_BSIZE3_SHIFT; 1190 + 1191 + E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl); 1192 } 1193 1194 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); ··· 1160 e1000_configure_rx(struct e1000_adapter *adapter) 1161 { 1162 uint64_t rdba = adapter->rx_ring.dma; 1163 + uint32_t rdlen, rctl, rxcsum; 1164 + 1165 + if(adapter->rx_ps) { 1166 + rdlen = adapter->rx_ring.count * 1167 + sizeof(union e1000_rx_desc_packet_split); 1168 + adapter->clean_rx = e1000_clean_rx_irq_ps; 1169 + adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 1170 + } else { 1171 + rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc); 1172 + adapter->clean_rx = e1000_clean_rx_irq; 1173 + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 1174 + } 1175 1176 /* disable receives while setting up the descriptors */ 1177 rctl = E1000_READ_REG(&adapter->hw, RCTL); ··· 1189 E1000_WRITE_REG(&adapter->hw, RDT, 0); 1190 1191 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1192 + if(adapter->hw.mac_type >= e1000_82543) { 1193 rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM); 1194 + if(adapter->rx_csum == TRUE) { 1195 + rxcsum |= E1000_RXCSUM_TUOFL; 1196 + 1197 + 
/* Enable 82573 IPv4 payload checksum for UDP fragments 1198 + * Must be used in conjunction with packet-split. */ 1199 + if((adapter->hw.mac_type > e1000_82547_rev_2) && 1200 + (adapter->rx_ps)) { 1201 + rxcsum |= E1000_RXCSUM_IPPCSE; 1202 + } 1203 + } else { 1204 + rxcsum &= ~E1000_RXCSUM_TUOFL; 1205 + /* don't need to clear IPPCSE as it defaults to 0 */ 1206 + } 1207 E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum); 1208 } 1209 + 1210 + if (adapter->hw.mac_type == e1000_82573) 1211 + E1000_WRITE_REG(&adapter->hw, ERT, 0x0100); 1212 1213 /* Enable Receives */ 1214 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); ··· 1298 1299 vfree(rx_ring->buffer_info); 1300 rx_ring->buffer_info = NULL; 1301 + kfree(rx_ring->ps_page); 1302 + rx_ring->ps_page = NULL; 1303 + kfree(rx_ring->ps_page_dma); 1304 + rx_ring->ps_page_dma = NULL; 1305 1306 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 1307 ··· 1314 { 1315 struct e1000_desc_ring *rx_ring = &adapter->rx_ring; 1316 struct e1000_buffer *buffer_info; 1317 + struct e1000_ps_page *ps_page; 1318 + struct e1000_ps_page_dma *ps_page_dma; 1319 struct pci_dev *pdev = adapter->pdev; 1320 unsigned long size; 1321 + unsigned int i, j; 1322 1323 /* Free all the Rx ring sk_buffs */ 1324 1325 for(i = 0; i < rx_ring->count; i++) { 1326 buffer_info = &rx_ring->buffer_info[i]; 1327 if(buffer_info->skb) { 1328 + ps_page = &rx_ring->ps_page[i]; 1329 + ps_page_dma = &rx_ring->ps_page_dma[i]; 1330 pci_unmap_single(pdev, 1331 buffer_info->dma, 1332 buffer_info->length, ··· 1331 1332 dev_kfree_skb(buffer_info->skb); 1333 buffer_info->skb = NULL; 1334 + 1335 + for(j = 0; j < PS_PAGE_BUFFERS; j++) { 1336 + if(!ps_page->ps_page[j]) break; 1337 + pci_unmap_single(pdev, 1338 + ps_page_dma->ps_page_dma[j], 1339 + PAGE_SIZE, PCI_DMA_FROMDEVICE); 1340 + ps_page_dma->ps_page_dma[j] = 0; 1341 + put_page(ps_page->ps_page[j]); 1342 + ps_page->ps_page[j] = NULL; 1343 + } 1344 } 1345 } 1346 1347 size = sizeof(struct e1000_buffer) * rx_ring->count; 1348 memset(rx_ring->buffer_info, 0, size); 1349 + size = sizeof(struct e1000_ps_page) * rx_ring->count; 1350 + memset(rx_ring->ps_page, 0, size); 1351 + size = sizeof(struct e1000_ps_page_dma) * rx_ring->count; 1352 + memset(rx_ring->ps_page_dma, 0, size); 1353 1354 /* Zero out the descriptor ring */ 1355 ··· 1573 uint32_t link; 1574 1575 e1000_check_for_link(&adapter->hw); 1576 + if (adapter->hw.mac_type == e1000_82573) { 1577 + e1000_enable_tx_pkt_filtering(&adapter->hw); 1578 + if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) 1579 + e1000_update_mng_vlan(adapter); 1580 + } 1581 1582 if((adapter->hw.media_type == e1000_media_type_internal_serdes) && 1583 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) ··· 1659 #define E1000_TX_FLAGS_CSUM 0x00000001 1660 #define E1000_TX_FLAGS_VLAN 0x00000002 1661 #define E1000_TX_FLAGS_TSO 0x00000004 1662 + #define E1000_TX_FLAGS_IPV4 0x00000008 1663 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 1664 #define E1000_TX_FLAGS_VLAN_SHIFT 16 1665 ··· 1669 struct e1000_context_desc *context_desc; 1670 unsigned int i; 1671 uint32_t cmd_length = 0; 1672 + uint16_t ipcse = 0, tucse, mss; 1673 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 1674 int err; 1675 ··· 1682 1683 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 1684 mss = skb_shinfo(skb)->tso_size; 1685 + if(skb->protocol == ntohs(ETH_P_IP)) { 1686 + skb->nh.iph->tot_len = 0; 1687 + skb->nh.iph->check = 0; 1688 + skb->h.th->check = 1689 + ~csum_tcpudp_magic(skb->nh.iph->saddr, 1690 + skb->nh.iph->daddr, 1691 + 0, 
1692 + IPPROTO_TCP, 1693 + 0); 1694 + cmd_length = E1000_TXD_CMD_IP; 1695 + ipcse = skb->h.raw - skb->data - 1; 1696 + #ifdef NETIF_F_TSO_IPV6 1697 + } else if(skb->protocol == ntohs(ETH_P_IPV6)) { 1698 + skb->nh.ipv6h->payload_len = 0; 1699 + skb->h.th->check = 1700 + ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, 1701 + &skb->nh.ipv6h->daddr, 1702 + 0, 1703 + IPPROTO_TCP, 1704 + 0); 1705 + ipcse = 0; 1706 + #endif 1707 + } 1708 ipcss = skb->nh.raw - skb->data; 1709 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; 1710 tucss = skb->h.raw - skb->data; 1711 tucso = (void *)&(skb->h.th->check) - (void *)skb->data; 1712 tucse = 0; 1713 1714 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 1715 + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 1716 1717 i = adapter->tx_ring.next_to_use; 1718 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i); ··· 1866 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { 1867 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 1868 E1000_TXD_CMD_TSE; 1869 + txd_upper |= E1000_TXD_POPTS_TXSM << 8; 1870 + 1871 + if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) 1872 + txd_upper |= E1000_TXD_POPTS_IXSM << 8; 1873 } 1874 1875 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { ··· 1941 return 0; 1942 } 1943 1944 + #define MINIMUM_DHCP_PACKET_SIZE 282 1945 + static inline int 1946 + e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) 1947 + { 1948 + struct e1000_hw *hw = &adapter->hw; 1949 + uint16_t length, offset; 1950 + if(vlan_tx_tag_present(skb)) { 1951 + if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 1952 + ( adapter->hw.mng_cookie.status & 1953 + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) 1954 + return 0; 1955 + } 1956 + if(htons(ETH_P_IP) == skb->protocol) { 1957 + const struct iphdr *ip = skb->nh.iph; 1958 + if(IPPROTO_UDP == ip->protocol) { 1959 + struct udphdr *udp = (struct udphdr *)(skb->h.uh); 1960 + if(ntohs(udp->dest) == 67) { 1961 + offset = (uint8_t *)udp + 8 - skb->data; 1962 + length = skb->len - offset; 1963 + 1964 + return e1000_mng_write_dhcp_info(hw, 1965 + (uint8_t *)udp + 8, length); 1966 + } 1967 + } 1968 + } else if((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { 1969 + struct ethhdr *eth = (struct ethhdr *) skb->data; 1970 + if((htons(ETH_P_IP) == eth->h_proto)) { 1971 + const struct iphdr *ip = 1972 + (struct iphdr *)((uint8_t *)skb->data+14); 1973 + if(IPPROTO_UDP == ip->protocol) { 1974 + struct udphdr *udp = 1975 + (struct udphdr *)((uint8_t *)ip + 1976 + (ip->ihl << 2)); 1977 + if(ntohs(udp->dest) == 67) { 1978 + offset = (uint8_t *)udp + 8 - skb->data; 1979 + length = skb->len - offset; 1980 + 1981 + return e1000_mng_write_dhcp_info(hw, 1982 + (uint8_t *)udp + 8, 1983 + length); 1984 + } 1985 + } 1986 + } 1987 + } 1988 + return 0; 1989 + } 1990 + 1991 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) 1992 static int 1993 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ··· 2008 local_irq_restore(flags); 2009 return NETDEV_TX_LOCKED; 2010 } 2011 + if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) 2012 + e1000_transfer_dhcp_info(adapter, skb); 2013 + 2014 2015 /* need: count + 2 desc gap to keep tail from touching 2016 * head, otherwise try next time */ ··· 2043 tx_flags |= E1000_TX_FLAGS_TSO; 2044 else if(likely(e1000_tx_csum(adapter, skb))) 2045 tx_flags |= E1000_TX_FLAGS_CSUM; 2046 + 2047 + /* Old method was to assume IPv4 packet by default if TSO was enabled. 2048 + * 82573 hardware supports TSO capabilities for IPv6 as well... 
2049 + * no longer assume, we must. */ 2050 + if(likely(skb->protocol == ntohs(ETH_P_IP))) 2051 + tx_flags |= E1000_TX_FLAGS_IPV4; 2052 2053 e1000_tx_queue(adapter, 2054 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss), ··· 2110 e1000_change_mtu(struct net_device *netdev, int new_mtu) 2111 { 2112 struct e1000_adapter *adapter = netdev->priv; 2113 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 2114 2115 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || ··· 2119 return -EINVAL; 2120 } 2121 2122 + #define MAX_STD_JUMBO_FRAME_SIZE 9216 2123 + /* might want this to be bigger enum check... */ 2124 + if (adapter->hw.mac_type == e1000_82573 && 2125 + max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { 2126 + DPRINTK(PROBE, ERR, "Jumbo Frames not supported " 2127 + "on 82573\n"); 2128 return -EINVAL; 2129 } 2130 2131 + if(adapter->hw.mac_type > e1000_82547_rev_2) { 2132 + adapter->rx_buffer_len = max_frame; 2133 + E1000_ROUNDUP(adapter->rx_buffer_len, 1024); 2134 + } else { 2135 + if(unlikely((adapter->hw.mac_type < e1000_82543) && 2136 + (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) { 2137 + DPRINTK(PROBE, ERR, "Jumbo Frames not supported " 2138 + "on 82542\n"); 2139 + return -EINVAL; 2140 + 2141 + } else { 2142 + if(max_frame <= E1000_RXBUFFER_2048) { 2143 + adapter->rx_buffer_len = E1000_RXBUFFER_2048; 2144 + } else if(max_frame <= E1000_RXBUFFER_4096) { 2145 + adapter->rx_buffer_len = E1000_RXBUFFER_4096; 2146 + } else if(max_frame <= E1000_RXBUFFER_8192) { 2147 + adapter->rx_buffer_len = E1000_RXBUFFER_8192; 2148 + } else if(max_frame <= E1000_RXBUFFER_16384) { 2149 + adapter->rx_buffer_len = E1000_RXBUFFER_16384; 2150 + } 2151 + } 2152 + } 2153 + 2154 + netdev->mtu = new_mtu; 2155 + 2156 + if(netif_running(netdev)) { 2157 e1000_down(adapter); 2158 e1000_up(adapter); 2159 } 2160 2161 adapter->hw.max_frame_size = max_frame; 2162 2163 return 0; ··· 2231 adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR); 2232 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); 2233 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); 2234 + } 2235 + if(hw->mac_type > e1000_82547_rev_2) { 2236 + adapter->stats.iac += E1000_READ_REG(hw, IAC); 2237 + adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); 2238 + adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); 2239 + adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); 2240 + adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); 2241 + adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC); 2242 + adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); 2243 + adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); 2244 + adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); 2245 } 2246 2247 /* Fill out the OS statistics structure */ ··· 2337 } 2338 2339 for(i = 0; i < E1000_MAX_INTR; i++) 2340 + if(unlikely(!adapter->clean_rx(adapter) & 2341 !e1000_clean_tx_irq(adapter))) 2342 break; 2343 ··· 2363 int work_done = 0; 2364 2365 tx_cleaned = e1000_clean_tx_irq(adapter); 2366 + adapter->clean_rx(adapter, &work_done, work_to_do); 2367 2368 *budget -= work_done; 2369 netdev->quota -= work_done; ··· 2501 2502 /** 2503 * e1000_rx_checksum - Receive Checksum Offload for 82543 2504 + * @adapter: board private structure 2505 + * @status_err: receive descriptor status and error fields 2506 + * @csum: receive descriptor csum field 2507 + * @sk_buff: socket buffer with received data 2508 **/ 2509 2510 static inline void 2511 e1000_rx_checksum(struct e1000_adapter *adapter, 2512 + uint32_t status_err, uint32_t csum, 2513 + struct sk_buff *skb) 2514 { 
2515 + uint16_t status = (uint16_t)status_err; 2516 + uint8_t errors = (uint8_t)(status_err >> 24); 2517 + skb->ip_summed = CHECKSUM_NONE; 2518 + 2519 /* 82543 or newer only */ 2520 + if(unlikely(adapter->hw.mac_type < e1000_82543)) return; 2521 /* Ignore Checksum bit is set */ 2522 + if(unlikely(status & E1000_RXD_STAT_IXSM)) return; 2523 + /* TCP/UDP checksum error bit is set */ 2524 + if(unlikely(errors & E1000_RXD_ERR_TCPE)) { 2525 + /* let the stack verify checksum errors */ 2526 + adapter->hw_csum_err++; 2527 return; 2528 } 2529 + /* TCP/UDP Checksum has not been calculated */ 2530 + if(adapter->hw.mac_type <= e1000_82547_rev_2) { 2531 + if(!(status & E1000_RXD_STAT_TCPCS)) 2532 + return; 2533 } else { 2534 + if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) 2535 + return; 2536 + } 2537 + /* It must be a TCP or UDP packet with a valid checksum */ 2538 + if (likely(status & E1000_RXD_STAT_TCPCS)) { 2539 /* TCP checksum is good */ 2540 skb->ip_summed = CHECKSUM_UNNECESSARY; 2541 + } else if (adapter->hw.mac_type > e1000_82547_rev_2) { 2542 + /* IP fragment with UDP payload */ 2543 + /* Hardware complements the payload checksum, so we undo it 2544 + * and then put the value in host order for further stack use. 2545 + */ 2546 + csum = ntohl(csum ^ 0xFFFF); 2547 + skb->csum = csum; 2548 + skb->ip_summed = CHECKSUM_HW; 2549 } 2550 + adapter->hw_csum_good++; 2551 } 2552 2553 /** 2554 + * e1000_clean_rx_irq - Send received data up the network stack; legacy 2555 * @adapter: board private structure 2556 **/ 2557 ··· 2608 skb_put(skb, length - ETHERNET_FCS_SIZE); 2609 2610 /* Receive Checksum Offload */ 2611 + e1000_rx_checksum(adapter, 2612 + (uint32_t)(rx_desc->status) | 2613 + ((uint32_t)(rx_desc->errors) << 24), 2614 + rx_desc->csum, skb); 2615 skb->protocol = eth_type_trans(skb, netdev); 2616 #ifdef CONFIG_E1000_NAPI 2617 if(unlikely(adapter->vlgrp && 2618 (rx_desc->status & E1000_RXD_STAT_VP))) { 2619 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 2620 + le16_to_cpu(rx_desc->special) & 2621 + E1000_RXD_SPC_VLAN_MASK); 2622 } else { 2623 netif_receive_skb(skb); 2624 } ··· 2639 2640 rx_desc = E1000_RX_DESC(*rx_ring, i); 2641 } 2642 rx_ring->next_to_clean = i; 2643 + adapter->alloc_rx_buf(adapter); 2644 2645 return cleaned; 2646 } 2647 2648 /** 2649 + * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split 2650 + * @adapter: board private structure 2651 + **/ 2652 + 2653 + static boolean_t 2654 + #ifdef CONFIG_E1000_NAPI 2655 + e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done, 2656 + int work_to_do) 2657 + #else 2658 + e1000_clean_rx_irq_ps(struct e1000_adapter *adapter) 2659 + #endif 2660 + { 2661 + struct e1000_desc_ring *rx_ring = &adapter->rx_ring; 2662 + union e1000_rx_desc_packet_split *rx_desc; 2663 + struct net_device *netdev = adapter->netdev; 2664 + struct pci_dev *pdev = adapter->pdev; 2665 + struct e1000_buffer *buffer_info; 2666 + struct e1000_ps_page *ps_page; 2667 + struct e1000_ps_page_dma *ps_page_dma; 2668 + struct sk_buff *skb; 2669 + unsigned int i, j; 2670 + uint32_t length, staterr; 2671 + boolean_t cleaned = FALSE; 2672 + 2673 + i = rx_ring->next_to_clean; 2674 + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 2675 + staterr = rx_desc->wb.middle.status_error; 2676 + 2677 + while(staterr & E1000_RXD_STAT_DD) { 2678 + buffer_info = &rx_ring->buffer_info[i]; 2679 + ps_page = &rx_ring->ps_page[i]; 2680 + ps_page_dma = &rx_ring->ps_page_dma[i]; 2681 + #ifdef CONFIG_E1000_NAPI 2682 + if(unlikely(*work_done >= work_to_do)) 2683 + 
break; 2684 + (*work_done)++; 2685 + #endif 2686 + cleaned = TRUE; 2687 + pci_unmap_single(pdev, buffer_info->dma, 2688 + buffer_info->length, 2689 + PCI_DMA_FROMDEVICE); 2690 + 2691 + skb = buffer_info->skb; 2692 + 2693 + if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) { 2694 + E1000_DBG("%s: Packet Split buffers didn't pick up" 2695 + " the full packet\n", netdev->name); 2696 + dev_kfree_skb_irq(skb); 2697 + goto next_desc; 2698 + } 2699 + 2700 + if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 2701 + dev_kfree_skb_irq(skb); 2702 + goto next_desc; 2703 + } 2704 + 2705 + length = le16_to_cpu(rx_desc->wb.middle.length0); 2706 + 2707 + if(unlikely(!length)) { 2708 + E1000_DBG("%s: Last part of the packet spanning" 2709 + " multiple descriptors\n", netdev->name); 2710 + dev_kfree_skb_irq(skb); 2711 + goto next_desc; 2712 + } 2713 + 2714 + /* Good Receive */ 2715 + skb_put(skb, length); 2716 + 2717 + for(j = 0; j < PS_PAGE_BUFFERS; j++) { 2718 + if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) 2719 + break; 2720 + 2721 + pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], 2722 + PAGE_SIZE, PCI_DMA_FROMDEVICE); 2723 + ps_page_dma->ps_page_dma[j] = 0; 2724 + skb_shinfo(skb)->frags[j].page = 2725 + ps_page->ps_page[j]; 2726 + ps_page->ps_page[j] = NULL; 2727 + skb_shinfo(skb)->frags[j].page_offset = 0; 2728 + skb_shinfo(skb)->frags[j].size = length; 2729 + skb_shinfo(skb)->nr_frags++; 2730 + skb->len += length; 2731 + skb->data_len += length; 2732 + } 2733 + 2734 + e1000_rx_checksum(adapter, staterr, 2735 + rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); 2736 + skb->protocol = eth_type_trans(skb, netdev); 2737 + 2738 + #ifdef HAVE_RX_ZERO_COPY 2739 + if(likely(rx_desc->wb.upper.header_status & 2740 + E1000_RXDPS_HDRSTAT_HDRSP)) 2741 + skb_shinfo(skb)->zero_copy = TRUE; 2742 + #endif 2743 + #ifdef CONFIG_E1000_NAPI 2744 + if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 2745 + vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 2746 + le16_to_cpu(rx_desc->wb.middle.vlan & 2747 + E1000_RXD_SPC_VLAN_MASK)); 2748 + } else { 2749 + netif_receive_skb(skb); 2750 + } 2751 + #else /* CONFIG_E1000_NAPI */ 2752 + if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 2753 + vlan_hwaccel_rx(skb, adapter->vlgrp, 2754 + le16_to_cpu(rx_desc->wb.middle.vlan & 2755 + E1000_RXD_SPC_VLAN_MASK)); 2756 + } else { 2757 + netif_rx(skb); 2758 + } 2759 + #endif /* CONFIG_E1000_NAPI */ 2760 + netdev->last_rx = jiffies; 2761 + 2762 + next_desc: 2763 + rx_desc->wb.middle.status_error &= ~0xFF; 2764 + buffer_info->skb = NULL; 2765 + if(unlikely(++i == rx_ring->count)) i = 0; 2766 + 2767 + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 2768 + staterr = rx_desc->wb.middle.status_error; 2769 + } 2770 + rx_ring->next_to_clean = i; 2771 + adapter->alloc_rx_buf(adapter); 2772 + 2773 + return cleaned; 2774 + } 2775 + 2776 + /** 2777 + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 2778 * @adapter: address of board private structure 2779 **/ 2780 ··· 2749 buffer_info = &rx_ring->buffer_info[i]; 2750 } 2751 2752 + rx_ring->next_to_use = i; 2753 + } 2754 + 2755 + /** 2756 + * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split 2757 + * @adapter: address of board private structure 2758 + **/ 2759 + 2760 + static void 2761 + e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter) 2762 + { 2763 + struct e1000_desc_ring *rx_ring = &adapter->rx_ring; 2764 + struct net_device *netdev = adapter->netdev; 2765 + struct pci_dev *pdev = adapter->pdev; 2766 + union 
e1000_rx_desc_packet_split *rx_desc; 2767 + struct e1000_buffer *buffer_info; 2768 + struct e1000_ps_page *ps_page; 2769 + struct e1000_ps_page_dma *ps_page_dma; 2770 + struct sk_buff *skb; 2771 + unsigned int i, j; 2772 + 2773 + i = rx_ring->next_to_use; 2774 + buffer_info = &rx_ring->buffer_info[i]; 2775 + ps_page = &rx_ring->ps_page[i]; 2776 + ps_page_dma = &rx_ring->ps_page_dma[i]; 2777 + 2778 + while(!buffer_info->skb) { 2779 + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 2780 + 2781 + for(j = 0; j < PS_PAGE_BUFFERS; j++) { 2782 + if(unlikely(!ps_page->ps_page[j])) { 2783 + ps_page->ps_page[j] = 2784 + alloc_page(GFP_ATOMIC); 2785 + if(unlikely(!ps_page->ps_page[j])) 2786 + goto no_buffers; 2787 + ps_page_dma->ps_page_dma[j] = 2788 + pci_map_page(pdev, 2789 + ps_page->ps_page[j], 2790 + 0, PAGE_SIZE, 2791 + PCI_DMA_FROMDEVICE); 2792 + } 2793 + /* Refresh the desc even if buffer_addrs didn't 2794 + * change because each write-back erases this info. 2795 + */ 2796 + rx_desc->read.buffer_addr[j+1] = 2797 + cpu_to_le64(ps_page_dma->ps_page_dma[j]); 2798 + } 2799 + 2800 + skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); 2801 + 2802 + if(unlikely(!skb)) 2803 + break; 2804 + 2805 + /* Make buffer alignment 2 beyond a 16 byte boundary 2806 + * this will result in a 16 byte aligned IP header after 2807 + * the 14 byte MAC header is removed 2808 + */ 2809 + skb_reserve(skb, NET_IP_ALIGN); 2810 + 2811 + skb->dev = netdev; 2812 + 2813 + buffer_info->skb = skb; 2814 + buffer_info->length = adapter->rx_ps_bsize0; 2815 + buffer_info->dma = pci_map_single(pdev, skb->data, 2816 + adapter->rx_ps_bsize0, 2817 + PCI_DMA_FROMDEVICE); 2818 + 2819 + rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 2820 + 2821 + if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { 2822 + /* Force memory writes to complete before letting h/w 2823 + * know there are new descriptors to fetch. (Only 2824 + * applicable for weak-ordered memory model archs, 2825 + * such as IA-64). */ 2826 + wmb(); 2827 + /* Hardware increments by 16 bytes, but packet split 2828 + * descriptors are 32 bytes...so we increment tail 2829 + * twice as much. 
2830 + */ 2831 + E1000_WRITE_REG(&adapter->hw, RDT, i<<1); 2832 + } 2833 + 2834 + if(unlikely(++i == rx_ring->count)) i = 0; 2835 + buffer_info = &rx_ring->buffer_info[i]; 2836 + ps_page = &rx_ring->ps_page[i]; 2837 + ps_page_dma = &rx_ring->ps_page_dma[i]; 2838 + } 2839 + 2840 + no_buffers: 2841 rx_ring->next_to_use = i; 2842 } 2843 ··· 2986 rctl |= E1000_RCTL_VFE; 2987 rctl &= ~E1000_RCTL_CFIEN; 2988 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 2989 + e1000_update_mng_vlan(adapter); 2990 } else { 2991 /* disable VLAN tag insert/strip */ 2992 ctrl = E1000_READ_REG(&adapter->hw, CTRL); ··· 2996 rctl = E1000_READ_REG(&adapter->hw, RCTL); 2997 rctl &= ~E1000_RCTL_VFE; 2998 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 2999 + if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) { 3000 + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 3001 + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 3002 + } 3003 } 3004 3005 e1000_irq_enable(adapter); ··· 3006 { 3007 struct e1000_adapter *adapter = netdev->priv; 3008 uint32_t vfta, index; 3009 + if((adapter->hw.mng_cookie.status & 3010 + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 3011 + (vid == adapter->mng_vlan_id)) 3012 + return; 3013 /* add VID to filter table */ 3014 index = (vid >> 5) & 0x7F; 3015 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); ··· 3027 3028 e1000_irq_enable(adapter); 3029 3030 + if((adapter->hw.mng_cookie.status & 3031 + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 3032 + (vid == adapter->mng_vlan_id)) 3033 + return; 3034 /* remove VID from filter table */ 3035 index = (vid >> 5) & 0x7F; 3036 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); ··· 3102 { 3103 struct net_device *netdev = pci_get_drvdata(pdev); 3104 struct e1000_adapter *adapter = netdev->priv; 3105 + uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm; 3106 uint32_t wufc = adapter->wol; 3107 3108 netif_device_detach(netdev); ··· 3144 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext); 3145 } 3146 3147 + /* Allow time for pending master requests to run */ 3148 + e1000_disable_pciex_master(&adapter->hw); 3149 + 3150 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); 3151 E1000_WRITE_REG(&adapter->hw, WUFC, wufc); 3152 pci_enable_wake(pdev, 3, 1); ··· 3168 } 3169 } 3170 3171 + switch(adapter->hw.mac_type) { 3172 + case e1000_82573: 3173 + swsm = E1000_READ_REG(&adapter->hw, SWSM); 3174 + E1000_WRITE_REG(&adapter->hw, SWSM, 3175 + swsm & ~E1000_SWSM_DRV_LOAD); 3176 + break; 3177 + default: 3178 + break; 3179 + } 3180 + 3181 pci_disable_device(pdev); 3182 3183 state = (state > 0) ? 3 : 0; ··· 3182 { 3183 struct net_device *netdev = pci_get_drvdata(pdev); 3184 struct e1000_adapter *adapter = netdev->priv; 3185 + uint32_t manc, ret, swsm; 3186 3187 pci_set_power_state(pdev, 0); 3188 pci_restore_state(pdev); ··· 3207 E1000_WRITE_REG(&adapter->hw, MANC, manc); 3208 } 3209 3210 + switch(adapter->hw.mac_type) { 3211 + case e1000_82573: 3212 + swsm = E1000_READ_REG(&adapter->hw, SWSM); 3213 + E1000_WRITE_REG(&adapter->hw, SWSM, 3214 + swsm | E1000_SWSM_DRV_LOAD); 3215 + break; 3216 + default: 3217 + break; 3218 + } 3219 + 3220 return 0; 3221 } 3222 #endif 3223 #ifdef CONFIG_NET_POLL_CONTROLLER 3224 /* 3225 * Polling 'interrupt' - used by things like netconsole to send skbs
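One detail of the reworked receive path that is easy to miss in the diff is that the legacy and packet-split clean-up routines now share a single e1000_rx_checksum(): the packet-split path passes the 32-bit status/error word straight from the write-back descriptor, while the legacy path builds an equivalent word from its separate 8-bit status and error fields. The stand-alone sketch below shows that the packing done by the legacy caller and the unpacking done inside the helper round-trip cleanly; the bit values used here are arbitrary placeholders, not the real descriptor bit definitions.

/* illustration only: build with  cc -o staterr staterr.c */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

int main(void)
{
        uint8_t status = 0x03;   /* arbitrary example status bits */
        uint8_t errors = 0x20;   /* arbitrary example error bits */

        /* pack as the legacy clean-up path does before calling the helper */
        uint32_t status_err = (uint32_t)status | ((uint32_t)errors << 24);

        /* unpack as e1000_rx_checksum() does on entry */
        uint16_t s = (uint16_t)status_err;
        uint8_t  e = (uint8_t)(status_err >> 24);

        assert(s == status && e == errors);
        printf("status_err=0x%08x -> status=0x%02x errors=0x%02x\n",
               (unsigned)status_err, (unsigned)s, (unsigned)e);
        return 0;
}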
+23
drivers/net/e1000/e1000_osdep.h
··· 101 (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ 102 ((offset) << 2))) 103 104 #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) 105 106 #endif /* _E1000_OSDEP_H_ */
··· 101 (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ 102 ((offset) << 2))) 103 104 + #define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY 105 + #define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY 106 + 107 + #define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ 108 + writew((value), ((a)->hw_addr + \ 109 + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ 110 + ((offset) << 1)))) 111 + 112 + #define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ 113 + readw((a)->hw_addr + \ 114 + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ 115 + ((offset) << 1))) 116 + 117 + #define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ 118 + writeb((value), ((a)->hw_addr + \ 119 + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ 120 + (offset)))) 121 + 122 + #define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ 123 + readb((a)->hw_addr + \ 124 + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ 125 + (offset))) 126 + 127 #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) 128 129 #endif /* _E1000_OSDEP_H_ */
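For reference, the byte- and word-wide register-array accessors added to e1000_osdep.h differ from the existing dword accessor only in how the array offset is scaled before it is added to the mapped register address: by 4 for dwords, by 2 for words, and not at all for bytes. The following is a small model of that address arithmetic only, not the driver macros themselves; the base address and register offset are hypothetical values chosen for the printout.

/* illustration only: build with  cc -o regaddr regaddr.c */
#include <stdint.h>
#include <stdio.h>

/* model of the offset scaling used by the array accessors */
static uintptr_t reg_array_addr(uintptr_t hw_addr, uint32_t reg,
                                uint32_t offset, unsigned int shift)
{
        return hw_addr + reg + ((uintptr_t)offset << shift);
}

int main(void)
{
        uintptr_t hw_addr = 0xd0000000; /* hypothetical mapped BAR */
        uint32_t reg = 0x5200;          /* hypothetical array register base */

        printf("dword[3] at %#lx\n",
               (unsigned long)reg_array_addr(hw_addr, reg, 3, 2));
        printf("word[3]  at %#lx\n",
               (unsigned long)reg_array_addr(hw_addr, reg, 3, 1));
        printf("byte[3]  at %#lx\n",
               (unsigned long)reg_array_addr(hw_addr, reg, 3, 0));
        return 0;
}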