e1000e: Add support for BM PHYs on ICH9

This patch adds support for the BM PHY, a new PHY model being used
on ICH9-based implementations.

This new PHY exposes issues in the ICH9 silicon when receiving
jumbo frames large enough to use more than a certain portion of the
Rx FIFO, which unfortunately breaks packet-split jumbo receives.
For this reason we re-introduce (for affected adapters only) the
jumbo single-skb receive routine, so that people who wish to use
jumbo frames on these ICH9 platforms can still do so.
Part of this problem has to do with CPU sleep states, and to make
sure that all the wake-up timings are correct we force them with
the recently merged pm_qos infrastructure written by Mark Gross
(see http://lkml.org/lkml/2007/10/4/400).
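
The resulting pattern, as used in the netdev.c hunk below, is to
register a default CPU DMA latency requirement at module load, tighten
it (to 55 usec) while jumbo frames with early receive are active, and
remove it again at unload:

    pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
                           PM_QOS_DEFAULT_VALUE);       /* module init */
    pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
                              e1000e_driver_name, 55);  /* jumbo + ERT on */
    pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
                              e1000e_driver_name);      /* module exit */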

To make the code read a bit easier we introduce a FLAG_IS_ICH flag
so that we do not need to repeat MAC type checks throughout the code.
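
For example, the register tests in ethtool.c change from explicit MAC
type checks to a single flag test:

    /* before */
    if ((mac->type != e1000_ich8lan) && (mac->type != e1000_ich9lan))
            REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);

    /* after */
    if (!(adapter->flags & FLAG_IS_ICH))
            REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);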

Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>

Authored by Bruce Allan and committed by Jeff Garzik (97ac8cae e284e5c6)

+748 -21
+10  drivers/net/e1000e/defines.h
··· 648 #define IFE_E_PHY_ID 0x02A80330 649 #define IFE_PLUS_E_PHY_ID 0x02A80320 650 #define IFE_C_E_PHY_ID 0x02A80310 651 652 /* M88E1000 Specific Registers */ 653 #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ ··· 702 /* M88EC018 Rev 2 specific DownShift settings */ 703 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 704 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 705 706 /* 707 * Bits...
··· 648 #define IFE_E_PHY_ID 0x02A80330 649 #define IFE_PLUS_E_PHY_ID 0x02A80320 650 #define IFE_C_E_PHY_ID 0x02A80310 651 + #define BME1000_E_PHY_ID 0x01410CB0 652 + #define BME1000_E_PHY_ID_R2 0x01410CB1 653 654 /* M88E1000 Specific Registers */ 655 #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ ··· 700 /* M88EC018 Rev 2 specific DownShift settings */ 701 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 702 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 703 + 704 + /* BME1000 PHY Specific Control Register */ 705 + #define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ 706 + 707 + 708 + #define PHY_PAGE_SHIFT 5 709 + #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ 710 + ((reg) & MAX_PHY_REG_ADDRESS)) 711 712 /* 713 * Bits...
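
The new PHY_REG() macro packs a page number and a register number into
a single offset (MAX_PHY_REG_ADDRESS is 0x1F); two values worked out
from the definitions above:

    u32 a = PHY_REG(769, 17); /* (769 << 5) | 17 = 0x6031 */
    u32 b = PHY_REG(800, 1);  /* (800 << 5) | 1  = 0x6401, i.e. BM_WUC */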
+6 -1  drivers/net/e1000e/e1000.h
··· 127 /* arrays of page information for packet split */ 128 struct e1000_ps_page *ps_pages; 129 }; 130 - 131 }; 132 133 struct e1000_ring { ··· 304 #define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) 305 #define FLAG_HAS_SWSM_ON_LOAD (1 << 6) 306 #define FLAG_HAS_JUMBO_FRAMES (1 << 7) 307 #define FLAG_HAS_SMART_POWER_DOWN (1 << 11) 308 #define FLAG_IS_QUAD_PORT_A (1 << 12) 309 #define FLAG_IS_QUAD_PORT (1 << 13) ··· 387 bool state); 388 extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); 389 extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); 390 391 extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); 392 extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); ··· 445 extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); 446 extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); 447 extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); 448 extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); 449 extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); 450 extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
··· 127 /* arrays of page information for packet split */ 128 struct e1000_ps_page *ps_pages; 129 }; 130 + struct page *page; 131 }; 132 133 struct e1000_ring { ··· 304 #define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) 305 #define FLAG_HAS_SWSM_ON_LOAD (1 << 6) 306 #define FLAG_HAS_JUMBO_FRAMES (1 << 7) 307 + #define FLAG_IS_ICH (1 << 9) 308 #define FLAG_HAS_SMART_POWER_DOWN (1 << 11) 309 #define FLAG_IS_QUAD_PORT_A (1 << 12) 310 #define FLAG_IS_QUAD_PORT (1 << 13) ··· 386 bool state); 387 extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); 388 extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); 389 + extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw); 390 391 extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); 392 extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); ··· 443 extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); 444 extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); 445 extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); 446 + extern s32 e1000e_determine_phy_address(struct e1000_hw *hw); 447 + extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); 448 + extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); 449 extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); 450 extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); 451 extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+28 -11  drivers/net/e1000e/ethtool.c
··· 803 /* restore previous status */ 804 ew32(STATUS, before); 805 806 - if ((mac->type != e1000_ich8lan) && 807 - (mac->type != e1000_ich9lan)) { 808 REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); 809 REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); 810 REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); ··· 823 824 REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); 825 826 - before = (((mac->type == e1000_ich8lan) || 827 - (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE); 828 REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); 829 REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); 830 831 REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); 832 REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 833 - if ((mac->type != e1000_ich8lan) && 834 - (mac->type != e1000_ich9lan)) 835 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); 836 REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 837 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); ··· 908 909 /* Test each interrupt */ 910 for (i = 0; i < 10; i++) { 911 - 912 - if (((adapter->hw.mac.type == e1000_ich8lan) || 913 - (adapter->hw.mac.type == e1000_ich9lan)) && i == 8) 914 continue; 915 916 /* Interrupt to test */ ··· 1179 struct e1000_hw *hw = &adapter->hw; 1180 u32 ctrl_reg = 0; 1181 u32 stat_reg = 0; 1182 1183 hw->mac.autoneg = 0; 1184 ··· 1207 E1000_CTRL_SPD_100 |/* Force Speed to 100 */ 1208 E1000_CTRL_FD); /* Force Duplex to FULL */ 1209 break; 1210 default: 1211 /* force 1000, set loopback */ 1212 e1e_wphy(hw, PHY_CONTROL, 0x4140); ··· 1242 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1243 E1000_CTRL_FD); /* Force Duplex to FULL */ 1244 1245 - if ((adapter->hw.mac.type == e1000_ich8lan) || 1246 - (adapter->hw.mac.type == e1000_ich9lan)) 1247 ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ 1248 } 1249
··· 803 /* restore previous status */ 804 ew32(STATUS, before); 805 806 + if (!(adapter->flags & FLAG_IS_ICH)) { 807 REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); 808 REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); 809 REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); ··· 824 825 REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); 826 827 + before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE); 828 REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); 829 REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); 830 831 REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); 832 REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 833 + if (!(adapter->flags & FLAG_IS_ICH)) 834 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); 835 REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 836 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); ··· 911 912 /* Test each interrupt */ 913 for (i = 0; i < 10; i++) { 914 + if ((adapter->flags & FLAG_IS_ICH) && (i == 8)) 915 continue; 916 917 /* Interrupt to test */ ··· 1184 struct e1000_hw *hw = &adapter->hw; 1185 u32 ctrl_reg = 0; 1186 u32 stat_reg = 0; 1187 + u16 phy_reg = 0; 1188 1189 hw->mac.autoneg = 0; 1190 ··· 1211 E1000_CTRL_SPD_100 |/* Force Speed to 100 */ 1212 E1000_CTRL_FD); /* Force Duplex to FULL */ 1213 break; 1214 + case e1000_phy_bm: 1215 + /* Set Default MAC Interface speed to 1GB */ 1216 + e1e_rphy(hw, PHY_REG(2, 21), &phy_reg); 1217 + phy_reg &= ~0x0007; 1218 + phy_reg |= 0x006; 1219 + e1e_wphy(hw, PHY_REG(2, 21), phy_reg); 1220 + /* Assert SW reset for above settings to take effect */ 1221 + e1000e_commit_phy(hw); 1222 + mdelay(1); 1223 + /* Force Full Duplex */ 1224 + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); 1225 + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); 1226 + /* Set Link Up (in force link) */ 1227 + e1e_rphy(hw, PHY_REG(776, 16), &phy_reg); 1228 + e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040); 1229 + /* Force Link */ 1230 + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); 1231 + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040); 1232 + /* Set Early Link Enable */ 1233 + e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); 1234 + e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); 1235 + /* fall through */ 1236 default: 1237 /* force 1000, set loopback */ 1238 e1e_wphy(hw, PHY_CONTROL, 0x4140); ··· 1224 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1225 E1000_CTRL_FD); /* Force Duplex to FULL */ 1226 1227 + if (adapter->flags & FLAG_IS_ICH) 1228 ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ 1229 } 1230
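
The BM loopback setup above is a series of read-modify-write accesses
to paged PHY registers; the recurring pattern could be factored out as
below (e1e_set_phy_bits is a hypothetical helper, not part of this
patch):

    static s32 e1e_set_phy_bits(struct e1000_hw *hw, u32 reg, u16 bits)
    {
            u16 val;
            s32 ret = e1e_rphy(hw, reg, &val);    /* read current value */
            if (ret)
                    return ret;
            return e1e_wphy(hw, reg, val | bits); /* write back with bits set */
    }

    /* e.g. force full duplex: e1e_set_phy_bits(hw, PHY_REG(769, 16), 0x000C); */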
+22  drivers/net/e1000e/hw.h
··· 216 #define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ 217 #define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ 218 #define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ 219 220 #define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 221 #define IGP01E1000_PHY_POLARITY_MASK 0x0078 ··· 346 #define E1000_DEV_ID_ICH8_IFE_G 0x10C5 347 #define E1000_DEV_ID_ICH8_IGP_M 0x104D 348 #define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD 349 #define E1000_DEV_ID_ICH9_IGP_C 0x294C 350 #define E1000_DEV_ID_ICH9_IFE 0x10C0 351 #define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 352 #define E1000_DEV_ID_ICH9_IFE_G 0x10C2 353 354 #define E1000_FUNC_1 1 355 ··· 399 e1000_phy_gg82563, 400 e1000_phy_igp_3, 401 e1000_phy_ife, 402 }; 403 404 enum e1000_bus_width {
··· 216 #define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ 217 #define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ 218 #define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ 219 + #define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ 220 + #define IGP_PAGE_SHIFT 5 221 + #define PHY_REG_MASK 0x1F 222 + 223 + #define BM_WUC_PAGE 800 224 + #define BM_WUC_ADDRESS_OPCODE 0x11 225 + #define BM_WUC_DATA_OPCODE 0x12 226 + #define BM_WUC_ENABLE_PAGE 769 227 + #define BM_WUC_ENABLE_REG 17 228 + #define BM_WUC_ENABLE_BIT (1 << 2) 229 + #define BM_WUC_HOST_WU_BIT (1 << 4) 230 + 231 + #define BM_WUC PHY_REG(BM_WUC_PAGE, 1) 232 + #define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) 233 + #define BM_WUS PHY_REG(BM_WUC_PAGE, 3) 234 235 #define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 236 #define IGP01E1000_PHY_POLARITY_MASK 0x0078 ··· 331 #define E1000_DEV_ID_ICH8_IFE_G 0x10C5 332 #define E1000_DEV_ID_ICH8_IGP_M 0x104D 333 #define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD 334 + #define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 335 + #define E1000_DEV_ID_ICH9_IGP_M 0x10BF 336 + #define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB 337 #define E1000_DEV_ID_ICH9_IGP_C 0x294C 338 #define E1000_DEV_ID_ICH9_IFE 0x10C0 339 #define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 340 #define E1000_DEV_ID_ICH9_IFE_G 0x10C2 341 + #define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC 342 + #define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD 343 + #define E1000_DEV_ID_ICH10_R_BM_V 0x10CE 344 345 #define E1000_FUNC_1 1 346 ··· 378 e1000_phy_gg82563, 379 e1000_phy_igp_3, 380 e1000_phy_ife, 381 + e1000_phy_bm, 382 }; 383 384 enum e1000_bus_width {
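
On the access side (see the phy.c hunk further below), the packed
offset is split back into its page and register parts; for BM_WUC this
recovers page 800, register 1:

    u32 page = offset >> IGP_PAGE_SHIFT;     /* BM_WUC: 0x6401 >> 5  = 800 */
    u16 reg  = ((u16)offset) & PHY_REG_MASK; /* BM_WUC: 0x6401 & 0x1F = 1 */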
+82 -1  drivers/net/e1000e/ich8lan.c
··· 38 * 82566DM Gigabit Network Connection 39 * 82566MC Gigabit Network Connection 40 * 82566MM Gigabit Network Connection 41 */ 42 43 #include <linux/netdevice.h> ··· 204 phy->addr = 1; 205 phy->reset_delay_us = 100; 206 207 phy->id = 0; 208 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && 209 (i++ < 100)) { ··· 237 case IFE_C_E_PHY_ID: 238 phy->type = e1000_phy_ife; 239 phy->autoneg_mask = E1000_ALL_NOT_GIG; 240 break; 241 default: 242 return -E1000_ERR_PHY; ··· 690 return e1000_get_phy_info_ife_ich8lan(hw); 691 break; 692 case e1000_phy_igp_3: 693 return e1000e_get_phy_info_igp(hw); 694 break; 695 default: ··· 755 s32 ret_val = 0; 756 u16 data; 757 758 - if (phy->type != e1000_phy_igp_3) 759 return ret_val; 760 761 phy_ctrl = er32(PHY_CTRL); ··· 1945 ret_val = e1000e_copper_link_setup_igp(hw); 1946 if (ret_val) 1947 return ret_val; 1948 } 1949 1950 return e1000e_setup_copper_link(hw); 1951 } 1952 ··· 2181 } 2182 2183 /** 2184 * e1000_cleanup_led_ich8lan - Restore the default LED operation 2185 * @hw: pointer to the HW structure 2186 * ··· 2326 struct e1000_info e1000_ich8_info = { 2327 .mac = e1000_ich8lan, 2328 .flags = FLAG_HAS_WOL 2329 | FLAG_RX_CSUM_ENABLED 2330 | FLAG_HAS_CTRLEXT_ON_LOAD 2331 | FLAG_HAS_AMT ··· 2342 struct e1000_info e1000_ich9_info = { 2343 .mac = e1000_ich9lan, 2344 .flags = FLAG_HAS_JUMBO_FRAMES 2345 | FLAG_HAS_WOL 2346 | FLAG_RX_CSUM_ENABLED 2347 | FLAG_HAS_CTRLEXT_ON_LOAD
··· 38 * 82566DM Gigabit Network Connection 39 * 82566MC Gigabit Network Connection 40 * 82566MM Gigabit Network Connection 41 + * 82567LM Gigabit Network Connection 42 + * 82567LF Gigabit Network Connection 43 + * 82567LM-2 Gigabit Network Connection 44 + * 82567LF-2 Gigabit Network Connection 45 + * 82567V-2 Gigabit Network Connection 46 + * 82562GT-3 10/100 Network Connection 47 */ 48 49 #include <linux/netdevice.h> ··· 198 phy->addr = 1; 199 phy->reset_delay_us = 100; 200 201 + /* 202 + * We may need to do this twice - once for IGP and if that fails, 203 + * we'll set BM func pointers and try again 204 + */ 205 + ret_val = e1000e_determine_phy_address(hw); 206 + if (ret_val) { 207 + hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; 208 + hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; 209 + ret_val = e1000e_determine_phy_address(hw); 210 + if (ret_val) 211 + return ret_val; 212 + } 213 + 214 phy->id = 0; 215 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && 216 (i++ < 100)) { ··· 218 case IFE_C_E_PHY_ID: 219 phy->type = e1000_phy_ife; 220 phy->autoneg_mask = E1000_ALL_NOT_GIG; 221 + break; 222 + case BME1000_E_PHY_ID: 223 + phy->type = e1000_phy_bm; 224 + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 225 + hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; 226 + hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; 227 + hw->phy.ops.commit_phy = e1000e_phy_sw_reset; 228 break; 229 default: 230 return -E1000_ERR_PHY; ··· 664 return e1000_get_phy_info_ife_ich8lan(hw); 665 break; 666 case e1000_phy_igp_3: 667 + case e1000_phy_bm: 668 return e1000e_get_phy_info_igp(hw); 669 break; 670 default: ··· 728 s32 ret_val = 0; 729 u16 data; 730 731 + if (phy->type == e1000_phy_ife) 732 return ret_val; 733 734 phy_ctrl = er32(PHY_CTRL); ··· 1918 ret_val = e1000e_copper_link_setup_igp(hw); 1919 if (ret_val) 1920 return ret_val; 1921 + } else if (hw->phy.type == e1000_phy_bm) { 1922 + ret_val = e1000e_copper_link_setup_m88(hw); 1923 + if (ret_val) 1924 + return ret_val; 1925 } 1926 1927 + if (hw->phy.type == e1000_phy_ife) { 1928 + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data); 1929 + if (ret_val) 1930 + return ret_val; 1931 + 1932 + reg_data &= ~IFE_PMC_AUTO_MDIX; 1933 + 1934 + switch (hw->phy.mdix) { 1935 + case 1: 1936 + reg_data &= ~IFE_PMC_FORCE_MDIX; 1937 + break; 1938 + case 2: 1939 + reg_data |= IFE_PMC_FORCE_MDIX; 1940 + break; 1941 + case 0: 1942 + default: 1943 + reg_data |= IFE_PMC_AUTO_MDIX; 1944 + break; 1945 + } 1946 + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); 1947 + if (ret_val) 1948 + return ret_val; 1949 + } 1950 return e1000e_setup_copper_link(hw); 1951 } 1952 ··· 2127 } 2128 2129 /** 2130 + * e1000e_disable_gig_wol_ich8lan - disable gig during WoL 2131 + * @hw: pointer to the HW structure 2132 + * 2133 + * During S0 to Sx transition, it is possible the link remains at gig 2134 + * instead of negotiating to a lower speed. Before going to Sx, set 2135 + * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation 2136 + * to a lower speed. 2137 + * 2138 + * Should only be called for ICH9 devices. 
2139 + **/ 2140 + void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) 2141 + { 2142 + u32 phy_ctrl; 2143 + 2144 + if (hw->mac.type == e1000_ich9lan) { 2145 + phy_ctrl = er32(PHY_CTRL); 2146 + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | 2147 + E1000_PHY_CTRL_GBE_DISABLE; 2148 + ew32(PHY_CTRL, phy_ctrl); 2149 + } 2150 + 2151 + return; 2152 + } 2153 + 2154 + /** 2155 * e1000_cleanup_led_ich8lan - Restore the default LED operation 2156 * @hw: pointer to the HW structure 2157 * ··· 2247 struct e1000_info e1000_ich8_info = { 2248 .mac = e1000_ich8lan, 2249 .flags = FLAG_HAS_WOL 2250 + | FLAG_IS_ICH 2251 | FLAG_RX_CSUM_ENABLED 2252 | FLAG_HAS_CTRLEXT_ON_LOAD 2253 | FLAG_HAS_AMT ··· 2262 struct e1000_info e1000_ich9_info = { 2263 .mac = e1000_ich9lan, 2264 .flags = FLAG_HAS_JUMBO_FRAMES 2265 + | FLAG_IS_ICH 2266 | FLAG_HAS_WOL 2267 | FLAG_RX_CSUM_ENABLED 2268 | FLAG_HAS_CTRLEXT_ON_LOAD
+322 -8  drivers/net/e1000e/netdev.c
··· 43 #include <linux/if_vlan.h> 44 #include <linux/cpu.h> 45 #include <linux/smp.h> 46 47 #include "e1000.h" 48 49 - #define DRV_VERSION "0.2.1" 50 char e1000e_driver_name[] = "e1000e"; 51 const char e1000e_driver_version[] = DRV_VERSION; 52 ··· 338 * twice as much. 339 */ 340 writel(i<<1, adapter->hw.hw_addr + rx_ring->tail); 341 } 342 } 343 ··· 867 } 868 869 /** 870 * e1000_clean_rx_ring - Free Rx Buffers per Queue 871 * @adapter: board private structure 872 **/ ··· 1066 pci_unmap_single(pdev, buffer_info->dma, 1067 adapter->rx_buffer_len, 1068 PCI_DMA_FROMDEVICE); 1069 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) 1070 pci_unmap_single(pdev, buffer_info->dma, 1071 adapter->rx_ps_bsize0, 1072 PCI_DMA_FROMDEVICE); 1073 buffer_info->dma = 0; 1074 } 1075 1076 if (buffer_info->skb) { ··· 2028 * a lot of memory, since we allocate 3 pages at all times 2029 * per packet. 2030 */ 2031 - adapter->rx_ps_pages = 0; 2032 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 2033 - if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) 2034 adapter->rx_ps_pages = pages; 2035 2036 if (adapter->rx_ps_pages) { 2037 /* Configure extra packet-split registers */ ··· 2094 sizeof(union e1000_rx_desc_packet_split); 2095 adapter->clean_rx = e1000_clean_rx_irq_ps; 2096 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 2097 } else { 2098 - rdlen = rx_ring->count * 2099 - sizeof(struct e1000_rx_desc); 2100 adapter->clean_rx = e1000_clean_rx_irq; 2101 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 2102 } ··· 2163 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 2164 */ 2165 if ((adapter->flags & FLAG_HAS_ERT) && 2166 - (adapter->netdev->mtu > ETH_DATA_LEN)) 2167 - ew32(ERT, E1000_ERT_2048); 2168 2169 /* Enable Receives */ 2170 ew32(RCTL, rctl); ··· 2446 2447 /* Allow time for pending master requests to run */ 2448 mac->ops.reset_hw(hw); 2449 ew32(WUC, 0); 2450 2451 if (mac->ops.init_hw(hw)) ··· 3768 * means we reserve 2 more, this pushes us to allocate from the next 3769 * larger slab size. 3770 * i.e. RXBUFFER_2048 --> size-4096 slab 3771 */ 3772 3773 if (max_frame <= 256) ··· 3926 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; 3927 ew32(CTRL_EXT, ctrl_ext); 3928 } 3929 3930 /* Allow time for pending master requests to run */ 3931 e1000e_disable_pcie_master(&adapter->hw); ··· 4596 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, 4597 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, 4598 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, 4599 4600 { } /* terminate list */ 4601 }; ··· 4637 printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", 4638 e1000e_driver_name); 4639 ret = pci_register_driver(&e1000_driver); 4640 - 4641 return ret; 4642 } 4643 module_init(e1000_init_module); ··· 4653 static void __exit e1000_exit_module(void) 4654 { 4655 pci_unregister_driver(&e1000_driver); 4656 } 4657 module_exit(e1000_exit_module); 4658
··· 43 #include <linux/if_vlan.h> 44 #include <linux/cpu.h> 45 #include <linux/smp.h> 46 + #include <linux/pm_qos_params.h> 47 48 #include "e1000.h" 49 50 + #define DRV_VERSION "0.3.3.3-k2" 51 char e1000e_driver_name[] = "e1000e"; 52 const char e1000e_driver_version[] = DRV_VERSION; 53 ··· 337 * twice as much. 338 */ 339 writel(i<<1, adapter->hw.hw_addr + rx_ring->tail); 340 + } 341 + } 342 + 343 + /** 344 + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 345 + * @adapter: address of board private structure 346 + * @rx_ring: pointer to receive ring structure 347 + * @cleaned_count: number of buffers to allocate this pass 348 + **/ 349 + 350 + static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, 351 + int cleaned_count) 352 + { 353 + struct net_device *netdev = adapter->netdev; 354 + struct pci_dev *pdev = adapter->pdev; 355 + struct e1000_rx_desc *rx_desc; 356 + struct e1000_ring *rx_ring = adapter->rx_ring; 357 + struct e1000_buffer *buffer_info; 358 + struct sk_buff *skb; 359 + unsigned int i; 360 + unsigned int bufsz = 256 - 361 + 16 /* for skb_reserve */ - 362 + NET_IP_ALIGN; 363 + 364 + i = rx_ring->next_to_use; 365 + buffer_info = &rx_ring->buffer_info[i]; 366 + 367 + while (cleaned_count--) { 368 + skb = buffer_info->skb; 369 + if (skb) { 370 + skb_trim(skb, 0); 371 + goto check_page; 372 + } 373 + 374 + skb = netdev_alloc_skb(netdev, bufsz); 375 + if (unlikely(!skb)) { 376 + /* Better luck next round */ 377 + adapter->alloc_rx_buff_failed++; 378 + break; 379 + } 380 + 381 + /* Make buffer alignment 2 beyond a 16 byte boundary 382 + * this will result in a 16 byte aligned IP header after 383 + * the 14 byte MAC header is removed 384 + */ 385 + skb_reserve(skb, NET_IP_ALIGN); 386 + 387 + buffer_info->skb = skb; 388 + check_page: 389 + /* allocate a new page if necessary */ 390 + if (!buffer_info->page) { 391 + buffer_info->page = alloc_page(GFP_ATOMIC); 392 + if (unlikely(!buffer_info->page)) { 393 + adapter->alloc_rx_buff_failed++; 394 + break; 395 + } 396 + } 397 + 398 + if (!buffer_info->dma) 399 + buffer_info->dma = pci_map_page(pdev, 400 + buffer_info->page, 0, 401 + PAGE_SIZE, 402 + PCI_DMA_FROMDEVICE); 403 + 404 + rx_desc = E1000_RX_DESC(*rx_ring, i); 405 + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 406 + 407 + if (unlikely(++i == rx_ring->count)) 408 + i = 0; 409 + buffer_info = &rx_ring->buffer_info[i]; 410 + } 411 + 412 + if (likely(rx_ring->next_to_use != i)) { 413 + rx_ring->next_to_use = i; 414 + if (unlikely(i-- == 0)) 415 + i = (rx_ring->count - 1); 416 + 417 + /* Force memory writes to complete before letting h/w 418 + * know there are new descriptors to fetch. (Only 419 + * applicable for weak-ordered memory model archs, 420 + * such as IA-64). 
*/ 421 + wmb(); 422 + writel(i, adapter->hw.hw_addr + rx_ring->tail); 423 } 424 } 425 ··· 783 } 784 785 /** 786 + * e1000_consume_page - helper function 787 + **/ 788 + static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, 789 + u16 length) 790 + { 791 + bi->page = NULL; 792 + skb->len += length; 793 + skb->data_len += length; 794 + skb->truesize += length; 795 + } 796 + 797 + /** 798 + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy 799 + * @adapter: board private structure 800 + * 801 + * the return value indicates whether actual cleaning was done, there 802 + * is no guarantee that everything was cleaned 803 + **/ 804 + 805 + static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 806 + int *work_done, int work_to_do) 807 + { 808 + struct net_device *netdev = adapter->netdev; 809 + struct pci_dev *pdev = adapter->pdev; 810 + struct e1000_ring *rx_ring = adapter->rx_ring; 811 + struct e1000_rx_desc *rx_desc, *next_rxd; 812 + struct e1000_buffer *buffer_info, *next_buffer; 813 + u32 length; 814 + unsigned int i; 815 + int cleaned_count = 0; 816 + bool cleaned = false; 817 + unsigned int total_rx_bytes=0, total_rx_packets=0; 818 + 819 + i = rx_ring->next_to_clean; 820 + rx_desc = E1000_RX_DESC(*rx_ring, i); 821 + buffer_info = &rx_ring->buffer_info[i]; 822 + 823 + while (rx_desc->status & E1000_RXD_STAT_DD) { 824 + struct sk_buff *skb; 825 + u8 status; 826 + 827 + if (*work_done >= work_to_do) 828 + break; 829 + (*work_done)++; 830 + 831 + status = rx_desc->status; 832 + skb = buffer_info->skb; 833 + buffer_info->skb = NULL; 834 + 835 + ++i; 836 + if (i == rx_ring->count) 837 + i = 0; 838 + next_rxd = E1000_RX_DESC(*rx_ring, i); 839 + prefetch(next_rxd); 840 + 841 + next_buffer = &rx_ring->buffer_info[i]; 842 + 843 + cleaned = true; 844 + cleaned_count++; 845 + pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE, 846 + PCI_DMA_FROMDEVICE); 847 + buffer_info->dma = 0; 848 + 849 + length = le16_to_cpu(rx_desc->length); 850 + 851 + /* errors is only valid for DD + EOP descriptors */ 852 + if (unlikely((status & E1000_RXD_STAT_EOP) && 853 + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { 854 + /* recycle both page and skb */ 855 + buffer_info->skb = skb; 856 + /* an error means any chain goes out the window 857 + * too */ 858 + if (rx_ring->rx_skb_top) 859 + dev_kfree_skb(rx_ring->rx_skb_top); 860 + rx_ring->rx_skb_top = NULL; 861 + goto next_desc; 862 + } 863 + 864 + #define rxtop rx_ring->rx_skb_top 865 + if (!(status & E1000_RXD_STAT_EOP)) { 866 + /* this descriptor is only the beginning (or middle) */ 867 + if (!rxtop) { 868 + /* this is the beginning of a chain */ 869 + rxtop = skb; 870 + skb_fill_page_desc(rxtop, 0, buffer_info->page, 871 + 0, length); 872 + } else { 873 + /* this is the middle of a chain */ 874 + skb_fill_page_desc(rxtop, 875 + skb_shinfo(rxtop)->nr_frags, 876 + buffer_info->page, 0, length); 877 + /* re-use the skb, only consumed the page */ 878 + buffer_info->skb = skb; 879 + } 880 + e1000_consume_page(buffer_info, rxtop, length); 881 + goto next_desc; 882 + } else { 883 + if (rxtop) { 884 + /* end of the chain */ 885 + skb_fill_page_desc(rxtop, 886 + skb_shinfo(rxtop)->nr_frags, 887 + buffer_info->page, 0, length); 888 + /* re-use the current skb, we only consumed the 889 + * page */ 890 + buffer_info->skb = skb; 891 + skb = rxtop; 892 + rxtop = NULL; 893 + e1000_consume_page(buffer_info, skb, length); 894 + } else { 895 + /* no chain, got EOP, this buf is the packet 896 + * copybreak to save the 
put_page/alloc_page */ 897 + if (length <= copybreak && 898 + skb_tailroom(skb) >= length) { 899 + u8 *vaddr; 900 + vaddr = kmap_atomic(buffer_info->page, 901 + KM_SKB_DATA_SOFTIRQ); 902 + memcpy(skb_tail_pointer(skb), vaddr, 903 + length); 904 + kunmap_atomic(vaddr, 905 + KM_SKB_DATA_SOFTIRQ); 906 + /* re-use the page, so don't erase 907 + * buffer_info->page */ 908 + skb_put(skb, length); 909 + } else { 910 + skb_fill_page_desc(skb, 0, 911 + buffer_info->page, 0, 912 + length); 913 + e1000_consume_page(buffer_info, skb, 914 + length); 915 + } 916 + } 917 + } 918 + 919 + /* Receive Checksum Offload XXX recompute due to CRC strip? */ 920 + e1000_rx_checksum(adapter, 921 + (u32)(status) | 922 + ((u32)(rx_desc->errors) << 24), 923 + le16_to_cpu(rx_desc->csum), skb); 924 + 925 + /* probably a little skewed due to removing CRC */ 926 + total_rx_bytes += skb->len; 927 + total_rx_packets++; 928 + 929 + /* eth type trans needs skb->data to point to something */ 930 + if (!pskb_may_pull(skb, ETH_HLEN)) { 931 + ndev_err(netdev, "pskb_may_pull failed.\n"); 932 + dev_kfree_skb(skb); 933 + goto next_desc; 934 + } 935 + 936 + e1000_receive_skb(adapter, netdev, skb, status, 937 + rx_desc->special); 938 + 939 + next_desc: 940 + rx_desc->status = 0; 941 + 942 + /* return some buffers to hardware, one at a time is too slow */ 943 + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 944 + adapter->alloc_rx_buf(adapter, cleaned_count); 945 + cleaned_count = 0; 946 + } 947 + 948 + /* use prefetched values */ 949 + rx_desc = next_rxd; 950 + buffer_info = next_buffer; 951 + } 952 + rx_ring->next_to_clean = i; 953 + 954 + cleaned_count = e1000_desc_unused(rx_ring); 955 + if (cleaned_count) 956 + adapter->alloc_rx_buf(adapter, cleaned_count); 957 + 958 + adapter->total_rx_bytes += total_rx_bytes; 959 + adapter->total_rx_packets += total_rx_packets; 960 + adapter->net_stats.rx_bytes += total_rx_bytes; 961 + adapter->net_stats.rx_packets += total_rx_packets; 962 + return cleaned; 963 + } 964 + 965 + /** 966 * e1000_clean_rx_ring - Free Rx Buffers per Queue 967 * @adapter: board private structure 968 **/ ··· 802 pci_unmap_single(pdev, buffer_info->dma, 803 adapter->rx_buffer_len, 804 PCI_DMA_FROMDEVICE); 805 + else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) 806 + pci_unmap_page(pdev, buffer_info->dma, 807 + PAGE_SIZE, 808 + PCI_DMA_FROMDEVICE); 809 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) 810 pci_unmap_single(pdev, buffer_info->dma, 811 adapter->rx_ps_bsize0, 812 PCI_DMA_FROMDEVICE); 813 buffer_info->dma = 0; 814 + } 815 + 816 + if (buffer_info->page) { 817 + put_page(buffer_info->page); 818 + buffer_info->page = NULL; 819 } 820 821 if (buffer_info->skb) { ··· 1755 * a lot of memory, since we allocate 3 pages at all times 1756 * per packet. 
1757 */ 1758 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 1759 + if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) && 1760 + (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) 1761 adapter->rx_ps_pages = pages; 1762 + else 1763 + adapter->rx_ps_pages = 0; 1764 1765 if (adapter->rx_ps_pages) { 1766 /* Configure extra packet-split registers */ ··· 1819 sizeof(union e1000_rx_desc_packet_split); 1820 adapter->clean_rx = e1000_clean_rx_irq_ps; 1821 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 1822 + } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { 1823 + rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); 1824 + adapter->clean_rx = e1000_clean_jumbo_rx_irq; 1825 + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; 1826 } else { 1827 + rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); 1828 adapter->clean_rx = e1000_clean_rx_irq; 1829 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 1830 } ··· 1885 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 1886 */ 1887 if ((adapter->flags & FLAG_HAS_ERT) && 1888 + (adapter->netdev->mtu > ETH_DATA_LEN)) { 1889 + u32 rxdctl = er32(RXDCTL(0)); 1890 + ew32(RXDCTL(0), rxdctl | 0x3); 1891 + ew32(ERT, E1000_ERT_2048 | (1 << 13)); 1892 + /* 1893 + * With jumbo frames and early-receive enabled, excessive 1894 + * C4->C2 latencies result in dropped transactions. 1895 + */ 1896 + pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, 1897 + e1000e_driver_name, 55); 1898 + } else { 1899 + pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, 1900 + e1000e_driver_name, 1901 + PM_QOS_DEFAULT_VALUE); 1902 + } 1903 1904 /* Enable Receives */ 1905 ew32(RCTL, rctl); ··· 2155 2156 /* Allow time for pending master requests to run */ 2157 mac->ops.reset_hw(hw); 2158 + 2159 + /* 2160 + * For parts with AMT enabled, let the firmware know 2161 + * that the network interface is in control 2162 + */ 2163 + if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw)) 2164 + e1000_get_hw_control(adapter); 2165 + 2166 ew32(WUC, 0); 2167 2168 if (mac->ops.init_hw(hw)) ··· 3469 * means we reserve 2 more, this pushes us to allocate from the next 3470 * larger slab size. 3471 * i.e. 
RXBUFFER_2048 --> size-4096 slab 3472 + * However with the new *_jumbo_rx* routines, jumbo receives will use 3473 + * fragmented skbs 3474 */ 3475 3476 if (max_frame <= 256) ··· 3625 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; 3626 ew32(CTRL_EXT, ctrl_ext); 3627 } 3628 + 3629 + if (adapter->flags & FLAG_IS_ICH) 3630 + e1000e_disable_gig_wol_ich8lan(&adapter->hw); 3631 3632 /* Allow time for pending master requests to run */ 3633 e1000e_disable_pcie_master(&adapter->hw); ··· 4292 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, 4293 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, 4294 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, 4295 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, 4296 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, 4297 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, 4298 + 4299 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, 4300 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, 4301 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, 4302 4303 { } /* terminate list */ 4304 }; ··· 4326 printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", 4327 e1000e_driver_name); 4328 ret = pci_register_driver(&e1000_driver); 4329 + pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name, 4330 + PM_QOS_DEFAULT_VALUE); 4331 + 4332 return ret; 4333 } 4334 module_init(e1000_init_module); ··· 4340 static void __exit e1000_exit_module(void) 4341 { 4342 pci_unregister_driver(&e1000_driver); 4343 + pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name); 4344 } 4345 module_exit(e1000_exit_module); 4346
+278  drivers/net/e1000e/phy.c
··· 34 static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); 35 static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); 36 static s32 e1000_wait_autoneg(struct e1000_hw *hw); 37 38 /* Cable length tables */ 39 static const u16 e1000_m88_cable_length_table[] = ··· 467 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; 468 if (phy->disable_polarity_correction == 1) 469 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 470 471 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 472 if (ret_val) ··· 1783 case IFE_C_E_PHY_ID: 1784 phy_type = e1000_phy_ife; 1785 break; 1786 default: 1787 phy_type = e1000_phy_unknown; 1788 break; 1789 } 1790 return phy_type; 1791 } 1792 1793 /**
··· 34 static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); 35 static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); 36 static s32 e1000_wait_autoneg(struct e1000_hw *hw); 37 + static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); 38 + static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, 39 + u16 *data, bool read); 40 41 /* Cable length tables */ 42 static const u16 e1000_m88_cable_length_table[] = ··· 464 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; 465 if (phy->disable_polarity_correction == 1) 466 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 467 + 468 + /* Enable downshift on BM (disabled by default) */ 469 + if (phy->type == e1000_phy_bm) 470 + phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; 471 472 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 473 if (ret_val) ··· 1776 case IFE_C_E_PHY_ID: 1777 phy_type = e1000_phy_ife; 1778 break; 1779 + case BME1000_E_PHY_ID: 1780 + case BME1000_E_PHY_ID_R2: 1781 + phy_type = e1000_phy_bm; 1782 + break; 1783 default: 1784 phy_type = e1000_phy_unknown; 1785 break; 1786 } 1787 return phy_type; 1788 + } 1789 + 1790 + /** 1791 + * e1000e_determine_phy_address - Determines PHY address. 1792 + * @hw: pointer to the HW structure 1793 + * 1794 + * This uses a trial and error method to loop through possible PHY 1795 + * addresses. It tests each by reading the PHY ID registers and 1796 + * checking for a match. 1797 + **/ 1798 + s32 e1000e_determine_phy_address(struct e1000_hw *hw) 1799 + { 1800 + s32 ret_val = -E1000_ERR_PHY_TYPE; 1801 + u32 phy_addr= 0; 1802 + u32 i = 0; 1803 + enum e1000_phy_type phy_type = e1000_phy_unknown; 1804 + 1805 + do { 1806 + for (phy_addr = 0; phy_addr < 4; phy_addr++) { 1807 + hw->phy.addr = phy_addr; 1808 + e1000e_get_phy_id(hw); 1809 + phy_type = e1000e_get_phy_type_from_id(hw->phy.id); 1810 + 1811 + /* 1812 + * If phy_type is valid, break - we found our 1813 + * PHY address 1814 + */ 1815 + if (phy_type != e1000_phy_unknown) { 1816 + ret_val = 0; 1817 + break; 1818 + } 1819 + } 1820 + i++; 1821 + } while ((ret_val != 0) && (i < 100)); 1822 + 1823 + return ret_val; 1824 + } 1825 + 1826 + /** 1827 + * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address 1828 + * @page: page to access 1829 + * 1830 + * Returns the phy address for the page requested. 1831 + **/ 1832 + static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) 1833 + { 1834 + u32 phy_addr = 2; 1835 + 1836 + if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) 1837 + phy_addr = 1; 1838 + 1839 + return phy_addr; 1840 + } 1841 + 1842 + /** 1843 + * e1000e_write_phy_reg_bm - Write BM PHY register 1844 + * @hw: pointer to the HW structure 1845 + * @offset: register offset to write to 1846 + * @data: data to write at register offset 1847 + * 1848 + * Acquires semaphore, if necessary, then writes the data to PHY register 1849 + * at the offset. Release any acquired semaphores before exiting. 
1850 + **/ 1851 + s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) 1852 + { 1853 + s32 ret_val; 1854 + u32 page_select = 0; 1855 + u32 page = offset >> IGP_PAGE_SHIFT; 1856 + u32 page_shift = 0; 1857 + 1858 + /* Page 800 works differently than the rest so it has its own func */ 1859 + if (page == BM_WUC_PAGE) { 1860 + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, 1861 + false); 1862 + goto out; 1863 + } 1864 + 1865 + ret_val = hw->phy.ops.acquire_phy(hw); 1866 + if (ret_val) 1867 + goto out; 1868 + 1869 + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 1870 + 1871 + if (offset > MAX_PHY_MULTI_PAGE_REG) { 1872 + /* 1873 + * Page select is register 31 for phy address 1 and 22 for 1874 + * phy address 2 and 3. Page select is shifted only for 1875 + * phy address 1. 1876 + */ 1877 + if (hw->phy.addr == 1) { 1878 + page_shift = IGP_PAGE_SHIFT; 1879 + page_select = IGP01E1000_PHY_PAGE_SELECT; 1880 + } else { 1881 + page_shift = 0; 1882 + page_select = BM_PHY_PAGE_SELECT; 1883 + } 1884 + 1885 + /* Page is shifted left, PHY expects (page x 32) */ 1886 + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, 1887 + (page << page_shift)); 1888 + if (ret_val) { 1889 + hw->phy.ops.release_phy(hw); 1890 + goto out; 1891 + } 1892 + } 1893 + 1894 + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 1895 + data); 1896 + 1897 + hw->phy.ops.release_phy(hw); 1898 + 1899 + out: 1900 + return ret_val; 1901 + } 1902 + 1903 + /** 1904 + * e1000e_read_phy_reg_bm - Read BM PHY register 1905 + * @hw: pointer to the HW structure 1906 + * @offset: register offset to be read 1907 + * @data: pointer to the read data 1908 + * 1909 + * Acquires semaphore, if necessary, then reads the PHY register at offset 1910 + * and storing the retrieved information in data. Release any acquired 1911 + * semaphores before exiting. 1912 + **/ 1913 + s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) 1914 + { 1915 + s32 ret_val; 1916 + u32 page_select = 0; 1917 + u32 page = offset >> IGP_PAGE_SHIFT; 1918 + u32 page_shift = 0; 1919 + 1920 + /* Page 800 works differently than the rest so it has its own func */ 1921 + if (page == BM_WUC_PAGE) { 1922 + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, 1923 + true); 1924 + goto out; 1925 + } 1926 + 1927 + ret_val = hw->phy.ops.acquire_phy(hw); 1928 + if (ret_val) 1929 + goto out; 1930 + 1931 + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 1932 + 1933 + if (offset > MAX_PHY_MULTI_PAGE_REG) { 1934 + /* 1935 + * Page select is register 31 for phy address 1 and 22 for 1936 + * phy address 2 and 3. Page select is shifted only for 1937 + * phy address 1. 
1938 + */ 1939 + if (hw->phy.addr == 1) { 1940 + page_shift = IGP_PAGE_SHIFT; 1941 + page_select = IGP01E1000_PHY_PAGE_SELECT; 1942 + } else { 1943 + page_shift = 0; 1944 + page_select = BM_PHY_PAGE_SELECT; 1945 + } 1946 + 1947 + /* Page is shifted left, PHY expects (page x 32) */ 1948 + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, 1949 + (page << page_shift)); 1950 + if (ret_val) { 1951 + hw->phy.ops.release_phy(hw); 1952 + goto out; 1953 + } 1954 + } 1955 + 1956 + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 1957 + data); 1958 + hw->phy.ops.release_phy(hw); 1959 + 1960 + out: 1961 + return ret_val; 1962 + } 1963 + 1964 + /** 1965 + * e1000_access_phy_wakeup_reg_bm - Read BM PHY wakeup register 1966 + * @hw: pointer to the HW structure 1967 + * @offset: register offset to be read or written 1968 + * @data: pointer to the data to read or write 1969 + * @read: determines if operation is read or write 1970 + * 1971 + * Acquires semaphore, if necessary, then reads the PHY register at offset 1972 + * and storing the retrieved information in data. Release any acquired 1973 + * semaphores before exiting. Note that procedure to read the wakeup 1974 + * registers are different. It works as such: 1975 + * 1) Set page 769, register 17, bit 2 = 1 1976 + * 2) Set page to 800 for host (801 if we were manageability) 1977 + * 3) Write the address using the address opcode (0x11) 1978 + * 4) Read or write the data using the data opcode (0x12) 1979 + * 5) Restore 769_17.2 to its original value 1980 + **/ 1981 + static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, 1982 + u16 *data, bool read) 1983 + { 1984 + s32 ret_val; 1985 + u16 reg = ((u16)offset) & PHY_REG_MASK; 1986 + u16 phy_reg = 0; 1987 + u8 phy_acquired = 1; 1988 + 1989 + 1990 + ret_val = hw->phy.ops.acquire_phy(hw); 1991 + if (ret_val) { 1992 + phy_acquired = 0; 1993 + goto out; 1994 + } 1995 + 1996 + /* All operations in this function are phy address 1 */ 1997 + hw->phy.addr = 1; 1998 + 1999 + /* Set page 769 */ 2000 + e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 2001 + (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); 2002 + 2003 + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg); 2004 + if (ret_val) 2005 + goto out; 2006 + 2007 + /* First clear bit 4 to avoid a power state change */ 2008 + phy_reg &= ~(BM_WUC_HOST_WU_BIT); 2009 + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); 2010 + if (ret_val) 2011 + goto out; 2012 + 2013 + /* Write bit 2 = 1, and clear bit 4 to 769_17 */ 2014 + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, 2015 + phy_reg | BM_WUC_ENABLE_BIT); 2016 + if (ret_val) 2017 + goto out; 2018 + 2019 + /* Select page 800 */ 2020 + ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 2021 + (BM_WUC_PAGE << IGP_PAGE_SHIFT)); 2022 + 2023 + /* Write the page 800 offset value using opcode 0x11 */ 2024 + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); 2025 + if (ret_val) 2026 + goto out; 2027 + 2028 + if (read) { 2029 + /* Read the page 800 value using opcode 0x12 */ 2030 + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, 2031 + data); 2032 + } else { 2033 + /* Read the page 800 value using opcode 0x12 */ 2034 + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, 2035 + *data); 2036 + } 2037 + 2038 + if (ret_val) 2039 + goto out; 2040 + 2041 + /* 2042 + * Restore 769_17.2 to its original value 2043 + * Set page 769 2044 + */ 2045 + e1000e_write_phy_reg_mdic(hw, 
IGP01E1000_PHY_PAGE_SELECT, 2046 + (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); 2047 + 2048 + /* Clear 769_17.2 */ 2049 + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); 2050 + 2051 + out: 2052 + if (phy_acquired == 1) 2053 + hw->phy.ops.release_phy(hw); 2054 + return ret_val; 2055 } 2056 2057 /**
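
Putting the pieces together, a caller reads a wakeup register through
the normal BM accessor and the page-800 path is taken automatically; a
minimal usage sketch:

    u16 wufc;
    s32 ret = e1000e_read_phy_reg_bm(hw, BM_WUFC, &wufc);
    /*
     * BM_WUFC sits on page 800, so the call is routed through
     * e1000_access_phy_wakeup_reg_bm(): set 769_17.2, select page 800,
     * write the register offset via opcode 0x11, read the data via
     * opcode 0x12, then restore 769_17.2.
     */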