Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates

This series contains updates to e1000, e1000e, igb, igbvf, ixgb, ixgbe,
ixgbevf and i40evf.

Mark fixes an issue with ixgbe and ixgbevf by adding a bit to indicate
when workqueues have been initialized. This prevents the register read
error handling from attempting to use them prior to that, which also
generates warnings. Checking for a detected removal after initializing
the work queues allows the probe function to return an error without
getting the workqueue involved. Further, if the error_detected
callback is entered before the workqueues are initialized, exit without
recovery since the device initialization was so truncated.

Francois Romieu provides several patches to all the drivers to remove
the open coded skb_cow_head.

Jakub Kicinski provides a fix for igb where last_rx_timestamp should be
updated only when Rx time stamp is read.

Mitch provides a fix for i40evf where a recent change broke the RSS LUT
programming causing it to be programmed with all 0's.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+91 -203
-124
drivers/net/ethernet/intel/e1000/e1000_hw.c
··· 115 */ 116 static s32 e1000_set_phy_type(struct e1000_hw *hw) 117 { 118 - e_dbg("e1000_set_phy_type"); 119 - 120 if (hw->mac_type == e1000_undefined) 121 return -E1000_ERR_PHY_TYPE; 122 ··· 156 { 157 u32 ret_val; 158 u16 phy_saved_data; 159 - 160 - e_dbg("e1000_phy_init_script"); 161 162 if (hw->phy_init_script) { 163 msleep(20); ··· 249 */ 250 s32 e1000_set_mac_type(struct e1000_hw *hw) 251 { 252 - e_dbg("e1000_set_mac_type"); 253 - 254 switch (hw->device_id) { 255 case E1000_DEV_ID_82542: 256 switch (hw->revision_id) { ··· 359 { 360 u32 status; 361 362 - e_dbg("e1000_set_media_type"); 363 - 364 if (hw->mac_type != e1000_82543) { 365 /* tbi_compatibility is only valid on 82543 */ 366 hw->tbi_compatibility_en = false; ··· 406 u32 manc; 407 u32 led_ctrl; 408 s32 ret_val; 409 - 410 - e_dbg("e1000_reset_hw"); 411 412 /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ 413 if (hw->mac_type == e1000_82542_rev2_0) { ··· 556 u32 mta_size; 557 u32 ctrl_ext; 558 559 - e_dbg("e1000_init_hw"); 560 - 561 /* Initialize Identification LED */ 562 ret_val = e1000_id_led_init(hw); 563 if (ret_val) { ··· 671 u16 eeprom_data; 672 s32 ret_val; 673 674 - e_dbg("e1000_adjust_serdes_amplitude"); 675 - 676 if (hw->media_type != e1000_media_type_internal_serdes) 677 return E1000_SUCCESS; 678 ··· 715 u32 ctrl_ext; 716 s32 ret_val; 717 u16 eeprom_data; 718 - 719 - e_dbg("e1000_setup_link"); 720 721 /* Read and store word 0x0F of the EEPROM. This word contains bits 722 * that determine the hardware's default PAUSE (flow control) mode, ··· 831 u32 i; 832 u32 signal = 0; 833 s32 ret_val; 834 - 835 - e_dbg("e1000_setup_fiber_serdes_link"); 836 837 /* On adapters with a MAC newer than 82544, SWDP 1 will be 838 * set when the optics detect a signal. 
On older adapters, it will be ··· 1033 s32 ret_val; 1034 u16 phy_data; 1035 1036 - e_dbg("e1000_copper_link_preconfig"); 1037 - 1038 ctrl = er32(CTRL); 1039 /* With 82543, we need to force speed and duplex on the MAC equal to 1040 * what the PHY speed and duplex configuration is. In addition, we need ··· 1091 u32 led_ctrl; 1092 s32 ret_val; 1093 u16 phy_data; 1094 - 1095 - e_dbg("e1000_copper_link_igp_setup"); 1096 1097 if (hw->phy_reset_disable) 1098 return E1000_SUCCESS; ··· 1232 s32 ret_val; 1233 u16 phy_data; 1234 1235 - e_dbg("e1000_copper_link_mgp_setup"); 1236 - 1237 if (hw->phy_reset_disable) 1238 return E1000_SUCCESS; 1239 ··· 1338 s32 ret_val; 1339 u16 phy_data; 1340 1341 - e_dbg("e1000_copper_link_autoneg"); 1342 - 1343 /* Perform some bounds checking on the hw->autoneg_advertised 1344 * parameter. If this variable is zero, then set it to the default. 1345 */ ··· 1406 static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) 1407 { 1408 s32 ret_val; 1409 - e_dbg("e1000_copper_link_postconfig"); 1410 1411 if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { 1412 e1000_config_collision_dist(hw); ··· 1445 s32 ret_val; 1446 u16 i; 1447 u16 phy_data; 1448 - 1449 - e_dbg("e1000_setup_copper_link"); 1450 1451 /* Check if it is a valid PHY and set PHY mode if necessary. */ 1452 ret_val = e1000_copper_link_preconfig(hw); ··· 1524 s32 ret_val; 1525 u16 mii_autoneg_adv_reg; 1526 u16 mii_1000t_ctrl_reg; 1527 - 1528 - e_dbg("e1000_phy_setup_autoneg"); 1529 1530 /* Read the MII Auto-Neg Advertisement Register (Address 4). */ 1531 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); ··· 1675 u16 mii_status_reg; 1676 u16 phy_data; 1677 u16 i; 1678 - 1679 - e_dbg("e1000_phy_force_speed_duplex"); 1680 1681 /* Turn off Flow control if we are forcing speed and duplex. 
*/ 1682 hw->fc = E1000_FC_NONE; ··· 1906 { 1907 u32 tctl, coll_dist; 1908 1909 - e_dbg("e1000_config_collision_dist"); 1910 - 1911 if (hw->mac_type < e1000_82543) 1912 coll_dist = E1000_COLLISION_DISTANCE_82542; 1913 else ··· 1934 u32 ctrl; 1935 s32 ret_val; 1936 u16 phy_data; 1937 - 1938 - e_dbg("e1000_config_mac_to_phy"); 1939 1940 /* 82544 or newer MAC, Auto Speed Detection takes care of 1941 * MAC speed/duplex configuration. ··· 2012 { 2013 u32 ctrl; 2014 2015 - e_dbg("e1000_force_mac_fc"); 2016 - 2017 /* Get the current configuration of the Device Control Register */ 2018 ctrl = er32(CTRL); 2019 ··· 2080 u16 mii_nway_lp_ability_reg; 2081 u16 speed; 2082 u16 duplex; 2083 - 2084 - e_dbg("e1000_config_fc_after_link_up"); 2085 2086 /* Check for the case where we have fiber media and auto-neg failed 2087 * so we had to force link. In this case, we need to force the ··· 2296 u32 status; 2297 s32 ret_val = E1000_SUCCESS; 2298 2299 - e_dbg("e1000_check_for_serdes_link_generic"); 2300 - 2301 ctrl = er32(CTRL); 2302 status = er32(STATUS); 2303 rxcw = er32(RXCW); ··· 2405 u32 signal = 0; 2406 s32 ret_val; 2407 u16 phy_data; 2408 - 2409 - e_dbg("e1000_check_for_link"); 2410 2411 ctrl = er32(CTRL); 2412 status = er32(STATUS); ··· 2587 s32 ret_val; 2588 u16 phy_data; 2589 2590 - e_dbg("e1000_get_speed_and_duplex"); 2591 - 2592 if (hw->mac_type >= e1000_82543) { 2593 status = er32(STATUS); 2594 if (status & E1000_STATUS_SPEED_1000) { ··· 2652 u16 i; 2653 u16 phy_data; 2654 2655 - e_dbg("e1000_wait_autoneg"); 2656 e_dbg("Waiting for Auto-Neg to complete.\n"); 2657 2658 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ ··· 2818 u32 ret_val; 2819 unsigned long flags; 2820 2821 - e_dbg("e1000_read_phy_reg"); 2822 - 2823 spin_lock_irqsave(&e1000_phy_lock, flags); 2824 2825 if ((hw->phy_type == e1000_phy_igp) && ··· 2843 u32 i; 2844 u32 mdic = 0; 2845 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? 
hw->phy_addr : 1; 2846 - 2847 - e_dbg("e1000_read_phy_reg_ex"); 2848 2849 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2850 e_dbg("PHY Address %d is out of range\n", reg_addr); ··· 2956 u32 ret_val; 2957 unsigned long flags; 2958 2959 - e_dbg("e1000_write_phy_reg"); 2960 - 2961 spin_lock_irqsave(&e1000_phy_lock, flags); 2962 2963 if ((hw->phy_type == e1000_phy_igp) && ··· 2981 u32 i; 2982 u32 mdic = 0; 2983 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; 2984 - 2985 - e_dbg("e1000_write_phy_reg_ex"); 2986 2987 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2988 e_dbg("PHY Address %d is out of range\n", reg_addr); ··· 3073 u32 ctrl, ctrl_ext; 3074 u32 led_ctrl; 3075 3076 - e_dbg("e1000_phy_hw_reset"); 3077 - 3078 e_dbg("Resetting Phy...\n"); 3079 3080 if (hw->mac_type > e1000_82543) { ··· 3131 s32 ret_val; 3132 u16 phy_data; 3133 3134 - e_dbg("e1000_phy_reset"); 3135 - 3136 switch (hw->phy_type) { 3137 case e1000_phy_igp: 3138 ret_val = e1000_phy_hw_reset(hw); ··· 3168 s32 phy_init_status, ret_val; 3169 u16 phy_id_high, phy_id_low; 3170 bool match = false; 3171 - 3172 - e_dbg("e1000_detect_gig_phy"); 3173 3174 if (hw->phy_id != 0) 3175 return E1000_SUCCESS; ··· 3239 static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) 3240 { 3241 s32 ret_val; 3242 - e_dbg("e1000_phy_reset_dsp"); 3243 3244 do { 3245 ret_val = e1000_write_phy_reg(hw, 29, 0x001d); ··· 3269 s32 ret_val; 3270 u16 phy_data, min_length, max_length, average; 3271 e1000_rev_polarity polarity; 3272 - 3273 - e_dbg("e1000_phy_igp_get_info"); 3274 3275 /* The downshift status is checked only once, after link is established, 3276 * and it stored in the hw->speed_downgraded parameter. ··· 3349 u16 phy_data; 3350 e1000_rev_polarity polarity; 3351 3352 - e_dbg("e1000_phy_m88_get_info"); 3353 - 3354 /* The downshift status is checked only once, after link is established, 3355 * and it stored in the hw->speed_downgraded parameter. 
3356 */ ··· 3420 s32 ret_val; 3421 u16 phy_data; 3422 3423 - e_dbg("e1000_phy_get_info"); 3424 - 3425 phy_info->cable_length = e1000_cable_length_undefined; 3426 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; 3427 phy_info->cable_polarity = e1000_rev_polarity_undefined; ··· 3458 3459 s32 e1000_validate_mdi_setting(struct e1000_hw *hw) 3460 { 3461 - e_dbg("e1000_validate_mdi_settings"); 3462 - 3463 if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { 3464 e_dbg("Invalid MDI setting detected\n"); 3465 hw->mdix = 1; ··· 3479 u32 eecd = er32(EECD); 3480 s32 ret_val = E1000_SUCCESS; 3481 u16 eeprom_size; 3482 - 3483 - e_dbg("e1000_init_eeprom_params"); 3484 3485 switch (hw->mac_type) { 3486 case e1000_82542_rev2_0: ··· 3697 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3698 u32 eecd, i = 0; 3699 3700 - e_dbg("e1000_acquire_eeprom"); 3701 - 3702 eecd = er32(EECD); 3703 3704 /* Request EEPROM Access */ ··· 3796 { 3797 u32 eecd; 3798 3799 - e_dbg("e1000_release_eeprom"); 3800 - 3801 eecd = er32(EECD); 3802 3803 if (hw->eeprom.type == e1000_eeprom_spi) { ··· 3842 { 3843 u16 retry_count = 0; 3844 u8 spi_stat_reg; 3845 - 3846 - e_dbg("e1000_spi_eeprom_ready"); 3847 3848 /* Read "Status Register" repeatedly until the LSB is cleared. 
The 3849 * EEPROM will signal that the command has been completed by clearing ··· 3894 { 3895 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3896 u32 i = 0; 3897 - 3898 - e_dbg("e1000_read_eeprom"); 3899 3900 if (hw->mac_type == e1000_ce4100) { 3901 GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, ··· 3995 u16 checksum = 0; 3996 u16 i, eeprom_data; 3997 3998 - e_dbg("e1000_validate_eeprom_checksum"); 3999 - 4000 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 4001 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 4002 e_dbg("EEPROM Read Error\n"); ··· 4028 { 4029 u16 checksum = 0; 4030 u16 i, eeprom_data; 4031 - 4032 - e_dbg("e1000_update_eeprom_checksum"); 4033 4034 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { 4035 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { ··· 4068 { 4069 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4070 s32 status = 0; 4071 - 4072 - e_dbg("e1000_write_eeprom"); 4073 4074 if (hw->mac_type == e1000_ce4100) { 4075 GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, ··· 4117 { 4118 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4119 u16 widx = 0; 4120 - 4121 - e_dbg("e1000_write_eeprom_spi"); 4122 4123 while (widx < words) { 4124 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; ··· 4184 u32 eecd; 4185 u16 words_written = 0; 4186 u16 i = 0; 4187 - 4188 - e_dbg("e1000_write_eeprom_microwire"); 4189 4190 /* Send the write enable command to the EEPROM (3-bit opcode plus 4191 * 6/8-bit dummy address beginning with 11). It's less work to include ··· 4263 u16 offset; 4264 u16 eeprom_data, i; 4265 4266 - e_dbg("e1000_read_mac_addr"); 4267 - 4268 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { 4269 offset = i >> 1; 4270 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { ··· 4300 { 4301 u32 i; 4302 u32 rar_num; 4303 - 4304 - e_dbg("e1000_init_rx_addrs"); 4305 4306 /* Setup the receive address. 
*/ 4307 e_dbg("Programming MAC Address into RAR[0]\n"); ··· 4458 u16 eeprom_data, i, temp; 4459 const u16 led_mask = 0x0F; 4460 4461 - e_dbg("e1000_id_led_init"); 4462 - 4463 if (hw->mac_type < e1000_82540) { 4464 /* Nothing to do */ 4465 return E1000_SUCCESS; ··· 4529 u32 ledctl; 4530 s32 ret_val = E1000_SUCCESS; 4531 4532 - e_dbg("e1000_setup_led"); 4533 - 4534 switch (hw->mac_type) { 4535 case e1000_82542_rev2_0: 4536 case e1000_82542_rev2_1: ··· 4579 { 4580 s32 ret_val = E1000_SUCCESS; 4581 4582 - e_dbg("e1000_cleanup_led"); 4583 - 4584 switch (hw->mac_type) { 4585 case e1000_82542_rev2_0: 4586 case e1000_82542_rev2_1: ··· 4612 s32 e1000_led_on(struct e1000_hw *hw) 4613 { 4614 u32 ctrl = er32(CTRL); 4615 - 4616 - e_dbg("e1000_led_on"); 4617 4618 switch (hw->mac_type) { 4619 case e1000_82542_rev2_0: ··· 4656 s32 e1000_led_off(struct e1000_hw *hw) 4657 { 4658 u32 ctrl = er32(CTRL); 4659 - 4660 - e_dbg("e1000_led_off"); 4661 4662 switch (hw->mac_type) { 4663 case e1000_82542_rev2_0: ··· 4784 */ 4785 void e1000_reset_adaptive(struct e1000_hw *hw) 4786 { 4787 - e_dbg("e1000_reset_adaptive"); 4788 - 4789 if (hw->adaptive_ifs) { 4790 if (!hw->ifs_params_forced) { 4791 hw->current_ifs_val = 0; ··· 4810 */ 4811 void e1000_update_adaptive(struct e1000_hw *hw) 4812 { 4813 - e_dbg("e1000_update_adaptive"); 4814 - 4815 if (hw->adaptive_ifs) { 4816 if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { 4817 if (hw->tx_packet_delta > MIN_NUM_XMITS) { ··· 5005 u16 i, phy_data; 5006 u16 cable_length; 5007 5008 - e_dbg("e1000_get_cable_length"); 5009 - 5010 *min_length = *max_length = 0; 5011 5012 /* Use old method for Phy older than IGP */ ··· 5120 s32 ret_val; 5121 u16 phy_data; 5122 5123 - e_dbg("e1000_check_polarity"); 5124 - 5125 if (hw->phy_type == e1000_phy_m88) { 5126 /* return the Polarity bit in the Status register. 
*/ 5127 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, ··· 5185 { 5186 s32 ret_val; 5187 u16 phy_data; 5188 - 5189 - e_dbg("e1000_check_downshift"); 5190 5191 if (hw->phy_type == e1000_phy_igp) { 5192 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, ··· 5295 { 5296 s32 ret_val; 5297 u16 phy_data, phy_saved_data, speed, duplex, i; 5298 - 5299 - e_dbg("e1000_config_dsp_after_link_change"); 5300 5301 if (hw->phy_type != e1000_phy_igp) 5302 return E1000_SUCCESS; ··· 5429 s32 ret_val; 5430 u16 eeprom_data; 5431 5432 - e_dbg("e1000_set_phy_mode"); 5433 - 5434 if ((hw->mac_type == e1000_82545_rev_3) && 5435 (hw->media_type == e1000_media_type_copper)) { 5436 ret_val = ··· 5475 { 5476 s32 ret_val; 5477 u16 phy_data; 5478 - e_dbg("e1000_set_d3_lplu_state"); 5479 5480 if (hw->phy_type != e1000_phy_igp) 5481 return E1000_SUCCESS; ··· 5578 s32 ret_val; 5579 u16 default_page = 0; 5580 u16 phy_data; 5581 - 5582 - e_dbg("e1000_set_vco_speed"); 5583 5584 switch (hw->mac_type) { 5585 case e1000_82545_rev_3: ··· 5750 */ 5751 static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) 5752 { 5753 - e_dbg("e1000_get_auto_rd_done"); 5754 msleep(5); 5755 return E1000_SUCCESS; 5756 } ··· 5764 */ 5765 static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) 5766 { 5767 - e_dbg("e1000_get_phy_cfg_done"); 5768 msleep(10); 5769 return E1000_SUCCESS; 5770 }
··· 115 */ 116 static s32 e1000_set_phy_type(struct e1000_hw *hw) 117 { 118 if (hw->mac_type == e1000_undefined) 119 return -E1000_ERR_PHY_TYPE; 120 ··· 158 { 159 u32 ret_val; 160 u16 phy_saved_data; 161 162 if (hw->phy_init_script) { 163 msleep(20); ··· 253 */ 254 s32 e1000_set_mac_type(struct e1000_hw *hw) 255 { 256 switch (hw->device_id) { 257 case E1000_DEV_ID_82542: 258 switch (hw->revision_id) { ··· 365 { 366 u32 status; 367 368 if (hw->mac_type != e1000_82543) { 369 /* tbi_compatibility is only valid on 82543 */ 370 hw->tbi_compatibility_en = false; ··· 414 u32 manc; 415 u32 led_ctrl; 416 s32 ret_val; 417 418 /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ 419 if (hw->mac_type == e1000_82542_rev2_0) { ··· 566 u32 mta_size; 567 u32 ctrl_ext; 568 569 /* Initialize Identification LED */ 570 ret_val = e1000_id_led_init(hw); 571 if (ret_val) { ··· 683 u16 eeprom_data; 684 s32 ret_val; 685 686 if (hw->media_type != e1000_media_type_internal_serdes) 687 return E1000_SUCCESS; 688 ··· 729 u32 ctrl_ext; 730 s32 ret_val; 731 u16 eeprom_data; 732 733 /* Read and store word 0x0F of the EEPROM. This word contains bits 734 * that determine the hardware's default PAUSE (flow control) mode, ··· 847 u32 i; 848 u32 signal = 0; 849 s32 ret_val; 850 851 /* On adapters with a MAC newer than 82544, SWDP 1 will be 852 * set when the optics detect a signal. On older adapters, it will be ··· 1051 s32 ret_val; 1052 u16 phy_data; 1053 1054 ctrl = er32(CTRL); 1055 /* With 82543, we need to force speed and duplex on the MAC equal to 1056 * what the PHY speed and duplex configuration is. In addition, we need ··· 1111 u32 led_ctrl; 1112 s32 ret_val; 1113 u16 phy_data; 1114 1115 if (hw->phy_reset_disable) 1116 return E1000_SUCCESS; ··· 1254 s32 ret_val; 1255 u16 phy_data; 1256 1257 if (hw->phy_reset_disable) 1258 return E1000_SUCCESS; 1259 ··· 1362 s32 ret_val; 1363 u16 phy_data; 1364 1365 /* Perform some bounds checking on the hw->autoneg_advertised 1366 * parameter. 
If this variable is zero, then set it to the default. 1367 */ ··· 1432 static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) 1433 { 1434 s32 ret_val; 1435 1436 if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { 1437 e1000_config_collision_dist(hw); ··· 1472 s32 ret_val; 1473 u16 i; 1474 u16 phy_data; 1475 1476 /* Check if it is a valid PHY and set PHY mode if necessary. */ 1477 ret_val = e1000_copper_link_preconfig(hw); ··· 1553 s32 ret_val; 1554 u16 mii_autoneg_adv_reg; 1555 u16 mii_1000t_ctrl_reg; 1556 1557 /* Read the MII Auto-Neg Advertisement Register (Address 4). */ 1558 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); ··· 1706 u16 mii_status_reg; 1707 u16 phy_data; 1708 u16 i; 1709 1710 /* Turn off Flow control if we are forcing speed and duplex. */ 1711 hw->fc = E1000_FC_NONE; ··· 1939 { 1940 u32 tctl, coll_dist; 1941 1942 if (hw->mac_type < e1000_82543) 1943 coll_dist = E1000_COLLISION_DISTANCE_82542; 1944 else ··· 1969 u32 ctrl; 1970 s32 ret_val; 1971 u16 phy_data; 1972 1973 /* 82544 or newer MAC, Auto Speed Detection takes care of 1974 * MAC speed/duplex configuration. ··· 2049 { 2050 u32 ctrl; 2051 2052 /* Get the current configuration of the Device Control Register */ 2053 ctrl = er32(CTRL); 2054 ··· 2119 u16 mii_nway_lp_ability_reg; 2120 u16 speed; 2121 u16 duplex; 2122 2123 /* Check for the case where we have fiber media and auto-neg failed 2124 * so we had to force link. 
In this case, we need to force the ··· 2337 u32 status; 2338 s32 ret_val = E1000_SUCCESS; 2339 2340 ctrl = er32(CTRL); 2341 status = er32(STATUS); 2342 rxcw = er32(RXCW); ··· 2448 u32 signal = 0; 2449 s32 ret_val; 2450 u16 phy_data; 2451 2452 ctrl = er32(CTRL); 2453 status = er32(STATUS); ··· 2632 s32 ret_val; 2633 u16 phy_data; 2634 2635 if (hw->mac_type >= e1000_82543) { 2636 status = er32(STATUS); 2637 if (status & E1000_STATUS_SPEED_1000) { ··· 2699 u16 i; 2700 u16 phy_data; 2701 2702 e_dbg("Waiting for Auto-Neg to complete.\n"); 2703 2704 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ ··· 2866 u32 ret_val; 2867 unsigned long flags; 2868 2869 spin_lock_irqsave(&e1000_phy_lock, flags); 2870 2871 if ((hw->phy_type == e1000_phy_igp) && ··· 2893 u32 i; 2894 u32 mdic = 0; 2895 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; 2896 2897 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2898 e_dbg("PHY Address %d is out of range\n", reg_addr); ··· 3008 u32 ret_val; 3009 unsigned long flags; 3010 3011 spin_lock_irqsave(&e1000_phy_lock, flags); 3012 3013 if ((hw->phy_type == e1000_phy_igp) && ··· 3035 u32 i; 3036 u32 mdic = 0; 3037 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? 
hw->phy_addr : 1; 3038 3039 if (reg_addr > MAX_PHY_REG_ADDRESS) { 3040 e_dbg("PHY Address %d is out of range\n", reg_addr); ··· 3129 u32 ctrl, ctrl_ext; 3130 u32 led_ctrl; 3131 3132 e_dbg("Resetting Phy...\n"); 3133 3134 if (hw->mac_type > e1000_82543) { ··· 3189 s32 ret_val; 3190 u16 phy_data; 3191 3192 switch (hw->phy_type) { 3193 case e1000_phy_igp: 3194 ret_val = e1000_phy_hw_reset(hw); ··· 3228 s32 phy_init_status, ret_val; 3229 u16 phy_id_high, phy_id_low; 3230 bool match = false; 3231 3232 if (hw->phy_id != 0) 3233 return E1000_SUCCESS; ··· 3301 static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) 3302 { 3303 s32 ret_val; 3304 3305 do { 3306 ret_val = e1000_write_phy_reg(hw, 29, 0x001d); ··· 3332 s32 ret_val; 3333 u16 phy_data, min_length, max_length, average; 3334 e1000_rev_polarity polarity; 3335 3336 /* The downshift status is checked only once, after link is established, 3337 * and it stored in the hw->speed_downgraded parameter. ··· 3414 u16 phy_data; 3415 e1000_rev_polarity polarity; 3416 3417 /* The downshift status is checked only once, after link is established, 3418 * and it stored in the hw->speed_downgraded parameter. 
3419 */ ··· 3487 s32 ret_val; 3488 u16 phy_data; 3489 3490 phy_info->cable_length = e1000_cable_length_undefined; 3491 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; 3492 phy_info->cable_polarity = e1000_rev_polarity_undefined; ··· 3527 3528 s32 e1000_validate_mdi_setting(struct e1000_hw *hw) 3529 { 3530 if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { 3531 e_dbg("Invalid MDI setting detected\n"); 3532 hw->mdix = 1; ··· 3550 u32 eecd = er32(EECD); 3551 s32 ret_val = E1000_SUCCESS; 3552 u16 eeprom_size; 3553 3554 switch (hw->mac_type) { 3555 case e1000_82542_rev2_0: ··· 3770 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3771 u32 eecd, i = 0; 3772 3773 eecd = er32(EECD); 3774 3775 /* Request EEPROM Access */ ··· 3871 { 3872 u32 eecd; 3873 3874 eecd = er32(EECD); 3875 3876 if (hw->eeprom.type == e1000_eeprom_spi) { ··· 3919 { 3920 u16 retry_count = 0; 3921 u8 spi_stat_reg; 3922 3923 /* Read "Status Register" repeatedly until the LSB is cleared. The 3924 * EEPROM will signal that the command has been completed by clearing ··· 3973 { 3974 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3975 u32 i = 0; 3976 3977 if (hw->mac_type == e1000_ce4100) { 3978 GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, ··· 4076 u16 checksum = 0; 4077 u16 i, eeprom_data; 4078 4079 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 4080 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 4081 e_dbg("EEPROM Read Error\n"); ··· 4111 { 4112 u16 checksum = 0; 4113 u16 i, eeprom_data; 4114 4115 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { 4116 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { ··· 4153 { 4154 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4155 s32 status = 0; 4156 4157 if (hw->mac_type == e1000_ce4100) { 4158 GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, ··· 4204 { 4205 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4206 u16 widx = 0; 4207 4208 while (widx < words) { 4209 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; ··· 
4273 u32 eecd; 4274 u16 words_written = 0; 4275 u16 i = 0; 4276 4277 /* Send the write enable command to the EEPROM (3-bit opcode plus 4278 * 6/8-bit dummy address beginning with 11). It's less work to include ··· 4354 u16 offset; 4355 u16 eeprom_data, i; 4356 4357 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { 4358 offset = i >> 1; 4359 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { ··· 4393 { 4394 u32 i; 4395 u32 rar_num; 4396 4397 /* Setup the receive address. */ 4398 e_dbg("Programming MAC Address into RAR[0]\n"); ··· 4553 u16 eeprom_data, i, temp; 4554 const u16 led_mask = 0x0F; 4555 4556 if (hw->mac_type < e1000_82540) { 4557 /* Nothing to do */ 4558 return E1000_SUCCESS; ··· 4626 u32 ledctl; 4627 s32 ret_val = E1000_SUCCESS; 4628 4629 switch (hw->mac_type) { 4630 case e1000_82542_rev2_0: 4631 case e1000_82542_rev2_1: ··· 4678 { 4679 s32 ret_val = E1000_SUCCESS; 4680 4681 switch (hw->mac_type) { 4682 case e1000_82542_rev2_0: 4683 case e1000_82542_rev2_1: ··· 4713 s32 e1000_led_on(struct e1000_hw *hw) 4714 { 4715 u32 ctrl = er32(CTRL); 4716 4717 switch (hw->mac_type) { 4718 case e1000_82542_rev2_0: ··· 4759 s32 e1000_led_off(struct e1000_hw *hw) 4760 { 4761 u32 ctrl = er32(CTRL); 4762 4763 switch (hw->mac_type) { 4764 case e1000_82542_rev2_0: ··· 4889 */ 4890 void e1000_reset_adaptive(struct e1000_hw *hw) 4891 { 4892 if (hw->adaptive_ifs) { 4893 if (!hw->ifs_params_forced) { 4894 hw->current_ifs_val = 0; ··· 4917 */ 4918 void e1000_update_adaptive(struct e1000_hw *hw) 4919 { 4920 if (hw->adaptive_ifs) { 4921 if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { 4922 if (hw->tx_packet_delta > MIN_NUM_XMITS) { ··· 5114 u16 i, phy_data; 5115 u16 cable_length; 5116 5117 *min_length = *max_length = 0; 5118 5119 /* Use old method for Phy older than IGP */ ··· 5231 s32 ret_val; 5232 u16 phy_data; 5233 5234 if (hw->phy_type == e1000_phy_m88) { 5235 /* return the Polarity bit in the Status register. 
*/ 5236 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, ··· 5298 { 5299 s32 ret_val; 5300 u16 phy_data; 5301 5302 if (hw->phy_type == e1000_phy_igp) { 5303 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, ··· 5410 { 5411 s32 ret_val; 5412 u16 phy_data, phy_saved_data, speed, duplex, i; 5413 5414 if (hw->phy_type != e1000_phy_igp) 5415 return E1000_SUCCESS; ··· 5546 s32 ret_val; 5547 u16 eeprom_data; 5548 5549 if ((hw->mac_type == e1000_82545_rev_3) && 5550 (hw->media_type == e1000_media_type_copper)) { 5551 ret_val = ··· 5594 { 5595 s32 ret_val; 5596 u16 phy_data; 5597 5598 if (hw->phy_type != e1000_phy_igp) 5599 return E1000_SUCCESS; ··· 5698 s32 ret_val; 5699 u16 default_page = 0; 5700 u16 phy_data; 5701 5702 switch (hw->mac_type) { 5703 case e1000_82545_rev_3: ··· 5872 */ 5873 static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) 5874 { 5875 msleep(5); 5876 return E1000_SUCCESS; 5877 } ··· 5887 */ 5888 static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) 5889 { 5890 msleep(10); 5891 return E1000_SUCCESS; 5892 }
+5 -6
drivers/net/ethernet/intel/e1000/e1000_main.c
··· 2682 u32 cmd_length = 0; 2683 u16 ipcse = 0, tucse, mss; 2684 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2685 - int err; 2686 2687 if (skb_is_gso(skb)) { 2688 - if (skb_header_cloned(skb)) { 2689 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2690 - if (err) 2691 - return err; 2692 - } 2693 2694 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2695 mss = skb_shinfo(skb)->gso_size;
··· 2682 u32 cmd_length = 0; 2683 u16 ipcse = 0, tucse, mss; 2684 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2685 2686 if (skb_is_gso(skb)) { 2687 + int err; 2688 + 2689 + err = skb_cow_head(skb, 0); 2690 + if (err < 0) 2691 + return err; 2692 2693 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2694 mss = skb_shinfo(skb)->gso_size;
+4 -6
drivers/net/ethernet/intel/e1000e/netdev.c
··· 5100 u32 cmd_length = 0; 5101 u16 ipcse = 0, mss; 5102 u8 ipcss, ipcso, tucss, tucso, hdr_len; 5103 5104 if (!skb_is_gso(skb)) 5105 return 0; 5106 5107 - if (skb_header_cloned(skb)) { 5108 - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 5109 - 5110 - if (err) 5111 - return err; 5112 - } 5113 5114 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5115 mss = skb_shinfo(skb)->gso_size;
··· 5100 u32 cmd_length = 0; 5101 u16 ipcse = 0, mss; 5102 u8 ipcss, ipcso, tucss, tucso, hdr_len; 5103 + int err; 5104 5105 if (!skb_is_gso(skb)) 5106 return 0; 5107 5108 + err = skb_cow_head(skb, 0); 5109 + if (err < 0) 5110 + return err; 5111 5112 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5113 mss = skb_shinfo(skb)->gso_size;
+4 -6
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
··· 1114 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) 1115 { 1116 u32 cd_cmd, cd_tso_len, cd_mss; 1117 struct tcphdr *tcph; 1118 struct iphdr *iph; 1119 u32 l4len; 1120 int err; 1121 - struct ipv6hdr *ipv6h; 1122 1123 if (!skb_is_gso(skb)) 1124 return 0; 1125 1126 - if (skb_header_cloned(skb)) { 1127 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1128 - if (err) 1129 - return err; 1130 - } 1131 1132 if (protocol == htons(ETH_P_IP)) { 1133 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
··· 1114 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) 1115 { 1116 u32 cd_cmd, cd_tso_len, cd_mss; 1117 + struct ipv6hdr *ipv6h; 1118 struct tcphdr *tcph; 1119 struct iphdr *iph; 1120 u32 l4len; 1121 int err; 1122 1123 if (!skb_is_gso(skb)) 1124 return 0; 1125 1126 + err = skb_cow_head(skb, 0); 1127 + if (err < 0) 1128 + return err; 1129 1130 if (protocol == htons(ETH_P_IP)) { 1131 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+16 -4
drivers/net/ethernet/intel/i40evf/i40evf_main.c
··· 1412 schedule_work(&adapter->adminq_task); 1413 } 1414 1415 static int next_queue(struct i40evf_adapter *adapter, int j) 1416 { 1417 j += 1; ··· 1459 /* Populate the LUT with max no. of queues in round robin fashion */ 1460 j = adapter->vsi_res->num_queue_pairs; 1461 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { 1462 - lut = next_queue(adapter, j); 1463 - lut |= next_queue(adapter, j) << 8; 1464 - lut |= next_queue(adapter, j) << 16; 1465 - lut |= next_queue(adapter, j) << 24; 1466 wr32(hw, I40E_VFQF_HLUT(i), lut); 1467 } 1468 i40e_flush(hw);
··· 1412 schedule_work(&adapter->adminq_task); 1413 } 1414 1415 + /** 1416 + * i40evf_configure_rss - increment to next available tx queue 1417 + * @adapter: board private structure 1418 + * @j: queue counter 1419 + * 1420 + * Helper function for RSS programming to increment through available 1421 + * queus. Returns the next queue value. 1422 + **/ 1423 static int next_queue(struct i40evf_adapter *adapter, int j) 1424 { 1425 j += 1; ··· 1451 /* Populate the LUT with max no. of queues in round robin fashion */ 1452 j = adapter->vsi_res->num_queue_pairs; 1453 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { 1454 + j = next_queue(adapter, j); 1455 + lut = j; 1456 + j = next_queue(adapter, j); 1457 + lut |= j << 8; 1458 + j = next_queue(adapter, j); 1459 + lut |= j << 16; 1460 + j = next_queue(adapter, j); 1461 + lut |= j << 24; 1462 wr32(hw, I40E_VFQF_HLUT(i), lut); 1463 } 1464 i40e_flush(hw);
+1 -15
drivers/net/ethernet/intel/igb/igb.h
··· 241 struct igb_tx_buffer *tx_buffer_info; 242 struct igb_rx_buffer *rx_buffer_info; 243 }; 244 - unsigned long last_rx_timestamp; 245 void *desc; /* descriptor ring memory */ 246 unsigned long flags; /* ring specific flags */ 247 void __iomem *tail; /* pointer to ring tail register */ ··· 436 struct hwtstamp_config tstamp_config; 437 unsigned long ptp_tx_start; 438 unsigned long last_rx_ptp_check; 439 spinlock_t tmreg_lock; 440 struct cyclecounter cc; 441 struct timecounter tc; ··· 533 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); 534 void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, 535 struct sk_buff *skb); 536 - static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring, 537 - union e1000_adv_rx_desc *rx_desc, 538 - struct sk_buff *skb) 539 - { 540 - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && 541 - !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) 542 - igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); 543 - 544 - /* Update the last_rx_timestamp timer in order to enable watchdog check 545 - * for error case of latched timestamp on a dropped packet. 546 - */ 547 - rx_ring->last_rx_timestamp = jiffies; 548 - } 549 - 550 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); 551 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); 552 #ifdef CONFIG_IGB_HWMON
··· 241 struct igb_tx_buffer *tx_buffer_info; 242 struct igb_rx_buffer *rx_buffer_info; 243 }; 244 void *desc; /* descriptor ring memory */ 245 unsigned long flags; /* ring specific flags */ 246 void __iomem *tail; /* pointer to ring tail register */ ··· 437 struct hwtstamp_config tstamp_config; 438 unsigned long ptp_tx_start; 439 unsigned long last_rx_ptp_check; 440 + unsigned long last_rx_timestamp; 441 spinlock_t tmreg_lock; 442 struct cyclecounter cc; 443 struct timecounter tc; ··· 533 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); 534 void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, 535 struct sk_buff *skb); 536 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); 537 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); 538 #ifdef CONFIG_IGB_HWMON
+7 -6
drivers/net/ethernet/intel/igb/igb_main.c
··· 4605 struct sk_buff *skb = first->skb; 4606 u32 vlan_macip_lens, type_tucmd; 4607 u32 mss_l4len_idx, l4len; 4608 4609 if (skb->ip_summed != CHECKSUM_PARTIAL) 4610 return 0; ··· 4613 if (!skb_is_gso(skb)) 4614 return 0; 4615 4616 - if (skb_header_cloned(skb)) { 4617 - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4618 - if (err) 4619 - return err; 4620 - } 4621 4622 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 4623 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; ··· 6954 6955 igb_rx_checksum(rx_ring, rx_desc, skb); 6956 6957 - igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); 6958 6959 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 6960 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
··· 4605 struct sk_buff *skb = first->skb; 4606 u32 vlan_macip_lens, type_tucmd; 4607 u32 mss_l4len_idx, l4len; 4608 + int err; 4609 4610 if (skb->ip_summed != CHECKSUM_PARTIAL) 4611 return 0; ··· 4612 if (!skb_is_gso(skb)) 4613 return 0; 4614 4615 + err = skb_cow_head(skb, 0); 4616 + if (err < 0) 4617 + return err; 4618 4619 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 4620 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; ··· 6955 6956 igb_rx_checksum(rx_ring, rx_desc, skb); 6957 6958 + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && 6959 + !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) 6960 + igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); 6961 6962 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 6963 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+7 -7
drivers/net/ethernet/intel/igb/igb_ptp.c
··· 427 void igb_ptp_rx_hang(struct igb_adapter *adapter) 428 { 429 struct e1000_hw *hw = &adapter->hw; 430 - struct igb_ring *rx_ring; 431 u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); 432 unsigned long rx_event; 433 - int n; 434 435 if (hw->mac.type != e1000_82576) 436 return; ··· 443 444 /* Determine the most recent watchdog or rx_timestamp event */ 445 rx_event = adapter->last_rx_ptp_check; 446 - for (n = 0; n < adapter->num_rx_queues; n++) { 447 - rx_ring = adapter->rx_ring[n]; 448 - if (time_after(rx_ring->last_rx_timestamp, rx_event)) 449 - rx_event = rx_ring->last_rx_timestamp; 450 - } 451 452 /* Only need to read the high RXSTMP register to clear the lock */ 453 if (time_is_before_jiffies(rx_event + 5 * HZ)) { ··· 535 regval |= (u64)rd32(E1000_RXSTMPH) << 32; 536 537 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 538 } 539 540 /**
··· 427 void igb_ptp_rx_hang(struct igb_adapter *adapter) 428 { 429 struct e1000_hw *hw = &adapter->hw; 430 u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); 431 unsigned long rx_event; 432 433 if (hw->mac.type != e1000_82576) 434 return; ··· 445 446 /* Determine the most recent watchdog or rx_timestamp event */ 447 rx_event = adapter->last_rx_ptp_check; 448 + if (time_after(adapter->last_rx_timestamp, rx_event)) 449 + rx_event = adapter->last_rx_timestamp; 450 451 /* Only need to read the high RXSTMP register to clear the lock */ 452 if (time_is_before_jiffies(rx_event + 5 * HZ)) { ··· 540 regval |= (u64)rd32(E1000_RXSTMPH) << 32; 541 542 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 543 + 544 + /* Update the last_rx_timestamp timer in order to enable watchdog check 545 + * for error case of latched timestamp on a dropped packet. 546 + */ 547 + adapter->last_rx_timestamp = jiffies; 548 } 549 550 /**
+7 -9
drivers/net/ethernet/intel/igbvf/netdev.c
··· 1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1911 { 1912 struct e1000_adv_tx_context_desc *context_desc; 1913 - unsigned int i; 1914 - int err; 1915 struct igbvf_buffer *buffer_info; 1916 u32 info = 0, tu_cmd = 0; 1917 u32 mss_l4len_idx, l4len; 1918 *hdr_len = 0; 1919 1920 - if (skb_header_cloned(skb)) { 1921 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1922 - if (err) { 1923 - dev_err(&adapter->pdev->dev, 1924 - "igbvf_tso returning an error\n"); 1925 - return err; 1926 - } 1927 } 1928 1929 l4len = tcp_hdrlen(skb);
··· 1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1911 { 1912 struct e1000_adv_tx_context_desc *context_desc; 1913 struct igbvf_buffer *buffer_info; 1914 u32 info = 0, tu_cmd = 0; 1915 u32 mss_l4len_idx, l4len; 1916 + unsigned int i; 1917 + int err; 1918 + 1919 *hdr_len = 0; 1920 1921 + err = skb_cow_head(skb, 0); 1922 + if (err < 0) { 1923 + dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n"); 1924 + return err; 1925 } 1926 1927 l4len = tcp_hdrlen(skb);
+4 -6
drivers/net/ethernet/intel/ixgb/ixgb_main.c
··· 1220 unsigned int i; 1221 u8 ipcss, ipcso, tucss, tucso, hdr_len; 1222 u16 ipcse, tucse, mss; 1223 - int err; 1224 1225 if (likely(skb_is_gso(skb))) { 1226 struct ixgb_buffer *buffer_info; 1227 struct iphdr *iph; 1228 1229 - if (skb_header_cloned(skb)) { 1230 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1231 - if (err) 1232 - return err; 1233 - } 1234 1235 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1236 mss = skb_shinfo(skb)->gso_size;
··· 1220 unsigned int i; 1221 u8 ipcss, ipcso, tucss, tucso, hdr_len; 1222 u16 ipcse, tucse, mss; 1223 1224 if (likely(skb_is_gso(skb))) { 1225 struct ixgb_buffer *buffer_info; 1226 struct iphdr *iph; 1227 + int err; 1228 1229 + err = skb_cow_head(skb, 0); 1230 + if (err < 0) 1231 + return err; 1232 1233 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1234 mss = skb_shinfo(skb)->gso_size;
+1
drivers/net/ethernet/intel/ixgbe/ixgbe.h
··· 811 __IXGBE_DISABLED, 812 __IXGBE_REMOVING, 813 __IXGBE_SERVICE_SCHED, 814 __IXGBE_IN_SFP_INIT, 815 __IXGBE_PTP_RUNNING, 816 __IXGBE_PTP_TX_IN_PROGRESS,
··· 811 __IXGBE_DISABLED, 812 __IXGBE_REMOVING, 813 __IXGBE_SERVICE_SCHED, 814 + __IXGBE_SERVICE_INITED, 815 __IXGBE_IN_SFP_INIT, 816 __IXGBE_PTP_RUNNING, 817 __IXGBE_PTP_TX_IN_PROGRESS,
+20 -8
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 297 return; 298 hw->hw_addr = NULL; 299 e_dev_err("Adapter removed\n"); 300 - ixgbe_service_event_schedule(adapter); 301 } 302 303 void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) ··· 6510 struct sk_buff *skb = first->skb; 6511 u32 vlan_macip_lens, type_tucmd; 6512 u32 mss_l4len_idx, l4len; 6513 6514 if (skb->ip_summed != CHECKSUM_PARTIAL) 6515 return 0; ··· 6518 if (!skb_is_gso(skb)) 6519 return 0; 6520 6521 - if (skb_header_cloned(skb)) { 6522 - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 6523 - if (err) 6524 - return err; 6525 - } 6526 6527 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 6528 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; ··· 7077 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; 7078 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { 7079 struct vlan_ethhdr *vhdr; 7080 - if (skb_header_cloned(skb) && 7081 - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 7082 goto out_drop; 7083 vhdr = (struct vlan_ethhdr *)skb->data; 7084 vhdr->h_vlan_TCI = htons(tx_flags >> ··· 8023 /* EEPROM */ 8024 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); 8025 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 8026 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ 8027 if (!(eec & (1 << 8))) 8028 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; ··· 8189 setup_timer(&adapter->service_timer, &ixgbe_service_timer, 8190 (unsigned long) adapter); 8191 8192 INIT_WORK(&adapter->service_task, ixgbe_service_task); 8193 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); 8194 8195 err = ixgbe_init_interrupt_scheme(adapter); ··· 8503 8504 skip_bad_vf_detection: 8505 #endif /* CONFIG_PCI_IOV */ 8506 rtnl_lock(); 8507 netif_device_detach(netdev); 8508
··· 297 return; 298 hw->hw_addr = NULL; 299 e_dev_err("Adapter removed\n"); 300 + if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) 301 + ixgbe_service_event_schedule(adapter); 302 } 303 304 void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) ··· 6509 struct sk_buff *skb = first->skb; 6510 u32 vlan_macip_lens, type_tucmd; 6511 u32 mss_l4len_idx, l4len; 6512 + int err; 6513 6514 if (skb->ip_summed != CHECKSUM_PARTIAL) 6515 return 0; ··· 6516 if (!skb_is_gso(skb)) 6517 return 0; 6518 6519 + err = skb_cow_head(skb, 0); 6520 + if (err < 0) 6521 + return err; 6522 6523 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 6524 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; ··· 7077 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; 7078 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { 7079 struct vlan_ethhdr *vhdr; 7080 + 7081 + if (skb_cow_head(skb, 0)) 7082 goto out_drop; 7083 vhdr = (struct vlan_ethhdr *)skb->data; 7084 vhdr->h_vlan_TCI = htons(tx_flags >> ··· 8023 /* EEPROM */ 8024 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); 8025 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 8026 + if (ixgbe_removed(hw->hw_addr)) { 8027 + err = -EIO; 8028 + goto err_ioremap; 8029 + } 8030 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ 8031 if (!(eec & (1 << 8))) 8032 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; ··· 8185 setup_timer(&adapter->service_timer, &ixgbe_service_timer, 8186 (unsigned long) adapter); 8187 8188 + if (ixgbe_removed(hw->hw_addr)) { 8189 + err = -EIO; 8190 + goto err_sw_init; 8191 + } 8192 INIT_WORK(&adapter->service_task, ixgbe_service_task); 8193 + set_bit(__IXGBE_SERVICE_INITED, &adapter->state); 8194 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); 8195 8196 err = ixgbe_init_interrupt_scheme(adapter); ··· 8494 8495 skip_bad_vf_detection: 8496 #endif /* CONFIG_PCI_IOV */ 8497 + if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) 8498 + return PCI_ERS_RESULT_DISCONNECT; 8499 + 8500 rtnl_lock(); 8501 netif_device_detach(netdev); 8502
+1
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
··· 421 __IXGBEVF_DOWN, 422 __IXGBEVF_DISABLED, 423 __IXGBEVF_REMOVING, 424 }; 425 426 struct ixgbevf_cb {
··· 421 __IXGBEVF_DOWN, 422 __IXGBEVF_DISABLED, 423 __IXGBEVF_REMOVING, 424 + __IXGBEVF_WORK_INIT, 425 }; 426 427 struct ixgbevf_cb {
+14 -6
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 107 return; 108 hw->hw_addr = NULL; 109 dev_err(&adapter->pdev->dev, "Adapter removed\n"); 110 - schedule_work(&adapter->watchdog_task); 111 } 112 113 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) ··· 2839 struct sk_buff *skb = first->skb; 2840 u32 vlan_macip_lens, type_tucmd; 2841 u32 mss_l4len_idx, l4len; 2842 2843 if (skb->ip_summed != CHECKSUM_PARTIAL) 2844 return 0; ··· 2847 if (!skb_is_gso(skb)) 2848 return 0; 2849 2850 - if (skb_header_cloned(skb)) { 2851 - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2852 - if (err) 2853 - return err; 2854 - } 2855 2856 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2857 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; ··· 3573 adapter->watchdog_timer.function = ixgbevf_watchdog; 3574 adapter->watchdog_timer.data = (unsigned long)adapter; 3575 3576 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3577 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3578 3579 err = ixgbevf_init_interrupt_scheme(adapter); 3580 if (err) ··· 3671 { 3672 struct net_device *netdev = pci_get_drvdata(pdev); 3673 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3674 3675 rtnl_lock(); 3676 netif_device_detach(netdev);
··· 107 return; 108 hw->hw_addr = NULL; 109 dev_err(&adapter->pdev->dev, "Adapter removed\n"); 110 + if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state)) 111 + schedule_work(&adapter->watchdog_task); 112 } 113 114 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) ··· 2838 struct sk_buff *skb = first->skb; 2839 u32 vlan_macip_lens, type_tucmd; 2840 u32 mss_l4len_idx, l4len; 2841 + int err; 2842 2843 if (skb->ip_summed != CHECKSUM_PARTIAL) 2844 return 0; ··· 2845 if (!skb_is_gso(skb)) 2846 return 0; 2847 2848 + err = skb_cow_head(skb, 0); 2849 + if (err < 0) 2850 + return err; 2851 2852 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2853 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; ··· 3573 adapter->watchdog_timer.function = ixgbevf_watchdog; 3574 adapter->watchdog_timer.data = (unsigned long)adapter; 3575 3576 + if (IXGBE_REMOVED(hw->hw_addr)) { 3577 + err = -EIO; 3578 + goto err_sw_init; 3579 + } 3580 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3581 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3582 + set_bit(__IXGBEVF_WORK_INIT, &adapter->state); 3583 3584 err = ixgbevf_init_interrupt_scheme(adapter); 3585 if (err) ··· 3666 { 3667 struct net_device *netdev = pci_get_drvdata(pdev); 3668 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3669 + 3670 + if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state)) 3671 + return PCI_ERS_RESULT_DISCONNECT; 3672 3673 rtnl_lock(); 3674 netif_device_detach(netdev);