Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates

This series contains updates to e1000, e1000e, igb, igbvf, ixgb, ixgbe,
ixgbevf and i40evf.

Mark fixes an issue with ixgbe and ixgbevf by adding a bit to indicate
when workqueues have been initialized. This prevents the register read
error handling from attempting to use them prior to that, which also
generates warnings. Checking for a detected removal after initializing
the work queues allows the probe function to return an error without
getting the workqueue involved. Further, if the error_detected
callback is entered before the workqueues are initialized, exit without
recovery since the device initialization was so truncated.

Francois Romieu provides several patches to all the drivers to replace
the open-coded equivalents of skb_cow_head with calls to the helper.

Jakub Kicinski provides a fix for igb where last_rx_timestamp should be
updated only when Rx time stamp is read.

Mitch provides a fix for i40evf where a recent change broke the RSS LUT
programming causing it to be programmed with all 0's.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+91 -203
-124
drivers/net/ethernet/intel/e1000/e1000_hw.c
··· 115 115 */ 116 116 static s32 e1000_set_phy_type(struct e1000_hw *hw) 117 117 { 118 - e_dbg("e1000_set_phy_type"); 119 - 120 118 if (hw->mac_type == e1000_undefined) 121 119 return -E1000_ERR_PHY_TYPE; 122 120 ··· 156 158 { 157 159 u32 ret_val; 158 160 u16 phy_saved_data; 159 - 160 - e_dbg("e1000_phy_init_script"); 161 161 162 162 if (hw->phy_init_script) { 163 163 msleep(20); ··· 249 253 */ 250 254 s32 e1000_set_mac_type(struct e1000_hw *hw) 251 255 { 252 - e_dbg("e1000_set_mac_type"); 253 - 254 256 switch (hw->device_id) { 255 257 case E1000_DEV_ID_82542: 256 258 switch (hw->revision_id) { ··· 359 365 { 360 366 u32 status; 361 367 362 - e_dbg("e1000_set_media_type"); 363 - 364 368 if (hw->mac_type != e1000_82543) { 365 369 /* tbi_compatibility is only valid on 82543 */ 366 370 hw->tbi_compatibility_en = false; ··· 406 414 u32 manc; 407 415 u32 led_ctrl; 408 416 s32 ret_val; 409 - 410 - e_dbg("e1000_reset_hw"); 411 417 412 418 /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ 413 419 if (hw->mac_type == e1000_82542_rev2_0) { ··· 556 566 u32 mta_size; 557 567 u32 ctrl_ext; 558 568 559 - e_dbg("e1000_init_hw"); 560 - 561 569 /* Initialize Identification LED */ 562 570 ret_val = e1000_id_led_init(hw); 563 571 if (ret_val) { ··· 671 683 u16 eeprom_data; 672 684 s32 ret_val; 673 685 674 - e_dbg("e1000_adjust_serdes_amplitude"); 675 - 676 686 if (hw->media_type != e1000_media_type_internal_serdes) 677 687 return E1000_SUCCESS; 678 688 ··· 715 729 u32 ctrl_ext; 716 730 s32 ret_val; 717 731 u16 eeprom_data; 718 - 719 - e_dbg("e1000_setup_link"); 720 732 721 733 /* Read and store word 0x0F of the EEPROM. This word contains bits 722 734 * that determine the hardware's default PAUSE (flow control) mode, ··· 831 847 u32 i; 832 848 u32 signal = 0; 833 849 s32 ret_val; 834 - 835 - e_dbg("e1000_setup_fiber_serdes_link"); 836 850 837 851 /* On adapters with a MAC newer than 82544, SWDP 1 will be 838 852 * set when the optics detect a signal. 
On older adapters, it will be ··· 1033 1051 s32 ret_val; 1034 1052 u16 phy_data; 1035 1053 1036 - e_dbg("e1000_copper_link_preconfig"); 1037 - 1038 1054 ctrl = er32(CTRL); 1039 1055 /* With 82543, we need to force speed and duplex on the MAC equal to 1040 1056 * what the PHY speed and duplex configuration is. In addition, we need ··· 1091 1111 u32 led_ctrl; 1092 1112 s32 ret_val; 1093 1113 u16 phy_data; 1094 - 1095 - e_dbg("e1000_copper_link_igp_setup"); 1096 1114 1097 1115 if (hw->phy_reset_disable) 1098 1116 return E1000_SUCCESS; ··· 1232 1254 s32 ret_val; 1233 1255 u16 phy_data; 1234 1256 1235 - e_dbg("e1000_copper_link_mgp_setup"); 1236 - 1237 1257 if (hw->phy_reset_disable) 1238 1258 return E1000_SUCCESS; 1239 1259 ··· 1338 1362 s32 ret_val; 1339 1363 u16 phy_data; 1340 1364 1341 - e_dbg("e1000_copper_link_autoneg"); 1342 - 1343 1365 /* Perform some bounds checking on the hw->autoneg_advertised 1344 1366 * parameter. If this variable is zero, then set it to the default. 1345 1367 */ ··· 1406 1432 static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) 1407 1433 { 1408 1434 s32 ret_val; 1409 - e_dbg("e1000_copper_link_postconfig"); 1410 1435 1411 1436 if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { 1412 1437 e1000_config_collision_dist(hw); ··· 1445 1472 s32 ret_val; 1446 1473 u16 i; 1447 1474 u16 phy_data; 1448 - 1449 - e_dbg("e1000_setup_copper_link"); 1450 1475 1451 1476 /* Check if it is a valid PHY and set PHY mode if necessary. */ 1452 1477 ret_val = e1000_copper_link_preconfig(hw); ··· 1524 1553 s32 ret_val; 1525 1554 u16 mii_autoneg_adv_reg; 1526 1555 u16 mii_1000t_ctrl_reg; 1527 - 1528 - e_dbg("e1000_phy_setup_autoneg"); 1529 1556 1530 1557 /* Read the MII Auto-Neg Advertisement Register (Address 4). 
*/ 1531 1558 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); ··· 1675 1706 u16 mii_status_reg; 1676 1707 u16 phy_data; 1677 1708 u16 i; 1678 - 1679 - e_dbg("e1000_phy_force_speed_duplex"); 1680 1709 1681 1710 /* Turn off Flow control if we are forcing speed and duplex. */ 1682 1711 hw->fc = E1000_FC_NONE; ··· 1906 1939 { 1907 1940 u32 tctl, coll_dist; 1908 1941 1909 - e_dbg("e1000_config_collision_dist"); 1910 - 1911 1942 if (hw->mac_type < e1000_82543) 1912 1943 coll_dist = E1000_COLLISION_DISTANCE_82542; 1913 1944 else ··· 1934 1969 u32 ctrl; 1935 1970 s32 ret_val; 1936 1971 u16 phy_data; 1937 - 1938 - e_dbg("e1000_config_mac_to_phy"); 1939 1972 1940 1973 /* 82544 or newer MAC, Auto Speed Detection takes care of 1941 1974 * MAC speed/duplex configuration. ··· 2012 2049 { 2013 2050 u32 ctrl; 2014 2051 2015 - e_dbg("e1000_force_mac_fc"); 2016 - 2017 2052 /* Get the current configuration of the Device Control Register */ 2018 2053 ctrl = er32(CTRL); 2019 2054 ··· 2080 2119 u16 mii_nway_lp_ability_reg; 2081 2120 u16 speed; 2082 2121 u16 duplex; 2083 - 2084 - e_dbg("e1000_config_fc_after_link_up"); 2085 2122 2086 2123 /* Check for the case where we have fiber media and auto-neg failed 2087 2124 * so we had to force link. 
In this case, we need to force the ··· 2296 2337 u32 status; 2297 2338 s32 ret_val = E1000_SUCCESS; 2298 2339 2299 - e_dbg("e1000_check_for_serdes_link_generic"); 2300 - 2301 2340 ctrl = er32(CTRL); 2302 2341 status = er32(STATUS); 2303 2342 rxcw = er32(RXCW); ··· 2405 2448 u32 signal = 0; 2406 2449 s32 ret_val; 2407 2450 u16 phy_data; 2408 - 2409 - e_dbg("e1000_check_for_link"); 2410 2451 2411 2452 ctrl = er32(CTRL); 2412 2453 status = er32(STATUS); ··· 2587 2632 s32 ret_val; 2588 2633 u16 phy_data; 2589 2634 2590 - e_dbg("e1000_get_speed_and_duplex"); 2591 - 2592 2635 if (hw->mac_type >= e1000_82543) { 2593 2636 status = er32(STATUS); 2594 2637 if (status & E1000_STATUS_SPEED_1000) { ··· 2652 2699 u16 i; 2653 2700 u16 phy_data; 2654 2701 2655 - e_dbg("e1000_wait_autoneg"); 2656 2702 e_dbg("Waiting for Auto-Neg to complete.\n"); 2657 2703 2658 2704 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ ··· 2818 2866 u32 ret_val; 2819 2867 unsigned long flags; 2820 2868 2821 - e_dbg("e1000_read_phy_reg"); 2822 - 2823 2869 spin_lock_irqsave(&e1000_phy_lock, flags); 2824 2870 2825 2871 if ((hw->phy_type == e1000_phy_igp) && ··· 2843 2893 u32 i; 2844 2894 u32 mdic = 0; 2845 2895 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; 2846 - 2847 - e_dbg("e1000_read_phy_reg_ex"); 2848 2896 2849 2897 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2850 2898 e_dbg("PHY Address %d is out of range\n", reg_addr); ··· 2956 3008 u32 ret_val; 2957 3009 unsigned long flags; 2958 3010 2959 - e_dbg("e1000_write_phy_reg"); 2960 - 2961 3011 spin_lock_irqsave(&e1000_phy_lock, flags); 2962 3012 2963 3013 if ((hw->phy_type == e1000_phy_igp) && ··· 2981 3035 u32 i; 2982 3036 u32 mdic = 0; 2983 3037 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? 
hw->phy_addr : 1; 2984 - 2985 - e_dbg("e1000_write_phy_reg_ex"); 2986 3038 2987 3039 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2988 3040 e_dbg("PHY Address %d is out of range\n", reg_addr); ··· 3073 3129 u32 ctrl, ctrl_ext; 3074 3130 u32 led_ctrl; 3075 3131 3076 - e_dbg("e1000_phy_hw_reset"); 3077 - 3078 3132 e_dbg("Resetting Phy...\n"); 3079 3133 3080 3134 if (hw->mac_type > e1000_82543) { ··· 3131 3189 s32 ret_val; 3132 3190 u16 phy_data; 3133 3191 3134 - e_dbg("e1000_phy_reset"); 3135 - 3136 3192 switch (hw->phy_type) { 3137 3193 case e1000_phy_igp: 3138 3194 ret_val = e1000_phy_hw_reset(hw); ··· 3168 3228 s32 phy_init_status, ret_val; 3169 3229 u16 phy_id_high, phy_id_low; 3170 3230 bool match = false; 3171 - 3172 - e_dbg("e1000_detect_gig_phy"); 3173 3231 3174 3232 if (hw->phy_id != 0) 3175 3233 return E1000_SUCCESS; ··· 3239 3301 static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) 3240 3302 { 3241 3303 s32 ret_val; 3242 - e_dbg("e1000_phy_reset_dsp"); 3243 3304 3244 3305 do { 3245 3306 ret_val = e1000_write_phy_reg(hw, 29, 0x001d); ··· 3269 3332 s32 ret_val; 3270 3333 u16 phy_data, min_length, max_length, average; 3271 3334 e1000_rev_polarity polarity; 3272 - 3273 - e_dbg("e1000_phy_igp_get_info"); 3274 3335 3275 3336 /* The downshift status is checked only once, after link is established, 3276 3337 * and it stored in the hw->speed_downgraded parameter. ··· 3349 3414 u16 phy_data; 3350 3415 e1000_rev_polarity polarity; 3351 3416 3352 - e_dbg("e1000_phy_m88_get_info"); 3353 - 3354 3417 /* The downshift status is checked only once, after link is established, 3355 3418 * and it stored in the hw->speed_downgraded parameter. 
3356 3419 */ ··· 3420 3487 s32 ret_val; 3421 3488 u16 phy_data; 3422 3489 3423 - e_dbg("e1000_phy_get_info"); 3424 - 3425 3490 phy_info->cable_length = e1000_cable_length_undefined; 3426 3491 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; 3427 3492 phy_info->cable_polarity = e1000_rev_polarity_undefined; ··· 3458 3527 3459 3528 s32 e1000_validate_mdi_setting(struct e1000_hw *hw) 3460 3529 { 3461 - e_dbg("e1000_validate_mdi_settings"); 3462 - 3463 3530 if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { 3464 3531 e_dbg("Invalid MDI setting detected\n"); 3465 3532 hw->mdix = 1; ··· 3479 3550 u32 eecd = er32(EECD); 3480 3551 s32 ret_val = E1000_SUCCESS; 3481 3552 u16 eeprom_size; 3482 - 3483 - e_dbg("e1000_init_eeprom_params"); 3484 3553 3485 3554 switch (hw->mac_type) { 3486 3555 case e1000_82542_rev2_0: ··· 3697 3770 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3698 3771 u32 eecd, i = 0; 3699 3772 3700 - e_dbg("e1000_acquire_eeprom"); 3701 - 3702 3773 eecd = er32(EECD); 3703 3774 3704 3775 /* Request EEPROM Access */ ··· 3796 3871 { 3797 3872 u32 eecd; 3798 3873 3799 - e_dbg("e1000_release_eeprom"); 3800 - 3801 3874 eecd = er32(EECD); 3802 3875 3803 3876 if (hw->eeprom.type == e1000_eeprom_spi) { ··· 3842 3919 { 3843 3920 u16 retry_count = 0; 3844 3921 u8 spi_stat_reg; 3845 - 3846 - e_dbg("e1000_spi_eeprom_ready"); 3847 3922 3848 3923 /* Read "Status Register" repeatedly until the LSB is cleared. 
The 3849 3924 * EEPROM will signal that the command has been completed by clearing ··· 3894 3973 { 3895 3974 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3896 3975 u32 i = 0; 3897 - 3898 - e_dbg("e1000_read_eeprom"); 3899 3976 3900 3977 if (hw->mac_type == e1000_ce4100) { 3901 3978 GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, ··· 3995 4076 u16 checksum = 0; 3996 4077 u16 i, eeprom_data; 3997 4078 3998 - e_dbg("e1000_validate_eeprom_checksum"); 3999 - 4000 4079 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 4001 4080 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 4002 4081 e_dbg("EEPROM Read Error\n"); ··· 4028 4111 { 4029 4112 u16 checksum = 0; 4030 4113 u16 i, eeprom_data; 4031 - 4032 - e_dbg("e1000_update_eeprom_checksum"); 4033 4114 4034 4115 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { 4035 4116 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { ··· 4068 4153 { 4069 4154 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4070 4155 s32 status = 0; 4071 - 4072 - e_dbg("e1000_write_eeprom"); 4073 4156 4074 4157 if (hw->mac_type == e1000_ce4100) { 4075 4158 GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, ··· 4117 4204 { 4118 4205 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4119 4206 u16 widx = 0; 4120 - 4121 - e_dbg("e1000_write_eeprom_spi"); 4122 4207 4123 4208 while (widx < words) { 4124 4209 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; ··· 4184 4273 u32 eecd; 4185 4274 u16 words_written = 0; 4186 4275 u16 i = 0; 4187 - 4188 - e_dbg("e1000_write_eeprom_microwire"); 4189 4276 4190 4277 /* Send the write enable command to the EEPROM (3-bit opcode plus 4191 4278 * 6/8-bit dummy address beginning with 11). 
It's less work to include ··· 4263 4354 u16 offset; 4264 4355 u16 eeprom_data, i; 4265 4356 4266 - e_dbg("e1000_read_mac_addr"); 4267 - 4268 4357 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { 4269 4358 offset = i >> 1; 4270 4359 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { ··· 4300 4393 { 4301 4394 u32 i; 4302 4395 u32 rar_num; 4303 - 4304 - e_dbg("e1000_init_rx_addrs"); 4305 4396 4306 4397 /* Setup the receive address. */ 4307 4398 e_dbg("Programming MAC Address into RAR[0]\n"); ··· 4458 4553 u16 eeprom_data, i, temp; 4459 4554 const u16 led_mask = 0x0F; 4460 4555 4461 - e_dbg("e1000_id_led_init"); 4462 - 4463 4556 if (hw->mac_type < e1000_82540) { 4464 4557 /* Nothing to do */ 4465 4558 return E1000_SUCCESS; ··· 4529 4626 u32 ledctl; 4530 4627 s32 ret_val = E1000_SUCCESS; 4531 4628 4532 - e_dbg("e1000_setup_led"); 4533 - 4534 4629 switch (hw->mac_type) { 4535 4630 case e1000_82542_rev2_0: 4536 4631 case e1000_82542_rev2_1: ··· 4579 4678 { 4580 4679 s32 ret_val = E1000_SUCCESS; 4581 4680 4582 - e_dbg("e1000_cleanup_led"); 4583 - 4584 4681 switch (hw->mac_type) { 4585 4682 case e1000_82542_rev2_0: 4586 4683 case e1000_82542_rev2_1: ··· 4612 4713 s32 e1000_led_on(struct e1000_hw *hw) 4613 4714 { 4614 4715 u32 ctrl = er32(CTRL); 4615 - 4616 - e_dbg("e1000_led_on"); 4617 4716 4618 4717 switch (hw->mac_type) { 4619 4718 case e1000_82542_rev2_0: ··· 4656 4759 s32 e1000_led_off(struct e1000_hw *hw) 4657 4760 { 4658 4761 u32 ctrl = er32(CTRL); 4659 - 4660 - e_dbg("e1000_led_off"); 4661 4762 4662 4763 switch (hw->mac_type) { 4663 4764 case e1000_82542_rev2_0: ··· 4784 4889 */ 4785 4890 void e1000_reset_adaptive(struct e1000_hw *hw) 4786 4891 { 4787 - e_dbg("e1000_reset_adaptive"); 4788 - 4789 4892 if (hw->adaptive_ifs) { 4790 4893 if (!hw->ifs_params_forced) { 4791 4894 hw->current_ifs_val = 0; ··· 4810 4917 */ 4811 4918 void e1000_update_adaptive(struct e1000_hw *hw) 4812 4919 { 4813 - e_dbg("e1000_update_adaptive"); 4814 - 4815 4920 if (hw->adaptive_ifs) { 
4816 4921 if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { 4817 4922 if (hw->tx_packet_delta > MIN_NUM_XMITS) { ··· 5005 5114 u16 i, phy_data; 5006 5115 u16 cable_length; 5007 5116 5008 - e_dbg("e1000_get_cable_length"); 5009 - 5010 5117 *min_length = *max_length = 0; 5011 5118 5012 5119 /* Use old method for Phy older than IGP */ ··· 5120 5231 s32 ret_val; 5121 5232 u16 phy_data; 5122 5233 5123 - e_dbg("e1000_check_polarity"); 5124 - 5125 5234 if (hw->phy_type == e1000_phy_m88) { 5126 5235 /* return the Polarity bit in the Status register. */ 5127 5236 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, ··· 5185 5298 { 5186 5299 s32 ret_val; 5187 5300 u16 phy_data; 5188 - 5189 - e_dbg("e1000_check_downshift"); 5190 5301 5191 5302 if (hw->phy_type == e1000_phy_igp) { 5192 5303 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, ··· 5295 5410 { 5296 5411 s32 ret_val; 5297 5412 u16 phy_data, phy_saved_data, speed, duplex, i; 5298 - 5299 - e_dbg("e1000_config_dsp_after_link_change"); 5300 5413 5301 5414 if (hw->phy_type != e1000_phy_igp) 5302 5415 return E1000_SUCCESS; ··· 5429 5546 s32 ret_val; 5430 5547 u16 eeprom_data; 5431 5548 5432 - e_dbg("e1000_set_phy_mode"); 5433 - 5434 5549 if ((hw->mac_type == e1000_82545_rev_3) && 5435 5550 (hw->media_type == e1000_media_type_copper)) { 5436 5551 ret_val = ··· 5475 5594 { 5476 5595 s32 ret_val; 5477 5596 u16 phy_data; 5478 - e_dbg("e1000_set_d3_lplu_state"); 5479 5597 5480 5598 if (hw->phy_type != e1000_phy_igp) 5481 5599 return E1000_SUCCESS; ··· 5578 5698 s32 ret_val; 5579 5699 u16 default_page = 0; 5580 5700 u16 phy_data; 5581 - 5582 - e_dbg("e1000_set_vco_speed"); 5583 5701 5584 5702 switch (hw->mac_type) { 5585 5703 case e1000_82545_rev_3: ··· 5750 5872 */ 5751 5873 static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) 5752 5874 { 5753 - e_dbg("e1000_get_auto_rd_done"); 5754 5875 msleep(5); 5755 5876 return E1000_SUCCESS; 5756 5877 } ··· 5764 5887 */ 5765 5888 static s32 
e1000_get_phy_cfg_done(struct e1000_hw *hw) 5766 5889 { 5767 - e_dbg("e1000_get_phy_cfg_done"); 5768 5890 msleep(10); 5769 5891 return E1000_SUCCESS; 5770 5892 }
+5 -6
drivers/net/ethernet/intel/e1000/e1000_main.c
··· 2682 2682 u32 cmd_length = 0; 2683 2683 u16 ipcse = 0, tucse, mss; 2684 2684 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2685 - int err; 2686 2685 2687 2686 if (skb_is_gso(skb)) { 2688 - if (skb_header_cloned(skb)) { 2689 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2690 - if (err) 2691 - return err; 2692 - } 2687 + int err; 2688 + 2689 + err = skb_cow_head(skb, 0); 2690 + if (err < 0) 2691 + return err; 2693 2692 2694 2693 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2695 2694 mss = skb_shinfo(skb)->gso_size;
+4 -6
drivers/net/ethernet/intel/e1000e/netdev.c
··· 5100 5100 u32 cmd_length = 0; 5101 5101 u16 ipcse = 0, mss; 5102 5102 u8 ipcss, ipcso, tucss, tucso, hdr_len; 5103 + int err; 5103 5104 5104 5105 if (!skb_is_gso(skb)) 5105 5106 return 0; 5106 5107 5107 - if (skb_header_cloned(skb)) { 5108 - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 5109 - 5110 - if (err) 5111 - return err; 5112 - } 5108 + err = skb_cow_head(skb, 0); 5109 + if (err < 0) 5110 + return err; 5113 5111 5114 5112 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5115 5113 mss = skb_shinfo(skb)->gso_size;
+4 -6
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
··· 1114 1114 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) 1115 1115 { 1116 1116 u32 cd_cmd, cd_tso_len, cd_mss; 1117 + struct ipv6hdr *ipv6h; 1117 1118 struct tcphdr *tcph; 1118 1119 struct iphdr *iph; 1119 1120 u32 l4len; 1120 1121 int err; 1121 - struct ipv6hdr *ipv6h; 1122 1122 1123 1123 if (!skb_is_gso(skb)) 1124 1124 return 0; 1125 1125 1126 - if (skb_header_cloned(skb)) { 1127 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1128 - if (err) 1129 - return err; 1130 - } 1126 + err = skb_cow_head(skb, 0); 1127 + if (err < 0) 1128 + return err; 1131 1129 1132 1130 if (protocol == htons(ETH_P_IP)) { 1133 1131 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+16 -4
drivers/net/ethernet/intel/i40evf/i40evf_main.c
··· 1412 1412 schedule_work(&adapter->adminq_task); 1413 1413 } 1414 1414 1415 + /** 1416 + * i40evf_configure_rss - increment to next available tx queue 1417 + * @adapter: board private structure 1418 + * @j: queue counter 1419 + * 1420 + * Helper function for RSS programming to increment through available 1421 + * queus. Returns the next queue value. 1422 + **/ 1415 1423 static int next_queue(struct i40evf_adapter *adapter, int j) 1416 1424 { 1417 1425 j += 1; ··· 1459 1451 /* Populate the LUT with max no. of queues in round robin fashion */ 1460 1452 j = adapter->vsi_res->num_queue_pairs; 1461 1453 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { 1462 - lut = next_queue(adapter, j); 1463 - lut |= next_queue(adapter, j) << 8; 1464 - lut |= next_queue(adapter, j) << 16; 1465 - lut |= next_queue(adapter, j) << 24; 1454 + j = next_queue(adapter, j); 1455 + lut = j; 1456 + j = next_queue(adapter, j); 1457 + lut |= j << 8; 1458 + j = next_queue(adapter, j); 1459 + lut |= j << 16; 1460 + j = next_queue(adapter, j); 1461 + lut |= j << 24; 1466 1462 wr32(hw, I40E_VFQF_HLUT(i), lut); 1467 1463 } 1468 1464 i40e_flush(hw);
+1 -15
drivers/net/ethernet/intel/igb/igb.h
··· 241 241 struct igb_tx_buffer *tx_buffer_info; 242 242 struct igb_rx_buffer *rx_buffer_info; 243 243 }; 244 - unsigned long last_rx_timestamp; 245 244 void *desc; /* descriptor ring memory */ 246 245 unsigned long flags; /* ring specific flags */ 247 246 void __iomem *tail; /* pointer to ring tail register */ ··· 436 437 struct hwtstamp_config tstamp_config; 437 438 unsigned long ptp_tx_start; 438 439 unsigned long last_rx_ptp_check; 440 + unsigned long last_rx_timestamp; 439 441 spinlock_t tmreg_lock; 440 442 struct cyclecounter cc; 441 443 struct timecounter tc; ··· 533 533 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); 534 534 void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, 535 535 struct sk_buff *skb); 536 - static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring, 537 - union e1000_adv_rx_desc *rx_desc, 538 - struct sk_buff *skb) 539 - { 540 - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && 541 - !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) 542 - igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); 543 - 544 - /* Update the last_rx_timestamp timer in order to enable watchdog check 545 - * for error case of latched timestamp on a dropped packet. 546 - */ 547 - rx_ring->last_rx_timestamp = jiffies; 548 - } 549 - 550 536 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); 551 537 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); 552 538 #ifdef CONFIG_IGB_HWMON
+7 -6
drivers/net/ethernet/intel/igb/igb_main.c
··· 4605 4605 struct sk_buff *skb = first->skb; 4606 4606 u32 vlan_macip_lens, type_tucmd; 4607 4607 u32 mss_l4len_idx, l4len; 4608 + int err; 4608 4609 4609 4610 if (skb->ip_summed != CHECKSUM_PARTIAL) 4610 4611 return 0; ··· 4613 4612 if (!skb_is_gso(skb)) 4614 4613 return 0; 4615 4614 4616 - if (skb_header_cloned(skb)) { 4617 - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4618 - if (err) 4619 - return err; 4620 - } 4615 + err = skb_cow_head(skb, 0); 4616 + if (err < 0) 4617 + return err; 4621 4618 4622 4619 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 4623 4620 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; ··· 6954 6955 6955 6956 igb_rx_checksum(rx_ring, rx_desc, skb); 6956 6957 6957 - igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); 6958 + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && 6959 + !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) 6960 + igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); 6958 6961 6959 6962 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 6960 6963 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+7 -7
drivers/net/ethernet/intel/igb/igb_ptp.c
··· 427 427 void igb_ptp_rx_hang(struct igb_adapter *adapter) 428 428 { 429 429 struct e1000_hw *hw = &adapter->hw; 430 - struct igb_ring *rx_ring; 431 430 u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); 432 431 unsigned long rx_event; 433 - int n; 434 432 435 433 if (hw->mac.type != e1000_82576) 436 434 return; ··· 443 445 444 446 /* Determine the most recent watchdog or rx_timestamp event */ 445 447 rx_event = adapter->last_rx_ptp_check; 446 - for (n = 0; n < adapter->num_rx_queues; n++) { 447 - rx_ring = adapter->rx_ring[n]; 448 - if (time_after(rx_ring->last_rx_timestamp, rx_event)) 449 - rx_event = rx_ring->last_rx_timestamp; 450 - } 448 + if (time_after(adapter->last_rx_timestamp, rx_event)) 449 + rx_event = adapter->last_rx_timestamp; 451 450 452 451 /* Only need to read the high RXSTMP register to clear the lock */ 453 452 if (time_is_before_jiffies(rx_event + 5 * HZ)) { ··· 535 540 regval |= (u64)rd32(E1000_RXSTMPH) << 32; 536 541 537 542 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 543 + 544 + /* Update the last_rx_timestamp timer in order to enable watchdog check 545 + * for error case of latched timestamp on a dropped packet. 546 + */ 547 + adapter->last_rx_timestamp = jiffies; 538 548 } 539 549 540 550 /**
+7 -9
drivers/net/ethernet/intel/igbvf/netdev.c
··· 1910 1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1911 1911 { 1912 1912 struct e1000_adv_tx_context_desc *context_desc; 1913 - unsigned int i; 1914 - int err; 1915 1913 struct igbvf_buffer *buffer_info; 1916 1914 u32 info = 0, tu_cmd = 0; 1917 1915 u32 mss_l4len_idx, l4len; 1916 + unsigned int i; 1917 + int err; 1918 + 1918 1919 *hdr_len = 0; 1919 1920 1920 - if (skb_header_cloned(skb)) { 1921 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1922 - if (err) { 1923 - dev_err(&adapter->pdev->dev, 1924 - "igbvf_tso returning an error\n"); 1925 - return err; 1926 - } 1921 + err = skb_cow_head(skb, 0); 1922 + if (err < 0) { 1923 + dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n"); 1924 + return err; 1927 1925 } 1928 1926 1929 1927 l4len = tcp_hdrlen(skb);
+4 -6
drivers/net/ethernet/intel/ixgb/ixgb_main.c
··· 1220 1220 unsigned int i; 1221 1221 u8 ipcss, ipcso, tucss, tucso, hdr_len; 1222 1222 u16 ipcse, tucse, mss; 1223 - int err; 1224 1223 1225 1224 if (likely(skb_is_gso(skb))) { 1226 1225 struct ixgb_buffer *buffer_info; 1227 1226 struct iphdr *iph; 1227 + int err; 1228 1228 1229 - if (skb_header_cloned(skb)) { 1230 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1231 - if (err) 1232 - return err; 1233 - } 1229 + err = skb_cow_head(skb, 0); 1230 + if (err < 0) 1231 + return err; 1234 1232 1235 1233 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1236 1234 mss = skb_shinfo(skb)->gso_size;
+1
drivers/net/ethernet/intel/ixgbe/ixgbe.h
··· 811 811 __IXGBE_DISABLED, 812 812 __IXGBE_REMOVING, 813 813 __IXGBE_SERVICE_SCHED, 814 + __IXGBE_SERVICE_INITED, 814 815 __IXGBE_IN_SFP_INIT, 815 816 __IXGBE_PTP_RUNNING, 816 817 __IXGBE_PTP_TX_IN_PROGRESS,
+20 -8
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 297 297 return; 298 298 hw->hw_addr = NULL; 299 299 e_dev_err("Adapter removed\n"); 300 - ixgbe_service_event_schedule(adapter); 300 + if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) 301 + ixgbe_service_event_schedule(adapter); 301 302 } 302 303 303 304 void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) ··· 6510 6509 struct sk_buff *skb = first->skb; 6511 6510 u32 vlan_macip_lens, type_tucmd; 6512 6511 u32 mss_l4len_idx, l4len; 6512 + int err; 6513 6513 6514 6514 if (skb->ip_summed != CHECKSUM_PARTIAL) 6515 6515 return 0; ··· 6518 6516 if (!skb_is_gso(skb)) 6519 6517 return 0; 6520 6518 6521 - if (skb_header_cloned(skb)) { 6522 - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 6523 - if (err) 6524 - return err; 6525 - } 6519 + err = skb_cow_head(skb, 0); 6520 + if (err < 0) 6521 + return err; 6526 6522 6527 6523 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 6528 6524 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; ··· 7077 7077 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; 7078 7078 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { 7079 7079 struct vlan_ethhdr *vhdr; 7080 - if (skb_header_cloned(skb) && 7081 - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 7080 + 7081 + if (skb_cow_head(skb, 0)) 7082 7082 goto out_drop; 7083 7083 vhdr = (struct vlan_ethhdr *)skb->data; 7084 7084 vhdr->h_vlan_TCI = htons(tx_flags >> ··· 8023 8023 /* EEPROM */ 8024 8024 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); 8025 8025 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 8026 + if (ixgbe_removed(hw->hw_addr)) { 8027 + err = -EIO; 8028 + goto err_ioremap; 8029 + } 8026 8030 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ 8027 8031 if (!(eec & (1 << 8))) 8028 8032 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; ··· 8189 8185 setup_timer(&adapter->service_timer, &ixgbe_service_timer, 8190 8186 (unsigned long) adapter); 8191 8187 8188 + if (ixgbe_removed(hw->hw_addr)) { 8189 + err = -EIO; 8190 + goto err_sw_init; 8191 + } 8192 8192 INIT_WORK(&adapter->service_task, 
ixgbe_service_task); 8193 + set_bit(__IXGBE_SERVICE_INITED, &adapter->state); 8193 8194 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); 8194 8195 8195 8196 err = ixgbe_init_interrupt_scheme(adapter); ··· 8503 8494 8504 8495 skip_bad_vf_detection: 8505 8496 #endif /* CONFIG_PCI_IOV */ 8497 + if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) 8498 + return PCI_ERS_RESULT_DISCONNECT; 8499 + 8506 8500 rtnl_lock(); 8507 8501 netif_device_detach(netdev); 8508 8502
+1
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
··· 421 421 __IXGBEVF_DOWN, 422 422 __IXGBEVF_DISABLED, 423 423 __IXGBEVF_REMOVING, 424 + __IXGBEVF_WORK_INIT, 424 425 }; 425 426 426 427 struct ixgbevf_cb {
+14 -6
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 107 107 return; 108 108 hw->hw_addr = NULL; 109 109 dev_err(&adapter->pdev->dev, "Adapter removed\n"); 110 - schedule_work(&adapter->watchdog_task); 110 + if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state)) 111 + schedule_work(&adapter->watchdog_task); 111 112 } 112 113 113 114 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) ··· 2839 2838 struct sk_buff *skb = first->skb; 2840 2839 u32 vlan_macip_lens, type_tucmd; 2841 2840 u32 mss_l4len_idx, l4len; 2841 + int err; 2842 2842 2843 2843 if (skb->ip_summed != CHECKSUM_PARTIAL) 2844 2844 return 0; ··· 2847 2845 if (!skb_is_gso(skb)) 2848 2846 return 0; 2849 2847 2850 - if (skb_header_cloned(skb)) { 2851 - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2852 - if (err) 2853 - return err; 2854 - } 2848 + err = skb_cow_head(skb, 0); 2849 + if (err < 0) 2850 + return err; 2855 2851 2856 2852 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2857 2853 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; ··· 3573 3573 adapter->watchdog_timer.function = ixgbevf_watchdog; 3574 3574 adapter->watchdog_timer.data = (unsigned long)adapter; 3575 3575 3576 + if (IXGBE_REMOVED(hw->hw_addr)) { 3577 + err = -EIO; 3578 + goto err_sw_init; 3579 + } 3576 3580 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3577 3581 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3582 + set_bit(__IXGBEVF_WORK_INIT, &adapter->state); 3578 3583 3579 3584 err = ixgbevf_init_interrupt_scheme(adapter); 3580 3585 if (err) ··· 3671 3666 { 3672 3667 struct net_device *netdev = pci_get_drvdata(pdev); 3673 3668 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3669 + 3670 + if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state)) 3671 + return PCI_ERS_RESULT_DISCONNECT; 3674 3672 3675 3673 rtnl_lock(); 3676 3674 netif_device_detach(netdev);