Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull yet more networking updates from David Miller:

1) Various fixes to the new Redpine Signals wireless driver, from
Fariya Fatima.

2) L2TP PPP connect code takes PMTU from the wrong socket, fix from
Dmitry Petukhov.

3) UFO and TSO packets differ in whether they include the protocol
header in gso_size; account for that in skb_gso_transport_seglen().
From Florian Westphal. (A sketch of the distinction follows this list.)

4) If VLAN untagging fails, we double free the SKB in the bridge input
path (br_allowed_ingress). From Toshiaki Makita.

5) Several call sites of sk->sk_data_ready() were referencing an SKB
just added to the socket receive queue in order to calculate the
second argument via skb->len. This is dangerous because the moment
the skb is added to the receive queue it can be consumed in another
context and freed up.

It turns out also that none of the sk->sk_data_ready()
implementations even care about this second argument.

So just kill it off and thus fix all these use-after-free bugs as a
side effect. (A sketch of the race appears after the include/net/sock.h
hunk below.)

6) Fix inverted test in tcp_v6_send_response(), from Lorenzo Colitti.

7) pktgen needs to do locking properly for LLTX devices, from Daniel
Borkmann.

8) xen-netfront driver initializes TX array entries in RX loop :-) From
Vincenzo Maffione.

9) After refactoring, some tunnel drivers allow a tunnel to be
configured on top of itself. Fix from Nicolas Dichtel.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (46 commits)
vti: don't allow to add the same tunnel twice
gre: don't allow to add the same tunnel twice
drivers: net: xen-netfront: fix array initialization bug
pktgen: be friendly to LLTX devices
r8152: check RTL8152_UNPLUG
net: sun4i-emac: add promiscuous support
net/apne: replace IS_ERR and PTR_ERR with PTR_ERR_OR_ZERO
net: ipv6: Fix oif in TCP SYN+ACK route lookup.
drivers: net: cpsw: enable interrupts after napi enable and clearing previous interrupts
drivers: net: cpsw: discard all packets received when interface is down
net: Fix use after free by removing length arg from sk_data_ready callbacks.
Drivers: net: hyperv: Address UDP checksum issues
Drivers: net: hyperv: Negotiate suitable ndis version for offload support
Drivers: net: hyperv: Allocate memory for all possible per-packet information
bridge: Fix double free and memory leak around br_allowed_ingress
bonding: Remove debug_fs files when module init fails
i40evf: program RSS LUT correctly
i40evf: remove open-coded skb_cow_head
ixgb: remove open-coded skb_cow_head
igbvf: remove open-coded skb_cow_head
...

+420 -433
+1
drivers/net/bonding/bond_main.c
···
 out:
 	return res;
 err:
+	bond_destroy_debugfs();
 	bond_netlink_fini();
 err_link:
 	unregister_pernet_subsys(&bond_net_ops);
+1 -3
drivers/net/ethernet/8390/apne.c
···
 static int __init apne_module_init(void)
 {
 	apne_dev = apne_probe(-1);
-	if (IS_ERR(apne_dev))
-		return PTR_ERR(apne_dev);
-	return 0;
+	return PTR_ERR_OR_ZERO(apne_dev);
 }

 static void __exit apne_module_exit(void)
+21 -9
drivers/net/ethernet/allwinner/sun4i-emac.c
···
 	writel(reg_val | EMAC_TX_MODE_ABORTED_FRAME_EN,
 		db->membase + EMAC_TX_MODE_REG);

-	/* set up RX */
-	reg_val = readl(db->membase + EMAC_RX_CTL_REG);
-
-	writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN |
-		EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN |
-		EMAC_RX_CTL_ACCEPT_MULTICAST_EN |
-		EMAC_RX_CTL_ACCEPT_BROADCAST_EN,
-		db->membase + EMAC_RX_CTL_REG);
-
 	/* set MAC */
 	/* set MAC CTL0 */
 	reg_val = readl(db->membase + EMAC_MAC_CTL0_REG);
···
 		db->membase + EMAC_MAC_MAXF_REG);

 	return 0;
+}
+
+static void emac_set_rx_mode(struct net_device *ndev)
+{
+	struct emac_board_info *db = netdev_priv(ndev);
+	unsigned int reg_val;
+
+	/* set up RX */
+	reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+
+	if (ndev->flags & IFF_PROMISC)
+		reg_val |= EMAC_RX_CTL_PASS_ALL_EN;
+	else
+		reg_val &= ~EMAC_RX_CTL_PASS_ALL_EN;
+
+	writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN |
+		EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN |
+		EMAC_RX_CTL_ACCEPT_MULTICAST_EN |
+		EMAC_RX_CTL_ACCEPT_BROADCAST_EN,
+		db->membase + EMAC_RX_CTL_REG);
 }

 static unsigned int emac_powerup(struct net_device *ndev)
···
 	.ndo_stop = emac_stop,
 	.ndo_start_xmit = emac_start_xmit,
 	.ndo_tx_timeout = emac_timeout,
+	.ndo_set_rx_mode = emac_set_rx_mode,
 	.ndo_do_ioctl = emac_ioctl,
 	.ndo_change_mtu = eth_change_mtu,
 	.ndo_validate_addr = eth_validate_addr,
-124
drivers/net/ethernet/intel/e1000/e1000_hw.c
···
  */
 static s32 e1000_set_phy_type(struct e1000_hw *hw)
 {
-	e_dbg("e1000_set_phy_type");
-
 	if (hw->mac_type == e1000_undefined)
 		return -E1000_ERR_PHY_TYPE;
···
 {
 	u32 ret_val;
 	u16 phy_saved_data;
-
-	e_dbg("e1000_phy_init_script");

 	if (hw->phy_init_script) {
 		msleep(20);
···
  */
 s32 e1000_set_mac_type(struct e1000_hw *hw)
 {
-	e_dbg("e1000_set_mac_type");
-
 	switch (hw->device_id) {
 	case E1000_DEV_ID_82542:
 		switch (hw->revision_id) {
···
 {
 	u32 status;

-	e_dbg("e1000_set_media_type");
-
 	if (hw->mac_type != e1000_82543) {
 		/* tbi_compatibility is only valid on 82543 */
 		hw->tbi_compatibility_en = false;
···
 	u32 manc;
 	u32 led_ctrl;
 	s32 ret_val;
-
-	e_dbg("e1000_reset_hw");

 	/* For 82542 (rev 2.0), disable MWI before issuing a device reset */
 	if (hw->mac_type == e1000_82542_rev2_0) {
···
 	u32 mta_size;
 	u32 ctrl_ext;

-	e_dbg("e1000_init_hw");
-
 	/* Initialize Identification LED */
 	ret_val = e1000_id_led_init(hw);
 	if (ret_val) {
···
 	u16 eeprom_data;
 	s32 ret_val;

-	e_dbg("e1000_adjust_serdes_amplitude");
-
 	if (hw->media_type != e1000_media_type_internal_serdes)
 		return E1000_SUCCESS;
···
 	u32 ctrl_ext;
 	s32 ret_val;
 	u16 eeprom_data;
-
-	e_dbg("e1000_setup_link");

 	/* Read and store word 0x0F of the EEPROM. This word contains bits
 	 * that determine the hardware's default PAUSE (flow control) mode,
···
 	u32 i;
 	u32 signal = 0;
 	s32 ret_val;
-
-	e_dbg("e1000_setup_fiber_serdes_link");

 	/* On adapters with a MAC newer than 82544, SWDP 1 will be
 	 * set when the optics detect a signal. On older adapters, it will be
···
 	s32 ret_val;
 	u16 phy_data;

-	e_dbg("e1000_copper_link_preconfig");
-
 	ctrl = er32(CTRL);
 	/* With 82543, we need to force speed and duplex on the MAC equal to
 	 * what the PHY speed and duplex configuration is. In addition, we need
···
 	u32 led_ctrl;
 	s32 ret_val;
 	u16 phy_data;
-
-	e_dbg("e1000_copper_link_igp_setup");

 	if (hw->phy_reset_disable)
 		return E1000_SUCCESS;
···
 	s32 ret_val;
 	u16 phy_data;

-	e_dbg("e1000_copper_link_mgp_setup");
-
 	if (hw->phy_reset_disable)
 		return E1000_SUCCESS;
···
 	s32 ret_val;
 	u16 phy_data;

-	e_dbg("e1000_copper_link_autoneg");
-
 	/* Perform some bounds checking on the hw->autoneg_advertised
 	 * parameter. If this variable is zero, then set it to the default.
 	 */
···
 static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
 {
 	s32 ret_val;
-	e_dbg("e1000_copper_link_postconfig");

 	if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
 		e1000_config_collision_dist(hw);
···
 	s32 ret_val;
 	u16 i;
 	u16 phy_data;
-
-	e_dbg("e1000_setup_copper_link");

 	/* Check if it is a valid PHY and set PHY mode if necessary. */
 	ret_val = e1000_copper_link_preconfig(hw);
···
 	s32 ret_val;
 	u16 mii_autoneg_adv_reg;
 	u16 mii_1000t_ctrl_reg;
-
-	e_dbg("e1000_phy_setup_autoneg");

 	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
 	ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
···
 	u16 mii_status_reg;
 	u16 phy_data;
 	u16 i;
-
-	e_dbg("e1000_phy_force_speed_duplex");

 	/* Turn off Flow control if we are forcing speed and duplex. */
 	hw->fc = E1000_FC_NONE;
···
 {
 	u32 tctl, coll_dist;

-	e_dbg("e1000_config_collision_dist");
-
 	if (hw->mac_type < e1000_82543)
 		coll_dist = E1000_COLLISION_DISTANCE_82542;
 	else
···
 	u32 ctrl;
 	s32 ret_val;
 	u16 phy_data;
-
-	e_dbg("e1000_config_mac_to_phy");

 	/* 82544 or newer MAC, Auto Speed Detection takes care of
 	 * MAC speed/duplex configuration.
···
 {
 	u32 ctrl;

-	e_dbg("e1000_force_mac_fc");
-
 	/* Get the current configuration of the Device Control Register */
 	ctrl = er32(CTRL);
···
 	u16 mii_nway_lp_ability_reg;
 	u16 speed;
 	u16 duplex;
-
-	e_dbg("e1000_config_fc_after_link_up");

 	/* Check for the case where we have fiber media and auto-neg failed
 	 * so we had to force link. In this case, we need to force the
···
 	u32 status;
 	s32 ret_val = E1000_SUCCESS;

-	e_dbg("e1000_check_for_serdes_link_generic");
-
 	ctrl = er32(CTRL);
 	status = er32(STATUS);
 	rxcw = er32(RXCW);
···
 	u32 signal = 0;
 	s32 ret_val;
 	u16 phy_data;
-
-	e_dbg("e1000_check_for_link");

 	ctrl = er32(CTRL);
 	status = er32(STATUS);
···
 	s32 ret_val;
 	u16 phy_data;

-	e_dbg("e1000_get_speed_and_duplex");
-
 	if (hw->mac_type >= e1000_82543) {
 		status = er32(STATUS);
 		if (status & E1000_STATUS_SPEED_1000) {
···
 	u16 i;
 	u16 phy_data;

-	e_dbg("e1000_wait_autoneg");
 	e_dbg("Waiting for Auto-Neg to complete.\n");

 	/* We will wait for autoneg to complete or 4.5 seconds to expire. */
···
 	u32 ret_val;
 	unsigned long flags;

-	e_dbg("e1000_read_phy_reg");
-
 	spin_lock_irqsave(&e1000_phy_lock, flags);

 	if ((hw->phy_type == e1000_phy_igp) &&
···
 	u32 i;
 	u32 mdic = 0;
 	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
-
-	e_dbg("e1000_read_phy_reg_ex");

 	if (reg_addr > MAX_PHY_REG_ADDRESS) {
 		e_dbg("PHY Address %d is out of range\n", reg_addr);
···
 	u32 ret_val;
 	unsigned long flags;

-	e_dbg("e1000_write_phy_reg");
-
 	spin_lock_irqsave(&e1000_phy_lock, flags);

 	if ((hw->phy_type == e1000_phy_igp) &&
···
 	u32 i;
 	u32 mdic = 0;
 	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ?
 			     hw->phy_addr : 1;
-
-	e_dbg("e1000_write_phy_reg_ex");

 	if (reg_addr > MAX_PHY_REG_ADDRESS) {
 		e_dbg("PHY Address %d is out of range\n", reg_addr);
···
 	u32 ctrl, ctrl_ext;
 	u32 led_ctrl;

-	e_dbg("e1000_phy_hw_reset");
-
 	e_dbg("Resetting Phy...\n");

 	if (hw->mac_type > e1000_82543) {
···
 	s32 ret_val;
 	u16 phy_data;

-	e_dbg("e1000_phy_reset");
-
 	switch (hw->phy_type) {
 	case e1000_phy_igp:
 		ret_val = e1000_phy_hw_reset(hw);
···
 	s32 phy_init_status, ret_val;
 	u16 phy_id_high, phy_id_low;
 	bool match = false;
-
-	e_dbg("e1000_detect_gig_phy");

 	if (hw->phy_id != 0)
 		return E1000_SUCCESS;
···
 static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
 {
 	s32 ret_val;
-	e_dbg("e1000_phy_reset_dsp");

 	do {
 		ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
···
 	s32 ret_val;
 	u16 phy_data, min_length, max_length, average;
 	e1000_rev_polarity polarity;
-
-	e_dbg("e1000_phy_igp_get_info");

 	/* The downshift status is checked only once, after link is established,
 	 * and it stored in the hw->speed_downgraded parameter.
···
 	u16 phy_data;
 	e1000_rev_polarity polarity;

-	e_dbg("e1000_phy_m88_get_info");
-
 	/* The downshift status is checked only once, after link is established,
 	 * and it stored in the hw->speed_downgraded parameter.
 	 */
···
 	s32 ret_val;
 	u16 phy_data;

-	e_dbg("e1000_phy_get_info");
-
 	phy_info->cable_length = e1000_cable_length_undefined;
 	phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
 	phy_info->cable_polarity = e1000_rev_polarity_undefined;
···

 s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
 {
-	e_dbg("e1000_validate_mdi_settings");
-
 	if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
 		e_dbg("Invalid MDI setting detected\n");
 		hw->mdix = 1;
···
 	u32 eecd = er32(EECD);
 	s32 ret_val = E1000_SUCCESS;
 	u16 eeprom_size;
-
-	e_dbg("e1000_init_eeprom_params");

 	switch (hw->mac_type) {
 	case e1000_82542_rev2_0:
···
 	struct e1000_eeprom_info *eeprom = &hw->eeprom;
 	u32 eecd, i = 0;

-	e_dbg("e1000_acquire_eeprom");
-
 	eecd = er32(EECD);

 	/* Request EEPROM Access */
···
 {
 	u32 eecd;

-	e_dbg("e1000_release_eeprom");
-
 	eecd = er32(EECD);

 	if (hw->eeprom.type == e1000_eeprom_spi) {
···
 {
 	u16 retry_count = 0;
 	u8 spi_stat_reg;
-
-	e_dbg("e1000_spi_eeprom_ready");

 	/* Read "Status Register" repeatedly until the LSB is cleared. The
 	 * EEPROM will signal that the command has been completed by clearing
···
 {
 	struct e1000_eeprom_info *eeprom = &hw->eeprom;
 	u32 i = 0;
-
-	e_dbg("e1000_read_eeprom");

 	if (hw->mac_type == e1000_ce4100) {
 		GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
···
 	u16 checksum = 0;
 	u16 i, eeprom_data;

-	e_dbg("e1000_validate_eeprom_checksum");
-
 	for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
 		if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
 			e_dbg("EEPROM Read Error\n");
···
 {
 	u16 checksum = 0;
 	u16 i, eeprom_data;
-
-	e_dbg("e1000_update_eeprom_checksum");

 	for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
 		if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
···
 {
 	struct e1000_eeprom_info *eeprom = &hw->eeprom;
 	s32 status = 0;
-
-	e_dbg("e1000_write_eeprom");

 	if (hw->mac_type == e1000_ce4100) {
 		GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
···
 {
 	struct e1000_eeprom_info *eeprom = &hw->eeprom;
 	u16 widx = 0;
-
-	e_dbg("e1000_write_eeprom_spi");

 	while (widx < words) {
 		u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
···
 	u32 eecd;
 	u16 words_written = 0;
 	u16 i = 0;
-
-	e_dbg("e1000_write_eeprom_microwire");

 	/* Send the write enable command to the EEPROM (3-bit opcode plus
 	 * 6/8-bit dummy address beginning with 11). It's less work to include
···
 	u16 offset;
 	u16 eeprom_data, i;

-	e_dbg("e1000_read_mac_addr");
-
 	for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
 		offset = i >> 1;
 		if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
···
 {
 	u32 i;
 	u32 rar_num;
-
-	e_dbg("e1000_init_rx_addrs");

 	/* Setup the receive address. */
 	e_dbg("Programming MAC Address into RAR[0]\n");
···
 	u16 eeprom_data, i, temp;
 	const u16 led_mask = 0x0F;

-	e_dbg("e1000_id_led_init");
-
 	if (hw->mac_type < e1000_82540) {
 		/* Nothing to do */
 		return E1000_SUCCESS;
···
 	u32 ledctl;
 	s32 ret_val = E1000_SUCCESS;

-	e_dbg("e1000_setup_led");
-
 	switch (hw->mac_type) {
 	case e1000_82542_rev2_0:
 	case e1000_82542_rev2_1:
···
 {
 	s32 ret_val = E1000_SUCCESS;

-	e_dbg("e1000_cleanup_led");
-
 	switch (hw->mac_type) {
 	case e1000_82542_rev2_0:
 	case e1000_82542_rev2_1:
···
 s32 e1000_led_on(struct e1000_hw *hw)
 {
 	u32 ctrl = er32(CTRL);
-
-	e_dbg("e1000_led_on");

 	switch (hw->mac_type) {
 	case e1000_82542_rev2_0:
···
 s32 e1000_led_off(struct e1000_hw *hw)
 {
 	u32 ctrl = er32(CTRL);
-
-	e_dbg("e1000_led_off");

 	switch (hw->mac_type) {
 	case e1000_82542_rev2_0:
···
  */
 void e1000_reset_adaptive(struct e1000_hw *hw)
 {
-	e_dbg("e1000_reset_adaptive");
-
 	if (hw->adaptive_ifs) {
 		if (!hw->ifs_params_forced) {
 			hw->current_ifs_val = 0;
···
  */
 void e1000_update_adaptive(struct e1000_hw *hw)
 {
-	e_dbg("e1000_update_adaptive");
-
 	if (hw->adaptive_ifs) {
 		if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
 			if (hw->tx_packet_delta > MIN_NUM_XMITS) {
···
 	u16 i, phy_data;
 	u16 cable_length;

-	e_dbg("e1000_get_cable_length");
-
 	*min_length = *max_length = 0;

 	/* Use old method for Phy older than IGP */
···
 	s32 ret_val;
 	u16 phy_data;

-	e_dbg("e1000_check_polarity");
-
 	if (hw->phy_type == e1000_phy_m88) {
 		/* return the Polarity bit in the Status register. */
 		ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
···
 {
 	s32 ret_val;
 	u16 phy_data;
-
-	e_dbg("e1000_check_downshift");

 	if (hw->phy_type == e1000_phy_igp) {
 		ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
···
 {
 	s32 ret_val;
 	u16 phy_data, phy_saved_data, speed, duplex, i;
-
-	e_dbg("e1000_config_dsp_after_link_change");

 	if (hw->phy_type != e1000_phy_igp)
 		return E1000_SUCCESS;
···
 	s32 ret_val;
 	u16 eeprom_data;

-	e_dbg("e1000_set_phy_mode");
-
 	if ((hw->mac_type == e1000_82545_rev_3) &&
 	    (hw->media_type == e1000_media_type_copper)) {
 		ret_val =
···
 {
 	s32 ret_val;
 	u16 phy_data;
-	e_dbg("e1000_set_d3_lplu_state");

 	if (hw->phy_type != e1000_phy_igp)
 		return E1000_SUCCESS;
···
 	s32 ret_val;
 	u16 default_page = 0;
 	u16 phy_data;
-
-	e_dbg("e1000_set_vco_speed");

 	switch (hw->mac_type) {
 	case e1000_82545_rev_3:
···
  */
 static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
 {
-	e_dbg("e1000_get_auto_rd_done");
 	msleep(5);
 	return E1000_SUCCESS;
 }
···
  */
 static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
 {
-	e_dbg("e1000_get_phy_cfg_done");
 	msleep(10);
 	return E1000_SUCCESS;
 }
+5 -6
drivers/net/ethernet/intel/e1000/e1000_main.c
···
 	u32 cmd_length = 0;
 	u16 ipcse = 0, tucse, mss;
 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
-	int err;

 	if (skb_is_gso(skb)) {
-		if (skb_header_cloned(skb)) {
-			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-			if (err)
-				return err;
-		}
+		int err;
+
+		err = skb_cow_head(skb, 0);
+		if (err < 0)
+			return err;

 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 		mss = skb_shinfo(skb)->gso_size;
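
The e1000 hunk above is the first of several "remove open-coded
skb_cow_head" conversions in this batch (e1000e, i40evf, ixgb, igb,
igbvf, ixgbe and ixgbevf follow). For reference, skb_cow_head() wraps
the same unclone-before-editing-headers logic the drivers used to spell
out by hand; its core is roughly:

	static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
	{
		return __skb_cow(skb, headroom, skb_header_cloned(skb));
	}

i.e. it only ends up calling pskb_expand_head() when the header area is
shared, and returns 0 or a negative errno, which is why the converted
call sites can simply check for err < 0.
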
+4 -6
drivers/net/ethernet/intel/e1000e/netdev.c
···
 	u32 cmd_length = 0;
 	u16 ipcse = 0, mss;
 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
+	int err;

 	if (!skb_is_gso(skb))
 		return 0;

-	if (skb_header_cloned(skb)) {
-		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-
-		if (err)
-			return err;
-	}
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;

 	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	mss = skb_shinfo(skb)->gso_size;
+4 -6
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
···
 			    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
 {
 	u32 cd_cmd, cd_tso_len, cd_mss;
+	struct ipv6hdr *ipv6h;
 	struct tcphdr *tcph;
 	struct iphdr *iph;
 	u32 l4len;
 	int err;
-	struct ipv6hdr *ipv6h;

 	if (!skb_is_gso(skb))
 		return 0;

-	if (skb_header_cloned(skb)) {
-		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-		if (err)
-			return err;
-	}
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;

 	if (protocol == htons(ETH_P_IP)) {
 		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+16 -4
drivers/net/ethernet/intel/i40evf/i40evf_main.c
···
 	schedule_work(&adapter->adminq_task);
 }

+/**
+ * i40evf_configure_rss - increment to next available tx queue
+ * @adapter: board private structure
+ * @j: queue counter
+ *
+ * Helper function for RSS programming to increment through available
+ * queus. Returns the next queue value.
+ **/
 static int next_queue(struct i40evf_adapter *adapter, int j)
 {
 	j += 1;
···
 	/* Populate the LUT with max no. of queues in round robin fashion */
 	j = adapter->vsi_res->num_queue_pairs;
 	for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
-		lut = next_queue(adapter, j);
-		lut |= next_queue(adapter, j) << 8;
-		lut |= next_queue(adapter, j) << 16;
-		lut |= next_queue(adapter, j) << 24;
+		j = next_queue(adapter, j);
+		lut = j;
+		j = next_queue(adapter, j);
+		lut |= j << 8;
+		j = next_queue(adapter, j);
+		lut |= j << 16;
+		j = next_queue(adapter, j);
+		lut |= j << 24;
 		wr32(hw, I40E_VFQF_HLUT(i), lut);
 	}
 	i40e_flush(hw);
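
The LUT bug fixed above: next_queue() takes j by value and returns the
incremented index, so the old code's four back-to-back calls all
produced the same queue number and each 32-bit LUT word held one queue
repeated four times. Roughly:

	/* old: with j == 3, all four bytes of lut become 4 */
	lut = next_queue(adapter, j);
	lut |= next_queue(adapter, j) << 8;

	/* new: feed the result back so the bytes advance 4, 5, 6, ... */
	j = next_queue(adapter, j);
	lut = j;
	j = next_queue(adapter, j);
	lut |= j << 8;
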
+1 -15
drivers/net/ethernet/intel/igb/igb.h
···
 		struct igb_tx_buffer *tx_buffer_info;
 		struct igb_rx_buffer *rx_buffer_info;
 	};
-	unsigned long last_rx_timestamp;
 	void *desc;		/* descriptor ring memory */
 	unsigned long flags;	/* ring specific flags */
 	void __iomem *tail;	/* pointer to ring tail register */
···
 	struct hwtstamp_config tstamp_config;
 	unsigned long ptp_tx_start;
 	unsigned long last_rx_ptp_check;
+	unsigned long last_rx_timestamp;
 	spinlock_t tmreg_lock;
 	struct cyclecounter cc;
 	struct timecounter tc;
···
 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
 void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
 			 struct sk_buff *skb);
-static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
-				       union e1000_adv_rx_desc *rx_desc,
-				       struct sk_buff *skb)
-{
-	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
-	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
-		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
-
-	/* Update the last_rx_timestamp timer in order to enable watchdog check
-	 * for error case of latched timestamp on a dropped packet.
-	 */
-	rx_ring->last_rx_timestamp = jiffies;
-}
-
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
 #ifdef CONFIG_IGB_HWMON
+7 -6
drivers/net/ethernet/intel/igb/igb_main.c
···
 	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens, type_tucmd;
 	u32 mss_l4len_idx, l4len;
+	int err;

 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return 0;
···
 	if (!skb_is_gso(skb))
 		return 0;

-	if (skb_header_cloned(skb)) {
-		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-		if (err)
-			return err;
-	}
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;

 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
···

 	igb_rx_checksum(rx_ring, rx_desc, skb);

-	igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
+	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
+		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+7 -7
drivers/net/ethernet/intel/igb/igb_ptp.c
···
 void igb_ptp_rx_hang(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	struct igb_ring *rx_ring;
 	u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
 	unsigned long rx_event;
-	int n;

 	if (hw->mac.type != e1000_82576)
 		return;
···
 	/* Determine the most recent watchdog or rx_timestamp event */
 	rx_event = adapter->last_rx_ptp_check;
-	for (n = 0; n < adapter->num_rx_queues; n++) {
-		rx_ring = adapter->rx_ring[n];
-		if (time_after(rx_ring->last_rx_timestamp, rx_event))
-			rx_event = rx_ring->last_rx_timestamp;
-	}
+	if (time_after(adapter->last_rx_timestamp, rx_event))
+		rx_event = adapter->last_rx_timestamp;

 	/* Only need to read the high RXSTMP register to clear the lock */
 	if (time_is_before_jiffies(rx_event + 5 * HZ)) {
···
 	regval |= (u64)rd32(E1000_RXSTMPH) << 32;

 	igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+
+	/* Update the last_rx_timestamp timer in order to enable watchdog check
+	 * for error case of latched timestamp on a dropped packet.
+	 */
+	adapter->last_rx_timestamp = jiffies;
 }

 /**
+7 -9
drivers/net/ethernet/intel/igbvf/netdev.c
···
 		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
 	struct e1000_adv_tx_context_desc *context_desc;
-	unsigned int i;
-	int err;
 	struct igbvf_buffer *buffer_info;
 	u32 info = 0, tu_cmd = 0;
 	u32 mss_l4len_idx, l4len;
+	unsigned int i;
+	int err;
+
 	*hdr_len = 0;

-	if (skb_header_cloned(skb)) {
-		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-		if (err) {
-			dev_err(&adapter->pdev->dev,
-				"igbvf_tso returning an error\n");
-			return err;
-		}
+	err = skb_cow_head(skb, 0);
+	if (err < 0) {
+		dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
+		return err;
 	}

 	l4len = tcp_hdrlen(skb);
+4 -6
drivers/net/ethernet/intel/ixgb/ixgb_main.c
···
 	unsigned int i;
 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
 	u16 ipcse, tucse, mss;
-	int err;

 	if (likely(skb_is_gso(skb))) {
 		struct ixgb_buffer *buffer_info;
 		struct iphdr *iph;
+		int err;

-		if (skb_header_cloned(skb)) {
-			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-			if (err)
-				return err;
-		}
+		err = skb_cow_head(skb, 0);
+		if (err < 0)
+			return err;

 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 		mss = skb_shinfo(skb)->gso_size;
+1
drivers/net/ethernet/intel/ixgbe/ixgbe.h
···
 	__IXGBE_DISABLED,
 	__IXGBE_REMOVING,
 	__IXGBE_SERVICE_SCHED,
+	__IXGBE_SERVICE_INITED,
 	__IXGBE_IN_SFP_INIT,
 	__IXGBE_PTP_RUNNING,
 	__IXGBE_PTP_TX_IN_PROGRESS,
+20 -8
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
···
 		return;
 	hw->hw_addr = NULL;
 	e_dev_err("Adapter removed\n");
-	ixgbe_service_event_schedule(adapter);
+	if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
+		ixgbe_service_event_schedule(adapter);
 }

 void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
···
 	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens, type_tucmd;
 	u32 mss_l4len_idx, l4len;
+	int err;

 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return 0;
···
 	if (!skb_is_gso(skb))
 		return 0;

-	if (skb_header_cloned(skb)) {
-		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-		if (err)
-			return err;
-	}
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;

 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
···
 			IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
 		if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
 			struct vlan_ethhdr *vhdr;
-			if (skb_header_cloned(skb) &&
-			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+
+			if (skb_cow_head(skb, 0))
 				goto out_drop;
 			vhdr = (struct vlan_ethhdr *)skb->data;
 			vhdr->h_vlan_TCI = htons(tx_flags >>
···
 	/* EEPROM */
 	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+	if (ixgbe_removed(hw->hw_addr)) {
+		err = -EIO;
+		goto err_ioremap;
+	}
 	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
 	if (!(eec & (1 << 8)))
 		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
···
 	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
 		    (unsigned long) adapter);

+	if (ixgbe_removed(hw->hw_addr)) {
+		err = -EIO;
+		goto err_sw_init;
+	}
 	INIT_WORK(&adapter->service_task, ixgbe_service_task);
+	set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
 	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);

 	err = ixgbe_init_interrupt_scheme(adapter);
···

 skip_bad_vf_detection:
 #endif /* CONFIG_PCI_IOV */
+	if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
+		return PCI_ERS_RESULT_DISCONNECT;
+
 	rtnl_lock();
 	netif_device_detach(netdev);

+1
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
···
 	__IXGBEVF_DOWN,
 	__IXGBEVF_DISABLED,
 	__IXGBEVF_REMOVING,
+	__IXGBEVF_WORK_INIT,
 };

 struct ixgbevf_cb {
+14 -6
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
···
 		return;
 	hw->hw_addr = NULL;
 	dev_err(&adapter->pdev->dev, "Adapter removed\n");
-	schedule_work(&adapter->watchdog_task);
+	if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
+		schedule_work(&adapter->watchdog_task);
 }

 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
···
 	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens, type_tucmd;
 	u32 mss_l4len_idx, l4len;
+	int err;

 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return 0;
···
 	if (!skb_is_gso(skb))
 		return 0;

-	if (skb_header_cloned(skb)) {
-		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-		if (err)
-			return err;
-	}
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;

 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
···
 	adapter->watchdog_timer.function = ixgbevf_watchdog;
 	adapter->watchdog_timer.data = (unsigned long)adapter;

+	if (IXGBE_REMOVED(hw->hw_addr)) {
+		err = -EIO;
+		goto err_sw_init;
+	}
 	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
 	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
+	set_bit(__IXGBEVF_WORK_INIT, &adapter->state);

 	err = ixgbevf_init_interrupt_scheme(adapter);
 	if (err)
···
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
+		return PCI_ERS_RESULT_DISCONNECT;

 	rtnl_lock();
 	netif_device_detach(netdev);
+8 -9
drivers/net/ethernet/ti/cpsw.c
···

 	cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);

-	if (unlikely(status < 0)) {
+	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
 		/* the interface is going down, skbs are purged */
 		dev_kfree_skb_any(skb);
 		return;
···
 	for_each_slave(priv, cpsw_slave_open, priv);

 	/* Add default VLAN */
-	if (!priv->data.dual_emac)
-		cpsw_add_default_vlan(priv);
+	cpsw_add_default_vlan(priv);

 	if (!cpsw_common_res_usage_state(priv)) {
 		/* setup tx dma to fixed prio and zero offset */
···
 		cpsw_set_coalesce(ndev, &coal);
 	}

+	napi_enable(&priv->napi);
+	cpdma_ctlr_start(priv->dma);
+	cpsw_intr_enable(priv);
+	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
 	prim_cpsw = cpsw_get_slave_priv(priv, 0);
 	if (prim_cpsw->irq_enabled == false) {
 		if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
···
 			cpsw_enable_irq(prim_cpsw);
 		}
 	}
-
-	napi_enable(&priv->napi);
-	cpdma_ctlr_start(priv->dma);
-	cpsw_intr_enable(priv);
-	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
-	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);

 	if (priv->data.dual_emac)
 		priv->slaves[priv->emac_port].open_stat = true;
+1
drivers/net/hyperv/hyperv_net.h
···
 #define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4	0
 #define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6	1

+#define VERSION_4_OFFLOAD_SIZE	22
 /*
  * New offload OIDs for NDIS 6
  */
+1 -1
drivers/net/hyperv/netvsc.c
···
 	memset(init_packet, 0, sizeof(struct nvsp_message));

 	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
-		ndis_version = 0x00050001;
+		ndis_version = 0x00060001;
 	else
 		ndis_version = 0x0006001e;

+28 -2
drivers/net/hyperv/netvsc_drv.c
···
 	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
 			 (num_data_pgs * sizeof(struct hv_page_buffer)) +
 			 sizeof(struct rndis_message) +
-			 NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
+			 NDIS_VLAN_PPI_SIZE +
+			 NDIS_CSUM_PPI_SIZE +
+			 NDIS_LSO_PPI_SIZE, GFP_ATOMIC);
 	if (!packet) {
 		/* out of memory, drop packet */
 		netdev_err(net, "unable to allocate hv_netvsc_packet\n");
···
 		csum_info->transmit.tcp_checksum = 1;
 		csum_info->transmit.tcp_header_offset = hdr_offset;
 	} else if (net_trans_info & INFO_UDP) {
-		csum_info->transmit.udp_checksum = 1;
+		/* UDP checksum offload is not supported on ws2008r2.
+		 * Furthermore, on ws2012 and ws2012r2, there are some
+		 * issues with udp checksum offload from Linux guests.
+		 * (these are host issues).
+		 * For now compute the checksum here.
+		 */
+		struct udphdr *uh;
+		u16 udp_len;
+
+		ret = skb_cow_head(skb, 0);
+		if (ret)
+			goto drop;
+
+		uh = udp_hdr(skb);
+		udp_len = ntohs(uh->len);
+		uh->check = 0;
+		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
+					      ip_hdr(skb)->daddr,
+					      udp_len, IPPROTO_UDP,
+					      csum_partial(uh, udp_len, 0));
+		if (uh->check == 0)
+			uh->check = CSUM_MANGLED_0;
+
+		csum_info->transmit.udp_checksum = 0;
 	}
 	goto do_send;
···

 	ret = netvsc_send(net_device_ctx->device_ctx, packet);

+drop:
 	if (ret == 0) {
 		net->stats.tx_bytes += skb->len;
 		net->stats.tx_packets++;
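
A subtlety worth noting in the UDP hunk above: per RFC 768, a UDP
checksum of zero on the wire means "no checksum computed", so when the
software computation happens to produce zero it must be sent as
all-ones instead. That is what CSUM_MANGLED_0 (0xffff) encodes:

	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;	/* 0 is reserved for "none" */
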
+11 -1
drivers/net/hyperv/rndis_filter.c
···
 	struct rndis_set_complete *set_complete;
 	u32 extlen = sizeof(struct ndis_offload_params);
 	int ret, t;
+	u32 vsp_version = nvdev->nvsp_version;
+
+	if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
+		extlen = VERSION_4_OFFLOAD_SIZE;
+		/* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
+		 * UDP checksum offload.
+		 */
+		req_offloads->udp_ip_v4_csum = 0;
+		req_offloads->udp_ip_v6_csum = 0;
+	}

 	request = get_rndis_request(rdev, RNDIS_MSG_SET,
 		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
···
 	} else {
 		set_complete = &request->response_msg.msg.set_complete;
 		if (set_complete->status != RNDIS_STATUS_SUCCESS) {
-			netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
+			netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
 				   set_complete->status);
 			ret = -EINVAL;
 		}
+1 -5
drivers/net/phy/phy.c
···
 			netif_carrier_on(phydev->attached_dev);
 			phydev->adjust_link(phydev->attached_dev);

-		} else if (0 == phydev->link_timeout--) {
+		} else if (0 == phydev->link_timeout--)
 			needs_aneg = 1;
-			/* If we have the magic_aneg bit, we try again */
-			if (phydev->drv->flags & PHY_HAS_MAGICANEG)
-				break;
-		}
 		break;
 	case PHY_NOLINK:
 		err = phy_read_status(phydev);
+48
drivers/net/usb/r8152.c
···
 	struct r8152 *tp = netdev_priv(netdev);
 	int ret;

+	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+		return -ENODEV;
+
 	if (phy_id != R8152_PHY_ID)
 		return -EINVAL;

···
 void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
 {
 	struct r8152 *tp = netdev_priv(netdev);
+
+	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+		return;

 	if (phy_id != R8152_PHY_ID)
 		return;
···

 static int rtl8152_enable(struct r8152 *tp)
 {
+	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+		return -ENODEV;
+
 	set_tx_qlen(tp);
 	rtl_set_eee_plus(tp);

···

 static int rtl8153_enable(struct r8152 *tp)
 {
+	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+		return -ENODEV;
+
 	set_tx_qlen(tp);
 	rtl_set_eee_plus(tp);
 	r8153_set_rx_agg(tp);
···
 {
 	u32 ocp_data;
 	int i;
+
+	if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+		rtl_drop_queued_tx(tp);
+		return;
+	}

 	ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
 	ocp_data &= ~RCR_ACPT_ALL;
···
 {
 	u32 ocp_data;
 	int i;
+
+	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+		return;

 	ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
 	ocp_data &= ~RCR_ACPT_ALL;
···
 	u32 ocp_data;
 	int i;

+	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+		return;
+
 	rxdy_gated_en(tp, true);
 	r8153_teredo_off(tp);

···

 static void rtl8152_down(struct r8152 *tp)
 {
+	if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+		rtl_drop_queued_tx(tp);
+		return;
+	}
+
 	r8152_power_cut_en(tp, false);
 	r8152b_disable_aldps(tp);
 	r8152b_enter_oob(tp);
···

 static void rtl8153_down(struct r8152 *tp)
 {
+	if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+		rtl_drop_queued_tx(tp);
+		return;
+	}
+
 	r8153_u1u2en(tp, false);
 	r8153_power_cut_en(tp, false);
 	r8153_disable_aldps(tp);
···
 {
 	u32 ocp_data;

+	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+		return;
+
 	if (tp->version == RTL_VER_01) {
 		ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
 		ocp_data &= ~LED_MODE_MASK;
···
 {
 	u32 ocp_data;
 	int i;
+
+	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+		return;

 	r8153_u1u2en(tp, false);

···
 	struct mii_ioctl_data *data = if_mii(rq);
 	int res;

+	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+		return -ENODEV;
+
 	res = usb_autopm_get_interface(tp->intf);
 	if (res < 0)
 		goto out;
···

 static void rtl8152_unload(struct r8152 *tp)
 {
+	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+		return;
+
 	if (tp->version != RTL_VER_01)
 		r8152_power_cut_en(tp, true);
 }

 static void rtl8153_unload(struct r8152 *tp)
 {
+	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+		return;
+
 	r8153_power_cut_en(tp, true);
 }

+1 -3
drivers/net/wireless/ath/ath9k/ar5008_phy.c
···
 	case ATH9K_ANI_FIRSTEP_LEVEL:{
 		u32 level = param;

-		value = level * 2;
+		value = level;
 		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
 			      AR_PHY_FIND_SIG_FIRSTEP, value);
-		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
-			      AR_PHY_FIND_SIG_FIRSTEP_LOW, value);

 		if (level != aniState->firstepLevel) {
 			ath_dbg(common, ANI,
+3 -4
drivers/net/wireless/ath/ath9k/beacon.c
···

 void ath9k_csa_update(struct ath_softc *sc)
 {
-	ieee80211_iterate_active_interfaces(sc->hw,
-					    IEEE80211_IFACE_ITER_NORMAL,
-					    ath9k_csa_update_vif,
-					    sc);
+	ieee80211_iterate_active_interfaces_atomic(sc->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   ath9k_csa_update_vif, sc);
 }

 void ath9k_beacon_tasklet(unsigned long data)
+4 -1
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
···
 	if (!txok || !vif || !txs)
 		goto send_mac80211;

-	if (txs->ts_flags & ATH9K_HTC_TXSTAT_ACK)
+	if (txs->ts_flags & ATH9K_HTC_TXSTAT_ACK) {
 		tx_info->flags |= IEEE80211_TX_STAT_ACK;
+		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
+			tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
+	}

 	if (txs->ts_flags & ATH9K_HTC_TXSTAT_FILT)
 		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+2
drivers/net/wireless/ath/ath9k/init.c
···
 		.num_different_channels = 1,
 		.beacon_int_infra_match = true,
 	},
+#ifdef CONFIG_ATH9K_DFS_CERTIFIED
 	{
 		.limits = if_dfs_limits,
 		.n_limits = ARRAY_SIZE(if_dfs_limits),
···
 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
 					BIT(NL80211_CHAN_WIDTH_20),
 	}
+#endif
 };

 static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+7 -7
drivers/net/wireless/b43/phy_n.c
···
 	int ch = new_channel->hw_value;

 	u16 old_band_5ghz;
-	u32 tmp32;
+	u16 tmp16;

 	old_band_5ghz =
 		b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ;
 	if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) {
-		tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
-		b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
+		tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
+		b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
 		b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000);
-		b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
+		b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
 		b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
 	} else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) {
 		b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
-		tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
-		b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
+		tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
+		b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
 		b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF);
-		b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
+		b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
 	}

 	b43_chantab_phy_upload(dev, e);
+4 -2
drivers/net/wireless/rsi/rsi_91x_core.c
···
 	}

 get_queue_num:
-	q_num = 0;
 	recontend_queue = false;

 	q_num = rsi_determine_min_weight_queue(common);
+
 	q_len = skb_queue_len(&common->tx_queue[ii]);
 	ii = q_num;

···
 		}
 	}

-	common->tx_qinfo[q_num].pkt_contended = 0;
+	if (q_num < NUM_EDCA_QUEUES)
+		common->tx_qinfo[q_num].pkt_contended = 0;
+
 	/* Adjust the back off values for all queues again */
 	recontend_queue = rsi_recalculate_weights(common);

+16 -19
drivers/net/wireless/rsi/rsi_91x_debugfs.c
···
 	const struct rsi_dbg_files *files;

 	dev_dbgfs = kzalloc(sizeof(*dev_dbgfs), GFP_KERNEL);
+	if (!dev_dbgfs)
+		return -ENOMEM;
+
 	adapter->dfsentry = dev_dbgfs;

 	snprintf(devdir, sizeof(devdir), "%s",
 		 wiphy_name(adapter->hw->wiphy));
+
 	dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL);

-	if (IS_ERR(dev_dbgfs->subdir)) {
-		if (dev_dbgfs->subdir == ERR_PTR(-ENODEV))
-			rsi_dbg(ERR_ZONE,
-				"%s:Debugfs has not been mounted\n", __func__);
-		else
-			rsi_dbg(ERR_ZONE, "debugfs:%s not created\n", devdir);
-
-		adapter->dfsentry = NULL;
+	if (!dev_dbgfs->subdir) {
 		kfree(dev_dbgfs);
-		return (int)PTR_ERR(dev_dbgfs->subdir);
-	} else {
-		for (ii = 0; ii < adapter->num_debugfs_entries; ii++) {
-			files = &dev_debugfs_files[ii];
-			dev_dbgfs->rsi_files[ii] =
-			debugfs_create_file(files->name,
-					    files->perms,
-					    dev_dbgfs->subdir,
-					    common,
-					    &files->fops);
-		}
+		return -ENOMEM;
+	}
+
+	for (ii = 0; ii < adapter->num_debugfs_entries; ii++) {
+		files = &dev_debugfs_files[ii];
+		dev_dbgfs->rsi_files[ii] =
+		debugfs_create_file(files->name,
+				    files->perms,
+				    dev_dbgfs->subdir,
+				    common,
+				    &files->fops);
 	}
 	return 0;
 }
+5 -3
drivers/net/wireless/rsi/rsi_91x_mgmt.c
···
  *
  * Return: 0 on success, corresponding error code on failure.
  */
-static u8 rsi_load_bootup_params(struct rsi_common *common)
+static int rsi_load_bootup_params(struct rsi_common *common)
 {
 	struct sk_buff *skb;
 	struct rsi_boot_params *boot_params;
···
 {
 	s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff);
 	u16 msg_type = (msg[2]);
+	int ret;

 	rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n",
 		__func__, msg_len, msg_type);
···
 	if (common->fsm_state == FSM_CARD_NOT_READY) {
 		rsi_set_default_parameters(common);

-		if (rsi_load_bootup_params(common))
-			return -ENOMEM;
+		ret = rsi_load_bootup_params(common);
+		if (ret)
+			return ret;
 		else
 			common->fsm_state = FSM_BOOT_PARAMS_SENT;
 	} else {
+3 -2
drivers/net/wireless/rsi/rsi_91x_sdio.c
···
 static void rsi_disconnect(struct sdio_func *pfunction)
 {
 	struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
-	struct rsi_91x_sdiodev *dev =
-		(struct rsi_91x_sdiodev *)adapter->rsi_dev;
+	struct rsi_91x_sdiodev *dev;

 	if (!adapter)
 		return;
+
+	dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev;

 	dev->write_fail = 2;
 	rsi_mac80211_detach(adapter);
+2 -4
drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
···
 	if (!common->rx_data_pkt) {
 		rsi_dbg(ERR_ZONE, "%s: Failed in memory allocation\n",
 			__func__);
-		return -1;
+		return -ENOMEM;
 	}

 	status = rsi_sdio_host_intf_read_pkt(adapter,
···
 	}

 	status = rsi_read_pkt(common, rcv_pkt_len);
-	kfree(common->rx_data_pkt);
-	return status;

 fail:
 	kfree(common->rx_data_pkt);
-	return -1;
+	return status;
 }

 /**
+19 -7
drivers/net/wireless/rsi/rsi_91x_usb.c
···
 			     u16 *value,
 			     u16 len)
 {
-	u8 temp_buf[4];
-	int status = 0;
+	u8 *buf;
+	int status = -ENOMEM;
+
+	buf = kmalloc(0x04, GFP_KERNEL);
+	if (!buf)
+		return status;

 	status = usb_control_msg(usbdev,
 				 usb_rcvctrlpipe(usbdev, 0),
 				 USB_VENDOR_REGISTER_READ,
 				 USB_TYPE_VENDOR,
 				 ((reg & 0xffff0000) >> 16), (reg & 0xffff),
-				 (void *)temp_buf,
+				 (void *)buf,
 				 len,
 				 HZ * 5);

-	*value = (temp_buf[0] | (temp_buf[1] << 8));
+	*value = (buf[0] | (buf[1] << 8));
 	if (status < 0) {
 		rsi_dbg(ERR_ZONE,
 			"%s: Reg read failed with error code :%d\n",
 			__func__, status);
 	}
+	kfree(buf);
+
 	return status;
 }

···
 			      u16 value,
 			      u16 len)
 {
-	u8 usb_reg_buf[4];
-	int status = 0;
+	u8 *usb_reg_buf;
+	int status = -ENOMEM;
+
+	usb_reg_buf = kmalloc(0x04, GFP_KERNEL);
+	if (!usb_reg_buf)
+		return status;

 	usb_reg_buf[0] = (value & 0x00ff);
 	usb_reg_buf[1] = (value & 0xff00) >> 8;
···
 			"%s: Reg write failed with error code :%d\n",
 			__func__, status);
 	}
+	kfree(usb_reg_buf);
+
 	return status;
 }

···
 		return -ENOMEM;

 	while (count) {
-		transfer = min_t(int, count, 4096);
+		transfer = (u8)(min_t(u32, count, 4096));
 		memcpy(buf, data, transfer);
 		status = usb_control_msg(dev->usbdev,
 					 usb_sndctrlpipe(dev->usbdev, 0),
-10
drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
···
 	else
 		btcoexist->binded = true;

-#if (defined(CONFIG_PCI_HCI))
-	btcoexist->chip_interface = BTC_INTF_PCI;
-#elif (defined(CONFIG_USB_HCI))
-	btcoexist->chip_interface = BTC_INTF_USB;
-#elif (defined(CONFIG_SDIO_HCI))
-	btcoexist->chip_interface = BTC_INTF_SDIO;
-#elif (defined(CONFIG_GSPI_HCI))
-	btcoexist->chip_interface = BTC_INTF_GSPI;
-#else
 	btcoexist->chip_interface = BTC_INTF_UNKNOWN;
-#endif

 	if (NULL == btcoexist->adapter)
 		btcoexist->adapter = adapter;
+1 -1
drivers/net/xen-netfront.c
···
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
 		skb_entry_set_link(&np->tx_skbs[i], i+1);
 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
+		np->grant_tx_page[i] = NULL;
 	}

 	/* Clear out rx_skbs */
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
 		np->rx_skbs[i] = NULL;
 		np->grant_rx_ref[i] = GRANT_INVALID_REF;
-		np->grant_tx_page[i] = NULL;
 	}

 	/* A grant for every tx ring slot */
+1 -1
drivers/scsi/iscsi_tcp.c
···
 	return 0;
 }

-static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
+static void iscsi_sw_tcp_data_ready(struct sock *sk)
 {
 	struct iscsi_conn *conn;
 	struct iscsi_tcp_conn *tcp_conn;
+1 -1
drivers/scsi/iscsi_tcp.h
···

 	struct iscsi_sw_tcp_send out;
 	/* old values for socket callbacks */
-	void (*old_data_ready)(struct sock *, int);
+	void (*old_data_ready)(struct sock *);
 	void (*old_state_change)(struct sock *);
 	void (*old_write_space)(struct sock *);

+2 -2
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
···
  * socket call back in Linux
  */
 static void
-ksocknal_data_ready (struct sock *sk, int n)
+ksocknal_data_ready (struct sock *sk)
 {
 	ksock_conn_t *conn;

···
 	conn = sk->sk_user_data;
 	if (conn == NULL) {	/* raced with ksocknal_terminate_conn */
 		LASSERT (sk->sk_data_ready != &ksocknal_data_ready);
-		sk->sk_data_ready (sk, n);
+		sk->sk_data_ready (sk);
 	} else
 		ksocknal_read_callback(conn);

+1 -1
drivers/target/iscsi/iscsi_target_core.h
···
 	struct completion rx_half_close_comp;
 	/* socket used by this connection */
 	struct socket *sock;
-	void (*orig_data_ready)(struct sock *, int);
+	void (*orig_data_ready)(struct sock *);
 	void (*orig_state_change)(struct sock *);
 #define LOGIN_FLAGS_READ_ACTIVE	1
 #define LOGIN_FLAGS_CLOSED	2
+1 -1
drivers/target/iscsi/iscsi_target_nego.c
···
 	return 0;
 }

-static void iscsi_target_sk_data_ready(struct sock *sk, int count)
+static void iscsi_target_sk_data_ready(struct sock *sk)
 {
 	struct iscsi_conn *conn = sk->sk_user_data;
 	bool rc;
+1 -1
fs/dlm/lowcomms.c
···
 }

 /* Data available on socket or listen socket received a connect */
-static void lowcomms_data_ready(struct sock *sk, int count_unused)
+static void lowcomms_data_ready(struct sock *sk)
 {
 	struct connection *con = sock2con(sk);
 	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
+2 -2
fs/ncpfs/ncp_fs_sb.h
···

 	spinlock_t requests_lock;	/* Lock accesses to tx.requests, tx.creq and rcv.creq when STREAM mode */

-	void (*data_ready)(struct sock* sk, int len);
+	void (*data_ready)(struct sock* sk);
 	void (*error_report)(struct sock* sk);
 	void (*write_space)(struct sock* sk);	/* STREAM mode only */
 	struct {
···
 extern void ncpdgram_rcv_proc(struct work_struct *work);
 extern void ncpdgram_timeout_proc(struct work_struct *work);
 extern void ncpdgram_timeout_call(unsigned long server);
-extern void ncp_tcp_data_ready(struct sock* sk, int len);
+extern void ncp_tcp_data_ready(struct sock* sk);
 extern void ncp_tcp_write_space(struct sock* sk);
 extern void ncp_tcp_error_report(struct sock* sk);

+2 -2
fs/ncpfs/sock.c
···
 	kfree(req);
 }

-void ncp_tcp_data_ready(struct sock *sk, int len)
+void ncp_tcp_data_ready(struct sock *sk)
 {
 	struct ncp_server *server = sk->sk_user_data;

-	server->data_ready(sk, len);
+	server->data_ready(sk);
 	schedule_work(&server->rcv.tq);
 }

+7 -8
fs/ocfs2/cluster/tcp.c
···
 static void o2net_sc_connect_completed(struct work_struct *work);
 static void o2net_rx_until_empty(struct work_struct *work);
 static void o2net_shutdown_sc(struct work_struct *work);
-static void o2net_listen_data_ready(struct sock *sk, int bytes);
+static void o2net_listen_data_ready(struct sock *sk);
 static void o2net_sc_send_keep_req(struct work_struct *work);
 static void o2net_idle_timer(unsigned long data);
 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
···
 }

 /* see o2net_register_callbacks() */
-static void o2net_data_ready(struct sock *sk, int bytes)
+static void o2net_data_ready(struct sock *sk)
 {
-	void (*ready)(struct sock *sk, int bytes);
+	void (*ready)(struct sock *sk);

 	read_lock(&sk->sk_callback_lock);
 	if (sk->sk_user_data) {
···
 	}
 	read_unlock(&sk->sk_callback_lock);

-	ready(sk, bytes);
+	ready(sk);
 }

 /* see o2net_register_callbacks() */
···
 	cond_resched();
 }

-static void o2net_listen_data_ready(struct sock *sk, int bytes)
+static void o2net_listen_data_ready(struct sock *sk)
 {
-	void (*ready)(struct sock *sk, int bytes);
+	void (*ready)(struct sock *sk);

 	read_lock(&sk->sk_callback_lock);
 	ready = sk->sk_user_data;
···
 	 */

 	if (sk->sk_state == TCP_LISTEN) {
-		mlog(ML_TCP, "bytes: %d\n", bytes);
 		queue_work(o2net_wq, &o2net_listen_work);
 	} else {
 		ready = NULL;
···
 out:
 	read_unlock(&sk->sk_callback_lock);
 	if (ready != NULL)
-		ready(sk, bytes);
+		ready(sk);
 }

 static int o2net_open_listening_sock(__be32 addr, __be16 port)
+1 -1
fs/ocfs2/cluster/tcp_internal.h
··· 165 165 166 166 /* original handlers for the sockets */ 167 167 void (*sc_state_change)(struct sock *sk); 168 - void (*sc_data_ready)(struct sock *sk, int bytes); 168 + void (*sc_data_ready)(struct sock *sk); 169 169 170 170 u32 sc_msg_key; 171 171 u16 sc_msg_type;
+1 -1
include/linux/sunrpc/svcsock.h
··· 22 22 23 23 /* We keep the old state_change and data_ready CB's here */ 24 24 void (*sk_ostate)(struct sock *); 25 - void (*sk_odata)(struct sock *, int bytes); 25 + void (*sk_odata)(struct sock *); 26 26 void (*sk_owspace)(struct sock *); 27 27 28 28 /* private TCP part */
+1 -1
include/net/sctp/sctp.h
··· 101 101 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 102 102 int sctp_inet_listen(struct socket *sock, int backlog); 103 103 void sctp_write_space(struct sock *sk); 104 - void sctp_data_ready(struct sock *sk, int len); 104 + void sctp_data_ready(struct sock *sk); 105 105 unsigned int sctp_poll(struct file *file, struct socket *sock, 106 106 poll_table *wait); 107 107 void sctp_sock_rfree(struct sk_buff *skb);
+1 -1
include/net/sock.h
··· 418 418 u32 sk_classid; 419 419 struct cg_proto *sk_cgrp; 420 420 void (*sk_state_change)(struct sock *sk); 421 - void (*sk_data_ready)(struct sock *sk, int bytes); 421 + void (*sk_data_ready)(struct sock *sk); 422 422 void (*sk_write_space)(struct sock *sk); 423 423 void (*sk_error_report)(struct sock *sk); 424 424 int (*sk_backlog_rcv)(struct sock *sk,
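This sock.h hunk is the pivot the rest of the series turns on: sk_data_ready() drops its second argument, and every implementation and call site below follows. The call sites tell the story on their own: some (net/atm/lec.c, net/rose, net/x25) passed skb->len of a buffer already placed on the receive queue, others passed a bare 0 or 1, so the value was both unsafe to compute and unused. A standalone C sketch of the hazard and of the fixed shape; buf, rxq, queue_rcv and data_ready are invented names, not kernel APIs:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf { size_t len; char data[64]; };

    static struct buf *rxq;    /* one-slot stand-in for a receive queue */

    /* fixed shape: no length argument, nothing dereferenced */
    static void data_ready(void)
    {
        puts("reader woken; it measures the queue itself");
    }

    static void queue_rcv(struct buf *b)
    {
        rxq = b;    /* ownership passes to the reader here */
        /* the old shape would now call data_ready(b->len): a read of b
         * after a concurrent reader may already have consumed and freed it */
        data_ready();
    }

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));
        if (!b)
            return 1;
        b->len = strlen("ping");
        memcpy(b->data, "ping", b->len);
        queue_rcv(b);
        free(rxq);    /* the reader side consumes */
        return 0;
    }

The conversions in the remaining hunks are this same move at scale: notify, and let the reader take whatever it finds on the queue.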
+1 -1
net/atm/clip.c
··· 68 68 69 69 sk = sk_atm(atmarpd); 70 70 skb_queue_tail(&sk->sk_receive_queue, skb); 71 - sk->sk_data_ready(sk, skb->len); 71 + sk->sk_data_ready(sk); 72 72 return 0; 73 73 } 74 74
+5 -5
net/atm/lec.c
··· 152 152 atm_force_charge(priv->lecd, skb2->truesize); 153 153 sk = sk_atm(priv->lecd); 154 154 skb_queue_tail(&sk->sk_receive_queue, skb2); 155 - sk->sk_data_ready(sk, skb2->len); 155 + sk->sk_data_ready(sk); 156 156 } 157 157 } 158 158 #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ ··· 447 447 atm_force_charge(priv->lecd, skb2->truesize); 448 448 sk = sk_atm(priv->lecd); 449 449 skb_queue_tail(&sk->sk_receive_queue, skb2); 450 - sk->sk_data_ready(sk, skb2->len); 450 + sk->sk_data_ready(sk); 451 451 } 452 452 } 453 453 #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ ··· 530 530 atm_force_charge(priv->lecd, skb->truesize); 531 531 sk = sk_atm(priv->lecd); 532 532 skb_queue_tail(&sk->sk_receive_queue, skb); 533 - sk->sk_data_ready(sk, skb->len); 533 + sk->sk_data_ready(sk); 534 534 535 535 if (data != NULL) { 536 536 pr_debug("about to send %d bytes of data\n", data->len); 537 537 atm_force_charge(priv->lecd, data->truesize); 538 538 skb_queue_tail(&sk->sk_receive_queue, data); 539 - sk->sk_data_ready(sk, skb->len); 539 + sk->sk_data_ready(sk); 540 540 } 541 541 542 542 return 0; ··· 616 616 617 617 pr_debug("%s: To daemon\n", dev->name); 618 618 skb_queue_tail(&sk->sk_receive_queue, skb); 619 - sk->sk_data_ready(sk, skb->len); 619 + sk->sk_data_ready(sk); 620 620 } else { /* Data frame, queue to protocol handlers */ 621 621 struct lec_arp_table *entry; 622 622 unsigned char *src, *dst;
+3 -3
net/atm/mpc.c
··· 706 706 dprintk("(%s) control packet arrived\n", dev->name); 707 707 /* Pass control packets to daemon */ 708 708 skb_queue_tail(&sk->sk_receive_queue, skb); 709 - sk->sk_data_ready(sk, skb->len); 709 + sk->sk_data_ready(sk); 710 710 return; 711 711 } 712 712 ··· 992 992 993 993 sk = sk_atm(mpc->mpoad_vcc); 994 994 skb_queue_tail(&sk->sk_receive_queue, skb); 995 - sk->sk_data_ready(sk, skb->len); 995 + sk->sk_data_ready(sk); 996 996 997 997 return 0; 998 998 } ··· 1273 1273 1274 1274 sk = sk_atm(vcc); 1275 1275 skb_queue_tail(&sk->sk_receive_queue, skb); 1276 - sk->sk_data_ready(sk, skb->len); 1276 + sk->sk_data_ready(sk); 1277 1277 dprintk("exiting\n"); 1278 1278 } 1279 1279
+1 -1
net/atm/raw.c
··· 25 25 struct sock *sk = sk_atm(vcc); 26 26 27 27 skb_queue_tail(&sk->sk_receive_queue, skb); 28 - sk->sk_data_ready(sk, skb->len); 28 + sk->sk_data_ready(sk); 29 29 } 30 30 } 31 31
+1 -1
net/atm/signaling.c
··· 51 51 #endif 52 52 atm_force_charge(sigd, skb->truesize); 53 53 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb); 54 - sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len); 54 + sk_atm(sigd)->sk_data_ready(sk_atm(sigd)); 55 55 } 56 56 57 57 static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
+1 -1
net/ax25/ax25_in.c
··· 422 422 423 423 if (sk) { 424 424 if (!sock_flag(sk, SOCK_DEAD)) 425 - sk->sk_data_ready(sk, skb->len); 425 + sk->sk_data_ready(sk); 426 426 sock_put(sk); 427 427 } else { 428 428 free:
+3 -3
net/bluetooth/l2cap_sock.c
··· 1271 1271 1272 1272 if (parent) { 1273 1273 bt_accept_unlink(sk); 1274 - parent->sk_data_ready(parent, 0); 1274 + parent->sk_data_ready(parent); 1275 1275 } else { 1276 1276 sk->sk_state_change(sk); 1277 1277 } ··· 1327 1327 sk->sk_state_change(sk); 1328 1328 1329 1329 if (parent) 1330 - parent->sk_data_ready(parent, 0); 1330 + parent->sk_data_ready(parent); 1331 1331 1332 1332 release_sock(sk); 1333 1333 } ··· 1340 1340 1341 1341 parent = bt_sk(sk)->parent; 1342 1342 if (parent) 1343 - parent->sk_data_ready(parent, 0); 1343 + parent->sk_data_ready(parent); 1344 1344 1345 1345 release_sock(sk); 1346 1346 }
+2 -2
net/bluetooth/rfcomm/core.c
··· 186 186 rfcomm_schedule(); 187 187 } 188 188 189 - static void rfcomm_l2data_ready(struct sock *sk, int bytes) 189 + static void rfcomm_l2data_ready(struct sock *sk) 190 190 { 191 - BT_DBG("%p bytes %d", sk, bytes); 191 + BT_DBG("%p", sk); 192 192 rfcomm_schedule(); 193 193 } 194 194
+2 -2
net/bluetooth/rfcomm/sock.c
··· 54 54 55 55 atomic_add(skb->len, &sk->sk_rmem_alloc); 56 56 skb_queue_tail(&sk->sk_receive_queue, skb); 57 - sk->sk_data_ready(sk, skb->len); 57 + sk->sk_data_ready(sk); 58 58 59 59 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 60 60 rfcomm_dlc_throttle(d); ··· 84 84 sock_set_flag(sk, SOCK_ZAPPED); 85 85 bt_accept_unlink(sk); 86 86 } 87 - parent->sk_data_ready(parent, 0); 87 + parent->sk_data_ready(parent); 88 88 } else { 89 89 if (d->state == BT_CONNECTED) 90 90 rfcomm_session_getaddr(d->session,
+1 -1
net/bluetooth/sco.c
··· 1024 1024 sk->sk_state = BT_CONNECTED; 1025 1025 1026 1026 /* Wake up parent */ 1027 - parent->sk_data_ready(parent, 1); 1027 + parent->sk_data_ready(parent); 1028 1028 1029 1029 bh_unlock_sock(parent); 1030 1030
+1 -1
net/bridge/br_input.c
··· 73 73 goto drop; 74 74 75 75 if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid)) 76 - goto drop; 76 + goto out; 77 77 78 78 /* insert into forwarding database after filtering to avoid spoofing */ 79 79 br = p->br;
+4 -3
net/bridge/br_vlan.c
··· 170 170 * rejected. 171 171 */ 172 172 if (!v) 173 - return false; 173 + goto drop; 174 174 175 175 /* If vlan tx offload is disabled on bridge device and frame was 176 176 * sent from vlan device on the bridge device, it does not have ··· 193 193 * vlan untagged or priority-tagged traffic belongs to. 194 194 */ 195 195 if (pvid == VLAN_N_VID) 196 - return false; 196 + goto drop; 197 197 198 198 /* PVID is set on this port. Any untagged or priority-tagged 199 199 * ingress frame is considered to belong to this vlan. ··· 216 216 /* Frame had a valid vlan tag. See if vlan is allowed */ 217 217 if (test_bit(*vid, v->vlan_bitmap)) 218 218 return true; 219 - 219 + drop: 220 + kfree_skb(skb); 220 221 return false; 221 222 } 222 223
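These two bridge hunks are one contract change: br_allowed_ingress() now consumes the skb on every rejection path (the new drop: label with kfree_skb()), so its caller in br_input.c must leave via out rather than drop, which would free the buffer a second time. A minimal consume-on-reject sketch of that convention, with invented names:

    #include <stdbool.h>
    #include <stdlib.h>

    struct pkt { int vid; };

    /* on rejection the filter consumes the packet, as
     * br_allowed_ingress() now does via its drop: label */
    static bool allowed_ingress(struct pkt *p)
    {
        if (p->vid < 0) {
            free(p);
            return false;
        }
        return true;
    }

    static void handle_frame(struct pkt *p)
    {
        if (!allowed_ingress(p))
            return;    /* the "goto out" side: no second free */
        /* ... forward ... */
        free(p);
    }

    int main(void)
    {
        struct pkt *p = malloc(sizeof(*p));
        if (p) {
            p->vid = -1;    /* rejected and freed by the filter */
            handle_frame(p);
        }
        return 0;
    }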
+1 -3
net/caif/caif_socket.c
··· 124 124 static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 125 125 { 126 126 int err; 127 - int skb_len; 128 127 unsigned long flags; 129 128 struct sk_buff_head *list = &sk->sk_receive_queue; 130 129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); ··· 152 153 * may be freed by other threads of control pulling packets 153 154 * from the queue. 154 155 */ 155 - skb_len = skb->len; 156 156 spin_lock_irqsave(&list->lock, flags); 157 157 if (!sock_flag(sk, SOCK_DEAD)) 158 158 __skb_queue_tail(list, skb); 159 159 spin_unlock_irqrestore(&list->lock, flags); 160 160 161 161 if (!sock_flag(sk, SOCK_DEAD)) 162 - sk->sk_data_ready(sk, skb_len); 162 + sk->sk_data_ready(sk); 163 163 else 164 164 kfree_skb(skb); 165 165 return 0;
+1 -1
net/ceph/messenger.c
··· 383 383 */ 384 384 385 385 /* data available on socket, or listen socket received a connect */ 386 - static void ceph_sock_data_ready(struct sock *sk, int count_unused) 386 + static void ceph_sock_data_ready(struct sock *sk) 387 387 { 388 388 struct ceph_connection *con = sk->sk_user_data; 389 389 if (atomic_read(&con->msgr->stopping)) {
+6 -2
net/core/pktgen.c
··· 3338 3338 queue_map = skb_get_queue_mapping(pkt_dev->skb); 3339 3339 txq = netdev_get_tx_queue(odev, queue_map); 3340 3340 3341 - __netif_tx_lock_bh(txq); 3341 + local_bh_disable(); 3342 + 3343 + HARD_TX_LOCK(odev, txq, smp_processor_id()); 3342 3344 3343 3345 if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) { 3344 3346 ret = NETDEV_TX_BUSY; ··· 3376 3374 pkt_dev->last_ok = 0; 3377 3375 } 3378 3376 unlock: 3379 - __netif_tx_unlock_bh(txq); 3377 + HARD_TX_UNLOCK(odev, txq); 3378 + 3379 + local_bh_enable(); 3380 3380 3381 3381 /* If pkt_dev->count is zero, then run forever */ 3382 3382 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
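The lock swap matters for LLTX drivers: __netif_tx_lock_bh() always takes the queue's xmit lock, while such drivers serialize transmit themselves and never hold it, so pktgen was synchronizing against the wrong thing. HARD_TX_LOCK() takes the queue lock only for drivers that rely on it, and the explicit local_bh_disable()/local_bh_enable() pair preserves the bottom-half protection the old helper bundled in. A rough userspace model of conditional locking of this shape (build with -pthread; a mutex stands in for the xmit lock):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct txq { pthread_mutex_t lock; };
    struct dev { bool lltx; struct txq q; };

    static void hard_tx_lock(struct dev *d)
    {
        if (!d->lltx)    /* LLTX drivers serialize internally */
            pthread_mutex_lock(&d->q.lock);
    }

    static void hard_tx_unlock(struct dev *d)
    {
        if (!d->lltx)
            pthread_mutex_unlock(&d->q.lock);
    }

    int main(void)
    {
        struct dev d = { .lltx = true, .q = { PTHREAD_MUTEX_INITIALIZER } };

        hard_tx_lock(&d);    /* a no-op for LLTX, by design */
        puts("xmit one packet");
        hard_tx_unlock(&d);
        return 0;
    }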
+8 -8
net/core/skbuff.c
··· 3458 3458 */ 3459 3459 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3460 3460 { 3461 - int len = skb->len; 3462 - 3463 3461 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3464 3462 (unsigned int)sk->sk_rcvbuf) 3465 3463 return -ENOMEM; ··· 3472 3474 3473 3475 skb_queue_tail(&sk->sk_error_queue, skb); 3474 3476 if (!sock_flag(sk, SOCK_DEAD)) 3475 - sk->sk_data_ready(sk, len); 3477 + sk->sk_data_ready(sk); 3476 3478 return 0; 3477 3479 } 3478 3480 EXPORT_SYMBOL(sock_queue_err_skb); ··· 3935 3937 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 3936 3938 { 3937 3939 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3938 - unsigned int hdr_len; 3939 3940 3940 3941 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 3941 - hdr_len = tcp_hdrlen(skb); 3942 - else 3943 - hdr_len = sizeof(struct udphdr); 3944 - return hdr_len + shinfo->gso_size; 3942 + return tcp_hdrlen(skb) + shinfo->gso_size; 3943 + 3944 + /* UFO sets gso_size to the size of the fragmentation 3945 + * payload, i.e. the size of the L4 (UDP) header is already 3946 + * accounted for. 3947 + */ 3948 + return shinfo->gso_size; 3945 3949 } 3946 3950 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
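The rewritten skb_gso_transport_seglen() encodes the asymmetry its new comment spells out: for TCP segmentation, gso_size is payload only, so the transport header must be added back, while UFO's gso_size already describes the full fragmentation payload. A standalone arithmetic check, with illustrative header and segment sizes:

    #include <stdio.h>

    enum gso_kind { GSO_TCP, GSO_UDP_UFO };

    static unsigned int seglen(enum gso_kind kind, unsigned int gso_size,
                               unsigned int thdr_len)
    {
        if (kind == GSO_TCP)
            return thdr_len + gso_size;    /* gso_size excludes the header */
        return gso_size;                   /* UFO: header already included */
    }

    int main(void)
    {
        /* TSO with timestamps: 32-byte TCP header, 1448-byte segments */
        printf("tcp seglen: %u\n", seglen(GSO_TCP, 1448, 32));      /* 1480 */
        /* UFO fragmentation payload of 1480 bytes, nothing to add */
        printf("udp seglen: %u\n", seglen(GSO_UDP_UFO, 1480, 0));   /* 1480 */
        return 0;
    }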
+2 -2
net/core/sock.c
··· 428 428 spin_unlock_irqrestore(&list->lock, flags); 429 429 430 430 if (!sock_flag(sk, SOCK_DEAD)) 431 - sk->sk_data_ready(sk, skb_len); 431 + sk->sk_data_ready(sk); 432 432 return 0; 433 433 } 434 434 EXPORT_SYMBOL(sock_queue_rcv_skb); ··· 2196 2196 rcu_read_unlock(); 2197 2197 } 2198 2198 2199 - static void sock_def_readable(struct sock *sk, int len) 2199 + static void sock_def_readable(struct sock *sk) 2200 2200 { 2201 2201 struct socket_wq *wq; 2202 2202
+1 -1
net/dccp/input.c
··· 28 28 __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4); 29 29 __skb_queue_tail(&sk->sk_receive_queue, skb); 30 30 skb_set_owner_r(skb, sk); 31 - sk->sk_data_ready(sk, 0); 31 + sk->sk_data_ready(sk); 32 32 } 33 33 34 34 static void dccp_fin(struct sock *sk, struct sk_buff *skb)
+1 -1
net/dccp/minisocks.c
··· 237 237 238 238 /* Wakeup parent, send SIGIO */ 239 239 if (state == DCCP_RESPOND && child->sk_state != state) 240 - parent->sk_data_ready(parent, 0); 240 + parent->sk_data_ready(parent); 241 241 } else { 242 242 /* Alas, it is possible again, because we do lookup 243 243 * in main socket hash table and lock on listening
+1 -3
net/decnet/dn_nsp_in.c
··· 585 585 static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue) 586 586 { 587 587 int err; 588 - int skb_len; 589 588 590 589 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces 591 590 number of warnings when compiling with -W --ANK ··· 599 600 if (err) 600 601 goto out; 601 602 602 - skb_len = skb->len; 603 603 skb_set_owner_r(skb, sk); 604 604 skb_queue_tail(queue, skb); 605 605 606 606 if (!sock_flag(sk, SOCK_DEAD)) 607 - sk->sk_data_ready(sk, skb_len); 607 + sk->sk_data_ready(sk); 608 608 out: 609 609 return err; 610 610 }
+1 -1
net/ipv4/ip_gre.c
··· 463 463 static void ipgre_tunnel_setup(struct net_device *dev) 464 464 { 465 465 dev->netdev_ops = &ipgre_netdev_ops; 466 + dev->type = ARPHRD_IPGRE; 466 467 ip_tunnel_setup(dev, ipgre_net_id); 467 468 } 468 469 ··· 502 501 memcpy(dev->dev_addr, &iph->saddr, 4); 503 502 memcpy(dev->broadcast, &iph->daddr, 4); 504 503 505 - dev->type = ARPHRD_IPGRE; 506 504 dev->flags = IFF_NOARP; 507 505 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 508 506 dev->addr_len = 4;
+1 -1
net/ipv4/ip_vti.c
··· 337 337 static void vti_tunnel_setup(struct net_device *dev) 338 338 { 339 339 dev->netdev_ops = &vti_netdev_ops; 340 + dev->type = ARPHRD_TUNNEL; 340 341 ip_tunnel_setup(dev, vti_net_id); 341 342 } 342 343 ··· 349 348 memcpy(dev->dev_addr, &iph->saddr, 4); 350 349 memcpy(dev->broadcast, &iph->daddr, 4); 351 350 352 - dev->type = ARPHRD_TUNNEL; 353 351 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); 354 352 dev->mtu = ETH_DATA_LEN; 355 353 dev->flags = IFF_NOARP;
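The gre and vti hunks make the same move: dev->type is assigned in the ->setup() callback, which runs when the device is allocated, instead of in the per-device init that runs later during registration. A plausible reading (not spelled out in the hunks themselves) is that the duplicate-tunnel lookup includes the device type among its comparison keys, so a not-yet-set type let a second identical tunnel pass the check. A toy version of a keyed duplicate check defeated by a late-set field; all names and the 778 (ARPHRD_IPGRE) literal are illustrative:

    #include <stdio.h>

    struct tun { int type; unsigned int daddr; };

    static const struct tun existing = { .type = 778, .daddr = 0x0a000001 };

    static int is_duplicate(const struct tun *t)
    {
        return t->type == existing.type && t->daddr == existing.daddr;
    }

    int main(void)
    {
        struct tun t = { .daddr = 0x0a000001 };    /* type still 0 here */

        printf("before setup: dup=%d\n", is_duplicate(&t));  /* 0: missed */
        t.type = 778;    /* set at allocation time, as the fix does */
        printf("after setup:  dup=%d\n", is_duplicate(&t));  /* 1: caught */
        return 0;
    }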
+5 -5
net/ipv4/tcp_input.c
··· 4413 4413 if (eaten > 0) 4414 4414 kfree_skb_partial(skb, fragstolen); 4415 4415 if (!sock_flag(sk, SOCK_DEAD)) 4416 - sk->sk_data_ready(sk, 0); 4416 + sk->sk_data_ready(sk); 4417 4417 return; 4418 4418 } 4419 4419 ··· 4914 4914 BUG(); 4915 4915 tp->urg_data = TCP_URG_VALID | tmp; 4916 4916 if (!sock_flag(sk, SOCK_DEAD)) 4917 - sk->sk_data_ready(sk, 0); 4917 + sk->sk_data_ready(sk); 4918 4918 } 4919 4919 } 4920 4920 } ··· 5000 5000 (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || 5001 5001 (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { 5002 5002 tp->ucopy.wakeup = 1; 5003 - sk->sk_data_ready(sk, 0); 5003 + sk->sk_data_ready(sk); 5004 5004 } 5005 5005 } else if (chunk > 0) { 5006 5006 tp->ucopy.wakeup = 1; 5007 - sk->sk_data_ready(sk, 0); 5007 + sk->sk_data_ready(sk); 5008 5008 } 5009 5009 out: 5010 5010 return copied_early; ··· 5275 5275 #endif 5276 5276 if (eaten) 5277 5277 kfree_skb_partial(skb, fragstolen); 5278 - sk->sk_data_ready(sk, 0); 5278 + sk->sk_data_ready(sk); 5279 5279 return; 5280 5280 } 5281 5281 }
+1 -1
net/ipv4/tcp_ipv4.c
··· 1434 1434 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 1435 1435 tp->syn_data_acked = 1; 1436 1436 } 1437 - sk->sk_data_ready(sk, 0); 1437 + sk->sk_data_ready(sk); 1438 1438 bh_unlock_sock(child); 1439 1439 sock_put(child); 1440 1440 WARN_ON(req->sk == NULL);
+1 -1
net/ipv4/tcp_minisocks.c
··· 745 745 skb->len); 746 746 /* Wakeup parent, send SIGIO */ 747 747 if (state == TCP_SYN_RECV && child->sk_state != state) 748 - parent->sk_data_ready(parent, 0); 748 + parent->sk_data_ready(parent); 749 749 } else { 750 750 /* Alas, it is possible again, because we do lookup 751 751 * in main socket hash table and lock on listening
+1 -1
net/ipv6/tcp_ipv6.c
··· 798 798 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); 799 799 800 800 fl6.flowi6_proto = IPPROTO_TCP; 801 - if (rt6_need_strict(&fl6.daddr) || !oif) 801 + if (rt6_need_strict(&fl6.daddr) && !oif) 802 802 fl6.flowi6_oif = inet6_iif(skb); 803 803 else 804 804 fl6.flowi6_oif = oif;
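With the operator flipped, the fallback to the skb's incoming interface now applies only when the destination demands a strict interface match and the caller supplied no oif; otherwise the caller's oif is respected, including the zero that leaves the choice to the route lookup. The three cases, exercised standalone:

    #include <stdbool.h>
    #include <stdio.h>

    /* the corrected condition from tcp_v6_send_response(), isolated */
    static int pick_oif(bool strict, int oif, int iif)
    {
        return (strict && !oif) ? iif : oif;
    }

    int main(void)
    {
        printf("%d\n", pick_oif(true, 0, 7));   /* 7: link-local, no oif given */
        printf("%d\n", pick_oif(true, 3, 7));   /* 3: the caller's oif wins now */
        printf("%d\n", pick_oif(false, 0, 7));  /* 0: route lookup may choose */
        return 0;
    }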
+2 -2
net/iucv/af_iucv.c
··· 1757 1757 1758 1758 /* Wake up accept */ 1759 1759 nsk->sk_state = IUCV_CONNECTED; 1760 - sk->sk_data_ready(sk, 1); 1760 + sk->sk_data_ready(sk); 1761 1761 err = 0; 1762 1762 fail: 1763 1763 bh_unlock_sock(sk); ··· 1968 1968 if (!err) { 1969 1969 iucv_accept_enqueue(sk, nsk); 1970 1970 nsk->sk_state = IUCV_CONNECTED; 1971 - sk->sk_data_ready(sk, 1); 1971 + sk->sk_data_ready(sk); 1972 1972 } else 1973 1973 iucv_sock_kill(nsk); 1974 1974 bh_unlock_sock(sk);
+1 -1
net/key/af_key.c
··· 205 205 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { 206 206 skb_set_owner_r(*skb2, sk); 207 207 skb_queue_tail(&sk->sk_receive_queue, *skb2); 208 - sk->sk_data_ready(sk, (*skb2)->len); 208 + sk->sk_data_ready(sk); 209 209 *skb2 = NULL; 210 210 err = 0; 211 211 }
+2 -2
net/l2tp/l2tp_ppp.c
··· 753 753 session->deref = pppol2tp_session_sock_put; 754 754 755 755 /* If PMTU discovery was enabled, use the MTU that was discovered */ 756 - dst = sk_dst_get(sk); 756 + dst = sk_dst_get(tunnel->sock); 757 757 if (dst != NULL) { 758 - u32 pmtu = dst_mtu(__sk_dst_get(sk)); 758 + u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock)); 759 759 if (pmtu != 0) 760 760 session->mtu = session->mru = pmtu - 761 761 PPPOL2TP_HEADER_OVERHEAD;
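sk here is the PPPoX session socket being set up, but path-MTU discovery happens on the tunnel's encapsulating socket, so the old code read a dst that carried no discovered PMTU and the session kept an oversized MTU/MRU. A sketch of the corrected derivation; the struct, its field, and the 40-byte value assumed for PPPOL2TP_HEADER_OVERHEAD all stand in for the real ones:

    #include <stdio.h>

    #define HEADER_OVERHEAD 40    /* assumed PPPOL2TP_HEADER_OVERHEAD */

    struct fake_sock { unsigned int pmtu; };    /* dst_mtu() stand-in */

    static unsigned int session_mtu(const struct fake_sock *sk,
                                    unsigned int fallback)
    {
        unsigned int pmtu = sk->pmtu;

        return (pmtu ? pmtu : fallback) - HEADER_OVERHEAD;
    }

    int main(void)
    {
        struct fake_sock session = { 0 };     /* no PMTU here: the old bug */
        struct fake_sock tunnel  = { 1400 };  /* discovery happened here */

        printf("from session sock: %u\n", session_mtu(&session, 1500)); /* 1460 */
        printf("from tunnel sock:  %u\n", session_mtu(&tunnel, 1500));  /* 1360 */
        return 0;
    }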
+2 -2
net/netlink/af_netlink.c
··· 1653 1653 else 1654 1654 #endif /* CONFIG_NETLINK_MMAP */ 1655 1655 skb_queue_tail(&sk->sk_receive_queue, skb); 1656 - sk->sk_data_ready(sk, len); 1656 + sk->sk_data_ready(sk); 1657 1657 return len; 1658 1658 } 1659 1659 ··· 2394 2394 return err ? : copied; 2395 2395 } 2396 2396 2397 - static void netlink_data_ready(struct sock *sk, int len) 2397 + static void netlink_data_ready(struct sock *sk) 2398 2398 { 2399 2399 BUG(); 2400 2400 }
+1 -1
net/netrom/af_netrom.c
··· 1011 1011 skb_queue_head(&sk->sk_receive_queue, skb); 1012 1012 1013 1013 if (!sock_flag(sk, SOCK_DEAD)) 1014 - sk->sk_data_ready(sk, skb->len); 1014 + sk->sk_data_ready(sk); 1015 1015 1016 1016 bh_unlock_sock(sk); 1017 1017
+1 -1
net/nfc/llcp_core.c
··· 976 976 new_sk->sk_state = LLCP_CONNECTED; 977 977 978 978 /* Wake the listening processes */ 979 - parent->sk_data_ready(parent, 0); 979 + parent->sk_data_ready(parent); 980 980 981 981 /* Send CC */ 982 982 nfc_llcp_send_cc(new_sock);
+3 -3
net/packet/af_packet.c
··· 1848 1848 skb->dropcount = atomic_read(&sk->sk_drops); 1849 1849 __skb_queue_tail(&sk->sk_receive_queue, skb); 1850 1850 spin_unlock(&sk->sk_receive_queue.lock); 1851 - sk->sk_data_ready(sk, skb->len); 1851 + sk->sk_data_ready(sk); 1852 1852 return 0; 1853 1853 1854 1854 drop_n_acct: ··· 2054 2054 else 2055 2055 prb_clear_blk_fill_status(&po->rx_ring); 2056 2056 2057 - sk->sk_data_ready(sk, 0); 2057 + sk->sk_data_ready(sk); 2058 2058 2059 2059 drop_n_restore: 2060 2060 if (skb_head != skb->data && skb_shared(skb)) { ··· 2069 2069 po->stats.stats1.tp_drops++; 2070 2070 spin_unlock(&sk->sk_receive_queue.lock); 2071 2071 2072 - sk->sk_data_ready(sk, 0); 2072 + sk->sk_data_ready(sk); 2073 2073 kfree_skb(copy_skb); 2074 2074 goto drop_n_restore; 2075 2075 }
+2 -2
net/phonet/pep-gprs.c
··· 37 37 struct gprs_dev { 38 38 struct sock *sk; 39 39 void (*old_state_change)(struct sock *); 40 - void (*old_data_ready)(struct sock *, int); 40 + void (*old_data_ready)(struct sock *); 41 41 void (*old_write_space)(struct sock *); 42 42 43 43 struct net_device *dev; ··· 146 146 return err; 147 147 } 148 148 149 - static void gprs_data_ready(struct sock *sk, int len) 149 + static void gprs_data_ready(struct sock *sk) 150 150 { 151 151 struct gprs_dev *gp = sk->sk_user_data; 152 152 struct sk_buff *skb;
+3 -5
net/phonet/pep.c
··· 462 462 queue: 463 463 skb->dev = NULL; 464 464 skb_set_owner_r(skb, sk); 465 - err = skb->len; 466 465 skb_queue_tail(queue, skb); 467 466 if (!sock_flag(sk, SOCK_DEAD)) 468 - sk->sk_data_ready(sk, err); 467 + sk->sk_data_ready(sk); 469 468 return NET_RX_SUCCESS; 470 469 } 471 470 ··· 586 587 pn->rx_credits--; 587 588 skb->dev = NULL; 588 589 skb_set_owner_r(skb, sk); 589 - err = skb->len; 590 590 skb_queue_tail(&sk->sk_receive_queue, skb); 591 591 if (!sock_flag(sk, SOCK_DEAD)) 592 - sk->sk_data_ready(sk, err); 592 + sk->sk_data_ready(sk); 593 593 return NET_RX_SUCCESS; 594 594 595 595 case PNS_PEP_CONNECT_RESP: ··· 696 698 skb_queue_head(&sk->sk_receive_queue, skb); 697 699 sk_acceptq_added(sk); 698 700 if (!sock_flag(sk, SOCK_DEAD)) 699 - sk->sk_data_ready(sk, 0); 701 + sk->sk_data_ready(sk); 700 702 return NET_RX_SUCCESS; 701 703 702 704 case PNS_PEP_DISCONNECT_REQ:
+2 -2
net/rds/tcp.h
··· 61 61 /* tcp_listen.c */ 62 62 int rds_tcp_listen_init(void); 63 63 void rds_tcp_listen_stop(void); 64 - void rds_tcp_listen_data_ready(struct sock *sk, int bytes); 64 + void rds_tcp_listen_data_ready(struct sock *sk); 65 65 66 66 /* tcp_recv.c */ 67 67 int rds_tcp_recv_init(void); 68 68 void rds_tcp_recv_exit(void); 69 - void rds_tcp_data_ready(struct sock *sk, int bytes); 69 + void rds_tcp_data_ready(struct sock *sk); 70 70 int rds_tcp_recv(struct rds_connection *conn); 71 71 void rds_tcp_inc_free(struct rds_incoming *inc); 72 72 int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
+3 -3
net/rds/tcp_listen.c
··· 108 108 cond_resched(); 109 109 } 110 110 111 - void rds_tcp_listen_data_ready(struct sock *sk, int bytes) 111 + void rds_tcp_listen_data_ready(struct sock *sk) 112 112 { 113 - void (*ready)(struct sock *sk, int bytes); 113 + void (*ready)(struct sock *sk); 114 114 115 115 rdsdebug("listen data ready sk %p\n", sk); 116 116 ··· 132 132 133 133 out: 134 134 read_unlock(&sk->sk_callback_lock); 135 - ready(sk, bytes); 135 + ready(sk); 136 136 } 137 137 138 138 int rds_tcp_listen_init(void)
+4 -4
net/rds/tcp_recv.c
··· 314 314 return ret; 315 315 } 316 316 317 - void rds_tcp_data_ready(struct sock *sk, int bytes) 317 + void rds_tcp_data_ready(struct sock *sk) 318 318 { 319 - void (*ready)(struct sock *sk, int bytes); 319 + void (*ready)(struct sock *sk); 320 320 struct rds_connection *conn; 321 321 struct rds_tcp_connection *tc; 322 322 323 - rdsdebug("data ready sk %p bytes %d\n", sk, bytes); 323 + rdsdebug("data ready sk %p\n", sk); 324 324 325 325 read_lock(&sk->sk_callback_lock); 326 326 conn = sk->sk_user_data; ··· 337 337 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 338 338 out: 339 339 read_unlock(&sk->sk_callback_lock); 340 - ready(sk, bytes); 340 + ready(sk); 341 341 } 342 342 343 343 int rds_tcp_recv_init(void)
+1 -1
net/rose/af_rose.c
··· 1041 1041 rose_start_heartbeat(make); 1042 1042 1043 1043 if (!sock_flag(sk, SOCK_DEAD)) 1044 - sk->sk_data_ready(sk, skb->len); 1044 + sk->sk_data_ready(sk); 1045 1045 1046 1046 return 1; 1047 1047 }
+3 -3
net/rxrpc/ar-input.c
··· 113 113 spin_unlock_bh(&sk->sk_receive_queue.lock); 114 114 115 115 if (!sock_flag(sk, SOCK_DEAD)) 116 - sk->sk_data_ready(sk, skb_len); 116 + sk->sk_data_ready(sk); 117 117 } 118 118 skb = NULL; 119 119 } else { ··· 632 632 * handle data received on the local endpoint 633 633 * - may be called in interrupt context 634 634 */ 635 - void rxrpc_data_ready(struct sock *sk, int count) 635 + void rxrpc_data_ready(struct sock *sk) 636 636 { 637 637 struct rxrpc_skb_priv *sp; 638 638 struct rxrpc_local *local; 639 639 struct sk_buff *skb; 640 640 int ret; 641 641 642 - _enter("%p, %d", sk, count); 642 + _enter("%p", sk); 643 643 644 644 ASSERT(!irqs_disabled()); 645 645
+1 -1
net/rxrpc/ar-internal.h
··· 518 518 */ 519 519 extern const char *rxrpc_pkts[]; 520 520 521 - void rxrpc_data_ready(struct sock *, int); 521 + void rxrpc_data_ready(struct sock *); 522 522 int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool); 523 523 void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *); 524 524
+7 -1
net/sctp/socket.c
··· 6604 6604 if (asoc->ep->sndbuf_policy) 6605 6605 return __sctp_write_space(asoc); 6606 6606 6607 + /* If association goes down and is just flushing its 6608 + * outq, then just normally notify others. 6609 + */ 6610 + if (asoc->base.dead) 6611 + return sctp_write_space(sk); 6612 + 6607 6613 /* Accounting for the sndbuf space is per socket, so we 6608 6614 * need to wake up others, try to be fair and in case of 6609 6615 * other associations, let them have a go first instead ··· 6745 6739 goto out; 6746 6740 } 6747 6741 6748 - void sctp_data_ready(struct sock *sk, int len) 6742 + void sctp_data_ready(struct sock *sk) 6749 6743 { 6750 6744 struct socket_wq *wq; 6751 6745
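The added early return mirrors its comment: when the association is already dead and merely draining its outq, the fairness pass over sibling associations is skipped and the wakeup falls back to plain sctp_write_space(sk). The control flow, reduced to a guard-clause sketch with illustrative types:

    #include <stdbool.h>
    #include <stdio.h>

    struct asoc { bool sndbuf_policy; bool dead; };

    static void write_space(void) { puts("plain socket wakeup"); }

    static void wake_up_waiters(const struct asoc *a)
    {
        if (a->sndbuf_policy) {
            puts("per-association wakeup");  /* __sctp_write_space() analogue */
            return;
        }
        if (a->dead) {    /* the new early return: skip the fairness pass */
            write_space();
            return;
        }
        puts("fair wakeup across sibling associations");
    }

    int main(void)
    {
        const struct asoc dying = { .sndbuf_policy = false, .dead = true };

        wake_up_waiters(&dying);
        return 0;
    }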
+2 -2
net/sctp/ulpqueue.c
··· 259 259 sctp_ulpq_clear_pd(ulpq); 260 260 261 261 if (queue == &sk->sk_receive_queue) 262 - sk->sk_data_ready(sk, 0); 262 + sk->sk_data_ready(sk); 263 263 return 1; 264 264 265 265 out_free: ··· 1135 1135 1136 1136 /* If there is data waiting, send it up the socket now. */ 1137 1137 if (sctp_ulpq_clear_pd(ulpq) || ev) 1138 - sk->sk_data_ready(sk, 0); 1138 + sk->sk_data_ready(sk); 1139 1139 }
+6 -6
net/sunrpc/svcsock.c
··· 60 60 61 61 static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, 62 62 int flags); 63 - static void svc_udp_data_ready(struct sock *, int); 63 + static void svc_udp_data_ready(struct sock *); 64 64 static int svc_udp_recvfrom(struct svc_rqst *); 65 65 static int svc_udp_sendto(struct svc_rqst *); 66 66 static void svc_sock_detach(struct svc_xprt *); ··· 403 403 /* 404 404 * INET callback when data has been received on the socket. 405 405 */ 406 - static void svc_udp_data_ready(struct sock *sk, int count) 406 + static void svc_udp_data_ready(struct sock *sk) 407 407 { 408 408 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 409 409 wait_queue_head_t *wq = sk_sleep(sk); 410 410 411 411 if (svsk) { 412 - dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n", 413 - svsk, sk, count, 412 + dprintk("svc: socket %p(inet %p), busy=%d\n", 413 + svsk, sk, 414 414 test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); 415 415 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 416 416 svc_xprt_enqueue(&svsk->sk_xprt); ··· 731 731 * A data_ready event on a listening socket means there's a connection 732 732 * pending. Do not use state_change as a substitute for it. 733 733 */ 734 - static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused) 734 + static void svc_tcp_listen_data_ready(struct sock *sk) 735 735 { 736 736 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 737 737 wait_queue_head_t *wq; ··· 783 783 wake_up_interruptible_all(wq); 784 784 } 785 785 786 - static void svc_tcp_data_ready(struct sock *sk, int count) 786 + static void svc_tcp_data_ready(struct sock *sk) 787 787 { 788 788 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 789 789 wait_queue_head_t *wq = sk_sleep(sk);
+4 -4
net/sunrpc/xprtsock.c
··· 254 254 /* 255 255 * Saved socket callback addresses 256 256 */ 257 - void (*old_data_ready)(struct sock *, int); 257 + void (*old_data_ready)(struct sock *); 258 258 void (*old_state_change)(struct sock *); 259 259 void (*old_write_space)(struct sock *); 260 260 void (*old_error_report)(struct sock *); ··· 951 951 * 952 952 * Currently this assumes we can read the whole reply in a single gulp. 953 953 */ 954 - static void xs_local_data_ready(struct sock *sk, int len) 954 + static void xs_local_data_ready(struct sock *sk) 955 955 { 956 956 struct rpc_task *task; 957 957 struct rpc_xprt *xprt; ··· 1014 1014 * @len: how much data to read 1015 1015 * 1016 1016 */ 1017 - static void xs_udp_data_ready(struct sock *sk, int len) 1017 + static void xs_udp_data_ready(struct sock *sk) 1018 1018 { 1019 1019 struct rpc_task *task; 1020 1020 struct rpc_xprt *xprt; ··· 1437 1437 * @bytes: how much data to read 1438 1438 * 1439 1439 */ 1440 - static void xs_tcp_data_ready(struct sock *sk, int bytes) 1440 + static void xs_tcp_data_ready(struct sock *sk) 1441 1441 { 1442 1442 struct rpc_xprt *xprt; 1443 1443 read_descriptor_t rd_desc;
+2 -2
net/tipc/server.c
··· 119 119 return con; 120 120 } 121 121 122 - static void sock_data_ready(struct sock *sk, int unused) 122 + static void sock_data_ready(struct sock *sk) 123 123 { 124 124 struct tipc_conn *con; 125 125 ··· 297 297 newcon->usr_data = s->tipc_conn_new(newcon->conid); 298 298 299 299 /* Wake up receive process in case of 'SYN+' message */ 300 - newsock->sk->sk_data_ready(newsock->sk, 0); 300 + newsock->sk->sk_data_ready(newsock->sk); 301 301 return ret; 302 302 } 303 303
+3 -3
net/tipc/socket.c
··· 45 45 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 46 46 47 47 static int backlog_rcv(struct sock *sk, struct sk_buff *skb); 48 - static void tipc_data_ready(struct sock *sk, int len); 48 + static void tipc_data_ready(struct sock *sk); 49 49 static void tipc_write_space(struct sock *sk); 50 50 static int tipc_release(struct socket *sock); 51 51 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); ··· 1248 1248 * @sk: socket 1249 1249 * @len: the length of messages 1250 1250 */ 1251 - static void tipc_data_ready(struct sock *sk, int len) 1251 + static void tipc_data_ready(struct sock *sk) 1252 1252 { 1253 1253 struct socket_wq *wq; 1254 1254 ··· 1410 1410 __skb_queue_tail(&sk->sk_receive_queue, buf); 1411 1411 skb_set_owner_r(buf, sk); 1412 1412 1413 - sk->sk_data_ready(sk, 0); 1413 + sk->sk_data_ready(sk); 1414 1414 return TIPC_OK; 1415 1415 } 1416 1416
+3 -3
net/unix/af_unix.c
··· 1217 1217 __skb_queue_tail(&other->sk_receive_queue, skb); 1218 1218 spin_unlock(&other->sk_receive_queue.lock); 1219 1219 unix_state_unlock(other); 1220 - other->sk_data_ready(other, 0); 1220 + other->sk_data_ready(other); 1221 1221 sock_put(other); 1222 1222 return 0; 1223 1223 ··· 1600 1600 if (max_level > unix_sk(other)->recursion_level) 1601 1601 unix_sk(other)->recursion_level = max_level; 1602 1602 unix_state_unlock(other); 1603 - other->sk_data_ready(other, len); 1603 + other->sk_data_ready(other); 1604 1604 sock_put(other); 1605 1605 scm_destroy(siocb->scm); 1606 1606 return len; ··· 1706 1706 if (max_level > unix_sk(other)->recursion_level) 1707 1707 unix_sk(other)->recursion_level = max_level; 1708 1708 unix_state_unlock(other); 1709 - other->sk_data_ready(other, size); 1709 + other->sk_data_ready(other); 1710 1710 sent += size; 1711 1711 } 1712 1712
+1 -1
net/vmw_vsock/vmci_transport_notify.c
··· 315 315 struct vsock_sock *vsk = vsock_sk(sk); 316 316 PKT_FIELD(vsk, sent_waiting_read) = false; 317 317 #endif 318 - sk->sk_data_ready(sk, 0); 318 + sk->sk_data_ready(sk); 319 319 } 320 320 321 321 static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
+2 -2
net/vmw_vsock/vmci_transport_notify_qstate.c
··· 92 92 bool bottom_half, 93 93 struct sockaddr_vm *dst, struct sockaddr_vm *src) 94 94 { 95 - sk->sk_data_ready(sk, 0); 95 + sk->sk_data_ready(sk); 96 96 } 97 97 98 98 static void vsock_block_update_write_window(struct sock *sk) ··· 290 290 /* See the comment in 291 291 * vmci_transport_notify_pkt_send_post_enqueue(). 292 292 */ 293 - sk->sk_data_ready(sk, 0); 293 + sk->sk_data_ready(sk); 294 294 } 295 295 296 296 return err;
+1 -1
net/x25/af_x25.c
··· 1064 1064 x25_start_heartbeat(make); 1065 1065 1066 1066 if (!sock_flag(sk, SOCK_DEAD)) 1067 - sk->sk_data_ready(sk, skb->len); 1067 + sk->sk_data_ready(sk); 1068 1068 rc = 1; 1069 1069 sock_put(sk); 1070 1070 out:
+1 -1
net/x25/x25_in.c
··· 79 79 skb_set_owner_r(skbn, sk); 80 80 skb_queue_tail(&sk->sk_receive_queue, skbn); 81 81 if (!sock_flag(sk, SOCK_DEAD)) 82 - sk->sk_data_ready(sk, skbn->len); 82 + sk->sk_data_ready(sk); 83 83 84 84 return 0; 85 85 }