Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull yet more networking updates from David Miller:

1) Various fixes to the new Redpine Signals wireless driver, from
Fariya Fatima.

2) L2TP PPP connect code takes PMTU from the wrong socket, fix from
Dmitry Petukhov.

3) UFO and TSO packets differ in whether they include the protocol
header in gso_size; account for that in skb_gso_transport_seglen().
From Florian Westphal.
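
A sketch of the distinction, assuming the fix keeps the existing shape
of the helper (illustrative only, not a verbatim copy of the patch):
for TSO, gso_size is the MSS and excludes the TCP header, while for
UFO it already covers the whole fragment payload.

	/* Illustrative sketch: per-segment length as seen on the wire. */
	static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
	{
		const struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* TSO: gso_size is the MSS, so add the TCP header length. */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			return tcp_hdrlen(skb) + shinfo->gso_size;

		/* UFO: gso_size already describes the complete fragment
		 * payload, so nothing needs to be added.
		 */
		return shinfo->gso_size;
	}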

4) If VLAN untagging fails, we double free the SKB in the bridging
output path. From Toshiaki Makita.

5) Several call sites of sk->sk_data_ready() were referencing an SKB
just added to the socket receive queue in order to calculate the
second argument via skb->len. This is dangerous because the moment
the skb is added to the receive queue it can be consumed in another
context and freed up.

It turns out also that none of the sk->sk_data_ready()
implementations even care about this second argument.

So just kill it off and thus fix all these use-after-free bugs as a
side effect.
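
The racy pattern and its replacement look roughly like this (an
illustrative sketch, not a specific call site from the series):

	/* Before: the skb may be consumed and freed by a reader on
	 * another CPU the moment it is queued, so the skb->len read
	 * below is a potential use-after-free.
	 */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	/* After: the length argument is gone, so there is no reason
	 * to touch the skb once it is on the receive queue.
	 */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);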

6) Fix inverted test in tcp_v6_send_response(), from Lorenzo Colitti.

7) pktgen needs to do locking properly for LLTX devices, from Daniel
Borkmann.

8) xen-netfront driver initializes TX array entries in RX loop :-) From
Vincenzo Maffione.

9) After refactoring, some tunnel drivers allow a tunnel to be
configured on top of itself. Fix from Nicolas Dichtel.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (46 commits)
vti: don't allow to add the same tunnel twice
gre: don't allow to add the same tunnel twice
drivers: net: xen-netfront: fix array initialization bug
pktgen: be friendly to LLTX devices
r8152: check RTL8152_UNPLUG
net: sun4i-emac: add promiscuous support
net/apne: replace IS_ERR and PTR_ERR with PTR_ERR_OR_ZERO
net: ipv6: Fix oif in TCP SYN+ACK route lookup.
drivers: net: cpsw: enable interrupts after napi enable and clearing previous interrupts
drivers: net: cpsw: discard all packets received when interface is down
net: Fix use after free by removing length arg from sk_data_ready callbacks.
Drivers: net: hyperv: Address UDP checksum issues
Drivers: net: hyperv: Negotiate suitable ndis version for offload support
Drivers: net: hyperv: Allocate memory for all possible per-packet information
bridge: Fix double free and memory leak around br_allowed_ingress
bonding: Remove debug_fs files when module init fails
i40evf: program RSS LUT correctly
i40evf: remove open-coded skb_cow_head
ixgb: remove open-coded skb_cow_head
igbvf: remove open-coded skb_cow_head
...

+420 -433
+1
drivers/net/bonding/bond_main.c
··· 4492 out: 4493 return res; 4494 err: 4495 bond_netlink_fini(); 4496 err_link: 4497 unregister_pernet_subsys(&bond_net_ops);
··· 4492 out: 4493 return res; 4494 err: 4495 + bond_destroy_debugfs(); 4496 bond_netlink_fini(); 4497 err_link: 4498 unregister_pernet_subsys(&bond_net_ops);
+1 -3
drivers/net/ethernet/8390/apne.c
··· 560 static int __init apne_module_init(void) 561 { 562 apne_dev = apne_probe(-1); 563 - if (IS_ERR(apne_dev)) 564 - return PTR_ERR(apne_dev); 565 - return 0; 566 } 567 568 static void __exit apne_module_exit(void)
··· 560 static int __init apne_module_init(void) 561 { 562 apne_dev = apne_probe(-1); 563 + return PTR_ERR_OR_ZERO(apne_dev); 564 } 565 566 static void __exit apne_module_exit(void)
+21 -9
drivers/net/ethernet/allwinner/sun4i-emac.c
··· 268 writel(reg_val | EMAC_TX_MODE_ABORTED_FRAME_EN, 269 db->membase + EMAC_TX_MODE_REG); 270 271 - /* set up RX */ 272 - reg_val = readl(db->membase + EMAC_RX_CTL_REG); 273 - 274 - writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN | 275 - EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN | 276 - EMAC_RX_CTL_ACCEPT_MULTICAST_EN | 277 - EMAC_RX_CTL_ACCEPT_BROADCAST_EN, 278 - db->membase + EMAC_RX_CTL_REG); 279 - 280 /* set MAC */ 281 /* set MAC CTL0 */ 282 reg_val = readl(db->membase + EMAC_MAC_CTL0_REG); ··· 298 db->membase + EMAC_MAC_MAXF_REG); 299 300 return 0; 301 } 302 303 static unsigned int emac_powerup(struct net_device *ndev) ··· 793 .ndo_stop = emac_stop, 794 .ndo_start_xmit = emac_start_xmit, 795 .ndo_tx_timeout = emac_timeout, 796 .ndo_do_ioctl = emac_ioctl, 797 .ndo_change_mtu = eth_change_mtu, 798 .ndo_validate_addr = eth_validate_addr,
··· 268 writel(reg_val | EMAC_TX_MODE_ABORTED_FRAME_EN, 269 db->membase + EMAC_TX_MODE_REG); 270 271 /* set MAC */ 272 /* set MAC CTL0 */ 273 reg_val = readl(db->membase + EMAC_MAC_CTL0_REG); ··· 307 db->membase + EMAC_MAC_MAXF_REG); 308 309 return 0; 310 + } 311 + 312 + static void emac_set_rx_mode(struct net_device *ndev) 313 + { 314 + struct emac_board_info *db = netdev_priv(ndev); 315 + unsigned int reg_val; 316 + 317 + /* set up RX */ 318 + reg_val = readl(db->membase + EMAC_RX_CTL_REG); 319 + 320 + if (ndev->flags & IFF_PROMISC) 321 + reg_val |= EMAC_RX_CTL_PASS_ALL_EN; 322 + else 323 + reg_val &= ~EMAC_RX_CTL_PASS_ALL_EN; 324 + 325 + writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN | 326 + EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN | 327 + EMAC_RX_CTL_ACCEPT_MULTICAST_EN | 328 + EMAC_RX_CTL_ACCEPT_BROADCAST_EN, 329 + db->membase + EMAC_RX_CTL_REG); 330 } 331 332 static unsigned int emac_powerup(struct net_device *ndev) ··· 782 .ndo_stop = emac_stop, 783 .ndo_start_xmit = emac_start_xmit, 784 .ndo_tx_timeout = emac_timeout, 785 + .ndo_set_rx_mode = emac_set_rx_mode, 786 .ndo_do_ioctl = emac_ioctl, 787 .ndo_change_mtu = eth_change_mtu, 788 .ndo_validate_addr = eth_validate_addr,
-124
drivers/net/ethernet/intel/e1000/e1000_hw.c
··· 115 */ 116 static s32 e1000_set_phy_type(struct e1000_hw *hw) 117 { 118 - e_dbg("e1000_set_phy_type"); 119 - 120 if (hw->mac_type == e1000_undefined) 121 return -E1000_ERR_PHY_TYPE; 122 ··· 156 { 157 u32 ret_val; 158 u16 phy_saved_data; 159 - 160 - e_dbg("e1000_phy_init_script"); 161 162 if (hw->phy_init_script) { 163 msleep(20); ··· 249 */ 250 s32 e1000_set_mac_type(struct e1000_hw *hw) 251 { 252 - e_dbg("e1000_set_mac_type"); 253 - 254 switch (hw->device_id) { 255 case E1000_DEV_ID_82542: 256 switch (hw->revision_id) { ··· 359 { 360 u32 status; 361 362 - e_dbg("e1000_set_media_type"); 363 - 364 if (hw->mac_type != e1000_82543) { 365 /* tbi_compatibility is only valid on 82543 */ 366 hw->tbi_compatibility_en = false; ··· 406 u32 manc; 407 u32 led_ctrl; 408 s32 ret_val; 409 - 410 - e_dbg("e1000_reset_hw"); 411 412 /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ 413 if (hw->mac_type == e1000_82542_rev2_0) { ··· 556 u32 mta_size; 557 u32 ctrl_ext; 558 559 - e_dbg("e1000_init_hw"); 560 - 561 /* Initialize Identification LED */ 562 ret_val = e1000_id_led_init(hw); 563 if (ret_val) { ··· 671 u16 eeprom_data; 672 s32 ret_val; 673 674 - e_dbg("e1000_adjust_serdes_amplitude"); 675 - 676 if (hw->media_type != e1000_media_type_internal_serdes) 677 return E1000_SUCCESS; 678 ··· 715 u32 ctrl_ext; 716 s32 ret_val; 717 u16 eeprom_data; 718 - 719 - e_dbg("e1000_setup_link"); 720 721 /* Read and store word 0x0F of the EEPROM. This word contains bits 722 * that determine the hardware's default PAUSE (flow control) mode, ··· 831 u32 i; 832 u32 signal = 0; 833 s32 ret_val; 834 - 835 - e_dbg("e1000_setup_fiber_serdes_link"); 836 837 /* On adapters with a MAC newer than 82544, SWDP 1 will be 838 * set when the optics detect a signal. On older adapters, it will be ··· 1033 s32 ret_val; 1034 u16 phy_data; 1035 1036 - e_dbg("e1000_copper_link_preconfig"); 1037 - 1038 ctrl = er32(CTRL); 1039 /* With 82543, we need to force speed and duplex on the MAC equal to 1040 * what the PHY speed and duplex configuration is. In addition, we need ··· 1091 u32 led_ctrl; 1092 s32 ret_val; 1093 u16 phy_data; 1094 - 1095 - e_dbg("e1000_copper_link_igp_setup"); 1096 1097 if (hw->phy_reset_disable) 1098 return E1000_SUCCESS; ··· 1232 s32 ret_val; 1233 u16 phy_data; 1234 1235 - e_dbg("e1000_copper_link_mgp_setup"); 1236 - 1237 if (hw->phy_reset_disable) 1238 return E1000_SUCCESS; 1239 ··· 1338 s32 ret_val; 1339 u16 phy_data; 1340 1341 - e_dbg("e1000_copper_link_autoneg"); 1342 - 1343 /* Perform some bounds checking on the hw->autoneg_advertised 1344 * parameter. If this variable is zero, then set it to the default. 1345 */ ··· 1406 static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) 1407 { 1408 s32 ret_val; 1409 - e_dbg("e1000_copper_link_postconfig"); 1410 1411 if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { 1412 e1000_config_collision_dist(hw); ··· 1445 s32 ret_val; 1446 u16 i; 1447 u16 phy_data; 1448 - 1449 - e_dbg("e1000_setup_copper_link"); 1450 1451 /* Check if it is a valid PHY and set PHY mode if necessary. */ 1452 ret_val = e1000_copper_link_preconfig(hw); ··· 1524 s32 ret_val; 1525 u16 mii_autoneg_adv_reg; 1526 u16 mii_1000t_ctrl_reg; 1527 - 1528 - e_dbg("e1000_phy_setup_autoneg"); 1529 1530 /* Read the MII Auto-Neg Advertisement Register (Address 4). 
*/ 1531 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); ··· 1675 u16 mii_status_reg; 1676 u16 phy_data; 1677 u16 i; 1678 - 1679 - e_dbg("e1000_phy_force_speed_duplex"); 1680 1681 /* Turn off Flow control if we are forcing speed and duplex. */ 1682 hw->fc = E1000_FC_NONE; ··· 1906 { 1907 u32 tctl, coll_dist; 1908 1909 - e_dbg("e1000_config_collision_dist"); 1910 - 1911 if (hw->mac_type < e1000_82543) 1912 coll_dist = E1000_COLLISION_DISTANCE_82542; 1913 else ··· 1934 u32 ctrl; 1935 s32 ret_val; 1936 u16 phy_data; 1937 - 1938 - e_dbg("e1000_config_mac_to_phy"); 1939 1940 /* 82544 or newer MAC, Auto Speed Detection takes care of 1941 * MAC speed/duplex configuration. ··· 2012 { 2013 u32 ctrl; 2014 2015 - e_dbg("e1000_force_mac_fc"); 2016 - 2017 /* Get the current configuration of the Device Control Register */ 2018 ctrl = er32(CTRL); 2019 ··· 2080 u16 mii_nway_lp_ability_reg; 2081 u16 speed; 2082 u16 duplex; 2083 - 2084 - e_dbg("e1000_config_fc_after_link_up"); 2085 2086 /* Check for the case where we have fiber media and auto-neg failed 2087 * so we had to force link. In this case, we need to force the ··· 2296 u32 status; 2297 s32 ret_val = E1000_SUCCESS; 2298 2299 - e_dbg("e1000_check_for_serdes_link_generic"); 2300 - 2301 ctrl = er32(CTRL); 2302 status = er32(STATUS); 2303 rxcw = er32(RXCW); ··· 2405 u32 signal = 0; 2406 s32 ret_val; 2407 u16 phy_data; 2408 - 2409 - e_dbg("e1000_check_for_link"); 2410 2411 ctrl = er32(CTRL); 2412 status = er32(STATUS); ··· 2587 s32 ret_val; 2588 u16 phy_data; 2589 2590 - e_dbg("e1000_get_speed_and_duplex"); 2591 - 2592 if (hw->mac_type >= e1000_82543) { 2593 status = er32(STATUS); 2594 if (status & E1000_STATUS_SPEED_1000) { ··· 2652 u16 i; 2653 u16 phy_data; 2654 2655 - e_dbg("e1000_wait_autoneg"); 2656 e_dbg("Waiting for Auto-Neg to complete.\n"); 2657 2658 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ ··· 2818 u32 ret_val; 2819 unsigned long flags; 2820 2821 - e_dbg("e1000_read_phy_reg"); 2822 - 2823 spin_lock_irqsave(&e1000_phy_lock, flags); 2824 2825 if ((hw->phy_type == e1000_phy_igp) && ··· 2843 u32 i; 2844 u32 mdic = 0; 2845 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; 2846 - 2847 - e_dbg("e1000_read_phy_reg_ex"); 2848 2849 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2850 e_dbg("PHY Address %d is out of range\n", reg_addr); ··· 2956 u32 ret_val; 2957 unsigned long flags; 2958 2959 - e_dbg("e1000_write_phy_reg"); 2960 - 2961 spin_lock_irqsave(&e1000_phy_lock, flags); 2962 2963 if ((hw->phy_type == e1000_phy_igp) && ··· 2981 u32 i; 2982 u32 mdic = 0; 2983 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? 
hw->phy_addr : 1; 2984 - 2985 - e_dbg("e1000_write_phy_reg_ex"); 2986 2987 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2988 e_dbg("PHY Address %d is out of range\n", reg_addr); ··· 3073 u32 ctrl, ctrl_ext; 3074 u32 led_ctrl; 3075 3076 - e_dbg("e1000_phy_hw_reset"); 3077 - 3078 e_dbg("Resetting Phy...\n"); 3079 3080 if (hw->mac_type > e1000_82543) { ··· 3131 s32 ret_val; 3132 u16 phy_data; 3133 3134 - e_dbg("e1000_phy_reset"); 3135 - 3136 switch (hw->phy_type) { 3137 case e1000_phy_igp: 3138 ret_val = e1000_phy_hw_reset(hw); ··· 3168 s32 phy_init_status, ret_val; 3169 u16 phy_id_high, phy_id_low; 3170 bool match = false; 3171 - 3172 - e_dbg("e1000_detect_gig_phy"); 3173 3174 if (hw->phy_id != 0) 3175 return E1000_SUCCESS; ··· 3239 static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) 3240 { 3241 s32 ret_val; 3242 - e_dbg("e1000_phy_reset_dsp"); 3243 3244 do { 3245 ret_val = e1000_write_phy_reg(hw, 29, 0x001d); ··· 3269 s32 ret_val; 3270 u16 phy_data, min_length, max_length, average; 3271 e1000_rev_polarity polarity; 3272 - 3273 - e_dbg("e1000_phy_igp_get_info"); 3274 3275 /* The downshift status is checked only once, after link is established, 3276 * and it stored in the hw->speed_downgraded parameter. ··· 3349 u16 phy_data; 3350 e1000_rev_polarity polarity; 3351 3352 - e_dbg("e1000_phy_m88_get_info"); 3353 - 3354 /* The downshift status is checked only once, after link is established, 3355 * and it stored in the hw->speed_downgraded parameter. 3356 */ ··· 3420 s32 ret_val; 3421 u16 phy_data; 3422 3423 - e_dbg("e1000_phy_get_info"); 3424 - 3425 phy_info->cable_length = e1000_cable_length_undefined; 3426 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; 3427 phy_info->cable_polarity = e1000_rev_polarity_undefined; ··· 3458 3459 s32 e1000_validate_mdi_setting(struct e1000_hw *hw) 3460 { 3461 - e_dbg("e1000_validate_mdi_settings"); 3462 - 3463 if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { 3464 e_dbg("Invalid MDI setting detected\n"); 3465 hw->mdix = 1; ··· 3479 u32 eecd = er32(EECD); 3480 s32 ret_val = E1000_SUCCESS; 3481 u16 eeprom_size; 3482 - 3483 - e_dbg("e1000_init_eeprom_params"); 3484 3485 switch (hw->mac_type) { 3486 case e1000_82542_rev2_0: ··· 3697 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3698 u32 eecd, i = 0; 3699 3700 - e_dbg("e1000_acquire_eeprom"); 3701 - 3702 eecd = er32(EECD); 3703 3704 /* Request EEPROM Access */ ··· 3796 { 3797 u32 eecd; 3798 3799 - e_dbg("e1000_release_eeprom"); 3800 - 3801 eecd = er32(EECD); 3802 3803 if (hw->eeprom.type == e1000_eeprom_spi) { ··· 3842 { 3843 u16 retry_count = 0; 3844 u8 spi_stat_reg; 3845 - 3846 - e_dbg("e1000_spi_eeprom_ready"); 3847 3848 /* Read "Status Register" repeatedly until the LSB is cleared. 
The 3849 * EEPROM will signal that the command has been completed by clearing ··· 3894 { 3895 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3896 u32 i = 0; 3897 - 3898 - e_dbg("e1000_read_eeprom"); 3899 3900 if (hw->mac_type == e1000_ce4100) { 3901 GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, ··· 3995 u16 checksum = 0; 3996 u16 i, eeprom_data; 3997 3998 - e_dbg("e1000_validate_eeprom_checksum"); 3999 - 4000 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 4001 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 4002 e_dbg("EEPROM Read Error\n"); ··· 4028 { 4029 u16 checksum = 0; 4030 u16 i, eeprom_data; 4031 - 4032 - e_dbg("e1000_update_eeprom_checksum"); 4033 4034 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { 4035 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { ··· 4068 { 4069 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4070 s32 status = 0; 4071 - 4072 - e_dbg("e1000_write_eeprom"); 4073 4074 if (hw->mac_type == e1000_ce4100) { 4075 GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, ··· 4117 { 4118 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4119 u16 widx = 0; 4120 - 4121 - e_dbg("e1000_write_eeprom_spi"); 4122 4123 while (widx < words) { 4124 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; ··· 4184 u32 eecd; 4185 u16 words_written = 0; 4186 u16 i = 0; 4187 - 4188 - e_dbg("e1000_write_eeprom_microwire"); 4189 4190 /* Send the write enable command to the EEPROM (3-bit opcode plus 4191 * 6/8-bit dummy address beginning with 11). It's less work to include ··· 4263 u16 offset; 4264 u16 eeprom_data, i; 4265 4266 - e_dbg("e1000_read_mac_addr"); 4267 - 4268 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { 4269 offset = i >> 1; 4270 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { ··· 4300 { 4301 u32 i; 4302 u32 rar_num; 4303 - 4304 - e_dbg("e1000_init_rx_addrs"); 4305 4306 /* Setup the receive address. 
*/ 4307 e_dbg("Programming MAC Address into RAR[0]\n"); ··· 4458 u16 eeprom_data, i, temp; 4459 const u16 led_mask = 0x0F; 4460 4461 - e_dbg("e1000_id_led_init"); 4462 - 4463 if (hw->mac_type < e1000_82540) { 4464 /* Nothing to do */ 4465 return E1000_SUCCESS; ··· 4529 u32 ledctl; 4530 s32 ret_val = E1000_SUCCESS; 4531 4532 - e_dbg("e1000_setup_led"); 4533 - 4534 switch (hw->mac_type) { 4535 case e1000_82542_rev2_0: 4536 case e1000_82542_rev2_1: ··· 4579 { 4580 s32 ret_val = E1000_SUCCESS; 4581 4582 - e_dbg("e1000_cleanup_led"); 4583 - 4584 switch (hw->mac_type) { 4585 case e1000_82542_rev2_0: 4586 case e1000_82542_rev2_1: ··· 4612 s32 e1000_led_on(struct e1000_hw *hw) 4613 { 4614 u32 ctrl = er32(CTRL); 4615 - 4616 - e_dbg("e1000_led_on"); 4617 4618 switch (hw->mac_type) { 4619 case e1000_82542_rev2_0: ··· 4656 s32 e1000_led_off(struct e1000_hw *hw) 4657 { 4658 u32 ctrl = er32(CTRL); 4659 - 4660 - e_dbg("e1000_led_off"); 4661 4662 switch (hw->mac_type) { 4663 case e1000_82542_rev2_0: ··· 4784 */ 4785 void e1000_reset_adaptive(struct e1000_hw *hw) 4786 { 4787 - e_dbg("e1000_reset_adaptive"); 4788 - 4789 if (hw->adaptive_ifs) { 4790 if (!hw->ifs_params_forced) { 4791 hw->current_ifs_val = 0; ··· 4810 */ 4811 void e1000_update_adaptive(struct e1000_hw *hw) 4812 { 4813 - e_dbg("e1000_update_adaptive"); 4814 - 4815 if (hw->adaptive_ifs) { 4816 if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { 4817 if (hw->tx_packet_delta > MIN_NUM_XMITS) { ··· 5005 u16 i, phy_data; 5006 u16 cable_length; 5007 5008 - e_dbg("e1000_get_cable_length"); 5009 - 5010 *min_length = *max_length = 0; 5011 5012 /* Use old method for Phy older than IGP */ ··· 5120 s32 ret_val; 5121 u16 phy_data; 5122 5123 - e_dbg("e1000_check_polarity"); 5124 - 5125 if (hw->phy_type == e1000_phy_m88) { 5126 /* return the Polarity bit in the Status register. */ 5127 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, ··· 5185 { 5186 s32 ret_val; 5187 u16 phy_data; 5188 - 5189 - e_dbg("e1000_check_downshift"); 5190 5191 if (hw->phy_type == e1000_phy_igp) { 5192 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, ··· 5295 { 5296 s32 ret_val; 5297 u16 phy_data, phy_saved_data, speed, duplex, i; 5298 - 5299 - e_dbg("e1000_config_dsp_after_link_change"); 5300 5301 if (hw->phy_type != e1000_phy_igp) 5302 return E1000_SUCCESS; ··· 5429 s32 ret_val; 5430 u16 eeprom_data; 5431 5432 - e_dbg("e1000_set_phy_mode"); 5433 - 5434 if ((hw->mac_type == e1000_82545_rev_3) && 5435 (hw->media_type == e1000_media_type_copper)) { 5436 ret_val = ··· 5475 { 5476 s32 ret_val; 5477 u16 phy_data; 5478 - e_dbg("e1000_set_d3_lplu_state"); 5479 5480 if (hw->phy_type != e1000_phy_igp) 5481 return E1000_SUCCESS; ··· 5578 s32 ret_val; 5579 u16 default_page = 0; 5580 u16 phy_data; 5581 - 5582 - e_dbg("e1000_set_vco_speed"); 5583 5584 switch (hw->mac_type) { 5585 case e1000_82545_rev_3: ··· 5750 */ 5751 static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) 5752 { 5753 - e_dbg("e1000_get_auto_rd_done"); 5754 msleep(5); 5755 return E1000_SUCCESS; 5756 } ··· 5764 */ 5765 static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) 5766 { 5767 - e_dbg("e1000_get_phy_cfg_done"); 5768 msleep(10); 5769 return E1000_SUCCESS; 5770 }
··· 115 */ 116 static s32 e1000_set_phy_type(struct e1000_hw *hw) 117 { 118 if (hw->mac_type == e1000_undefined) 119 return -E1000_ERR_PHY_TYPE; 120 ··· 158 { 159 u32 ret_val; 160 u16 phy_saved_data; 161 162 if (hw->phy_init_script) { 163 msleep(20); ··· 253 */ 254 s32 e1000_set_mac_type(struct e1000_hw *hw) 255 { 256 switch (hw->device_id) { 257 case E1000_DEV_ID_82542: 258 switch (hw->revision_id) { ··· 365 { 366 u32 status; 367 368 if (hw->mac_type != e1000_82543) { 369 /* tbi_compatibility is only valid on 82543 */ 370 hw->tbi_compatibility_en = false; ··· 414 u32 manc; 415 u32 led_ctrl; 416 s32 ret_val; 417 418 /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ 419 if (hw->mac_type == e1000_82542_rev2_0) { ··· 566 u32 mta_size; 567 u32 ctrl_ext; 568 569 /* Initialize Identification LED */ 570 ret_val = e1000_id_led_init(hw); 571 if (ret_val) { ··· 683 u16 eeprom_data; 684 s32 ret_val; 685 686 if (hw->media_type != e1000_media_type_internal_serdes) 687 return E1000_SUCCESS; 688 ··· 729 u32 ctrl_ext; 730 s32 ret_val; 731 u16 eeprom_data; 732 733 /* Read and store word 0x0F of the EEPROM. This word contains bits 734 * that determine the hardware's default PAUSE (flow control) mode, ··· 847 u32 i; 848 u32 signal = 0; 849 s32 ret_val; 850 851 /* On adapters with a MAC newer than 82544, SWDP 1 will be 852 * set when the optics detect a signal. On older adapters, it will be ··· 1051 s32 ret_val; 1052 u16 phy_data; 1053 1054 ctrl = er32(CTRL); 1055 /* With 82543, we need to force speed and duplex on the MAC equal to 1056 * what the PHY speed and duplex configuration is. In addition, we need ··· 1111 u32 led_ctrl; 1112 s32 ret_val; 1113 u16 phy_data; 1114 1115 if (hw->phy_reset_disable) 1116 return E1000_SUCCESS; ··· 1254 s32 ret_val; 1255 u16 phy_data; 1256 1257 if (hw->phy_reset_disable) 1258 return E1000_SUCCESS; 1259 ··· 1362 s32 ret_val; 1363 u16 phy_data; 1364 1365 /* Perform some bounds checking on the hw->autoneg_advertised 1366 * parameter. If this variable is zero, then set it to the default. 1367 */ ··· 1432 static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) 1433 { 1434 s32 ret_val; 1435 1436 if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { 1437 e1000_config_collision_dist(hw); ··· 1472 s32 ret_val; 1473 u16 i; 1474 u16 phy_data; 1475 1476 /* Check if it is a valid PHY and set PHY mode if necessary. */ 1477 ret_val = e1000_copper_link_preconfig(hw); ··· 1553 s32 ret_val; 1554 u16 mii_autoneg_adv_reg; 1555 u16 mii_1000t_ctrl_reg; 1556 1557 /* Read the MII Auto-Neg Advertisement Register (Address 4). */ 1558 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); ··· 1706 u16 mii_status_reg; 1707 u16 phy_data; 1708 u16 i; 1709 1710 /* Turn off Flow control if we are forcing speed and duplex. */ 1711 hw->fc = E1000_FC_NONE; ··· 1939 { 1940 u32 tctl, coll_dist; 1941 1942 if (hw->mac_type < e1000_82543) 1943 coll_dist = E1000_COLLISION_DISTANCE_82542; 1944 else ··· 1969 u32 ctrl; 1970 s32 ret_val; 1971 u16 phy_data; 1972 1973 /* 82544 or newer MAC, Auto Speed Detection takes care of 1974 * MAC speed/duplex configuration. ··· 2049 { 2050 u32 ctrl; 2051 2052 /* Get the current configuration of the Device Control Register */ 2053 ctrl = er32(CTRL); 2054 ··· 2119 u16 mii_nway_lp_ability_reg; 2120 u16 speed; 2121 u16 duplex; 2122 2123 /* Check for the case where we have fiber media and auto-neg failed 2124 * so we had to force link. 
In this case, we need to force the ··· 2337 u32 status; 2338 s32 ret_val = E1000_SUCCESS; 2339 2340 ctrl = er32(CTRL); 2341 status = er32(STATUS); 2342 rxcw = er32(RXCW); ··· 2448 u32 signal = 0; 2449 s32 ret_val; 2450 u16 phy_data; 2451 2452 ctrl = er32(CTRL); 2453 status = er32(STATUS); ··· 2632 s32 ret_val; 2633 u16 phy_data; 2634 2635 if (hw->mac_type >= e1000_82543) { 2636 status = er32(STATUS); 2637 if (status & E1000_STATUS_SPEED_1000) { ··· 2699 u16 i; 2700 u16 phy_data; 2701 2702 e_dbg("Waiting for Auto-Neg to complete.\n"); 2703 2704 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ ··· 2866 u32 ret_val; 2867 unsigned long flags; 2868 2869 spin_lock_irqsave(&e1000_phy_lock, flags); 2870 2871 if ((hw->phy_type == e1000_phy_igp) && ··· 2893 u32 i; 2894 u32 mdic = 0; 2895 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; 2896 2897 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2898 e_dbg("PHY Address %d is out of range\n", reg_addr); ··· 3008 u32 ret_val; 3009 unsigned long flags; 3010 3011 spin_lock_irqsave(&e1000_phy_lock, flags); 3012 3013 if ((hw->phy_type == e1000_phy_igp) && ··· 3035 u32 i; 3036 u32 mdic = 0; 3037 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; 3038 3039 if (reg_addr > MAX_PHY_REG_ADDRESS) { 3040 e_dbg("PHY Address %d is out of range\n", reg_addr); ··· 3129 u32 ctrl, ctrl_ext; 3130 u32 led_ctrl; 3131 3132 e_dbg("Resetting Phy...\n"); 3133 3134 if (hw->mac_type > e1000_82543) { ··· 3189 s32 ret_val; 3190 u16 phy_data; 3191 3192 switch (hw->phy_type) { 3193 case e1000_phy_igp: 3194 ret_val = e1000_phy_hw_reset(hw); ··· 3228 s32 phy_init_status, ret_val; 3229 u16 phy_id_high, phy_id_low; 3230 bool match = false; 3231 3232 if (hw->phy_id != 0) 3233 return E1000_SUCCESS; ··· 3301 static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) 3302 { 3303 s32 ret_val; 3304 3305 do { 3306 ret_val = e1000_write_phy_reg(hw, 29, 0x001d); ··· 3332 s32 ret_val; 3333 u16 phy_data, min_length, max_length, average; 3334 e1000_rev_polarity polarity; 3335 3336 /* The downshift status is checked only once, after link is established, 3337 * and it stored in the hw->speed_downgraded parameter. ··· 3414 u16 phy_data; 3415 e1000_rev_polarity polarity; 3416 3417 /* The downshift status is checked only once, after link is established, 3418 * and it stored in the hw->speed_downgraded parameter. 3419 */ ··· 3487 s32 ret_val; 3488 u16 phy_data; 3489 3490 phy_info->cable_length = e1000_cable_length_undefined; 3491 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; 3492 phy_info->cable_polarity = e1000_rev_polarity_undefined; ··· 3527 3528 s32 e1000_validate_mdi_setting(struct e1000_hw *hw) 3529 { 3530 if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { 3531 e_dbg("Invalid MDI setting detected\n"); 3532 hw->mdix = 1; ··· 3550 u32 eecd = er32(EECD); 3551 s32 ret_val = E1000_SUCCESS; 3552 u16 eeprom_size; 3553 3554 switch (hw->mac_type) { 3555 case e1000_82542_rev2_0: ··· 3770 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3771 u32 eecd, i = 0; 3772 3773 eecd = er32(EECD); 3774 3775 /* Request EEPROM Access */ ··· 3871 { 3872 u32 eecd; 3873 3874 eecd = er32(EECD); 3875 3876 if (hw->eeprom.type == e1000_eeprom_spi) { ··· 3919 { 3920 u16 retry_count = 0; 3921 u8 spi_stat_reg; 3922 3923 /* Read "Status Register" repeatedly until the LSB is cleared. 
The 3924 * EEPROM will signal that the command has been completed by clearing ··· 3973 { 3974 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3975 u32 i = 0; 3976 3977 if (hw->mac_type == e1000_ce4100) { 3978 GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, ··· 4076 u16 checksum = 0; 4077 u16 i, eeprom_data; 4078 4079 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 4080 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 4081 e_dbg("EEPROM Read Error\n"); ··· 4111 { 4112 u16 checksum = 0; 4113 u16 i, eeprom_data; 4114 4115 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { 4116 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { ··· 4153 { 4154 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4155 s32 status = 0; 4156 4157 if (hw->mac_type == e1000_ce4100) { 4158 GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, ··· 4204 { 4205 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4206 u16 widx = 0; 4207 4208 while (widx < words) { 4209 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; ··· 4273 u32 eecd; 4274 u16 words_written = 0; 4275 u16 i = 0; 4276 4277 /* Send the write enable command to the EEPROM (3-bit opcode plus 4278 * 6/8-bit dummy address beginning with 11). It's less work to include ··· 4354 u16 offset; 4355 u16 eeprom_data, i; 4356 4357 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { 4358 offset = i >> 1; 4359 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { ··· 4393 { 4394 u32 i; 4395 u32 rar_num; 4396 4397 /* Setup the receive address. */ 4398 e_dbg("Programming MAC Address into RAR[0]\n"); ··· 4553 u16 eeprom_data, i, temp; 4554 const u16 led_mask = 0x0F; 4555 4556 if (hw->mac_type < e1000_82540) { 4557 /* Nothing to do */ 4558 return E1000_SUCCESS; ··· 4626 u32 ledctl; 4627 s32 ret_val = E1000_SUCCESS; 4628 4629 switch (hw->mac_type) { 4630 case e1000_82542_rev2_0: 4631 case e1000_82542_rev2_1: ··· 4678 { 4679 s32 ret_val = E1000_SUCCESS; 4680 4681 switch (hw->mac_type) { 4682 case e1000_82542_rev2_0: 4683 case e1000_82542_rev2_1: ··· 4713 s32 e1000_led_on(struct e1000_hw *hw) 4714 { 4715 u32 ctrl = er32(CTRL); 4716 4717 switch (hw->mac_type) { 4718 case e1000_82542_rev2_0: ··· 4759 s32 e1000_led_off(struct e1000_hw *hw) 4760 { 4761 u32 ctrl = er32(CTRL); 4762 4763 switch (hw->mac_type) { 4764 case e1000_82542_rev2_0: ··· 4889 */ 4890 void e1000_reset_adaptive(struct e1000_hw *hw) 4891 { 4892 if (hw->adaptive_ifs) { 4893 if (!hw->ifs_params_forced) { 4894 hw->current_ifs_val = 0; ··· 4917 */ 4918 void e1000_update_adaptive(struct e1000_hw *hw) 4919 { 4920 if (hw->adaptive_ifs) { 4921 if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { 4922 if (hw->tx_packet_delta > MIN_NUM_XMITS) { ··· 5114 u16 i, phy_data; 5115 u16 cable_length; 5116 5117 *min_length = *max_length = 0; 5118 5119 /* Use old method for Phy older than IGP */ ··· 5231 s32 ret_val; 5232 u16 phy_data; 5233 5234 if (hw->phy_type == e1000_phy_m88) { 5235 /* return the Polarity bit in the Status register. 
*/ 5236 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, ··· 5298 { 5299 s32 ret_val; 5300 u16 phy_data; 5301 5302 if (hw->phy_type == e1000_phy_igp) { 5303 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, ··· 5410 { 5411 s32 ret_val; 5412 u16 phy_data, phy_saved_data, speed, duplex, i; 5413 5414 if (hw->phy_type != e1000_phy_igp) 5415 return E1000_SUCCESS; ··· 5546 s32 ret_val; 5547 u16 eeprom_data; 5548 5549 if ((hw->mac_type == e1000_82545_rev_3) && 5550 (hw->media_type == e1000_media_type_copper)) { 5551 ret_val = ··· 5594 { 5595 s32 ret_val; 5596 u16 phy_data; 5597 5598 if (hw->phy_type != e1000_phy_igp) 5599 return E1000_SUCCESS; ··· 5698 s32 ret_val; 5699 u16 default_page = 0; 5700 u16 phy_data; 5701 5702 switch (hw->mac_type) { 5703 case e1000_82545_rev_3: ··· 5872 */ 5873 static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) 5874 { 5875 msleep(5); 5876 return E1000_SUCCESS; 5877 } ··· 5887 */ 5888 static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) 5889 { 5890 msleep(10); 5891 return E1000_SUCCESS; 5892 }
+5 -6
drivers/net/ethernet/intel/e1000/e1000_main.c
··· 2682 u32 cmd_length = 0; 2683 u16 ipcse = 0, tucse, mss; 2684 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2685 - int err; 2686 2687 if (skb_is_gso(skb)) { 2688 - if (skb_header_cloned(skb)) { 2689 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2690 - if (err) 2691 - return err; 2692 - } 2693 2694 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2695 mss = skb_shinfo(skb)->gso_size;
··· 2682 u32 cmd_length = 0; 2683 u16 ipcse = 0, tucse, mss; 2684 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2685 2686 if (skb_is_gso(skb)) { 2687 + int err; 2688 + 2689 + err = skb_cow_head(skb, 0); 2690 + if (err < 0) 2691 + return err; 2692 2693 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2694 mss = skb_shinfo(skb)->gso_size;
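
The e1000 hunk above, and the matching conversions in e1000e, i40evf,
igb, igbvf, ixgb, ixgbe and ixgbevf below, all replace the same
open-coded sequence with skb_cow_head(). A rough sketch of the
equivalence (with a requested headroom of 0, the helper only un-clones
a shared header):

	/* Open-coded pattern being removed: */
	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* Replacement: one helper call, returning 0 on success or a
	 * negative errno if the header could not be made writable.
	 */
	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;
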
+4 -6
drivers/net/ethernet/intel/e1000e/netdev.c
··· 5100 u32 cmd_length = 0; 5101 u16 ipcse = 0, mss; 5102 u8 ipcss, ipcso, tucss, tucso, hdr_len; 5103 5104 if (!skb_is_gso(skb)) 5105 return 0; 5106 5107 - if (skb_header_cloned(skb)) { 5108 - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 5109 - 5110 - if (err) 5111 - return err; 5112 - } 5113 5114 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5115 mss = skb_shinfo(skb)->gso_size;
··· 5100 u32 cmd_length = 0; 5101 u16 ipcse = 0, mss; 5102 u8 ipcss, ipcso, tucss, tucso, hdr_len; 5103 + int err; 5104 5105 if (!skb_is_gso(skb)) 5106 return 0; 5107 5108 + err = skb_cow_head(skb, 0); 5109 + if (err < 0) 5110 + return err; 5111 5112 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5113 mss = skb_shinfo(skb)->gso_size;
+4 -6
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
··· 1114 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) 1115 { 1116 u32 cd_cmd, cd_tso_len, cd_mss; 1117 struct tcphdr *tcph; 1118 struct iphdr *iph; 1119 u32 l4len; 1120 int err; 1121 - struct ipv6hdr *ipv6h; 1122 1123 if (!skb_is_gso(skb)) 1124 return 0; 1125 1126 - if (skb_header_cloned(skb)) { 1127 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1128 - if (err) 1129 - return err; 1130 - } 1131 1132 if (protocol == htons(ETH_P_IP)) { 1133 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
··· 1114 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) 1115 { 1116 u32 cd_cmd, cd_tso_len, cd_mss; 1117 + struct ipv6hdr *ipv6h; 1118 struct tcphdr *tcph; 1119 struct iphdr *iph; 1120 u32 l4len; 1121 int err; 1122 1123 if (!skb_is_gso(skb)) 1124 return 0; 1125 1126 + err = skb_cow_head(skb, 0); 1127 + if (err < 0) 1128 + return err; 1129 1130 if (protocol == htons(ETH_P_IP)) { 1131 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+16 -4
drivers/net/ethernet/intel/i40evf/i40evf_main.c
··· 1412 schedule_work(&adapter->adminq_task); 1413 } 1414 1415 static int next_queue(struct i40evf_adapter *adapter, int j) 1416 { 1417 j += 1; ··· 1459 /* Populate the LUT with max no. of queues in round robin fashion */ 1460 j = adapter->vsi_res->num_queue_pairs; 1461 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { 1462 - lut = next_queue(adapter, j); 1463 - lut |= next_queue(adapter, j) << 8; 1464 - lut |= next_queue(adapter, j) << 16; 1465 - lut |= next_queue(adapter, j) << 24; 1466 wr32(hw, I40E_VFQF_HLUT(i), lut); 1467 } 1468 i40e_flush(hw);
··· 1412 schedule_work(&adapter->adminq_task); 1413 } 1414 1415 + /** 1416 + * i40evf_configure_rss - increment to next available tx queue 1417 + * @adapter: board private structure 1418 + * @j: queue counter 1419 + * 1420 + * Helper function for RSS programming to increment through available 1421 + * queus. Returns the next queue value. 1422 + **/ 1423 static int next_queue(struct i40evf_adapter *adapter, int j) 1424 { 1425 j += 1; ··· 1451 /* Populate the LUT with max no. of queues in round robin fashion */ 1452 j = adapter->vsi_res->num_queue_pairs; 1453 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { 1454 + j = next_queue(adapter, j); 1455 + lut = j; 1456 + j = next_queue(adapter, j); 1457 + lut |= j << 8; 1458 + j = next_queue(adapter, j); 1459 + lut |= j << 16; 1460 + j = next_queue(adapter, j); 1461 + lut |= j << 24; 1462 wr32(hw, I40E_VFQF_HLUT(i), lut); 1463 } 1464 i40e_flush(hw);
+1 -15
drivers/net/ethernet/intel/igb/igb.h
··· 241 struct igb_tx_buffer *tx_buffer_info; 242 struct igb_rx_buffer *rx_buffer_info; 243 }; 244 - unsigned long last_rx_timestamp; 245 void *desc; /* descriptor ring memory */ 246 unsigned long flags; /* ring specific flags */ 247 void __iomem *tail; /* pointer to ring tail register */ ··· 436 struct hwtstamp_config tstamp_config; 437 unsigned long ptp_tx_start; 438 unsigned long last_rx_ptp_check; 439 spinlock_t tmreg_lock; 440 struct cyclecounter cc; 441 struct timecounter tc; ··· 533 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); 534 void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, 535 struct sk_buff *skb); 536 - static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring, 537 - union e1000_adv_rx_desc *rx_desc, 538 - struct sk_buff *skb) 539 - { 540 - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && 541 - !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) 542 - igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); 543 - 544 - /* Update the last_rx_timestamp timer in order to enable watchdog check 545 - * for error case of latched timestamp on a dropped packet. 546 - */ 547 - rx_ring->last_rx_timestamp = jiffies; 548 - } 549 - 550 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); 551 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); 552 #ifdef CONFIG_IGB_HWMON
··· 241 struct igb_tx_buffer *tx_buffer_info; 242 struct igb_rx_buffer *rx_buffer_info; 243 }; 244 void *desc; /* descriptor ring memory */ 245 unsigned long flags; /* ring specific flags */ 246 void __iomem *tail; /* pointer to ring tail register */ ··· 437 struct hwtstamp_config tstamp_config; 438 unsigned long ptp_tx_start; 439 unsigned long last_rx_ptp_check; 440 + unsigned long last_rx_timestamp; 441 spinlock_t tmreg_lock; 442 struct cyclecounter cc; 443 struct timecounter tc; ··· 533 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); 534 void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, 535 struct sk_buff *skb); 536 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); 537 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); 538 #ifdef CONFIG_IGB_HWMON
+7 -6
drivers/net/ethernet/intel/igb/igb_main.c
··· 4605 struct sk_buff *skb = first->skb; 4606 u32 vlan_macip_lens, type_tucmd; 4607 u32 mss_l4len_idx, l4len; 4608 4609 if (skb->ip_summed != CHECKSUM_PARTIAL) 4610 return 0; ··· 4613 if (!skb_is_gso(skb)) 4614 return 0; 4615 4616 - if (skb_header_cloned(skb)) { 4617 - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4618 - if (err) 4619 - return err; 4620 - } 4621 4622 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 4623 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; ··· 6954 6955 igb_rx_checksum(rx_ring, rx_desc, skb); 6956 6957 - igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); 6958 6959 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 6960 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
··· 4605 struct sk_buff *skb = first->skb; 4606 u32 vlan_macip_lens, type_tucmd; 4607 u32 mss_l4len_idx, l4len; 4608 + int err; 4609 4610 if (skb->ip_summed != CHECKSUM_PARTIAL) 4611 return 0; ··· 4612 if (!skb_is_gso(skb)) 4613 return 0; 4614 4615 + err = skb_cow_head(skb, 0); 4616 + if (err < 0) 4617 + return err; 4618 4619 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 4620 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; ··· 6955 6956 igb_rx_checksum(rx_ring, rx_desc, skb); 6957 6958 + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && 6959 + !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) 6960 + igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); 6961 6962 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 6963 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+7 -7
drivers/net/ethernet/intel/igb/igb_ptp.c
··· 427 void igb_ptp_rx_hang(struct igb_adapter *adapter) 428 { 429 struct e1000_hw *hw = &adapter->hw; 430 - struct igb_ring *rx_ring; 431 u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); 432 unsigned long rx_event; 433 - int n; 434 435 if (hw->mac.type != e1000_82576) 436 return; ··· 443 444 /* Determine the most recent watchdog or rx_timestamp event */ 445 rx_event = adapter->last_rx_ptp_check; 446 - for (n = 0; n < adapter->num_rx_queues; n++) { 447 - rx_ring = adapter->rx_ring[n]; 448 - if (time_after(rx_ring->last_rx_timestamp, rx_event)) 449 - rx_event = rx_ring->last_rx_timestamp; 450 - } 451 452 /* Only need to read the high RXSTMP register to clear the lock */ 453 if (time_is_before_jiffies(rx_event + 5 * HZ)) { ··· 535 regval |= (u64)rd32(E1000_RXSTMPH) << 32; 536 537 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 538 } 539 540 /**
··· 427 void igb_ptp_rx_hang(struct igb_adapter *adapter) 428 { 429 struct e1000_hw *hw = &adapter->hw; 430 u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); 431 unsigned long rx_event; 432 433 if (hw->mac.type != e1000_82576) 434 return; ··· 445 446 /* Determine the most recent watchdog or rx_timestamp event */ 447 rx_event = adapter->last_rx_ptp_check; 448 + if (time_after(adapter->last_rx_timestamp, rx_event)) 449 + rx_event = adapter->last_rx_timestamp; 450 451 /* Only need to read the high RXSTMP register to clear the lock */ 452 if (time_is_before_jiffies(rx_event + 5 * HZ)) { ··· 540 regval |= (u64)rd32(E1000_RXSTMPH) << 32; 541 542 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 543 + 544 + /* Update the last_rx_timestamp timer in order to enable watchdog check 545 + * for error case of latched timestamp on a dropped packet. 546 + */ 547 + adapter->last_rx_timestamp = jiffies; 548 } 549 550 /**
+7 -9
drivers/net/ethernet/intel/igbvf/netdev.c
··· 1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1911 { 1912 struct e1000_adv_tx_context_desc *context_desc; 1913 - unsigned int i; 1914 - int err; 1915 struct igbvf_buffer *buffer_info; 1916 u32 info = 0, tu_cmd = 0; 1917 u32 mss_l4len_idx, l4len; 1918 *hdr_len = 0; 1919 1920 - if (skb_header_cloned(skb)) { 1921 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1922 - if (err) { 1923 - dev_err(&adapter->pdev->dev, 1924 - "igbvf_tso returning an error\n"); 1925 - return err; 1926 - } 1927 } 1928 1929 l4len = tcp_hdrlen(skb);
··· 1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1911 { 1912 struct e1000_adv_tx_context_desc *context_desc; 1913 struct igbvf_buffer *buffer_info; 1914 u32 info = 0, tu_cmd = 0; 1915 u32 mss_l4len_idx, l4len; 1916 + unsigned int i; 1917 + int err; 1918 + 1919 *hdr_len = 0; 1920 1921 + err = skb_cow_head(skb, 0); 1922 + if (err < 0) { 1923 + dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n"); 1924 + return err; 1925 } 1926 1927 l4len = tcp_hdrlen(skb);
+4 -6
drivers/net/ethernet/intel/ixgb/ixgb_main.c
··· 1220 unsigned int i; 1221 u8 ipcss, ipcso, tucss, tucso, hdr_len; 1222 u16 ipcse, tucse, mss; 1223 - int err; 1224 1225 if (likely(skb_is_gso(skb))) { 1226 struct ixgb_buffer *buffer_info; 1227 struct iphdr *iph; 1228 1229 - if (skb_header_cloned(skb)) { 1230 - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1231 - if (err) 1232 - return err; 1233 - } 1234 1235 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1236 mss = skb_shinfo(skb)->gso_size;
··· 1220 unsigned int i; 1221 u8 ipcss, ipcso, tucss, tucso, hdr_len; 1222 u16 ipcse, tucse, mss; 1223 1224 if (likely(skb_is_gso(skb))) { 1225 struct ixgb_buffer *buffer_info; 1226 struct iphdr *iph; 1227 + int err; 1228 1229 + err = skb_cow_head(skb, 0); 1230 + if (err < 0) 1231 + return err; 1232 1233 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1234 mss = skb_shinfo(skb)->gso_size;
+1
drivers/net/ethernet/intel/ixgbe/ixgbe.h
··· 811 __IXGBE_DISABLED, 812 __IXGBE_REMOVING, 813 __IXGBE_SERVICE_SCHED, 814 __IXGBE_IN_SFP_INIT, 815 __IXGBE_PTP_RUNNING, 816 __IXGBE_PTP_TX_IN_PROGRESS,
··· 811 __IXGBE_DISABLED, 812 __IXGBE_REMOVING, 813 __IXGBE_SERVICE_SCHED, 814 + __IXGBE_SERVICE_INITED, 815 __IXGBE_IN_SFP_INIT, 816 __IXGBE_PTP_RUNNING, 817 __IXGBE_PTP_TX_IN_PROGRESS,
+20 -8
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 297 return; 298 hw->hw_addr = NULL; 299 e_dev_err("Adapter removed\n"); 300 - ixgbe_service_event_schedule(adapter); 301 } 302 303 void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) ··· 6510 struct sk_buff *skb = first->skb; 6511 u32 vlan_macip_lens, type_tucmd; 6512 u32 mss_l4len_idx, l4len; 6513 6514 if (skb->ip_summed != CHECKSUM_PARTIAL) 6515 return 0; ··· 6518 if (!skb_is_gso(skb)) 6519 return 0; 6520 6521 - if (skb_header_cloned(skb)) { 6522 - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 6523 - if (err) 6524 - return err; 6525 - } 6526 6527 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 6528 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; ··· 7077 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; 7078 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { 7079 struct vlan_ethhdr *vhdr; 7080 - if (skb_header_cloned(skb) && 7081 - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 7082 goto out_drop; 7083 vhdr = (struct vlan_ethhdr *)skb->data; 7084 vhdr->h_vlan_TCI = htons(tx_flags >> ··· 8023 /* EEPROM */ 8024 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); 8025 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 8026 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ 8027 if (!(eec & (1 << 8))) 8028 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; ··· 8189 setup_timer(&adapter->service_timer, &ixgbe_service_timer, 8190 (unsigned long) adapter); 8191 8192 INIT_WORK(&adapter->service_task, ixgbe_service_task); 8193 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); 8194 8195 err = ixgbe_init_interrupt_scheme(adapter); ··· 8503 8504 skip_bad_vf_detection: 8505 #endif /* CONFIG_PCI_IOV */ 8506 rtnl_lock(); 8507 netif_device_detach(netdev); 8508
··· 297 return; 298 hw->hw_addr = NULL; 299 e_dev_err("Adapter removed\n"); 300 + if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) 301 + ixgbe_service_event_schedule(adapter); 302 } 303 304 void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) ··· 6509 struct sk_buff *skb = first->skb; 6510 u32 vlan_macip_lens, type_tucmd; 6511 u32 mss_l4len_idx, l4len; 6512 + int err; 6513 6514 if (skb->ip_summed != CHECKSUM_PARTIAL) 6515 return 0; ··· 6516 if (!skb_is_gso(skb)) 6517 return 0; 6518 6519 + err = skb_cow_head(skb, 0); 6520 + if (err < 0) 6521 + return err; 6522 6523 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 6524 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; ··· 7077 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; 7078 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { 7079 struct vlan_ethhdr *vhdr; 7080 + 7081 + if (skb_cow_head(skb, 0)) 7082 goto out_drop; 7083 vhdr = (struct vlan_ethhdr *)skb->data; 7084 vhdr->h_vlan_TCI = htons(tx_flags >> ··· 8023 /* EEPROM */ 8024 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); 8025 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 8026 + if (ixgbe_removed(hw->hw_addr)) { 8027 + err = -EIO; 8028 + goto err_ioremap; 8029 + } 8030 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ 8031 if (!(eec & (1 << 8))) 8032 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; ··· 8185 setup_timer(&adapter->service_timer, &ixgbe_service_timer, 8186 (unsigned long) adapter); 8187 8188 + if (ixgbe_removed(hw->hw_addr)) { 8189 + err = -EIO; 8190 + goto err_sw_init; 8191 + } 8192 INIT_WORK(&adapter->service_task, ixgbe_service_task); 8193 + set_bit(__IXGBE_SERVICE_INITED, &adapter->state); 8194 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); 8195 8196 err = ixgbe_init_interrupt_scheme(adapter); ··· 8494 8495 skip_bad_vf_detection: 8496 #endif /* CONFIG_PCI_IOV */ 8497 + if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) 8498 + return PCI_ERS_RESULT_DISCONNECT; 8499 + 8500 rtnl_lock(); 8501 netif_device_detach(netdev); 8502
+1
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
··· 421 __IXGBEVF_DOWN, 422 __IXGBEVF_DISABLED, 423 __IXGBEVF_REMOVING, 424 }; 425 426 struct ixgbevf_cb {
··· 421 __IXGBEVF_DOWN, 422 __IXGBEVF_DISABLED, 423 __IXGBEVF_REMOVING, 424 + __IXGBEVF_WORK_INIT, 425 }; 426 427 struct ixgbevf_cb {
+14 -6
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 107 return; 108 hw->hw_addr = NULL; 109 dev_err(&adapter->pdev->dev, "Adapter removed\n"); 110 - schedule_work(&adapter->watchdog_task); 111 } 112 113 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) ··· 2839 struct sk_buff *skb = first->skb; 2840 u32 vlan_macip_lens, type_tucmd; 2841 u32 mss_l4len_idx, l4len; 2842 2843 if (skb->ip_summed != CHECKSUM_PARTIAL) 2844 return 0; ··· 2847 if (!skb_is_gso(skb)) 2848 return 0; 2849 2850 - if (skb_header_cloned(skb)) { 2851 - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2852 - if (err) 2853 - return err; 2854 - } 2855 2856 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2857 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; ··· 3573 adapter->watchdog_timer.function = ixgbevf_watchdog; 3574 adapter->watchdog_timer.data = (unsigned long)adapter; 3575 3576 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3577 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3578 3579 err = ixgbevf_init_interrupt_scheme(adapter); 3580 if (err) ··· 3671 { 3672 struct net_device *netdev = pci_get_drvdata(pdev); 3673 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3674 3675 rtnl_lock(); 3676 netif_device_detach(netdev);
··· 107 return; 108 hw->hw_addr = NULL; 109 dev_err(&adapter->pdev->dev, "Adapter removed\n"); 110 + if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state)) 111 + schedule_work(&adapter->watchdog_task); 112 } 113 114 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) ··· 2838 struct sk_buff *skb = first->skb; 2839 u32 vlan_macip_lens, type_tucmd; 2840 u32 mss_l4len_idx, l4len; 2841 + int err; 2842 2843 if (skb->ip_summed != CHECKSUM_PARTIAL) 2844 return 0; ··· 2845 if (!skb_is_gso(skb)) 2846 return 0; 2847 2848 + err = skb_cow_head(skb, 0); 2849 + if (err < 0) 2850 + return err; 2851 2852 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2853 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; ··· 3573 adapter->watchdog_timer.function = ixgbevf_watchdog; 3574 adapter->watchdog_timer.data = (unsigned long)adapter; 3575 3576 + if (IXGBE_REMOVED(hw->hw_addr)) { 3577 + err = -EIO; 3578 + goto err_sw_init; 3579 + } 3580 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3581 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3582 + set_bit(__IXGBEVF_WORK_INIT, &adapter->state); 3583 3584 err = ixgbevf_init_interrupt_scheme(adapter); 3585 if (err) ··· 3666 { 3667 struct net_device *netdev = pci_get_drvdata(pdev); 3668 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3669 + 3670 + if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state)) 3671 + return PCI_ERS_RESULT_DISCONNECT; 3672 3673 rtnl_lock(); 3674 netif_device_detach(netdev);
+8 -9
drivers/net/ethernet/ti/cpsw.c
··· 687 688 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); 689 690 - if (unlikely(status < 0)) { 691 /* the interface is going down, skbs are purged */ 692 dev_kfree_skb_any(skb); 693 return; ··· 1201 for_each_slave(priv, cpsw_slave_open, priv); 1202 1203 /* Add default VLAN */ 1204 - if (!priv->data.dual_emac) 1205 - cpsw_add_default_vlan(priv); 1206 1207 if (!cpsw_common_res_usage_state(priv)) { 1208 /* setup tx dma to fixed prio and zero offset */ ··· 1252 cpsw_set_coalesce(ndev, &coal); 1253 } 1254 1255 prim_cpsw = cpsw_get_slave_priv(priv, 0); 1256 if (prim_cpsw->irq_enabled == false) { 1257 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { ··· 1265 cpsw_enable_irq(prim_cpsw); 1266 } 1267 } 1268 - 1269 - napi_enable(&priv->napi); 1270 - cpdma_ctlr_start(priv->dma); 1271 - cpsw_intr_enable(priv); 1272 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1273 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1274 1275 if (priv->data.dual_emac) 1276 priv->slaves[priv->emac_port].open_stat = true;
··· 687 688 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); 689 690 + if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { 691 /* the interface is going down, skbs are purged */ 692 dev_kfree_skb_any(skb); 693 return; ··· 1201 for_each_slave(priv, cpsw_slave_open, priv); 1202 1203 /* Add default VLAN */ 1204 + cpsw_add_default_vlan(priv); 1205 1206 if (!cpsw_common_res_usage_state(priv)) { 1207 /* setup tx dma to fixed prio and zero offset */ ··· 1253 cpsw_set_coalesce(ndev, &coal); 1254 } 1255 1256 + napi_enable(&priv->napi); 1257 + cpdma_ctlr_start(priv->dma); 1258 + cpsw_intr_enable(priv); 1259 + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1260 + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1261 + 1262 prim_cpsw = cpsw_get_slave_priv(priv, 0); 1263 if (prim_cpsw->irq_enabled == false) { 1264 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { ··· 1260 cpsw_enable_irq(prim_cpsw); 1261 } 1262 } 1263 1264 if (priv->data.dual_emac) 1265 priv->slaves[priv->emac_port].open_stat = true;
+1
drivers/net/hyperv/hyperv_net.h
··· 747 #define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4 0 748 #define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6 1 749 750 /* 751 * New offload OIDs for NDIS 6 752 */
··· 747 #define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4 0 748 #define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6 1 749 750 + #define VERSION_4_OFFLOAD_SIZE 22 751 /* 752 * New offload OIDs for NDIS 6 753 */
+1 -1
drivers/net/hyperv/netvsc.c
··· 344 memset(init_packet, 0, sizeof(struct nvsp_message)); 345 346 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4) 347 - ndis_version = 0x00050001; 348 else 349 ndis_version = 0x0006001e; 350
··· 344 memset(init_packet, 0, sizeof(struct nvsp_message)); 345 346 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4) 347 + ndis_version = 0x00060001; 348 else 349 ndis_version = 0x0006001e; 350
+28 -2
drivers/net/hyperv/netvsc_drv.c
··· 319 packet = kzalloc(sizeof(struct hv_netvsc_packet) + 320 (num_data_pgs * sizeof(struct hv_page_buffer)) + 321 sizeof(struct rndis_message) + 322 - NDIS_VLAN_PPI_SIZE, GFP_ATOMIC); 323 if (!packet) { 324 /* out of memory, drop packet */ 325 netdev_err(net, "unable to allocate hv_netvsc_packet\n"); ··· 398 csum_info->transmit.tcp_checksum = 1; 399 csum_info->transmit.tcp_header_offset = hdr_offset; 400 } else if (net_trans_info & INFO_UDP) { 401 - csum_info->transmit.udp_checksum = 1; 402 } 403 goto do_send; 404 ··· 461 462 ret = netvsc_send(net_device_ctx->device_ctx, packet); 463 464 if (ret == 0) { 465 net->stats.tx_bytes += skb->len; 466 net->stats.tx_packets++;
··· 319 packet = kzalloc(sizeof(struct hv_netvsc_packet) + 320 (num_data_pgs * sizeof(struct hv_page_buffer)) + 321 sizeof(struct rndis_message) + 322 + NDIS_VLAN_PPI_SIZE + 323 + NDIS_CSUM_PPI_SIZE + 324 + NDIS_LSO_PPI_SIZE, GFP_ATOMIC); 325 if (!packet) { 326 /* out of memory, drop packet */ 327 netdev_err(net, "unable to allocate hv_netvsc_packet\n"); ··· 396 csum_info->transmit.tcp_checksum = 1; 397 csum_info->transmit.tcp_header_offset = hdr_offset; 398 } else if (net_trans_info & INFO_UDP) { 399 + /* UDP checksum offload is not supported on ws2008r2. 400 + * Furthermore, on ws2012 and ws2012r2, there are some 401 + * issues with udp checksum offload from Linux guests. 402 + * (these are host issues). 403 + * For now compute the checksum here. 404 + */ 405 + struct udphdr *uh; 406 + u16 udp_len; 407 + 408 + ret = skb_cow_head(skb, 0); 409 + if (ret) 410 + goto drop; 411 + 412 + uh = udp_hdr(skb); 413 + udp_len = ntohs(uh->len); 414 + uh->check = 0; 415 + uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr, 416 + ip_hdr(skb)->daddr, 417 + udp_len, IPPROTO_UDP, 418 + csum_partial(uh, udp_len, 0)); 419 + if (uh->check == 0) 420 + uh->check = CSUM_MANGLED_0; 421 + 422 + csum_info->transmit.udp_checksum = 0; 423 } 424 goto do_send; 425 ··· 436 437 ret = netvsc_send(net_device_ctx->device_ctx, packet); 438 439 + drop: 440 if (ret == 0) { 441 net->stats.tx_bytes += skb->len; 442 net->stats.tx_packets++;
+11 -1
drivers/net/hyperv/rndis_filter.c
··· 641 struct rndis_set_complete *set_complete; 642 u32 extlen = sizeof(struct ndis_offload_params); 643 int ret, t; 644 645 request = get_rndis_request(rdev, RNDIS_MSG_SET, 646 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen); ··· 684 } else { 685 set_complete = &request->response_msg.msg.set_complete; 686 if (set_complete->status != RNDIS_STATUS_SUCCESS) { 687 - netdev_err(ndev, "Fail to set MAC on host side:0x%x\n", 688 set_complete->status); 689 ret = -EINVAL; 690 }
··· 641 struct rndis_set_complete *set_complete; 642 u32 extlen = sizeof(struct ndis_offload_params); 643 int ret, t; 644 + u32 vsp_version = nvdev->nvsp_version; 645 + 646 + if (vsp_version <= NVSP_PROTOCOL_VERSION_4) { 647 + extlen = VERSION_4_OFFLOAD_SIZE; 648 + /* On NVSP_PROTOCOL_VERSION_4 and below, we do not support 649 + * UDP checksum offload. 650 + */ 651 + req_offloads->udp_ip_v4_csum = 0; 652 + req_offloads->udp_ip_v6_csum = 0; 653 + } 654 655 request = get_rndis_request(rdev, RNDIS_MSG_SET, 656 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen); ··· 674 } else { 675 set_complete = &request->response_msg.msg.set_complete; 676 if (set_complete->status != RNDIS_STATUS_SUCCESS) { 677 + netdev_err(ndev, "Fail to set offload on host side:0x%x\n", 678 set_complete->status); 679 ret = -EINVAL; 680 }
+1 -5
drivers/net/phy/phy.c
··· 756 netif_carrier_on(phydev->attached_dev); 757 phydev->adjust_link(phydev->attached_dev); 758 759 - } else if (0 == phydev->link_timeout--) { 760 needs_aneg = 1; 761 - /* If we have the magic_aneg bit, we try again */ 762 - if (phydev->drv->flags & PHY_HAS_MAGICANEG) 763 - break; 764 - } 765 break; 766 case PHY_NOLINK: 767 err = phy_read_status(phydev);
··· 756 netif_carrier_on(phydev->attached_dev); 757 phydev->adjust_link(phydev->attached_dev); 758 759 + } else if (0 == phydev->link_timeout--) 760 needs_aneg = 1; 761 break; 762 case PHY_NOLINK: 763 err = phy_read_status(phydev);
+48
drivers/net/usb/r8152.c
··· 929 struct r8152 *tp = netdev_priv(netdev); 930 int ret; 931 932 if (phy_id != R8152_PHY_ID) 933 return -EINVAL; 934 ··· 951 void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val) 952 { 953 struct r8152 *tp = netdev_priv(netdev); 954 955 if (phy_id != R8152_PHY_ID) 956 return; ··· 1968 1969 static int rtl8152_enable(struct r8152 *tp) 1970 { 1971 set_tx_qlen(tp); 1972 rtl_set_eee_plus(tp); 1973 ··· 2003 2004 static int rtl8153_enable(struct r8152 *tp) 2005 { 2006 set_tx_qlen(tp); 2007 rtl_set_eee_plus(tp); 2008 r8153_set_rx_agg(tp); ··· 2017 { 2018 u32 ocp_data; 2019 int i; 2020 2021 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); 2022 ocp_data &= ~RCR_ACPT_ALL; ··· 2248 { 2249 u32 ocp_data; 2250 int i; 2251 2252 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); 2253 ocp_data &= ~RCR_ACPT_ALL; ··· 2480 u32 ocp_data; 2481 int i; 2482 2483 rxdy_gated_en(tp, true); 2484 r8153_teredo_off(tp); 2485 ··· 2710 2711 static void rtl8152_down(struct r8152 *tp) 2712 { 2713 r8152_power_cut_en(tp, false); 2714 r8152b_disable_aldps(tp); 2715 r8152b_enter_oob(tp); ··· 2723 2724 static void rtl8153_down(struct r8152 *tp) 2725 { 2726 r8153_u1u2en(tp, false); 2727 r8153_power_cut_en(tp, false); 2728 r8153_disable_aldps(tp); ··· 2937 { 2938 u32 ocp_data; 2939 2940 if (tp->version == RTL_VER_01) { 2941 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); 2942 ocp_data &= ~LED_MODE_MASK; ··· 2974 { 2975 u32 ocp_data; 2976 int i; 2977 2978 r8153_u1u2en(tp, false); 2979 ··· 3252 struct mii_ioctl_data *data = if_mii(rq); 3253 int res; 3254 3255 res = usb_autopm_get_interface(tp->intf); 3256 if (res < 0) 3257 goto out; ··· 3335 3336 static void rtl8152_unload(struct r8152 *tp) 3337 { 3338 if (tp->version != RTL_VER_01) 3339 r8152_power_cut_en(tp, true); 3340 } 3341 3342 static void rtl8153_unload(struct r8152 *tp) 3343 { 3344 r8153_power_cut_en(tp, true); 3345 } 3346
··· 929 struct r8152 *tp = netdev_priv(netdev); 930 int ret; 931 932 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) 933 + return -ENODEV; 934 + 935 if (phy_id != R8152_PHY_ID) 936 return -EINVAL; 937 ··· 948 void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val) 949 { 950 struct r8152 *tp = netdev_priv(netdev); 951 + 952 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) 953 + return; 954 955 if (phy_id != R8152_PHY_ID) 956 return; ··· 1962 1963 static int rtl8152_enable(struct r8152 *tp) 1964 { 1965 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) 1966 + return -ENODEV; 1967 + 1968 set_tx_qlen(tp); 1969 rtl_set_eee_plus(tp); 1970 ··· 1994 1995 static int rtl8153_enable(struct r8152 *tp) 1996 { 1997 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) 1998 + return -ENODEV; 1999 + 2000 set_tx_qlen(tp); 2001 rtl_set_eee_plus(tp); 2002 r8153_set_rx_agg(tp); ··· 2005 { 2006 u32 ocp_data; 2007 int i; 2008 + 2009 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { 2010 + rtl_drop_queued_tx(tp); 2011 + return; 2012 + } 2013 2014 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); 2015 ocp_data &= ~RCR_ACPT_ALL; ··· 2231 { 2232 u32 ocp_data; 2233 int i; 2234 + 2235 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2236 + return; 2237 2238 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); 2239 ocp_data &= ~RCR_ACPT_ALL; ··· 2460 u32 ocp_data; 2461 int i; 2462 2463 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2464 + return; 2465 + 2466 rxdy_gated_en(tp, true); 2467 r8153_teredo_off(tp); 2468 ··· 2687 2688 static void rtl8152_down(struct r8152 *tp) 2689 { 2690 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { 2691 + rtl_drop_queued_tx(tp); 2692 + return; 2693 + } 2694 + 2695 r8152_power_cut_en(tp, false); 2696 r8152b_disable_aldps(tp); 2697 r8152b_enter_oob(tp); ··· 2695 2696 static void rtl8153_down(struct r8152 *tp) 2697 { 2698 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { 2699 + rtl_drop_queued_tx(tp); 2700 + return; 2701 + } 2702 + 2703 r8153_u1u2en(tp, false); 2704 r8153_power_cut_en(tp, false); 2705 r8153_disable_aldps(tp); ··· 2904 { 2905 u32 ocp_data; 2906 2907 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2908 + return; 2909 + 2910 if (tp->version == RTL_VER_01) { 2911 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); 2912 ocp_data &= ~LED_MODE_MASK; ··· 2938 { 2939 u32 ocp_data; 2940 int i; 2941 + 2942 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2943 + return; 2944 2945 r8153_u1u2en(tp, false); 2946 ··· 3213 struct mii_ioctl_data *data = if_mii(rq); 3214 int res; 3215 3216 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) 3217 + return -ENODEV; 3218 + 3219 res = usb_autopm_get_interface(tp->intf); 3220 if (res < 0) 3221 goto out; ··· 3293 3294 static void rtl8152_unload(struct r8152 *tp) 3295 { 3296 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) 3297 + return; 3298 + 3299 if (tp->version != RTL_VER_01) 3300 r8152_power_cut_en(tp, true); 3301 } 3302 3303 static void rtl8153_unload(struct r8152 *tp) 3304 { 3305 + if (test_bit(RTL8152_UNPLUG, &tp->flags)) 3306 + return; 3307 + 3308 r8153_power_cut_en(tp, true); 3309 } 3310
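The r8152 paths that still touch the adapter after a hot unplug now test RTL8152_UNPLUG first and either bail out (-ENODEV for the MII, enable and ioctl paths) or only flush queued TX via rtl_drop_queued_tx() in the down paths, so no further register access is attempted against a device that is gone. A toy sketch of that guard pattern; the struct and a plain bool stand in for struct r8152 and test_bit(RTL8152_UNPLUG, &tp->flags):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for struct r8152 and its RTL8152_UNPLUG flag bit. */
struct fake_dev {
    bool unplugged;
    int queued_tx;
};

/* A hardware-touching path: refuse to run once the device is gone. */
static int fake_enable(struct fake_dev *d)
{
    if (d->unplugged)
        return -ENODEV; /* nothing left to program */
    /* ... register writes would go here ... */
    return 0;
}

/* A teardown path: skip register writes but still flush queued TX. */
static void fake_down(struct fake_dev *d)
{
    if (d->unplugged) {
        d->queued_tx = 0; /* rtl_drop_queued_tx() equivalent */
        return;
    }
    /* ... normal power-down sequence ... */
}

int main(void)
{
    struct fake_dev d = { .unplugged = true, .queued_tx = 3 };

    printf("enable -> %d\n", fake_enable(&d));
    fake_down(&d);
    printf("queued_tx after down: %d\n", d.queued_tx);
    return 0;
}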
+1 -3
drivers/net/wireless/ath/ath9k/ar5008_phy.c
··· 1004 case ATH9K_ANI_FIRSTEP_LEVEL:{ 1005 u32 level = param; 1006 1007 - value = level * 2; 1008 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, 1009 AR_PHY_FIND_SIG_FIRSTEP, value); 1010 - REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW, 1011 - AR_PHY_FIND_SIG_FIRSTEP_LOW, value); 1012 1013 if (level != aniState->firstepLevel) { 1014 ath_dbg(common, ANI,
··· 1004 case ATH9K_ANI_FIRSTEP_LEVEL:{ 1005 u32 level = param; 1006 1007 + value = level; 1008 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, 1009 AR_PHY_FIND_SIG_FIRSTEP, value); 1010 1011 if (level != aniState->firstepLevel) { 1012 ath_dbg(common, ANI,
+3 -4
drivers/net/wireless/ath/ath9k/beacon.c
··· 312 313 void ath9k_csa_update(struct ath_softc *sc) 314 { 315 - ieee80211_iterate_active_interfaces(sc->hw, 316 - IEEE80211_IFACE_ITER_NORMAL, 317 - ath9k_csa_update_vif, 318 - sc); 319 } 320 321 void ath9k_beacon_tasklet(unsigned long data)
··· 312 313 void ath9k_csa_update(struct ath_softc *sc) 314 { 315 + ieee80211_iterate_active_interfaces_atomic(sc->hw, 316 + IEEE80211_IFACE_ITER_NORMAL, 317 + ath9k_csa_update_vif, sc); 318 } 319 320 void ath9k_beacon_tasklet(unsigned long data)
+4 -1
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
··· 471 if (!txok || !vif || !txs) 472 goto send_mac80211; 473 474 - if (txs->ts_flags & ATH9K_HTC_TXSTAT_ACK) 475 tx_info->flags |= IEEE80211_TX_STAT_ACK; 476 477 if (txs->ts_flags & ATH9K_HTC_TXSTAT_FILT) 478 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
··· 471 if (!txok || !vif || !txs) 472 goto send_mac80211; 473 474 + if (txs->ts_flags & ATH9K_HTC_TXSTAT_ACK) { 475 tx_info->flags |= IEEE80211_TX_STAT_ACK; 476 + if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) 477 + tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 478 + } 479 480 if (txs->ts_flags & ATH9K_HTC_TXSTAT_FILT) 481 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+2
drivers/net/wireless/ath/ath9k/init.c
··· 670 .num_different_channels = 1, 671 .beacon_int_infra_match = true, 672 }, 673 { 674 .limits = if_dfs_limits, 675 .n_limits = ARRAY_SIZE(if_dfs_limits), ··· 680 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 681 BIT(NL80211_CHAN_WIDTH_20), 682 } 683 }; 684 685 static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
··· 670 .num_different_channels = 1, 671 .beacon_int_infra_match = true, 672 }, 673 + #ifdef CONFIG_ATH9K_DFS_CERTIFIED 674 { 675 .limits = if_dfs_limits, 676 .n_limits = ARRAY_SIZE(if_dfs_limits), ··· 679 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 680 BIT(NL80211_CHAN_WIDTH_20), 681 } 682 + #endif 683 }; 684 685 static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+7 -7
drivers/net/wireless/b43/phy_n.c
··· 5176 int ch = new_channel->hw_value; 5177 5178 u16 old_band_5ghz; 5179 - u32 tmp32; 5180 5181 old_band_5ghz = 5182 b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ; 5183 if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) { 5184 - tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); 5185 - b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); 5186 b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000); 5187 - b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); 5188 b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ); 5189 } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) { 5190 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); 5191 - tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); 5192 - b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); 5193 b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF); 5194 - b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); 5195 } 5196 5197 b43_chantab_phy_upload(dev, e);
··· 5176 int ch = new_channel->hw_value; 5177 5178 u16 old_band_5ghz; 5179 + u16 tmp16; 5180 5181 old_band_5ghz = 5182 b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ; 5183 if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) { 5184 + tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR); 5185 + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4); 5186 b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000); 5187 + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16); 5188 b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ); 5189 } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) { 5190 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); 5191 + tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR); 5192 + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4); 5193 b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF); 5194 + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16); 5195 } 5196 5197 b43_chantab_phy_upload(dev, e);
+4 -2
drivers/net/wireless/rsi/rsi_91x_core.c
··· 102 } 103 104 get_queue_num: 105 - q_num = 0; 106 recontend_queue = false; 107 108 q_num = rsi_determine_min_weight_queue(common); 109 q_len = skb_queue_len(&common->tx_queue[ii]); 110 ii = q_num; 111 ··· 118 } 119 } 120 121 - common->tx_qinfo[q_num].pkt_contended = 0; 122 /* Adjust the back off values for all queues again */ 123 recontend_queue = rsi_recalculate_weights(common); 124
··· 102 } 103 104 get_queue_num: 105 recontend_queue = false; 106 107 q_num = rsi_determine_min_weight_queue(common); 108 + 109 q_len = skb_queue_len(&common->tx_queue[ii]); 110 ii = q_num; 111 ··· 118 } 119 } 120 121 + if (q_num < NUM_EDCA_QUEUES) 122 + common->tx_qinfo[q_num].pkt_contended = 0; 123 + 124 /* Adjust the back off values for all queues again */ 125 recontend_queue = rsi_recalculate_weights(common); 126
+16 -19
drivers/net/wireless/rsi/rsi_91x_debugfs.c
··· 289 const struct rsi_dbg_files *files; 290 291 dev_dbgfs = kzalloc(sizeof(*dev_dbgfs), GFP_KERNEL); 292 adapter->dfsentry = dev_dbgfs; 293 294 snprintf(devdir, sizeof(devdir), "%s", 295 wiphy_name(adapter->hw->wiphy)); 296 dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL); 297 298 - if (IS_ERR(dev_dbgfs->subdir)) { 299 - if (dev_dbgfs->subdir == ERR_PTR(-ENODEV)) 300 - rsi_dbg(ERR_ZONE, 301 - "%s:Debugfs has not been mounted\n", __func__); 302 - else 303 - rsi_dbg(ERR_ZONE, "debugfs:%s not created\n", devdir); 304 - 305 - adapter->dfsentry = NULL; 306 kfree(dev_dbgfs); 307 - return (int)PTR_ERR(dev_dbgfs->subdir); 308 - } else { 309 - for (ii = 0; ii < adapter->num_debugfs_entries; ii++) { 310 - files = &dev_debugfs_files[ii]; 311 - dev_dbgfs->rsi_files[ii] = 312 - debugfs_create_file(files->name, 313 - files->perms, 314 - dev_dbgfs->subdir, 315 - common, 316 - &files->fops); 317 - } 318 } 319 return 0; 320 }
··· 289 const struct rsi_dbg_files *files; 290 291 dev_dbgfs = kzalloc(sizeof(*dev_dbgfs), GFP_KERNEL); 292 + if (!dev_dbgfs) 293 + return -ENOMEM; 294 + 295 adapter->dfsentry = dev_dbgfs; 296 297 snprintf(devdir, sizeof(devdir), "%s", 298 wiphy_name(adapter->hw->wiphy)); 299 + 300 dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL); 301 302 + if (!dev_dbgfs->subdir) { 303 kfree(dev_dbgfs); 304 + return -ENOMEM; 305 + } 306 + 307 + for (ii = 0; ii < adapter->num_debugfs_entries; ii++) { 308 + files = &dev_debugfs_files[ii]; 309 + dev_dbgfs->rsi_files[ii] = 310 + debugfs_create_file(files->name, 311 + files->perms, 312 + dev_dbgfs->subdir, 313 + common, 314 + &files->fops); 315 } 316 return 0; 317 }
+5 -3
drivers/net/wireless/rsi/rsi_91x_mgmt.c
··· 738 * 739 * Return: 0 on success, corresponding error code on failure. 740 */ 741 - static u8 rsi_load_bootup_params(struct rsi_common *common) 742 { 743 struct sk_buff *skb; 744 struct rsi_boot_params *boot_params; ··· 1272 { 1273 s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff); 1274 u16 msg_type = (msg[2]); 1275 1276 rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n", 1277 __func__, msg_len, msg_type); ··· 1285 if (common->fsm_state == FSM_CARD_NOT_READY) { 1286 rsi_set_default_parameters(common); 1287 1288 - if (rsi_load_bootup_params(common)) 1289 - return -ENOMEM; 1290 else 1291 common->fsm_state = FSM_BOOT_PARAMS_SENT; 1292 } else {
··· 738 * 739 * Return: 0 on success, corresponding error code on failure. 740 */ 741 + static int rsi_load_bootup_params(struct rsi_common *common) 742 { 743 struct sk_buff *skb; 744 struct rsi_boot_params *boot_params; ··· 1272 { 1273 s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff); 1274 u16 msg_type = (msg[2]); 1275 + int ret; 1276 1277 rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n", 1278 __func__, msg_len, msg_type); ··· 1284 if (common->fsm_state == FSM_CARD_NOT_READY) { 1285 rsi_set_default_parameters(common); 1286 1287 + ret = rsi_load_bootup_params(common); 1288 + if (ret) 1289 + return ret; 1290 else 1291 common->fsm_state = FSM_BOOT_PARAMS_SENT; 1292 } else {
+3 -2
drivers/net/wireless/rsi/rsi_91x_sdio.c
··· 756 static void rsi_disconnect(struct sdio_func *pfunction) 757 { 758 struct rsi_hw *adapter = sdio_get_drvdata(pfunction); 759 - struct rsi_91x_sdiodev *dev = 760 - (struct rsi_91x_sdiodev *)adapter->rsi_dev; 761 762 if (!adapter) 763 return; 764 765 dev->write_fail = 2; 766 rsi_mac80211_detach(adapter);
··· 756 static void rsi_disconnect(struct sdio_func *pfunction) 757 { 758 struct rsi_hw *adapter = sdio_get_drvdata(pfunction); 759 + struct rsi_91x_sdiodev *dev; 760 761 if (!adapter) 762 return; 763 + 764 + dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; 765 766 dev->write_fail = 2; 767 rsi_mac80211_detach(adapter);
+2 -4
drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
··· 247 if (!common->rx_data_pkt) { 248 rsi_dbg(ERR_ZONE, "%s: Failed in memory allocation\n", 249 __func__); 250 - return -1; 251 } 252 253 status = rsi_sdio_host_intf_read_pkt(adapter, ··· 260 } 261 262 status = rsi_read_pkt(common, rcv_pkt_len); 263 - kfree(common->rx_data_pkt); 264 - return status; 265 266 fail: 267 kfree(common->rx_data_pkt); 268 - return -1; 269 } 270 271 /**
··· 247 if (!common->rx_data_pkt) { 248 rsi_dbg(ERR_ZONE, "%s: Failed in memory allocation\n", 249 __func__); 250 + return -ENOMEM; 251 } 252 253 status = rsi_sdio_host_intf_read_pkt(adapter, ··· 260 } 261 262 status = rsi_read_pkt(common, rcv_pkt_len); 263 264 fail: 265 kfree(common->rx_data_pkt); 266 + return status; 267 } 268 269 /**
+19 -7
drivers/net/wireless/rsi/rsi_91x_usb.c
··· 154 u16 *value, 155 u16 len) 156 { 157 - u8 temp_buf[4]; 158 - int status = 0; 159 160 status = usb_control_msg(usbdev, 161 usb_rcvctrlpipe(usbdev, 0), 162 USB_VENDOR_REGISTER_READ, 163 USB_TYPE_VENDOR, 164 ((reg & 0xffff0000) >> 16), (reg & 0xffff), 165 - (void *)temp_buf, 166 len, 167 HZ * 5); 168 169 - *value = (temp_buf[0] | (temp_buf[1] << 8)); 170 if (status < 0) { 171 rsi_dbg(ERR_ZONE, 172 "%s: Reg read failed with error code :%d\n", 173 __func__, status); 174 } 175 return status; 176 } 177 ··· 196 u16 value, 197 u16 len) 198 { 199 - u8 usb_reg_buf[4]; 200 - int status = 0; 201 202 usb_reg_buf[0] = (value & 0x00ff); 203 usb_reg_buf[1] = (value & 0xff00) >> 8; ··· 222 "%s: Reg write failed with error code :%d\n", 223 __func__, status); 224 } 225 return status; 226 } 227 ··· 298 return -ENOMEM; 299 300 while (count) { 301 - transfer = min_t(int, count, 4096); 302 memcpy(buf, data, transfer); 303 status = usb_control_msg(dev->usbdev, 304 usb_sndctrlpipe(dev->usbdev, 0),
··· 154 u16 *value, 155 u16 len) 156 { 157 + u8 *buf; 158 + int status = -ENOMEM; 159 + 160 + buf = kmalloc(0x04, GFP_KERNEL); 161 + if (!buf) 162 + return status; 163 164 status = usb_control_msg(usbdev, 165 usb_rcvctrlpipe(usbdev, 0), 166 USB_VENDOR_REGISTER_READ, 167 USB_TYPE_VENDOR, 168 ((reg & 0xffff0000) >> 16), (reg & 0xffff), 169 + (void *)buf, 170 len, 171 HZ * 5); 172 173 + *value = (buf[0] | (buf[1] << 8)); 174 if (status < 0) { 175 rsi_dbg(ERR_ZONE, 176 "%s: Reg read failed with error code :%d\n", 177 __func__, status); 178 } 179 + kfree(buf); 180 + 181 return status; 182 } 183 ··· 190 u16 value, 191 u16 len) 192 { 193 + u8 *usb_reg_buf; 194 + int status = -ENOMEM; 195 + 196 + usb_reg_buf = kmalloc(0x04, GFP_KERNEL); 197 + if (!usb_reg_buf) 198 + return status; 199 200 usb_reg_buf[0] = (value & 0x00ff); 201 usb_reg_buf[1] = (value & 0xff00) >> 8; ··· 212 "%s: Reg write failed with error code :%d\n", 213 __func__, status); 214 } 215 + kfree(usb_reg_buf); 216 + 217 return status; 218 } 219 ··· 286 return -ENOMEM; 287 288 while (count) { 289 + transfer = (u8)(min_t(u32, count, 4096)); 290 memcpy(buf, data, transfer); 291 status = usb_control_msg(dev->usbdev, 292 usb_sndctrlpipe(dev->usbdev, 0),
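The rsi USB register helpers stop handing stack arrays to usb_control_msg() and kmalloc() a small transfer buffer instead (propagating -ENOMEM on allocation failure); USB transfer buffers end up being DMA-mapped, which on-stack memory is not suitable for. A stripped-down sketch of the allocate/use/free shape, with plain malloc standing in for kmalloc and a made-up do_transfer() placeholder where the control transfer would be:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Placeholder for the transfer; in the driver this is usb_control_msg(). */
static int do_transfer(void *buf, size_t len)
{
    memset(buf, 0xab, len); /* pretend the device filled the buffer */
    return (int)len;
}

static int read_reg16(uint16_t *value)
{
    uint8_t *buf;
    int status;

    /* Heap buffer rather than a stack array: the real driver needs a
     * DMA-able buffer for the USB core to map. */
    buf = malloc(4);
    if (!buf)
        return -ENOMEM;

    status = do_transfer(buf, 4);
    if (status >= 0)
        *value = (uint16_t)(buf[0] | (buf[1] << 8));

    free(buf);
    return status;
}

int main(void)
{
    uint16_t v = 0;

    return read_reg16(&v) < 0 ? 1 : 0;
}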
-10
drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
··· 625 else 626 btcoexist->binded = true; 627 628 - #if (defined(CONFIG_PCI_HCI)) 629 - btcoexist->chip_interface = BTC_INTF_PCI; 630 - #elif (defined(CONFIG_USB_HCI)) 631 - btcoexist->chip_interface = BTC_INTF_USB; 632 - #elif (defined(CONFIG_SDIO_HCI)) 633 - btcoexist->chip_interface = BTC_INTF_SDIO; 634 - #elif (defined(CONFIG_GSPI_HCI)) 635 - btcoexist->chip_interface = BTC_INTF_GSPI; 636 - #else 637 btcoexist->chip_interface = BTC_INTF_UNKNOWN; 638 - #endif 639 640 if (NULL == btcoexist->adapter) 641 btcoexist->adapter = adapter;
··· 625 else 626 btcoexist->binded = true; 627 628 btcoexist->chip_interface = BTC_INTF_UNKNOWN; 629 630 if (NULL == btcoexist->adapter) 631 btcoexist->adapter = adapter;
+1 -1
drivers/net/xen-netfront.c
··· 1291 for (i = 0; i < NET_TX_RING_SIZE; i++) { 1292 skb_entry_set_link(&np->tx_skbs[i], i+1); 1293 np->grant_tx_ref[i] = GRANT_INVALID_REF; 1294 } 1295 1296 /* Clear out rx_skbs */ 1297 for (i = 0; i < NET_RX_RING_SIZE; i++) { 1298 np->rx_skbs[i] = NULL; 1299 np->grant_rx_ref[i] = GRANT_INVALID_REF; 1300 - np->grant_tx_page[i] = NULL; 1301 } 1302 1303 /* A grant for every tx ring slot */
··· 1291 for (i = 0; i < NET_TX_RING_SIZE; i++) { 1292 skb_entry_set_link(&np->tx_skbs[i], i+1); 1293 np->grant_tx_ref[i] = GRANT_INVALID_REF; 1294 + np->grant_tx_page[i] = NULL; 1295 } 1296 1297 /* Clear out rx_skbs */ 1298 for (i = 0; i < NET_RX_RING_SIZE; i++) { 1299 np->rx_skbs[i] = NULL; 1300 np->grant_rx_ref[i] = GRANT_INVALID_REF; 1301 } 1302 1303 /* A grant for every tx ring slot */
+1 -1
drivers/scsi/iscsi_tcp.c
··· 125 return 0; 126 } 127 128 - static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag) 129 { 130 struct iscsi_conn *conn; 131 struct iscsi_tcp_conn *tcp_conn;
··· 125 return 0; 126 } 127 128 + static void iscsi_sw_tcp_data_ready(struct sock *sk) 129 { 130 struct iscsi_conn *conn; 131 struct iscsi_tcp_conn *tcp_conn;
+1 -1
drivers/scsi/iscsi_tcp.h
··· 40 41 struct iscsi_sw_tcp_send out; 42 /* old values for socket callbacks */ 43 - void (*old_data_ready)(struct sock *, int); 44 void (*old_state_change)(struct sock *); 45 void (*old_write_space)(struct sock *); 46
··· 40 41 struct iscsi_sw_tcp_send out; 42 /* old values for socket callbacks */ 43 + void (*old_data_ready)(struct sock *); 44 void (*old_state_change)(struct sock *); 45 void (*old_write_space)(struct sock *); 46
+2 -2
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
··· 617 * socket call back in Linux 618 */ 619 static void 620 - ksocknal_data_ready (struct sock *sk, int n) 621 { 622 ksock_conn_t *conn; 623 ··· 628 conn = sk->sk_user_data; 629 if (conn == NULL) { /* raced with ksocknal_terminate_conn */ 630 LASSERT (sk->sk_data_ready != &ksocknal_data_ready); 631 - sk->sk_data_ready (sk, n); 632 } else 633 ksocknal_read_callback(conn); 634
··· 617 * socket call back in Linux 618 */ 619 static void 620 + ksocknal_data_ready (struct sock *sk) 621 { 622 ksock_conn_t *conn; 623 ··· 628 conn = sk->sk_user_data; 629 if (conn == NULL) { /* raced with ksocknal_terminate_conn */ 630 LASSERT (sk->sk_data_ready != &ksocknal_data_ready); 631 + sk->sk_data_ready (sk); 632 } else 633 ksocknal_read_callback(conn); 634
+1 -1
drivers/target/iscsi/iscsi_target_core.h
··· 557 struct completion rx_half_close_comp; 558 /* socket used by this connection */ 559 struct socket *sock; 560 - void (*orig_data_ready)(struct sock *, int); 561 void (*orig_state_change)(struct sock *); 562 #define LOGIN_FLAGS_READ_ACTIVE 1 563 #define LOGIN_FLAGS_CLOSED 2
··· 557 struct completion rx_half_close_comp; 558 /* socket used by this connection */ 559 struct socket *sock; 560 + void (*orig_data_ready)(struct sock *); 561 void (*orig_state_change)(struct sock *); 562 #define LOGIN_FLAGS_READ_ACTIVE 1 563 #define LOGIN_FLAGS_CLOSED 2
+1 -1
drivers/target/iscsi/iscsi_target_nego.c
··· 375 return 0; 376 } 377 378 - static void iscsi_target_sk_data_ready(struct sock *sk, int count) 379 { 380 struct iscsi_conn *conn = sk->sk_user_data; 381 bool rc;
··· 375 return 0; 376 } 377 378 + static void iscsi_target_sk_data_ready(struct sock *sk) 379 { 380 struct iscsi_conn *conn = sk->sk_user_data; 381 bool rc;
+1 -1
fs/dlm/lowcomms.c
··· 424 } 425 426 /* Data available on socket or listen socket received a connect */ 427 - static void lowcomms_data_ready(struct sock *sk, int count_unused) 428 { 429 struct connection *con = sock2con(sk); 430 if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
··· 424 } 425 426 /* Data available on socket or listen socket received a connect */ 427 + static void lowcomms_data_ready(struct sock *sk) 428 { 429 struct connection *con = sock2con(sk); 430 if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
+2 -2
fs/ncpfs/ncp_fs_sb.h
··· 109 110 spinlock_t requests_lock; /* Lock accesses to tx.requests, tx.creq and rcv.creq when STREAM mode */ 111 112 - void (*data_ready)(struct sock* sk, int len); 113 void (*error_report)(struct sock* sk); 114 void (*write_space)(struct sock* sk); /* STREAM mode only */ 115 struct { ··· 151 extern void ncpdgram_rcv_proc(struct work_struct *work); 152 extern void ncpdgram_timeout_proc(struct work_struct *work); 153 extern void ncpdgram_timeout_call(unsigned long server); 154 - extern void ncp_tcp_data_ready(struct sock* sk, int len); 155 extern void ncp_tcp_write_space(struct sock* sk); 156 extern void ncp_tcp_error_report(struct sock* sk); 157
··· 109 110 spinlock_t requests_lock; /* Lock accesses to tx.requests, tx.creq and rcv.creq when STREAM mode */ 111 112 + void (*data_ready)(struct sock* sk); 113 void (*error_report)(struct sock* sk); 114 void (*write_space)(struct sock* sk); /* STREAM mode only */ 115 struct { ··· 151 extern void ncpdgram_rcv_proc(struct work_struct *work); 152 extern void ncpdgram_timeout_proc(struct work_struct *work); 153 extern void ncpdgram_timeout_call(unsigned long server); 154 + extern void ncp_tcp_data_ready(struct sock* sk); 155 extern void ncp_tcp_write_space(struct sock* sk); 156 extern void ncp_tcp_error_report(struct sock* sk); 157
+2 -2
fs/ncpfs/sock.c
··· 97 kfree(req); 98 } 99 100 - void ncp_tcp_data_ready(struct sock *sk, int len) 101 { 102 struct ncp_server *server = sk->sk_user_data; 103 104 - server->data_ready(sk, len); 105 schedule_work(&server->rcv.tq); 106 } 107
··· 97 kfree(req); 98 } 99 100 + void ncp_tcp_data_ready(struct sock *sk) 101 { 102 struct ncp_server *server = sk->sk_user_data; 103 104 + server->data_ready(sk); 105 schedule_work(&server->rcv.tq); 106 } 107
+7 -8
fs/ocfs2/cluster/tcp.c
··· 137 static void o2net_sc_connect_completed(struct work_struct *work); 138 static void o2net_rx_until_empty(struct work_struct *work); 139 static void o2net_shutdown_sc(struct work_struct *work); 140 - static void o2net_listen_data_ready(struct sock *sk, int bytes); 141 static void o2net_sc_send_keep_req(struct work_struct *work); 142 static void o2net_idle_timer(unsigned long data); 143 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); ··· 597 } 598 599 /* see o2net_register_callbacks() */ 600 - static void o2net_data_ready(struct sock *sk, int bytes) 601 { 602 - void (*ready)(struct sock *sk, int bytes); 603 604 read_lock(&sk->sk_callback_lock); 605 if (sk->sk_user_data) { ··· 613 } 614 read_unlock(&sk->sk_callback_lock); 615 616 - ready(sk, bytes); 617 } 618 619 /* see o2net_register_callbacks() */ ··· 1926 cond_resched(); 1927 } 1928 1929 - static void o2net_listen_data_ready(struct sock *sk, int bytes) 1930 { 1931 - void (*ready)(struct sock *sk, int bytes); 1932 1933 read_lock(&sk->sk_callback_lock); 1934 ready = sk->sk_user_data; ··· 1951 */ 1952 1953 if (sk->sk_state == TCP_LISTEN) { 1954 - mlog(ML_TCP, "bytes: %d\n", bytes); 1955 queue_work(o2net_wq, &o2net_listen_work); 1956 } else { 1957 ready = NULL; ··· 1959 out: 1960 read_unlock(&sk->sk_callback_lock); 1961 if (ready != NULL) 1962 - ready(sk, bytes); 1963 } 1964 1965 static int o2net_open_listening_sock(__be32 addr, __be16 port)
··· 137 static void o2net_sc_connect_completed(struct work_struct *work); 138 static void o2net_rx_until_empty(struct work_struct *work); 139 static void o2net_shutdown_sc(struct work_struct *work); 140 + static void o2net_listen_data_ready(struct sock *sk); 141 static void o2net_sc_send_keep_req(struct work_struct *work); 142 static void o2net_idle_timer(unsigned long data); 143 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); ··· 597 } 598 599 /* see o2net_register_callbacks() */ 600 + static void o2net_data_ready(struct sock *sk) 601 { 602 + void (*ready)(struct sock *sk); 603 604 read_lock(&sk->sk_callback_lock); 605 if (sk->sk_user_data) { ··· 613 } 614 read_unlock(&sk->sk_callback_lock); 615 616 + ready(sk); 617 } 618 619 /* see o2net_register_callbacks() */ ··· 1926 cond_resched(); 1927 } 1928 1929 + static void o2net_listen_data_ready(struct sock *sk) 1930 { 1931 + void (*ready)(struct sock *sk); 1932 1933 read_lock(&sk->sk_callback_lock); 1934 ready = sk->sk_user_data; ··· 1951 */ 1952 1953 if (sk->sk_state == TCP_LISTEN) { 1954 queue_work(o2net_wq, &o2net_listen_work); 1955 } else { 1956 ready = NULL; ··· 1960 out: 1961 read_unlock(&sk->sk_callback_lock); 1962 if (ready != NULL) 1963 + ready(sk); 1964 } 1965 1966 static int o2net_open_listening_sock(__be32 addr, __be16 port)
+1 -1
fs/ocfs2/cluster/tcp_internal.h
··· 165 166 /* original handlers for the sockets */ 167 void (*sc_state_change)(struct sock *sk); 168 - void (*sc_data_ready)(struct sock *sk, int bytes); 169 170 u32 sc_msg_key; 171 u16 sc_msg_type;
··· 165 166 /* original handlers for the sockets */ 167 void (*sc_state_change)(struct sock *sk); 168 + void (*sc_data_ready)(struct sock *sk); 169 170 u32 sc_msg_key; 171 u16 sc_msg_type;
+1 -1
include/linux/sunrpc/svcsock.h
··· 22 23 /* We keep the old state_change and data_ready CB's here */ 24 void (*sk_ostate)(struct sock *); 25 - void (*sk_odata)(struct sock *, int bytes); 26 void (*sk_owspace)(struct sock *); 27 28 /* private TCP part */
··· 22 23 /* We keep the old state_change and data_ready CB's here */ 24 void (*sk_ostate)(struct sock *); 25 + void (*sk_odata)(struct sock *); 26 void (*sk_owspace)(struct sock *); 27 28 /* private TCP part */
+1 -1
include/net/sctp/sctp.h
··· 101 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 102 int sctp_inet_listen(struct socket *sock, int backlog); 103 void sctp_write_space(struct sock *sk); 104 - void sctp_data_ready(struct sock *sk, int len); 105 unsigned int sctp_poll(struct file *file, struct socket *sock, 106 poll_table *wait); 107 void sctp_sock_rfree(struct sk_buff *skb);
··· 101 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 102 int sctp_inet_listen(struct socket *sock, int backlog); 103 void sctp_write_space(struct sock *sk); 104 + void sctp_data_ready(struct sock *sk); 105 unsigned int sctp_poll(struct file *file, struct socket *sock, 106 poll_table *wait); 107 void sctp_sock_rfree(struct sk_buff *skb);
+1 -1
include/net/sock.h
··· 418 u32 sk_classid; 419 struct cg_proto *sk_cgrp; 420 void (*sk_state_change)(struct sock *sk); 421 - void (*sk_data_ready)(struct sock *sk, int bytes); 422 void (*sk_write_space)(struct sock *sk); 423 void (*sk_error_report)(struct sock *sk); 424 int (*sk_backlog_rcv)(struct sock *sk,
··· 418 u32 sk_classid; 419 struct cg_proto *sk_cgrp; 420 void (*sk_state_change)(struct sock *sk); 421 + void (*sk_data_ready)(struct sock *sk); 422 void (*sk_write_space)(struct sock *sk); 423 void (*sk_error_report)(struct sock *sk); 424 int (*sk_backlog_rcv)(struct sock *sk,
+1 -1
net/atm/clip.c
··· 68 69 sk = sk_atm(atmarpd); 70 skb_queue_tail(&sk->sk_receive_queue, skb); 71 - sk->sk_data_ready(sk, skb->len); 72 return 0; 73 } 74
··· 68 69 sk = sk_atm(atmarpd); 70 skb_queue_tail(&sk->sk_receive_queue, skb); 71 + sk->sk_data_ready(sk); 72 return 0; 73 } 74
+5 -5
net/atm/lec.c
··· 152 atm_force_charge(priv->lecd, skb2->truesize); 153 sk = sk_atm(priv->lecd); 154 skb_queue_tail(&sk->sk_receive_queue, skb2); 155 - sk->sk_data_ready(sk, skb2->len); 156 } 157 } 158 #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ ··· 447 atm_force_charge(priv->lecd, skb2->truesize); 448 sk = sk_atm(priv->lecd); 449 skb_queue_tail(&sk->sk_receive_queue, skb2); 450 - sk->sk_data_ready(sk, skb2->len); 451 } 452 } 453 #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ ··· 530 atm_force_charge(priv->lecd, skb->truesize); 531 sk = sk_atm(priv->lecd); 532 skb_queue_tail(&sk->sk_receive_queue, skb); 533 - sk->sk_data_ready(sk, skb->len); 534 535 if (data != NULL) { 536 pr_debug("about to send %d bytes of data\n", data->len); 537 atm_force_charge(priv->lecd, data->truesize); 538 skb_queue_tail(&sk->sk_receive_queue, data); 539 - sk->sk_data_ready(sk, skb->len); 540 } 541 542 return 0; ··· 616 617 pr_debug("%s: To daemon\n", dev->name); 618 skb_queue_tail(&sk->sk_receive_queue, skb); 619 - sk->sk_data_ready(sk, skb->len); 620 } else { /* Data frame, queue to protocol handlers */ 621 struct lec_arp_table *entry; 622 unsigned char *src, *dst;
··· 152 atm_force_charge(priv->lecd, skb2->truesize); 153 sk = sk_atm(priv->lecd); 154 skb_queue_tail(&sk->sk_receive_queue, skb2); 155 + sk->sk_data_ready(sk); 156 } 157 } 158 #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ ··· 447 atm_force_charge(priv->lecd, skb2->truesize); 448 sk = sk_atm(priv->lecd); 449 skb_queue_tail(&sk->sk_receive_queue, skb2); 450 + sk->sk_data_ready(sk); 451 } 452 } 453 #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ ··· 530 atm_force_charge(priv->lecd, skb->truesize); 531 sk = sk_atm(priv->lecd); 532 skb_queue_tail(&sk->sk_receive_queue, skb); 533 + sk->sk_data_ready(sk); 534 535 if (data != NULL) { 536 pr_debug("about to send %d bytes of data\n", data->len); 537 atm_force_charge(priv->lecd, data->truesize); 538 skb_queue_tail(&sk->sk_receive_queue, data); 539 + sk->sk_data_ready(sk); 540 } 541 542 return 0; ··· 616 617 pr_debug("%s: To daemon\n", dev->name); 618 skb_queue_tail(&sk->sk_receive_queue, skb); 619 + sk->sk_data_ready(sk); 620 } else { /* Data frame, queue to protocol handlers */ 621 struct lec_arp_table *entry; 622 unsigned char *src, *dst;
+3 -3
net/atm/mpc.c
··· 706 dprintk("(%s) control packet arrived\n", dev->name); 707 /* Pass control packets to daemon */ 708 skb_queue_tail(&sk->sk_receive_queue, skb); 709 - sk->sk_data_ready(sk, skb->len); 710 return; 711 } 712 ··· 992 993 sk = sk_atm(mpc->mpoad_vcc); 994 skb_queue_tail(&sk->sk_receive_queue, skb); 995 - sk->sk_data_ready(sk, skb->len); 996 997 return 0; 998 } ··· 1273 1274 sk = sk_atm(vcc); 1275 skb_queue_tail(&sk->sk_receive_queue, skb); 1276 - sk->sk_data_ready(sk, skb->len); 1277 dprintk("exiting\n"); 1278 } 1279
··· 706 dprintk("(%s) control packet arrived\n", dev->name); 707 /* Pass control packets to daemon */ 708 skb_queue_tail(&sk->sk_receive_queue, skb); 709 + sk->sk_data_ready(sk); 710 return; 711 } 712 ··· 992 993 sk = sk_atm(mpc->mpoad_vcc); 994 skb_queue_tail(&sk->sk_receive_queue, skb); 995 + sk->sk_data_ready(sk); 996 997 return 0; 998 } ··· 1273 1274 sk = sk_atm(vcc); 1275 skb_queue_tail(&sk->sk_receive_queue, skb); 1276 + sk->sk_data_ready(sk); 1277 dprintk("exiting\n"); 1278 } 1279
+1 -1
net/atm/raw.c
··· 25 struct sock *sk = sk_atm(vcc); 26 27 skb_queue_tail(&sk->sk_receive_queue, skb); 28 - sk->sk_data_ready(sk, skb->len); 29 } 30 } 31
··· 25 struct sock *sk = sk_atm(vcc); 26 27 skb_queue_tail(&sk->sk_receive_queue, skb); 28 + sk->sk_data_ready(sk); 29 } 30 } 31
+1 -1
net/atm/signaling.c
··· 51 #endif 52 atm_force_charge(sigd, skb->truesize); 53 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb); 54 - sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len); 55 } 56 57 static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
··· 51 #endif 52 atm_force_charge(sigd, skb->truesize); 53 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb); 54 + sk_atm(sigd)->sk_data_ready(sk_atm(sigd)); 55 } 56 57 static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
+1 -1
net/ax25/ax25_in.c
··· 422 423 if (sk) { 424 if (!sock_flag(sk, SOCK_DEAD)) 425 - sk->sk_data_ready(sk, skb->len); 426 sock_put(sk); 427 } else { 428 free:
··· 422 423 if (sk) { 424 if (!sock_flag(sk, SOCK_DEAD)) 425 + sk->sk_data_ready(sk); 426 sock_put(sk); 427 } else { 428 free:
+3 -3
net/bluetooth/l2cap_sock.c
··· 1271 1272 if (parent) { 1273 bt_accept_unlink(sk); 1274 - parent->sk_data_ready(parent, 0); 1275 } else { 1276 sk->sk_state_change(sk); 1277 } ··· 1327 sk->sk_state_change(sk); 1328 1329 if (parent) 1330 - parent->sk_data_ready(parent, 0); 1331 1332 release_sock(sk); 1333 } ··· 1340 1341 parent = bt_sk(sk)->parent; 1342 if (parent) 1343 - parent->sk_data_ready(parent, 0); 1344 1345 release_sock(sk); 1346 }
··· 1271 1272 if (parent) { 1273 bt_accept_unlink(sk); 1274 + parent->sk_data_ready(parent); 1275 } else { 1276 sk->sk_state_change(sk); 1277 } ··· 1327 sk->sk_state_change(sk); 1328 1329 if (parent) 1330 + parent->sk_data_ready(parent); 1331 1332 release_sock(sk); 1333 } ··· 1340 1341 parent = bt_sk(sk)->parent; 1342 if (parent) 1343 + parent->sk_data_ready(parent); 1344 1345 release_sock(sk); 1346 }
+2 -2
net/bluetooth/rfcomm/core.c
··· 186 rfcomm_schedule(); 187 } 188 189 - static void rfcomm_l2data_ready(struct sock *sk, int bytes) 190 { 191 - BT_DBG("%p bytes %d", sk, bytes); 192 rfcomm_schedule(); 193 } 194
··· 186 rfcomm_schedule(); 187 } 188 189 + static void rfcomm_l2data_ready(struct sock *sk) 190 { 191 + BT_DBG("%p", sk); 192 rfcomm_schedule(); 193 } 194
+2 -2
net/bluetooth/rfcomm/sock.c
··· 54 55 atomic_add(skb->len, &sk->sk_rmem_alloc); 56 skb_queue_tail(&sk->sk_receive_queue, skb); 57 - sk->sk_data_ready(sk, skb->len); 58 59 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 60 rfcomm_dlc_throttle(d); ··· 84 sock_set_flag(sk, SOCK_ZAPPED); 85 bt_accept_unlink(sk); 86 } 87 - parent->sk_data_ready(parent, 0); 88 } else { 89 if (d->state == BT_CONNECTED) 90 rfcomm_session_getaddr(d->session,
··· 54 55 atomic_add(skb->len, &sk->sk_rmem_alloc); 56 skb_queue_tail(&sk->sk_receive_queue, skb); 57 + sk->sk_data_ready(sk); 58 59 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 60 rfcomm_dlc_throttle(d); ··· 84 sock_set_flag(sk, SOCK_ZAPPED); 85 bt_accept_unlink(sk); 86 } 87 + parent->sk_data_ready(parent); 88 } else { 89 if (d->state == BT_CONNECTED) 90 rfcomm_session_getaddr(d->session,
+1 -1
net/bluetooth/sco.c
··· 1024 sk->sk_state = BT_CONNECTED; 1025 1026 /* Wake up parent */ 1027 - parent->sk_data_ready(parent, 1); 1028 1029 bh_unlock_sock(parent); 1030
··· 1024 sk->sk_state = BT_CONNECTED; 1025 1026 /* Wake up parent */ 1027 + parent->sk_data_ready(parent); 1028 1029 bh_unlock_sock(parent); 1030
+1 -1
net/bridge/br_input.c
··· 73 goto drop; 74 75 if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid)) 76 - goto drop; 77 78 /* insert into forwarding database after filtering to avoid spoofing */ 79 br = p->br;
··· 73 goto drop; 74 75 if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid)) 76 + goto out; 77 78 /* insert into forwarding database after filtering to avoid spoofing */ 79 br = p->br;
+4 -3
net/bridge/br_vlan.c
··· 170 * rejected. 171 */ 172 if (!v) 173 - return false; 174 175 /* If vlan tx offload is disabled on bridge device and frame was 176 * sent from vlan device on the bridge device, it does not have ··· 193 * vlan untagged or priority-tagged traffic belongs to. 194 */ 195 if (pvid == VLAN_N_VID) 196 - return false; 197 198 /* PVID is set on this port. Any untagged or priority-tagged 199 * ingress frame is considered to belong to this vlan. ··· 216 /* Frame had a valid vlan tag. See if vlan is allowed */ 217 if (test_bit(*vid, v->vlan_bitmap)) 218 return true; 219 - 220 return false; 221 } 222
··· 170 * rejected. 171 */ 172 if (!v) 173 + goto drop; 174 175 /* If vlan tx offload is disabled on bridge device and frame was 176 * sent from vlan device on the bridge device, it does not have ··· 193 * vlan untagged or priority-tagged traffic belongs to. 194 */ 195 if (pvid == VLAN_N_VID) 196 + goto drop; 197 198 /* PVID is set on this port. Any untagged or priority-tagged 199 * ingress frame is considered to belong to this vlan. ··· 216 /* Frame had a valid vlan tag. See if vlan is allowed */ 217 if (test_bit(*vid, v->vlan_bitmap)) 218 return true; 219 + drop: 220 + kfree_skb(skb); 221 return false; 222 } 223
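br_allowed_ingress() now owns the skb on its failure path: every rejection jumps to the new drop label, frees the frame with kfree_skb() and returns false, while br_handle_frame_finish() correspondingly jumps to out instead of its own drop label, so the buffer can no longer be freed twice (and the leak on the paths that previously returned false without freeing is gone). The same consume-or-reject contract in miniature, as generic C with a malloc'd buffer standing in for the skb:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
    int vid;
    char data[64];
};

/* On rejection the filter frees the buffer itself and reports false;
 * the caller must not touch or free it again on that path. */
static bool filter_allowed(struct buf *b, int allowed_vid)
{
    if (b->vid == allowed_vid)
        return true;  /* caller keeps ownership */

    free(b);          /* filter consumes rejected frames */
    return false;
}

static void handle_frame(struct buf *b, int allowed_vid)
{
    if (!filter_allowed(b, allowed_vid))
        return;       /* like 'goto out': no second free here */

    printf("forwarding frame on vlan %d\n", b->vid);
    free(b);          /* normal end-of-processing release */
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));

    if (!b)
        return 1;
    b->vid = 7;
    handle_frame(b, 10); /* rejected: freed exactly once, inside the filter */
    return 0;
}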
+1 -3
net/caif/caif_socket.c
··· 124 static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 125 { 126 int err; 127 - int skb_len; 128 unsigned long flags; 129 struct sk_buff_head *list = &sk->sk_receive_queue; 130 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); ··· 152 * may be freed by other threads of control pulling packets 153 * from the queue. 154 */ 155 - skb_len = skb->len; 156 spin_lock_irqsave(&list->lock, flags); 157 if (!sock_flag(sk, SOCK_DEAD)) 158 __skb_queue_tail(list, skb); 159 spin_unlock_irqrestore(&list->lock, flags); 160 161 if (!sock_flag(sk, SOCK_DEAD)) 162 - sk->sk_data_ready(sk, skb_len); 163 else 164 kfree_skb(skb); 165 return 0;
··· 124 static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 125 { 126 int err; 127 unsigned long flags; 128 struct sk_buff_head *list = &sk->sk_receive_queue; 129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); ··· 153 * may be freed by other threads of control pulling packets 154 * from the queue. 155 */ 156 spin_lock_irqsave(&list->lock, flags); 157 if (!sock_flag(sk, SOCK_DEAD)) 158 __skb_queue_tail(list, skb); 159 spin_unlock_irqrestore(&list->lock, flags); 160 161 if (!sock_flag(sk, SOCK_DEAD)) 162 + sk->sk_data_ready(sk); 163 else 164 kfree_skb(skb); 165 return 0;
+1 -1
net/ceph/messenger.c
··· 383 */ 384 385 /* data available on socket, or listen socket received a connect */ 386 - static void ceph_sock_data_ready(struct sock *sk, int count_unused) 387 { 388 struct ceph_connection *con = sk->sk_user_data; 389 if (atomic_read(&con->msgr->stopping)) {
··· 383 */ 384 385 /* data available on socket, or listen socket received a connect */ 386 + static void ceph_sock_data_ready(struct sock *sk) 387 { 388 struct ceph_connection *con = sk->sk_user_data; 389 if (atomic_read(&con->msgr->stopping)) {
+6 -2
net/core/pktgen.c
··· 3338 queue_map = skb_get_queue_mapping(pkt_dev->skb); 3339 txq = netdev_get_tx_queue(odev, queue_map); 3340 3341 - __netif_tx_lock_bh(txq); 3342 3343 if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) { 3344 ret = NETDEV_TX_BUSY; ··· 3376 pkt_dev->last_ok = 0; 3377 } 3378 unlock: 3379 - __netif_tx_unlock_bh(txq); 3380 3381 /* If pkt_dev->count is zero, then run forever */ 3382 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
··· 3338 queue_map = skb_get_queue_mapping(pkt_dev->skb); 3339 txq = netdev_get_tx_queue(odev, queue_map); 3340 3341 + local_bh_disable(); 3342 + 3343 + HARD_TX_LOCK(odev, txq, smp_processor_id()); 3344 3345 if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) { 3346 ret = NETDEV_TX_BUSY; ··· 3374 pkt_dev->last_ok = 0; 3375 } 3376 unlock: 3377 + HARD_TX_UNLOCK(odev, txq); 3378 + 3379 + local_bh_enable(); 3380 3381 /* If pkt_dev->count is zero, then run forever */ 3382 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
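pktgen used to take the device TX queue lock unconditionally with __netif_tx_lock_bh(); switching to local_bh_disable() plus HARD_TX_LOCK()/HARD_TX_UNLOCK() means the queue lock is only taken for drivers that actually rely on it, and LLTX drivers, which handle their own TX locking, are left alone. A toy version of that conditional-locking idea, with a pthread mutex and a plain flag standing in for the netdev xmit lock and NETIF_F_LLTX:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
    pthread_mutex_t xmit_lock;
    bool lltx;          /* driver does its own locking (NETIF_F_LLTX) */
    unsigned long sent;
};

/* Only grab the shared xmit lock for drivers that rely on it. */
static void hard_tx_lock(struct fake_queue *q)
{
    if (!q->lltx)
        pthread_mutex_lock(&q->xmit_lock);
}

static void hard_tx_unlock(struct fake_queue *q)
{
    if (!q->lltx)
        pthread_mutex_unlock(&q->xmit_lock);
}

static void xmit_one(struct fake_queue *q)
{
    hard_tx_lock(q);
    q->sent++;          /* stand-in for ndo_start_xmit() */
    hard_tx_unlock(q);
}

int main(void)
{
    struct fake_queue q = {
        .xmit_lock = PTHREAD_MUTEX_INITIALIZER,
        .lltx = true,
    };

    xmit_one(&q);
    printf("sent %lu\n", q.sent);
    return 0;
}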
+8 -8
net/core/skbuff.c
··· 3458 */ 3459 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3460 { 3461 - int len = skb->len; 3462 - 3463 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3464 (unsigned int)sk->sk_rcvbuf) 3465 return -ENOMEM; ··· 3472 3473 skb_queue_tail(&sk->sk_error_queue, skb); 3474 if (!sock_flag(sk, SOCK_DEAD)) 3475 - sk->sk_data_ready(sk, len); 3476 return 0; 3477 } 3478 EXPORT_SYMBOL(sock_queue_err_skb); ··· 3935 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 3936 { 3937 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3938 - unsigned int hdr_len; 3939 3940 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 3941 - hdr_len = tcp_hdrlen(skb); 3942 - else 3943 - hdr_len = sizeof(struct udphdr); 3944 - return hdr_len + shinfo->gso_size; 3945 } 3946 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
··· 3458 */ 3459 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3460 { 3461 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3462 (unsigned int)sk->sk_rcvbuf) 3463 return -ENOMEM; ··· 3474 3475 skb_queue_tail(&sk->sk_error_queue, skb); 3476 if (!sock_flag(sk, SOCK_DEAD)) 3477 + sk->sk_data_ready(sk); 3478 return 0; 3479 } 3480 EXPORT_SYMBOL(sock_queue_err_skb); ··· 3937 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 3938 { 3939 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3940 3941 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 3942 + return tcp_hdrlen(skb) + shinfo->gso_size; 3943 + 3944 + /* UFO sets gso_size to the size of the fragmentation 3945 + * payload, i.e. the size of the L4 (UDP) header is already 3946 + * accounted for. 3947 + */ 3948 + return shinfo->gso_size; 3949 } 3950 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
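In skb_gso_transport_seglen(), TSO's gso_size is the TCP payload per segment, so the transport header length is added on top; for UFO, as the new comment spells out, gso_size is the size of the fragmentation payload and the UDP header is already accounted for, so the old unconditional "hdr_len + gso_size" overestimated UFO segments by sizeof(struct udphdr). A small self-contained version of the corrected arithmetic; the struct below is a simplified stand-in for skb_shared_info plus tcp_hdrlen():

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved. */
struct fake_gso_info {
    bool is_tcp;             /* SKB_GSO_TCPV4 / SKB_GSO_TCPV6 */
    unsigned int gso_size;   /* payload bytes per segment */
    unsigned int tcp_hdrlen; /* tcp_hdrlen(skb): 20 + options */
};

/* Mirrors the corrected skb_gso_transport_seglen() logic. */
static unsigned int transport_seglen(const struct fake_gso_info *g)
{
    if (g->is_tcp)
        return g->tcp_hdrlen + g->gso_size;

    /* UFO: gso_size is the fragmentation payload, the UDP header is
     * already accounted for, so nothing is added here. */
    return g->gso_size;
}

int main(void)
{
    struct fake_gso_info tso = { true, 1448, 32 };
    struct fake_gso_info ufo = { false, 1472, 0 };

    printf("TSO seglen: %u\n", transport_seglen(&tso)); /* 1480 */
    printf("UFO seglen: %u\n", transport_seglen(&ufo)); /* 1472 */
    return 0;
}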
+2 -2
net/core/sock.c
··· 428 spin_unlock_irqrestore(&list->lock, flags); 429 430 if (!sock_flag(sk, SOCK_DEAD)) 431 - sk->sk_data_ready(sk, skb_len); 432 return 0; 433 } 434 EXPORT_SYMBOL(sock_queue_rcv_skb); ··· 2196 rcu_read_unlock(); 2197 } 2198 2199 - static void sock_def_readable(struct sock *sk, int len) 2200 { 2201 struct socket_wq *wq; 2202
··· 428 spin_unlock_irqrestore(&list->lock, flags); 429 430 if (!sock_flag(sk, SOCK_DEAD)) 431 + sk->sk_data_ready(sk); 432 return 0; 433 } 434 EXPORT_SYMBOL(sock_queue_rcv_skb); ··· 2196 rcu_read_unlock(); 2197 } 2198 2199 + static void sock_def_readable(struct sock *sk) 2200 { 2201 struct socket_wq *wq; 2202
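The length argument to sk_data_ready() goes away throughout this series: the converted implementations were already ignoring it (note parameter names like count_unused in dlm and ceph), while callers such as sock_queue_err_skb(), caif_queue_rcv_skb() and dn_queue_skb() no longer have to read skb->len after the skb has been queued, where another context may already have consumed and freed it. A condensed illustration of the fixed call pattern, using toy types in place of struct sock and the receive queue:

#include <stdio.h>
#include <stdlib.h>

struct fake_skb {
    size_t len;
    struct fake_skb *next;
};

struct fake_sock {
    struct fake_skb *queue_head;
    /* New-style callback: no length argument, the reader walks the
     * receive queue itself. Previously this was (sk, int bytes). */
    void (*data_ready)(struct fake_sock *sk);
};

static void default_data_ready(struct fake_sock *sk)
{
    /* Wake up whoever is waiting on the receive queue. */
    printf("data ready on %p\n", (void *)sk);
}

static void queue_rcv(struct fake_sock *sk, struct fake_skb *skb)
{
    skb->next = sk->queue_head;
    sk->queue_head = skb;
    /* Once the skb is on the queue another context may free it, so it
     * must not be dereferenced here; computing an 'skb->len' second
     * argument did exactly that. */
    sk->data_ready(sk);
}

int main(void)
{
    struct fake_sock sk = { NULL, default_data_ready };
    struct fake_skb *skb = calloc(1, sizeof(*skb));

    if (!skb)
        return 1;
    skb->len = 100;
    queue_rcv(&sk, skb);
    free(sk.queue_head);
    return 0;
}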
+1 -1
net/dccp/input.c
··· 28 __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4); 29 __skb_queue_tail(&sk->sk_receive_queue, skb); 30 skb_set_owner_r(skb, sk); 31 - sk->sk_data_ready(sk, 0); 32 } 33 34 static void dccp_fin(struct sock *sk, struct sk_buff *skb)
··· 28 __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4); 29 __skb_queue_tail(&sk->sk_receive_queue, skb); 30 skb_set_owner_r(skb, sk); 31 + sk->sk_data_ready(sk); 32 } 33 34 static void dccp_fin(struct sock *sk, struct sk_buff *skb)
+1 -1
net/dccp/minisocks.c
··· 237 238 /* Wakeup parent, send SIGIO */ 239 if (state == DCCP_RESPOND && child->sk_state != state) 240 - parent->sk_data_ready(parent, 0); 241 } else { 242 /* Alas, it is possible again, because we do lookup 243 * in main socket hash table and lock on listening
··· 237 238 /* Wakeup parent, send SIGIO */ 239 if (state == DCCP_RESPOND && child->sk_state != state) 240 + parent->sk_data_ready(parent); 241 } else { 242 /* Alas, it is possible again, because we do lookup 243 * in main socket hash table and lock on listening
+1 -3
net/decnet/dn_nsp_in.c
··· 585 static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue) 586 { 587 int err; 588 - int skb_len; 589 590 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces 591 number of warnings when compiling with -W --ANK ··· 599 if (err) 600 goto out; 601 602 - skb_len = skb->len; 603 skb_set_owner_r(skb, sk); 604 skb_queue_tail(queue, skb); 605 606 if (!sock_flag(sk, SOCK_DEAD)) 607 - sk->sk_data_ready(sk, skb_len); 608 out: 609 return err; 610 }
··· 585 static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue) 586 { 587 int err; 588 589 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces 590 number of warnings when compiling with -W --ANK ··· 600 if (err) 601 goto out; 602 603 skb_set_owner_r(skb, sk); 604 skb_queue_tail(queue, skb); 605 606 if (!sock_flag(sk, SOCK_DEAD)) 607 + sk->sk_data_ready(sk); 608 out: 609 return err; 610 }
+1 -1
net/ipv4/ip_gre.c
··· 463 static void ipgre_tunnel_setup(struct net_device *dev) 464 { 465 dev->netdev_ops = &ipgre_netdev_ops; 466 ip_tunnel_setup(dev, ipgre_net_id); 467 } 468 ··· 502 memcpy(dev->dev_addr, &iph->saddr, 4); 503 memcpy(dev->broadcast, &iph->daddr, 4); 504 505 - dev->type = ARPHRD_IPGRE; 506 dev->flags = IFF_NOARP; 507 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 508 dev->addr_len = 4;
··· 463 static void ipgre_tunnel_setup(struct net_device *dev) 464 { 465 dev->netdev_ops = &ipgre_netdev_ops; 466 + dev->type = ARPHRD_IPGRE; 467 ip_tunnel_setup(dev, ipgre_net_id); 468 } 469 ··· 501 memcpy(dev->dev_addr, &iph->saddr, 4); 502 memcpy(dev->broadcast, &iph->daddr, 4); 503 504 dev->flags = IFF_NOARP; 505 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 506 dev->addr_len = 4;
+1 -1
net/ipv4/ip_vti.c
··· 337 static void vti_tunnel_setup(struct net_device *dev) 338 { 339 dev->netdev_ops = &vti_netdev_ops; 340 ip_tunnel_setup(dev, vti_net_id); 341 } 342 ··· 349 memcpy(dev->dev_addr, &iph->saddr, 4); 350 memcpy(dev->broadcast, &iph->daddr, 4); 351 352 - dev->type = ARPHRD_TUNNEL; 353 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); 354 dev->mtu = ETH_DATA_LEN; 355 dev->flags = IFF_NOARP;
··· 337 static void vti_tunnel_setup(struct net_device *dev) 338 { 339 dev->netdev_ops = &vti_netdev_ops; 340 + dev->type = ARPHRD_TUNNEL; 341 ip_tunnel_setup(dev, vti_net_id); 342 } 343 ··· 348 memcpy(dev->dev_addr, &iph->saddr, 4); 349 memcpy(dev->broadcast, &iph->daddr, 4); 350 351 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); 352 dev->mtu = ETH_DATA_LEN; 353 dev->flags = IFF_NOARP;
+5 -5
net/ipv4/tcp_input.c
··· 4413 if (eaten > 0) 4414 kfree_skb_partial(skb, fragstolen); 4415 if (!sock_flag(sk, SOCK_DEAD)) 4416 - sk->sk_data_ready(sk, 0); 4417 return; 4418 } 4419 ··· 4914 BUG(); 4915 tp->urg_data = TCP_URG_VALID | tmp; 4916 if (!sock_flag(sk, SOCK_DEAD)) 4917 - sk->sk_data_ready(sk, 0); 4918 } 4919 } 4920 } ··· 5000 (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || 5001 (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { 5002 tp->ucopy.wakeup = 1; 5003 - sk->sk_data_ready(sk, 0); 5004 } 5005 } else if (chunk > 0) { 5006 tp->ucopy.wakeup = 1; 5007 - sk->sk_data_ready(sk, 0); 5008 } 5009 out: 5010 return copied_early; ··· 5275 #endif 5276 if (eaten) 5277 kfree_skb_partial(skb, fragstolen); 5278 - sk->sk_data_ready(sk, 0); 5279 return; 5280 } 5281 }
··· 4413 if (eaten > 0) 4414 kfree_skb_partial(skb, fragstolen); 4415 if (!sock_flag(sk, SOCK_DEAD)) 4416 + sk->sk_data_ready(sk); 4417 return; 4418 } 4419 ··· 4914 BUG(); 4915 tp->urg_data = TCP_URG_VALID | tmp; 4916 if (!sock_flag(sk, SOCK_DEAD)) 4917 + sk->sk_data_ready(sk); 4918 } 4919 } 4920 } ··· 5000 (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || 5001 (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { 5002 tp->ucopy.wakeup = 1; 5003 + sk->sk_data_ready(sk); 5004 } 5005 } else if (chunk > 0) { 5006 tp->ucopy.wakeup = 1; 5007 + sk->sk_data_ready(sk); 5008 } 5009 out: 5010 return copied_early; ··· 5275 #endif 5276 if (eaten) 5277 kfree_skb_partial(skb, fragstolen); 5278 + sk->sk_data_ready(sk); 5279 return; 5280 } 5281 }
+1 -1
net/ipv4/tcp_ipv4.c
··· 1434 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 1435 tp->syn_data_acked = 1; 1436 } 1437 - sk->sk_data_ready(sk, 0); 1438 bh_unlock_sock(child); 1439 sock_put(child); 1440 WARN_ON(req->sk == NULL);
··· 1434 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 1435 tp->syn_data_acked = 1; 1436 } 1437 + sk->sk_data_ready(sk); 1438 bh_unlock_sock(child); 1439 sock_put(child); 1440 WARN_ON(req->sk == NULL);
+1 -1
net/ipv4/tcp_minisocks.c
··· 745 skb->len); 746 /* Wakeup parent, send SIGIO */ 747 if (state == TCP_SYN_RECV && child->sk_state != state) 748 - parent->sk_data_ready(parent, 0); 749 } else { 750 /* Alas, it is possible again, because we do lookup 751 * in main socket hash table and lock on listening
··· 745 skb->len); 746 /* Wakeup parent, send SIGIO */ 747 if (state == TCP_SYN_RECV && child->sk_state != state) 748 + parent->sk_data_ready(parent); 749 } else { 750 /* Alas, it is possible again, because we do lookup 751 * in main socket hash table and lock on listening
+1 -1
net/ipv6/tcp_ipv6.c
··· 798 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); 799 800 fl6.flowi6_proto = IPPROTO_TCP; 801 - if (rt6_need_strict(&fl6.daddr) || !oif) 802 fl6.flowi6_oif = inet6_iif(skb); 803 else 804 fl6.flowi6_oif = oif;
··· 798 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); 799 800 fl6.flowi6_proto = IPPROTO_TCP; 801 + if (rt6_need_strict(&fl6.daddr) && !oif) 802 fl6.flowi6_oif = inet6_iif(skb); 803 else 804 fl6.flowi6_oif = oif;
+2 -2
net/iucv/af_iucv.c
··· 1757 1758 /* Wake up accept */ 1759 nsk->sk_state = IUCV_CONNECTED; 1760 - sk->sk_data_ready(sk, 1); 1761 err = 0; 1762 fail: 1763 bh_unlock_sock(sk); ··· 1968 if (!err) { 1969 iucv_accept_enqueue(sk, nsk); 1970 nsk->sk_state = IUCV_CONNECTED; 1971 - sk->sk_data_ready(sk, 1); 1972 } else 1973 iucv_sock_kill(nsk); 1974 bh_unlock_sock(sk);
··· 1757 1758 /* Wake up accept */ 1759 nsk->sk_state = IUCV_CONNECTED; 1760 + sk->sk_data_ready(sk); 1761 err = 0; 1762 fail: 1763 bh_unlock_sock(sk); ··· 1968 if (!err) { 1969 iucv_accept_enqueue(sk, nsk); 1970 nsk->sk_state = IUCV_CONNECTED; 1971 + sk->sk_data_ready(sk); 1972 } else 1973 iucv_sock_kill(nsk); 1974 bh_unlock_sock(sk);
+1 -1
net/key/af_key.c
··· 205 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { 206 skb_set_owner_r(*skb2, sk); 207 skb_queue_tail(&sk->sk_receive_queue, *skb2); 208 - sk->sk_data_ready(sk, (*skb2)->len); 209 *skb2 = NULL; 210 err = 0; 211 }
··· 205 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { 206 skb_set_owner_r(*skb2, sk); 207 skb_queue_tail(&sk->sk_receive_queue, *skb2); 208 + sk->sk_data_ready(sk); 209 *skb2 = NULL; 210 err = 0; 211 }
+2 -2
net/l2tp/l2tp_ppp.c
··· 753 session->deref = pppol2tp_session_sock_put; 754 755 /* If PMTU discovery was enabled, use the MTU that was discovered */ 756 - dst = sk_dst_get(sk); 757 if (dst != NULL) { 758 - u32 pmtu = dst_mtu(__sk_dst_get(sk)); 759 if (pmtu != 0) 760 session->mtu = session->mru = pmtu - 761 PPPOL2TP_HEADER_OVERHEAD;
··· 753 session->deref = pppol2tp_session_sock_put; 754 755 /* If PMTU discovery was enabled, use the MTU that was discovered */ 756 + dst = sk_dst_get(tunnel->sock); 757 if (dst != NULL) { 758 + u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock)); 759 if (pmtu != 0) 760 session->mtu = session->mru = pmtu - 761 PPPOL2TP_HEADER_OVERHEAD;
+2 -2
net/netlink/af_netlink.c
··· 1653 else 1654 #endif /* CONFIG_NETLINK_MMAP */ 1655 skb_queue_tail(&sk->sk_receive_queue, skb); 1656 - sk->sk_data_ready(sk, len); 1657 return len; 1658 } 1659 ··· 2394 return err ? : copied; 2395 } 2396 2397 - static void netlink_data_ready(struct sock *sk, int len) 2398 { 2399 BUG(); 2400 }
··· 1653 else 1654 #endif /* CONFIG_NETLINK_MMAP */ 1655 skb_queue_tail(&sk->sk_receive_queue, skb); 1656 + sk->sk_data_ready(sk); 1657 return len; 1658 } 1659 ··· 2394 return err ? : copied; 2395 } 2396 2397 + static void netlink_data_ready(struct sock *sk) 2398 { 2399 BUG(); 2400 }
+1 -1
net/netrom/af_netrom.c
··· 1011 skb_queue_head(&sk->sk_receive_queue, skb); 1012 1013 if (!sock_flag(sk, SOCK_DEAD)) 1014 - sk->sk_data_ready(sk, skb->len); 1015 1016 bh_unlock_sock(sk); 1017
··· 1011 skb_queue_head(&sk->sk_receive_queue, skb); 1012 1013 if (!sock_flag(sk, SOCK_DEAD)) 1014 + sk->sk_data_ready(sk); 1015 1016 bh_unlock_sock(sk); 1017
+1 -1
net/nfc/llcp_core.c
··· 976 new_sk->sk_state = LLCP_CONNECTED; 977 978 /* Wake the listening processes */ 979 - parent->sk_data_ready(parent, 0); 980 981 /* Send CC */ 982 nfc_llcp_send_cc(new_sock);
··· 976 new_sk->sk_state = LLCP_CONNECTED; 977 978 /* Wake the listening processes */ 979 + parent->sk_data_ready(parent); 980 981 /* Send CC */ 982 nfc_llcp_send_cc(new_sock);
+3 -3
net/packet/af_packet.c
··· 1848 skb->dropcount = atomic_read(&sk->sk_drops); 1849 __skb_queue_tail(&sk->sk_receive_queue, skb); 1850 spin_unlock(&sk->sk_receive_queue.lock); 1851 - sk->sk_data_ready(sk, skb->len); 1852 return 0; 1853 1854 drop_n_acct: ··· 2054 else 2055 prb_clear_blk_fill_status(&po->rx_ring); 2056 2057 - sk->sk_data_ready(sk, 0); 2058 2059 drop_n_restore: 2060 if (skb_head != skb->data && skb_shared(skb)) { ··· 2069 po->stats.stats1.tp_drops++; 2070 spin_unlock(&sk->sk_receive_queue.lock); 2071 2072 - sk->sk_data_ready(sk, 0); 2073 kfree_skb(copy_skb); 2074 goto drop_n_restore; 2075 }
··· 1848 skb->dropcount = atomic_read(&sk->sk_drops); 1849 __skb_queue_tail(&sk->sk_receive_queue, skb); 1850 spin_unlock(&sk->sk_receive_queue.lock); 1851 + sk->sk_data_ready(sk); 1852 return 0; 1853 1854 drop_n_acct: ··· 2054 else 2055 prb_clear_blk_fill_status(&po->rx_ring); 2056 2057 + sk->sk_data_ready(sk); 2058 2059 drop_n_restore: 2060 if (skb_head != skb->data && skb_shared(skb)) { ··· 2069 po->stats.stats1.tp_drops++; 2070 spin_unlock(&sk->sk_receive_queue.lock); 2071 2072 + sk->sk_data_ready(sk); 2073 kfree_skb(copy_skb); 2074 goto drop_n_restore; 2075 }
+2 -2
net/phonet/pep-gprs.c
··· 37 struct gprs_dev { 38 struct sock *sk; 39 void (*old_state_change)(struct sock *); 40 - void (*old_data_ready)(struct sock *, int); 41 void (*old_write_space)(struct sock *); 42 43 struct net_device *dev; ··· 146 return err; 147 } 148 149 - static void gprs_data_ready(struct sock *sk, int len) 150 { 151 struct gprs_dev *gp = sk->sk_user_data; 152 struct sk_buff *skb;
··· 37 struct gprs_dev { 38 struct sock *sk; 39 void (*old_state_change)(struct sock *); 40 + void (*old_data_ready)(struct sock *); 41 void (*old_write_space)(struct sock *); 42 43 struct net_device *dev; ··· 146 return err; 147 } 148 149 + static void gprs_data_ready(struct sock *sk) 150 { 151 struct gprs_dev *gp = sk->sk_user_data; 152 struct sk_buff *skb;
+3 -5
net/phonet/pep.c
··· 462 queue: 463 skb->dev = NULL; 464 skb_set_owner_r(skb, sk); 465 - err = skb->len; 466 skb_queue_tail(queue, skb); 467 if (!sock_flag(sk, SOCK_DEAD)) 468 - sk->sk_data_ready(sk, err); 469 return NET_RX_SUCCESS; 470 } 471 ··· 586 pn->rx_credits--; 587 skb->dev = NULL; 588 skb_set_owner_r(skb, sk); 589 - err = skb->len; 590 skb_queue_tail(&sk->sk_receive_queue, skb); 591 if (!sock_flag(sk, SOCK_DEAD)) 592 - sk->sk_data_ready(sk, err); 593 return NET_RX_SUCCESS; 594 595 case PNS_PEP_CONNECT_RESP: ··· 696 skb_queue_head(&sk->sk_receive_queue, skb); 697 sk_acceptq_added(sk); 698 if (!sock_flag(sk, SOCK_DEAD)) 699 - sk->sk_data_ready(sk, 0); 700 return NET_RX_SUCCESS; 701 702 case PNS_PEP_DISCONNECT_REQ:
··· 462 queue: 463 skb->dev = NULL; 464 skb_set_owner_r(skb, sk); 465 skb_queue_tail(queue, skb); 466 if (!sock_flag(sk, SOCK_DEAD)) 467 + sk->sk_data_ready(sk); 468 return NET_RX_SUCCESS; 469 } 470 ··· 587 pn->rx_credits--; 588 skb->dev = NULL; 589 skb_set_owner_r(skb, sk); 590 skb_queue_tail(&sk->sk_receive_queue, skb); 591 if (!sock_flag(sk, SOCK_DEAD)) 592 + sk->sk_data_ready(sk); 593 return NET_RX_SUCCESS; 594 595 case PNS_PEP_CONNECT_RESP: ··· 698 skb_queue_head(&sk->sk_receive_queue, skb); 699 sk_acceptq_added(sk); 700 if (!sock_flag(sk, SOCK_DEAD)) 701 + sk->sk_data_ready(sk); 702 return NET_RX_SUCCESS; 703 704 case PNS_PEP_DISCONNECT_REQ:
+2 -2
net/rds/tcp.h
··· 61 /* tcp_listen.c */ 62 int rds_tcp_listen_init(void); 63 void rds_tcp_listen_stop(void); 64 - void rds_tcp_listen_data_ready(struct sock *sk, int bytes); 65 66 /* tcp_recv.c */ 67 int rds_tcp_recv_init(void); 68 void rds_tcp_recv_exit(void); 69 - void rds_tcp_data_ready(struct sock *sk, int bytes); 70 int rds_tcp_recv(struct rds_connection *conn); 71 void rds_tcp_inc_free(struct rds_incoming *inc); 72 int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
··· 61 /* tcp_listen.c */ 62 int rds_tcp_listen_init(void); 63 void rds_tcp_listen_stop(void); 64 + void rds_tcp_listen_data_ready(struct sock *sk); 65 66 /* tcp_recv.c */ 67 int rds_tcp_recv_init(void); 68 void rds_tcp_recv_exit(void); 69 + void rds_tcp_data_ready(struct sock *sk); 70 int rds_tcp_recv(struct rds_connection *conn); 71 void rds_tcp_inc_free(struct rds_incoming *inc); 72 int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
+3 -3
net/rds/tcp_listen.c
··· 108 cond_resched(); 109 } 110 111 - void rds_tcp_listen_data_ready(struct sock *sk, int bytes) 112 { 113 - void (*ready)(struct sock *sk, int bytes); 114 115 rdsdebug("listen data ready sk %p\n", sk); 116 ··· 132 133 out: 134 read_unlock(&sk->sk_callback_lock); 135 - ready(sk, bytes); 136 } 137 138 int rds_tcp_listen_init(void)
··· 108 cond_resched(); 109 } 110 111 + void rds_tcp_listen_data_ready(struct sock *sk) 112 { 113 + void (*ready)(struct sock *sk); 114 115 rdsdebug("listen data ready sk %p\n", sk); 116 ··· 132 133 out: 134 read_unlock(&sk->sk_callback_lock); 135 + ready(sk); 136 } 137 138 int rds_tcp_listen_init(void)
+4 -4
net/rds/tcp_recv.c
··· 314 return ret; 315 } 316 317 - void rds_tcp_data_ready(struct sock *sk, int bytes) 318 { 319 - void (*ready)(struct sock *sk, int bytes); 320 struct rds_connection *conn; 321 struct rds_tcp_connection *tc; 322 323 - rdsdebug("data ready sk %p bytes %d\n", sk, bytes); 324 325 read_lock(&sk->sk_callback_lock); 326 conn = sk->sk_user_data; ··· 337 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 338 out: 339 read_unlock(&sk->sk_callback_lock); 340 - ready(sk, bytes); 341 } 342 343 int rds_tcp_recv_init(void)
··· 314 return ret; 315 } 316 317 + void rds_tcp_data_ready(struct sock *sk) 318 { 319 + void (*ready)(struct sock *sk); 320 struct rds_connection *conn; 321 struct rds_tcp_connection *tc; 322 323 + rdsdebug("data ready sk %p\n", sk); 324 325 read_lock(&sk->sk_callback_lock); 326 conn = sk->sk_user_data; ··· 337 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 338 out: 339 read_unlock(&sk->sk_callback_lock); 340 + ready(sk); 341 } 342 343 int rds_tcp_recv_init(void)
+1 -1
net/rose/af_rose.c
··· 1041 rose_start_heartbeat(make); 1042 1043 if (!sock_flag(sk, SOCK_DEAD)) 1044 - sk->sk_data_ready(sk, skb->len); 1045 1046 return 1; 1047 }
··· 1041 rose_start_heartbeat(make); 1042 1043 if (!sock_flag(sk, SOCK_DEAD)) 1044 + sk->sk_data_ready(sk); 1045 1046 return 1; 1047 }
+3 -3
net/rxrpc/ar-input.c
··· 113 spin_unlock_bh(&sk->sk_receive_queue.lock); 114 115 if (!sock_flag(sk, SOCK_DEAD)) 116 - sk->sk_data_ready(sk, skb_len); 117 } 118 skb = NULL; 119 } else { ··· 632 * handle data received on the local endpoint 633 * - may be called in interrupt context 634 */ 635 - void rxrpc_data_ready(struct sock *sk, int count) 636 { 637 struct rxrpc_skb_priv *sp; 638 struct rxrpc_local *local; 639 struct sk_buff *skb; 640 int ret; 641 642 - _enter("%p, %d", sk, count); 643 644 ASSERT(!irqs_disabled()); 645
··· 113 spin_unlock_bh(&sk->sk_receive_queue.lock); 114 115 if (!sock_flag(sk, SOCK_DEAD)) 116 + sk->sk_data_ready(sk); 117 } 118 skb = NULL; 119 } else { ··· 632 * handle data received on the local endpoint 633 * - may be called in interrupt context 634 */ 635 + void rxrpc_data_ready(struct sock *sk) 636 { 637 struct rxrpc_skb_priv *sp; 638 struct rxrpc_local *local; 639 struct sk_buff *skb; 640 int ret; 641 642 + _enter("%p", sk); 643 644 ASSERT(!irqs_disabled()); 645
+1 -1
net/rxrpc/ar-internal.h
··· 518 */ 519 extern const char *rxrpc_pkts[]; 520 521 - void rxrpc_data_ready(struct sock *, int); 522 int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool); 523 void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *); 524
··· 518 */ 519 extern const char *rxrpc_pkts[]; 520 521 + void rxrpc_data_ready(struct sock *); 522 int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool); 523 void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *); 524
+7 -1
net/sctp/socket.c
··· 6604 if (asoc->ep->sndbuf_policy) 6605 return __sctp_write_space(asoc); 6606 6607 /* Accounting for the sndbuf space is per socket, so we 6608 * need to wake up others, try to be fair and in case of 6609 * other associations, let them have a go first instead ··· 6745 goto out; 6746 } 6747 6748 - void sctp_data_ready(struct sock *sk, int len) 6749 { 6750 struct socket_wq *wq; 6751
··· 6604 if (asoc->ep->sndbuf_policy) 6605 return __sctp_write_space(asoc); 6606 6607 + /* If association goes down and is just flushing its 6608 + * outq, then just normally notify others. 6609 + */ 6610 + if (asoc->base.dead) 6611 + return sctp_write_space(sk); 6612 + 6613 /* Accounting for the sndbuf space is per socket, so we 6614 * need to wake up others, try to be fair and in case of 6615 * other associations, let them have a go first instead ··· 6739 goto out; 6740 } 6741 6742 + void sctp_data_ready(struct sock *sk) 6743 { 6744 struct socket_wq *wq; 6745
+2 -2
net/sctp/ulpqueue.c
··· 259 sctp_ulpq_clear_pd(ulpq); 260 261 if (queue == &sk->sk_receive_queue) 262 - sk->sk_data_ready(sk, 0); 263 return 1; 264 265 out_free: ··· 1135 1136 /* If there is data waiting, send it up the socket now. */ 1137 if (sctp_ulpq_clear_pd(ulpq) || ev) 1138 - sk->sk_data_ready(sk, 0); 1139 }
··· 259 sctp_ulpq_clear_pd(ulpq); 260 261 if (queue == &sk->sk_receive_queue) 262 + sk->sk_data_ready(sk); 263 return 1; 264 265 out_free: ··· 1135 1136 /* If there is data waiting, send it up the socket now. */ 1137 if (sctp_ulpq_clear_pd(ulpq) || ev) 1138 + sk->sk_data_ready(sk); 1139 }
+6 -6
net/sunrpc/svcsock.c
··· 60 61 static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, 62 int flags); 63 - static void svc_udp_data_ready(struct sock *, int); 64 static int svc_udp_recvfrom(struct svc_rqst *); 65 static int svc_udp_sendto(struct svc_rqst *); 66 static void svc_sock_detach(struct svc_xprt *); ··· 403 /* 404 * INET callback when data has been received on the socket. 405 */ 406 - static void svc_udp_data_ready(struct sock *sk, int count) 407 { 408 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 409 wait_queue_head_t *wq = sk_sleep(sk); 410 411 if (svsk) { 412 - dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n", 413 - svsk, sk, count, 414 test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); 415 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 416 svc_xprt_enqueue(&svsk->sk_xprt); ··· 731 * A data_ready event on a listening socket means there's a connection 732 * pending. Do not use state_change as a substitute for it. 733 */ 734 - static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused) 735 { 736 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 737 wait_queue_head_t *wq; ··· 783 wake_up_interruptible_all(wq); 784 } 785 786 - static void svc_tcp_data_ready(struct sock *sk, int count) 787 { 788 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 789 wait_queue_head_t *wq = sk_sleep(sk);
··· 60 61 static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, 62 int flags); 63 + static void svc_udp_data_ready(struct sock *); 64 static int svc_udp_recvfrom(struct svc_rqst *); 65 static int svc_udp_sendto(struct svc_rqst *); 66 static void svc_sock_detach(struct svc_xprt *); ··· 403 /* 404 * INET callback when data has been received on the socket. 405 */ 406 + static void svc_udp_data_ready(struct sock *sk) 407 { 408 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 409 wait_queue_head_t *wq = sk_sleep(sk); 410 411 if (svsk) { 412 + dprintk("svc: socket %p(inet %p), busy=%d\n", 413 + svsk, sk, 414 test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); 415 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 416 svc_xprt_enqueue(&svsk->sk_xprt); ··· 731 * A data_ready event on a listening socket means there's a connection 732 * pending. Do not use state_change as a substitute for it. 733 */ 734 + static void svc_tcp_listen_data_ready(struct sock *sk) 735 { 736 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 737 wait_queue_head_t *wq; ··· 783 wake_up_interruptible_all(wq); 784 } 785 786 + static void svc_tcp_data_ready(struct sock *sk) 787 { 788 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 789 wait_queue_head_t *wq = sk_sleep(sk);
+4 -4
net/sunrpc/xprtsock.c
··· 254 /* 255 * Saved socket callback addresses 256 */ 257 - void (*old_data_ready)(struct sock *, int); 258 void (*old_state_change)(struct sock *); 259 void (*old_write_space)(struct sock *); 260 void (*old_error_report)(struct sock *); ··· 951 * 952 * Currently this assumes we can read the whole reply in a single gulp. 953 */ 954 - static void xs_local_data_ready(struct sock *sk, int len) 955 { 956 struct rpc_task *task; 957 struct rpc_xprt *xprt; ··· 1014 * @len: how much data to read 1015 * 1016 */ 1017 - static void xs_udp_data_ready(struct sock *sk, int len) 1018 { 1019 struct rpc_task *task; 1020 struct rpc_xprt *xprt; ··· 1437 * @bytes: how much data to read 1438 * 1439 */ 1440 - static void xs_tcp_data_ready(struct sock *sk, int bytes) 1441 { 1442 struct rpc_xprt *xprt; 1443 read_descriptor_t rd_desc;
··· 254 /* 255 * Saved socket callback addresses 256 */ 257 + void (*old_data_ready)(struct sock *); 258 void (*old_state_change)(struct sock *); 259 void (*old_write_space)(struct sock *); 260 void (*old_error_report)(struct sock *); ··· 951 * 952 * Currently this assumes we can read the whole reply in a single gulp. 953 */ 954 + static void xs_local_data_ready(struct sock *sk) 955 { 956 struct rpc_task *task; 957 struct rpc_xprt *xprt; ··· 1014 * @len: how much data to read 1015 * 1016 */ 1017 + static void xs_udp_data_ready(struct sock *sk) 1018 { 1019 struct rpc_task *task; 1020 struct rpc_xprt *xprt; ··· 1437 * @bytes: how much data to read 1438 * 1439 */ 1440 + static void xs_tcp_data_ready(struct sock *sk) 1441 { 1442 struct rpc_xprt *xprt; 1443 read_descriptor_t rd_desc;
+2 -2
net/tipc/server.c
··· 119 return con; 120 } 121 122 - static void sock_data_ready(struct sock *sk, int unused) 123 { 124 struct tipc_conn *con; 125 ··· 297 newcon->usr_data = s->tipc_conn_new(newcon->conid); 298 299 /* Wake up receive process in case of 'SYN+' message */ 300 - newsock->sk->sk_data_ready(newsock->sk, 0); 301 return ret; 302 } 303
··· 119 return con; 120 } 121 122 + static void sock_data_ready(struct sock *sk) 123 { 124 struct tipc_conn *con; 125 ··· 297 newcon->usr_data = s->tipc_conn_new(newcon->conid); 298 299 /* Wake up receive process in case of 'SYN+' message */ 300 + newsock->sk->sk_data_ready(newsock->sk); 301 return ret; 302 } 303
+3 -3
net/tipc/socket.c
··· 45 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 46 47 static int backlog_rcv(struct sock *sk, struct sk_buff *skb); 48 - static void tipc_data_ready(struct sock *sk, int len); 49 static void tipc_write_space(struct sock *sk); 50 static int tipc_release(struct socket *sock); 51 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); ··· 1248 * @sk: socket 1249 * @len: the length of messages 1250 */ 1251 - static void tipc_data_ready(struct sock *sk, int len) 1252 { 1253 struct socket_wq *wq; 1254 ··· 1410 __skb_queue_tail(&sk->sk_receive_queue, buf); 1411 skb_set_owner_r(buf, sk); 1412 1413 - sk->sk_data_ready(sk, 0); 1414 return TIPC_OK; 1415 } 1416
··· 45 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 46 47 static int backlog_rcv(struct sock *sk, struct sk_buff *skb); 48 + static void tipc_data_ready(struct sock *sk); 49 static void tipc_write_space(struct sock *sk); 50 static int tipc_release(struct socket *sock); 51 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); ··· 1248 * @sk: socket 1249 * @len: the length of messages 1250 */ 1251 + static void tipc_data_ready(struct sock *sk) 1252 { 1253 struct socket_wq *wq; 1254 ··· 1410 __skb_queue_tail(&sk->sk_receive_queue, buf); 1411 skb_set_owner_r(buf, sk); 1412 1413 + sk->sk_data_ready(sk); 1414 return TIPC_OK; 1415 } 1416
+3 -3
net/unix/af_unix.c
··· 1217 __skb_queue_tail(&other->sk_receive_queue, skb); 1218 spin_unlock(&other->sk_receive_queue.lock); 1219 unix_state_unlock(other); 1220 - other->sk_data_ready(other, 0); 1221 sock_put(other); 1222 return 0; 1223 ··· 1600 if (max_level > unix_sk(other)->recursion_level) 1601 unix_sk(other)->recursion_level = max_level; 1602 unix_state_unlock(other); 1603 - other->sk_data_ready(other, len); 1604 sock_put(other); 1605 scm_destroy(siocb->scm); 1606 return len; ··· 1706 if (max_level > unix_sk(other)->recursion_level) 1707 unix_sk(other)->recursion_level = max_level; 1708 unix_state_unlock(other); 1709 - other->sk_data_ready(other, size); 1710 sent += size; 1711 } 1712
··· 1217 __skb_queue_tail(&other->sk_receive_queue, skb); 1218 spin_unlock(&other->sk_receive_queue.lock); 1219 unix_state_unlock(other); 1220 + other->sk_data_ready(other); 1221 sock_put(other); 1222 return 0; 1223 ··· 1600 if (max_level > unix_sk(other)->recursion_level) 1601 unix_sk(other)->recursion_level = max_level; 1602 unix_state_unlock(other); 1603 + other->sk_data_ready(other); 1604 sock_put(other); 1605 scm_destroy(siocb->scm); 1606 return len; ··· 1706 if (max_level > unix_sk(other)->recursion_level) 1707 unix_sk(other)->recursion_level = max_level; 1708 unix_state_unlock(other); 1709 + other->sk_data_ready(other); 1710 sent += size; 1711 } 1712
+1 -1
net/vmw_vsock/vmci_transport_notify.c
··· 315 struct vsock_sock *vsk = vsock_sk(sk); 316 PKT_FIELD(vsk, sent_waiting_read) = false; 317 #endif 318 - sk->sk_data_ready(sk, 0); 319 } 320 321 static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
··· 315 struct vsock_sock *vsk = vsock_sk(sk); 316 PKT_FIELD(vsk, sent_waiting_read) = false; 317 #endif 318 + sk->sk_data_ready(sk); 319 } 320 321 static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
+2 -2
net/vmw_vsock/vmci_transport_notify_qstate.c
··· 92 bool bottom_half, 93 struct sockaddr_vm *dst, struct sockaddr_vm *src) 94 { 95 - sk->sk_data_ready(sk, 0); 96 } 97 98 static void vsock_block_update_write_window(struct sock *sk) ··· 290 /* See the comment in 291 * vmci_transport_notify_pkt_send_post_enqueue(). 292 */ 293 - sk->sk_data_ready(sk, 0); 294 } 295 296 return err;
··· 92 bool bottom_half, 93 struct sockaddr_vm *dst, struct sockaddr_vm *src) 94 { 95 + sk->sk_data_ready(sk); 96 } 97 98 static void vsock_block_update_write_window(struct sock *sk) ··· 290 /* See the comment in 291 * vmci_transport_notify_pkt_send_post_enqueue(). 292 */ 293 + sk->sk_data_ready(sk); 294 } 295 296 return err;
+1 -1
net/x25/af_x25.c
··· 1064 x25_start_heartbeat(make); 1065 1066 if (!sock_flag(sk, SOCK_DEAD)) 1067 - sk->sk_data_ready(sk, skb->len); 1068 rc = 1; 1069 sock_put(sk); 1070 out:
··· 1064 x25_start_heartbeat(make); 1065 1066 if (!sock_flag(sk, SOCK_DEAD)) 1067 + sk->sk_data_ready(sk); 1068 rc = 1; 1069 sock_put(sk); 1070 out:
+1 -1
net/x25/x25_in.c
··· 79 skb_set_owner_r(skbn, sk); 80 skb_queue_tail(&sk->sk_receive_queue, skbn); 81 if (!sock_flag(sk, SOCK_DEAD)) 82 - sk->sk_data_ready(sk, skbn->len); 83 84 return 0; 85 }
··· 79 skb_set_owner_r(skbn, sk); 80 skb_queue_tail(&sk->sk_receive_queue, skbn); 81 if (!sock_flag(sk, SOCK_DEAD)) 82 + sk->sk_data_ready(sk); 83 84 return 0; 85 }
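
The hunks above all make the same mechanical change: ->sk_data_ready() loses its byte-count argument, so any code that hooks the callback must both use the new one-argument signature and stop relying on a length snapshotted by the caller. Below is a minimal sketch (not part of this merge) of the save-and-chain pattern the in-tree users follow, e.g. as in net/rds/tcp_listen.c above; the names my_sock, my_data_ready and my_install_data_ready are illustrative only and do not exist in any of the files touched here.

	/* Illustrative only: hook ->sk_data_ready() under the new
	 * one-argument signature, saving and chaining the original
	 * callback.
	 */
	#include <net/sock.h>

	struct my_sock {
		void (*old_data_ready)(struct sock *sk);
	};

	static void my_data_ready(struct sock *sk)
	{
		struct my_sock *ms;
		void (*ready)(struct sock *sk);

		read_lock(&sk->sk_callback_lock);
		ms = sk->sk_user_data;
		if (!ms) {
			/* Teardown raced us and already restored the
			 * original callback; just call whatever is
			 * installed now.
			 */
			ready = sk->sk_data_ready;
			goto out;
		}

		/* Per-protocol wakeup/scheduling goes here.  If a byte
		 * count is needed, derive it from sk->sk_receive_queue
		 * under its own lock rather than trusting a length
		 * passed in by the caller -- the skb that triggered the
		 * wakeup may already have been consumed and freed.
		 */
		ready = ms->old_data_ready;
	out:
		read_unlock(&sk->sk_callback_lock);
		ready(sk);	/* chain to the saved callback */
	}

	static void my_install_data_ready(struct sock *sk, struct my_sock *ms)
	{
		write_lock_bh(&sk->sk_callback_lock);
		ms->old_data_ready = sk->sk_data_ready;
		sk->sk_user_data = ms;
		sk->sk_data_ready = my_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
	}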