Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (37 commits)
net: deinit automatic LIST_HEAD
net: dont leave active on stack LIST_HEAD
net: provide default_advmss() methods to blackhole dst_ops
tg3: Restrict phy ioctl access
drivers/net: Call netif_carrier_off at the end of the probe
ixgbe: work around for DDP last buffer size
ixgbe: fix panic due to uninitialised pointer
e1000e: flush all writebacks before unload
e1000e: check down flag in tasks
isdn: hisax: Use l2headersize() instead of dup (and buggy) func.
arp_notify: unconditionally send gratuitous ARP for NETDEV_NOTIFY_PEERS.
cxgb4vf: Use defined Mailbox Timeout
cxgb4vf: Quiesce Virtual Interfaces on shutdown ...
cxgb4vf: Behave properly when CONFIG_DEBUG_FS isn't defined ...
cxgb4vf: Check driver parameters in the right place ...
pch_gbe: Fix the MAC Address load issue.
iwlwifi: Delete iwl3945_good_plcp_health.
net/can/softing: make CAN_SOFTING_CS depend on CAN_SOFTING
netfilter: nf_iterate: fix incorrect RCU usage
pch_gbe: Fix the issue that the receiving data is not normal.
...

+328 -202
+2
Documentation/networking/Makefile
···
 # List of programs to build
 hostprogs-y := ifenslave
 
+HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include
+
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
+3 -2
drivers/atm/solos-pci.c
···
 }
 
 skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
-if (!skb && net_ratelimit()) {
-    dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
+if (!skb) {
+    if (net_ratelimit())
+        dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
     return -ENOMEM;
 }
 header = (void *)skb_put(skb, sizeof(*header));
+2
drivers/bluetooth/ath3k.c
···
 /* Atheros AR3011 with sflash firmware*/
 { USB_DEVICE(0x0CF3, 0x3002) },
 
+/* Atheros AR9285 Malbec with sflash firmware */
+{ USB_DEVICE(0x03F0, 0x311D) },
 { } /* Terminating entry */
 };
+3
drivers/bluetooth/btusb.c
···
 /* Atheros 3011 with sflash firmware */
 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
 
+/* Atheros AR9285 Malbec with sflash firmware */
+{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+
 /* Broadcom BCM2035 */
 { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
 { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
+13 -15
drivers/isdn/hisax/isdnl2.c
···
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
     struct PStack *st = fi->userdata;
-    struct sk_buff *skb, *oskb;
+    struct sk_buff *skb;
     struct Layer2 *l2 = &st->l2;
     u_char header[MAX_HEADER_LEN];
-    int i;
+    int i, hdr_space_needed;
     int unsigned p1;
     u_long flags;
···
     if (!skb)
         return;
 
+    hdr_space_needed = l2headersize(l2, 0);
+    if (hdr_space_needed > skb_headroom(skb)) {
+        struct sk_buff *orig_skb = skb;
+
+        skb = skb_realloc_headroom(skb, hdr_space_needed);
+        if (!skb) {
+            dev_kfree_skb(orig_skb);
+            return;
+        }
+    }
     spin_lock_irqsave(&l2->lock, flags);
     if(test_bit(FLG_MOD128, &l2->flag))
         p1 = (l2->vs - l2->va) % 128;
···
         l2->vs = (l2->vs + 1) % 8;
     }
     spin_unlock_irqrestore(&l2->lock, flags);
-    p1 = skb->data - skb->head;
-    if (p1 >= i)
-        memcpy(skb_push(skb, i), header, i);
-    else {
-        printk(KERN_WARNING
-        "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
-        oskb = skb;
-        skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
-        memcpy(skb_put(skb, i), header, i);
-        skb_copy_from_linear_data(oskb,
-            skb_put(skb, oskb->len), oskb->len);
-        dev_kfree_skb(oskb);
-    }
+    memcpy(skb_push(skb, i), header, i);
     st->l2.l2l1(st, PH_PULL | INDICATION, skb);
     test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
     if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
+1 -1
drivers/net/can/softing/Kconfig
···
 config CAN_SOFTING_CS
     tristate "Softing Gmbh CAN pcmcia cards"
     depends on PCMCIA
-    select CAN_SOFTING
+    depends on CAN_SOFTING
     ---help---
       Support for PCMCIA cards from Softing Gmbh & some cards
       from Vector Gmbh.
+60 -20
drivers/net/cxgb4vf/cxgb4vf_main.c
···
 {
     int i;
 
-    BUG_ON(adapter->debugfs_root == NULL);
+    BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
     /*
      * Debugfs support is best effort.
···
  */
 static void cleanup_debugfs(struct adapter *adapter)
 {
-    BUG_ON(adapter->debugfs_root == NULL);
+    BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
     /*
      * Unlike our sister routine cleanup_proc(), we don't need to remove
···
     struct net_device *netdev;
 
     /*
-     * Vet our module parameters.
-     */
-    if (msi != MSI_MSIX && msi != MSI_MSI) {
-        dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
-                " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
-                MSI_MSI);
-        err = -EINVAL;
-        goto err_out;
-    }
-
-    /*
      * Print our driver banner the first time we're called to initialize a
      * device.
      */
···
     /*
      * Set up our debugfs entries.
      */
-    if (cxgb4vf_debugfs_root) {
+    if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
         adapter->debugfs_root =
             debugfs_create_dir(pci_name(pdev),
                        cxgb4vf_debugfs_root);
-        if (adapter->debugfs_root == NULL)
+        if (IS_ERR_OR_NULL(adapter->debugfs_root))
             dev_warn(&pdev->dev, "could not create debugfs"
                      " directory");
         else
···
  */
 
 err_free_debugfs:
-    if (adapter->debugfs_root) {
+    if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
         cleanup_debugfs(adapter);
         debugfs_remove_recursive(adapter->debugfs_root);
     }
···
 err_disable_device:
     pci_disable_device(pdev);
 
-err_out:
     return err;
 }
···
     /*
      * Tear down our debugfs entries.
      */
-    if (adapter->debugfs_root) {
+    if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
         cleanup_debugfs(adapter);
         debugfs_remove_recursive(adapter->debugfs_root);
     }
···
     pci_disable_device(pdev);
     pci_clear_master(pdev);
     pci_release_regions(pdev);
+}
+
+/*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+    struct adapter *adapter;
+    int pidx;
+
+    adapter = pci_get_drvdata(pdev);
+    if (!adapter)
+        return;
+
+    /*
+     * Disable all Virtual Interfaces.  This will shut down the
+     * delivery of all ingress packets into the chip for these
+     * Virtual Interfaces.
+     */
+    for_each_port(adapter, pidx) {
+        struct net_device *netdev;
+        struct port_info *pi;
+
+        if (!test_bit(pidx, &adapter->registered_device_map))
+            continue;
+
+        netdev = adapter->port[pidx];
+        if (!netdev)
+            continue;
+
+        pi = netdev_priv(netdev);
+        t4vf_enable_vi(adapter, pi->viid, false, false);
+    }
+
+    /*
+     * Free up all Queues which will prevent further DMA and
+     * Interrupts allowing various internal pathways to drain.
+     */
+    t4vf_free_sge_resources(adapter);
 }
 
 /*
···
     .id_table = cxgb4vf_pci_tbl,
     .probe = cxgb4vf_pci_probe,
     .remove = __devexit_p(cxgb4vf_pci_remove),
+    .shutdown = __devexit_p(cxgb4vf_pci_shutdown),
 };
 
 /*
···
 {
     int ret;
 
+    /*
+     * Vet our module parameters.
+     */
+    if (msi != MSI_MSIX && msi != MSI_MSI) {
+        printk(KERN_WARNING KBUILD_MODNAME
+               ": bad module parameter msi=%d; must be %d"
+               " (MSI-X or MSI) or %d (MSI)\n",
+               msi, MSI_MSIX, MSI_MSI);
+        return -EINVAL;
+    }
+
     /* Debugfs support is optional, just warn if this fails */
     cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
-    if (!cxgb4vf_debugfs_root)
+    if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
         printk(KERN_WARNING KBUILD_MODNAME ": could not create"
                " debugfs entry, continuing\n");
 
     ret = pci_register_driver(&cxgb4vf_driver);
-    if (ret < 0)
+    if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
         debugfs_remove(cxgb4vf_debugfs_root);
     return ret;
 }
+1 -1
drivers/net/cxgb4vf/t4vf_hw.c
···
     delay_idx = 0;
     ms = delay[0];
 
-    for (i = 0; i < 500; i += ms) {
+    for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
         if (sleep_ok) {
             ms = delay[delay_idx];
             if (delay_idx < ARRAY_SIZE(delay) - 1)
+42 -10
drivers/net/e1000e/netdev.c
···
     u16 phy_status, phy_1000t_status, phy_ext_status;
     u16 pci_status;
 
+    if (test_bit(__E1000_DOWN, &adapter->state))
+        return;
+
     e1e_rphy(hw, PHY_STATUS, &phy_status);
     e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
     e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
···
 {
     struct e1000_adapter *adapter = container_of(work,
                     struct e1000_adapter, downshift_task);
+
+    if (test_bit(__E1000_DOWN, &adapter->state))
+        return;
 
     e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
 }
···
     return 0;
 }
 
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+    struct e1000_hw *hw = &adapter->hw;
+
+    if (!(adapter->flags2 & FLAG2_DMA_BURST))
+        return;
+
+    /* flush pending descriptor writebacks to memory */
+    ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+    ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+    /* execute the writes immediately */
+    e1e_flush();
+}
+
 void e1000e_down(struct e1000_adapter *adapter)
 {
     struct net_device *netdev = adapter->netdev;
···
 
     if (!pci_channel_offline(adapter->pdev))
         e1000e_reset(adapter);
+
+    e1000e_flush_descriptors(adapter);
+
     e1000_clean_tx_ring(adapter);
     e1000_clean_rx_ring(adapter);
 
···
 {
     struct e1000_adapter *adapter = container_of(work,
                     struct e1000_adapter, update_phy_task);
+
+    if (test_bit(__E1000_DOWN, &adapter->state))
+        return;
+
     e1000_get_phy_info(&adapter->hw);
 }
···
 static void e1000_update_phy_info(unsigned long data)
 {
     struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+    if (test_bit(__E1000_DOWN, &adapter->state))
+        return;
+
     schedule_work(&adapter->update_phy_task);
 }
···
     u32 link, tctl;
     int tx_pending = 0;
 
+    if (test_bit(__E1000_DOWN, &adapter->state))
+        return;
+
     link = e1000e_has_link(adapter);
     if ((netif_carrier_ok(netdev)) && link) {
         /* Cancel scheduled suspend requests. */
···
     else
         ew32(ICS, E1000_ICS_RXDMT0);
 
+    /* flush pending descriptors to memory before detecting Tx hang */
+    e1000e_flush_descriptors(adapter);
+
     /* Force detection of hung controller every watchdog period */
     adapter->detect_tx_hung = 1;
-
-    /* flush partial descriptors to memory before detecting Tx hang */
-    if (adapter->flags2 & FLAG2_DMA_BURST) {
-        ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
-        ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
-        /*
-         * no need to flush the writes because the timeout code does
-         * an er32 first thing
-         */
-    }
 
     /*
      * With 82571 controllers, LAA may be overwritten due to controller
···
 {
     struct e1000_adapter *adapter;
     adapter = container_of(work, struct e1000_adapter, reset_task);
+
+    /* don't run the task if already down */
+    if (test_bit(__E1000_DOWN, &adapter->state))
+        return;
 
     if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
         (adapter->flags & FLAG_RX_RESTART_NOW))) {
+2
drivers/net/forcedeth.c
···
         goto out_error;
     }
 
+    netif_carrier_off(dev);
+
     dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
              dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
+50 -1
drivers/net/ixgbe/ixgbe_fcoe.c
···
     struct scatterlist *sg;
     unsigned int i, j, dmacount;
     unsigned int len;
-    static const unsigned int bufflen = 4096;
+    static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
     unsigned int firstoff = 0;
     unsigned int lastsize;
     unsigned int thisoff = 0;
···
     }
     /* only the last buffer may have non-full bufflen */
     lastsize = thisoff + thislen;
+
+    /*
+     * lastsize can not be buffer len.
+     * If it is then adding another buffer with lastsize = 1.
+     */
+    if (lastsize == bufflen) {
+        if (j >= IXGBE_BUFFCNT_MAX) {
+            e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+                  "not enough user buffers. We need an extra "
+                  "buffer because lastsize is bufflen.\n",
+                  xid, i, j, dmacount, (u64)addr);
+            goto out_noddp_free;
+        }
+
+        ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+        j++;
+        lastsize = 1;
+    }
 
     fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
     fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
···
         e_err(drv, "failed to allocated FCoE DDP pool\n");
 
     spin_lock_init(&fcoe->lock);
+
+    /* Extra buffer to be shared by all DDPs for HW work around */
+    fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+    if (fcoe->extra_ddp_buffer == NULL) {
+        e_err(drv, "failed to allocated extra DDP buffer\n");
+        goto out_extra_ddp_buffer_alloc;
+    }
+
+    fcoe->extra_ddp_buffer_dma =
+        dma_map_single(&adapter->pdev->dev,
+                       fcoe->extra_ddp_buffer,
+                       IXGBE_FCBUFF_MIN,
+                       DMA_FROM_DEVICE);
+    if (dma_mapping_error(&adapter->pdev->dev,
+                          fcoe->extra_ddp_buffer_dma)) {
+        e_err(drv, "failed to map extra DDP buffer\n");
+        goto out_extra_ddp_buffer_dma;
+    }
     }
 
     /* Enable L2 eth type filter for FCoE */
···
         }
     }
 #endif
+
+    return;
+
+out_extra_ddp_buffer_dma:
+    kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+    pci_pool_destroy(fcoe->pool);
+    fcoe->pool = NULL;
 }
 
 /**
···
     if (fcoe->pool) {
         for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
             ixgbe_fcoe_ddp_put(adapter->netdev, i);
+        dma_unmap_single(&adapter->pdev->dev,
+                         fcoe->extra_ddp_buffer_dma,
+                         IXGBE_FCBUFF_MIN,
+                         DMA_FROM_DEVICE);
+        kfree(fcoe->extra_ddp_buffer);
         pci_pool_destroy(fcoe->pool);
         fcoe->pool = NULL;
     }
+2
drivers/net/ixgbe/ixgbe_fcoe.h
···
     spinlock_t lock;
     struct pci_pool *pool;
     struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+    unsigned char *extra_ddp_buffer;
+    dma_addr_t extra_ddp_buffer_dma;
 };
 
 #endif /* _IXGBE_FCOE_H */
+4 -2
drivers/net/ixgbe/ixgbe_main.c
···
      * We need to try and force an autonegotiation
      * session, then bring up link.
      */
-    hw->mac.ops.setup_sfp(hw);
+    if (hw->mac.ops.setup_sfp)
+        hw->mac.ops.setup_sfp(hw);
     if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
         schedule_work(&adapter->multispeed_fiber_task);
 } else {
···
     unregister_netdev(adapter->netdev);
     return;
 }
-hw->mac.ops.setup_sfp(hw);
+if (hw->mac.ops.setup_sfp)
+    hw->mac.ops.setup_sfp(hw);
 
 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
     /* This will also work for DA Twinax connections */
+1 -1
drivers/net/pch_gbe/pch_gbe.h
···
     struct pch_gbe_regs_mac_adr mac_adr[16];
     u32 ADDR_MASK;
     u32 MIIM;
-    u32 reserve2;
+    u32 MAC_ADDR_LOAD;
     u32 RGMII_ST;
     u32 RGMII_CTRL;
     u32 reserve3[3];
+63 -43
drivers/net/pch_gbe/pch_gbe_main.c
···
 #define PCH_GBE_SHORT_PKT       64
 #define DSC_INIT16              0xC000
 #define PCH_GBE_DMA_ALIGN       0
+#define PCH_GBE_DMA_PADDING     2
 #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ)  /* watchdog time */
 #define PCH_GBE_COPYBREAK_DEFAULT 256
 #define PCH_GBE_PCI_BAR         1
···
 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
                                int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+    iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
 /**
  * pch_gbe_mac_read_mac_addr - Read MAC address
  * @hw: Pointer to the HW structure
···
     struct pch_gbe_buffer *buffer_info;
     struct pch_gbe_rx_desc *rx_desc;
     u32 length;
-    unsigned char tmp_packet[ETH_HLEN];
     unsigned int i;
     unsigned int cleaned_count = 0;
     bool cleaned = false;
-    struct sk_buff *skb;
+    struct sk_buff *skb, *new_skb;
     u8 dma_status;
     u16 gbec_status;
     u32 tcp_ip_status;
-    u8 skb_copy_flag = 0;
-    u8 skb_padding_flag = 0;
 
     i = rx_ring->next_to_clean;
···
         pr_err("Receive CRC Error\n");
     } else {
         /* get receive length */
-        /* length convert[-3], padding[-2] */
-        length = (rx_desc->rx_words_eob) - 3 - 2;
+        /* length convert[-3] */
+        length = (rx_desc->rx_words_eob) - 3;
 
         /* Decide the data conversion method */
         if (!adapter->rx_csum) {
             /* [Header:14][payload] */
-            skb_padding_flag = 0;
-            skb_copy_flag = 1;
-        } else {
-            /* [Header:14][padding:2][payload] */
-            skb_padding_flag = 1;
-            if (length < copybreak)
-                skb_copy_flag = 1;
-            else
-                skb_copy_flag = 0;
-        }
-
-        /* Data conversion */
-        if (skb_copy_flag) {    /* recycle  skb */
-            struct sk_buff *new_skb;
-            new_skb =
-                netdev_alloc_skb(netdev,
-                                 length + NET_IP_ALIGN);
-            if (new_skb) {
-                if (!skb_padding_flag) {
-                    skb_reserve(new_skb,
-                            NET_IP_ALIGN);
+            if (NET_IP_ALIGN) {
+                /* Because alignment differs,
+                 * the new_skb is newly allocated,
+                 * and data is copied to new_skb.*/
+                new_skb = netdev_alloc_skb(netdev,
+                                 length + NET_IP_ALIGN);
+                if (!new_skb) {
+                    /* dorrop error */
+                    pr_err("New skb allocation "
+                           "Error\n");
+                    goto dorrop;
                 }
+                skb_reserve(new_skb, NET_IP_ALIGN);
                 memcpy(new_skb->data, skb->data,
-                    length);
-                /* save the skb
-                 * in buffer_info as good */
+                       length);
                 skb = new_skb;
-            } else if (!skb_padding_flag) {
-                /* dorrop error */
-                pr_err("New skb allocation Error\n");
-                goto dorrop;
+            } else {
+                /* DMA buffer is used as SKB as it is.*/
+                buffer_info->skb = NULL;
             }
         } else {
-            buffer_info->skb = NULL;
+            /* [Header:14][padding:2][payload] */
+            /* The length includes padding length */
+            length = length - PCH_GBE_DMA_PADDING;
+            if ((length < copybreak) ||
+                (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
+                /* Because alignment differs,
+                 * the new_skb is newly allocated,
+                 * and data is copied to new_skb.
+                 * Padding data is deleted
+                 * at the time of a copy.*/
+                new_skb = netdev_alloc_skb(netdev,
+                                 length + NET_IP_ALIGN);
+                if (!new_skb) {
+                    /* dorrop error */
+                    pr_err("New skb allocation "
+                           "Error\n");
+                    goto dorrop;
+                }
+                skb_reserve(new_skb, NET_IP_ALIGN);
+                memcpy(new_skb->data, skb->data,
+                       ETH_HLEN);
+                memcpy(&new_skb->data[ETH_HLEN],
+                       &skb->data[ETH_HLEN +
+                       PCH_GBE_DMA_PADDING],
+                       length - ETH_HLEN);
+                skb = new_skb;
+            } else {
+                /* Padding data is deleted
+                 * by moving header data.*/
+                memmove(&skb->data[PCH_GBE_DMA_PADDING],
+                        &skb->data[0], ETH_HLEN);
+                skb_reserve(skb, NET_IP_ALIGN);
+                buffer_info->skb = NULL;
+            }
         }
-        if (skb_padding_flag) {
-            memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
-            memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
-                ETH_HLEN);
-            skb_reserve(skb, NET_IP_ALIGN);
-
-        }
-
+        /* The length includes FCS length */
+        length = length - ETH_FCS_LEN;
         /* update status of driver */
         adapter->stats.rx_bytes += length;
         adapter->stats.rx_packets++;
···
     netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
     pch_gbe_set_ethtool_ops(netdev);
 
+    pch_gbe_mac_load_mac_addr(&adapter->hw);
     pch_gbe_mac_reset_hw(&adapter->hw);
 
     /* setup the private structure */
+2
drivers/net/r8169.c
···
     if (pci_dev_run_wake(pdev))
         pm_runtime_put_noidle(&pdev->dev);
 
+    netif_carrier_off(dev);
+
 out:
     return rc;
 
+3 -1
drivers/net/stmmac/stmmac_main.c
···
 
     priv->hw = device;
 
-    if (device_can_wakeup(priv->device))
+    if (device_can_wakeup(priv->device)) {
         priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+        enable_irq_wake(dev->irq);
+    }
 
     return 0;
 }
+6 -2
drivers/net/tg3.c
···
     if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
         break;          /* We have no PHY */
 
-    if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+    if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+        ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+         !netif_running(dev)))
         return -EAGAIN;
 
     spin_lock_bh(&tp->lock);
···
     if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
         break;          /* We have no PHY */
 
-    if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+    if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+        ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+         !netif_running(dev)))
         return -EAGAIN;
 
     spin_lock_bh(&tp->lock);
+6 -6
drivers/net/usb/hso.c
···
 
 static void hso_free_tiomget(struct hso_serial *serial)
 {
-    struct hso_tiocmget *tiocmget = serial->tiocmget;
+    struct hso_tiocmget *tiocmget;
+    if (!serial)
+        return;
+    tiocmget = serial->tiocmget;
     if (tiocmget) {
-        if (tiocmget->urb) {
-            usb_free_urb(tiocmget->urb);
-            tiocmget->urb = NULL;
-        }
+        usb_free_urb(tiocmget->urb);
+        tiocmget->urb = NULL;
         serial->tiocmget = NULL;
         kfree(tiocmget);
-
     }
 }
+3 -1
drivers/net/usb/usbnet.c
···
     if (urb != NULL) {
         clear_bit (EVENT_RX_MEMORY, &dev->flags);
         status = usb_autopm_get_interface(dev->intf);
-        if (status < 0)
+        if (status < 0) {
+            usb_free_urb(urb);
             goto fail_lowmem;
+        }
         if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
             resched = 0;
         usb_autopm_put_interface(dev->intf);
-67
drivers/net/wireless/iwlwifi/iwl-3945.c
···
 }
 #endif
 
-/**
- * iwl3945_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
-                struct iwl_rx_packet *pkt)
-{
-    bool rc = true;
-    struct iwl3945_notif_statistics current_stat;
-    int combined_plcp_delta;
-    unsigned int plcp_msec;
-    unsigned long plcp_received_jiffies;
-
-    if (priv->cfg->base_params->plcp_delta_threshold ==
-        IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
-        IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
-        return rc;
-    }
-    memcpy(&current_stat, pkt->u.raw, sizeof(struct
-            iwl3945_notif_statistics));
-    /*
-     * check for plcp_err and trigger radio reset if it exceeds
-     * the plcp error threshold plcp_delta.
-     */
-    plcp_received_jiffies = jiffies;
-    plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
-                    (long) priv->plcp_jiffies);
-    priv->plcp_jiffies = plcp_received_jiffies;
-    /*
-     * check to make sure plcp_msec is not 0 to prevent division
-     * by zero.
-     */
-    if (plcp_msec) {
-        combined_plcp_delta =
-            (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
-            le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
-
-        if ((combined_plcp_delta > 0) &&
-            ((combined_plcp_delta * 100) / plcp_msec) >
-            priv->cfg->base_params->plcp_delta_threshold) {
-            /*
-             * if plcp_err exceed the threshold, the following
-             * data is printed in csv format:
-             *    Text: plcp_err exceeded %d,
-             *    Received ofdm.plcp_err,
-             *    Current ofdm.plcp_err,
-             *    combined_plcp_delta,
-             *    plcp_msec
-             */
-            IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
-                "%u, %d, %u mSecs\n",
-                priv->cfg->base_params->plcp_delta_threshold,
-                le32_to_cpu(current_stat.rx.ofdm.plcp_err),
-                combined_plcp_delta, plcp_msec);
-            /*
-             * Reset the RF radio due to the high plcp
-             * error rate
-             */
-            rc = false;
-        }
-    }
-    return rc;
-}
-
 void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
                   struct iwl_rx_mem_buffer *rxb)
 {
···
     .isr_ops = {
         .isr = iwl_isr_legacy,
     },
-    .check_plcp_health = iwl3945_good_plcp_health,
 
     .debugfs_ops = {
         .rx_stats_read = iwl3945_ucode_rx_stats_read,
+1
net/bluetooth/l2cap.c
···
         result = L2CAP_CR_SEC_BLOCK;
     else
         result = L2CAP_CR_BAD_PSM;
+    sk->sk_state = BT_DISCONN;
 
     rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
     rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+1 -1
net/bridge/br_input.c
···
 if (is_multicast_ether_addr(dest)) {
     mdst = br_mdb_get(br, skb);
     if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
-        if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
+        if ((mdst && mdst->mglist) ||
             br_multicast_is_router(br))
             skb2 = skb;
         br_multicast_forward(mdst, skb, skb2);
+9 -10
net/bridge/br_multicast.c
···
     if (!netif_running(br->dev) || timer_pending(&mp->timer))
         goto out;
 
-    if (!hlist_unhashed(&mp->mglist))
-        hlist_del_init(&mp->mglist);
+    mp->mglist = false;
 
     if (mp->ports)
         goto out;
···
     del_timer(&p->query_timer);
     call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
-    if (!mp->ports && hlist_unhashed(&mp->mglist) &&
+    if (!mp->ports && !mp->mglist &&
         netif_running(br->dev))
         mod_timer(&mp->timer, jiffies);
 
···
     struct net_bridge *br = mp->br;
 
     spin_lock(&br->multicast_lock);
-    if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
+    if (!netif_running(br->dev) || !mp->mglist ||
         mp->queries_sent >= br->multicast_last_member_count)
         goto out;
 
···
         goto err;
 
     if (!port) {
-        hlist_add_head(&mp->mglist, &br->mglist);
+        mp->mglist = true;
         mod_timer(&mp->timer, now + br->multicast_membership_interval);
         goto out;
     }
···
 
     max_delay *= br->multicast_last_member_count;
 
-    if (!hlist_unhashed(&mp->mglist) &&
+    if (mp->mglist &&
         (timer_pending(&mp->timer) ?
          time_after(mp->timer.expires, now + max_delay) :
          try_to_del_timer_sync(&mp->timer) >= 0))
···
         if (timer_pending(&p->timer) ?
             time_after(p->timer.expires, now + max_delay) :
             try_to_del_timer_sync(&p->timer) >= 0)
-            mod_timer(&mp->timer, now + max_delay);
+            mod_timer(&p->timer, now + max_delay);
     }
 
 out:
···
     goto out;
 
     max_delay *= br->multicast_last_member_count;
-    if (!hlist_unhashed(&mp->mglist) &&
+    if (mp->mglist &&
         (timer_pending(&mp->timer) ?
          time_after(mp->timer.expires, now + max_delay) :
          try_to_del_timer_sync(&mp->timer) >= 0))
···
         if (timer_pending(&p->timer) ?
             time_after(p->timer.expires, now + max_delay) :
             try_to_del_timer_sync(&p->timer) >= 0)
-            mod_timer(&mp->timer, now + max_delay);
+            mod_timer(&p->timer, now + max_delay);
     }
 
 out:
···
         br->multicast_last_member_interval;
 
     if (!port) {
-        if (!hlist_unhashed(&mp->mglist) &&
+        if (mp->mglist &&
             (timer_pending(&mp->timer) ?
              time_after(mp->timer.expires, time) :
              try_to_del_timer_sync(&mp->timer) >= 0)) {
+1 -2
net/bridge/br_private.h
···
 struct net_bridge_mdb_entry
 {
     struct hlist_node hlist[2];
-    struct hlist_node mglist;
     struct net_bridge *br;
     struct net_bridge_port_group __rcu *ports;
     struct rcu_head rcu;
     struct timer_list timer;
     struct timer_list query_timer;
     struct br_ip addr;
+    bool mglist;
     u32 queries_sent;
 };
···
     spinlock_t multicast_lock;
     struct net_bridge_mdb_htable __rcu *mdb;
     struct hlist_head router_list;
-    struct hlist_head mglist;
 
     struct timer_list multicast_router_timer;
     struct timer_list multicast_querier_timer;
+7 -2
net/core/dev.c
···
 
 static int __dev_close(struct net_device *dev)
 {
+    int retval;
     LIST_HEAD(single);
 
     list_add(&dev->unreg_list, &single);
-    return __dev_close_many(&single);
+    retval = __dev_close_many(&single);
+    list_del(&single);
+    return retval;
 }
 
 int dev_close_many(struct list_head *head)
···
 
     list_add(&dev->unreg_list, &single);
     dev_close_many(&single);
-
+    list_del(&single);
     return 0;
 }
 EXPORT_SYMBOL(dev_close);
···
 
     list_add(&dev->unreg_list, &single);
     rollback_registered_many(&single);
+    list_del(&single);
 }
 
 unsigned long netdev_fix_features(unsigned long features, const char *name)
···
         }
     }
     unregister_netdevice_many(&dev_kill_list);
+    list_del(&dev_kill_list);
     rtnl_unlock();
 }
+8 -1
net/dcb/dcbnl.c
···
     dcb->cmd = DCB_CMD_GAPP;
 
     app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
+    if (!app_nest)
+        goto out_cancel;
+
     ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
     if (ret)
         goto out_cancel;
···
 u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
 {
     struct dcb_app_type *itr;
+    struct dcb_app_type event;
+
+    memcpy(&event.name, dev->name, sizeof(event.name));
+    memcpy(&event.app, new, sizeof(event.app));
 
     spin_lock(&dcb_lock);
     /* Search for existing match and replace */
···
     }
 out:
     spin_unlock(&dcb_lock);
-    call_dcbevent_notifiers(DCB_APP_EVENT, new);
+    call_dcbevent_notifiers(DCB_APP_EVENT, &event);
     return 0;
 }
 EXPORT_SYMBOL(dcb_setapp);
+20 -10
net/ipv4/devinet.c
···
     return mtu >= 68;
 }
 
+static void inetdev_send_gratuitous_arp(struct net_device *dev,
+                                        struct in_device *in_dev)
+
+{
+    struct in_ifaddr *ifa = in_dev->ifa_list;
+
+    if (!ifa)
+        return;
+
+    arp_send(ARPOP_REQUEST, ETH_P_ARP,
+             ifa->ifa_address, dev,
+             ifa->ifa_address, NULL,
+             dev->dev_addr, NULL);
+}
+
 /* Called only under RTNL semaphore */
 
 static int inetdev_event(struct notifier_block *this, unsigned long event,
···
         }
         ip_mc_up(in_dev);
         /* fall through */
-    case NETDEV_NOTIFY_PEERS:
     case NETDEV_CHANGEADDR:
+        if (!IN_DEV_ARP_NOTIFY(in_dev))
+            break;
+        /* fall through */
+    case NETDEV_NOTIFY_PEERS:
         /* Send gratuitous ARP to notify of link change */
-        if (IN_DEV_ARP_NOTIFY(in_dev)) {
-            struct in_ifaddr *ifa = in_dev->ifa_list;
-
-            if (ifa)
-                arp_send(ARPOP_REQUEST, ETH_P_ARP,
-                         ifa->ifa_address, dev,
-                         ifa->ifa_address, NULL,
-                         dev->dev_addr, NULL);
-        }
+        inetdev_send_gratuitous_arp(dev, in_dev);
         break;
     case NETDEV_DOWN:
         ip_mc_down(in_dev);
+1
net/ipv4/ip_gre.c
···
     .fl4_dst = dst,
     .fl4_src = tiph->saddr,
     .fl4_tos = RT_TOS(tos),
+    .proto = IPPROTO_GRE,
     .fl_gre_key = tunnel->parms.o_key
 };
 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
+1
net/ipv4/route.c
···
     .destroy = ipv4_dst_destroy,
     .check = ipv4_blackhole_dst_check,
     .default_mtu = ipv4_blackhole_default_mtu,
+    .default_advmss = ipv4_default_advmss,
     .update_pmtu = ipv4_rt_blackhole_update_pmtu,
 };
+1
net/ipv6/route.c
···
     .destroy = ip6_dst_destroy,
     .check = ip6_dst_check,
     .default_mtu = ip6_blackhole_default_mtu,
+    .default_advmss = ip6_default_advmss,
     .update_pmtu = ip6_rt_blackhole_update_pmtu,
 };
+2
net/mac80211/util.c
···
 switch (sdata->vif.type) {
 case NL80211_IFTYPE_STATION:
     changed |= BSS_CHANGED_ASSOC;
+    mutex_lock(&sdata->u.mgd.mtx);
     ieee80211_bss_info_change_notify(sdata, changed);
+    mutex_unlock(&sdata->u.mgd.mtx);
     break;
 case NL80211_IFTYPE_ADHOC:
     changed |= BSS_CHANGED_IBSS;
+2 -1
net/netfilter/core.c
···
 
     /* Optimization: we don't need to hold module
        reference here, since function can't sleep. --RR */
+repeat:
     verdict = elem->hook(hook, skb, indev, outdev, okfn);
     if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
···
 #endif
         if (verdict != NF_REPEAT)
             return verdict;
-        *i = (*i)->prev;
+        goto repeat;
     }
 }
 return NF_ACCEPT;
+5 -2
net/xfrm/xfrm_policy.c
···
     default:
         BUG();
     }
-    xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
+    xdst = dst_alloc(dst_ops);
     xfrm_policy_put_afinfo(afinfo);
 
-    xdst->flo.ops = &xfrm_bundle_fc_ops;
+    if (likely(xdst))
+        xdst->flo.ops = &xfrm_bundle_fc_ops;
+    else
+        xdst = ERR_PTR(-ENOBUFS);
 
     return xdst;
 }