Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix namespace init and cleanup in phonet to fix some oopses, from
Eric W. Biederman.

2) Missing kfree_skb() in AF_KEY, from Julia Lawall.

3) Refcount leak and source address handling fix in l2tp from James
Chapman.

4) Memory leak fix in CAIF from Tomasz Gregorek.

5) When routes are cloned from ipv6 addrconf routes, we don't process
expirations properly. Fix from Gao Feng.

6) Fix panic on DMA errors in atl1 driver, from Tony Zelenoff.

7) Only enable interrupts in 8139cp driver after we've registered the
IRQ handler. From Jason Wang.

8) Fix too many reads of KS_CIDER register in ks8851 during probe,
fixing crashes on spurious interrupts. From Matt Renzelmann.

9) Missing include in ath5k driver and missing iounmap on probe
failure, from Jonathan Bither.

10) Fix RX packet handling in smsc911x driver, from Will Deacon.

11) Fix ixgbe WoL on fiber by leaving the laser on during shutdown.

12) ks8851 needs MAX_RECV_FRAMES increased otherwise the internal MAC
buffers are easily overflowed. Fix from Davide Ciminaghi.

13) Fix memory leaks in peak_usb CAN driver, from Jesper Juhl.

14) gred packet scheduler can oops in WRED mode when doing a netlink
dump. Fix from David Ward.

15) Fix MTU in USB smsc75xx driver, from Stephane Fillod.

16) Dummy device needs ->ndo_uninit handler to properly handle
->ndo_init failures. From Hiroaki SHIMODA.

17) Fix TX fragmentation in ath9k driver, from Sujith Manoharan.

18) Missing RTNL lock in ixgbe PM resume, from Benjamin Poirier.

19) Missing iounmap in farsync WAN driver, from Julia Lawall.

20) With LRO/GRO, tcp_grow_window() is easily tricked into not growing
the receive window properly, and this hurts performance. Fix from
Eric Dumazet.

21) Network namespace init failure can leak net_generic data, fix from
Julian Anastasov.

22) Fix skb_over_panic due to mis-accounting in TCP for partially ACK'd
SKBs. From Eric Dumazet.

23) New IDs for qmi_wwan driver, from Bjørn Mork.

24) Fix races in ax25_exit(), from Eric W. Biederman.

25) IPV6 TCP doesn't handle TCP_MAXSEG socket option properly, copy over
logic from the IPV4 side. From Neal Cardwell.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (59 commits)
tcp: fix TCP_MAXSEG for established IPv6 passive sockets
drivers/net: Do not free an IRQ if its request failed
drop_monitor: allow more events per second
ks8851: Fix request_irq/free_irq mismatch
net/hyperv: Adding cancellation to ensure rndis filter is closed
ks8851: Fix mutex deadlock in ks8851_net_stop()
net ax25: Reorder ax25_exit to remove races.
icplus: fix interrupt for IC+ 101A/G and 1001LF
net: qmi_wwan: support Sierra Wireless MC77xx devices in QMI mode
bnx2x: off by one in bnx2x_ets_e3b0_sp_pri_to_cos_set()
ksz884x: don't copy too much in netdev_set_mac_address()
tcp: fix retransmit of partially acked frames
netns: do not leak net_generic data on failed init
net/sock.h: fix sk_peek_off kernel-doc warning
tcp: fix tcp_grow_window() for large incoming frames
drivers/net/wan/farsync.c: add missing iounmap
davinci_mdio: Fix MDIO timeout check
ipv6: clean up rt6_clean_expires
ipv6: fix rt6_update_expires
arcnet: rimi: Fix device name in debug output
...

+407 -238
+3 -2
MAINTAINERS
··· 3592 3592 F: drivers/net/wireless/iwlegacy/ 3593 3593 3594 3594 INTEL WIRELESS WIFI LINK (iwlwifi) 3595 + M: Johannes Berg <johannes.berg@intel.com> 3595 3596 M: Wey-Yi Guy <wey-yi.w.guy@intel.com> 3596 3597 M: Intel Linux Wireless <ilw@linux.intel.com> 3597 3598 L: linux-wireless@vger.kernel.org ··· 7579 7578 F: fs/xfs/ 7580 7579 7581 7580 XILINX AXI ETHERNET DRIVER 7582 - M: Ariane Keller <ariane.keller@tik.ee.ethz.ch> 7583 - M: Daniel Borkmann <daniel.borkmann@tik.ee.ethz.ch> 7581 + M: Anirudha Sarangi <anirudh@xilinx.com> 7582 + M: John Linn <John.Linn@xilinx.com> 7584 7583 S: Maintained 7585 7584 F: drivers/net/ethernet/xilinx/xilinx_axienet* 7586 7585
+5 -2
drivers/bcma/sprom.c
··· 404 404 return -EOPNOTSUPP; 405 405 406 406 if (!bcma_sprom_ext_available(bus)) { 407 + bool sprom_onchip; 408 + 407 409 /* 408 410 * External SPROM takes precedence so check 409 411 * on-chip OTP only when no external SPROM 410 412 * is present. 411 413 */ 412 - if (bcma_sprom_onchip_available(bus)) { 414 + sprom_onchip = bcma_sprom_onchip_available(bus); 415 + if (sprom_onchip) { 413 416 /* determine offset */ 414 417 offset = bcma_sprom_onchip_offset(bus); 415 418 } 416 - if (!offset) { 419 + if (!offset || !sprom_onchip) { 417 420 /* 418 421 * Maybe there is no SPROM on the device? 419 422 * Now we ask the arch code if there is some sprom
+4 -4
drivers/net/arcnet/arc-rimi.c
··· 89 89 BUGLVL(D_NORMAL) printk(VERSION); 90 90 BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n"); 91 91 92 - BUGMSG(D_NORMAL, "Given: node %02Xh, shmem %lXh, irq %d\n", 92 + BUGLVL(D_NORMAL) printk("Given: node %02Xh, shmem %lXh, irq %d\n", 93 93 dev->dev_addr[0], dev->mem_start, dev->irq); 94 94 95 95 if (dev->mem_start <= 0 || dev->irq <= 0) { 96 - BUGMSG(D_NORMAL, "No autoprobe for RIM I; you " 96 + BUGLVL(D_NORMAL) printk("No autoprobe for RIM I; you " 97 97 "must specify the shmem and irq!\n"); 98 98 return -ENODEV; 99 99 } 100 100 if (dev->dev_addr[0] == 0) { 101 - BUGMSG(D_NORMAL, "You need to specify your card's station " 101 + BUGLVL(D_NORMAL) printk("You need to specify your card's station " 102 102 "ID!\n"); 103 103 return -ENODEV; 104 104 } ··· 109 109 * will be taken. 110 110 */ 111 111 if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) { 112 - BUGMSG(D_NORMAL, "Card memory already allocated\n"); 112 + BUGLVL(D_NORMAL) printk("Card memory already allocated\n"); 113 113 return -ENODEV; 114 114 } 115 115 return arcrimi_found(dev);
+5 -4
drivers/net/caif/caif_hsi.c
··· 744 744 size_t fifo_occupancy = 0; 745 745 746 746 /* Wakeup timeout */ 747 - dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", 747 + dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n", 748 748 __func__); 749 749 750 750 /* Check FIFO to check if modem has sent something. */ 751 751 WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, 752 752 &fifo_occupancy)); 753 753 754 - dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n", 754 + dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n", 755 755 __func__, (unsigned) fifo_occupancy); 756 756 757 757 /* Check if we misssed the interrupt. */ ··· 1210 1210 1211 1211 static void cfhsi_shutdown(struct cfhsi *cfhsi) 1212 1212 { 1213 - u8 *tx_buf, *rx_buf; 1213 + u8 *tx_buf, *rx_buf, *flip_buf; 1214 1214 1215 1215 /* Stop TXing */ 1216 1216 netif_tx_stop_all_queues(cfhsi->ndev); ··· 1234 1234 /* Store bufferes: will be freed later. */ 1235 1235 tx_buf = cfhsi->tx_buf; 1236 1236 rx_buf = cfhsi->rx_buf; 1237 - 1237 + flip_buf = cfhsi->rx_flip_buf; 1238 1238 /* Flush transmit queues. */ 1239 1239 cfhsi_abort_tx(cfhsi); 1240 1240 ··· 1247 1247 /* Free buffers. */ 1248 1248 kfree(tx_buf); 1249 1249 kfree(rx_buf); 1250 + kfree(flip_buf); 1250 1251 } 1251 1252 1252 1253 int cfhsi_remove(struct platform_device *pdev)
+2
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
··· 875 875 PCAN_USBPRO_INFO_FW, 876 876 &fi, sizeof(fi)); 877 877 if (err) { 878 + kfree(usb_if); 878 879 dev_err(dev->netdev->dev.parent, 879 880 "unable to read %s firmware info (err %d)\n", 880 881 pcan_usb_pro.name, err); ··· 886 885 PCAN_USBPRO_INFO_BL, 887 886 &bi, sizeof(bi)); 888 887 if (err) { 888 + kfree(usb_if); 889 889 dev_err(dev->netdev->dev.parent, 890 890 "unable to read %s bootloader info (err %d)\n", 891 891 pcan_usb_pro.name, err);
+3 -3
drivers/net/dummy.c
··· 107 107 return 0; 108 108 } 109 109 110 - static void dummy_dev_free(struct net_device *dev) 110 + static void dummy_dev_uninit(struct net_device *dev) 111 111 { 112 112 free_percpu(dev->dstats); 113 - free_netdev(dev); 114 113 } 115 114 116 115 static const struct net_device_ops dummy_netdev_ops = { 117 116 .ndo_init = dummy_dev_init, 117 + .ndo_uninit = dummy_dev_uninit, 118 118 .ndo_start_xmit = dummy_xmit, 119 119 .ndo_validate_addr = eth_validate_addr, 120 120 .ndo_set_rx_mode = set_multicast_list, ··· 128 128 129 129 /* Initialize the device structure. */ 130 130 dev->netdev_ops = &dummy_netdev_ops; 131 - dev->destructor = dummy_dev_free; 131 + dev->destructor = free_netdev; 132 132 133 133 /* Fill in device structure with ethernet-generic values. */ 134 134 dev->tx_queue_len = 0;
+5 -7
drivers/net/ethernet/atheros/atlx/atl1.c
··· 2476 2476 "pcie phy link down %x\n", status); 2477 2477 if (netif_running(adapter->netdev)) { /* reset MAC */ 2478 2478 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 2479 - schedule_work(&adapter->pcie_dma_to_rst_task); 2479 + schedule_work(&adapter->reset_dev_task); 2480 2480 return IRQ_HANDLED; 2481 2481 } 2482 2482 } ··· 2488 2488 "pcie DMA r/w error (status = 0x%x)\n", 2489 2489 status); 2490 2490 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 2491 - schedule_work(&adapter->pcie_dma_to_rst_task); 2491 + schedule_work(&adapter->reset_dev_task); 2492 2492 return IRQ_HANDLED; 2493 2493 } 2494 2494 ··· 2633 2633 atl1_clean_rx_ring(adapter); 2634 2634 } 2635 2635 2636 - static void atl1_tx_timeout_task(struct work_struct *work) 2636 + static void atl1_reset_dev_task(struct work_struct *work) 2637 2637 { 2638 2638 struct atl1_adapter *adapter = 2639 - container_of(work, struct atl1_adapter, tx_timeout_task); 2639 + container_of(work, struct atl1_adapter, reset_dev_task); 2640 2640 struct net_device *netdev = adapter->netdev; 2641 2641 2642 2642 netif_device_detach(netdev); ··· 3038 3038 (unsigned long)adapter); 3039 3039 adapter->phy_timer_pending = false; 3040 3040 3041 - INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); 3041 + INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task); 3042 3042 3043 3043 INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); 3044 - 3045 - INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task); 3046 3044 3047 3045 err = register_netdev(netdev); 3048 3046 if (err)
+1 -2
drivers/net/ethernet/atheros/atlx/atl1.h
··· 758 758 u16 link_speed; 759 759 u16 link_duplex; 760 760 spinlock_t lock; 761 - struct work_struct tx_timeout_task; 761 + struct work_struct reset_dev_task; 762 762 struct work_struct link_chg_task; 763 - struct work_struct pcie_dma_to_rst_task; 764 763 765 764 struct timer_list phy_config_timer; 766 765 bool phy_timer_pending;
+1 -1
drivers/net/ethernet/atheros/atlx/atlx.c
··· 194 194 { 195 195 struct atlx_adapter *adapter = netdev_priv(netdev); 196 196 /* Do the reset outside of interrupt context */ 197 - schedule_work(&adapter->tx_timeout_task); 197 + schedule_work(&adapter->reset_dev_task); 198 198 } 199 199 200 200 /*
+6 -6
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
··· 942 942 const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : 943 943 DCBX_E3B0_MAX_NUM_COS_PORT0; 944 944 945 + if (pri >= max_num_of_cos) { 946 + DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " 947 + "parameter Illegal strict priority\n"); 948 + return -EINVAL; 949 + } 950 + 945 951 if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { 946 952 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " 947 953 "parameter There can't be two COS's with " 948 954 "the same strict pri\n"); 949 955 return -EINVAL; 950 - } 951 - 952 - if (pri > max_num_of_cos) { 953 - DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " 954 - "parameter Illegal strict priority\n"); 955 - return -EINVAL; 956 956 } 957 957 958 958 sp_pri_to_cos[pri] = cos_entry;
+10 -5
drivers/net/ethernet/intel/e1000e/ich8lan.c
··· 1310 1310 1311 1311 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) 1312 1312 oem_reg |= HV_OEM_BITS_LPLU; 1313 - 1314 - /* Set Restart auto-neg to activate the bits */ 1315 - if (!hw->phy.ops.check_reset_block(hw)) 1316 - oem_reg |= HV_OEM_BITS_RESTART_AN; 1317 1313 } else { 1318 1314 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | 1319 1315 E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) ··· 1319 1323 E1000_PHY_CTRL_NOND0A_LPLU)) 1320 1324 oem_reg |= HV_OEM_BITS_LPLU; 1321 1325 } 1326 + 1327 + /* Set Restart auto-neg to activate the bits */ 1328 + if ((d0_state || (hw->mac.type != e1000_pchlan)) && 1329 + !hw->phy.ops.check_reset_block(hw)) 1330 + oem_reg |= HV_OEM_BITS_RESTART_AN; 1322 1331 1323 1332 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); 1324 1333 ··· 3683 3682 3684 3683 if (hw->mac.type >= e1000_pchlan) { 3685 3684 e1000_oem_bits_config_ich8lan(hw, false); 3686 - e1000_phy_hw_reset_ich8lan(hw); 3685 + 3686 + /* Reset PHY to activate OEM bits on 82577/8 */ 3687 + if (hw->mac.type == e1000_pchlan) 3688 + e1000e_phy_hw_reset_generic(hw); 3689 + 3687 3690 ret_val = hw->phy.ops.acquire(hw); 3688 3691 if (ret_val) 3689 3692 return;
+10
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
··· 622 622 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 623 623 set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); 624 624 625 + #ifdef IXGBE_FCOE 626 + if (adapter->netdev->features & NETIF_F_FCOE_MTU) { 627 + struct ixgbe_ring_feature *f; 628 + f = &adapter->ring_feature[RING_F_FCOE]; 629 + if ((rxr_idx >= f->mask) && 630 + (rxr_idx < f->mask + f->indices)) 631 + set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state); 632 + } 633 + 634 + #endif /* IXGBE_FCOE */ 625 635 /* apply Rx specific ring traits */ 626 636 ring->count = adapter->rx_ring_count; 627 637 ring->queue_index = rxr_idx;
+12 -8
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 3154 3154 set_ring_rsc_enabled(rx_ring); 3155 3155 else 3156 3156 clear_ring_rsc_enabled(rx_ring); 3157 - #ifdef IXGBE_FCOE 3158 - if (netdev->features & NETIF_F_FCOE_MTU) { 3159 - struct ixgbe_ring_feature *f; 3160 - f = &adapter->ring_feature[RING_F_FCOE]; 3161 - if ((i >= f->mask) && (i < f->mask + f->indices)) 3162 - set_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state); 3163 - } 3164 - #endif /* IXGBE_FCOE */ 3165 3157 } 3166 3158 } 3167 3159 ··· 4828 4836 4829 4837 pci_wake_from_d3(pdev, false); 4830 4838 4839 + rtnl_lock(); 4831 4840 err = ixgbe_init_interrupt_scheme(adapter); 4841 + rtnl_unlock(); 4832 4842 if (err) { 4833 4843 e_dev_err("Cannot initialize interrupts for device\n"); 4834 4844 return err; ··· 4886 4892 #endif 4887 4893 if (wufc) { 4888 4894 ixgbe_set_rx_mode(netdev); 4895 + 4896 + /* 4897 + * enable the optics for both mult-speed fiber and 4898 + * 82599 SFP+ fiber as we can WoL. 4899 + */ 4900 + if (hw->mac.ops.enable_tx_laser && 4901 + (hw->phy.multispeed_fiber || 4902 + (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber && 4903 + hw->mac.type == ixgbe_mac_82599EB))) 4904 + hw->mac.ops.enable_tx_laser(hw); 4889 4905 4890 4906 /* turn on all-multi mode if wake on multicast is enabled */ 4891 4907 if (wufc & IXGBE_WUFC_MC) {
+11 -10
drivers/net/ethernet/micrel/ks8851.c
··· 889 889 netif_stop_queue(dev); 890 890 891 891 mutex_lock(&ks->lock); 892 + /* turn off the IRQs and ack any outstanding */ 893 + ks8851_wrreg16(ks, KS_IER, 0x0000); 894 + ks8851_wrreg16(ks, KS_ISR, 0xffff); 895 + mutex_unlock(&ks->lock); 892 896 893 897 /* stop any outstanding work */ 894 898 flush_work(&ks->irq_work); 895 899 flush_work(&ks->tx_work); 896 900 flush_work(&ks->rxctrl_work); 897 901 898 - /* turn off the IRQs and ack any outstanding */ 899 - ks8851_wrreg16(ks, KS_IER, 0x0000); 900 - ks8851_wrreg16(ks, KS_ISR, 0xffff); 901 - 902 + mutex_lock(&ks->lock); 902 903 /* shutdown RX process */ 903 904 ks8851_wrreg16(ks, KS_RXCR1, 0x0000); 904 905 ··· 908 907 909 908 /* set powermode to soft power down to save power */ 910 909 ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); 910 + mutex_unlock(&ks->lock); 911 911 912 912 /* ensure any queued tx buffers are dumped */ 913 913 while (!skb_queue_empty(&ks->txq)) { ··· 920 918 dev_kfree_skb(txb); 921 919 } 922 920 923 - mutex_unlock(&ks->lock); 924 921 return 0; 925 922 } 926 923 ··· 1419 1418 struct net_device *ndev; 1420 1419 struct ks8851_net *ks; 1421 1420 int ret; 1421 + unsigned cider; 1422 1422 1423 1423 ndev = alloc_etherdev(sizeof(struct ks8851_net)); 1424 1424 if (!ndev) ··· 1486 1484 ks8851_soft_reset(ks, GRR_GSR); 1487 1485 1488 1486 /* simple check for a valid chip being connected to the bus */ 1489 - 1490 - if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { 1487 + cider = ks8851_rdreg16(ks, KS_CIDER); 1488 + if ((cider & ~CIDER_REV_MASK) != CIDER_ID) { 1491 1489 dev_err(&spi->dev, "failed to read device ID\n"); 1492 1490 ret = -ENODEV; 1493 1491 goto err_id; ··· 1518 1516 } 1519 1517 1520 1518 netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", 1521 - CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), 1522 - ndev->dev_addr, ndev->irq, 1519 + CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq, 1523 1520 ks->rc_ccr & CCR_EEPROM ? 
"has" : "no"); 1524 1521 1525 1522 return 0; 1526 1523 1527 1524 1528 1525 err_netdev: 1529 - free_irq(ndev->irq, ndev); 1526 + free_irq(ndev->irq, ks); 1530 1527 1531 1528 err_id: 1532 1529 err_irq:
+1 -1
drivers/net/ethernet/micrel/ks8851_mll.c
··· 40 40 #define DRV_NAME "ks8851_mll" 41 41 42 42 static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; 43 - #define MAX_RECV_FRAMES 32 43 + #define MAX_RECV_FRAMES 255 44 44 #define MAX_BUF_SIZE 2048 45 45 #define TX_BUF_SIZE 2000 46 46 #define RX_BUF_SIZE 2000
+1 -1
drivers/net/ethernet/micrel/ksz884x.c
··· 5675 5675 memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); 5676 5676 } 5677 5677 5678 - memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN); 5678 + memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN); 5679 5679 5680 5680 interrupt = hw_block_intr(hw); 5681 5681
+8 -2
drivers/net/ethernet/realtek/8139cp.c
··· 958 958 cpw8(Cmd, RxOn | TxOn); 959 959 } 960 960 961 + static void cp_enable_irq(struct cp_private *cp) 962 + { 963 + cpw16_f(IntrMask, cp_intr_mask); 964 + } 965 + 961 966 static void cp_init_hw (struct cp_private *cp) 962 967 { 963 968 struct net_device *dev = cp->dev; ··· 1001 996 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); 1002 997 1003 998 cpw16(MultiIntr, 0); 1004 - 1005 - cpw16_f(IntrMask, cp_intr_mask); 1006 999 1007 1000 cpw8_f(Cfg9346, Cfg9346_Lock); 1008 1001 } ··· 1132 1129 rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev); 1133 1130 if (rc) 1134 1131 goto err_out_hw; 1132 + 1133 + cp_enable_irq(cp); 1135 1134 1136 1135 netif_carrier_off(dev); 1137 1136 mii_check_media(&cp->mii_if, netif_msg_link(cp), true); ··· 2036 2031 /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ 2037 2032 cp_init_rings_index (cp); 2038 2033 cp_init_hw (cp); 2034 + cp_enable_irq(cp); 2039 2035 netif_start_queue (dev); 2040 2036 2041 2037 spin_lock_irqsave (&cp->lock, flags);
+6 -11
drivers/net/ethernet/smsc/smsc911x.c
··· 1166 1166 1167 1167 /* Quickly dumps bad packets */ 1168 1168 static void 1169 - smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) 1169 + smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords) 1170 1170 { 1171 - unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2; 1172 - 1173 1171 if (likely(pktwords >= 4)) { 1174 1172 unsigned int timeout = 500; 1175 1173 unsigned int val; ··· 1231 1233 continue; 1232 1234 } 1233 1235 1234 - skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); 1236 + skb = netdev_alloc_skb(dev, pktwords << 2); 1235 1237 if (unlikely(!skb)) { 1236 1238 SMSC_WARN(pdata, rx_err, 1237 1239 "Unable to allocate skb for rx packet"); ··· 1241 1243 break; 1242 1244 } 1243 1245 1244 - skb->data = skb->head; 1245 - skb_reset_tail_pointer(skb); 1246 + pdata->ops->rx_readfifo(pdata, 1247 + (unsigned int *)skb->data, pktwords); 1246 1248 1247 1249 /* Align IP on 16B boundary */ 1248 1250 skb_reserve(skb, NET_IP_ALIGN); 1249 1251 skb_put(skb, pktlength - 4); 1250 - pdata->ops->rx_readfifo(pdata, 1251 - (unsigned int *)skb->head, pktwords); 1252 1252 skb->protocol = eth_type_trans(skb, dev); 1253 1253 skb_checksum_none_assert(skb); 1254 1254 netif_receive_skb(skb); ··· 1561 1565 smsc911x_reg_write(pdata, FIFO_INT, temp); 1562 1566 1563 1567 /* set RX Data offset to 2 bytes for alignment */ 1564 - smsc911x_reg_write(pdata, RX_CFG, (2 << 8)); 1568 + smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8)); 1565 1569 1566 1570 /* enable NAPI polling before enabling RX interrupts */ 1567 1571 napi_enable(&pdata->napi); ··· 2378 2382 SET_NETDEV_DEV(dev, &pdev->dev); 2379 2383 2380 2384 pdata = netdev_priv(dev); 2381 - 2382 2385 dev->irq = irq_res->start; 2383 2386 irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; 2384 2387 pdata->ioaddr = ioremap_nocache(res->start, res_size); ··· 2441 2446 if (retval) { 2442 2447 SMSC_WARN(pdata, probe, 2443 2448 "Unable to claim requested irq: %d", dev->irq); 2444 - goto 
out_free_irq; 2449 + goto out_disable_resources; 2445 2450 } 2446 2451 2447 2452 retval = register_netdev(dev);
+5
drivers/net/ethernet/ti/davinci_mdio.c
··· 181 181 __davinci_mdio_reset(data); 182 182 return -EAGAIN; 183 183 } 184 + 185 + reg = __raw_readl(&regs->user[0].access); 186 + if ((reg & USERACCESS_GO) == 0) 187 + return 0; 188 + 184 189 dev_err(data->dev, "timed out waiting for user access\n"); 185 190 return -ETIMEDOUT; 186 191 }
+1 -3
drivers/net/ethernet/xilinx/xilinx_axienet.h
··· 2 2 * Definitions for Xilinx Axi Ethernet device driver. 3 3 * 4 4 * Copyright (c) 2009 Secret Lab Technologies, Ltd. 5 - * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 6 - * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 7 - * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 5 + * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. 8 6 */ 9 7 10 8 #ifndef XILINX_AXIENET_H
+3 -3
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 4 4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi 5 5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> 6 6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. 7 - * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 8 - * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 9 - * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 7 + * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu> 8 + * Copyright (c) 2010 - 2011 PetaLogix 9 + * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. 10 10 * 11 11 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 12 12 * and Spartan6.
+3 -3
drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
··· 2 2 * MDIO bus driver for the Xilinx Axi Ethernet device 3 3 * 4 4 * Copyright (c) 2009 Secret Lab Technologies, Ltd. 5 - * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 6 - * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 7 - * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 5 + * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu> 6 + * Copyright (c) 2010 - 2011 PetaLogix 7 + * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. 8 8 */ 9 9 10 10 #include <linux/of_address.h>
+14 -24
drivers/net/hyperv/netvsc_drv.c
··· 44 44 /* point back to our device context */ 45 45 struct hv_device *device_ctx; 46 46 struct delayed_work dwork; 47 + struct work_struct work; 47 48 }; 48 49 49 50 ··· 52 51 module_param(ring_size, int, S_IRUGO); 53 52 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); 54 53 55 - struct set_multicast_work { 56 - struct work_struct work; 57 - struct net_device *net; 58 - }; 59 - 60 54 static void do_set_multicast(struct work_struct *w) 61 55 { 62 - struct set_multicast_work *swk = 63 - container_of(w, struct set_multicast_work, work); 64 - struct net_device *net = swk->net; 65 - 66 - struct net_device_context *ndevctx = netdev_priv(net); 56 + struct net_device_context *ndevctx = 57 + container_of(w, struct net_device_context, work); 67 58 struct netvsc_device *nvdev; 68 59 struct rndis_device *rdev; 69 60 70 61 nvdev = hv_get_drvdata(ndevctx->device_ctx); 71 - if (nvdev == NULL) 72 - goto out; 62 + if (nvdev == NULL || nvdev->ndev == NULL) 63 + return; 73 64 74 65 rdev = nvdev->extension; 75 66 if (rdev == NULL) 76 - goto out; 67 + return; 77 68 78 - if (net->flags & IFF_PROMISC) 69 + if (nvdev->ndev->flags & IFF_PROMISC) 79 70 rndis_filter_set_packet_filter(rdev, 80 71 NDIS_PACKET_TYPE_PROMISCUOUS); 81 72 else ··· 75 82 NDIS_PACKET_TYPE_BROADCAST | 76 83 NDIS_PACKET_TYPE_ALL_MULTICAST | 77 84 NDIS_PACKET_TYPE_DIRECTED); 78 - 79 - out: 80 - kfree(w); 81 85 } 82 86 83 87 static void netvsc_set_multicast_list(struct net_device *net) 84 88 { 85 - struct set_multicast_work *swk = 86 - kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC); 87 - if (swk == NULL) 88 - return; 89 + struct net_device_context *net_device_ctx = netdev_priv(net); 89 90 90 - swk->net = net; 91 - INIT_WORK(&swk->work, do_set_multicast); 92 - schedule_work(&swk->work); 91 + schedule_work(&net_device_ctx->work); 93 92 } 94 93 95 94 static int netvsc_open(struct net_device *net) ··· 110 125 111 126 netif_tx_disable(net); 112 127 128 + /* Make sure netvsc_set_multicast_list doesn't 
re-enable filter! */ 129 + cancel_work_sync(&net_device_ctx->work); 113 130 ret = rndis_filter_close(device_obj); 114 131 if (ret != 0) 115 132 netdev_err(net, "unable to close device (ret %d).\n", ret); ··· 322 335 323 336 nvdev->start_remove = true; 324 337 cancel_delayed_work_sync(&ndevctx->dwork); 338 + cancel_work_sync(&ndevctx->work); 325 339 netif_tx_disable(ndev); 326 340 rndis_filter_device_remove(hdev); 327 341 ··· 391 403 net_device_ctx->device_ctx = dev; 392 404 hv_set_drvdata(dev, net); 393 405 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); 406 + INIT_WORK(&net_device_ctx->work, do_set_multicast); 394 407 395 408 net->netdev_ops = &device_ops; 396 409 ··· 445 456 446 457 ndev_ctx = netdev_priv(net); 447 458 cancel_delayed_work_sync(&ndev_ctx->dwork); 459 + cancel_work_sync(&ndev_ctx->work); 448 460 449 461 /* Stop outbound asap */ 450 462 netif_tx_disable(net);
+11 -1
drivers/net/phy/icplus.c
··· 40 40 #define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */ 41 41 #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ 42 42 #define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ 43 + #define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ 43 44 44 45 static int ip175c_config_init(struct phy_device *phydev) 45 46 { ··· 186 185 return 0; 187 186 } 188 187 188 + static int ip101a_g_ack_interrupt(struct phy_device *phydev) 189 + { 190 + int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS); 191 + if (err < 0) 192 + return err; 193 + 194 + return 0; 195 + } 196 + 189 197 static struct phy_driver ip175c_driver = { 190 198 .phy_id = 0x02430d80, 191 199 .name = "ICPlus IP175C", ··· 214 204 .phy_id_mask = 0x0ffffff0, 215 205 .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | 216 206 SUPPORTED_Asym_Pause, 217 - .flags = PHY_HAS_INTERRUPT, 218 207 .config_init = &ip1001_config_init, 219 208 .config_aneg = &genphy_config_aneg, 220 209 .read_status = &genphy_read_status, ··· 229 220 .features = PHY_BASIC_FEATURES | SUPPORTED_Pause | 230 221 SUPPORTED_Asym_Pause, 231 222 .flags = PHY_HAS_INTERRUPT, 223 + .ack_interrupt = ip101a_g_ack_interrupt, 232 224 .config_init = &ip101a_g_config_init, 233 225 .config_aneg = &genphy_config_aneg, 234 226 .read_status = &genphy_read_status,
+6 -9
drivers/net/ppp/ppp_generic.c
··· 235 235 /* Prototypes. */ 236 236 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, 237 237 struct file *file, unsigned int cmd, unsigned long arg); 238 - static int ppp_xmit_process(struct ppp *ppp); 238 + static void ppp_xmit_process(struct ppp *ppp); 239 239 static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); 240 240 static void ppp_push(struct ppp *ppp); 241 241 static void ppp_channel_push(struct channel *pch); ··· 969 969 put_unaligned_be16(proto, pp); 970 970 971 971 skb_queue_tail(&ppp->file.xq, skb); 972 - if (!ppp_xmit_process(ppp)) 973 - netif_stop_queue(dev); 972 + ppp_xmit_process(ppp); 974 973 return NETDEV_TX_OK; 975 974 976 975 outf: ··· 1047 1048 * Called to do any work queued up on the transmit side 1048 1049 * that can now be done. 1049 1050 */ 1050 - static int 1051 + static void 1051 1052 ppp_xmit_process(struct ppp *ppp) 1052 1053 { 1053 1054 struct sk_buff *skb; 1054 - int ret = 0; 1055 1055 1056 1056 ppp_xmit_lock(ppp); 1057 1057 if (!ppp->closing) { ··· 1060 1062 ppp_send_frame(ppp, skb); 1061 1063 /* If there's no work left to do, tell the core net 1062 1064 code that we can accept some more. */ 1063 - if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) { 1065 + if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) 1064 1066 netif_wake_queue(ppp->dev); 1065 - ret = 1; 1066 - } 1067 + else 1068 + netif_stop_queue(ppp->dev); 1067 1069 } 1068 1070 ppp_xmit_unlock(ppp); 1069 - return ret; 1070 1071 } 1071 1072 1072 1073 static inline struct sk_buff *
+30
drivers/net/usb/qmi_wwan.c
··· 365 365 .data = BIT(4), /* interface whitelist bitmap */ 366 366 }; 367 367 368 + /* Sierra Wireless provide equally useless interface descriptors 369 + * Devices in QMI mode can be switched between two different 370 + * configurations: 371 + * a) USB interface #8 is QMI/wwan 372 + * b) USB interfaces #8, #19 and #20 are QMI/wwan 373 + * 374 + * Both configurations provide a number of other interfaces (serial++), 375 + * some of which have the same endpoint configuration as we expect, so 376 + * a whitelist or blacklist is necessary. 377 + * 378 + * FIXME: The below whitelist should include BIT(20). It does not 379 + * because I cannot get it to work... 380 + */ 381 + static const struct driver_info qmi_wwan_sierra = { 382 + .description = "Sierra Wireless wwan/QMI device", 383 + .flags = FLAG_WWAN, 384 + .bind = qmi_wwan_bind_gobi, 385 + .unbind = qmi_wwan_unbind_shared, 386 + .manage_power = qmi_wwan_manage_power, 387 + .data = BIT(8) | BIT(19), /* interface whitelist bitmap */ 388 + }; 368 389 369 390 #define HUAWEI_VENDOR_ID 0x12D1 370 391 #define QMI_GOBI_DEVICE(vend, prod) \ ··· 465 444 .bInterfaceSubClass = 0xff, 466 445 .bInterfaceProtocol = 0xff, 467 446 .driver_info = (unsigned long)&qmi_wwan_force_int4, 447 + }, 448 + { /* Sierra Wireless MC77xx in QMI mode */ 449 + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, 450 + .idVendor = 0x1199, 451 + .idProduct = 0x68a2, 452 + .bInterfaceClass = 0xff, 453 + .bInterfaceSubClass = 0xff, 454 + .bInterfaceProtocol = 0xff, 455 + .driver_info = (unsigned long)&qmi_wwan_sierra, 468 456 }, 469 457 {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 470 458 {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
+1
drivers/net/usb/smsc75xx.c
··· 1051 1051 dev->net->ethtool_ops = &smsc75xx_ethtool_ops; 1052 1052 dev->net->flags |= IFF_MULTICAST; 1053 1053 dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; 1054 + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 1054 1055 return 0; 1055 1056 } 1056 1057
+2 -3
drivers/net/virtio_net.c
··· 626 626 /* This can happen with OOM and indirect buffers. */ 627 627 if (unlikely(capacity < 0)) { 628 628 if (likely(capacity == -ENOMEM)) { 629 - if (net_ratelimit()) { 629 + if (net_ratelimit()) 630 630 dev_warn(&dev->dev, 631 631 "TX queue failure: out of memory\n"); 632 - } else { 632 + } else { 633 633 dev->stats.tx_fifo_errors++; 634 634 if (net_ratelimit()) 635 635 dev_warn(&dev->dev, 636 636 "Unexpected TX queue failure: %d\n", 637 637 capacity); 638 - } 639 638 } 640 639 dev->stats.tx_dropped++; 641 640 kfree_skb(skb);
+1
drivers/net/wan/farsync.c
··· 2483 2483 pr_err("Control memory remap failed\n"); 2484 2484 pci_release_regions(pdev); 2485 2485 pci_disable_device(pdev); 2486 + iounmap(card->mem); 2486 2487 kfree(card); 2487 2488 return -ENODEV; 2488 2489 }
+5 -2
drivers/net/wireless/ath/ath5k/ahb.c
··· 19 19 #include <linux/nl80211.h> 20 20 #include <linux/platform_device.h> 21 21 #include <linux/etherdevice.h> 22 + #include <linux/export.h> 22 23 #include <ar231x_platform.h> 23 24 #include "ath5k.h" 24 25 #include "debug.h" ··· 120 119 if (res == NULL) { 121 120 dev_err(&pdev->dev, "no IRQ resource found\n"); 122 121 ret = -ENXIO; 123 - goto err_out; 122 + goto err_iounmap; 124 123 } 125 124 126 125 irq = res->start; ··· 129 128 if (hw == NULL) { 130 129 dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); 131 130 ret = -ENOMEM; 132 - goto err_out; 131 + goto err_iounmap; 133 132 } 134 133 135 134 ah = hw->priv; ··· 186 185 err_free_hw: 187 186 ieee80211_free_hw(hw); 188 187 platform_set_drvdata(pdev, NULL); 188 + err_iounmap: 189 + iounmap(mem); 189 190 err_out: 190 191 return ret; 191 192 }
+8 -1
drivers/net/wireless/ath/ath9k/main.c
··· 1548 1548 struct ath_hw *ah = sc->sc_ah; 1549 1549 struct ath_common *common = ath9k_hw_common(ah); 1550 1550 struct ieee80211_conf *conf = &hw->conf; 1551 + bool reset_channel = false; 1551 1552 1552 1553 ath9k_ps_wakeup(sc); 1553 1554 mutex_lock(&sc->mutex); ··· 1557 1556 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); 1558 1557 if (sc->ps_idle) 1559 1558 ath_cancel_work(sc); 1559 + else 1560 + /* 1561 + * The chip needs a reset to properly wake up from 1562 + * full sleep 1563 + */ 1564 + reset_channel = ah->chip_fullsleep; 1560 1565 } 1561 1566 1562 1567 /* ··· 1591 1584 } 1592 1585 } 1593 1586 1594 - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1587 + if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) { 1595 1588 struct ieee80211_channel *curchan = hw->conf.channel; 1596 1589 int pos = curchan->hw_value; 1597 1590 int old_pos = -1;
+9 -1
drivers/net/wireless/ath/ath9k/xmit.c
··· 1820 1820 struct ath_frame_info *fi = get_frame_info(skb); 1821 1821 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1822 1822 struct ath_buf *bf; 1823 + int fragno; 1823 1824 u16 seqno; 1824 1825 1825 1826 bf = ath_tx_get_buffer(sc); ··· 1832 1831 ATH_TXBUF_RESET(bf); 1833 1832 1834 1833 if (tid) { 1834 + fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 1835 1835 seqno = tid->seq_next; 1836 1836 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 1837 - INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1837 + 1838 + if (fragno) 1839 + hdr->seq_ctrl |= cpu_to_le16(fragno); 1840 + 1841 + if (!ieee80211_has_morefrags(hdr->frame_control)) 1842 + INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1843 + 1838 1844 bf->bf_state.seqno = seqno; 1839 1845 } 1840 1846
+8
drivers/net/wireless/brcm80211/brcmsmac/main.c
··· 7614 7614 { 7615 7615 int len_mpdu; 7616 7616 struct ieee80211_rx_status rx_status; 7617 + struct ieee80211_hdr *hdr; 7617 7618 7618 7619 memset(&rx_status, 0, sizeof(rx_status)); 7619 7620 prep_mac80211_status(wlc, rxh, p, &rx_status); ··· 7623 7622 len_mpdu = p->len - D11_PHY_HDR_LEN - FCS_LEN; 7624 7623 skb_pull(p, D11_PHY_HDR_LEN); 7625 7624 __skb_trim(p, len_mpdu); 7625 + 7626 + /* unmute transmit */ 7627 + if (wlc->hw->suspended_fifos) { 7628 + hdr = (struct ieee80211_hdr *)p->data; 7629 + if (ieee80211_is_beacon(hdr->frame_control)) 7630 + brcms_b_mute(wlc->hw, false); 7631 + } 7626 7632 7627 7633 memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status)); 7628 7634 ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
+7 -2
drivers/net/wireless/libertas/cfg.c
··· 103 103 * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1 104 104 * in the firmware spec 105 105 */ 106 - static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type) 106 + static int lbs_auth_to_authtype(enum nl80211_auth_type auth_type) 107 107 { 108 108 int ret = -ENOTSUPP; 109 109 ··· 1411 1411 goto done; 1412 1412 } 1413 1413 1414 - lbs_set_authtype(priv, sme); 1414 + ret = lbs_set_authtype(priv, sme); 1415 + if (ret == -ENOTSUPP) { 1416 + wiphy_err(wiphy, "unsupported authtype 0x%x\n", sme->auth_type); 1417 + goto done; 1418 + } 1419 + 1415 1420 lbs_set_radio(priv, preamble, 1); 1416 1421 1417 1422 /* Do the actual association */
+9 -9
drivers/net/wireless/mwifiex/pcie.h
··· 48 48 #define PCIE_HOST_INT_STATUS_MASK 0xC3C 49 49 #define PCIE_SCRATCH_2_REG 0xC40 50 50 #define PCIE_SCRATCH_3_REG 0xC44 51 - #define PCIE_SCRATCH_4_REG 0xCC0 52 - #define PCIE_SCRATCH_5_REG 0xCC4 53 - #define PCIE_SCRATCH_6_REG 0xCC8 54 - #define PCIE_SCRATCH_7_REG 0xCCC 55 - #define PCIE_SCRATCH_8_REG 0xCD0 56 - #define PCIE_SCRATCH_9_REG 0xCD4 57 - #define PCIE_SCRATCH_10_REG 0xCD8 58 - #define PCIE_SCRATCH_11_REG 0xCDC 59 - #define PCIE_SCRATCH_12_REG 0xCE0 51 + #define PCIE_SCRATCH_4_REG 0xCD0 52 + #define PCIE_SCRATCH_5_REG 0xCD4 53 + #define PCIE_SCRATCH_6_REG 0xCD8 54 + #define PCIE_SCRATCH_7_REG 0xCDC 55 + #define PCIE_SCRATCH_8_REG 0xCE0 56 + #define PCIE_SCRATCH_9_REG 0xCE4 57 + #define PCIE_SCRATCH_10_REG 0xCE8 58 + #define PCIE_SCRATCH_11_REG 0xCEC 59 + #define PCIE_SCRATCH_12_REG 0xCF0 60 60 61 61 #define CPU_INTR_DNLD_RDY BIT(0) 62 62 #define CPU_INTR_DOOR_BELL BIT(1)
+1 -1
drivers/vhost/net.c
··· 238 238 239 239 vq->heads[vq->upend_idx].len = len; 240 240 ubuf->callback = vhost_zerocopy_callback; 241 - ubuf->arg = vq->ubufs; 241 + ubuf->ctx = vq->ubufs; 242 242 ubuf->desc = vq->upend_idx; 243 243 msg.msg_control = ubuf; 244 244 msg.msg_controllen = sizeof(ubuf);
+2 -3
drivers/vhost/vhost.c
··· 1598 1598 kfree(ubufs); 1599 1599 } 1600 1600 1601 - void vhost_zerocopy_callback(void *arg) 1601 + void vhost_zerocopy_callback(struct ubuf_info *ubuf) 1602 1602 { 1603 - struct ubuf_info *ubuf = arg; 1604 - struct vhost_ubuf_ref *ubufs = ubuf->arg; 1603 + struct vhost_ubuf_ref *ubufs = ubuf->ctx; 1605 1604 struct vhost_virtqueue *vq = ubufs->vq; 1606 1605 1607 1606 /* set len = 1 to mark this desc buffers done DMA */
+1 -1
drivers/vhost/vhost.h
··· 188 188 189 189 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 190 190 unsigned int log_num, u64 len); 191 - void vhost_zerocopy_callback(void *arg); 191 + void vhost_zerocopy_callback(struct ubuf_info *); 192 192 int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq); 193 193 194 194 #define vq_err(vq, fmt, ...) do { \
+4 -3
include/linux/skbuff.h
··· 238 238 /* 239 239 * The callback notifies userspace to release buffers when skb DMA is done in 240 240 * lower device, the skb last reference should be 0 when calling this. 241 - * The desc is used to track userspace buffer index. 241 + * The ctx field is used to track device context. 242 + * The desc field is used to track userspace buffer index. 242 243 */ 243 244 struct ubuf_info { 244 - void (*callback)(void *); 245 - void *arg; 245 + void (*callback)(struct ubuf_info *); 246 + void *ctx; 246 247 unsigned long desc; 247 248 }; 248 249
+5 -1
include/net/dst.h
··· 36 36 struct net_device *dev; 37 37 struct dst_ops *ops; 38 38 unsigned long _metrics; 39 - unsigned long expires; 39 + union { 40 + unsigned long expires; 41 + /* point to where the dst_entry copied from */ 42 + struct dst_entry *from; 43 + }; 40 44 struct dst_entry *path; 41 45 struct neighbour __rcu *_neighbour; 42 46 #ifdef CONFIG_XFRM
+48
include/net/ip6_fib.h
··· 123 123 return ((struct rt6_info *)dst)->rt6i_idev; 124 124 } 125 125 126 + static inline void rt6_clean_expires(struct rt6_info *rt) 127 + { 128 + if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) 129 + dst_release(rt->dst.from); 130 + 131 + rt->rt6i_flags &= ~RTF_EXPIRES; 132 + rt->dst.from = NULL; 133 + } 134 + 135 + static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires) 136 + { 137 + if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) 138 + dst_release(rt->dst.from); 139 + 140 + rt->rt6i_flags |= RTF_EXPIRES; 141 + rt->dst.expires = expires; 142 + } 143 + 144 + static inline void rt6_update_expires(struct rt6_info *rt, int timeout) 145 + { 146 + if (!(rt->rt6i_flags & RTF_EXPIRES)) { 147 + if (rt->dst.from) 148 + dst_release(rt->dst.from); 149 + /* dst_set_expires relies on expires == 0 150 + * if it has not been set previously. 151 + */ 152 + rt->dst.expires = 0; 153 + } 154 + 155 + dst_set_expires(&rt->dst, timeout); 156 + rt->rt6i_flags |= RTF_EXPIRES; 157 + } 158 + 159 + static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from) 160 + { 161 + struct dst_entry *new = (struct dst_entry *) from; 162 + 163 + if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) { 164 + if (new == rt->dst.from) 165 + return; 166 + dst_release(rt->dst.from); 167 + } 168 + 169 + rt->rt6i_flags &= ~RTF_EXPIRES; 170 + rt->dst.from = new; 171 + dst_hold(new); 172 + } 173 + 126 174 struct fib6_walker_t { 127 175 struct list_head lh; 128 176 struct fib6_node *root, *node;
+3 -3
include/net/red.h
··· 245 245 * 246 246 * dummy packets as a burst after idle time, i.e. 247 247 * 248 - * p->qavg *= (1-W)^m 248 + * v->qavg *= (1-W)^m 249 249 * 250 250 * This is an apparently overcomplicated solution (f.e. we have to 251 251 * precompute a table to make this calculation in reasonable time) ··· 279 279 unsigned int backlog) 280 280 { 281 281 /* 282 - * NOTE: p->qavg is fixed point number with point at Wlog. 282 + * NOTE: v->qavg is fixed point number with point at Wlog. 283 283 * The formula below is equvalent to floating point 284 284 * version: 285 285 * ··· 390 390 if (red_is_idling(v)) 391 391 qavg = red_calc_qavg_from_idle_time(p, v); 392 392 393 - /* p->qavg is fixed point number with point at Wlog */ 393 + /* v->qavg is fixed point number with point at Wlog */ 394 394 qavg >>= p->Wlog; 395 395 396 396 if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
+1
include/net/sock.h
··· 246 246 * @sk_user_data: RPC layer private data 247 247 * @sk_sndmsg_page: cached page for sendmsg 248 248 * @sk_sndmsg_off: cached offset for sendmsg 249 + * @sk_peek_off: current peek_offset value 249 250 * @sk_send_head: front of stuff to transmit 250 251 * @sk_security: used by security modules 251 252 * @sk_mark: generic packet mark
+5 -4
net/ax25/af_ax25.c
··· 2011 2011 proc_net_remove(&init_net, "ax25_route"); 2012 2012 proc_net_remove(&init_net, "ax25"); 2013 2013 proc_net_remove(&init_net, "ax25_calls"); 2014 - ax25_rt_free(); 2015 - ax25_uid_free(); 2016 - ax25_dev_free(); 2017 2014 2018 - ax25_unregister_sysctl(); 2019 2015 unregister_netdevice_notifier(&ax25_dev_notifier); 2016 + ax25_unregister_sysctl(); 2020 2017 2021 2018 dev_remove_pack(&ax25_packet_type); 2022 2019 2023 2020 sock_unregister(PF_AX25); 2024 2021 proto_unregister(&ax25_proto); 2022 + 2023 + ax25_rt_free(); 2024 + ax25_uid_free(); 2025 + ax25_dev_free(); 2025 2026 } 2026 2027 module_exit(ax25_exit);
+6 -3
net/caif/chnl_net.c
··· 103 103 skb->protocol = htons(ETH_P_IPV6); 104 104 break; 105 105 default: 106 + kfree_skb(skb); 106 107 priv->netdev->stats.rx_errors++; 107 108 return -EINVAL; 108 109 } ··· 221 220 222 221 if (skb->len > priv->netdev->mtu) { 223 222 pr_warn("Size of skb exceeded MTU\n"); 223 + kfree_skb(skb); 224 224 dev->stats.tx_errors++; 225 - return -ENOSPC; 225 + return NETDEV_TX_OK; 226 226 } 227 227 228 228 if (!priv->flowenabled) { 229 229 pr_debug("dropping packets flow off\n"); 230 + kfree_skb(skb); 230 231 dev->stats.tx_dropped++; 231 - return NETDEV_TX_BUSY; 232 + return NETDEV_TX_OK; 232 233 } 233 234 234 235 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) ··· 245 242 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt); 246 243 if (result) { 247 244 dev->stats.tx_dropped++; 248 - return result; 245 + return NETDEV_TX_OK; 249 246 } 250 247 251 248 /* Update statistics. */
+20
net/core/dev.c
··· 1409 1409 * register_netdevice_notifier(). The notifier is unlinked into the 1410 1410 * kernel structures and may then be reused. A negative errno code 1411 1411 * is returned on a failure. 1412 + * 1413 + * After unregistering unregister and down device events are synthesized 1414 + * for all devices on the device list to the removed notifier to remove 1415 + * the need for special case cleanup code. 1412 1416 */ 1413 1417 1414 1418 int unregister_netdevice_notifier(struct notifier_block *nb) 1415 1419 { 1420 + struct net_device *dev; 1421 + struct net *net; 1416 1422 int err; 1417 1423 1418 1424 rtnl_lock(); 1419 1425 err = raw_notifier_chain_unregister(&netdev_chain, nb); 1426 + if (err) 1427 + goto unlock; 1428 + 1429 + for_each_net(net) { 1430 + for_each_netdev(net, dev) { 1431 + if (dev->flags & IFF_UP) { 1432 + nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); 1433 + nb->notifier_call(nb, NETDEV_DOWN, dev); 1434 + } 1435 + nb->notifier_call(nb, NETDEV_UNREGISTER, dev); 1436 + nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev); 1437 + } 1438 + } 1439 + unlock: 1420 1440 rtnl_unlock(); 1421 1441 return err; 1422 1442 }
+1
net/core/drop_monitor.c
··· 150 150 for (i = 0; i < msg->entries; i++) { 151 151 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { 152 152 msg->points[i].count++; 153 + atomic_inc(&data->dm_hit_count); 153 154 goto out; 154 155 } 155 156 }
+18 -15
net/core/net_namespace.c
··· 83 83 84 84 static int ops_init(const struct pernet_operations *ops, struct net *net) 85 85 { 86 - int err; 86 + int err = -ENOMEM; 87 + void *data = NULL; 88 + 87 89 if (ops->id && ops->size) { 88 - void *data = kzalloc(ops->size, GFP_KERNEL); 90 + data = kzalloc(ops->size, GFP_KERNEL); 89 91 if (!data) 90 - return -ENOMEM; 92 + goto out; 91 93 92 94 err = net_assign_generic(net, *ops->id, data); 93 - if (err) { 94 - kfree(data); 95 - return err; 96 - } 95 + if (err) 96 + goto cleanup; 97 97 } 98 + err = 0; 98 99 if (ops->init) 99 - return ops->init(net); 100 - return 0; 100 + err = ops->init(net); 101 + if (!err) 102 + return 0; 103 + 104 + cleanup: 105 + kfree(data); 106 + 107 + out: 108 + return err; 101 109 } 102 110 103 111 static void ops_free(const struct pernet_operations *ops, struct net *net) ··· 456 448 static int __register_pernet_operations(struct list_head *list, 457 449 struct pernet_operations *ops) 458 450 { 459 - int err = 0; 460 - err = ops_init(ops, &init_net); 461 - if (err) 462 - ops_free(ops, &init_net); 463 - return err; 464 - 451 + return ops_init(ops, &init_net); 465 452 } 466 453 467 454 static void __unregister_pernet_operations(struct pernet_operations *ops)
+1
net/ipv4/tcp_input.c
··· 335 335 incr = __tcp_grow_window(sk, skb); 336 336 337 337 if (incr) { 338 + incr = max_t(int, incr, 2 * skb->len); 338 339 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, 339 340 tp->window_clamp); 340 341 inet_csk(sk)->icsk_ack.quick |= 1;
+1
net/ipv4/tcp_output.c
··· 1096 1096 eat = min_t(int, len, skb_headlen(skb)); 1097 1097 if (eat) { 1098 1098 __skb_pull(skb, eat); 1099 + skb->avail_size -= eat; 1099 1100 len -= eat; 1100 1101 if (!len) 1101 1102 return;
+3 -6
net/ipv6/addrconf.c
··· 803 803 ip6_del_rt(rt); 804 804 rt = NULL; 805 805 } else if (!(rt->rt6i_flags & RTF_EXPIRES)) { 806 - rt->dst.expires = expires; 807 - rt->rt6i_flags |= RTF_EXPIRES; 806 + rt6_set_expires(rt, expires); 808 807 } 809 808 } 810 809 dst_release(&rt->dst); ··· 1886 1887 rt = NULL; 1887 1888 } else if (addrconf_finite_timeout(rt_expires)) { 1888 1889 /* not infinity */ 1889 - rt->dst.expires = jiffies + rt_expires; 1890 - rt->rt6i_flags |= RTF_EXPIRES; 1890 + rt6_set_expires(rt, jiffies + rt_expires); 1891 1891 } else { 1892 - rt->rt6i_flags &= ~RTF_EXPIRES; 1893 - rt->dst.expires = 0; 1892 + rt6_clean_expires(rt); 1894 1893 } 1895 1894 } else if (valid_lft) { 1896 1895 clock_t expires = 0;
+4 -5
net/ipv6/ip6_fib.c
··· 673 673 &rt->rt6i_gateway)) { 674 674 if (!(iter->rt6i_flags & RTF_EXPIRES)) 675 675 return -EEXIST; 676 - iter->dst.expires = rt->dst.expires; 677 - if (!(rt->rt6i_flags & RTF_EXPIRES)) { 678 - iter->rt6i_flags &= ~RTF_EXPIRES; 679 - iter->dst.expires = 0; 680 - } 676 + if (!(rt->rt6i_flags & RTF_EXPIRES)) 677 + rt6_clean_expires(iter); 678 + else 679 + rt6_set_expires(iter, rt->dst.expires); 681 680 return -EEXIST; 682 681 } 683 682 }
+1 -2
net/ipv6/ndisc.c
··· 1264 1264 } 1265 1265 1266 1266 if (rt) 1267 - rt->dst.expires = jiffies + (HZ * lifetime); 1268 - 1267 + rt6_set_expires(rt, jiffies + (HZ * lifetime)); 1269 1268 if (ra_msg->icmph.icmp6_hop_limit) { 1270 1269 in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; 1271 1270 if (rt)
+44 -27
net/ipv6/route.c
··· 62 62 #include <linux/sysctl.h> 63 63 #endif 64 64 65 - static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, 65 + static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, 66 66 const struct in6_addr *dest); 67 67 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); 68 68 static unsigned int ip6_default_advmss(const struct dst_entry *dst); ··· 285 285 rt->rt6i_idev = NULL; 286 286 in6_dev_put(idev); 287 287 } 288 + 289 + if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from) 290 + dst_release(dst->from); 291 + 288 292 if (peer) { 289 293 rt->rt6i_peer = NULL; 290 294 inet_putpeer(peer); ··· 333 329 334 330 static __inline__ int rt6_check_expired(const struct rt6_info *rt) 335 331 { 336 - return (rt->rt6i_flags & RTF_EXPIRES) && 337 - time_after(jiffies, rt->dst.expires); 332 + struct rt6_info *ort = NULL; 333 + 334 + if (rt->rt6i_flags & RTF_EXPIRES) { 335 + if (time_after(jiffies, rt->dst.expires)) 336 + return 1; 337 + } else if (rt->dst.from) { 338 + ort = (struct rt6_info *) rt->dst.from; 339 + return (ort->rt6i_flags & RTF_EXPIRES) && 340 + time_after(jiffies, ort->dst.expires); 341 + } 342 + return 0; 338 343 } 339 344 340 345 static inline int rt6_need_strict(const struct in6_addr *daddr) ··· 633 620 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); 634 621 635 622 if (rt) { 636 - if (!addrconf_finite_timeout(lifetime)) { 637 - rt->rt6i_flags &= ~RTF_EXPIRES; 638 - } else { 639 - rt->dst.expires = jiffies + HZ * lifetime; 640 - rt->rt6i_flags |= RTF_EXPIRES; 641 - } 623 + if (!addrconf_finite_timeout(lifetime)) 624 + rt6_clean_expires(rt); 625 + else 626 + rt6_set_expires(rt, jiffies + HZ * lifetime); 627 + 642 628 dst_release(&rt->dst); 643 629 } 644 630 return 0; ··· 742 730 return __ip6_ins_rt(rt, &info); 743 731 } 744 732 745 - static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort, 733 + static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, 746 734 const struct in6_addr *daddr, 747 735 const struct 
in6_addr *saddr) 748 736 { ··· 966 954 rt->rt6i_idev = ort->rt6i_idev; 967 955 if (rt->rt6i_idev) 968 956 in6_dev_hold(rt->rt6i_idev); 969 - rt->dst.expires = 0; 970 957 971 958 rt->rt6i_gateway = ort->rt6i_gateway; 972 - rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; 959 + rt->rt6i_flags = ort->rt6i_flags; 960 + rt6_clean_expires(rt); 973 961 rt->rt6i_metric = 0; 974 962 975 963 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); ··· 1031 1019 1032 1020 rt = (struct rt6_info *) skb_dst(skb); 1033 1021 if (rt) { 1034 - if (rt->rt6i_flags & RTF_CACHE) { 1035 - dst_set_expires(&rt->dst, 0); 1036 - rt->rt6i_flags |= RTF_EXPIRES; 1037 - } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) 1022 + if (rt->rt6i_flags & RTF_CACHE) 1023 + rt6_update_expires(rt, 0); 1024 + else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) 1038 1025 rt->rt6i_node->fn_sernum = -1; 1039 1026 } 1040 1027 } ··· 1300 1289 } 1301 1290 1302 1291 rt->dst.obsolete = -1; 1303 - rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ? 1304 - jiffies + clock_t_to_jiffies(cfg->fc_expires) : 1305 - 0; 1292 + 1293 + if (cfg->fc_flags & RTF_EXPIRES) 1294 + rt6_set_expires(rt, jiffies + 1295 + clock_t_to_jiffies(cfg->fc_expires)); 1296 + else 1297 + rt6_clean_expires(rt); 1306 1298 1307 1299 if (cfg->fc_protocol == RTPROT_UNSPEC) 1308 1300 cfg->fc_protocol = RTPROT_BOOT; ··· 1750 1736 features |= RTAX_FEATURE_ALLFRAG; 1751 1737 dst_metric_set(&rt->dst, RTAX_FEATURES, features); 1752 1738 } 1753 - dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); 1754 - rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; 1739 + rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires); 1740 + rt->rt6i_flags |= RTF_MODIFIED; 1755 1741 goto out; 1756 1742 } 1757 1743 ··· 1779 1765 * which is 10 mins. After 10 mins the decreased pmtu is expired 1780 1766 * and detecting PMTU increase will be automatically happened. 
1781 1767 */ 1782 - dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); 1783 - nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; 1784 - 1768 + rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires); 1769 + nrt->rt6i_flags |= RTF_DYNAMIC; 1785 1770 ip6_ins_rt(nrt); 1786 1771 } 1787 1772 out: ··· 1812 1799 * Misc support functions 1813 1800 */ 1814 1801 1815 - static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, 1802 + static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, 1816 1803 const struct in6_addr *dest) 1817 1804 { 1818 1805 struct net *net = dev_net(ort->dst.dev); ··· 1832 1819 if (rt->rt6i_idev) 1833 1820 in6_dev_hold(rt->rt6i_idev); 1834 1821 rt->dst.lastuse = jiffies; 1835 - rt->dst.expires = 0; 1836 1822 1837 1823 rt->rt6i_gateway = ort->rt6i_gateway; 1838 - rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; 1824 + rt->rt6i_flags = ort->rt6i_flags; 1825 + if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == 1826 + (RTF_DEFAULT | RTF_ADDRCONF)) 1827 + rt6_set_from(rt, ort); 1828 + else 1829 + rt6_clean_expires(rt); 1839 1830 rt->rt6i_metric = 0; 1840 1831 1841 1832 #ifdef CONFIG_IPV6_SUBTREES
+4
net/ipv6/tcp_ipv6.c
··· 1383 1383 tcp_mtup_init(newsk); 1384 1384 tcp_sync_mss(newsk, dst_mtu(dst)); 1385 1385 newtp->advmss = dst_metric_advmss(dst); 1386 + if (tcp_sk(sk)->rx_opt.user_mss && 1387 + tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) 1388 + newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; 1389 + 1386 1390 tcp_initialize_rcv_mss(newsk); 1387 1391 if (tcp_rsk(req)->snt_synack) 1388 1392 tcp_valid_rtt_meas(newsk,
+1 -1
net/key/af_key.c
··· 3480 3480 3481 3481 /* Addresses to be used by KM for negotiation, if ext is available */ 3482 3482 if (k != NULL && (set_sadb_kmaddress(skb, k) < 0)) 3483 - return -EINVAL; 3483 + goto err; 3484 3484 3485 3485 /* selector src */ 3486 3486 set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel);
+3 -2
net/l2tp/l2tp_ip.c
··· 232 232 { 233 233 write_lock_bh(&l2tp_ip_lock); 234 234 hlist_del_init(&sk->sk_bind_node); 235 - hlist_del_init(&sk->sk_node); 235 + sk_del_node_init(sk); 236 236 write_unlock_bh(&l2tp_ip_lock); 237 237 sk_common_release(sk); 238 238 } ··· 271 271 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) 272 272 goto out; 273 273 274 - inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; 274 + if (addr->l2tp_addr.s_addr) 275 + inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; 275 276 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) 276 277 inet->inet_saddr = 0; /* Use device */ 277 278 sk_dst_reset(sk);
+2 -2
net/mac80211/ibss.c
··· 457 457 * fall back to HT20 if we don't use or use 458 458 * the other extension channel 459 459 */ 460 - if ((channel_type == NL80211_CHAN_HT40MINUS || 461 - channel_type == NL80211_CHAN_HT40PLUS) && 460 + if (!(channel_type == NL80211_CHAN_HT40MINUS || 461 + channel_type == NL80211_CHAN_HT40PLUS) || 462 462 channel_type != sdata->u.ibss.channel_type) 463 463 sta_ht_cap_new.cap &= 464 464 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+6 -4
net/mac80211/rx.c
··· 103 103 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, 104 104 struct sk_buff *skb, 105 105 struct ieee80211_rate *rate, 106 - int rtap_len) 106 + int rtap_len, bool has_fcs) 107 107 { 108 108 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 109 109 struct ieee80211_radiotap_header *rthdr; ··· 134 134 } 135 135 136 136 /* IEEE80211_RADIOTAP_FLAGS */ 137 - if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 137 + if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)) 138 138 *pos |= IEEE80211_RADIOTAP_F_FCS; 139 139 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 140 140 *pos |= IEEE80211_RADIOTAP_F_BADFCS; ··· 294 294 } 295 295 296 296 /* prepend radiotap information */ 297 - ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); 297 + ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, 298 + true); 298 299 299 300 skb_reset_mac_header(skb); 300 301 skb->ip_summed = CHECKSUM_UNNECESSARY; ··· 2572 2571 goto out_free_skb; 2573 2572 2574 2573 /* prepend radiotap information */ 2575 - ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); 2574 + ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, 2575 + false); 2576 2576 2577 2577 skb_set_mac_header(skb, 0); 2578 2578 skb->ip_summed = CHECKSUM_UNNECESSARY;
+2 -19
net/phonet/pn_dev.c
··· 331 331 332 332 static void __net_exit phonet_exit_net(struct net *net) 333 333 { 334 - struct phonet_net *pnn = phonet_pernet(net); 335 - struct net_device *dev; 336 - unsigned i; 337 - 338 - rtnl_lock(); 339 - for_each_netdev(net, dev) 340 - phonet_device_destroy(dev); 341 - 342 - for (i = 0; i < 64; i++) { 343 - dev = pnn->routes.table[i]; 344 - if (dev) { 345 - rtm_phonet_notify(RTM_DELROUTE, dev, i); 346 - dev_put(dev); 347 - } 348 - } 349 - rtnl_unlock(); 350 - 351 334 proc_net_remove(net, "phonet"); 352 335 } 353 336 ··· 344 361 /* Initialize Phonet devices list */ 345 362 int __init phonet_device_init(void) 346 363 { 347 - int err = register_pernet_device(&phonet_net_ops); 364 + int err = register_pernet_subsys(&phonet_net_ops); 348 365 if (err) 349 366 return err; 350 367 ··· 360 377 { 361 378 rtnl_unregister_all(PF_PHONET); 362 379 unregister_netdevice_notifier(&phonet_device_notifier); 363 - unregister_pernet_device(&phonet_net_ops); 380 + unregister_pernet_subsys(&phonet_net_ops); 364 381 proc_net_remove(&init_net, "pnresource"); 365 382 } 366 383
+2 -5
net/sched/sch_gred.c
··· 565 565 opt.packets = q->packetsin; 566 566 opt.bytesin = q->bytesin; 567 567 568 - if (gred_wred_mode(table)) { 569 - q->vars.qidlestart = 570 - table->tab[table->def]->vars.qidlestart; 571 - q->vars.qavg = table->tab[table->def]->vars.qavg; 572 - } 568 + if (gred_wred_mode(table)) 569 + gred_load_wred_set(table, q); 573 570 574 571 opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg); 575 572
+1 -1
net/wireless/util.c
··· 989 989 if (rdev->wiphy.software_iftypes & BIT(iftype)) 990 990 continue; 991 991 for (j = 0; j < c->n_limits; j++) { 992 - if (!(limits[j].types & iftype)) 992 + if (!(limits[j].types & BIT(iftype))) 993 993 continue; 994 994 if (limits[j].max < num[iftype]) 995 995 goto cont;