Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
"Famouse last words: "final pull request" :-)

I'm sending this because Jason Wang's fixes are pretty important

1) Add missing per-cpu stats initialization to ip6_vti. Otherwise
lockdep spits out a call trace. From Li RongQing.

2) Fix NULL oops in wireless hwsim, from Javier Lopez

3) TIPC deferred packet queue unlink must NULL out skb->next to avoid
crashes. From Erik Hugne

4) Fix access to uninitialized buffer in nf_nat netfilter code, from
Daniel Borkmann

5) Fix lifetime of ipv6 loopback and SIT tunnel addresses, otherwise
they basically timeout immediately. From Hannes Frederic Sowa

6) Fix DMA unmapping of TSO packets in bnx2x driver, from Michal
Schmidt

7) Do not allow L2 forwarding offload via the macvtap device; as
things stand now such traffic will not end up being forwarded at
all. From Jason Wang

8) Fix transmit queue selection via ndo_dfwd_start_xmit(); the old
path applied NETIF_F_LLTX to the wrong device (!!) and elided the
proper transmit watchdog handling (see the callback sketch after
this summary)

9) qlcnic driver was not updating tx statistics at all, from Manish
Chopra"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
qlcnic: Fix ethtool statistics length calculation
qlcnic: Fix bug in TX statistics
net: core: explicitly select a txq before doing l2 forwarding
macvlan: forbid L2 fowarding offload for macvtap
bnx2x: fix DMA unmapping of TSO split BDs
ipv6: add link-local, sit and loopback address with INFINITY_LIFE_TIME
bnx2x: prevent WARN during driver unload
tipc: correctly unlink packets from deferred packet queue
ipv6: pcpu_tstats.syncp should be initialised in ip6_vti.c
netfilter: only warn once on wrong seqadj usage
netfilter: nf_nat: fix access to uninitialized buffer in IRC NAT helper
NFC: Fix target mode p2p link establishment
iwlwifi: add new devices for 7265 series
mac80211: move "bufferable MMPDU" check to fix AP mode scan
mac80211_hwsim: Fix NULL pointer dereference

Changed files: +219 -126
  drivers/
    net/
      bonding/
      ethernet/
      team/
      wireless/
        iwlwifi/
          pcie/
        mwifiex/
    staging/
      bcm/
      netlogic/
      rtl8188eu/
        os_dep/
  include/
    linux/
  net/
+2 -1
drivers/net/bonding/bond_main.c
···
 }


- static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
 {
 /*
 * This helper function exists to help dev_pick_tx get the correct
+34 -10
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
···
 #define BNX2X_FP_STATE_IDLE 0
 #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
 #define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
- #define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */
- #define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */
+ #define BNX2X_FP_STATE_DISABLED (1 << 2)
+ #define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
+ #define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
+ #define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
 #define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
- #define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+ #define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
 #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
 /* protect state */
 spinlock_t lock;
···
 {
 bool rc = true;

- spin_lock(&fp->lock);
+ spin_lock_bh(&fp->lock);
 if (fp->state & BNX2X_FP_LOCKED) {
 WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
 fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
···
 /* we don't care if someone yielded */
 fp->state = BNX2X_FP_STATE_NAPI;
 }
- spin_unlock(&fp->lock);
+ spin_unlock_bh(&fp->lock);
 return rc;
 }

···
 {
 bool rc = false;

- spin_lock(&fp->lock);
+ spin_lock_bh(&fp->lock);
 WARN_ON(fp->state &
 (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));

 if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
 rc = true;
- fp->state = BNX2X_FP_STATE_IDLE;
- spin_unlock(&fp->lock);
+
+ /* state ==> idle, unless currently disabled */
+ fp->state &= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
 return rc;
 }

···

 if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
 rc = true;
- fp->state = BNX2X_FP_STATE_IDLE;
+
+ /* state ==> idle, unless currently disabled */
+ fp->state &= BNX2X_FP_STATE_DISABLED;
 spin_unlock_bh(&fp->lock);
 return rc;
 }
···
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
- WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+ WARN_ON(!(fp->state & BNX2X_FP_OWNED));
 return fp->state & BNX2X_FP_USER_PEND;
+ }
+
+ /* false if fp is currently owned */
+ static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+ {
+ int rc = true;
+
+ spin_lock_bh(&fp->lock);
+ if (fp->state & BNX2X_FP_OWNED)
+ rc = false;
+ fp->state |= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
+
+ return rc;
 }
 #else
 static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
···
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
 return false;
+ }
+ static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+ {
+ return true;
 }
 #endif /* CONFIG_NET_RX_BUSY_POLL */

+15 -13
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
···
 struct sk_buff *skb = tx_buf->skb;
 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 int nbd;
+ u16 split_bd_len = 0;

 /* prefetch skb end pointer to speedup dev_kfree_skb() */
 prefetch(&skb->end);
···
 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
 txdata->txq_index, idx, tx_buf, skb);

- /* unmap first bd */
 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
- dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
- BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 #ifdef BNX2X_STOP_ON_ERROR
···
 --nbd;
 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

- /* ...and the TSO split header bd since they have no mapping */
+ /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+ split_bd_len = BD_UNMAP_LEN(tx_data_bd);
 --nbd;
 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 }
+
+ /* unmap first bd */
+ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+ BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+ DMA_TO_DEVICE);

 /* now free frags */
 while (nbd > 0) {
···
 {
 int i;

- local_bh_disable();
 for_each_rx_queue_cnic(bp, i) {
 napi_disable(&bnx2x_fp(bp, i, napi));
- while (!bnx2x_fp_lock_napi(&bp->fp[i]))
- mdelay(1);
+ while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+ usleep_range(1000, 2000);
 }
- local_bh_enable();
 }

 static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 int i;

- local_bh_disable();
 for_each_eth_queue(bp, i) {
 napi_disable(&bnx2x_fp(bp, i, napi));
- while (!bnx2x_fp_lock_napi(&bp->fp[i]))
- mdelay(1);
+ while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+ usleep_range(1000, 2000);
 }
- local_bh_enable();
 }

 void bnx2x_netif_start(struct bnx2x *bp)
···
 bnx2x_napi_disable_cnic(bp);
 }

- u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
 {
 struct bnx2x *bp = netdev_priv(dev);

+2 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
···
 int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);

 /* select_queue callback */
- u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv);

 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 struct bnx2x_fastpath *fp,
+13 -20
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
···
 return __ixgbe_maybe_stop_tx(tx_ring, size);
 }

- #ifdef IXGBE_FCOE
- static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
 {
+ struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
+ #ifdef IXGBE_FCOE
 struct ixgbe_adapter *adapter;
 struct ixgbe_ring_feature *f;
 int txq;
+ #endif
+
+ if (fwd_adapter)
+ return skb->queue_mapping + fwd_adapter->tx_base_queue;
+
+ #ifdef IXGBE_FCOE

 /*
 * only execute the code below if protocol is FCoE
···
 txq -= f->indices;

 return txq + f->offset;
+ #else
+ return __netdev_pick_tx(dev, skb);
+ #endif
 }

- #endif
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 struct ixgbe_adapter *adapter,
 struct ixgbe_ring *tx_ring)
···
 kfree(fwd_adapter);
 }

- static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
- struct net_device *dev,
- void *priv)
- {
- struct ixgbe_fwd_adapter *fwd_adapter = priv;
- unsigned int queue;
- struct ixgbe_ring *tx_ring;
-
- queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
- tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
-
- return __ixgbe_xmit_frame(skb, dev, tx_ring);
- }
-
 static const struct net_device_ops ixgbe_netdev_ops = {
 .ndo_open = ixgbe_open,
 .ndo_stop = ixgbe_close,
 .ndo_start_xmit = ixgbe_xmit_frame,
- #ifdef IXGBE_FCOE
 .ndo_select_queue = ixgbe_select_queue,
- #endif
 .ndo_set_rx_mode = ixgbe_set_rx_mode,
 .ndo_validate_addr = eth_validate_addr,
 .ndo_set_mac_address = ixgbe_set_mac,
···
 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
 .ndo_dfwd_add_station = ixgbe_fwd_add,
 .ndo_dfwd_del_station = ixgbe_fwd_del,
- .ndo_dfwd_start_xmit = ixgbe_fwd_xmit,
 };

 /**
+2 -1
drivers/net/ethernet/lantiq_etop.c
···
 }

 static u16
- ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
+ ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
 {
 /* we are currently only using the first queue */
 return 0;
+2 -1
drivers/net/ethernet/mellanox/mlx4/en_tx.c
···
 }
 }

- u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
 {
 struct mlx4_en_priv *priv = netdev_priv(dev);
 u16 rings_p_up = priv->num_tx_rings_p_up;
+2 -1
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
···
 int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);

 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
- u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);

 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+1
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
···
 void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
 void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
 void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+ void qlcnic_update_stats(struct qlcnic_adapter *);

 /* Adapter hardware abstraction */
 struct qlcnic_hardware_ops {
+22 -19
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
···

 #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)

- static inline int qlcnic_82xx_statistics(void)
+ static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
 {
- return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
- ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ return ARRAY_SIZE(qlcnic_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+ QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
 }

 static inline int qlcnic_83xx_statistics(void)
 {
- return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+ return ARRAY_SIZE(qlcnic_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
- ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+ ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
+ QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
 }

 static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
 {
- if (qlcnic_82xx_check(adapter))
- return qlcnic_82xx_statistics();
- else if (qlcnic_83xx_check(adapter))
- return qlcnic_83xx_statistics();
- else
- return -1;
+ int len = -1;
+
+ if (qlcnic_82xx_check(adapter)) {
+ len = qlcnic_82xx_statistics(adapter);
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
+ } else if (qlcnic_83xx_check(adapter)) {
+ len = qlcnic_83xx_statistics(adapter);
+ }
+
+ return len;
 }

 #define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412
···

 static int qlcnic_get_sset_count(struct net_device *dev, int sset)
 {
- int len;

 struct qlcnic_adapter *adapter = netdev_priv(dev);
 switch (sset) {
 case ETH_SS_TEST:
 return QLCNIC_TEST_LEN;
 case ETH_SS_STATS:
- len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN;
- if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
- qlcnic_83xx_check(adapter))
- return len;
- return qlcnic_82xx_statistics();
+ return qlcnic_dev_statistics_len(adapter);
 default:
 return -EOPNOTSUPP;
 }
···
 return data;
 }

- static void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+ void qlcnic_update_stats(struct qlcnic_adapter *adapter)
 {
 struct qlcnic_host_tx_ring *tx_ring;
 int ring;
+3
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
···
 struct qlcnic_adapter *adapter = netdev_priv(netdev);
 struct net_device_stats *stats = &netdev->stats;

+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_update_stats(adapter);
+
 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
 stats->tx_packets = adapter->stats.xmitfinished;
 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
+2 -1
drivers/net/ethernet/tile/tilegx.c
···
 }

 /* Return subqueue id on this core (one per core). */
- static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+ static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
 {
 return smp_processor_id();
 }
+7 -7
drivers/net/macvlan.c
···

 if (vlan->fwd_priv) {
 skb->dev = vlan->lowerdev;
- ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+ ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
 } else {
 ret = macvlan_queue_xmit(skb, dev);
 }
···
 .cache_update = eth_header_cache_update,
 };

+ static struct rtnl_link_ops macvlan_link_ops;
+
 static int macvlan_open(struct net_device *dev)
 {
 struct macvlan_dev *vlan = netdev_priv(dev);
···
 goto hash_add;
 }

- if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+ if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
+ dev->rtnl_link_ops == &macvlan_link_ops) {
 vlan->fwd_priv =
 lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);

···
 */
 if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
 vlan->fwd_priv = NULL;
- } else {
- dev->features &= ~NETIF_F_LLTX;
+ } else
 return 0;
- }

 err = -EBUSY;
···
 features = netdev_increment_features(vlan->lowerdev->features,
 features,
 mask);
- if (!vlan->fwd_priv)
- features |= NETIF_F_LLTX;
+ features |= NETIF_F_LLTX;

 return features;
 }
+2 -1
drivers/net/team/team.c
···
 return NETDEV_TX_OK;
 }

- static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+ static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
 {
 /*
 * This helper function exists to help dev_pick_tx get the correct
+2 -1
drivers/net/tun.c
···
 * different rxq no. here. If we could not get rxhash, then we would
 * hope the rxq no. may help here.
 */
- static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
 {
 struct tun_struct *tun = netdev_priv(dev);
 struct tun_flow_entry *e;
+8 -2
drivers/net/wireless/iwlwifi/pcie/drv.c
···
 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
 {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
 {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
 {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
 {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
 {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
 {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
+1 -1
drivers/net/wireless/mac80211_hwsim.c
···
 (hwsim_flags & HWSIM_TX_STAT_ACK)) {
 if (skb->len >= 16) {
 hdr = (struct ieee80211_hdr *) skb->data;
- mac80211_hwsim_monitor_ack(txi->rate_driver_data[0],
+ mac80211_hwsim_monitor_ack(data2->channel,
 hdr->addr2);
 }
 txi->flags |= IEEE80211_TX_STAT_ACK;
+2 -1
drivers/net/wireless/mwifiex/main.c
···
 }

 static u16
- mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb)
+ mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
 {
 skb->priority = cfg80211_classify8021d(skb);
 return mwifiex_1d_to_wmm_queue[skb->priority];
+2 -1
drivers/staging/bcm/Bcmnet.c
···
 return 0;
 }

- static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb)
+ static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
 {
 return ClassifyPacket(netdev_priv(dev), skb);
 }
+2 -1
drivers/staging/netlogic/xlr_net.c
···
 return NETDEV_TX_OK;
 }

- static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb)
+ static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv)
 {
 return (u16)smp_processor_id();
 }
+2 -1
drivers/staging/rtl8188eu/os_dep/os_intfs.c
···
 return dscp >> 5;
 }

- static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb)
+ static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv)
 {
 struct adapter *padapter = rtw_netdev_priv(dev);
 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+8 -4
include/linux/netdevice.h
···
 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 * Required can not be NULL.
 *
- * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
+ * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
+ * void *accel_priv);
 * Called to decide which queue to when device supports multiple
 * transmit queues.
 *
···
 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
 struct net_device *dev);
 u16 (*ndo_select_queue)(struct net_device *dev,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ void *accel_priv);
 void (*ndo_change_rx_flags)(struct net_device *dev,
 int flags);
 void (*ndo_set_rx_mode)(struct net_device *dev);
···
 }

 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ void *accel_priv);
 u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);

 /*
···
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct sk_buff *newskb);
 int dev_queue_xmit(struct sk_buff *skb);
+ int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 void unregister_netdevice_many(struct list_head *head);
···
 int dev_get_phys_port_id(struct net_device *dev,
 struct netdev_phys_port_id *ppid);
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
- struct netdev_queue *txq, void *accel_priv);
+ struct netdev_queue *txq);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);

 extern int netdev_budget;
+17 -12
net/core/dev.c
···
 }

 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
- struct netdev_queue *txq, void *accel_priv)
+ struct netdev_queue *txq)
 {
 const struct net_device_ops *ops = dev->netdev_ops;
 int rc = NETDEV_TX_OK;
···
 dev_queue_xmit_nit(skb, dev);

 skb_len = skb->len;
- if (accel_priv)
- rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
- else
 rc = ops->ndo_start_xmit(skb, dev);

 trace_net_dev_xmit(skb, rc, dev, skb_len);
- if (rc == NETDEV_TX_OK && txq)
+ if (rc == NETDEV_TX_OK)
 txq_trans_update(txq);
 return rc;
 }
···
 dev_queue_xmit_nit(nskb, dev);

 skb_len = nskb->len;
- if (accel_priv)
- rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
- else
- rc = ops->ndo_start_xmit(nskb, dev);
+ rc = ops->ndo_start_xmit(nskb, dev);
 trace_net_dev_xmit(nskb, rc, dev, skb_len);
 if (unlikely(rc != NETDEV_TX_OK)) {
 if (rc & ~NETDEV_TX_MASK)
···
 * the BH enable code must have IRQs enabled so that it will not deadlock.
 * --BLG
 */
- int dev_queue_xmit(struct sk_buff *skb)
+ int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 {
 struct net_device *dev = skb->dev;
 struct netdev_queue *txq;
···

 skb_update_prio(skb);

- txq = netdev_pick_tx(dev, skb);
+ txq = netdev_pick_tx(dev, skb, accel_priv);
 q = rcu_dereference_bh(txq->qdisc);

 #ifdef CONFIG_NET_CLS_ACT
···

 if (!netif_xmit_stopped(txq)) {
 __this_cpu_inc(xmit_recursion);
- rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+ rc = dev_hard_start_xmit(skb, dev, txq);
 __this_cpu_dec(xmit_recursion);
 if (dev_xmit_complete(rc)) {
 HARD_TX_UNLOCK(dev, txq);
···
 rcu_read_unlock_bh();
 return rc;
 }
+
+ int dev_queue_xmit(struct sk_buff *skb)
+ {
+ return __dev_queue_xmit(skb, NULL);
+ }
 EXPORT_SYMBOL(dev_queue_xmit);
+
+ int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+ {
+ return __dev_queue_xmit(skb, accel_priv);
+ }
+ EXPORT_SYMBOL(dev_queue_xmit_accel);


 /*=======================================================================
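
For callers, the upshot of the dev.c change is that accelerated transmits now enter the stack through dev_queue_xmit_accel() instead of poking dev_hard_start_xmit() directly, so queue selection, qdisc handling and the watchdog are no longer bypassed. A hedged sketch of the calling convention on the offload path (macvlan is the in-tree user, see its hunk above; the function name here is illustrative):

        /* Upper device's transmit path when a station has been offloaded to
         * the lower device: hand the skb plus the fwd_priv cookie to the core,
         * which then picks the txq with that cookie in hand. */
        static int example_offloaded_xmit(struct sk_buff *skb,
                                          struct net_device *lowerdev,
                                          void *fwd_priv)
        {
                skb->dev = lowerdev;
                return dev_queue_xmit_accel(skb, fwd_priv);
        }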
+7 -3
net/core/flow_dissector.c
···
 EXPORT_SYMBOL(__netdev_pick_tx);

 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ void *accel_priv)
 {
 int queue_index = 0;

 if (dev->real_num_tx_queues != 1) {
 const struct net_device_ops *ops = dev->netdev_ops;
 if (ops->ndo_select_queue)
- queue_index = ops->ndo_select_queue(dev, skb);
+ queue_index = ops->ndo_select_queue(dev, skb,
+ accel_priv);
 else
 queue_index = __netdev_pick_tx(dev, skb);
- queue_index = dev_cap_txqueue(dev, queue_index);
+
+ if (!accel_priv)
+ queue_index = dev_cap_txqueue(dev, queue_index);
 }

 skb_set_queue_mapping(skb, queue_index);
+1 -1
net/core/netpoll.c
···
 if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 struct netdev_queue *txq;

- txq = netdev_pick_tx(dev, skb);
+ txq = netdev_pick_tx(dev, skb, NULL);

 /* try until next clock tick */
 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
+4 -2
net/ipv6/addrconf.c
···
 struct inet6_ifaddr *ifp;

 ifp = ipv6_add_addr(idev, addr, NULL, plen,
- scope, IFA_F_PERMANENT, 0, 0);
+ scope, IFA_F_PERMANENT,
+ INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
 if (!IS_ERR(ifp)) {
 spin_lock_bh(&ifp->lock);
 ifp->flags &= ~IFA_F_TENTATIVE;
···
 #endif


- ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0);
+ ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
+ INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
 if (!IS_ERR(ifp)) {
 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
 addrconf_dad_start(ifp);
+6
net/ipv6/ip6_vti.c
···
 static inline int vti6_dev_init_gen(struct net_device *dev)
 {
 struct ip6_tnl *t = netdev_priv(dev);
+ int i;

 t->dev = dev;
 t->net = dev_net(dev);
 dev->tstats = alloc_percpu(struct pcpu_tstats);
 if (!dev->tstats)
 return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct pcpu_tstats *stats;
+ stats = per_cpu_ptr(dev->tstats, i);
+ u64_stats_init(&stats->syncp);
+ }
 return 0;
 }

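
The hunk above follows the general rule for drivers that allocate dev->tstats: each per-cpu syncp has to be seeded with u64_stats_init() right after alloc_percpu(), because when the syncp is backed by a real seqcount (32-bit SMP builds) lockdep checks that it was initialised, hence the splat mentioned in item 1. A generic, hedged sketch of the same idiom with an illustrative function name:

        static int example_tunnel_init_stats(struct net_device *dev)
        {
                int i;

                dev->tstats = alloc_percpu(struct pcpu_tstats);
                if (!dev->tstats)
                        return -ENOMEM;

                for_each_possible_cpu(i) {
                        struct pcpu_tstats *stats = per_cpu_ptr(dev->tstats, i);

                        u64_stats_init(&stats->syncp);
                }
                return 0;
        }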
+4 -2
net/mac80211/iface.c
···
 }

 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ void *accel_priv)
 {
 return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
···
 };

 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ void *accel_priv)
 {
 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 struct ieee80211_local *local = sdata->local;
+13 -10
net/mac80211/tx.c
···
 {
 struct sta_info *sta = tx->sta;
 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 struct ieee80211_local *local = tx->local;

 if (unlikely(!sta))
···
 test_sta_flag(sta, WLAN_STA_PS_DRIVER)) &&
 !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
 int ac = skb_get_queue_mapping(tx->skb);
-
- /* only deauth, disassoc and action are bufferable MMPDUs */
- if (ieee80211_is_mgmt(hdr->frame_control) &&
- !ieee80211_is_deauth(hdr->frame_control) &&
- !ieee80211_is_disassoc(hdr->frame_control) &&
- !ieee80211_is_action(hdr->frame_control)) {
- info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
- return TX_CONTINUE;
- }

 ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
 sta->sta.addr, sta->sta.aid, ac);
···
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+
 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
 return TX_CONTINUE;
+
+ /* only deauth, disassoc and action are bufferable MMPDUs */
+ if (ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_deauth(hdr->frame_control) &&
+ !ieee80211_is_disassoc(hdr->frame_control) &&
+ !ieee80211_is_action(hdr->frame_control)) {
+ if (tx->flags & IEEE80211_TX_UNICAST)
+ info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+ return TX_CONTINUE;
+ }

 if (tx->flags & IEEE80211_TX_UNICAST)
 return ieee80211_tx_h_unicast_ps_buf(tx);
+1 -1
net/netfilter/nf_conntrack_seqadj.c
···
 return 0;

 if (unlikely(!seqadj)) {
- WARN(1, "Wrong seqadj usage, missing nfct_seqadj_ext_add()\n");
+ WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n");
 return 0;
 }

+27 -5
net/netfilter/nf_nat_irc.c
···
 struct nf_conntrack_expect *exp)
 {
 char buffer[sizeof("4294967296 65635")];
+ struct nf_conn *ct = exp->master;
+ union nf_inet_addr newaddr;
 u_int16_t port;
 unsigned int ret;

 /* Reply comes from server. */
+ newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
+
 exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
 exp->dir = IP_CT_DIR_REPLY;
 exp->expectfn = nf_nat_follow_master;
···
 }

 if (port == 0) {
- nf_ct_helper_log(skb, exp->master, "all ports in use");
+ nf_ct_helper_log(skb, ct, "all ports in use");
 return NF_DROP;
 }

- ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
- protoff, matchoff, matchlen, buffer,
- strlen(buffer));
+ /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
+ * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
+ * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26
+ * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26
+ * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27
+ *
+ * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits,
+ * 255.255.255.255==4294967296, 10 digits)
+ * P: bound port (min 1 d, max 5d (65635))
+ * F: filename (min 1 d )
+ * S: size (min 1 d )
+ * 0x01, \n: terminators
+ */
+ /* AAA = "us", ie. where server normally talks to. */
+ snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port);
+ pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
+ buffer, &newaddr.ip, port);
+
+ ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
+ matchlen, buffer, strlen(buffer));
 if (ret != NF_ACCEPT) {
- nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
+ nf_ct_helper_log(skb, ct, "cannot mangle packet");
 nf_ct_unexpect_related(exp);
 }
+
 return ret;
 }

+1 -1
net/nfc/core.c
···
 {
 dev->dep_link_up = true;

- if (!dev->active_target) {
+ if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) {
 struct nfc_target *target;

 target = nfc_find_target(dev, target_idx);
+1 -1
net/sched/sch_generic.c
···

 HARD_TX_LOCK(dev, txq, smp_processor_id());
 if (!netif_xmit_frozen_or_stopped(txq))
- ret = dev_hard_start_xmit(skb, dev, txq, NULL);
+ ret = dev_hard_start_xmit(skb, dev, txq);

 HARD_TX_UNLOCK(dev, txq);

+1
net/tipc/link.c
···
 int type;

 head = head->next;
+ buf->next = NULL;

 /* Ensure bearer is still enabled */
 if (unlikely(!b_ptr->active))