Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
"Famouse last words: "final pull request" :-)

I'm sending this because Jason Wang's fixes are pretty important:

1) Add missing per-cpu stats initialization to ip6_vti. Otherwise
lockdep spits out a call trace. From Li RongQing.

2) Fix NULL oops in wireless hwsim, from Javier Lopez

3) TIPC deferred packet queue unlink must NULL out skb->next to avoid
crashes. From Erik Hugne

4) Fix access to uninitialized buffer in nf_nat netfilter code, from
Daniel Borkmann

5) Fix lifetime of ipv6 loopback and SIT tunnel addresses, otherwise
they basically time out immediately. From Hannes Frederic Sowa

6) Fix DMA unmapping of TSO packets in bnx2x driver, from Michal
Schmidt

7) Do not allow L2 forwarding offload via macvtap device; the way
things are now it will not end up being forwarded at all. From
Jason Wang

8) Fix transmit queue selection via ndo_dfwd_start_xmit(), fixing
things like applying NETIF_F_LLTX to the wrong device (!!) and
eliding the proper transmit watchdog handling

9) qlcnic driver was not updating tx statistics at all, from Manish
Chopra"
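
For context on item 8: after this series, every ndo_select_queue() implementation takes a third accel_priv argument, which netdev_pick_tx() threads through from the new dev_queue_xmit_accel() path (see the include/linux/netdevice.h, net/core/dev.c and net/core/flow_dissector.c hunks below). A minimal sketch of the resulting callback shape follows; the driver name and queue policy here are placeholders for illustration, not any in-tree driver's code:

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
                            void *accel_priv)
{
        /* accel_priv is non-NULL only on the dev_queue_xmit_accel() path,
         * i.e. when an upper device (macvlan) transmits through an L2
         * forwarding acceleration context on this lower device.
         */
        if (accel_priv)
                return 0;       /* placeholder: pick a queue owned by that context */

        /* normal path: fall back to the stack's default queue selection */
        return __netdev_pick_tx(dev, skb);
}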

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
qlcnic: Fix ethtool statistics length calculation
qlcnic: Fix bug in TX statistics
net: core: explicitly select a txq before doing l2 forwarding
macvlan: forbid L2 fowarding offload for macvtap
bnx2x: fix DMA unmapping of TSO split BDs
ipv6: add link-local, sit and loopback address with INFINITY_LIFE_TIME
bnx2x: prevent WARN during driver unload
tipc: correctly unlink packets from deferred packet queue
ipv6: pcpu_tstats.syncp should be initialised in ip6_vti.c
netfilter: only warn once on wrong seqadj usage
netfilter: nf_nat: fix access to uninitialized buffer in IRC NAT helper
NFC: Fix target mode p2p link establishment
iwlwifi: add new devices for 7265 series
mac80211: move "bufferable MMPDU" check to fix AP mode scan
mac80211_hwsim: Fix NULL pointer dereference

Changed files
+219 -126
+2 -1
drivers/net/bonding/bond_main.c
···
 }

-static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
+                             void *accel_priv)
 {
        /*
         * This helper function exists to help dev_pick_tx get the correct
+34 -10
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
···
 #define BNX2X_FP_STATE_IDLE                   0
 #define BNX2X_FP_STATE_NAPI            (1 << 0)  /* NAPI owns this FP */
 #define BNX2X_FP_STATE_POLL            (1 << 1)  /* poll owns this FP */
-#define BNX2X_FP_STATE_NAPI_YIELD      (1 << 2)  /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD      (1 << 3)  /* poll yielded this FP */
+#define BNX2X_FP_STATE_DISABLED                (1 << 2)
+#define BNX2X_FP_STATE_NAPI_YIELD      (1 << 3)  /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD      (1 << 4)  /* poll yielded this FP */
+#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
 #define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED        (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_LOCKED        (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
 #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
        /* protect state */
        spinlock_t lock;
···
 {
        bool rc = true;

-       spin_lock(&fp->lock);
+       spin_lock_bh(&fp->lock);
        if (fp->state & BNX2X_FP_LOCKED) {
                WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
                fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
···
                /* we don't care if someone yielded */
                fp->state = BNX2X_FP_STATE_NAPI;
        }
-       spin_unlock(&fp->lock);
+       spin_unlock_bh(&fp->lock);
        return rc;
 }
···
 {
        bool rc = false;

-       spin_lock(&fp->lock);
+       spin_lock_bh(&fp->lock);
        WARN_ON(fp->state &
                (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));

        if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
                rc = true;
-       fp->state = BNX2X_FP_STATE_IDLE;
-       spin_unlock(&fp->lock);
+
+       /* state ==> idle, unless currently disabled */
+       fp->state &= BNX2X_FP_STATE_DISABLED;
+       spin_unlock_bh(&fp->lock);
        return rc;
 }
···

        if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
                rc = true;
-       fp->state = BNX2X_FP_STATE_IDLE;
+
+       /* state ==> idle, unless currently disabled */
+       fp->state &= BNX2X_FP_STATE_DISABLED;
        spin_unlock_bh(&fp->lock);
        return rc;
 }
···
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
-       WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+       WARN_ON(!(fp->state & BNX2X_FP_OWNED));
        return fp->state & BNX2X_FP_USER_PEND;
+}
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+       int rc = true;
+
+       spin_lock_bh(&fp->lock);
+       if (fp->state & BNX2X_FP_OWNED)
+               rc = false;
+       fp->state |= BNX2X_FP_STATE_DISABLED;
+       spin_unlock_bh(&fp->lock);
+
+       return rc;
 }
 #else
 static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
···
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
        return false;
+}
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+       return true;
 }
 #endif /* CONFIG_NET_RX_BUSY_POLL */

+15 -13
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
···
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;
+       u16 split_bd_len = 0;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);
···
        DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
           txdata->txq_index, idx, tx_buf, skb);

-       /* unmap first bd */
        tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
-       dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
-                        BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 #ifdef BNX2X_STOP_ON_ERROR
···
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

-       /* ...and the TSO split header bd since they have no mapping */
+       /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+               tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+               split_bd_len = BD_UNMAP_LEN(tx_data_bd);
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }
+
+       /* unmap first bd */
+       dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+                        BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+                        DMA_TO_DEVICE);

        /* now free frags */
        while (nbd > 0) {
···
 {
        int i;

-       local_bh_disable();
        for_each_rx_queue_cnic(bp, i) {
                napi_disable(&bnx2x_fp(bp, i, napi));
-               while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-                       mdelay(1);
+               while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+                       usleep_range(1000, 2000);
        }
-       local_bh_enable();
 }

 static void bnx2x_napi_disable(struct bnx2x *bp)
 {
        int i;

-       local_bh_disable();
        for_each_eth_queue(bp, i) {
                napi_disable(&bnx2x_fp(bp, i, napi));
-               while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-                       mdelay(1);
+               while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+                       usleep_range(1000, 2000);
        }
-       local_bh_enable();
 }

 void bnx2x_netif_start(struct bnx2x *bp)
···
        bnx2x_napi_disable_cnic(bp);
 }

-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      void *accel_priv)
 {
        struct bnx2x *bp = netdev_priv(dev);

+2 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
···
 int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);

 /* select_queue callback */
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      void *accel_priv);

 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                         struct bnx2x_fastpath *fp,
+13 -20
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
···
        return __ixgbe_maybe_stop_tx(tx_ring, size);
 }

-#ifdef IXGBE_FCOE
-static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
+                             void *accel_priv)
 {
+       struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
+#ifdef IXGBE_FCOE
        struct ixgbe_adapter *adapter;
        struct ixgbe_ring_feature *f;
        int txq;
+#endif
+
+       if (fwd_adapter)
+               return skb->queue_mapping + fwd_adapter->tx_base_queue;
+
+#ifdef IXGBE_FCOE

        /*
         * only execute the code below if protocol is FCoE
···
                txq -= f->indices;

        return txq + f->offset;
+#else
+       return __netdev_pick_tx(dev, skb);
+#endif
 }

-#endif
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                                   struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *tx_ring)
···
        kfree(fwd_adapter);
 }

-static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
-                                 struct net_device *dev,
-                                 void *priv)
-{
-       struct ixgbe_fwd_adapter *fwd_adapter = priv;
-       unsigned int queue;
-       struct ixgbe_ring *tx_ring;
-
-       queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
-       tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
-
-       return __ixgbe_xmit_frame(skb, dev, tx_ring);
-}
-
 static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_open               = ixgbe_open,
        .ndo_stop               = ixgbe_close,
        .ndo_start_xmit         = ixgbe_xmit_frame,
-#ifdef IXGBE_FCOE
        .ndo_select_queue       = ixgbe_select_queue,
-#endif
        .ndo_set_rx_mode        = ixgbe_set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = ixgbe_set_mac,
···
        .ndo_bridge_getlink     = ixgbe_ndo_bridge_getlink,
        .ndo_dfwd_add_station   = ixgbe_fwd_add,
        .ndo_dfwd_del_station   = ixgbe_fwd_del,
-       .ndo_dfwd_start_xmit    = ixgbe_fwd_xmit,
 };

 /**
+2 -1
drivers/net/ethernet/lantiq_etop.c
···
 }

 static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
+ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
+                     void *accel_priv)
 {
        /* we are currently only using the first queue */
        return 0;
+2 -1
drivers/net/ethernet/mellanox/mlx4/en_tx.c
···
        }
 }

-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+                        void *accel_priv)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        u16 rings_p_up = priv->num_tx_rings_p_up;
+2 -1
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
···
 int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);

 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+                        void *accel_priv);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);

 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+1
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
···
 void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
 void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
 void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+void qlcnic_update_stats(struct qlcnic_adapter *);

 /* Adapter hardware abstraction */
 struct qlcnic_hardware_ops {
+22 -19
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
···

 #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)

-static inline int qlcnic_82xx_statistics(void)
+static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
 {
-       return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
-              ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+       return ARRAY_SIZE(qlcnic_gstrings_stats) +
+              ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+              QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
 }

-static inline int qlcnic_83xx_statistics(void)
+static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
 {
-       return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+       return ARRAY_SIZE(qlcnic_gstrings_stats) +
+              ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
               ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
-              ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+              ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
+              QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
 }

 static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
 {
-       if (qlcnic_82xx_check(adapter))
-               return qlcnic_82xx_statistics();
-       else if (qlcnic_83xx_check(adapter))
-               return qlcnic_83xx_statistics();
-       else
-               return -1;
+       int len = -1;
+
+       if (qlcnic_82xx_check(adapter)) {
+               len = qlcnic_82xx_statistics(adapter);
+               if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+                       len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
+       } else if (qlcnic_83xx_check(adapter)) {
+               len = qlcnic_83xx_statistics(adapter);
+       }
+
+       return len;
 }

 #define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412
···

 static int qlcnic_get_sset_count(struct net_device *dev, int sset)
 {
-       int len;

        struct qlcnic_adapter *adapter = netdev_priv(dev);
        switch (sset) {
        case ETH_SS_TEST:
                return QLCNIC_TEST_LEN;
        case ETH_SS_STATS:
-               len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN;
-               if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
-                   qlcnic_83xx_check(adapter))
-                       return len;
-               return qlcnic_82xx_statistics();
+               return qlcnic_dev_statistics_len(adapter);
        default:
                return -EOPNOTSUPP;
        }
···
        return data;
 }

-static void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+void qlcnic_update_stats(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_host_tx_ring *tx_ring;
        int ring;
+3
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
···
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct net_device_stats *stats = &netdev->stats;

+       if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               qlcnic_update_stats(adapter);
+
        stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
        stats->tx_packets = adapter->stats.xmitfinished;
        stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
+2 -1
drivers/net/ethernet/tile/tilegx.c
···
 }

 /* Return subqueue id on this core (one per core). */
-static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
+                                void *accel_priv)
 {
        return smp_processor_id();
 }
+7 -7
drivers/net/macvlan.c
···

        if (vlan->fwd_priv) {
                skb->dev = vlan->lowerdev;
-               ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+               ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
        } else {
                ret = macvlan_queue_xmit(skb, dev);
        }
···
        .cache_update   = eth_header_cache_update,
 };

+static struct rtnl_link_ops macvlan_link_ops;
+
 static int macvlan_open(struct net_device *dev)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
···
                goto hash_add;
        }

-       if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+       if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
+           dev->rtnl_link_ops == &macvlan_link_ops) {
                vlan->fwd_priv =
                      lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);

···
                 */
                if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
                        vlan->fwd_priv = NULL;
-               } else {
-                       dev->features &= ~NETIF_F_LLTX;
+               } else
                        return 0;
-               }
        }

        err = -EBUSY;
···
        features = netdev_increment_features(vlan->lowerdev->features,
                                             features,
                                             mask);
-       if (!vlan->fwd_priv)
-               features |= NETIF_F_LLTX;
+       features |= NETIF_F_LLTX;

        return features;
 }
+2 -1
drivers/net/team/team.c
···
        return NETDEV_TX_OK;
 }

-static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
+                            void *accel_priv)
 {
        /*
         * This helper function exists to help dev_pick_tx get the correct
+2 -1
drivers/net/tun.c
···
  * different rxq no. here. If we could not get rxhash, then we would
  * hope the rxq no. may help here.
  */
-static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+                           void *accel_priv)
 {
        struct tun_struct *tun = netdev_priv(dev);
        struct tun_flow_entry *e;
+8 -2
drivers/net/wireless/iwlwifi/pcie/drv.c
···
        {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
+1 -1
drivers/net/wireless/mac80211_hwsim.c
···
            (hwsim_flags & HWSIM_TX_STAT_ACK)) {
                if (skb->len >= 16) {
                        hdr = (struct ieee80211_hdr *) skb->data;
-                       mac80211_hwsim_monitor_ack(txi->rate_driver_data[0],
+                       mac80211_hwsim_monitor_ack(data2->channel,
                                                   hdr->addr2);
                }
                txi->flags |= IEEE80211_TX_STAT_ACK;
+2 -1
drivers/net/wireless/mwifiex/main.c
···
 }

 static u16
-mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb)
+mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
+                               void *accel_priv)
 {
        skb->priority = cfg80211_classify8021d(skb);
        return mwifiex_1d_to_wmm_queue[skb->priority];
+2 -1
drivers/staging/bcm/Bcmnet.c
···
        return 0;
 }

-static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
+                           void *accel_priv)
 {
        return ClassifyPacket(netdev_priv(dev), skb);
 }
+2 -1
drivers/staging/netlogic/xlr_net.c
···
        return NETDEV_TX_OK;
 }

-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb)
+static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
+                               void *accel_priv)
 {
        return (u16)smp_processor_id();
 }
+2 -1
drivers/staging/rtl8188eu/os_dep/os_intfs.c
···
        return dscp >> 5;
 }

-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+                           void *accel_priv)
 {
        struct adapter *padapter = rtw_netdev_priv(dev);
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+8 -4
include/linux/netdevice.h
···
  *     (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *     Required can not be NULL.
  *
- * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
+ * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
+ *                         void *accel_priv);
  *     Called to decide which queue to when device supports multiple
  *     transmit queues.
  *
···
        netdev_tx_t             (*ndo_start_xmit) (struct sk_buff *skb,
                                                   struct net_device *dev);
        u16                     (*ndo_select_queue)(struct net_device *dev,
-                                                   struct sk_buff *skb);
+                                                   struct sk_buff *skb,
+                                                   void *accel_priv);
        void                    (*ndo_change_rx_flags)(struct net_device *dev,
                                                       int flags);
        void                    (*ndo_set_rx_mode)(struct net_device *dev);
···
 }

 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-                                   struct sk_buff *skb);
+                                   struct sk_buff *skb,
+                                   void *accel_priv);
 u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);

 /*
···
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct sk_buff *newskb);
 int dev_queue_xmit(struct sk_buff *skb);
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 void unregister_netdevice_many(struct list_head *head);
···
 int dev_get_phys_port_id(struct net_device *dev,
                          struct netdev_phys_port_id *ppid);
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-                       struct netdev_queue *txq, void *accel_priv);
+                       struct netdev_queue *txq);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);

 extern int netdev_budget;
+17 -12
net/core/dev.c
···
 }

 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-                       struct netdev_queue *txq, void *accel_priv)
+                       struct netdev_queue *txq)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
        int rc = NETDEV_TX_OK;
···
                        dev_queue_xmit_nit(skb, dev);

                skb_len = skb->len;
-               if (accel_priv)
-                       rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
-               else
-                       rc = ops->ndo_start_xmit(skb, dev);
+               rc = ops->ndo_start_xmit(skb, dev);

                trace_net_dev_xmit(skb, rc, dev, skb_len);
-               if (rc == NETDEV_TX_OK && txq)
+               if (rc == NETDEV_TX_OK)
                        txq_trans_update(txq);
                return rc;
        }
···
                        dev_queue_xmit_nit(nskb, dev);

                skb_len = nskb->len;
-               if (accel_priv)
-                       rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
-               else
-                       rc = ops->ndo_start_xmit(nskb, dev);
+               rc = ops->ndo_start_xmit(nskb, dev);
                trace_net_dev_xmit(nskb, rc, dev, skb_len);
                if (unlikely(rc != NETDEV_TX_OK)) {
                        if (rc & ~NETDEV_TX_MASK)
···
  * the BH enable code must have IRQs enabled so that it will not deadlock.
  *          --BLG
  */
-int dev_queue_xmit(struct sk_buff *skb)
+int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 {
        struct net_device *dev = skb->dev;
        struct netdev_queue *txq;
···

        skb_update_prio(skb);

-       txq = netdev_pick_tx(dev, skb);
+       txq = netdev_pick_tx(dev, skb, accel_priv);
        q = rcu_dereference_bh(txq->qdisc);

 #ifdef CONFIG_NET_CLS_ACT
···

                        if (!netif_xmit_stopped(txq)) {
                                __this_cpu_inc(xmit_recursion);
-                               rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+                               rc = dev_hard_start_xmit(skb, dev, txq);
                                __this_cpu_dec(xmit_recursion);
                                if (dev_xmit_complete(rc)) {
                                        HARD_TX_UNLOCK(dev, txq);
···
        rcu_read_unlock_bh();
        return rc;
 }
+
+int dev_queue_xmit(struct sk_buff *skb)
+{
+       return __dev_queue_xmit(skb, NULL);
+}
 EXPORT_SYMBOL(dev_queue_xmit);
+
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+{
+       return __dev_queue_xmit(skb, accel_priv);
+}
+EXPORT_SYMBOL(dev_queue_xmit_accel);


 /*=======================================================================
+7 -3
net/core/flow_dissector.c
···
 EXPORT_SYMBOL(__netdev_pick_tx);

 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-                                   struct sk_buff *skb)
+                                   struct sk_buff *skb,
+                                   void *accel_priv)
 {
        int queue_index = 0;

        if (dev->real_num_tx_queues != 1) {
                const struct net_device_ops *ops = dev->netdev_ops;
                if (ops->ndo_select_queue)
-                       queue_index = ops->ndo_select_queue(dev, skb);
+                       queue_index = ops->ndo_select_queue(dev, skb,
+                                                           accel_priv);
                else
                        queue_index = __netdev_pick_tx(dev, skb);
-               queue_index = dev_cap_txqueue(dev, queue_index);
+
+               if (!accel_priv)
+                       queue_index = dev_cap_txqueue(dev, queue_index);
        }

        skb_set_queue_mapping(skb, queue_index);
+1 -1
net/core/netpoll.c
···
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;

-               txq = netdev_pick_tx(dev, skb);
+               txq = netdev_pick_tx(dev, skb, NULL);

                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
+4 -2
net/ipv6/addrconf.c
···
        struct inet6_ifaddr *ifp;

        ifp = ipv6_add_addr(idev, addr, NULL, plen,
-                           scope, IFA_F_PERMANENT, 0, 0);
+                           scope, IFA_F_PERMANENT,
+                           INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
        if (!IS_ERR(ifp)) {
                spin_lock_bh(&ifp->lock);
                ifp->flags &= ~IFA_F_TENTATIVE;
···
 #endif


-       ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0);
+       ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
+                           INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
        if (!IS_ERR(ifp)) {
                addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
                addrconf_dad_start(ifp);
+6
net/ipv6/ip6_vti.c
···
 static inline int vti6_dev_init_gen(struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
+       int i;

        t->dev = dev;
        t->net = dev_net(dev);
        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;
+       for_each_possible_cpu(i) {
+               struct pcpu_tstats *stats;
+               stats = per_cpu_ptr(dev->tstats, i);
+               u64_stats_init(&stats->syncp);
+       }
        return 0;
 }
+4 -2
net/mac80211/iface.c
···
 }

 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
-                                        struct sk_buff *skb)
+                                        struct sk_buff *skb,
+                                        void *accel_priv)
 {
        return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
···
 };

 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
-                                         struct sk_buff *skb)
+                                         struct sk_buff *skb,
+                                         void *accel_priv)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
+13 -10
net/mac80211/tx.c
···
 {
        struct sta_info *sta = tx->sta;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
        struct ieee80211_local *local = tx->local;

        if (unlikely(!sta))
···
                     test_sta_flag(sta, WLAN_STA_PS_DRIVER)) &&
                    !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
                int ac = skb_get_queue_mapping(tx->skb);
-
-               /* only deauth, disassoc and action are bufferable MMPDUs */
-               if (ieee80211_is_mgmt(hdr->frame_control) &&
-                   !ieee80211_is_deauth(hdr->frame_control) &&
-                   !ieee80211_is_disassoc(hdr->frame_control) &&
-                   !ieee80211_is_action(hdr->frame_control)) {
-                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
-                       return TX_CONTINUE;
-               }

                ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
                       sta->sta.addr, sta->sta.aid, ac);
···
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 {
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+
        if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
                return TX_CONTINUE;
+
+       /* only deauth, disassoc and action are bufferable MMPDUs */
+       if (ieee80211_is_mgmt(hdr->frame_control) &&
+           !ieee80211_is_deauth(hdr->frame_control) &&
+           !ieee80211_is_disassoc(hdr->frame_control) &&
+           !ieee80211_is_action(hdr->frame_control)) {
+               if (tx->flags & IEEE80211_TX_UNICAST)
+                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+               return TX_CONTINUE;
+       }

        if (tx->flags & IEEE80211_TX_UNICAST)
                return ieee80211_tx_h_unicast_ps_buf(tx);
+1 -1
net/netfilter/nf_conntrack_seqadj.c
···
                return 0;

        if (unlikely(!seqadj)) {
-               WARN(1, "Wrong seqadj usage, missing nfct_seqadj_ext_add()\n");
+               WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n");
                return 0;
        }

+27 -5
net/netfilter/nf_nat_irc.c
···
                             struct nf_conntrack_expect *exp)
 {
        char buffer[sizeof("4294967296 65635")];
+       struct nf_conn *ct = exp->master;
+       union nf_inet_addr newaddr;
        u_int16_t port;
        unsigned int ret;

        /* Reply comes from server. */
+       newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
+
        exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
        exp->dir = IP_CT_DIR_REPLY;
        exp->expectfn = nf_nat_follow_master;
···
        }

        if (port == 0) {
-               nf_ct_helper_log(skb, exp->master, "all ports in use");
+               nf_ct_helper_log(skb, ct, "all ports in use");
                return NF_DROP;
        }

-       ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
-                                      protoff, matchoff, matchlen, buffer,
-                                      strlen(buffer));
+       /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
+        * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
+        * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26
+        * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26
+        * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27
+        *
+        * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits,
+        *                        255.255.255.255==4294967296, 10 digits)
+        * P:         bound port (min 1 d, max 5d (65635))
+        * F:         filename   (min 1 d )
+        * S:         size       (min 1 d )
+        * 0x01, \n:  terminators
+        */
+       /* AAA = "us", ie. where server normally talks to. */
+       snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port);
+       pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
+                buffer, &newaddr.ip, port);
+
+       ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
+                                      matchlen, buffer, strlen(buffer));
        if (ret != NF_ACCEPT) {
-               nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
+               nf_ct_helper_log(skb, ct, "cannot mangle packet");
                nf_ct_unexpect_related(exp);
        }
+
        return ret;
 }
+1 -1
net/nfc/core.c
···
 {
        dev->dep_link_up = true;

-       if (!dev->active_target) {
+       if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) {
                struct nfc_target *target;

                target = nfc_find_target(dev, target_idx);
+1 -1
net/sched/sch_generic.c
···

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
-               ret = dev_hard_start_xmit(skb, dev, txq, NULL);
+               ret = dev_hard_start_xmit(skb, dev, txq);

        HARD_TX_UNLOCK(dev, txq);

+1
net/tipc/link.c
···
                int type;

                head = head->next;
+               buf->next = NULL;

                /* Ensure bearer is still enabled */
                if (unlikely(!b_ptr->active))