Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

net: remove 'fallback' argument from dev->ndo_select_queue()

After the previous patch, all the callers of ndo_select_queue()
provide netdev_pick_tx as the 'fallback' argument. The only
exceptions are nested calls to ndo_select_queue(), which pass down
the 'fallback' available in the current scope - still netdev_pick_tx.

We can therefore drop the argument and replace every fallback()
invocation with netdev_pick_tx(). This avoids an indirect call per
xmit packet in some scenarios (TCP syn, UDP unconnected, XDP generic,
pktgen) for device drivers implementing such an ndo. It also cleans
up the code a bit.
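
For reference, the shape of the change, condensed from the
include/linux/netdevice.h and ena hunks below:

    /* before: the core passed a fallback helper to the driver */
    u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
                            struct net_device *sb_dev,
                            select_queue_fallback_t fallback);

    /* after: the argument is gone ... */
    u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
                            struct net_device *sb_dev);

    /* ... and drivers that want the default policy call it directly */
    if (skb_rx_queue_recorded(skb))
        qid = skb_get_rx_queue(skb);
    else
        qid = netdev_pick_tx(dev, skb, NULL);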

Tested with ixgbe and CONFIG_FCOE=m

With pktgen using queue xmit:
threads    vanilla    patched
            (kpps)     (kpps)
      1       2334       2428
      2       4166       4278
      4       7895       8100

v1 -> v2:
- rebased after helper's name change

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Paolo Abeni, committed by David S. Miller
a350ecce b71b5837

32 files changed, 57 insertions(+), 97 deletions(-)
+1 -2
drivers/infiniband/hw/hfi1/vnic_main.c
···
 
 static u16 hfi1_vnic_select_queue(struct net_device *netdev,
                                   struct sk_buff *skb,
-                                  struct net_device *sb_dev,
-                                  select_queue_fallback_t fallback)
+                                  struct net_device *sb_dev)
 {
         struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
         struct opa_vnic_skb_mdata *mdata;
+2 -4
drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
···
 }
 
 static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
-                                 struct net_device *sb_dev,
-                                 select_queue_fallback_t fallback)
+                                 struct net_device *sb_dev)
 {
         struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
         struct opa_vnic_skb_mdata *mdata;
···
         mdata = skb_push(skb, sizeof(*mdata));
         mdata->entropy = opa_vnic_calc_entropy(skb);
         mdata->vl = opa_vnic_get_vl(adapter, skb);
-        rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
-                                               sb_dev, fallback);
+        rc = adapter->rn_ops->ndo_select_queue(netdev, skb, sb_dev);
         skb_pull(skb, sizeof(*mdata));
         return rc;
 }
+1 -2
drivers/net/bonding/bond_main.c
···
 
 
 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
-                             struct net_device *sb_dev,
-                             select_queue_fallback_t fallback)
+                             struct net_device *sb_dev)
 {
         /* This helper function exists to help dev_pick_tx get the correct
          * destination queue. Using a helper function skips a call to
+2 -3
drivers/net/ethernet/amazon/ena/ena_netdev.c
···
 }
 
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            struct net_device *sb_dev,
-                            select_queue_fallback_t fallback)
+                            struct net_device *sb_dev)
 {
         u16 qid;
         /* we suspect that this is good for in--kernel network services that
···
         if (skb_rx_queue_recorded(skb))
                 qid = skb_get_rx_queue(skb);
         else
-                qid = fallback(dev, skb, NULL);
+                qid = netdev_pick_tx(dev, skb, NULL);
 
         return qid;
 }
+3 -4
drivers/net/ethernet/broadcom/bcmsysport.c
···
 };
 
 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
-                                    struct net_device *sb_dev,
-                                    select_queue_fallback_t fallback)
+                                    struct net_device *sb_dev)
 {
         struct bcm_sysport_priv *priv = netdev_priv(dev);
         u16 queue = skb_get_queue_mapping(skb);
···
         unsigned int q, port;
 
         if (!netdev_uses_dsa(dev))
-                return fallback(dev, skb, NULL);
+                return netdev_pick_tx(dev, skb, NULL);
 
         /* DSA tagging layer will have configured the correct queue */
         q = BRCM_TAG_GET_QUEUE(queue);
···
         tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
 
         if (unlikely(!tx_ring))
-                return fallback(dev, skb, NULL);
+                return netdev_pick_tx(dev, skb, NULL);
 
         return tx_ring->index;
 }
+2 -3
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
···
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-                       struct net_device *sb_dev,
-                       select_queue_fallback_t fallback)
+                       struct net_device *sb_dev)
 {
         struct bnx2x *bp = netdev_priv(dev);
···
         }
 
         /* select a non-FCoE queue */
-        return fallback(dev, skb, NULL) %
+        return netdev_pick_tx(dev, skb, NULL) %
                (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 
+1 -2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
···
 
 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-                       struct net_device *sb_dev,
-                       select_queue_fallback_t fallback);
+                       struct net_device *sb_dev);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                         struct bnx2x_fastpath *fp,
+2 -3
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
···
 }
 
 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
-                             struct net_device *sb_dev,
-                             select_queue_fallback_t fallback)
+                             struct net_device *sb_dev)
 {
         int txq;
 
···
                 return txq;
         }
 
-        return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+        return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
 }
 
 static int closest_timer(const struct sge *s, int time)
+2 -3
drivers/net/ethernet/hisilicon/hns/hns_enet.c
···
 
 static u16
 hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                     struct net_device *sb_dev,
-                     select_queue_fallback_t fallback)
+                     struct net_device *sb_dev)
 {
         struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
         struct hns_nic_priv *priv = netdev_priv(ndev);
···
             is_multicast_ether_addr(eth_hdr->h_dest))
                 return 0;
         else
-                return fallback(ndev, skb, NULL);
+                return netdev_pick_tx(ndev, skb, NULL);
 }
 
 static const struct net_device_ops hns_nic_netdev_ops = {
+2 -3
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
···
 
 #ifdef IXGBE_FCOE
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
-                              struct net_device *sb_dev,
-                              select_queue_fallback_t fallback)
+                              struct net_device *sb_dev)
 {
         struct ixgbe_adapter *adapter;
         struct ixgbe_ring_feature *f;
···
                 break;
                 /* fall through */
         default:
-                return fallback(dev, skb, sb_dev);
+                return netdev_pick_tx(dev, skb, sb_dev);
         }
 
         f = &adapter->ring_feature[RING_F_FCOE];
+3 -4
drivers/net/ethernet/mellanox/mlx4/en_tx.c
···
 }
 
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-                         struct net_device *sb_dev,
-                         select_queue_fallback_t fallback)
+                         struct net_device *sb_dev)
 {
         struct mlx4_en_priv *priv = netdev_priv(dev);
         u16 rings_p_up = priv->num_tx_rings_p_up;
 
         if (netdev_get_num_tc(dev))
-                return fallback(dev, skb, NULL);
+                return netdev_pick_tx(dev, skb, NULL);
 
-        return fallback(dev, skb, NULL) % rings_p_up;
+        return netdev_pick_tx(dev, skb, NULL) % rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, const void *src,
+1 -2
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
···
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-                         struct net_device *sb_dev,
-                         select_queue_fallback_t fallback);
+                         struct net_device *sb_dev);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
                                struct mlx4_en_rx_alloc *frame,
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/en.h
···
 void mlx5e_build_ptys2ethtool_map(void);
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-                       struct net_device *sb_dev,
-                       select_queue_fallback_t fallback);
+                       struct net_device *sb_dev);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                           struct mlx5e_tx_wqe *wqe, u16 pi);
+2 -3
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
···
 #endif
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-                       struct net_device *sb_dev,
-                       select_queue_fallback_t fallback)
+                       struct net_device *sb_dev)
 {
+        int channel_ix = netdev_pick_tx(dev, skb, NULL);
         struct mlx5e_priv *priv = netdev_priv(dev);
-        int channel_ix = fallback(dev, skb, NULL);
         u16 num_channels;
         int up = 0;
+1 -2
drivers/net/ethernet/qlogic/qede/qede.h
···
 /* Datapath functions definition */
 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
 u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      struct net_device *sb_dev,
-                      select_queue_fallback_t fallback);
+                      struct net_device *sb_dev);
 netdev_features_t qede_features_check(struct sk_buff *skb,
                                       struct net_device *dev,
                                       netdev_features_t features);
+2 -3
drivers/net/ethernet/qlogic/qede/qede_fp.c
···
 }
 
 u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      struct net_device *sb_dev,
-                      select_queue_fallback_t fallback)
+                      struct net_device *sb_dev)
 {
         struct qede_dev *edev = netdev_priv(dev);
         int total_txq;
···
         total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
 
         return QEDE_TSS_COUNT(edev) ?
-                fallback(dev, skb, NULL) % total_txq : 0;
+                netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
 }
 
 /* 8B udp header + 8B base tunnel header + 32B option length */
+1 -2
drivers/net/ethernet/renesas/ravb_main.c
···
 }
 
 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                             struct net_device *sb_dev,
-                             select_queue_fallback_t fallback)
+                             struct net_device *sb_dev)
 {
         /* If skb needs TX timestamp, it is handled in network control queue */
         return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
+1 -2
drivers/net/ethernet/sun/ldmvsw.c
···
 }
 
 static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            struct net_device *sb_dev,
-                            select_queue_fallback_t fallback)
+                            struct net_device *sb_dev)
 {
         struct vnet_port *port = netdev_priv(dev);
 
+1 -2
drivers/net/ethernet/sun/sunvnet.c
···
 }
 
 static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
-                             struct net_device *sb_dev,
-                             select_queue_fallback_t fallback)
+                             struct net_device *sb_dev)
 {
         struct vnet *vp = netdev_priv(dev);
         struct vnet_port *port = __tx_port_find(vp, skb);
+4 -6
drivers/net/hyperv/netvsc_drv.c
···
  * If a valid queue has already been assigned, then use that.
  * Otherwise compute tx queue based on hash and the send table.
  *
- * This is basically similar to default (__netdev_pick_tx) with the added step
+ * This is basically similar to default (netdev_pick_tx) with the added step
  * of using the host send_table when no other queue has been assigned.
  *
  * TODO support XPS - but get_xps_queue not exported
···
 }
 
 static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                               struct net_device *sb_dev,
-                               select_queue_fallback_t fallback)
+                               struct net_device *sb_dev)
 {
         struct net_device_context *ndc = netdev_priv(ndev);
         struct net_device *vf_netdev;
···
                 const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
 
                 if (vf_ops->ndo_select_queue)
-                        txq = vf_ops->ndo_select_queue(vf_netdev, skb,
-                                                       sb_dev, fallback);
+                        txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
                 else
-                        txq = fallback(vf_netdev, skb, NULL);
+                        txq = netdev_pick_tx(vf_netdev, skb, NULL);
 
                 /* Record the queue selected by VF so that it can be
                  * used for common case where VF has more queues than
+3 -5
drivers/net/net_failover.c
···
 
 static u16 net_failover_select_queue(struct net_device *dev,
                                      struct sk_buff *skb,
-                                     struct net_device *sb_dev,
-                                     select_queue_fallback_t fallback)
+                                     struct net_device *sb_dev)
 {
         struct net_failover_info *nfo_info = netdev_priv(dev);
         struct net_device *primary_dev;
···
                 const struct net_device_ops *ops = primary_dev->netdev_ops;
 
                 if (ops->ndo_select_queue)
-                        txq = ops->ndo_select_queue(primary_dev, skb,
-                                                    sb_dev, fallback);
+                        txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
                 else
-                        txq = fallback(primary_dev, skb, NULL);
+                        txq = netdev_pick_tx(primary_dev, skb, NULL);
 
                 qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
 
+1 -2
drivers/net/team/team.c
···
 }
 
 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
-                             struct net_device *sb_dev,
-                             select_queue_fallback_t fallback)
+                             struct net_device *sb_dev)
 {
         /*
          * This helper function exists to help dev_pick_tx get the correct
+1 -2
drivers/net/tun.c
···
 }
 
 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            struct net_device *sb_dev,
-                            select_queue_fallback_t fallback)
+                            struct net_device *sb_dev)
 {
         struct tun_struct *tun = netdev_priv(dev);
         u16 ret;
+1 -2
drivers/net/wireless/marvell/mwifiex/main.c
···
 
 static u16
 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
-                                struct net_device *sb_dev,
-                                select_queue_fallback_t fallback)
+                                struct net_device *sb_dev)
 {
         skb->priority = cfg80211_classify8021d(skb, NULL);
         return mwifiex_1d_to_wmm_queue[skb->priority];
+3 -3
drivers/net/xen-netback/interface.c
···
 }
 
 static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-                               struct net_device *sb_dev,
-                               select_queue_fallback_t fallback)
+                               struct net_device *sb_dev)
 {
         struct xenvif *vif = netdev_priv(dev);
         unsigned int size = vif->hash.size;
···
                 return 0;
 
         if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-                return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+                return netdev_pick_tx(dev, skb, NULL) %
+                       dev->real_num_tx_queues;
 
         xenvif_set_skb_hash(vif, skb);
 
+1 -2
drivers/net/xen-netfront.c
···
 }
 
 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
-                               struct net_device *sb_dev,
-                               select_queue_fallback_t fallback)
+                               struct net_device *sb_dev)
 {
         unsigned int num_queues = dev->real_num_tx_queues;
         u32 hash;
+1 -2
drivers/staging/rtl8188eu/os_dep/os_intfs.c
···
 }
 
 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            struct net_device *sb_dev,
-                            select_queue_fallback_t fallback)
+                            struct net_device *sb_dev)
 {
         struct adapter *padapter = rtw_netdev_priv(dev);
         struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+1 -2
drivers/staging/rtl8723bs/os_dep/os_intfs.c
···
 
 
 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            struct net_device *sb_dev,
-                            select_queue_fallback_t fallback)
+                            struct net_device *sb_dev)
 {
         struct adapter *padapter = rtw_netdev_priv(dev);
         struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+4 -8
include/linux/netdevice.h
···
  *      those the driver believes to be appropriate.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- *                         struct net_device *sb_dev,
- *                         select_queue_fallback_t fallback);
+ *                         struct net_device *sb_dev);
  *      Called to decide which queue to use when device supports multiple
  *      transmit queues.
  *
···
                                                 netdev_features_t features);
         u16                     (*ndo_select_queue)(struct net_device *dev,
                                                     struct sk_buff *skb,
-                                                    struct net_device *sb_dev,
-                                                    select_queue_fallback_t fallback);
+                                                    struct net_device *sb_dev);
         void                    (*ndo_change_rx_flags)(struct net_device *dev,
                                                        int flags);
         void                    (*ndo_set_rx_mode)(struct net_device *dev);
···
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
-                     struct net_device *sb_dev,
-                     select_queue_fallback_t fallback);
+                     struct net_device *sb_dev);
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
-                       struct net_device *sb_dev,
-                       select_queue_fallback_t fallback);
+                       struct net_device *sb_dev);
 int dev_queue_xmit(struct sk_buff *skb);
 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+3 -6
net/core/dev.c
···
 }
 
 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
-                     struct net_device *sb_dev,
-                     select_queue_fallback_t fallback)
+                     struct net_device *sb_dev)
 {
         return 0;
 }
 EXPORT_SYMBOL(dev_pick_tx_zero);
 
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
-                       struct net_device *sb_dev,
-                       select_queue_fallback_t fallback)
+                       struct net_device *sb_dev)
 {
         return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
 }
···
         const struct net_device_ops *ops = dev->netdev_ops;
 
         if (ops->ndo_select_queue)
-                queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
-                                                    netdev_pick_tx);
+                queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
         else
                 queue_index = netdev_pick_tx(dev, skb, sb_dev);
 
+2 -4
net/mac80211/iface.c
···
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
                                          struct sk_buff *skb,
-                                         struct net_device *sb_dev,
-                                         select_queue_fallback_t fallback)
+                                         struct net_device *sb_dev)
 {
         return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
···
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
                                           struct sk_buff *skb,
-                                          struct net_device *sb_dev,
-                                          select_queue_fallback_t fallback)
+                                          struct net_device *sb_dev)
 {
         struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
         struct ieee80211_local *local = sdata->local;
+1 -2
net/packet/af_packet.c
···
 #endif
         skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
         if (ops->ndo_select_queue) {
-                queue_index = ops->ndo_select_queue(dev, skb, NULL,
-                                                    netdev_pick_tx);
+                queue_index = ops->ndo_select_queue(dev, skb, NULL);
                 queue_index = netdev_cap_txqueue(dev, queue_index);
         } else {
                 queue_index = netdev_pick_tx(dev, skb, NULL);