Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netdevice: add queue selection fallback handler for ndo_select_queue

Add a new argument for ndo_select_queue() callback that passes a
fallback handler. This gets invoked through netdev_pick_tx();
fallback handler is currently __netdev_pick_tx() as most drivers
invoke this function within their customized implementation for
skbs that don't need any special handling. This fallback
handler can then be replaced on other call-sites with different
queue selection methods (e.g. in packet sockets, pktgen etc).

This also has the nice side-effect that __netdev_pick_tx() is
then only invoked from netdev_pick_tx() and export of that
function to modules can be undone.

Suggested-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by: Daniel Borkmann
Committed by: David S. Miller
Commit hashes: 99932d4f c321f7d7

Diffstat (total): +31 -27
+1 -1
drivers/net/bonding/bond_main.c
@@ 3709 @@
 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     void *accel_priv)
+			     void *accel_priv, select_queue_fallback_t fallback)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct
+2 -2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ 1873 @@
 }

 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       void *accel_priv)
+		       void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct bnx2x *bp = netdev_priv(dev);

@@ 1895 @@
 	}

 	/* select a non-FCoE queue */
-	return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
 }

 void bnx2x_set_num_queues(struct bnx2x *bp)
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ 496 @@

 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       void *accel_priv);
+		       void *accel_priv, select_queue_fallback_t fallback);

 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 					struct bnx2x_fastpath *fp,
+3 -3
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ 6881 @@
 }

 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
-			      void *accel_priv)
+			      void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
 #ifdef IXGBE_FCOE
@@ 6907 @@
 		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 			break;
 	default:
-		return __netdev_pick_tx(dev, skb);
+		return fallback(dev, skb);
 	}

 	f = &adapter->ring_feature[RING_F_FCOE];
@@ 6921 @@
 	return txq + f->offset;
 #else
-	return __netdev_pick_tx(dev, skb);
+	return fallback(dev, skb);
 #endif
 }
+1 -1
drivers/net/ethernet/lantiq_etop.c
@@ 619 @@

 static u16
 ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
-		      void *accel_priv)
+		      void *accel_priv, select_queue_fallback_t fallback)
 {
 	/* we are currently only using the first queue */
 	return 0;
+2 -2
drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ 629 @@
 }

 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-			 void *accel_priv)
+			 void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	u16 rings_p_up = priv->num_tx_rings_p_up;
@@ 641 @@
 	if (vlan_tx_tag_present(skb))
 		up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;

-	return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up;
+	return fallback(dev, skb) % rings_p_up + up * rings_p_up;
 }

 static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
+1 -1
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ 723 @@

 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-			 void *accel_priv);
+			 void *accel_priv, select_queue_fallback_t fallback);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);

 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+1 -1
drivers/net/ethernet/tile/tilegx.c
@@ 2071 @@

 /* Return subqueue id on this core (one per core). */
 static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
-				 void *accel_priv)
+				 void *accel_priv, select_queue_fallback_t fallback)
 {
 	return smp_processor_id();
 }
+1 -1
drivers/net/team/team.c
@@ 1648 @@
 }

 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     void *accel_priv)
+			     void *accel_priv, select_queue_fallback_t fallback)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct
+1 -1
drivers/net/tun.c
@@ 366 @@
  * hope the rxq no. may help here.
  */
 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    void *accel_priv)
+			    void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct tun_struct *tun = netdev_priv(dev);
 	struct tun_flow_entry *e;
+1 -1
drivers/net/wireless/mwifiex/main.c
@@ 748 @@

 static u16
 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
-				void *accel_priv)
+				void *accel_priv, select_queue_fallback_t fallback)
 {
 	skb->priority = cfg80211_classify8021d(skb, NULL);
 	return mwifiex_1d_to_wmm_queue[skb->priority];
+1 -1
drivers/staging/bcm/Bcmnet.c
@@ 40 @@
 }

 static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    void *accel_priv)
+			    void *accel_priv, select_queue_fallback_t fallback)
 {
 	return ClassifyPacket(netdev_priv(dev), skb);
 }
+1 -1
drivers/staging/netlogic/xlr_net.c
@@ 307 @@
 }

 static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
-				void *accel_priv)
+				void *accel_priv, select_queue_fallback_t fallback)
 {
 	return (u16)smp_processor_id();
 }
+1 -1
drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ 653 @@
 }

 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    void *accel_priv)
+			    void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct adapter *padapter = rtw_netdev_priv(dev);
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+6 -3
include/linux/netdevice.h
@@ 752 @@
 	unsigned char id_len;
 };

+typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
+				       struct sk_buff *skb);
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ 786 @@
  * Required can not be NULL.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- *                         void *accel_priv);
+ *                         void *accel_priv, select_queue_fallback_t fallback);
  * Called to decide which queue to when device supports multiple
  * transmit queues.
  *
@@ 1008 @@
 						   struct net_device *dev);
 	u16			(*ndo_select_queue)(struct net_device *dev,
 						    struct sk_buff *skb,
-						    void *accel_priv);
+						    void *accel_priv,
+						    select_queue_fallback_t fallback);
 	void			(*ndo_change_rx_flags)(struct net_device *dev,
 						       int flags);
 	void			(*ndo_set_rx_mode)(struct net_device *dev);
@@ 1555 @@
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
 				    void *accel_priv);
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);

 /*
  * Net namespace inlines
+3 -4
net/core/flow_dissector.c
@@ 372 @@
 #endif
 }

-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 	int queue_index = sk_tx_queue_get(sk);
@@ 392 @@

 	return queue_index;
 }
-EXPORT_SYMBOL(__netdev_pick_tx);

 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
@@ 402 @@
 	if (dev->real_num_tx_queues != 1) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb,
-							    accel_priv);
+			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+							    __netdev_pick_tx);
 		else
 			queue_index = __netdev_pick_tx(dev, skb);
+4 -2
net/mac80211/iface.c
@@ 1057 @@

 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
 					 struct sk_buff *skb,
-					 void *accel_priv)
+					 void *accel_priv,
+					 select_queue_fallback_t fallback)
 {
 	return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
@@ 1076 @@

 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
 					  struct sk_buff *skb,
-					  void *accel_priv)
+					  void *accel_priv,
+					  select_queue_fallback_t fallback)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;