Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'veth-flexible-channel-numbers'

Paolo Abeni says:

====================
veth: more flexible channels number configuration

XDP setups can benefit from multiple veth RX/TX queues. Currently
veth allows setting such numbers only at creation time via the
'numrxqueues' and 'numtxqueues' parameters.

This series introduces support for the ethtool set_channels operation,
allowing the number of queues to be configured at runtime.

The veth default configuration is not changed.

Finally self-tests are updated to check the new features, with both
valid and invalid arguments.

This iteration is a rebase of the most recent RFC; it does not provide
a module parameter to configure the default number of queues, but I
think that could still be worthwhile.

RFC v1 -> RFC v2:
- report more consistent 'combined' count
- make set_channel as resilient as possible to errors
- drop module parameter - but I would still consider it.
- more self-tests
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+434 -54
+252 -53
drivers/net/veth.c
··· 224 224 { 225 225 channels->tx_count = dev->real_num_tx_queues; 226 226 channels->rx_count = dev->real_num_rx_queues; 227 - channels->max_tx = dev->real_num_tx_queues; 228 - channels->max_rx = dev->real_num_rx_queues; 229 - channels->combined_count = min(dev->real_num_rx_queues, dev->real_num_tx_queues); 230 - channels->max_combined = min(dev->real_num_rx_queues, dev->real_num_tx_queues); 227 + channels->max_tx = dev->num_tx_queues; 228 + channels->max_rx = dev->num_rx_queues; 231 229 } 230 + 231 + static int veth_set_channels(struct net_device *dev, 232 + struct ethtool_channels *ch); 232 233 233 234 static const struct ethtool_ops veth_ethtool_ops = { 234 235 .get_drvinfo = veth_get_drvinfo, ··· 240 239 .get_link_ksettings = veth_get_link_ksettings, 241 240 .get_ts_info = ethtool_op_get_ts_info, 242 241 .get_channels = veth_get_channels, 242 + .set_channels = veth_set_channels, 243 243 }; 244 244 245 245 /* general routines */ ··· 930 928 return done; 931 929 } 932 930 933 - static int __veth_napi_enable(struct net_device *dev) 931 + static int __veth_napi_enable_range(struct net_device *dev, int start, int end) 934 932 { 935 933 struct veth_priv *priv = netdev_priv(dev); 936 934 int err, i; 937 935 938 - for (i = 0; i < dev->real_num_rx_queues; i++) { 936 + for (i = start; i < end; i++) { 939 937 struct veth_rq *rq = &priv->rq[i]; 940 938 941 939 err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL); ··· 943 941 goto err_xdp_ring; 944 942 } 945 943 946 - for (i = 0; i < dev->real_num_rx_queues; i++) { 944 + for (i = start; i < end; i++) { 947 945 struct veth_rq *rq = &priv->rq[i]; 948 946 949 947 napi_enable(&rq->xdp_napi); ··· 951 949 } 952 950 953 951 return 0; 952 + 954 953 err_xdp_ring: 955 - for (i--; i >= 0; i--) 954 + for (i--; i >= start; i--) 956 955 ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free); 957 956 958 957 return err; 959 958 } 960 959 961 - static void veth_napi_del(struct net_device *dev) 960 + static int 
__veth_napi_enable(struct net_device *dev) 961 + { 962 + return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues); 963 + } 964 + 965 + static void veth_napi_del_range(struct net_device *dev, int start, int end) 962 966 { 963 967 struct veth_priv *priv = netdev_priv(dev); 964 968 int i; 965 969 966 - for (i = 0; i < dev->real_num_rx_queues; i++) { 970 + for (i = start; i < end; i++) { 967 971 struct veth_rq *rq = &priv->rq[i]; 968 972 969 973 rcu_assign_pointer(priv->rq[i].napi, NULL); ··· 978 970 } 979 971 synchronize_net(); 980 972 981 - for (i = 0; i < dev->real_num_rx_queues; i++) { 973 + for (i = start; i < end; i++) { 982 974 struct veth_rq *rq = &priv->rq[i]; 983 975 984 976 rq->rx_notify_masked = false; ··· 986 978 } 987 979 } 988 980 981 + static void veth_napi_del(struct net_device *dev) 982 + { 983 + veth_napi_del_range(dev, 0, dev->real_num_rx_queues); 984 + } 985 + 989 986 static bool veth_gro_requested(const struct net_device *dev) 990 987 { 991 988 return !!(dev->wanted_features & NETIF_F_GRO); 989 + } 990 + 991 + static int veth_enable_xdp_range(struct net_device *dev, int start, int end, 992 + bool napi_already_on) 993 + { 994 + struct veth_priv *priv = netdev_priv(dev); 995 + int err, i; 996 + 997 + for (i = start; i < end; i++) { 998 + struct veth_rq *rq = &priv->rq[i]; 999 + 1000 + if (!napi_already_on) 1001 + netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); 1002 + err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id); 1003 + if (err < 0) 1004 + goto err_rxq_reg; 1005 + 1006 + err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, 1007 + MEM_TYPE_PAGE_SHARED, 1008 + NULL); 1009 + if (err < 0) 1010 + goto err_reg_mem; 1011 + 1012 + /* Save original mem info as it can be overwritten */ 1013 + rq->xdp_mem = rq->xdp_rxq.mem; 1014 + } 1015 + return 0; 1016 + 1017 + err_reg_mem: 1018 + xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); 1019 + err_rxq_reg: 1020 + for (i--; i >= start; i--) { 1021 + struct veth_rq *rq = &priv->rq[i]; 
1022 + 1023 + xdp_rxq_info_unreg(&rq->xdp_rxq); 1024 + if (!napi_already_on) 1025 + netif_napi_del(&rq->xdp_napi); 1026 + } 1027 + 1028 + return err; 1029 + } 1030 + 1031 + static void veth_disable_xdp_range(struct net_device *dev, int start, int end, 1032 + bool delete_napi) 1033 + { 1034 + struct veth_priv *priv = netdev_priv(dev); 1035 + int i; 1036 + 1037 + for (i = start; i < end; i++) { 1038 + struct veth_rq *rq = &priv->rq[i]; 1039 + 1040 + rq->xdp_rxq.mem = rq->xdp_mem; 1041 + xdp_rxq_info_unreg(&rq->xdp_rxq); 1042 + 1043 + if (delete_napi) 1044 + netif_napi_del(&rq->xdp_napi); 1045 + } 992 1046 } 993 1047 994 1048 static int veth_enable_xdp(struct net_device *dev) ··· 1060 990 int err, i; 1061 991 1062 992 if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) { 1063 - for (i = 0; i < dev->real_num_rx_queues; i++) { 1064 - struct veth_rq *rq = &priv->rq[i]; 1065 - 1066 - if (!napi_already_on) 1067 - netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); 1068 - err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id); 1069 - if (err < 0) 1070 - goto err_rxq_reg; 1071 - 1072 - err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, 1073 - MEM_TYPE_PAGE_SHARED, 1074 - NULL); 1075 - if (err < 0) 1076 - goto err_reg_mem; 1077 - 1078 - /* Save original mem info as it can be overwritten */ 1079 - rq->xdp_mem = rq->xdp_rxq.mem; 1080 - } 993 + err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on); 994 + if (err) 995 + return err; 1081 996 1082 997 if (!napi_already_on) { 1083 998 err = __veth_napi_enable(dev); 1084 - if (err) 1085 - goto err_rxq_reg; 999 + if (err) { 1000 + veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true); 1001 + return err; 1002 + } 1086 1003 1087 1004 if (!veth_gro_requested(dev)) { 1088 1005 /* user-space did not require GRO, but adding XDP ··· 1087 1030 } 1088 1031 1089 1032 return 0; 1090 - err_reg_mem: 1091 - xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); 1092 - err_rxq_reg: 1093 - for (i--; i >= 0; i--) { 
1094 - struct veth_rq *rq = &priv->rq[i]; 1095 - 1096 - xdp_rxq_info_unreg(&rq->xdp_rxq); 1097 - if (!napi_already_on) 1098 - netif_napi_del(&rq->xdp_napi); 1099 - } 1100 - 1101 - return err; 1102 1033 } 1103 1034 1104 1035 static void veth_disable_xdp(struct net_device *dev) ··· 1109 1064 } 1110 1065 } 1111 1066 1112 - for (i = 0; i < dev->real_num_rx_queues; i++) { 1113 - struct veth_rq *rq = &priv->rq[i]; 1114 - 1115 - rq->xdp_rxq.mem = rq->xdp_mem; 1116 - xdp_rxq_info_unreg(&rq->xdp_rxq); 1117 - } 1067 + veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false); 1118 1068 } 1119 1069 1120 - static int veth_napi_enable(struct net_device *dev) 1070 + static int veth_napi_enable_range(struct net_device *dev, int start, int end) 1121 1071 { 1122 1072 struct veth_priv *priv = netdev_priv(dev); 1123 1073 int err, i; 1124 1074 1125 - for (i = 0; i < dev->real_num_rx_queues; i++) { 1075 + for (i = start; i < end; i++) { 1126 1076 struct veth_rq *rq = &priv->rq[i]; 1127 1077 1128 1078 netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); 1129 1079 } 1130 1080 1131 - err = __veth_napi_enable(dev); 1081 + err = __veth_napi_enable_range(dev, start, end); 1132 1082 if (err) { 1133 - for (i = 0; i < dev->real_num_rx_queues; i++) { 1083 + for (i = start; i < end; i++) { 1134 1084 struct veth_rq *rq = &priv->rq[i]; 1135 1085 1136 1086 netif_napi_del(&rq->xdp_napi); ··· 1133 1093 return err; 1134 1094 } 1135 1095 return err; 1096 + } 1097 + 1098 + static int veth_napi_enable(struct net_device *dev) 1099 + { 1100 + return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues); 1101 + } 1102 + 1103 + static void veth_disable_range_safe(struct net_device *dev, int start, int end) 1104 + { 1105 + struct veth_priv *priv = netdev_priv(dev); 1106 + 1107 + if (start >= end) 1108 + return; 1109 + 1110 + if (priv->_xdp_prog) { 1111 + veth_napi_del_range(dev, start, end); 1112 + veth_disable_xdp_range(dev, start, end, false); 1113 + } else if (veth_gro_requested(dev)) { 
1114 + veth_napi_del_range(dev, start, end); 1115 + } 1116 + } 1117 + 1118 + static int veth_enable_range_safe(struct net_device *dev, int start, int end) 1119 + { 1120 + struct veth_priv *priv = netdev_priv(dev); 1121 + int err; 1122 + 1123 + if (start >= end) 1124 + return 0; 1125 + 1126 + if (priv->_xdp_prog) { 1127 + /* these channels are freshly initialized, napi is not on there even 1128 + * when GRO is requeste 1129 + */ 1130 + err = veth_enable_xdp_range(dev, start, end, false); 1131 + if (err) 1132 + return err; 1133 + 1134 + err = __veth_napi_enable_range(dev, start, end); 1135 + if (err) { 1136 + /* on error always delete the newly added napis */ 1137 + veth_disable_xdp_range(dev, start, end, true); 1138 + return err; 1139 + } 1140 + } else if (veth_gro_requested(dev)) { 1141 + return veth_napi_enable_range(dev, start, end); 1142 + } 1143 + return 0; 1144 + } 1145 + 1146 + static int veth_set_channels(struct net_device *dev, 1147 + struct ethtool_channels *ch) 1148 + { 1149 + struct veth_priv *priv = netdev_priv(dev); 1150 + unsigned int old_rx_count, new_rx_count; 1151 + struct veth_priv *peer_priv; 1152 + struct net_device *peer; 1153 + int err; 1154 + 1155 + /* sanity check. Upper bounds are already enforced by the caller */ 1156 + if (!ch->rx_count || !ch->tx_count) 1157 + return -EINVAL; 1158 + 1159 + /* avoid braking XDP, if that is enabled */ 1160 + peer = rtnl_dereference(priv->peer); 1161 + peer_priv = peer ? 
netdev_priv(peer) : NULL; 1162 + if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues) 1163 + return -EINVAL; 1164 + 1165 + if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues) 1166 + return -EINVAL; 1167 + 1168 + old_rx_count = dev->real_num_rx_queues; 1169 + new_rx_count = ch->rx_count; 1170 + if (netif_running(dev)) { 1171 + /* turn device off */ 1172 + netif_carrier_off(dev); 1173 + if (peer) 1174 + netif_carrier_off(peer); 1175 + 1176 + /* try to allocate new resurces, as needed*/ 1177 + err = veth_enable_range_safe(dev, old_rx_count, new_rx_count); 1178 + if (err) 1179 + goto out; 1180 + } 1181 + 1182 + err = netif_set_real_num_rx_queues(dev, ch->rx_count); 1183 + if (err) 1184 + goto revert; 1185 + 1186 + err = netif_set_real_num_tx_queues(dev, ch->tx_count); 1187 + if (err) { 1188 + int err2 = netif_set_real_num_rx_queues(dev, old_rx_count); 1189 + 1190 + /* this error condition could happen only if rx and tx change 1191 + * in opposite directions (e.g. 
tx nr raises, rx nr decreases) 1192 + * and we can't do anything to fully restore the original 1193 + * status 1194 + */ 1195 + if (err2) 1196 + pr_warn("Can't restore rx queues config %d -> %d %d", 1197 + new_rx_count, old_rx_count, err2); 1198 + else 1199 + goto revert; 1200 + } 1201 + 1202 + out: 1203 + if (netif_running(dev)) { 1204 + /* note that we need to swap the arguments WRT the enable part 1205 + * to identify the range we have to disable 1206 + */ 1207 + veth_disable_range_safe(dev, new_rx_count, old_rx_count); 1208 + netif_carrier_on(dev); 1209 + if (peer) 1210 + netif_carrier_on(peer); 1211 + } 1212 + return err; 1213 + 1214 + revert: 1215 + new_rx_count = old_rx_count; 1216 + old_rx_count = ch->rx_count; 1217 + goto out; 1136 1218 } 1137 1219 1138 1220 static int veth_open(struct net_device *dev) ··· 1609 1447 netdev_update_features(dev); 1610 1448 } 1611 1449 1450 + static int veth_init_queues(struct net_device *dev, struct nlattr *tb[]) 1451 + { 1452 + int err; 1453 + 1454 + if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) { 1455 + err = netif_set_real_num_tx_queues(dev, 1); 1456 + if (err) 1457 + return err; 1458 + } 1459 + if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) { 1460 + err = netif_set_real_num_rx_queues(dev, 1); 1461 + if (err) 1462 + return err; 1463 + } 1464 + return 0; 1465 + } 1466 + 1612 1467 static int veth_newlink(struct net *src_net, struct net_device *dev, 1613 1468 struct nlattr *tb[], struct nlattr *data[], 1614 1469 struct netlink_ext_ack *extack) ··· 1735 1556 1736 1557 priv = netdev_priv(dev); 1737 1558 rcu_assign_pointer(priv->peer, peer); 1559 + err = veth_init_queues(dev, tb); 1560 + if (err) 1561 + goto err_queues; 1738 1562 1739 1563 priv = netdev_priv(peer); 1740 1564 rcu_assign_pointer(priv->peer, dev); 1565 + err = veth_init_queues(peer, tb); 1566 + if (err) 1567 + goto err_queues; 1741 1568 1742 1569 veth_disable_gro(dev); 1743 1570 return 0; 1744 1571 1572 + err_queues: 1573 + 
unregister_netdevice(dev); 1745 1574 err_register_dev: 1746 1575 /* nothing to do */ 1747 1576 err_configure_peer: ··· 1795 1608 return peer ? dev_net(peer) : dev_net(dev); 1796 1609 } 1797 1610 1611 + static unsigned int veth_get_num_queues(void) 1612 + { 1613 + /* enforce the same queue limit as rtnl_create_link */ 1614 + int queues = num_possible_cpus(); 1615 + 1616 + if (queues > 4096) 1617 + queues = 4096; 1618 + return queues; 1619 + } 1620 + 1798 1621 static struct rtnl_link_ops veth_link_ops = { 1799 1622 .kind = DRV_NAME, 1800 1623 .priv_size = sizeof(struct veth_priv), ··· 1815 1618 .policy = veth_policy, 1816 1619 .maxtype = VETH_INFO_MAX, 1817 1620 .get_link_net = veth_get_link_net, 1621 + .get_num_tx_queues = veth_get_num_queues, 1622 + .get_num_rx_queues = veth_get_num_queues, 1818 1623 }; 1819 1624 1820 1625 /*
+182 -1
tools/testing/selftests/net/veth.sh
··· 13 13 readonly BM_NET_V4=192.168.1. 14 14 readonly BM_NET_V6=2001:db8:: 15 15 16 - readonly NPROCS=`nproc` 16 + readonly CPUS=`nproc` 17 17 ret=0 18 18 19 19 cleanup() { ··· 75 75 __chk_flag "$1" $2 $3 tcp-segmentation-offload 76 76 } 77 77 78 + chk_channels() { 79 + local msg="$1" 80 + local target=$2 81 + local rx=$3 82 + local tx=$4 83 + 84 + local dev=veth$target 85 + 86 + local cur_rx=`ip netns exec $BASE$target ethtool -l $dev |\ 87 + grep RX: | tail -n 1 | awk '{print $2}' ` 88 + local cur_tx=`ip netns exec $BASE$target ethtool -l $dev |\ 89 + grep TX: | tail -n 1 | awk '{print $2}'` 90 + local cur_combined=`ip netns exec $BASE$target ethtool -l $dev |\ 91 + grep Combined: | tail -n 1 | awk '{print $2}'` 92 + 93 + printf "%-60s" "$msg" 94 + if [ "$cur_rx" = "$rx" -a "$cur_tx" = "$tx" -a "$cur_combined" = "n/a" ]; then 95 + echo " ok " 96 + else 97 + echo " fail rx:$rx:$cur_rx tx:$tx:$cur_tx combined:n/a:$cur_combined" 98 + fi 99 + } 100 + 78 101 chk_gro() { 79 102 local msg="$1" 80 103 local expected=$2 ··· 130 107 fi 131 108 } 132 109 110 + __change_channels() 111 + { 112 + local cur_cpu 113 + local end=$1 114 + local cur 115 + local i 116 + 117 + while true; do 118 + printf -v cur '%(%s)T' 119 + [ $cur -le $end ] || break 120 + 121 + for i in `seq 1 $CPUS`; do 122 + ip netns exec $NS_SRC ethtool -L veth$SRC rx $i tx $i 123 + ip netns exec $NS_DST ethtool -L veth$DST rx $i tx $i 124 + done 125 + 126 + for i in `seq 1 $((CPUS - 1))`; do 127 + cur_cpu=$((CPUS - $i)) 128 + ip netns exec $NS_SRC ethtool -L veth$SRC rx $cur_cpu tx $cur_cpu 129 + ip netns exec $NS_DST ethtool -L veth$DST rx $cur_cpu tx $cur_cpu 130 + done 131 + done 132 + } 133 + 134 + __send_data() { 135 + local end=$1 136 + 137 + while true; do 138 + printf -v cur '%(%s)T' 139 + [ $cur -le $end ] || break 140 + 141 + ip netns exec $NS_SRC ./udpgso_bench_tx -4 -s 1000 -M 300 -D $BM_NET_V4$DST 142 + done 143 + } 144 + 145 + do_stress() { 146 + local end 147 + printf -v end '%(%s)T' 148 + 
end=$((end + $STRESS)) 149 + 150 + ip netns exec $NS_SRC ethtool -L veth$SRC rx 3 tx 3 151 + ip netns exec $NS_DST ethtool -L veth$DST rx 3 tx 3 152 + 153 + ip netns exec $NS_DST ./udpgso_bench_rx & 154 + local rx_pid=$! 155 + 156 + echo "Running stress test for $STRESS seconds..." 157 + __change_channels $end & 158 + local ch_pid=$! 159 + __send_data $end & 160 + local data_pid_1=$! 161 + __send_data $end & 162 + local data_pid_2=$! 163 + __send_data $end & 164 + local data_pid_3=$! 165 + __send_data $end & 166 + local data_pid_4=$! 167 + 168 + wait $ch_pid $data_pid_1 $data_pid_2 $data_pid_3 $data_pid_4 169 + kill -9 $rx_pid 170 + echo "done" 171 + 172 + # restore previous setting 173 + ip netns exec $NS_SRC ethtool -L veth$SRC rx 2 tx 2 174 + ip netns exec $NS_DST ethtool -L veth$DST rx 2 tx 1 175 + } 176 + 177 + usage() { 178 + echo "Usage: $0 [-h] [-s <seconds>]" 179 + echo -e "\t-h: show this help" 180 + echo -e "\t-s: run optional stress tests for the given amount of seconds" 181 + } 182 + 183 + STRESS=0 184 + while getopts "hs:" option; do 185 + case "$option" in 186 + "h") 187 + usage $0 188 + exit 0 189 + ;; 190 + "s") 191 + STRESS=$OPTARG 192 + ;; 193 + esac 194 + done 195 + 133 196 if [ ! -f ../bpf/xdp_dummy.o ]; then 134 197 echo "Missing xdp_dummy helper. 
Build bpf selftest first" 135 198 exit 1 136 199 fi 200 + 201 + [ $CPUS -lt 2 ] && echo "Only one CPU available, some tests will be skipped" 202 + [ $STRESS -gt 0 -a $CPUS -lt 3 ] && echo " stress test will be skipped, too" 137 203 138 204 create_ns 139 205 chk_gro_flag "default - gro flag" $SRC off ··· 246 134 cleanup 247 135 248 136 create_ns 137 + chk_channels "default channels" $DST 1 1 138 + 249 139 ip -n $NS_DST link set dev veth$DST down 250 140 ip netns exec $NS_DST ethtool -K veth$DST gro on 251 141 chk_gro_flag "with gro enabled on link down - gro flag" $DST on ··· 261 147 cleanup 262 148 263 149 create_ns 150 + 151 + CUR_TX=1 152 + CUR_RX=1 153 + if [ $CPUS -gt 1 ]; then 154 + ip netns exec $NS_DST ethtool -L veth$DST tx 2 155 + chk_channels "setting tx channels" $DST 1 2 156 + CUR_TX=2 157 + fi 158 + 159 + if [ $CPUS -gt 2 ]; then 160 + ip netns exec $NS_DST ethtool -L veth$DST rx 3 tx 3 161 + chk_channels "setting both rx and tx channels" $DST 3 3 162 + CUR_RX=3 163 + CUR_TX=3 164 + fi 165 + 166 + ip netns exec $NS_DST ethtool -L veth$DST combined 2 2>/dev/null 167 + chk_channels "bad setting: combined channels" $DST $CUR_RX $CUR_TX 168 + 169 + ip netns exec $NS_DST ethtool -L veth$DST tx $((CPUS + 1)) 2>/dev/null 170 + chk_channels "setting invalid channels nr" $DST $CUR_RX $CUR_TX 171 + 172 + if [ $CPUS -gt 1 ]; then 173 + # this also tests queues nr reduction 174 + ip netns exec $NS_DST ethtool -L veth$DST rx 1 tx 2 2>/dev/null 175 + ip netns exec $NS_SRC ethtool -L veth$SRC rx 1 tx 2 2>/dev/null 176 + printf "%-60s" "bad setting: XDP with RX nr less than TX" 177 + ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o \ 178 + section xdp_dummy 2>/dev/null &&\ 179 + echo "fail - set operation successful ?!?" 
|| echo " ok " 180 + 181 + # the following tests will run with multiple channels active 182 + ip netns exec $NS_SRC ethtool -L veth$SRC rx 2 183 + ip netns exec $NS_DST ethtool -L veth$DST rx 2 184 + ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o \ 185 + section xdp_dummy 2>/dev/null 186 + printf "%-60s" "bad setting: reducing RX nr below peer TX with XDP set" 187 + ip netns exec $NS_DST ethtool -L veth$DST rx 1 2>/dev/null &&\ 188 + echo "fail - set operation successful ?!?" || echo " ok " 189 + CUR_RX=2 190 + CUR_TX=2 191 + fi 192 + 193 + if [ $CPUS -gt 2 ]; then 194 + printf "%-60s" "bad setting: increasing peer TX nr above RX with XDP set" 195 + ip netns exec $NS_SRC ethtool -L veth$SRC tx 3 2>/dev/null &&\ 196 + echo "fail - set operation successful ?!?" || echo " ok " 197 + chk_channels "setting invalid channels nr" $DST 2 2 198 + fi 199 + 264 200 ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o section xdp_dummy 2>/dev/null 265 201 chk_gro_flag "with xdp attached - gro flag" $DST on 266 202 chk_gro_flag " - peer gro flag" $SRC off ··· 331 167 chk_gro_flag " - peer gro flag" $SRC off 332 168 chk_tso_flag " - tso flag" $SRC on 333 169 chk_tso_flag " - peer tso flag" $DST on 170 + 171 + if [ $CPUS -gt 1 ]; then 172 + ip netns exec $NS_DST ethtool -L veth$DST tx 1 173 + chk_channels "decreasing tx channels with device down" $DST 2 1 174 + fi 175 + 334 176 ip -n $NS_DST link set dev veth$DST up 335 177 ip -n $NS_SRC link set dev veth$SRC up 336 178 chk_gro " - aggregation" 1 179 + 180 + if [ $CPUS -gt 1 ]; then 181 + [ $STRESS -gt 0 -a $CPUS -gt 2 ] && do_stress 182 + 183 + ip -n $NS_DST link set dev veth$DST down 184 + ip -n $NS_SRC link set dev veth$SRC down 185 + ip netns exec $NS_DST ethtool -L veth$DST tx 2 186 + chk_channels "increasing tx channels with device down" $DST 2 2 187 + ip -n $NS_DST link set dev veth$DST up 188 + ip -n $NS_SRC link set dev veth$SRC up 189 + fi 337 190 338 191 ip netns exec $NS_DST ethtool -K 
veth$DST gro off 339 192 ip netns exec $NS_SRC ethtool -K veth$SRC tx-udp-segmentation off