Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: ll_temac: Make RX/TX ring sizes configurable

Add support for setting the RX and TX ring sizes for this driver using
ethtool. Also increase the default RX ring size as the previous default
was far too low for good performance in some configurations.

Signed-off-by: Esben Haabendal <esben@geanix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Esben Haabendal; committed by David S. Miller.
f7b261bf 7c462a0c

+72 -26
+2
drivers/net/ethernet/xilinx/ll_temac.h
··· 369 369 /* Buffer descriptors */ 370 370 struct cdmac_bd *tx_bd_v; 371 371 dma_addr_t tx_bd_p; 372 + u32 tx_bd_num; 372 373 struct cdmac_bd *rx_bd_v; 373 374 dma_addr_t rx_bd_p; 375 + u32 rx_bd_num; 374 376 int tx_bd_ci; 375 377 int tx_bd_tail; 376 378 int rx_bd_ci;
+70 -26
drivers/net/ethernet/xilinx/ll_temac_main.c
··· 58 58 59 59 #include "ll_temac.h" 60 60 61 - #define TX_BD_NUM 64 62 - #define RX_BD_NUM 128 61 + /* Descriptors defines for Tx and Rx DMA */ 62 + #define TX_BD_NUM_DEFAULT 64 63 + #define RX_BD_NUM_DEFAULT 1024 64 + #define TX_BD_NUM_MAX 4096 65 + #define RX_BD_NUM_MAX 4096 63 66 64 67 /* --------------------------------------------------------------------- 65 68 * Low level register access functions ··· 304 301 /* Reset Local Link (DMA) */ 305 302 lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST); 306 303 307 - for (i = 0; i < RX_BD_NUM; i++) { 304 + for (i = 0; i < lp->rx_bd_num; i++) { 308 305 if (!lp->rx_skb[i]) 309 306 break; 310 307 else { ··· 315 312 } 316 313 if (lp->rx_bd_v) 317 314 dma_free_coherent(ndev->dev.parent, 318 - sizeof(*lp->rx_bd_v) * RX_BD_NUM, 319 - lp->rx_bd_v, lp->rx_bd_p); 315 + sizeof(*lp->rx_bd_v) * lp->rx_bd_num, 316 + lp->rx_bd_v, lp->rx_bd_p); 320 317 if (lp->tx_bd_v) 321 318 dma_free_coherent(ndev->dev.parent, 322 - sizeof(*lp->tx_bd_v) * TX_BD_NUM, 323 - lp->tx_bd_v, lp->tx_bd_p); 319 + sizeof(*lp->tx_bd_v) * lp->tx_bd_num, 320 + lp->tx_bd_v, lp->tx_bd_p); 324 321 } 325 322 326 323 /** ··· 333 330 dma_addr_t skb_dma_addr; 334 331 int i; 335 332 336 - lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb), 337 - GFP_KERNEL); 333 + lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num, 334 + sizeof(*lp->rx_skb), GFP_KERNEL); 338 335 if (!lp->rx_skb) 339 336 goto out; 340 337 341 338 /* allocate the tx and rx ring buffer descriptors. */ 342 339 /* returns a virtual address and a physical address. 
*/ 343 340 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 344 - sizeof(*lp->tx_bd_v) * TX_BD_NUM, 341 + sizeof(*lp->tx_bd_v) * lp->tx_bd_num, 345 342 &lp->tx_bd_p, GFP_KERNEL); 346 343 if (!lp->tx_bd_v) 347 344 goto out; 348 345 349 346 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 350 - sizeof(*lp->rx_bd_v) * RX_BD_NUM, 347 + sizeof(*lp->rx_bd_v) * lp->rx_bd_num, 351 348 &lp->rx_bd_p, GFP_KERNEL); 352 349 if (!lp->rx_bd_v) 353 350 goto out; 354 351 355 - for (i = 0; i < TX_BD_NUM; i++) { 352 + for (i = 0; i < lp->tx_bd_num; i++) { 356 353 lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p 357 - + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM)); 354 + + sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num)); 358 355 } 359 356 360 - for (i = 0; i < RX_BD_NUM; i++) { 357 + for (i = 0; i < lp->rx_bd_num; i++) { 361 358 lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p 362 - + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM)); 359 + + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num)); 363 360 364 361 skb = netdev_alloc_skb_ip_align(ndev, 365 362 XTE_MAX_JUMBO_FRAME_SIZE); ··· 392 389 lp->tx_bd_ci = 0; 393 390 lp->tx_bd_tail = 0; 394 391 lp->rx_bd_ci = 0; 395 - lp->rx_bd_tail = RX_BD_NUM - 1; 392 + lp->rx_bd_tail = lp->rx_bd_num - 1; 396 393 397 394 /* Enable RX DMA transfers */ 398 395 wmb(); ··· 787 784 ndev->stats.tx_bytes += be32_to_cpu(cur_p->len); 788 785 789 786 lp->tx_bd_ci++; 790 - if (lp->tx_bd_ci >= TX_BD_NUM) 787 + if (lp->tx_bd_ci >= lp->tx_bd_num) 791 788 lp->tx_bd_ci = 0; 792 789 793 790 cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; ··· 813 810 return NETDEV_TX_BUSY; 814 811 815 812 tail++; 816 - if (tail >= TX_BD_NUM) 813 + if (tail >= lp->tx_bd_num) 817 814 tail = 0; 818 815 819 816 cur_p = &lp->tx_bd_v[tail]; ··· 877 874 ptr_to_txbd((void *)skb, cur_p); 878 875 879 876 for (ii = 0; ii < num_frag; ii++) { 880 - if (++lp->tx_bd_tail >= TX_BD_NUM) 877 + if (++lp->tx_bd_tail >= lp->tx_bd_num) 881 878 lp->tx_bd_tail = 0; 882 879 883 880 cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; ··· 
887 884 DMA_TO_DEVICE); 888 885 if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) { 889 886 if (--lp->tx_bd_tail < 0) 890 - lp->tx_bd_tail = TX_BD_NUM - 1; 887 + lp->tx_bd_tail = lp->tx_bd_num - 1; 891 888 cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 892 889 while (--ii >= 0) { 893 890 --frag; ··· 896 893 skb_frag_size(frag), 897 894 DMA_TO_DEVICE); 898 895 if (--lp->tx_bd_tail < 0) 899 - lp->tx_bd_tail = TX_BD_NUM - 1; 896 + lp->tx_bd_tail = lp->tx_bd_num - 1; 900 897 cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 901 898 } 902 899 dma_unmap_single(ndev->dev.parent, ··· 915 912 916 913 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; 917 914 lp->tx_bd_tail++; 918 - if (lp->tx_bd_tail >= TX_BD_NUM) 915 + if (lp->tx_bd_tail >= lp->tx_bd_num) 919 916 lp->tx_bd_tail = 0; 920 917 921 918 skb_tx_timestamp(skb); ··· 935 932 return 0; 936 933 available = 1 + lp->rx_bd_tail - lp->rx_bd_ci; 937 934 if (available <= 0) 938 - available += RX_BD_NUM; 935 + available += lp->rx_bd_num; 939 936 return available; 940 937 } 941 938 ··· 1004 1001 ndev->stats.rx_bytes += length; 1005 1002 1006 1003 rx_bd = lp->rx_bd_ci; 1007 - if (++lp->rx_bd_ci >= RX_BD_NUM) 1004 + if (++lp->rx_bd_ci >= lp->rx_bd_num) 1008 1005 lp->rx_bd_ci = 0; 1009 1006 } while (rx_bd != lp->rx_bd_tail); 1010 1007 ··· 1035 1032 dma_addr_t skb_dma_addr; 1036 1033 1037 1034 rx_bd = lp->rx_bd_tail + 1; 1038 - if (rx_bd >= RX_BD_NUM) 1035 + if (rx_bd >= lp->rx_bd_num) 1039 1036 rx_bd = 0; 1040 1037 bd = &lp->rx_bd_v[rx_bd]; 1041 1038 ··· 1251 1248 .attrs = temac_device_attrs, 1252 1249 }; 1253 1250 1254 - /* ethtool support */ 1251 + /* --------------------------------------------------------------------- 1252 + * ethtool support 1253 + */ 1254 + 1255 + static void ll_temac_ethtools_get_ringparam(struct net_device *ndev, 1256 + struct ethtool_ringparam *ering) 1257 + { 1258 + struct temac_local *lp = netdev_priv(ndev); 1259 + 1260 + ering->rx_max_pending = RX_BD_NUM_MAX; 1261 + ering->rx_mini_max_pending = 0; 1262 + 
ering->rx_jumbo_max_pending = 0; 1263 + ering->tx_max_pending = TX_BD_NUM_MAX; 1264 + ering->rx_pending = lp->rx_bd_num; 1265 + ering->rx_mini_pending = 0; 1266 + ering->rx_jumbo_pending = 0; 1267 + ering->tx_pending = lp->tx_bd_num; 1268 + } 1269 + 1270 + static int ll_temac_ethtools_set_ringparam(struct net_device *ndev, 1271 + struct ethtool_ringparam *ering) 1272 + { 1273 + struct temac_local *lp = netdev_priv(ndev); 1274 + 1275 + if (ering->rx_pending > RX_BD_NUM_MAX || 1276 + ering->rx_mini_pending || 1277 + ering->rx_jumbo_pending || 1278 + ering->tx_pending > TX_BD_NUM_MAX) 1279 + return -EINVAL; 1280 + 1281 + if (netif_running(ndev)) 1282 + return -EBUSY; 1283 + 1284 + lp->rx_bd_num = ering->rx_pending; 1285 + lp->tx_bd_num = ering->tx_pending; 1286 + return 0; 1287 + } 1288 + 1255 1289 static const struct ethtool_ops temac_ethtool_ops = { 1256 1290 .nway_reset = phy_ethtool_nway_reset, 1257 1291 .get_link = ethtool_op_get_link, 1258 1292 .get_ts_info = ethtool_op_get_ts_info, 1259 1293 .get_link_ksettings = phy_ethtool_get_link_ksettings, 1260 1294 .set_link_ksettings = phy_ethtool_set_link_ksettings, 1295 + .get_ringparam = ll_temac_ethtools_get_ringparam, 1296 + .set_ringparam = ll_temac_ethtools_set_ringparam, 1261 1297 }; 1262 1298 1263 1299 static int temac_probe(struct platform_device *pdev) ··· 1340 1298 lp->ndev = ndev; 1341 1299 lp->dev = &pdev->dev; 1342 1300 lp->options = XTE_OPTION_DEFAULTS; 1301 + lp->rx_bd_num = RX_BD_NUM_DEFAULT; 1302 + lp->tx_bd_num = TX_BD_NUM_DEFAULT; 1343 1303 spin_lock_init(&lp->rx_lock); 1344 1304 INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func); 1345 1305