Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mvneta: add XDP ethtool errors stats for TX to driver

Add ethtool stats for the case where XDP-transmitted packets overrun the TX
queue. This is recorded separately for XDP_TX and ndo_xdp_xmit. This
is an important aid for troubleshooting XDP-based setups.

It is currently a known weakness and property of XDP that there isn't
any push-back or congestion feedback when transmitting frames via XDP.
This situation is easy to provoke when redirecting from a higher-speed
link into a slower-speed link, or simply from two ingress links into a
single egress link.
The situation can also happen when Ethernet flow control is active.

For testing the patch and provoking the situation on my Espressobin
board, I configured the TX queue to be smaller (434) than the RX queue
(512) and overloaded the network with large-MTU frames (as a larger
frame takes longer to transmit).

Hopefully the upcoming XDP TX hook can be extended to provide insight
into these TX queue overflows, to allow programmable adaptation
strategies.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Jesper Dangaard Brouer and committed by
David S. Miller
15070919 0b56a29f

+26 -4
+26 -4
drivers/net/ethernet/marvell/mvneta.c
··· 344 344 ETHTOOL_XDP_REDIRECT, 345 345 ETHTOOL_XDP_PASS, 346 346 ETHTOOL_XDP_DROP, 347 - ETHTOOL_XDP_XMIT, 348 347 ETHTOOL_XDP_TX, 348 + ETHTOOL_XDP_TX_ERR, 349 + ETHTOOL_XDP_XMIT, 350 + ETHTOOL_XDP_XMIT_ERR, 349 351 ETHTOOL_MAX_STATS, 350 352 }; 351 353 ··· 406 404 { ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", }, 407 405 { ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", }, 408 406 { ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", }, 407 + { ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", }, 409 408 { ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", }, 409 + { ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", }, 410 410 }; 411 411 412 412 struct mvneta_stats { ··· 421 417 u64 xdp_pass; 422 418 u64 xdp_drop; 423 419 u64 xdp_xmit; 420 + u64 xdp_xmit_err; 424 421 u64 xdp_tx; 422 + u64 xdp_tx_err; 425 423 }; 426 424 427 425 struct mvneta_ethtool_stats { ··· 2065 2059 static int 2066 2060 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) 2067 2061 { 2062 + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2068 2063 struct mvneta_tx_queue *txq; 2069 2064 struct netdev_queue *nq; 2070 2065 struct xdp_frame *xdpf; ··· 2083 2076 __netif_tx_lock(nq, cpu); 2084 2077 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false); 2085 2078 if (ret == MVNETA_XDP_TX) { 2086 - struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2087 - 2088 2079 u64_stats_update_begin(&stats->syncp); 2089 2080 stats->es.ps.tx_bytes += xdpf->len; 2090 2081 stats->es.ps.tx_packets++; ··· 2090 2085 u64_stats_update_end(&stats->syncp); 2091 2086 2092 2087 mvneta_txq_pend_desc_add(pp, txq, 0); 2088 + } else { 2089 + u64_stats_update_begin(&stats->syncp); 2090 + stats->es.ps.xdp_tx_err++; 2091 + u64_stats_update_end(&stats->syncp); 2093 2092 } 2094 2093 __netif_tx_unlock(nq); 2095 2094 ··· 2137 2128 stats->es.ps.tx_bytes += nxmit_byte; 2138 2129 stats->es.ps.tx_packets += nxmit; 2139 2130 stats->es.ps.xdp_xmit += nxmit; 2131 + stats->es.ps.xdp_xmit_err += num_frame - nxmit; 2140 2132 
u64_stats_update_end(&stats->syncp); 2141 2133 2142 2134 return nxmit; ··· 2162 2152 int err; 2163 2153 2164 2154 err = xdp_do_redirect(pp->dev, xdp, prog); 2165 - if (err) { 2155 + if (unlikely(err)) { 2166 2156 ret = MVNETA_XDP_DROPPED; 2167 2157 page_pool_put_page(rxq->page_pool, 2168 2158 virt_to_head_page(xdp->data), len, ··· 4528 4518 u64 skb_alloc_error; 4529 4519 u64 refill_error; 4530 4520 u64 xdp_redirect; 4521 + u64 xdp_xmit_err; 4522 + u64 xdp_tx_err; 4531 4523 u64 xdp_pass; 4532 4524 u64 xdp_drop; 4533 4525 u64 xdp_xmit; ··· 4544 4532 xdp_pass = stats->es.ps.xdp_pass; 4545 4533 xdp_drop = stats->es.ps.xdp_drop; 4546 4534 xdp_xmit = stats->es.ps.xdp_xmit; 4535 + xdp_xmit_err = stats->es.ps.xdp_xmit_err; 4547 4536 xdp_tx = stats->es.ps.xdp_tx; 4537 + xdp_tx_err = stats->es.ps.xdp_tx_err; 4548 4538 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 4549 4539 4550 4540 es->skb_alloc_error += skb_alloc_error; ··· 4555 4541 es->ps.xdp_pass += xdp_pass; 4556 4542 es->ps.xdp_drop += xdp_drop; 4557 4543 es->ps.xdp_xmit += xdp_xmit; 4544 + es->ps.xdp_xmit_err += xdp_xmit_err; 4558 4545 es->ps.xdp_tx += xdp_tx; 4546 + es->ps.xdp_tx_err += xdp_tx_err; 4559 4547 } 4560 4548 } 4561 4549 ··· 4610 4594 case ETHTOOL_XDP_TX: 4611 4595 pp->ethtool_stats[i] = stats.ps.xdp_tx; 4612 4596 break; 4597 + case ETHTOOL_XDP_TX_ERR: 4598 + pp->ethtool_stats[i] = stats.ps.xdp_tx_err; 4599 + break; 4613 4600 case ETHTOOL_XDP_XMIT: 4614 4601 pp->ethtool_stats[i] = stats.ps.xdp_xmit; 4602 + break; 4603 + case ETHTOOL_XDP_XMIT_ERR: 4604 + pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; 4615 4605 break; 4616 4606 } 4617 4607 break;