Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: mvneta: align xdp stats naming scheme to mlx5 driver

Introduce an "rx" prefix in the naming scheme for XDP counters
on the rx path, and differentiate between XDP_TX and
ndo_xdp_xmit counters.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Lorenzo Bianconi; committed by David S. Miller.
7d51a015 c3d5e561

+36 -16
drivers/net/ethernet/marvell/mvneta.c
··· 344 344 ETHTOOL_XDP_REDIRECT, 345 345 ETHTOOL_XDP_PASS, 346 346 ETHTOOL_XDP_DROP, 347 + ETHTOOL_XDP_XMIT, 347 348 ETHTOOL_XDP_TX, 348 349 ETHTOOL_MAX_STATS, 349 350 }; ··· 400 399 { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", }, 401 400 { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", }, 402 401 { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", }, 403 - { ETHTOOL_XDP_REDIRECT, T_SW, "xdp_redirect", }, 404 - { ETHTOOL_XDP_PASS, T_SW, "xdp_pass", }, 405 - { ETHTOOL_XDP_DROP, T_SW, "xdp_drop", }, 406 - { ETHTOOL_XDP_TX, T_SW, "xdp_tx", }, 402 + { ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", }, 403 + { ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", }, 404 + { ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", }, 405 + { ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", }, 406 + { ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", }, 407 407 }; 408 408 409 409 struct mvneta_stats { ··· 416 414 u64 xdp_redirect; 417 415 u64 xdp_pass; 418 416 u64 xdp_drop; 417 + u64 xdp_xmit; 419 418 u64 xdp_tx; 420 419 }; 421 420 ··· 2015 2012 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, 2016 2013 struct xdp_frame *xdpf, bool dma_map) 2017 2014 { 2018 - struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2019 2015 struct mvneta_tx_desc *tx_desc; 2020 2016 struct mvneta_tx_buf *buf; 2021 2017 dma_addr_t dma_addr; ··· 2049 2047 tx_desc->buf_phys_addr = dma_addr; 2050 2048 tx_desc->data_size = xdpf->len; 2051 2049 2052 - u64_stats_update_begin(&stats->syncp); 2053 - stats->es.ps.tx_bytes += xdpf->len; 2054 - stats->es.ps.tx_packets++; 2055 - stats->es.ps.xdp_tx++; 2056 - u64_stats_update_end(&stats->syncp); 2057 - 2058 2050 mvneta_txq_inc_put(txq); 2059 2051 txq->pending++; 2060 2052 txq->count++; ··· 2075 2079 2076 2080 __netif_tx_lock(nq, cpu); 2077 2081 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false); 2078 - if (ret == MVNETA_XDP_TX) 2082 + if (ret == MVNETA_XDP_TX) { 2083 + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2084 + 2085 + 
u64_stats_update_begin(&stats->syncp); 2086 + stats->es.ps.tx_bytes += xdpf->len; 2087 + stats->es.ps.tx_packets++; 2088 + stats->es.ps.xdp_tx++; 2089 + u64_stats_update_end(&stats->syncp); 2090 + 2079 2091 mvneta_txq_pend_desc_add(pp, txq, 0); 2092 + } 2080 2093 __netif_tx_unlock(nq); 2081 2094 2082 2095 return ret; ··· 2096 2091 struct xdp_frame **frames, u32 flags) 2097 2092 { 2098 2093 struct mvneta_port *pp = netdev_priv(dev); 2094 + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2095 + int i, nxmit_byte = 0, nxmit = num_frame; 2099 2096 int cpu = smp_processor_id(); 2100 2097 struct mvneta_tx_queue *txq; 2101 2098 struct netdev_queue *nq; 2102 - int i, drops = 0; 2103 2099 u32 ret; 2104 2100 2105 2101 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) ··· 2112 2106 __netif_tx_lock(nq, cpu); 2113 2107 for (i = 0; i < num_frame; i++) { 2114 2108 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true); 2115 - if (ret != MVNETA_XDP_TX) { 2109 + if (ret == MVNETA_XDP_TX) { 2110 + nxmit_byte += frames[i]->len; 2111 + } else { 2116 2112 xdp_return_frame_rx_napi(frames[i]); 2117 - drops++; 2113 + nxmit--; 2118 2114 } 2119 2115 } 2120 2116 ··· 2124 2116 mvneta_txq_pend_desc_add(pp, txq, 0); 2125 2117 __netif_tx_unlock(nq); 2126 2118 2127 - return num_frame - drops; 2119 + u64_stats_update_begin(&stats->syncp); 2120 + stats->es.ps.tx_bytes += nxmit_byte; 2121 + stats->es.ps.tx_packets += nxmit; 2122 + stats->es.ps.xdp_xmit += nxmit; 2123 + u64_stats_update_end(&stats->syncp); 2124 + 2125 + return nxmit; 2128 2126 } 2129 2127 2130 2128 static int ··· 4498 4484 u64 xdp_redirect; 4499 4485 u64 xdp_pass; 4500 4486 u64 xdp_drop; 4487 + u64 xdp_xmit; 4501 4488 u64 xdp_tx; 4502 4489 4503 4490 stats = per_cpu_ptr(pp->stats, cpu); ··· 4509 4494 xdp_redirect = stats->es.ps.xdp_redirect; 4510 4495 xdp_pass = stats->es.ps.xdp_pass; 4511 4496 xdp_drop = stats->es.ps.xdp_drop; 4497 + xdp_xmit = stats->es.ps.xdp_xmit; 4512 4498 xdp_tx = stats->es.ps.xdp_tx; 4513 4499 } while 
(u64_stats_fetch_retry_irq(&stats->syncp, start)); 4514 4500 ··· 4518 4502 es->ps.xdp_redirect += xdp_redirect; 4519 4503 es->ps.xdp_pass += xdp_pass; 4520 4504 es->ps.xdp_drop += xdp_drop; 4505 + es->ps.xdp_xmit += xdp_xmit; 4521 4506 es->ps.xdp_tx += xdp_tx; 4522 4507 } 4523 4508 } ··· 4571 4554 break; 4572 4555 case ETHTOOL_XDP_TX: 4573 4556 pp->ethtool_stats[i] = stats.ps.xdp_tx; 4557 + break; 4558 + case ETHTOOL_XDP_XMIT: 4559 + pp->ethtool_stats[i] = stats.ps.xdp_xmit; 4574 4560 break; 4575 4561 } 4576 4562 break;