Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-bcmgenet-64bit-stats-and-expose-more-stats-in-ethtool'

Zak Kemble says:

====================
net: bcmgenet: 64bit stats and expose more stats in ethtool

Hi, this patchset updates the bcmgenet driver to provide 64-bit statistics
via ndo_get_stats64 and rtnl_link_stats64, reports hardware-discarded
packets in the rx_missed_errors stat, and exposes more stats in ethtool.

v1: https://lore.kernel.org/20250513144107.1989-1-zakkemble@gmail.com
v2: https://lore.kernel.org/20250515145142.1415-1-zakkemble@gmail.com
====================

Link: https://patch.msgid.link/20250519113257.1031-1-zakkemble@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+220 -87
+194 -81
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 969 969 970 970 /* standard ethtool support functions. */ 971 971 enum bcmgenet_stat_type { 972 - BCMGENET_STAT_NETDEV = -1, 972 + BCMGENET_STAT_RTNL = -1, 973 973 BCMGENET_STAT_MIB_RX, 974 974 BCMGENET_STAT_MIB_TX, 975 975 BCMGENET_STAT_RUNT, 976 976 BCMGENET_STAT_MISC, 977 977 BCMGENET_STAT_SOFT, 978 + BCMGENET_STAT_SOFT64, 978 979 }; 979 980 980 981 struct bcmgenet_stats { ··· 985 984 enum bcmgenet_stat_type type; 986 985 /* reg offset from UMAC base for misc counters */ 987 986 u16 reg_offset; 987 + /* sync for u64 stats counters */ 988 + int syncp_offset; 988 989 }; 989 990 990 - #define STAT_NETDEV(m) { \ 991 + #define STAT_RTNL(m) { \ 991 992 .stat_string = __stringify(m), \ 992 - .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ 993 - .stat_offset = offsetof(struct net_device_stats, m), \ 994 - .type = BCMGENET_STAT_NETDEV, \ 993 + .stat_sizeof = sizeof(((struct rtnl_link_stats64 *)0)->m), \ 994 + .stat_offset = offsetof(struct rtnl_link_stats64, m), \ 995 + .type = BCMGENET_STAT_RTNL, \ 995 996 } 996 997 997 998 #define STAT_GENET_MIB(str, m, _type) { \ ··· 1001 998 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ 1002 999 .stat_offset = offsetof(struct bcmgenet_priv, m), \ 1003 1000 .type = _type, \ 1001 + } 1002 + 1003 + #define STAT_GENET_SOFT_MIB64(str, s, m) { \ 1004 + .stat_string = str, \ 1005 + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->s.m), \ 1006 + .stat_offset = offsetof(struct bcmgenet_priv, s.m), \ 1007 + .type = BCMGENET_STAT_SOFT64, \ 1008 + .syncp_offset = offsetof(struct bcmgenet_priv, s.syncp), \ 1004 1009 } 1005 1010 1006 1011 #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) ··· 1025 1014 } 1026 1015 1027 1016 #define STAT_GENET_Q(num) \ 1028 - STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \ 1029 - tx_rings[num].packets), \ 1030 - STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \ 1031 - tx_rings[num].bytes), \ 1032 - STAT_GENET_SOFT_MIB("rxq" __stringify(num) 
"_bytes", \ 1033 - rx_rings[num].bytes), \ 1034 - STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \ 1035 - rx_rings[num].packets), \ 1036 - STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \ 1037 - rx_rings[num].errors), \ 1038 - STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \ 1039 - rx_rings[num].dropped) 1017 + STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_packets", \ 1018 + tx_rings[num].stats64, packets), \ 1019 + STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_bytes", \ 1020 + tx_rings[num].stats64, bytes), \ 1021 + STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_errors", \ 1022 + tx_rings[num].stats64, errors), \ 1023 + STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_dropped", \ 1024 + tx_rings[num].stats64, dropped), \ 1025 + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_bytes", \ 1026 + rx_rings[num].stats64, bytes), \ 1027 + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_packets", \ 1028 + rx_rings[num].stats64, packets), \ 1029 + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_errors", \ 1030 + rx_rings[num].stats64, errors), \ 1031 + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_dropped", \ 1032 + rx_rings[num].stats64, dropped), \ 1033 + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_multicast", \ 1034 + rx_rings[num].stats64, multicast), \ 1035 + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_missed", \ 1036 + rx_rings[num].stats64, missed), \ 1037 + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_length_errors", \ 1038 + rx_rings[num].stats64, length_errors), \ 1039 + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_over_errors", \ 1040 + rx_rings[num].stats64, over_errors), \ 1041 + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_crc_errors", \ 1042 + rx_rings[num].stats64, crc_errors), \ 1043 + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_frame_errors", \ 1044 + rx_rings[num].stats64, frame_errors), \ 1045 + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_fragmented_errors", \ 1046 + rx_rings[num].stats64, fragmented_errors), \ 1047 
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_broadcast", \ 1048 + rx_rings[num].stats64, broadcast) 1040 1049 1041 1050 /* There is a 0xC gap between the end of RX and beginning of TX stats and then 1042 1051 * between the end of TX stats and the beginning of the RX RUNT ··· 1068 1037 */ 1069 1038 static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { 1070 1039 /* general stats */ 1071 - STAT_NETDEV(rx_packets), 1072 - STAT_NETDEV(tx_packets), 1073 - STAT_NETDEV(rx_bytes), 1074 - STAT_NETDEV(tx_bytes), 1075 - STAT_NETDEV(rx_errors), 1076 - STAT_NETDEV(tx_errors), 1077 - STAT_NETDEV(rx_dropped), 1078 - STAT_NETDEV(tx_dropped), 1079 - STAT_NETDEV(multicast), 1040 + STAT_RTNL(rx_packets), 1041 + STAT_RTNL(tx_packets), 1042 + STAT_RTNL(rx_bytes), 1043 + STAT_RTNL(tx_bytes), 1044 + STAT_RTNL(rx_errors), 1045 + STAT_RTNL(tx_errors), 1046 + STAT_RTNL(rx_dropped), 1047 + STAT_RTNL(tx_dropped), 1048 + STAT_RTNL(multicast), 1049 + STAT_RTNL(rx_missed_errors), 1050 + STAT_RTNL(rx_length_errors), 1051 + STAT_RTNL(rx_over_errors), 1052 + STAT_RTNL(rx_crc_errors), 1053 + STAT_RTNL(rx_frame_errors), 1080 1054 /* UniMAC RSV counters */ 1081 1055 STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), 1082 1056 STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), ··· 1169 1133 1170 1134 #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) 1171 1135 1136 + #define BCMGENET_STATS64_ADD(stats, m, v) \ 1137 + do { \ 1138 + u64_stats_update_begin(&stats->syncp); \ 1139 + u64_stats_add(&stats->m, v); \ 1140 + u64_stats_update_end(&stats->syncp); \ 1141 + } while (0) 1142 + 1143 + #define BCMGENET_STATS64_INC(stats, m) \ 1144 + do { \ 1145 + u64_stats_update_begin(&stats->syncp); \ 1146 + u64_stats_inc(&stats->m); \ 1147 + u64_stats_update_end(&stats->syncp); \ 1148 + } while (0) 1149 + 1172 1150 static void bcmgenet_get_drvinfo(struct net_device *dev, 1173 1151 struct ethtool_drvinfo *info) 1174 1152 { ··· 1266 1216 1267 1217 s = 
&bcmgenet_gstrings_stats[i]; 1268 1218 switch (s->type) { 1269 - case BCMGENET_STAT_NETDEV: 1219 + case BCMGENET_STAT_RTNL: 1270 1220 case BCMGENET_STAT_SOFT: 1221 + case BCMGENET_STAT_SOFT64: 1271 1222 continue; 1272 1223 case BCMGENET_STAT_RUNT: 1273 1224 offset += BCMGENET_STAT_OFFSET; ··· 1306 1255 u64 *data) 1307 1256 { 1308 1257 struct bcmgenet_priv *priv = netdev_priv(dev); 1258 + struct rtnl_link_stats64 stats64; 1259 + struct u64_stats_sync *syncp; 1260 + unsigned int start; 1309 1261 int i; 1310 1262 1311 1263 if (netif_running(dev)) 1312 1264 bcmgenet_update_mib_counters(priv); 1313 1265 1314 - dev->netdev_ops->ndo_get_stats(dev); 1266 + dev_get_stats(dev, &stats64); 1315 1267 1316 1268 for (i = 0; i < BCMGENET_STATS_LEN; i++) { 1317 1269 const struct bcmgenet_stats *s; 1318 1270 char *p; 1319 1271 1320 1272 s = &bcmgenet_gstrings_stats[i]; 1321 - if (s->type == BCMGENET_STAT_NETDEV) 1322 - p = (char *)&dev->stats; 1323 - else 1324 - p = (char *)priv; 1325 - p += s->stat_offset; 1326 - if (sizeof(unsigned long) != sizeof(u32) && 1327 - s->stat_sizeof == sizeof(unsigned long)) 1328 - data[i] = *(unsigned long *)p; 1329 - else 1330 - data[i] = *(u32 *)p; 1273 + p = (char *)priv; 1274 + 1275 + if (s->type == BCMGENET_STAT_SOFT64) { 1276 + syncp = (struct u64_stats_sync *)(p + s->syncp_offset); 1277 + do { 1278 + start = u64_stats_fetch_begin(syncp); 1279 + data[i] = u64_stats_read((u64_stats_t *)(p + s->stat_offset)); 1280 + } while (u64_stats_fetch_retry(syncp, start)); 1281 + } else { 1282 + if (s->type == BCMGENET_STAT_RTNL) 1283 + p = (char *)&stats64; 1284 + 1285 + p += s->stat_offset; 1286 + if (sizeof(unsigned long) != sizeof(u32) && 1287 + s->stat_sizeof == sizeof(unsigned long)) 1288 + data[i] = *(unsigned long *)p; 1289 + else 1290 + data[i] = *(u32 *)p; 1291 + } 1331 1292 } 1332 1293 } 1333 1294 ··· 1919 1856 static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, 1920 1857 struct bcmgenet_tx_ring *ring) 1921 1858 { 1859 + struct 
bcmgenet_tx_stats64 *stats = &ring->stats64; 1922 1860 struct bcmgenet_priv *priv = netdev_priv(dev); 1923 1861 unsigned int txbds_processed = 0; 1924 1862 unsigned int bytes_compl = 0; ··· 1960 1896 ring->free_bds += txbds_processed; 1961 1897 ring->c_index = c_index; 1962 1898 1963 - ring->packets += pkts_compl; 1964 - ring->bytes += bytes_compl; 1899 + u64_stats_update_begin(&stats->syncp); 1900 + u64_stats_add(&stats->packets, pkts_compl); 1901 + u64_stats_add(&stats->bytes, bytes_compl); 1902 + u64_stats_update_end(&stats->syncp); 1965 1903 1966 1904 netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index), 1967 1905 pkts_compl, bytes_compl); ··· 2049 1983 * the transmit checksum offsets in the descriptors 2050 1984 */ 2051 1985 static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, 2052 - struct sk_buff *skb) 1986 + struct sk_buff *skb, 1987 + struct bcmgenet_tx_ring *ring) 2053 1988 { 1989 + struct bcmgenet_tx_stats64 *stats = &ring->stats64; 2054 1990 struct bcmgenet_priv *priv = netdev_priv(dev); 2055 1991 struct status_64 *status = NULL; 2056 1992 struct sk_buff *new_skb; ··· 2069 2001 if (!new_skb) { 2070 2002 dev_kfree_skb_any(skb); 2071 2003 priv->mib.tx_realloc_tsb_failed++; 2072 - dev->stats.tx_dropped++; 2004 + BCMGENET_STATS64_INC(stats, dropped); 2073 2005 return NULL; 2074 2006 } 2075 2007 dev_consume_skb_any(skb); ··· 2157 2089 GENET_CB(skb)->bytes_sent = skb->len; 2158 2090 2159 2091 /* add the Transmit Status Block */ 2160 - skb = bcmgenet_add_tsb(dev, skb); 2092 + skb = bcmgenet_add_tsb(dev, skb, ring); 2161 2093 if (!skb) { 2162 2094 ret = NETDEV_TX_OK; 2163 2095 goto out; ··· 2299 2231 static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, 2300 2232 unsigned int budget) 2301 2233 { 2234 + struct bcmgenet_rx_stats64 *stats = &ring->stats64; 2302 2235 struct bcmgenet_priv *priv = ring->priv; 2303 2236 struct net_device *dev = priv->dev; 2304 2237 struct enet_cb *cb; ··· 2322 2253 DMA_P_INDEX_DISCARD_CNT_MASK; 2323 
2254 if (discards > ring->old_discards) { 2324 2255 discards = discards - ring->old_discards; 2325 - ring->errors += discards; 2256 + BCMGENET_STATS64_ADD(stats, missed, discards); 2326 2257 ring->old_discards += discards; 2327 2258 2328 2259 /* Clear HW register when we reach 75% of maximum 0xFFFF */ ··· 2348 2279 skb = bcmgenet_rx_refill(priv, cb); 2349 2280 2350 2281 if (unlikely(!skb)) { 2351 - ring->dropped++; 2282 + BCMGENET_STATS64_INC(stats, dropped); 2352 2283 goto next; 2353 2284 } 2354 2285 ··· 2375 2306 2376 2307 if (unlikely(len > RX_BUF_LENGTH)) { 2377 2308 netif_err(priv, rx_status, dev, "oversized packet\n"); 2378 - dev->stats.rx_length_errors++; 2379 - dev->stats.rx_errors++; 2309 + BCMGENET_STATS64_INC(stats, length_errors); 2380 2310 dev_kfree_skb_any(skb); 2381 2311 goto next; 2382 2312 } ··· 2383 2315 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { 2384 2316 netif_err(priv, rx_status, dev, 2385 2317 "dropping fragmented packet!\n"); 2386 - ring->errors++; 2318 + BCMGENET_STATS64_INC(stats, fragmented_errors); 2387 2319 dev_kfree_skb_any(skb); 2388 2320 goto next; 2389 2321 } ··· 2396 2328 DMA_RX_RXER))) { 2397 2329 netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", 2398 2330 (unsigned int)dma_flag); 2331 + u64_stats_update_begin(&stats->syncp); 2399 2332 if (dma_flag & DMA_RX_CRC_ERROR) 2400 - dev->stats.rx_crc_errors++; 2333 + u64_stats_inc(&stats->crc_errors); 2401 2334 if (dma_flag & DMA_RX_OV) 2402 - dev->stats.rx_over_errors++; 2335 + u64_stats_inc(&stats->over_errors); 2403 2336 if (dma_flag & DMA_RX_NO) 2404 - dev->stats.rx_frame_errors++; 2337 + u64_stats_inc(&stats->frame_errors); 2405 2338 if (dma_flag & DMA_RX_LG) 2406 - dev->stats.rx_length_errors++; 2407 - dev->stats.rx_errors++; 2339 + u64_stats_inc(&stats->length_errors); 2340 + if ((dma_flag & (DMA_RX_CRC_ERROR | 2341 + DMA_RX_OV | 2342 + DMA_RX_NO | 2343 + DMA_RX_LG | 2344 + DMA_RX_RXER)) == DMA_RX_RXER) 2345 + u64_stats_inc(&stats->errors); 2346 + 
u64_stats_update_end(&stats->syncp); 2408 2347 dev_kfree_skb_any(skb); 2409 2348 goto next; 2410 2349 } /* error packet */ ··· 2431 2356 2432 2357 /*Finish setting up the received SKB and send it to the kernel*/ 2433 2358 skb->protocol = eth_type_trans(skb, priv->dev); 2434 - ring->packets++; 2435 - ring->bytes += len; 2359 + 2360 + u64_stats_update_begin(&stats->syncp); 2361 + u64_stats_inc(&stats->packets); 2362 + u64_stats_add(&stats->bytes, len); 2436 2363 if (dma_flag & DMA_RX_MULT) 2437 - dev->stats.multicast++; 2364 + u64_stats_inc(&stats->multicast); 2365 + else if (dma_flag & DMA_RX_BRDCAST) 2366 + u64_stats_inc(&stats->broadcast); 2367 + u64_stats_update_end(&stats->syncp); 2438 2368 2439 2369 /* Notify kernel */ 2440 2370 napi_gro_receive(&ring->napi, skb); ··· 3500 3420 3501 3421 netif_trans_update(dev); 3502 3422 3503 - dev->stats.tx_errors++; 3423 + BCMGENET_STATS64_INC((&priv->tx_rings[txqueue].stats64), errors); 3504 3424 3505 3425 netif_tx_wake_all_queues(dev); 3506 3426 } ··· 3589 3509 return 0; 3590 3510 } 3591 3511 3592 - static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) 3512 + static void bcmgenet_get_stats64(struct net_device *dev, 3513 + struct rtnl_link_stats64 *stats) 3593 3514 { 3594 3515 struct bcmgenet_priv *priv = netdev_priv(dev); 3595 - unsigned long tx_bytes = 0, tx_packets = 0; 3596 - unsigned long rx_bytes = 0, rx_packets = 0; 3597 - unsigned long rx_errors = 0, rx_dropped = 0; 3598 - struct bcmgenet_tx_ring *tx_ring; 3599 - struct bcmgenet_rx_ring *rx_ring; 3516 + struct bcmgenet_tx_stats64 *tx_stats; 3517 + struct bcmgenet_rx_stats64 *rx_stats; 3518 + u64 rx_length_errors, rx_over_errors; 3519 + u64 rx_missed, rx_fragmented_errors; 3520 + u64 rx_crc_errors, rx_frame_errors; 3521 + u64 tx_errors, tx_dropped; 3522 + u64 rx_errors, rx_dropped; 3523 + u64 tx_bytes, tx_packets; 3524 + u64 rx_bytes, rx_packets; 3525 + unsigned int start; 3600 3526 unsigned int q; 3527 + u64 multicast; 3601 3528 3602 3529 for (q 
= 0; q <= priv->hw_params->tx_queues; q++) { 3603 - tx_ring = &priv->tx_rings[q]; 3604 - tx_bytes += tx_ring->bytes; 3605 - tx_packets += tx_ring->packets; 3530 + tx_stats = &priv->tx_rings[q].stats64; 3531 + do { 3532 + start = u64_stats_fetch_begin(&tx_stats->syncp); 3533 + tx_bytes = u64_stats_read(&tx_stats->bytes); 3534 + tx_packets = u64_stats_read(&tx_stats->packets); 3535 + tx_errors = u64_stats_read(&tx_stats->errors); 3536 + tx_dropped = u64_stats_read(&tx_stats->dropped); 3537 + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); 3538 + 3539 + stats->tx_bytes += tx_bytes; 3540 + stats->tx_packets += tx_packets; 3541 + stats->tx_errors += tx_errors; 3542 + stats->tx_dropped += tx_dropped; 3606 3543 } 3607 3544 3608 3545 for (q = 0; q <= priv->hw_params->rx_queues; q++) { 3609 - rx_ring = &priv->rx_rings[q]; 3546 + rx_stats = &priv->rx_rings[q].stats64; 3547 + do { 3548 + start = u64_stats_fetch_begin(&rx_stats->syncp); 3549 + rx_bytes = u64_stats_read(&rx_stats->bytes); 3550 + rx_packets = u64_stats_read(&rx_stats->packets); 3551 + rx_errors = u64_stats_read(&rx_stats->errors); 3552 + rx_dropped = u64_stats_read(&rx_stats->dropped); 3553 + rx_missed = u64_stats_read(&rx_stats->missed); 3554 + rx_length_errors = u64_stats_read(&rx_stats->length_errors); 3555 + rx_over_errors = u64_stats_read(&rx_stats->over_errors); 3556 + rx_crc_errors = u64_stats_read(&rx_stats->crc_errors); 3557 + rx_frame_errors = u64_stats_read(&rx_stats->frame_errors); 3558 + rx_fragmented_errors = u64_stats_read(&rx_stats->fragmented_errors); 3559 + multicast = u64_stats_read(&rx_stats->multicast); 3560 + } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); 3610 3561 3611 - rx_bytes += rx_ring->bytes; 3612 - rx_packets += rx_ring->packets; 3613 - rx_errors += rx_ring->errors; 3614 - rx_dropped += rx_ring->dropped; 3562 + rx_errors += rx_length_errors; 3563 + rx_errors += rx_crc_errors; 3564 + rx_errors += rx_frame_errors; 3565 + rx_errors += rx_fragmented_errors; 3566 + 
3567 + stats->rx_bytes += rx_bytes; 3568 + stats->rx_packets += rx_packets; 3569 + stats->rx_errors += rx_errors; 3570 + stats->rx_dropped += rx_dropped; 3571 + stats->rx_missed_errors += rx_missed; 3572 + stats->rx_length_errors += rx_length_errors; 3573 + stats->rx_over_errors += rx_over_errors; 3574 + stats->rx_crc_errors += rx_crc_errors; 3575 + stats->rx_frame_errors += rx_frame_errors; 3576 + stats->multicast += multicast; 3615 3577 } 3616 - 3617 - dev->stats.tx_bytes = tx_bytes; 3618 - dev->stats.tx_packets = tx_packets; 3619 - dev->stats.rx_bytes = rx_bytes; 3620 - dev->stats.rx_packets = rx_packets; 3621 - dev->stats.rx_errors = rx_errors; 3622 - dev->stats.rx_missed_errors = rx_errors; 3623 - dev->stats.rx_dropped = rx_dropped; 3624 - return &dev->stats; 3625 3578 } 3626 3579 3627 3580 static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier) ··· 3682 3569 .ndo_set_mac_address = bcmgenet_set_mac_addr, 3683 3570 .ndo_eth_ioctl = phy_do_ioctl_running, 3684 3571 .ndo_set_features = bcmgenet_set_features, 3685 - .ndo_get_stats = bcmgenet_get_stats, 3572 + .ndo_get_stats64 = bcmgenet_get_stats64, 3686 3573 .ndo_change_carrier = bcmgenet_change_carrier, 3687 3574 }; 3688 3575
+26 -6
drivers/net/ethernet/broadcom/genet/bcmgenet.h
··· 155 155 u32 tx_realloc_tsb_failed; 156 156 }; 157 157 158 + struct bcmgenet_tx_stats64 { 159 + struct u64_stats_sync syncp; 160 + u64_stats_t packets; 161 + u64_stats_t bytes; 162 + u64_stats_t errors; 163 + u64_stats_t dropped; 164 + }; 165 + 166 + struct bcmgenet_rx_stats64 { 167 + struct u64_stats_sync syncp; 168 + u64_stats_t bytes; 169 + u64_stats_t packets; 170 + u64_stats_t errors; 171 + u64_stats_t dropped; 172 + u64_stats_t multicast; 173 + u64_stats_t broadcast; 174 + u64_stats_t missed; 175 + u64_stats_t length_errors; 176 + u64_stats_t over_errors; 177 + u64_stats_t crc_errors; 178 + u64_stats_t frame_errors; 179 + u64_stats_t fragmented_errors; 180 + }; 181 + 158 182 #define UMAC_MIB_START 0x400 159 183 160 184 #define UMAC_MDIO_CMD 0x614 ··· 539 515 struct bcmgenet_tx_ring { 540 516 spinlock_t lock; /* ring lock */ 541 517 struct napi_struct napi; /* NAPI per tx queue */ 542 - unsigned long packets; 543 - unsigned long bytes; 518 + struct bcmgenet_tx_stats64 stats64; 544 519 unsigned int index; /* ring index */ 545 520 struct enet_cb *cbs; /* tx ring buffer control block*/ 546 521 unsigned int size; /* size of each tx ring */ ··· 563 540 564 541 struct bcmgenet_rx_ring { 565 542 struct napi_struct napi; /* Rx NAPI struct */ 566 - unsigned long bytes; 567 - unsigned long packets; 568 - unsigned long errors; 569 - unsigned long dropped; 543 + struct bcmgenet_rx_stats64 stats64; 570 544 unsigned int index; /* Rx ring index */ 571 545 struct enet_cb *cbs; /* Rx ring buffer control block */ 572 546 unsigned int size; /* Rx ring size */