Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-mana-refactor-gf-stats-handling-and-add-rx_missed_errors-counter'

Erni Sri Satya Vennela says:

====================
net: mana: Refactor GF stats handling and add rx_missed_errors counter

Restructure mana_query_gf_stats() to operate on the per-VF mana_context
instead of the per-port mana_port_context. Introduce mana_ethtool_hc_stats
to isolate hardware counter statistics and update the
"ethtool -S <interface>" output to expose all relevant counters while
preserving backward compatibility.

Add support for the standard rx_missed_errors counter by mapping it
to the hardware's hc_rx_discards_no_wqe metric. Refresh statistics
every 2 seconds.
====================

Link: https://patch.msgid.link/1763120599-6331-1-git-send-email-ernis@linux.microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+148 -98
+66 -35
drivers/net/ethernet/microsoft/mana/mana_en.c
··· 534 534 535 535 netdev_stats_to_stats64(st, &ndev->stats); 536 536 537 + if (apc->ac->hwc_timeout_occurred) 538 + netdev_warn_once(ndev, "HWC timeout occurred\n"); 539 + 540 + st->rx_missed_errors = apc->ac->hc_stats.hc_rx_discards_no_wqe; 541 + 537 542 for (q = 0; q < num_queues; q++) { 538 543 rx_stats = &apc->rxqs[q]->stats; 539 544 ··· 2814 2809 return 0; 2815 2810 } 2816 2811 2817 - void mana_query_gf_stats(struct mana_port_context *apc) 2812 + int mana_query_gf_stats(struct mana_context *ac) 2818 2813 { 2814 + struct gdma_context *gc = ac->gdma_dev->gdma_context; 2819 2815 struct mana_query_gf_stat_resp resp = {}; 2820 2816 struct mana_query_gf_stat_req req = {}; 2821 - struct net_device *ndev = apc->ndev; 2817 + struct device *dev = gc->dev; 2822 2818 int err; 2823 2819 2824 2820 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT, ··· 2853 2847 STATISTICS_FLAGS_HC_TX_BCAST_BYTES | 2854 2848 STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR; 2855 2849 2856 - err = mana_send_request(apc->ac, &req, sizeof(req), &resp, 2850 + err = mana_send_request(ac, &req, sizeof(req), &resp, 2857 2851 sizeof(resp)); 2858 2852 if (err) { 2859 - netdev_err(ndev, "Failed to query GF stats: %d\n", err); 2860 - return; 2853 + dev_err(dev, "Failed to query GF stats: %d\n", err); 2854 + return err; 2861 2855 } 2862 2856 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT, 2863 2857 sizeof(resp)); 2864 2858 if (err || resp.hdr.status) { 2865 - netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err, 2866 - resp.hdr.status); 2867 - return; 2859 + dev_err(dev, "Failed to query GF stats: %d, 0x%x\n", err, 2860 + resp.hdr.status); 2861 + return err; 2868 2862 } 2869 2863 2870 - apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe; 2871 - apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled; 2872 - apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes; 2873 - apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts; 2874 - apc->eth_stats.hc_rx_ucast_bytes = 
resp.hc_rx_ucast_bytes; 2875 - apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts; 2876 - apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes; 2877 - apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts; 2878 - apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes; 2879 - apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled; 2880 - apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled; 2881 - apc->eth_stats.hc_tx_err_inval_vportoffset_pkt = 2864 + ac->hc_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe; 2865 + ac->hc_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled; 2866 + ac->hc_stats.hc_rx_bytes = resp.hc_rx_bytes; 2867 + ac->hc_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts; 2868 + ac->hc_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes; 2869 + ac->hc_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts; 2870 + ac->hc_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes; 2871 + ac->hc_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts; 2872 + ac->hc_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes; 2873 + ac->hc_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled; 2874 + ac->hc_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled; 2875 + ac->hc_stats.hc_tx_err_inval_vportoffset_pkt = 2882 2876 resp.tx_err_inval_vport_offset_pkt; 2883 - apc->eth_stats.hc_tx_err_vlan_enforcement = 2877 + ac->hc_stats.hc_tx_err_vlan_enforcement = 2884 2878 resp.tx_err_vlan_enforcement; 2885 - apc->eth_stats.hc_tx_err_eth_type_enforcement = 2879 + ac->hc_stats.hc_tx_err_eth_type_enforcement = 2886 2880 resp.tx_err_ethtype_enforcement; 2887 - apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement; 2888 - apc->eth_stats.hc_tx_err_sqpdid_enforcement = 2881 + ac->hc_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement; 2882 + ac->hc_stats.hc_tx_err_sqpdid_enforcement = 2889 2883 resp.tx_err_SQPDID_enforcement; 2890 - apc->eth_stats.hc_tx_err_cqpdid_enforcement = 2884 + ac->hc_stats.hc_tx_err_cqpdid_enforcement = 
2891 2885 resp.tx_err_CQPDID_enforcement; 2892 - apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation; 2893 - apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob; 2894 - apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes; 2895 - apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts; 2896 - apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes; 2897 - apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts; 2898 - apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes; 2899 - apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts; 2900 - apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes; 2901 - apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma; 2886 + ac->hc_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation; 2887 + ac->hc_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob; 2888 + ac->hc_stats.hc_tx_bytes = resp.hc_tx_bytes; 2889 + ac->hc_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts; 2890 + ac->hc_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes; 2891 + ac->hc_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts; 2892 + ac->hc_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes; 2893 + ac->hc_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts; 2894 + ac->hc_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes; 2895 + ac->hc_stats.hc_tx_err_gdma = resp.tx_err_gdma; 2896 + 2897 + return 0; 2902 2898 } 2903 2899 2904 2900 void mana_query_phy_stats(struct mana_port_context *apc) ··· 3435 3427 return 0; 3436 3428 } 3437 3429 3430 + #define MANA_GF_STATS_PERIOD (2 * HZ) 3431 + 3432 + static void mana_gf_stats_work_handler(struct work_struct *work) 3433 + { 3434 + struct mana_context *ac = 3435 + container_of(to_delayed_work(work), struct mana_context, gf_stats_work); 3436 + int err; 3437 + 3438 + err = mana_query_gf_stats(ac); 3439 + if (err == -ETIMEDOUT) { 3440 + /* HWC timeout detected - reset stats and stop rescheduling */ 3441 + ac->hwc_timeout_occurred = true; 3442 + memset(&ac->hc_stats, 0, sizeof(ac->hc_stats)); 3443 + return; 3444 + } 3445 
+ schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD); 3446 + } 3447 + 3438 3448 int mana_probe(struct gdma_dev *gd, bool resuming) 3439 3449 { 3440 3450 struct gdma_context *gc = gd->gdma_context; ··· 3545 3519 } 3546 3520 3547 3521 err = add_adev(gd, "eth"); 3522 + 3523 + INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler); 3524 + schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD); 3525 + 3548 3526 out: 3549 3527 if (err) { 3550 3528 mana_remove(gd, false); ··· 3573 3543 int i; 3574 3544 3575 3545 disable_work_sync(&ac->link_change_work); 3546 + cancel_delayed_work_sync(&ac->gf_stats_work); 3576 3547 3577 3548 /* adev currently doesn't support suspending, always remove it */ 3578 3549 if (gd->adev)
+64 -57
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
··· 15 15 static const struct mana_stats_desc mana_eth_stats[] = { 16 16 {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)}, 17 17 {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)}, 18 - {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats, 19 - hc_rx_discards_no_wqe)}, 20 - {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats, 21 - hc_rx_err_vport_disabled)}, 22 - {"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)}, 23 - {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats, 24 - hc_rx_ucast_pkts)}, 25 - {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats, 26 - hc_rx_ucast_bytes)}, 27 - {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats, 28 - hc_rx_bcast_pkts)}, 29 - {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats, 30 - hc_rx_bcast_bytes)}, 31 - {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats, 32 - hc_rx_mcast_pkts)}, 33 - {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats, 34 - hc_rx_mcast_bytes)}, 35 - {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats, 36 - hc_tx_err_gf_disabled)}, 37 - {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats, 38 - hc_tx_err_vport_disabled)}, 39 - {"hc_tx_err_inval_vportoffset_pkt", 40 - offsetof(struct mana_ethtool_stats, 41 - hc_tx_err_inval_vportoffset_pkt)}, 42 - {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats, 43 - hc_tx_err_vlan_enforcement)}, 44 - {"hc_tx_err_eth_type_enforcement", 45 - offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)}, 46 - {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats, 47 - hc_tx_err_sa_enforcement)}, 48 - {"hc_tx_err_sqpdid_enforcement", 49 - offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)}, 50 - {"hc_tx_err_cqpdid_enforcement", 51 - offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)}, 52 - {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats, 53 - hc_tx_err_mtu_violation)}, 54 - 
{"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats, 55 - hc_tx_err_inval_oob)}, 56 - {"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats, 57 - hc_tx_err_gdma)}, 58 - {"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)}, 59 - {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats, 60 - hc_tx_ucast_pkts)}, 61 - {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats, 62 - hc_tx_ucast_bytes)}, 63 - {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats, 64 - hc_tx_bcast_pkts)}, 65 - {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats, 66 - hc_tx_bcast_bytes)}, 67 - {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats, 68 - hc_tx_mcast_pkts)}, 69 - {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats, 70 - hc_tx_mcast_bytes)}, 71 18 {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)}, 72 19 {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats, 73 20 tx_cqe_unknown_type)}, ··· 22 75 rx_coalesced_err)}, 23 76 {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats, 24 77 rx_cqe_unknown_type)}, 78 + }; 79 + 80 + static const struct mana_stats_desc mana_hc_stats[] = { 81 + {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_hc_stats, 82 + hc_rx_discards_no_wqe)}, 83 + {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats, 84 + hc_rx_err_vport_disabled)}, 85 + {"hc_rx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_rx_bytes)}, 86 + {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats, 87 + hc_rx_ucast_pkts)}, 88 + {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats, 89 + hc_rx_ucast_bytes)}, 90 + {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats, 91 + hc_rx_bcast_pkts)}, 92 + {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats, 93 + hc_rx_bcast_bytes)}, 94 + {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats, 95 + hc_rx_mcast_pkts)}, 96 + {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats, 97 + hc_rx_mcast_bytes)}, 98 + {"hc_tx_err_gf_disabled", 
offsetof(struct mana_ethtool_hc_stats, 99 + hc_tx_err_gf_disabled)}, 100 + {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats, 101 + hc_tx_err_vport_disabled)}, 102 + {"hc_tx_err_inval_vportoffset_pkt", 103 + offsetof(struct mana_ethtool_hc_stats, 104 + hc_tx_err_inval_vportoffset_pkt)}, 105 + {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_hc_stats, 106 + hc_tx_err_vlan_enforcement)}, 107 + {"hc_tx_err_eth_type_enforcement", 108 + offsetof(struct mana_ethtool_hc_stats, hc_tx_err_eth_type_enforcement)}, 109 + {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_hc_stats, 110 + hc_tx_err_sa_enforcement)}, 111 + {"hc_tx_err_sqpdid_enforcement", 112 + offsetof(struct mana_ethtool_hc_stats, hc_tx_err_sqpdid_enforcement)}, 113 + {"hc_tx_err_cqpdid_enforcement", 114 + offsetof(struct mana_ethtool_hc_stats, hc_tx_err_cqpdid_enforcement)}, 115 + {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_hc_stats, 116 + hc_tx_err_mtu_violation)}, 117 + {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_hc_stats, 118 + hc_tx_err_inval_oob)}, 119 + {"hc_tx_err_gdma", offsetof(struct mana_ethtool_hc_stats, 120 + hc_tx_err_gdma)}, 121 + {"hc_tx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_tx_bytes)}, 122 + {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats, 123 + hc_tx_ucast_pkts)}, 124 + {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats, 125 + hc_tx_ucast_bytes)}, 126 + {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats, 127 + hc_tx_bcast_pkts)}, 128 + {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats, 129 + hc_tx_bcast_bytes)}, 130 + {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats, 131 + hc_tx_mcast_pkts)}, 132 + {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats, 133 + hc_tx_mcast_bytes)}, 25 134 }; 26 135 27 136 static const struct mana_stats_desc mana_phy_stats[] = { ··· 141 138 if (stringset != ETH_SS_STATS) 142 139 return -EINVAL; 143 140 144 - return ARRAY_SIZE(mana_eth_stats) 
+ ARRAY_SIZE(mana_phy_stats) + 141 + return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) + ARRAY_SIZE(mana_hc_stats) + 145 142 num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT); 146 143 } 147 144 ··· 153 150 154 151 if (stringset != ETH_SS_STATS) 155 152 return; 156 - 157 153 for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) 158 154 ethtool_puts(&data, mana_eth_stats[i].name); 155 + 156 + for (i = 0; i < ARRAY_SIZE(mana_hc_stats); i++) 157 + ethtool_puts(&data, mana_hc_stats[i].name); 159 158 160 159 for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++) 161 160 ethtool_puts(&data, mana_phy_stats[i].name); ··· 191 186 struct mana_port_context *apc = netdev_priv(ndev); 192 187 unsigned int num_queues = apc->num_queues; 193 188 void *eth_stats = &apc->eth_stats; 189 + void *hc_stats = &apc->ac->hc_stats; 194 190 void *phy_stats = &apc->phy_stats; 195 191 struct mana_stats_rx *rx_stats; 196 192 struct mana_stats_tx *tx_stats; ··· 213 207 214 208 if (!apc->port_is_up) 215 209 return; 216 - /* we call mana function to update stats from GDMA */ 217 - mana_query_gf_stats(apc); 218 210 219 211 /* We call this mana function to get the phy stats from GDMA and includes 220 212 * aggregate tx/rx drop counters, Per-TC(Traffic Channel) tx/rx and pause ··· 222 218 223 219 for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++) 224 220 data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset); 221 + 222 + for (q = 0; q < ARRAY_SIZE(mana_hc_stats); q++) 223 + data[i++] = *(u64 *)(hc_stats + mana_hc_stats[q].offset); 225 224 226 225 for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++) 227 226 data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);
+5 -1
include/net/mana/gdma.h
··· 592 592 #define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17) 593 593 #define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6) 594 594 595 + /* Driver can send HWC periodically to query stats */ 596 + #define GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY BIT(21) 597 + 595 598 #define GDMA_DRV_CAP_FLAGS1 \ 596 599 (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \ 597 600 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \ ··· 604 601 GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \ 605 602 GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \ 606 603 GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \ 607 - GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE) 604 + GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE | \ 605 + GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY) 608 606 609 607 #define GDMA_DRV_CAP_FLAGS2 0 610 608
+13 -5
include/net/mana/mana.h
··· 375 375 struct mana_ethtool_stats { 376 376 u64 stop_queue; 377 377 u64 wake_queue; 378 + u64 tx_cqe_err; 379 + u64 tx_cqe_unknown_type; 380 + u64 rx_coalesced_err; 381 + u64 rx_cqe_unknown_type; 382 + }; 383 + 384 + struct mana_ethtool_hc_stats { 378 385 u64 hc_rx_discards_no_wqe; 379 386 u64 hc_rx_err_vport_disabled; 380 387 u64 hc_rx_bytes; ··· 409 402 u64 hc_tx_mcast_pkts; 410 403 u64 hc_tx_mcast_bytes; 411 404 u64 hc_tx_err_gdma; 412 - u64 tx_cqe_err; 413 - u64 tx_cqe_unknown_type; 414 - u64 rx_coalesced_err; 415 - u64 rx_cqe_unknown_type; 416 405 }; 417 406 418 407 struct mana_ethtool_phy_stats { ··· 476 473 u16 num_ports; 477 474 u8 bm_hostmode; 478 475 476 + struct mana_ethtool_hc_stats hc_stats; 479 477 struct mana_eq *eqs; 480 478 struct dentry *mana_eqs_debugfs; 479 + 480 + /* Workqueue for querying hardware stats */ 481 + struct delayed_work gf_stats_work; 482 + bool hwc_timeout_occurred; 481 483 482 484 struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; 483 485 ··· 585 577 struct bpf_prog *mana_xdp_get(struct mana_port_context *apc); 586 578 void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog); 587 579 int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf); 588 - void mana_query_gf_stats(struct mana_port_context *apc); 580 + int mana_query_gf_stats(struct mana_context *ac); 589 581 int mana_query_link_cfg(struct mana_port_context *apc); 590 582 int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed, 591 583 int enable_clamping);