Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'bnxt_en-update'

Michael Chan says:

====================
bnxt_en update.

This patchset removes the PCIe histogram and other debug register
data from ethtool -S. The removed data are not counters; they have
very large, constantly fluctuating values that are not suitable for
the decimal counter display of ethtool -S.

The rest of the patches implement counter rollover for all hardware
counters that are not 64-bit counters. Different generations of
hardware have different counter widths. The driver will now query
the counter widths of all counters from firmware and implement
rollover support on all non-64-bit counters.

The last patch adds the PCIe histogram and other PCIe register data back
using the ethtool -d interface.

v2: Fix bnxt_re RDMA driver compile issue.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+864 -346
+1 -1
drivers/infiniband/hw/bnxt_re/hw_counters.c
··· 132 132 stats->value[BNXT_RE_RECOVERABLE_ERRORS] = 133 133 le64_to_cpu(bnxt_re_stats->tx_bcast_pkts); 134 134 stats->value[BNXT_RE_RX_DROPS] = 135 - le64_to_cpu(bnxt_re_stats->rx_drop_pkts); 135 + le64_to_cpu(bnxt_re_stats->rx_error_pkts); 136 136 stats->value[BNXT_RE_RX_DISCARDS] = 137 137 le64_to_cpu(bnxt_re_stats->rx_discard_pkts); 138 138 stats->value[BNXT_RE_RX_PKTS] =
+363 -137
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 3711 3711 return 0; 3712 3712 } 3713 3713 3714 + static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) 3715 + { 3716 + kfree(stats->hw_masks); 3717 + stats->hw_masks = NULL; 3718 + kfree(stats->sw_stats); 3719 + stats->sw_stats = NULL; 3720 + if (stats->hw_stats) { 3721 + dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, 3722 + stats->hw_stats_map); 3723 + stats->hw_stats = NULL; 3724 + } 3725 + } 3726 + 3727 + static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, 3728 + bool alloc_masks) 3729 + { 3730 + stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, 3731 + &stats->hw_stats_map, GFP_KERNEL); 3732 + if (!stats->hw_stats) 3733 + return -ENOMEM; 3734 + 3735 + memset(stats->hw_stats, 0, stats->len); 3736 + 3737 + stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); 3738 + if (!stats->sw_stats) 3739 + goto stats_mem_err; 3740 + 3741 + if (alloc_masks) { 3742 + stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); 3743 + if (!stats->hw_masks) 3744 + goto stats_mem_err; 3745 + } 3746 + return 0; 3747 + 3748 + stats_mem_err: 3749 + bnxt_free_stats_mem(bp, stats); 3750 + return -ENOMEM; 3751 + } 3752 + 3753 + static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) 3754 + { 3755 + int i; 3756 + 3757 + for (i = 0; i < count; i++) 3758 + mask_arr[i] = mask; 3759 + } 3760 + 3761 + static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) 3762 + { 3763 + int i; 3764 + 3765 + for (i = 0; i < count; i++) 3766 + mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); 3767 + } 3768 + 3769 + static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, 3770 + struct bnxt_stats_mem *stats) 3771 + { 3772 + struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; 3773 + struct hwrm_func_qstats_ext_input req = {0}; 3774 + __le64 *hw_masks; 3775 + int rc; 3776 + 3777 + if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || 3778 + !(bp->flags & BNXT_FLAG_CHIP_P5)) 3779 + return -EOPNOTSUPP; 
3780 + 3781 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1); 3782 + req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 3783 + mutex_lock(&bp->hwrm_cmd_lock); 3784 + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3785 + if (rc) 3786 + goto qstat_exit; 3787 + 3788 + hw_masks = &resp->rx_ucast_pkts; 3789 + bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); 3790 + 3791 + qstat_exit: 3792 + mutex_unlock(&bp->hwrm_cmd_lock); 3793 + return rc; 3794 + } 3795 + 3796 + static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); 3797 + static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); 3798 + 3799 + static void bnxt_init_stats(struct bnxt *bp) 3800 + { 3801 + struct bnxt_napi *bnapi = bp->bnapi[0]; 3802 + struct bnxt_cp_ring_info *cpr; 3803 + struct bnxt_stats_mem *stats; 3804 + __le64 *rx_stats, *tx_stats; 3805 + int rc, rx_count, tx_count; 3806 + u64 *rx_masks, *tx_masks; 3807 + u64 mask; 3808 + u8 flags; 3809 + 3810 + cpr = &bnapi->cp_ring; 3811 + stats = &cpr->stats; 3812 + rc = bnxt_hwrm_func_qstat_ext(bp, stats); 3813 + if (rc) { 3814 + if (bp->flags & BNXT_FLAG_CHIP_P5) 3815 + mask = (1ULL << 48) - 1; 3816 + else 3817 + mask = -1ULL; 3818 + bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); 3819 + } 3820 + if (bp->flags & BNXT_FLAG_PORT_STATS) { 3821 + stats = &bp->port_stats; 3822 + rx_stats = stats->hw_stats; 3823 + rx_masks = stats->hw_masks; 3824 + rx_count = sizeof(struct rx_port_stats) / 8; 3825 + tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 3826 + tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 3827 + tx_count = sizeof(struct tx_port_stats) / 8; 3828 + 3829 + flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; 3830 + rc = bnxt_hwrm_port_qstats(bp, flags); 3831 + if (rc) { 3832 + mask = (1ULL << 40) - 1; 3833 + 3834 + bnxt_fill_masks(rx_masks, mask, rx_count); 3835 + bnxt_fill_masks(tx_masks, mask, tx_count); 3836 + } else { 3837 + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 
3838 + bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); 3839 + bnxt_hwrm_port_qstats(bp, 0); 3840 + } 3841 + } 3842 + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 3843 + stats = &bp->rx_port_stats_ext; 3844 + rx_stats = stats->hw_stats; 3845 + rx_masks = stats->hw_masks; 3846 + rx_count = sizeof(struct rx_port_stats_ext) / 8; 3847 + stats = &bp->tx_port_stats_ext; 3848 + tx_stats = stats->hw_stats; 3849 + tx_masks = stats->hw_masks; 3850 + tx_count = sizeof(struct tx_port_stats_ext) / 8; 3851 + 3852 + flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 3853 + rc = bnxt_hwrm_port_qstats_ext(bp, flags); 3854 + if (rc) { 3855 + mask = (1ULL << 40) - 1; 3856 + 3857 + bnxt_fill_masks(rx_masks, mask, rx_count); 3858 + if (tx_stats) 3859 + bnxt_fill_masks(tx_masks, mask, tx_count); 3860 + } else { 3861 + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 3862 + if (tx_stats) 3863 + bnxt_copy_hw_masks(tx_masks, tx_stats, 3864 + tx_count); 3865 + bnxt_hwrm_port_qstats_ext(bp, 0); 3866 + } 3867 + } 3868 + } 3869 + 3714 3870 static void bnxt_free_port_stats(struct bnxt *bp) 3715 3871 { 3716 - struct pci_dev *pdev = bp->pdev; 3717 - 3718 3872 bp->flags &= ~BNXT_FLAG_PORT_STATS; 3719 3873 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 3720 3874 3721 - if (bp->hw_rx_port_stats) { 3722 - dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, 3723 - bp->hw_rx_port_stats, 3724 - bp->hw_rx_port_stats_map); 3725 - bp->hw_rx_port_stats = NULL; 3726 - } 3727 - 3728 - if (bp->hw_tx_port_stats_ext) { 3729 - dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext), 3730 - bp->hw_tx_port_stats_ext, 3731 - bp->hw_tx_port_stats_ext_map); 3732 - bp->hw_tx_port_stats_ext = NULL; 3733 - } 3734 - 3735 - if (bp->hw_rx_port_stats_ext) { 3736 - dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), 3737 - bp->hw_rx_port_stats_ext, 3738 - bp->hw_rx_port_stats_ext_map); 3739 - bp->hw_rx_port_stats_ext = NULL; 3740 - } 3741 - 3742 - if (bp->hw_pcie_stats) { 3743 - dma_free_coherent(&pdev->dev, 
sizeof(struct pcie_ctx_hw_stats), 3744 - bp->hw_pcie_stats, bp->hw_pcie_stats_map); 3745 - bp->hw_pcie_stats = NULL; 3746 - } 3875 + bnxt_free_stats_mem(bp, &bp->port_stats); 3876 + bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); 3877 + bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); 3747 3878 } 3748 3879 3749 3880 static void bnxt_free_ring_stats(struct bnxt *bp) 3750 3881 { 3751 - struct pci_dev *pdev = bp->pdev; 3752 - int size, i; 3882 + int i; 3753 3883 3754 3884 if (!bp->bnapi) 3755 3885 return; 3756 - 3757 - size = bp->hw_ring_stats_size; 3758 3886 3759 3887 for (i = 0; i < bp->cp_nr_rings; i++) { 3760 3888 struct bnxt_napi *bnapi = bp->bnapi[i]; 3761 3889 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3762 3890 3763 - if (cpr->hw_stats) { 3764 - dma_free_coherent(&pdev->dev, size, cpr->hw_stats, 3765 - cpr->hw_stats_map); 3766 - cpr->hw_stats = NULL; 3767 - } 3891 + bnxt_free_stats_mem(bp, &cpr->stats); 3768 3892 } 3769 3893 } 3770 3894 3771 3895 static int bnxt_alloc_stats(struct bnxt *bp) 3772 3896 { 3773 3897 u32 size, i; 3774 - struct pci_dev *pdev = bp->pdev; 3898 + int rc; 3775 3899 3776 3900 size = bp->hw_ring_stats_size; 3777 3901 ··· 3903 3779 struct bnxt_napi *bnapi = bp->bnapi[i]; 3904 3780 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3905 3781 3906 - cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, 3907 - &cpr->hw_stats_map, 3908 - GFP_KERNEL); 3909 - if (!cpr->hw_stats) 3910 - return -ENOMEM; 3782 + cpr->stats.len = size; 3783 + rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); 3784 + if (rc) 3785 + return rc; 3911 3786 3912 3787 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 3913 3788 } ··· 3914 3791 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 3915 3792 return 0; 3916 3793 3917 - if (bp->hw_rx_port_stats) 3794 + if (bp->port_stats.hw_stats) 3918 3795 goto alloc_ext_stats; 3919 3796 3920 - bp->hw_port_stats_size = sizeof(struct rx_port_stats) + 3921 - sizeof(struct tx_port_stats) + 1024; 3797 + bp->port_stats.len = 
BNXT_PORT_STATS_SIZE; 3798 + rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); 3799 + if (rc) 3800 + return rc; 3922 3801 3923 - bp->hw_rx_port_stats = 3924 - dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, 3925 - &bp->hw_rx_port_stats_map, 3926 - GFP_KERNEL); 3927 - if (!bp->hw_rx_port_stats) 3928 - return -ENOMEM; 3929 - 3930 - bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512; 3931 - bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + 3932 - sizeof(struct rx_port_stats) + 512; 3933 3802 bp->flags |= BNXT_FLAG_PORT_STATS; 3934 3803 3935 3804 alloc_ext_stats: ··· 3930 3815 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 3931 3816 return 0; 3932 3817 3933 - if (bp->hw_rx_port_stats_ext) 3818 + if (bp->rx_port_stats_ext.hw_stats) 3934 3819 goto alloc_tx_ext_stats; 3935 3820 3936 - bp->hw_rx_port_stats_ext = 3937 - dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), 3938 - &bp->hw_rx_port_stats_ext_map, GFP_KERNEL); 3939 - if (!bp->hw_rx_port_stats_ext) 3821 + bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); 3822 + rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); 3823 + /* Extended stats are optional */ 3824 + if (rc) 3940 3825 return 0; 3941 3826 3942 3827 alloc_tx_ext_stats: 3943 - if (bp->hw_tx_port_stats_ext) 3944 - goto alloc_pcie_stats; 3828 + if (bp->tx_port_stats_ext.hw_stats) 3829 + return 0; 3945 3830 3946 3831 if (bp->hwrm_spec_code >= 0x10902 || 3947 3832 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 3948 - bp->hw_tx_port_stats_ext = 3949 - dma_alloc_coherent(&pdev->dev, 3950 - sizeof(struct tx_port_stats_ext), 3951 - &bp->hw_tx_port_stats_ext_map, 3952 - GFP_KERNEL); 3833 + bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); 3834 + rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); 3835 + /* Extended stats are optional */ 3836 + if (rc) 3837 + return 0; 3953 3838 } 3954 3839 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 3955 - 3956 - alloc_pcie_stats: 3957 - if 
(bp->hw_pcie_stats || 3958 - !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 3959 - return 0; 3960 - 3961 - bp->hw_pcie_stats = 3962 - dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), 3963 - &bp->hw_pcie_stats_map, GFP_KERNEL); 3964 - if (!bp->hw_pcie_stats) 3965 - return 0; 3966 - 3967 - bp->flags |= BNXT_FLAG_PCIE_STATS; 3968 3840 return 0; 3969 3841 } 3970 3842 ··· 4051 3949 bnxt_free_ntp_fltrs(bp, irq_re_init); 4052 3950 if (irq_re_init) { 4053 3951 bnxt_free_ring_stats(bp); 3952 + if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET)) 3953 + bnxt_free_port_stats(bp); 4054 3954 bnxt_free_ring_grps(bp); 4055 3955 bnxt_free_vnics(bp); 4056 3956 kfree(bp->tx_ring_map); ··· 4156 4052 rc = bnxt_alloc_stats(bp); 4157 4053 if (rc) 4158 4054 goto alloc_mem_err; 4055 + bnxt_init_stats(bp); 4159 4056 4160 4057 rc = bnxt_alloc_ntp_fltrs(bp); 4161 4058 if (rc) ··· 6563 6458 struct bnxt_napi *bnapi = bp->bnapi[i]; 6564 6459 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6565 6460 6566 - req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); 6461 + req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); 6567 6462 6568 6463 rc = _hwrm_send_message(bp, &req, sizeof(req), 6569 6464 HWRM_CMD_TIMEOUT); ··· 7594 7489 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7595 7490 } 7596 7491 7597 - static int bnxt_hwrm_port_qstats(struct bnxt *bp) 7492 + static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) 7493 + { 7494 + u64 sw_tmp; 7495 + 7496 + sw_tmp = (*sw & ~mask) | hw; 7497 + if (hw < (*sw & mask)) 7498 + sw_tmp += mask + 1; 7499 + WRITE_ONCE(*sw, sw_tmp); 7500 + } 7501 + 7502 + static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, 7503 + int count, bool ignore_zero) 7504 + { 7505 + int i; 7506 + 7507 + for (i = 0; i < count; i++) { 7508 + u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); 7509 + 7510 + if (ignore_zero && !hw) 7511 + continue; 7512 + 7513 + if (masks[i] == -1ULL) 7514 + sw_stats[i] = hw; 7515 + else 
7516 + bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); 7517 + } 7518 + } 7519 + 7520 + static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) 7521 + { 7522 + if (!stats->hw_stats) 7523 + return; 7524 + 7525 + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 7526 + stats->hw_masks, stats->len / 8, false); 7527 + } 7528 + 7529 + static void bnxt_accumulate_all_stats(struct bnxt *bp) 7530 + { 7531 + struct bnxt_stats_mem *ring0_stats; 7532 + bool ignore_zero = false; 7533 + int i; 7534 + 7535 + /* Chip bug. Counter intermittently becomes 0. */ 7536 + if (bp->flags & BNXT_FLAG_CHIP_P5) 7537 + ignore_zero = true; 7538 + 7539 + for (i = 0; i < bp->cp_nr_rings; i++) { 7540 + struct bnxt_napi *bnapi = bp->bnapi[i]; 7541 + struct bnxt_cp_ring_info *cpr; 7542 + struct bnxt_stats_mem *stats; 7543 + 7544 + cpr = &bnapi->cp_ring; 7545 + stats = &cpr->stats; 7546 + if (!i) 7547 + ring0_stats = stats; 7548 + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 7549 + ring0_stats->hw_masks, 7550 + ring0_stats->len / 8, ignore_zero); 7551 + } 7552 + if (bp->flags & BNXT_FLAG_PORT_STATS) { 7553 + struct bnxt_stats_mem *stats = &bp->port_stats; 7554 + __le64 *hw_stats = stats->hw_stats; 7555 + u64 *sw_stats = stats->sw_stats; 7556 + u64 *masks = stats->hw_masks; 7557 + int cnt; 7558 + 7559 + cnt = sizeof(struct rx_port_stats) / 8; 7560 + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 7561 + 7562 + hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 7563 + sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 7564 + masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 7565 + cnt = sizeof(struct tx_port_stats) / 8; 7566 + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 7567 + } 7568 + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 7569 + bnxt_accumulate_stats(&bp->rx_port_stats_ext); 7570 + bnxt_accumulate_stats(&bp->tx_port_stats_ext); 7571 + } 7572 + } 7573 + 7574 + static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) 7598 7575 { 7599 7576 
struct bnxt_pf_info *pf = &bp->pf; 7600 7577 struct hwrm_port_qstats_input req = {0}; ··· 7684 7497 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 7685 7498 return 0; 7686 7499 7500 + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 7501 + return -EOPNOTSUPP; 7502 + 7503 + req.flags = flags; 7687 7504 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); 7688 7505 req.port_id = cpu_to_le16(pf->port_id); 7689 - req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); 7690 - req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); 7506 + req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + 7507 + BNXT_TX_PORT_STATS_BYTE_OFFSET); 7508 + req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); 7691 7509 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7692 7510 } 7693 7511 7694 - static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) 7512 + static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) 7695 7513 { 7696 7514 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; 7697 7515 struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; ··· 7708 7516 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 7709 7517 return 0; 7710 7518 7519 + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 7520 + return -EOPNOTSUPP; 7521 + 7711 7522 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); 7523 + req.flags = flags; 7712 7524 req.port_id = cpu_to_le16(pf->port_id); 7713 7525 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 7714 - req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); 7715 - tx_stat_size = bp->hw_tx_port_stats_ext ? 7716 - sizeof(*bp->hw_tx_port_stats_ext) : 0; 7526 + req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); 7527 + tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 
7528 + sizeof(struct tx_port_stats_ext) : 0; 7717 7529 req.tx_stat_size = cpu_to_le16(tx_stat_size); 7718 - req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map); 7530 + req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); 7719 7531 mutex_lock(&bp->hwrm_cmd_lock); 7720 7532 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7721 7533 if (!rc) { ··· 7730 7534 bp->fw_rx_stats_ext_size = 0; 7731 7535 bp->fw_tx_stats_ext_size = 0; 7732 7536 } 7537 + if (flags) 7538 + goto qstats_done; 7539 + 7733 7540 if (bp->fw_tx_stats_ext_size <= 7734 7541 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 7735 7542 mutex_unlock(&bp->hwrm_cmd_lock); ··· 7771 7572 qstats_done: 7772 7573 mutex_unlock(&bp->hwrm_cmd_lock); 7773 7574 return rc; 7774 - } 7775 - 7776 - static int bnxt_hwrm_pcie_qstats(struct bnxt *bp) 7777 - { 7778 - struct hwrm_pcie_qstats_input req = {0}; 7779 - 7780 - if (!(bp->flags & BNXT_FLAG_PCIE_STATS)) 7781 - return 0; 7782 - 7783 - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); 7784 - req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats)); 7785 - req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map); 7786 - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7787 7575 } 7788 7576 7789 7577 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) ··· 8794 8608 if (BNXT_PF(bp)) 8795 8609 bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG; 8796 8610 } 8611 + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET) 8612 + bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET; 8613 + 8797 8614 if (resp->supported_speeds_auto_mode) 8798 8615 link_info->support_auto_speeds = 8799 8616 le16_to_cpu(resp->supported_speeds_auto_mode); ··· 9799 9610 { 9800 9611 int i; 9801 9612 9802 - 9803 9613 for (i = 0; i < bp->cp_nr_rings; i++) { 9804 9614 struct bnxt_napi *bnapi = bp->bnapi[i]; 9805 9615 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 9806 - struct ctx_hw_stats 
*hw_stats = cpr->hw_stats; 9616 + u64 *sw = cpr->stats.sw_stats; 9807 9617 9808 - stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); 9809 - stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); 9810 - stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); 9618 + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 9619 + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 9620 + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 9811 9621 9812 - stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); 9813 - stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); 9814 - stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); 9622 + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 9623 + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 9624 + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 9815 9625 9816 - stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); 9817 - stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); 9818 - stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); 9626 + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 9627 + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 9628 + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 9819 9629 9820 - stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); 9821 - stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); 9822 - stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); 9630 + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 9631 + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 9632 + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 9823 9633 9824 9634 stats->rx_missed_errors += 9825 - le64_to_cpu(hw_stats->rx_discard_pkts); 9635 + BNXT_GET_RING_STATS64(sw, rx_discard_pkts); 9826 9636 9827 - stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); 9637 + stats->multicast += BNXT_GET_RING_STATS64(sw, 
rx_mcast_pkts); 9828 9638 9829 - stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); 9639 + stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); 9830 9640 } 9831 9641 } 9832 9642 ··· 9863 9675 bnxt_add_prev_stats(bp, stats); 9864 9676 9865 9677 if (bp->flags & BNXT_FLAG_PORT_STATS) { 9866 - struct rx_port_stats *rx = bp->hw_rx_port_stats; 9867 - struct tx_port_stats *tx = bp->hw_tx_port_stats; 9678 + u64 *rx = bp->port_stats.sw_stats; 9679 + u64 *tx = bp->port_stats.sw_stats + 9680 + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 9868 9681 9869 - stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); 9870 - stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); 9871 - stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + 9872 - le64_to_cpu(rx->rx_ovrsz_frames) + 9873 - le64_to_cpu(rx->rx_runt_frames); 9874 - stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + 9875 - le64_to_cpu(rx->rx_jbr_frames); 9876 - stats->collisions = le64_to_cpu(tx->tx_total_collisions); 9877 - stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); 9878 - stats->tx_errors = le64_to_cpu(tx->tx_err); 9682 + stats->rx_crc_errors = 9683 + BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 9684 + stats->rx_frame_errors = 9685 + BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 9686 + stats->rx_length_errors = 9687 + BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + 9688 + BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + 9689 + BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); 9690 + stats->rx_errors = 9691 + BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + 9692 + BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 9693 + stats->collisions = 9694 + BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); 9695 + stats->tx_fifo_errors = 9696 + BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); 9697 + stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); 9879 9698 } 9880 9699 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 9881 9700 } ··· 10228 10033 return rc; 10229 10034 } 
10230 10035 10036 + int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, 10037 + u32 *reg_buf) 10038 + { 10039 + struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr; 10040 + struct hwrm_dbg_read_direct_input req = {0}; 10041 + __le32 *dbg_reg_buf; 10042 + dma_addr_t mapping; 10043 + int rc, i; 10044 + 10045 + dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4, 10046 + &mapping, GFP_KERNEL); 10047 + if (!dbg_reg_buf) 10048 + return -ENOMEM; 10049 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1); 10050 + req.host_dest_addr = cpu_to_le64(mapping); 10051 + req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); 10052 + req.read_len32 = cpu_to_le32(num_words); 10053 + mutex_lock(&bp->hwrm_cmd_lock); 10054 + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 10055 + if (rc || resp->error_code) { 10056 + rc = -EIO; 10057 + goto dbg_rd_reg_exit; 10058 + } 10059 + for (i = 0; i < num_words; i++) 10060 + reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); 10061 + 10062 + dbg_rd_reg_exit: 10063 + mutex_unlock(&bp->hwrm_cmd_lock); 10064 + dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping); 10065 + return rc; 10066 + } 10067 + 10231 10068 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 10232 10069 u32 ring_id, u32 *prod, u32 *cons) 10233 10070 { ··· 10404 10177 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 10405 10178 bnxt_fw_health_check(bp); 10406 10179 10407 - if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && 10408 - bp->stats_coal_ticks) { 10180 + if (bp->link_info.link_up && bp->stats_coal_ticks) { 10409 10181 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 10410 10182 bnxt_queue_sp_work(bp); 10411 10183 } ··· 10690 10464 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 10691 10465 bnxt_hwrm_exec_fwd_req(bp); 10692 10466 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 10693 - 
bnxt_hwrm_port_qstats(bp); 10694 - bnxt_hwrm_port_qstats_ext(bp); 10695 - bnxt_hwrm_pcie_qstats(bp); 10467 + bnxt_hwrm_port_qstats(bp, 0); 10468 + bnxt_hwrm_port_qstats_ext(bp, 0); 10469 + bnxt_accumulate_all_stats(bp); 10696 10470 } 10697 10471 10698 10472 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+78 -18
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 919 919 struct bnxt_cmn_sw_stats cmn; 920 920 }; 921 921 922 + struct bnxt_stats_mem { 923 + u64 *sw_stats; 924 + u64 *hw_masks; 925 + void *hw_stats; 926 + dma_addr_t hw_stats_map; 927 + int len; 928 + }; 929 + 922 930 struct bnxt_cp_ring_info { 923 931 struct bnxt_napi *bnapi; 924 932 u32 cp_raw_cons; ··· 951 943 952 944 dma_addr_t cp_desc_mapping[MAX_CP_PAGES]; 953 945 954 - struct ctx_hw_stats *hw_stats; 955 - dma_addr_t hw_stats_map; 946 + struct bnxt_stats_mem stats; 956 947 u32 hw_stats_ctx_id; 957 948 958 949 struct bnxt_sw_stats sw_stats; ··· 1142 1135 #define BNXT_FLTR_UPDATE 1 1143 1136 }; 1144 1137 1138 + struct hwrm_port_phy_qcfg_output_compat { 1139 + __le16 error_code; 1140 + __le16 req_type; 1141 + __le16 seq_id; 1142 + __le16 resp_len; 1143 + u8 link; 1144 + u8 link_signal_mode; 1145 + __le16 link_speed; 1146 + u8 duplex_cfg; 1147 + u8 pause; 1148 + __le16 support_speeds; 1149 + __le16 force_link_speed; 1150 + u8 auto_mode; 1151 + u8 auto_pause; 1152 + __le16 auto_link_speed; 1153 + __le16 auto_link_speed_mask; 1154 + u8 wirespeed; 1155 + u8 lpbk; 1156 + u8 force_pause; 1157 + u8 module_status; 1158 + __le32 preemphasis; 1159 + u8 phy_maj; 1160 + u8 phy_min; 1161 + u8 phy_bld; 1162 + u8 phy_type; 1163 + u8 media_type; 1164 + u8 xcvr_pkg_type; 1165 + u8 eee_config_phy_addr; 1166 + u8 parallel_detect; 1167 + __le16 link_partner_adv_speeds; 1168 + u8 link_partner_adv_auto_mode; 1169 + u8 link_partner_adv_pause; 1170 + __le16 adv_eee_link_speed_mask; 1171 + __le16 link_partner_adv_eee_link_speed_mask; 1172 + __le32 xcvr_identifier_type_tx_lpi_timer; 1173 + __le16 fec_cfg; 1174 + u8 duplex_state; 1175 + u8 option_flags; 1176 + char phy_vendor_name[16]; 1177 + char phy_vendor_partnumber[16]; 1178 + u8 unused_0[7]; 1179 + u8 valid; 1180 + }; 1181 + 1145 1182 struct bnxt_link_info { 1146 1183 u8 phy_type; 1147 1184 u8 media_type; ··· 1303 1252 u16 timeout; 1304 1253 char string[BNXT_MAX_TEST][ETH_GSTRING_LEN]; 1305 1254 }; 1255 + 1256 + #define 
CHIMP_REG_VIEW_ADDR \ 1257 + ((bp->flags & BNXT_FLAG_CHIP_P5) ? 0x80000000 : 0xb1000000) 1306 1258 1307 1259 #define BNXT_GRCPF_REG_CHIMP_COMM 0x0 1308 1260 #define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100 ··· 1620 1566 #define BNXT_FLAG_DIM 0x2000000 1621 1567 #define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000 1622 1568 #define BNXT_FLAG_PORT_STATS_EXT 0x10000000 1623 - #define BNXT_FLAG_PCIE_STATS 0x40000000 1624 1569 1625 1570 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ 1626 1571 BNXT_FLAG_RFS | \ ··· 1772 1719 #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000 1773 1720 #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000 1774 1721 #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000 1722 + #define BNXT_FW_CAP_PORT_STATS_NO_RESET 0x10000000 1775 1723 1776 1724 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) 1777 1725 u32 hwrm_spec_code; ··· 1787 1733 dma_addr_t hwrm_cmd_kong_resp_dma_addr; 1788 1734 1789 1735 struct rtnl_link_stats64 net_stats_prev; 1790 - struct rx_port_stats *hw_rx_port_stats; 1791 - struct tx_port_stats *hw_tx_port_stats; 1792 - struct rx_port_stats_ext *hw_rx_port_stats_ext; 1793 - struct tx_port_stats_ext *hw_tx_port_stats_ext; 1794 - struct pcie_ctx_hw_stats *hw_pcie_stats; 1795 - dma_addr_t hw_rx_port_stats_map; 1796 - dma_addr_t hw_tx_port_stats_map; 1797 - dma_addr_t hw_rx_port_stats_ext_map; 1798 - dma_addr_t hw_tx_port_stats_ext_map; 1799 - dma_addr_t hw_pcie_stats_map; 1800 - int hw_port_stats_size; 1736 + struct bnxt_stats_mem port_stats; 1737 + struct bnxt_stats_mem rx_port_stats_ext; 1738 + struct bnxt_stats_mem tx_port_stats_ext; 1801 1739 u16 fw_rx_stats_ext_size; 1802 1740 u16 fw_tx_stats_ext_size; 1803 1741 u16 hw_ring_stats_size; ··· 1931 1885 struct device *hwmon_dev; 1932 1886 }; 1933 1887 1888 + #define BNXT_GET_RING_STATS64(sw, counter) \ 1889 + (*((sw) + offsetof(struct ctx_hw_stats, counter) / 8)) 1890 + 1891 + #define BNXT_GET_RX_PORT_STATS64(sw, counter) \ 1892 + (*((sw) + offsetof(struct rx_port_stats, counter) / 
8)) 1893 + 1894 + #define BNXT_GET_TX_PORT_STATS64(sw, counter) \ 1895 + (*((sw) + offsetof(struct tx_port_stats, counter) / 8)) 1896 + 1897 + #define BNXT_PORT_STATS_SIZE \ 1898 + (sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024) 1899 + 1900 + #define BNXT_TX_PORT_STATS_BYTE_OFFSET \ 1901 + (sizeof(struct rx_port_stats) + 512) 1902 + 1934 1903 #define BNXT_RX_STATS_OFFSET(counter) \ 1935 1904 (offsetof(struct rx_port_stats, counter) / 8) 1936 1905 1937 1906 #define BNXT_TX_STATS_OFFSET(counter) \ 1938 1907 ((offsetof(struct tx_port_stats, counter) + \ 1939 - sizeof(struct rx_port_stats) + 512) / 8) 1908 + BNXT_TX_PORT_STATS_BYTE_OFFSET) / 8) 1940 1909 1941 1910 #define BNXT_RX_STATS_EXT_OFFSET(counter) \ 1942 1911 (offsetof(struct rx_port_stats_ext, counter) / 8) 1943 1912 1944 1913 #define BNXT_TX_STATS_EXT_OFFSET(counter) \ 1945 1914 (offsetof(struct tx_port_stats_ext, counter) / 8) 1946 - 1947 - #define BNXT_PCIE_STATS_OFFSET(counter) \ 1948 - (offsetof(struct pcie_ctx_hw_stats, counter) / 8) 1949 1915 1950 1916 #define BNXT_HW_FEATURE_VLAN_ALL_RX \ 1951 1917 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX) ··· 2120 2062 int bnxt_half_open_nic(struct bnxt *bp); 2121 2063 void bnxt_half_close_nic(struct bnxt *bp); 2122 2064 int bnxt_close_nic(struct bnxt *, bool, bool); 2065 + int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, 2066 + u32 *reg_buf); 2123 2067 void bnxt_fw_exception(struct bnxt *bp); 2124 2068 void bnxt_fw_reset(struct bnxt *bp); 2125 2069 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
··· 544 544 static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) 545 545 { 546 546 struct bnxt *bp = netdev_priv(dev); 547 - __le64 *stats = (__le64 *)bp->hw_rx_port_stats; 547 + __le64 *stats = bp->port_stats.hw_stats; 548 548 struct ieee_pfc *my_pfc = bp->ieee_pfc; 549 549 long rx_off, tx_off; 550 550 int i, rc;
+76 -62
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 142 142 "rx_mcast_packets", 143 143 "rx_bcast_packets", 144 144 "rx_discards", 145 - "rx_drops", 145 + "rx_errors", 146 146 "rx_ucast_bytes", 147 147 "rx_mcast_bytes", 148 148 "rx_bcast_bytes", ··· 152 152 "tx_ucast_packets", 153 153 "tx_mcast_packets", 154 154 "tx_bcast_packets", 155 + "tx_errors", 155 156 "tx_discards", 156 - "tx_drops", 157 157 "tx_ucast_bytes", 158 158 "tx_mcast_bytes", 159 159 "tx_bcast_bytes", ··· 292 292 BNXT_TX_STATS_PRI_ENTRY(counter, 5), \ 293 293 BNXT_TX_STATS_PRI_ENTRY(counter, 6), \ 294 294 BNXT_TX_STATS_PRI_ENTRY(counter, 7) 295 - 296 - #define BNXT_PCIE_STATS_ENTRY(counter) \ 297 - { BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) } 298 295 299 296 enum { 300 297 RX_TOTAL_DISCARDS, ··· 451 454 BNXT_TX_STATS_PRI_ENTRIES(tx_packets), 452 455 }; 453 456 454 - static const struct { 455 - long offset; 456 - char string[ETH_GSTRING_LEN]; 457 - } bnxt_pcie_stats_arr[] = { 458 - BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity), 459 - BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity), 460 - BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity), 461 - BNXT_PCIE_STATS_ENTRY(pcie_link_integrity), 462 - BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate), 463 - BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate), 464 - BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics), 465 - BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics), 466 - BNXT_PCIE_STATS_ENTRY(pcie_equalization_time), 467 - BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]), 468 - BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]), 469 - BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram), 470 - }; 471 - 472 457 #define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats) 473 458 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) 474 459 #define BNXT_NUM_STATS_PRI \ ··· 458 479 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \ 459 480 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \ 460 481 ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) 461 - #define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr) 462 482 463 483 static int 
bnxt_get_num_tpa_ring_stats(struct bnxt *bp) 464 484 { ··· 503 525 if (bp->pri2cos_valid) 504 526 num_stats += BNXT_NUM_STATS_PRI; 505 527 } 506 - 507 - if (bp->flags & BNXT_FLAG_PCIE_STATS) 508 - num_stats += BNXT_NUM_PCIE_STATS; 509 528 510 529 return num_stats; 511 530 } ··· 559 584 for (i = 0; i < bp->cp_nr_rings; i++) { 560 585 struct bnxt_napi *bnapi = bp->bnapi[i]; 561 586 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 562 - __le64 *hw_stats = (__le64 *)cpr->hw_stats; 587 + u64 *sw_stats = cpr->stats.sw_stats; 563 588 u64 *sw; 564 589 int k; 565 590 566 591 if (is_rx_ring(bp, i)) { 567 592 for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++) 568 - buf[j] = le64_to_cpu(hw_stats[k]); 593 + buf[j] = sw_stats[k]; 569 594 } 570 595 if (is_tx_ring(bp, i)) { 571 596 k = NUM_RING_RX_HW_STATS; 572 597 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; 573 598 j++, k++) 574 - buf[j] = le64_to_cpu(hw_stats[k]); 599 + buf[j] = sw_stats[k]; 575 600 } 576 601 if (!tpa_stats || !is_rx_ring(bp, i)) 577 602 goto skip_tpa_ring_stats; ··· 579 604 k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; 580 605 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS + 581 606 tpa_stats; j++, k++) 582 - buf[j] = le64_to_cpu(hw_stats[k]); 607 + buf[j] = sw_stats[k]; 583 608 584 609 skip_tpa_ring_stats: 585 610 sw = (u64 *)&cpr->sw_stats.rx; ··· 593 618 buf[j] = sw[k]; 594 619 595 620 bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += 596 - le64_to_cpu(cpr->hw_stats->rx_discard_pkts); 621 + BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts); 597 622 bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter += 598 - le64_to_cpu(cpr->hw_stats->tx_discard_pkts); 623 + BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts); 599 624 } 600 625 601 626 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) ··· 603 628 604 629 skip_ring_stats: 605 630 if (bp->flags & BNXT_FLAG_PORT_STATS) { 606 - __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats; 631 + u64 *port_stats = bp->port_stats.sw_stats; 607 632 608 - for (i = 0; 
i < BNXT_NUM_PORT_STATS; i++, j++) { 609 - buf[j] = le64_to_cpu(*(port_stats + 610 - bnxt_port_stats_arr[i].offset)); 611 - } 633 + for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) 634 + buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset); 612 635 } 613 636 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 614 - __le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext; 615 - __le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext; 637 + u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats; 638 + u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats; 616 639 617 640 for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) { 618 - buf[j] = le64_to_cpu(*(rx_port_stats_ext + 619 - bnxt_port_stats_ext_arr[i].offset)); 641 + buf[j] = *(rx_port_stats_ext + 642 + bnxt_port_stats_ext_arr[i].offset); 620 643 } 621 644 for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) { 622 - buf[j] = le64_to_cpu(*(tx_port_stats_ext + 623 - bnxt_tx_port_stats_ext_arr[i].offset)); 645 + buf[j] = *(tx_port_stats_ext + 646 + bnxt_tx_port_stats_ext_arr[i].offset); 624 647 } 625 648 if (bp->pri2cos_valid) { 626 649 for (i = 0; i < 8; i++, j++) { 627 650 long n = bnxt_rx_bytes_pri_arr[i].base_off + 628 651 bp->pri2cos_idx[i]; 629 652 630 - buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); 653 + buf[j] = *(rx_port_stats_ext + n); 631 654 } 632 655 for (i = 0; i < 8; i++, j++) { 633 656 long n = bnxt_rx_pkts_pri_arr[i].base_off + 634 657 bp->pri2cos_idx[i]; 635 658 636 - buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); 659 + buf[j] = *(rx_port_stats_ext + n); 637 660 } 638 661 for (i = 0; i < 8; i++, j++) { 639 662 long n = bnxt_tx_bytes_pri_arr[i].base_off + 640 663 bp->pri2cos_idx[i]; 641 664 642 - buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); 665 + buf[j] = *(tx_port_stats_ext + n); 643 666 } 644 667 for (i = 0; i < 8; i++, j++) { 645 668 long n = bnxt_tx_pkts_pri_arr[i].base_off + 646 669 bp->pri2cos_idx[i]; 647 670 648 - buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); 671 + buf[j] = 
*(tx_port_stats_ext + n); 649 672 } 650 - } 651 - } 652 - if (bp->flags & BNXT_FLAG_PCIE_STATS) { 653 - __le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats; 654 - 655 - for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) { 656 - buf[j] = le64_to_cpu(*(pcie_stats + 657 - bnxt_pcie_stats_arr[i].offset)); 658 673 } 659 674 } 660 675 } ··· 745 780 bnxt_tx_pkts_pri_arr[i].string); 746 781 buf += ETH_GSTRING_LEN; 747 782 } 748 - } 749 - } 750 - if (bp->flags & BNXT_FLAG_PCIE_STATS) { 751 - for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) { 752 - strcpy(buf, bnxt_pcie_stats_arr[i].string); 753 - buf += ETH_GSTRING_LEN; 754 783 } 755 784 } 756 785 break; ··· 1322 1363 info->eedump_len = 0; 1323 1364 /* TODO CHIMP FW: reg dump details */ 1324 1365 info->regdump_len = 0; 1366 + } 1367 + 1368 + static int bnxt_get_regs_len(struct net_device *dev) 1369 + { 1370 + struct bnxt *bp = netdev_priv(dev); 1371 + int reg_len; 1372 + 1373 + reg_len = BNXT_PXP_REG_LEN; 1374 + 1375 + if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED) 1376 + reg_len += sizeof(struct pcie_ctx_hw_stats); 1377 + 1378 + return reg_len; 1379 + } 1380 + 1381 + static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, 1382 + void *_p) 1383 + { 1384 + struct pcie_ctx_hw_stats *hw_pcie_stats; 1385 + struct hwrm_pcie_qstats_input req = {0}; 1386 + struct bnxt *bp = netdev_priv(dev); 1387 + dma_addr_t hw_pcie_stats_addr; 1388 + int rc; 1389 + 1390 + regs->version = 0; 1391 + bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p); 1392 + 1393 + if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 1394 + return; 1395 + 1396 + hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev, 1397 + sizeof(*hw_pcie_stats), 1398 + &hw_pcie_stats_addr, GFP_KERNEL); 1399 + if (!hw_pcie_stats) 1400 + return; 1401 + 1402 + regs->version = 1; 1403 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); 1404 + req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); 1405 + req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); 
1406 + mutex_lock(&bp->hwrm_cmd_lock); 1407 + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 1408 + if (!rc) { 1409 + __le64 *src = (__le64 *)hw_pcie_stats; 1410 + u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); 1411 + int i; 1412 + 1413 + for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) 1414 + dst[i] = le64_to_cpu(src[i]); 1415 + } 1416 + mutex_unlock(&bp->hwrm_cmd_lock); 1417 + dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats, 1418 + hw_pcie_stats_addr); 1325 1419 } 1326 1420 1327 1421 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) ··· 3652 3640 .get_pauseparam = bnxt_get_pauseparam, 3653 3641 .set_pauseparam = bnxt_set_pauseparam, 3654 3642 .get_drvinfo = bnxt_get_drvinfo, 3643 + .get_regs_len = bnxt_get_regs_len, 3644 + .get_regs = bnxt_get_regs, 3655 3645 .get_wol = bnxt_get_wol, 3656 3646 .set_wol = bnxt_set_wol, 3657 3647 .get_coalesce = bnxt_get_coalesce,
+2
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
··· 84 84 ETH_RESET_PHY | ETH_RESET_RAM) \ 85 85 << ETH_RESET_SHARED_SHIFT) 86 86 87 + #define BNXT_PXP_REG_LEN 0x3110 88 + 87 89 extern const struct ethtool_ops bnxt_ethtool_ops; 88 90 89 91 u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
+342 -126
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
··· 169 169 #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL 170 170 #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL 171 171 #define HWRM_RING_AGGINT_QCAPS 0x54UL 172 + #define HWRM_RING_SCHQ_ALLOC 0x55UL 173 + #define HWRM_RING_SCHQ_CFG 0x56UL 174 + #define HWRM_RING_SCHQ_FREE 0x57UL 172 175 #define HWRM_RING_RESET 0x5eUL 173 176 #define HWRM_RING_GRP_ALLOC 0x60UL 174 177 #define HWRM_RING_GRP_FREE 0x61UL 178 + #define HWRM_RING_CFG 0x62UL 179 + #define HWRM_RING_QCFG 0x63UL 175 180 #define HWRM_RESERVED5 0x64UL 176 181 #define HWRM_RESERVED6 0x65UL 177 182 #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL ··· 229 224 #define HWRM_FW_IPC_MAILBOX 0xccUL 230 225 #define HWRM_FW_ECN_CFG 0xcdUL 231 226 #define HWRM_FW_ECN_QCFG 0xceUL 227 + #define HWRM_FW_SECURE_CFG 0xcfUL 232 228 #define HWRM_EXEC_FWD_RESP 0xd0UL 233 229 #define HWRM_REJECT_FWD_RESP 0xd1UL 234 230 #define HWRM_FWD_RESP 0xd2UL ··· 343 337 #define HWRM_FUNC_VF_BW_QCFG 0x196UL 344 338 #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL 345 339 #define HWRM_FUNC_QSTATS_EXT 0x198UL 340 + #define HWRM_STAT_EXT_CTX_QUERY 0x199UL 346 341 #define HWRM_SELFTEST_QLIST 0x200UL 347 342 #define HWRM_SELFTEST_EXEC 0x201UL 348 343 #define HWRM_SELFTEST_IRQ 0x202UL ··· 360 353 #define HWRM_TF_VERSION_GET 0x2bdUL 361 354 #define HWRM_TF_SESSION_OPEN 0x2c6UL 362 355 #define HWRM_TF_SESSION_ATTACH 0x2c7UL 363 - #define HWRM_TF_SESSION_CLOSE 0x2c8UL 364 - #define HWRM_TF_SESSION_QCFG 0x2c9UL 365 - #define HWRM_TF_SESSION_RESC_QCAPS 0x2caUL 366 - #define HWRM_TF_SESSION_RESC_ALLOC 0x2cbUL 367 - #define HWRM_TF_SESSION_RESC_FREE 0x2ccUL 368 - #define HWRM_TF_SESSION_RESC_FLUSH 0x2cdUL 369 - #define HWRM_TF_TBL_TYPE_GET 0x2d0UL 370 - #define HWRM_TF_TBL_TYPE_SET 0x2d1UL 371 - #define HWRM_TF_CTXT_MEM_RGTR 0x2daUL 372 - #define HWRM_TF_CTXT_MEM_UNRGTR 0x2dbUL 373 - #define HWRM_TF_EXT_EM_QCAPS 0x2dcUL 374 - #define HWRM_TF_EXT_EM_OP 0x2ddUL 375 - #define HWRM_TF_EXT_EM_CFG 0x2deUL 376 - #define HWRM_TF_EXT_EM_QCFG 0x2dfUL 377 - 
#define HWRM_TF_TCAM_SET 0x2eeUL 378 - #define HWRM_TF_TCAM_GET 0x2efUL 379 - #define HWRM_TF_TCAM_MOVE 0x2f0UL 380 - #define HWRM_TF_TCAM_FREE 0x2f1UL 356 + #define HWRM_TF_SESSION_REGISTER 0x2c8UL 357 + #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL 358 + #define HWRM_TF_SESSION_CLOSE 0x2caUL 359 + #define HWRM_TF_SESSION_QCFG 0x2cbUL 360 + #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL 361 + #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL 362 + #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL 363 + #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL 364 + #define HWRM_TF_TBL_TYPE_GET 0x2daUL 365 + #define HWRM_TF_TBL_TYPE_SET 0x2dbUL 366 + #define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL 367 + #define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL 368 + #define HWRM_TF_EXT_EM_QCAPS 0x2e6UL 369 + #define HWRM_TF_EXT_EM_OP 0x2e7UL 370 + #define HWRM_TF_EXT_EM_CFG 0x2e8UL 371 + #define HWRM_TF_EXT_EM_QCFG 0x2e9UL 372 + #define HWRM_TF_EM_INSERT 0x2eaUL 373 + #define HWRM_TF_EM_DELETE 0x2ebUL 374 + #define HWRM_TF_TCAM_SET 0x2f8UL 375 + #define HWRM_TF_TCAM_GET 0x2f9UL 376 + #define HWRM_TF_TCAM_MOVE 0x2faUL 377 + #define HWRM_TF_TCAM_FREE 0x2fbUL 378 + #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL 379 + #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL 381 380 #define HWRM_SV 0x400UL 382 381 #define HWRM_DBG_READ_DIRECT 0xff10UL 383 382 #define HWRM_DBG_READ_INDIRECT 0xff11UL ··· 404 391 #define HWRM_DBG_QCAPS 0xff20UL 405 392 #define HWRM_DBG_QCFG 0xff21UL 406 393 #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL 394 + #define HWRM_NVM_REQ_ARBITRATION 0xffedUL 407 395 #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL 408 396 #define HWRM_NVM_VALIDATE_OPTION 0xffefUL 409 397 #define HWRM_NVM_FLUSH 0xfff0UL ··· 478 464 #define HWRM_VERSION_MAJOR 1 479 465 #define HWRM_VERSION_MINOR 10 480 466 #define HWRM_VERSION_UPDATE 1 481 - #define HWRM_VERSION_RSVD 33 482 - #define HWRM_VERSION_STR "1.10.1.33" 467 + #define HWRM_VERSION_RSVD 54 468 + #define HWRM_VERSION_STR "1.10.1.54" 483 469 484 470 /* hwrm_ver_get_input (size:192b/24B) */ 485 471 struct 
hwrm_ver_get_input { ··· 1108 1094 #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL 1109 1095 #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL 1110 1096 #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL 1097 + #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL 1098 + #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL 1111 1099 __le16 num_rsscos_ctxs; 1112 1100 __le16 num_cmpl_rings; 1113 1101 __le16 num_tx_rings; ··· 1205 1189 __le16 max_sp_tx_rings; 1206 1190 u8 unused_0[2]; 1207 1191 __le32 flags_ext; 1208 - #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL 1209 - #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL 1210 - #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL 1211 - u8 unused_1[3]; 1192 + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL 1193 + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL 1194 + #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL 1195 + #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL 1196 + #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL 1197 + #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL 1198 + #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL 1199 + #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL 1200 + u8 max_schqs; 1201 + u8 unused_1[2]; 1212 1202 u8 valid; 1213 1203 }; 1214 1204 ··· 1248 1226 #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL 1249 1227 #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL 1250 1228 #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL 1229 + #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL 1230 + #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL 1251 1231 u8 mac_address[6]; 1252 1232 __le16 pci_id; 1253 1233 __le16 alloc_rsscos_ctx; ··· 1345 1321 u8 valid; 1346 1322 }; 1347 1323 1348 - /* hwrm_func_cfg_input (size:704b/88B) */ 1324 + /* hwrm_func_cfg_input (size:768b/96B) */ 1349 1325 struct 
hwrm_func_cfg_input { 1350 1326 __le16 req_type; 1351 1327 __le16 cmpl_ring; ··· 1376 1352 #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL 1377 1353 #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL 1378 1354 #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL 1355 + #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL 1356 + #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL 1357 + #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL 1379 1358 __le32 enables; 1380 - #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL 1381 - #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL 1382 - #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL 1383 - #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL 1384 - #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL 1385 - #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL 1386 - #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL 1387 - #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL 1388 - #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL 1389 - #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL 1390 - #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL 1391 - #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL 1392 - #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL 1393 - #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL 1394 - #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL 1395 - #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL 1396 - #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL 1397 - #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL 1398 - #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL 1399 - #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL 1400 - #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL 1401 - #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL 1402 - #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL 1359 + #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL 1360 + #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL 1361 + #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL 1362 + #define 
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL 1363 + #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL 1364 + #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL 1365 + #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL 1366 + #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL 1367 + #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL 1368 + #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL 1369 + #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL 1370 + #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL 1371 + #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL 1372 + #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL 1373 + #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL 1374 + #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL 1375 + #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL 1376 + #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL 1377 + #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL 1378 + #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL 1379 + #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL 1380 + #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL 1381 + #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL 1382 + #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL 1383 + #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL 1403 1384 __le16 mtu; 1404 1385 __le16 mru; 1405 1386 __le16 num_rsscos_ctxs; ··· 1478 1449 #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL 1479 1450 #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4 1480 1451 __le16 num_mcast_filters; 1452 + __le16 schq_id; 1453 + u8 unused_0[6]; 1481 1454 }; 1482 1455 1483 1456 /* hwrm_func_cfg_output (size:128b/16B) */ ··· 1538 1507 u8 valid; 1539 1508 }; 1540 1509 1541 - /* hwrm_func_qstats_ext_input (size:192b/24B) */ 1510 + /* hwrm_func_qstats_ext_input (size:256b/32B) */ 1542 1511 struct hwrm_func_qstats_ext_input { 1543 1512 __le16 req_type; 1544 1513 __le16 cmpl_ring; ··· 1551 1520 #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL 1552 1521 #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 
0x2UL 1553 1522 #define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 1554 - u8 unused_0[5]; 1523 + u8 unused_0[1]; 1524 + __le32 enables; 1525 + #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL 1526 + __le16 schq_id; 1527 + __le16 traffic_class; 1528 + u8 unused_1[4]; 1555 1529 }; 1556 1530 1557 1531 /* hwrm_func_qstats_ext_output (size:1472b/184B) */ ··· 1569 1533 __le64 rx_mcast_pkts; 1570 1534 __le64 rx_bcast_pkts; 1571 1535 __le64 rx_discard_pkts; 1572 - __le64 rx_drop_pkts; 1536 + __le64 rx_error_pkts; 1573 1537 __le64 rx_ucast_bytes; 1574 1538 __le64 rx_mcast_bytes; 1575 1539 __le64 rx_bcast_bytes; 1576 1540 __le64 tx_ucast_pkts; 1577 1541 __le64 tx_mcast_pkts; 1578 1542 __le64 tx_bcast_pkts; 1543 + __le64 tx_error_pkts; 1579 1544 __le64 tx_discard_pkts; 1580 - __le64 tx_drop_pkts; 1581 1545 __le64 tx_ucast_bytes; 1582 1546 __le64 tx_mcast_bytes; 1583 1547 __le64 tx_bcast_bytes; ··· 2412 2376 __le16 target_id; 2413 2377 __le64 resp_addr; 2414 2378 __le32 flags; 2415 - #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL 2416 - #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL 2417 - #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL 2418 - #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL 2419 - #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL 2420 - #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL 2421 - #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL 2422 - #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL 2423 - #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL 2424 - #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL 2425 - #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL 2426 - #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL 2427 - #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL 2428 - #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL 2429 - #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL 2379 + #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL 2380 + #define 
PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL 2381 + #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL 2382 + #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL 2383 + #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL 2384 + #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL 2385 + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL 2386 + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL 2387 + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL 2388 + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL 2389 + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL 2390 + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL 2391 + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL 2392 + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL 2393 + #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL 2394 + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL 2395 + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL 2396 + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_ENABLE 0x20000UL 2397 + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_DISABLE 0x40000UL 2430 2398 __le32 enables; 2431 - #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL 2432 - #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL 2433 - #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL 2434 - #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL 2435 - #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL 2436 - #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL 2437 - #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL 2438 - #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL 2439 - #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL 2440 - #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL 2441 - #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL 2399 + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL 2400 + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL 2401 + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL 2402 + #define 
PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL 2403 + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL 2404 + #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL 2405 + #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL 2406 + #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL 2407 + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL 2408 + #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL 2409 + #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL 2410 + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL 2411 + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL 2442 2412 __le16 port_id; 2443 2413 __le16 force_link_speed; 2444 2414 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL ··· 2457 2415 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL 2458 2416 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL 2459 2417 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL 2460 - #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL 2461 2418 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL 2462 2419 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 2463 2420 u8 auto_mode; ··· 2487 2446 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL 2488 2447 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL 2489 2448 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL 2490 - #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL 2491 2449 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL 2492 2450 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 2493 2451 __le16 auto_link_speed_mask; ··· 2504 2464 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL 2505 2465 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL 2506 2466 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL 2507 - #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL 2508 2467 u8 wirespeed; 2509 2468 #define 
PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL 2510 2469 #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL ··· 2527 2488 #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL 2528 2489 #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL 2529 2490 #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL 2530 - u8 unused_2[2]; 2491 + __le16 force_pam4_link_speed; 2492 + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL 2493 + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL 2494 + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL 2495 + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 2531 2496 __le32 tx_lpi_timer; 2532 2497 #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL 2533 2498 #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0 2534 - __le32 unused_3; 2499 + __le16 auto_link_pam4_speed_mask; 2500 + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL 2501 + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL 2502 + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL 2503 + u8 unused_2[2]; 2535 2504 }; 2536 2505 2537 2506 /* hwrm_port_phy_cfg_output (size:128b/16B) */ ··· 2573 2526 u8 unused_0[6]; 2574 2527 }; 2575 2528 2576 - /* hwrm_port_phy_qcfg_output (size:768b/96B) */ 2529 + /* hwrm_port_phy_qcfg_output (size:832b/104B) */ 2577 2530 struct hwrm_port_phy_qcfg_output { 2578 2531 __le16 error_code; 2579 2532 __le16 req_type; ··· 2584 2537 #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL 2585 2538 #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL 2586 2539 #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK 2587 - u8 unused_0; 2540 + u8 link_signal_mode; 2541 + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_NRZ 0x0UL 2542 + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4 0x1UL 2543 + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4 2588 2544 __le16 link_speed; 2589 2545 #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL 
2590 2546 #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL ··· 2624 2574 #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL 2625 2575 #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL 2626 2576 #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL 2627 - #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL 2628 2577 __le16 force_link_speed; 2629 2578 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL 2630 2579 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL ··· 2635 2586 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL 2636 2587 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL 2637 2588 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL 2638 - #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL 2639 2589 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL 2640 2590 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 2641 2591 u8 auto_mode; ··· 2659 2611 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL 2660 2612 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL 2661 2613 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL 2662 - #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL 2663 2614 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL 2664 2615 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 2665 2616 __le16 auto_link_speed_mask; ··· 2676 2629 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL 2677 2630 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL 2678 2631 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL 2679 - #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL 2680 2632 u8 wirespeed; 2681 2633 #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL 2682 2634 #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL ··· 2809 2763 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24) 2810 2764 #define 
PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 2811 2765 __le16 fec_cfg; 2812 - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL 2813 - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL 2814 - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL 2815 - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL 2816 - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL 2817 - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL 2818 - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL 2766 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL 2767 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL 2768 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL 2769 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL 2770 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL 2771 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL 2772 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL 2773 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL 2774 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL 2775 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_SUPPORTED 0x200UL 2776 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ENABLED 0x400UL 2777 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ACTIVE 0x800UL 2778 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ACTIVE 0x1000UL 2779 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ACTIVE 0x2000UL 2780 + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ACTIVE 0x4000UL 2819 2781 u8 duplex_state; 2820 2782 #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL 2821 2783 #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL ··· 2832 2778 #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL 2833 2779 char phy_vendor_name[16]; 2834 2780 char phy_vendor_partnumber[16]; 2835 - u8 unused_2[7]; 
2781 + __le16 support_pam4_speeds; 2782 + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL 2783 + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL 2784 + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL 2785 + __le16 force_pam4_link_speed; 2786 + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL 2787 + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL 2788 + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL 2789 + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 2790 + __le16 auto_pam4_link_speed_mask; 2791 + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL 2792 + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL 2793 + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL 2794 + __le16 link_partner_pam4_adv_speeds; 2795 + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL 2796 + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL 2797 + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL 2798 + u8 unused_0[7]; 2836 2799 u8 valid; 2837 2800 }; 2838 2801 ··· 3375 3304 u8 unused_0[6]; 3376 3305 }; 3377 3306 3378 - /* hwrm_port_phy_qcaps_output (size:192b/24B) */ 3307 + /* hwrm_port_phy_qcaps_output (size:256b/32B) */ 3379 3308 struct hwrm_port_phy_qcaps_output { 3380 3309 __le16 error_code; 3381 3310 __le16 req_type; 3382 3311 __le16 seq_id; 3383 3312 __le16 resp_len; 3384 3313 u8 flags; 3385 - #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL 3386 - #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL 3387 - #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL 3388 - #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL 3389 - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xf0UL 3390 - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 4 3314 + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL 3315 + #define 
PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL 3316 + #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL 3317 + #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL 3318 + #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL 3319 + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xe0UL 3320 + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 5 3391 3321 u8 port_cnt; 3392 3322 #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL 3393 3323 #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL ··· 3411 3339 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL 3412 3340 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL 3413 3341 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL 3414 - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_200GB 0x4000UL 3415 3342 __le16 supported_speeds_auto_mode; 3416 3343 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL 3417 3344 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL ··· 3426 3355 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL 3427 3356 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL 3428 3357 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL 3429 - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_200GB 0x4000UL 3430 3358 __le16 supported_speeds_eee_mode; 3431 3359 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL 3432 3360 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL ··· 3442 3372 __le32 valid_tx_lpi_timer_high; 3443 3373 #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL 3444 3374 #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0 3445 - #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL 3446 - #define PORT_PHY_QCAPS_RESP_VALID_SFT 24 3375 + #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL 3376 + #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24 3377 + __le16 
supported_pam4_speeds_auto_mode; 3378 + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL 3379 + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL 3380 + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL 3381 + __le16 supported_pam4_speeds_force_mode; 3382 + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL 3383 + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL 3384 + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL 3385 + u8 unused_0[3]; 3386 + u8 valid; 3447 3387 }; 3448 3388 3449 3389 /* hwrm_port_phy_i2c_read_input (size:320b/40B) */ ··· 3892 3812 u8 unused_0; 3893 3813 }; 3894 3814 3895 - /* hwrm_queue_qportcfg_output (size:256b/32B) */ 3815 + /* hwrm_queue_qportcfg_output (size:1344b/168B) */ 3896 3816 struct hwrm_queue_qportcfg_output { 3897 3817 __le16 error_code; 3898 3818 __le16 req_type; ··· 3978 3898 #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL 3979 3899 #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL 3980 3900 #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 3901 + u8 unused_0; 3902 + char qid0_name[16]; 3903 + char qid1_name[16]; 3904 + char qid2_name[16]; 3905 + char qid3_name[16]; 3906 + char qid4_name[16]; 3907 + char qid5_name[16]; 3908 + char qid6_name[16]; 3909 + char qid7_name[16]; 3910 + u8 unused_1[7]; 3911 + u8 valid; 3912 + }; 3913 + 3914 + /* hwrm_queue_qcfg_input (size:192b/24B) */ 3915 + struct hwrm_queue_qcfg_input { 3916 + __le16 req_type; 3917 + __le16 cmpl_ring; 3918 + __le16 seq_id; 3919 + __le16 target_id; 3920 + __le64 resp_addr; 3921 + __le32 flags; 3922 + #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL 3923 + #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL 3924 + #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL 3925 + #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX 3926 + __le32 queue_id; 
3927 + }; 3928 + 3929 + /* hwrm_queue_qcfg_output (size:128b/16B) */ 3930 + struct hwrm_queue_qcfg_output { 3931 + __le16 error_code; 3932 + __le16 req_type; 3933 + __le16 seq_id; 3934 + __le16 resp_len; 3935 + __le32 queue_len; 3936 + u8 service_profile; 3937 + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL 3938 + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL 3939 + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL 3940 + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 3941 + u8 queue_cfg_info; 3942 + #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL 3943 + u8 unused_0; 3981 3944 u8 valid; 3982 3945 }; 3983 3946 ··· 5061 4938 #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL 5062 4939 #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL 5063 4940 #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL 4941 + #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL 5064 4942 __le16 vnic_id; 5065 4943 __le16 dflt_ring_grp; 5066 4944 __le16 rss_rule; ··· 5071 4947 __le16 default_rx_ring_id; 5072 4948 __le16 default_cmpl_ring_id; 5073 4949 __le16 queue_id; 5074 - u8 unused0[6]; 4950 + u8 rx_csum_v2_mode; 4951 + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL 4952 + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL 4953 + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL 4954 + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 4955 + u8 unused0[5]; 5075 4956 }; 5076 4957 5077 4958 /* hwrm_vnic_cfg_output (size:128b/16B) */ ··· 5118 4989 #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL 5119 4990 #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL 5120 4991 #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL 4992 + #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL 5121 4993 __le16 max_aggs_supported; 5122 4994 u8 unused_1[5]; 5123 4995 u8 valid; ··· 5285 5155 #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL 5286 5156 #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 
0x10UL 5287 5157 #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL 5158 + #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL 5288 5159 __le32 enables; 5289 5160 #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL 5290 5161 #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL 5291 5162 #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL 5163 + #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL 5292 5164 __le32 vnic_id; 5293 5165 __le16 jumbo_thresh; 5294 5166 __le16 hds_offset; 5295 5167 __le16 hds_threshold; 5296 - u8 unused_0[6]; 5168 + __le16 max_bds; 5169 + u8 unused_0[4]; 5297 5170 }; 5298 5171 5299 5172 /* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */ ··· 5364 5231 #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL 5365 5232 #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL 5366 5233 #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL 5234 + #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL 5367 5235 u8 ring_type; 5368 5236 #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL 5369 5237 #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL ··· 5380 5246 __le32 fbo; 5381 5247 u8 page_size; 5382 5248 u8 page_tbl_depth; 5383 - u8 unused_1[2]; 5249 + __le16 schq_id; 5384 5250 __le32 length; 5385 5251 __le16 logical_id; 5386 5252 __le16 cmpl_ring_id; ··· 5478 5344 __le16 target_id; 5479 5345 __le64 resp_addr; 5480 5346 u8 ring_type; 5481 - #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL 5482 - #define RING_RESET_REQ_RING_TYPE_TX 0x1UL 5483 - #define RING_RESET_REQ_RING_TYPE_RX 0x2UL 5484 - #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL 5485 - #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL 5347 + #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL 5348 + #define RING_RESET_REQ_RING_TYPE_TX 0x1UL 5349 + #define RING_RESET_REQ_RING_TYPE_RX 0x2UL 5350 + #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL 5351 + #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL 5352 + #define RING_RESET_REQ_RING_TYPE_LAST 
RING_RESET_REQ_RING_TYPE_RX_RING_GRP 5486 5353 u8 unused_0; 5487 5354 __le16 ring_id; 5488 5355 u8 unused_1[4]; ··· 5664 5529 u8 unused_0[7]; 5665 5530 u8 valid; 5666 5531 }; 5532 + 5667 5533 #define DEFAULT_FLOW_ID 0xFFFFFFFFUL 5668 5534 #define ROCEV1_FLOW_ID 0xFFFFFFFEUL 5669 5535 #define ROCEV2_FLOW_ID 0xFFFFFFFDUL ··· 6952 6816 __le64 rx_mcast_pkts; 6953 6817 __le64 rx_bcast_pkts; 6954 6818 __le64 rx_discard_pkts; 6955 - __le64 rx_drop_pkts; 6819 + __le64 rx_error_pkts; 6956 6820 __le64 rx_ucast_bytes; 6957 6821 __le64 rx_mcast_bytes; 6958 6822 __le64 rx_bcast_bytes; 6959 6823 __le64 tx_ucast_pkts; 6960 6824 __le64 tx_mcast_pkts; 6961 6825 __le64 tx_bcast_pkts; 6826 + __le64 tx_error_pkts; 6962 6827 __le64 tx_discard_pkts; 6963 - __le64 tx_drop_pkts; 6964 6828 __le64 tx_ucast_bytes; 6965 6829 __le64 tx_mcast_bytes; 6966 6830 __le64 tx_bcast_bytes; ··· 6976 6840 __le64 rx_mcast_pkts; 6977 6841 __le64 rx_bcast_pkts; 6978 6842 __le64 rx_discard_pkts; 6979 - __le64 rx_drop_pkts; 6843 + __le64 rx_error_pkts; 6980 6844 __le64 rx_ucast_bytes; 6981 6845 __le64 rx_mcast_bytes; 6982 6846 __le64 rx_bcast_bytes; 6983 6847 __le64 tx_ucast_pkts; 6984 6848 __le64 tx_mcast_pkts; 6985 6849 __le64 tx_bcast_pkts; 6850 + __le64 tx_error_pkts; 6986 6851 __le64 tx_discard_pkts; 6987 - __le64 tx_drop_pkts; 6988 6852 __le64 tx_ucast_bytes; 6989 6853 __le64 tx_mcast_bytes; 6990 6854 __le64 tx_bcast_bytes; ··· 7051 6915 __le16 target_id; 7052 6916 __le64 resp_addr; 7053 6917 __le32 stat_ctx_id; 7054 - u8 unused_0[4]; 6918 + u8 flags; 6919 + #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL 6920 + u8 unused_0[3]; 7055 6921 }; 7056 6922 7057 6923 /* hwrm_stat_ctx_query_output (size:1408b/176B) */ ··· 7082 6944 __le64 rx_agg_bytes; 7083 6945 __le64 rx_agg_events; 7084 6946 __le64 rx_agg_aborts; 6947 + u8 unused_0[7]; 6948 + u8 valid; 6949 + }; 6950 + 6951 + /* hwrm_stat_ext_ctx_query_input (size:192b/24B) */ 6952 + struct hwrm_stat_ext_ctx_query_input { 6953 + __le16 req_type; 6954 + 
__le16 cmpl_ring; 6955 + __le16 seq_id; 6956 + __le16 target_id; 6957 + __le64 resp_addr; 6958 + __le32 stat_ctx_id; 6959 + u8 flags; 6960 + #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL 6961 + u8 unused_0[3]; 6962 + }; 6963 + 6964 + /* hwrm_stat_ext_ctx_query_output (size:1472b/184B) */ 6965 + struct hwrm_stat_ext_ctx_query_output { 6966 + __le16 error_code; 6967 + __le16 req_type; 6968 + __le16 seq_id; 6969 + __le16 resp_len; 6970 + __le64 rx_ucast_pkts; 6971 + __le64 rx_mcast_pkts; 6972 + __le64 rx_bcast_pkts; 6973 + __le64 rx_discard_pkts; 6974 + __le64 rx_error_pkts; 6975 + __le64 rx_ucast_bytes; 6976 + __le64 rx_mcast_bytes; 6977 + __le64 rx_bcast_bytes; 6978 + __le64 tx_ucast_pkts; 6979 + __le64 tx_mcast_pkts; 6980 + __le64 tx_bcast_pkts; 6981 + __le64 tx_error_pkts; 6982 + __le64 tx_discard_pkts; 6983 + __le64 tx_ucast_bytes; 6984 + __le64 tx_mcast_bytes; 6985 + __le64 tx_bcast_bytes; 6986 + __le64 rx_tpa_eligible_pkt; 6987 + __le64 rx_tpa_eligible_bytes; 6988 + __le64 rx_tpa_pkt; 6989 + __le64 rx_tpa_bytes; 6990 + __le64 rx_tpa_errors; 7085 6991 u8 unused_0[7]; 7086 6992 u8 valid; 7087 6993 }; ··· 7679 7497 u8 valid; 7680 7498 }; 7681 7499 7500 + /* hwrm_dbg_read_direct_input (size:256b/32B) */ 7501 + struct hwrm_dbg_read_direct_input { 7502 + __le16 req_type; 7503 + __le16 cmpl_ring; 7504 + __le16 seq_id; 7505 + __le16 target_id; 7506 + __le64 resp_addr; 7507 + __le64 host_dest_addr; 7508 + __le32 read_addr; 7509 + __le32 read_len32; 7510 + }; 7511 + 7512 + /* hwrm_dbg_read_direct_output (size:128b/16B) */ 7513 + struct hwrm_dbg_read_direct_output { 7514 + __le16 error_code; 7515 + __le16 req_type; 7516 + __le16 seq_id; 7517 + __le16 resp_len; 7518 + __le32 crc32; 7519 + u8 unused_0[3]; 7520 + u8 valid; 7521 + }; 7522 + 7682 7523 /* coredump_segment_record (size:128b/16B) */ 7683 7524 struct coredump_segment_record { 7684 7525 __le16 component_id; ··· 7712 7507 u8 seg_flags; 7713 7508 u8 compress_flags; 7714 7509 #define SFLAG_COMPRESSED_ZLIB 
0x1UL 7715 - u8 unused_0[6]; 7510 + u8 unused_0[2]; 7511 + __le32 segment_len; 7716 7512 }; 7717 7513 7718 7514 /* hwrm_dbg_coredump_list_input (size:256b/32B) */ ··· 7826 7620 #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL 7827 7621 #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL 7828 7622 #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL 7829 - #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX 7623 + #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL 7624 + #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 7830 7625 u8 unused_0[3]; 7831 7626 __le32 fw_ring_id; 7832 7627 }; ··· 7840 7633 __le16 resp_len; 7841 7634 __le32 producer_index; 7842 7635 __le32 consumer_index; 7843 - u8 unused_0[7]; 7636 + __le32 cag_vector_ctrl; 7637 + u8 unused_0[3]; 7844 7638 u8 valid; 7845 7639 }; 7846 7640 ··· 8130 7922 #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL 8131 7923 #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL 8132 7924 #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL 7925 + #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL 8133 7926 u8 unused_0[2]; 8134 7927 }; 8135 7928 ··· 8310 8101 char test5_name[32]; 8311 8102 char test6_name[32]; 8312 8103 char test7_name[32]; 8313 - u8 unused_2[7]; 8104 + u8 eyescope_target_BER_support; 8105 + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL 8106 + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL 8107 + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL 8108 + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL 8109 + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL 8110 + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 8111 + u8 unused_2[6]; 8314 8112 u8 valid; 8315 8113 }; 8316 8114
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
··· 1029 1029 rc = bnxt_hwrm_exec_fwd_resp( 1030 1030 bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); 1031 1031 } else { 1032 - struct hwrm_port_phy_qcfg_output phy_qcfg_resp; 1032 + struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {0}; 1033 1033 struct hwrm_port_phy_qcfg_input *phy_qcfg_req; 1034 1034 1035 1035 phy_qcfg_req =