Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-updates-2020-01-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-01-22

This series provides updates to mlx5 driver.
1) Misc small cleanups
2) Some SW steering updates including header copy support
3) Full ethtool statistics support for E-Switch uplink representor
Some refactoring was required to share the bare-metal NIC ethtool
stats with the Uplink representor. On top of this, Vlad converts the
ethtool stats support in E-Switch vport representors to use the mlx5e
"stats groups" infrastructure and then applies all applicable stats
to the uplink representor netdev.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+814 -407
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 892 892 int (*update_rx)(struct mlx5e_priv *priv); 893 893 void (*update_stats)(struct mlx5e_priv *priv); 894 894 void (*update_carrier)(struct mlx5e_priv *priv); 895 + unsigned int (*stats_grps_num)(struct mlx5e_priv *priv); 896 + mlx5e_stats_grp_t *stats_grps; 895 897 struct { 896 898 mlx5e_fp_handle_rx_cqe handle_rx_cqe; 897 899 mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; ··· 966 964 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, 967 965 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); 968 966 969 - void mlx5e_update_stats(struct mlx5e_priv *priv); 970 967 void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); 971 968 void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s); 972 969
+5 -18
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 218 218 219 219 int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) 220 220 { 221 - int i, num_stats = 0; 222 - 223 221 switch (sset) { 224 222 case ETH_SS_STATS: 225 - for (i = 0; i < mlx5e_num_stats_grps; i++) 226 - num_stats += mlx5e_stats_grps[i].get_num_stats(priv); 227 - return num_stats; 223 + return mlx5e_stats_total_num(priv); 228 224 case ETH_SS_PRIV_FLAGS: 229 225 return MLX5E_NUM_PFLAGS; 230 226 case ETH_SS_TEST: ··· 236 240 struct mlx5e_priv *priv = netdev_priv(dev); 237 241 238 242 return mlx5e_ethtool_get_sset_count(priv, sset); 239 - } 240 - 241 - static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) 242 - { 243 - int i, idx = 0; 244 - 245 - for (i = 0; i < mlx5e_num_stats_grps; i++) 246 - idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); 247 243 } 248 244 249 245 void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data) ··· 256 268 break; 257 269 258 270 case ETH_SS_STATS: 259 - mlx5e_fill_stats_strings(priv, data); 271 + mlx5e_stats_fill_strings(priv, data); 260 272 break; 261 273 } 262 274 } ··· 271 283 void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, 272 284 struct ethtool_stats *stats, u64 *data) 273 285 { 274 - int i, idx = 0; 286 + int idx = 0; 275 287 276 288 mutex_lock(&priv->state_lock); 277 - mlx5e_update_stats(priv); 289 + mlx5e_stats_update(priv); 278 290 mutex_unlock(&priv->state_lock); 279 291 280 - for (i = 0; i < mlx5e_num_stats_grps; i++) 281 - idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); 292 + mlx5e_stats_fill(priv, data, idx); 282 293 } 283 294 284 295 static void mlx5e_get_ethtool_stats(struct net_device *dev,
+7 -12
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 159 159 mutex_unlock(&priv->state_lock); 160 160 } 161 161 162 - void mlx5e_update_stats(struct mlx5e_priv *priv) 163 - { 164 - int i; 165 - 166 - for (i = mlx5e_num_stats_grps - 1; i >= 0; i--) 167 - if (mlx5e_stats_grps[i].update_stats) 168 - mlx5e_stats_grps[i].update_stats(priv); 169 - } 170 - 171 162 void mlx5e_update_ndo_stats(struct mlx5e_priv *priv) 172 163 { 173 164 int i; 174 165 175 - for (i = mlx5e_num_stats_grps - 1; i >= 0; i--) 176 - if (mlx5e_stats_grps[i].update_stats_mask & 166 + for (i = mlx5e_nic_stats_grps_num(priv) - 1; i >= 0; i--) 167 + if (mlx5e_nic_stats_grps[i]->update_stats_mask & 177 168 MLX5E_NDO_UPDATE_STATS) 178 - mlx5e_stats_grps[i].update_stats(priv); 169 + mlx5e_nic_stats_grps[i]->update_stats(priv); 179 170 } 180 171 181 172 static void mlx5e_update_stats_work(struct work_struct *work) ··· 4869 4878 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 4870 4879 NETIF_F_GSO_UDP_TUNNEL_CSUM; 4871 4880 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; 4881 + netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL | 4882 + NETIF_F_GSO_UDP_TUNNEL_CSUM; 4872 4883 } 4873 4884 4874 4885 if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) { ··· 5188 5195 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, 5189 5196 .max_tc = MLX5E_MAX_NUM_TC, 5190 5197 .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK), 5198 + .stats_grps = mlx5e_nic_stats_grps, 5199 + .stats_grps_num = mlx5e_nic_stats_grps_num, 5191 5200 }; 5192 5201 5193 5202 /* mlx5e generic netdev management API (move to en_common.c) */
+187 -92
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 117 117 #define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc) 118 118 #define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc) 119 119 120 - static void mlx5e_rep_get_strings(struct net_device *dev, 121 - u32 stringset, uint8_t *data) 120 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep) 122 121 { 123 - int i, j; 124 - 125 - switch (stringset) { 126 - case ETH_SS_STATS: 127 - for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++) 128 - strcpy(data + (i * ETH_GSTRING_LEN), 129 - sw_rep_stats_desc[i].format); 130 - for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++) 131 - strcpy(data + (i * ETH_GSTRING_LEN), 132 - vport_rep_stats_desc[j].format); 133 - break; 134 - } 122 + return NUM_VPORT_REP_SW_COUNTERS; 135 123 } 136 124 137 - static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv) 125 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep) 126 + { 127 + int i; 128 + 129 + for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++) 130 + strcpy(data + (idx++) * ETH_GSTRING_LEN, 131 + sw_rep_stats_desc[i].format); 132 + return idx; 133 + } 134 + 135 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep) 136 + { 137 + int i; 138 + 139 + for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++) 140 + data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, 141 + sw_rep_stats_desc, i); 142 + return idx; 143 + } 144 + 145 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep) 146 + { 147 + struct mlx5e_sw_stats *s = &priv->stats.sw; 148 + struct rtnl_link_stats64 stats64 = {}; 149 + 150 + memset(s, 0, sizeof(*s)); 151 + mlx5e_fold_sw_stats64(priv, &stats64); 152 + 153 + s->rx_packets = stats64.rx_packets; 154 + s->rx_bytes = stats64.rx_bytes; 155 + s->tx_packets = stats64.tx_packets; 156 + s->tx_bytes = stats64.tx_bytes; 157 + s->tx_queue_dropped = stats64.tx_dropped; 158 + } 159 + 160 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep) 161 + { 162 + return NUM_VPORT_REP_HW_COUNTERS; 163 + } 164 + 165 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep) 
166 + { 167 + int i; 168 + 169 + for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++) 170 + strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format); 171 + return idx; 172 + } 173 + 174 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep) 175 + { 176 + int i; 177 + 178 + for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++) 179 + data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport, 180 + vport_rep_stats_desc, i); 181 + return idx; 182 + } 183 + 184 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep) 138 185 { 139 186 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 140 187 struct mlx5e_rep_priv *rpriv = priv->ppriv; ··· 204 157 vport_stats->tx_bytes = vf_stats.rx_bytes; 205 158 } 206 159 207 - static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv) 160 + static void mlx5e_rep_get_strings(struct net_device *dev, 161 + u32 stringset, uint8_t *data) 208 162 { 209 - struct mlx5e_pport_stats *pstats = &priv->stats.pport; 210 - struct rtnl_link_stats64 *vport_stats; 163 + struct mlx5e_priv *priv = netdev_priv(dev); 211 164 212 - mlx5e_grp_802_3_update_stats(priv); 213 - 214 - vport_stats = &priv->stats.vf_vport; 215 - 216 - vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); 217 - vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); 218 - vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); 219 - vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); 220 - } 221 - 222 - static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) 223 - { 224 - struct mlx5e_sw_stats *s = &priv->stats.sw; 225 - struct rtnl_link_stats64 stats64 = {}; 226 - 227 - memset(s, 0, sizeof(*s)); 228 - mlx5e_fold_sw_stats64(priv, &stats64); 229 - 230 - s->rx_packets = stats64.rx_packets; 231 - s->rx_bytes = stats64.rx_bytes; 232 - s->tx_packets = stats64.tx_packets; 233 - s->tx_bytes = stats64.tx_bytes; 234 - s->tx_queue_dropped = stats64.tx_dropped; 165 + switch 
(stringset) { 166 + case ETH_SS_STATS: 167 + mlx5e_stats_fill_strings(priv, data); 168 + break; 169 + } 235 170 } 236 171 237 172 static void mlx5e_rep_get_ethtool_stats(struct net_device *dev, 238 173 struct ethtool_stats *stats, u64 *data) 239 174 { 240 175 struct mlx5e_priv *priv = netdev_priv(dev); 241 - int i, j; 242 176 243 - if (!data) 244 - return; 245 - 246 - mutex_lock(&priv->state_lock); 247 - mlx5e_rep_update_sw_counters(priv); 248 - priv->profile->update_stats(priv); 249 - mutex_unlock(&priv->state_lock); 250 - 251 - for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++) 252 - data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, 253 - sw_rep_stats_desc, i); 254 - 255 - for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++) 256 - data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport, 257 - vport_rep_stats_desc, j); 177 + mlx5e_ethtool_get_ethtool_stats(priv, stats, data); 258 178 } 259 179 260 180 static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset) 261 181 { 182 + struct mlx5e_priv *priv = netdev_priv(dev); 183 + 262 184 switch (sset) { 263 185 case ETH_SS_STATS: 264 - return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS; 186 + return mlx5e_stats_total_num(priv); 265 187 default: 266 188 return -EOPNOTSUPP; 267 189 } ··· 1690 1674 mlx5e_close_drop_rq(&priv->drop_rq); 1691 1675 } 1692 1676 1677 + static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv) 1678 + { 1679 + int err = mlx5e_init_rep_rx(priv); 1680 + 1681 + if (err) 1682 + return err; 1683 + 1684 + mlx5e_create_q_counters(priv); 1685 + return 0; 1686 + } 1687 + 1688 + static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv) 1689 + { 1690 + mlx5e_destroy_q_counters(priv); 1691 + mlx5e_cleanup_rep_rx(priv); 1692 + } 1693 + 1694 + static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) 1695 + { 1696 + struct mlx5_rep_uplink_priv *uplink_priv; 1697 + struct net_device *netdev; 1698 + struct mlx5e_priv *priv; 1699 + int err; 1700 + 1701 + netdev = rpriv->netdev; 1702 + priv = 
netdev_priv(netdev); 1703 + uplink_priv = &rpriv->uplink_priv; 1704 + 1705 + mutex_init(&uplink_priv->unready_flows_lock); 1706 + INIT_LIST_HEAD(&uplink_priv->unready_flows); 1707 + 1708 + /* init shared tc flow table */ 1709 + err = mlx5e_tc_esw_init(&uplink_priv->tc_ht); 1710 + if (err) 1711 + return err; 1712 + 1713 + mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev); 1714 + 1715 + /* init indirect block notifications */ 1716 + INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list); 1717 + uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event; 1718 + err = register_netdevice_notifier(&uplink_priv->netdevice_nb); 1719 + if (err) { 1720 + mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n"); 1721 + goto tc_esw_cleanup; 1722 + } 1723 + 1724 + return 0; 1725 + 1726 + tc_esw_cleanup: 1727 + mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht); 1728 + return err; 1729 + } 1730 + 1693 1731 static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) 1694 1732 { 1695 1733 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1696 - struct mlx5_rep_uplink_priv *uplink_priv; 1697 1734 int err; 1698 1735 1699 1736 err = mlx5e_create_tises(priv); ··· 1756 1687 } 1757 1688 1758 1689 if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { 1759 - uplink_priv = &rpriv->uplink_priv; 1760 - 1761 - mutex_init(&uplink_priv->unready_flows_lock); 1762 - INIT_LIST_HEAD(&uplink_priv->unready_flows); 1763 - 1764 - /* init shared tc flow table */ 1765 - err = mlx5e_tc_esw_init(&uplink_priv->tc_ht); 1690 + err = mlx5e_init_uplink_rep_tx(rpriv); 1766 1691 if (err) 1767 1692 goto destroy_tises; 1768 - 1769 - mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev); 1770 - 1771 - /* init indirect block notifications */ 1772 - INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list); 1773 - uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event; 1774 - err = register_netdevice_notifier(&uplink_priv->netdevice_nb); 1775 - if (err) { 1776 - 
mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n"); 1777 - goto tc_esw_cleanup; 1778 - } 1779 1693 } 1780 1694 1781 1695 return 0; 1782 1696 1783 - tc_esw_cleanup: 1784 - mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht); 1785 1697 destroy_tises: 1786 1698 mlx5e_destroy_tises(priv); 1787 1699 return err; 1700 + } 1701 + 1702 + static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) 1703 + { 1704 + /* clean indirect TC block notifications */ 1705 + unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb); 1706 + mlx5e_rep_indr_clean_block_privs(rpriv); 1707 + 1708 + /* delete shared tc flow table */ 1709 + mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht); 1710 + mutex_destroy(&rpriv->uplink_priv.unready_flows_lock); 1788 1711 } 1789 1712 1790 1713 static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) ··· 1785 1724 1786 1725 mlx5e_destroy_tises(priv); 1787 1726 1788 - if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { 1789 - /* clean indirect TC block notifications */ 1790 - unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb); 1791 - mlx5e_rep_indr_clean_block_privs(rpriv); 1792 - 1793 - /* delete shared tc flow table */ 1794 - mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht); 1795 - mutex_destroy(&rpriv->uplink_priv.unready_flows_lock); 1796 - } 1727 + if (rpriv->rep->vport == MLX5_VPORT_UPLINK) 1728 + mlx5e_cleanup_uplink_rep_tx(rpriv); 1797 1729 } 1798 1730 1799 1731 static void mlx5e_rep_enable(struct mlx5e_priv *priv) ··· 1866 1812 mlx5_lag_remove(mdev); 1867 1813 } 1868 1814 1815 + static MLX5E_DEFINE_STATS_GRP(sw_rep, 0); 1816 + static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS); 1817 + 1818 + /* The stats groups order is opposite to the update_stats() order calls */ 1819 + static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = { 1820 + &MLX5E_STATS_GRP(sw_rep), 1821 + &MLX5E_STATS_GRP(vport_rep), 1822 + }; 1823 + 1824 + static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv) 1825 + { 1826 + return 
ARRAY_SIZE(mlx5e_rep_stats_grps); 1827 + } 1828 + 1829 + /* The stats groups order is opposite to the update_stats() order calls */ 1830 + static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = { 1831 + &MLX5E_STATS_GRP(sw), 1832 + &MLX5E_STATS_GRP(qcnt), 1833 + &MLX5E_STATS_GRP(vnic_env), 1834 + &MLX5E_STATS_GRP(vport), 1835 + &MLX5E_STATS_GRP(802_3), 1836 + &MLX5E_STATS_GRP(2863), 1837 + &MLX5E_STATS_GRP(2819), 1838 + &MLX5E_STATS_GRP(phy), 1839 + &MLX5E_STATS_GRP(eth_ext), 1840 + &MLX5E_STATS_GRP(pcie), 1841 + &MLX5E_STATS_GRP(per_prio), 1842 + &MLX5E_STATS_GRP(pme), 1843 + &MLX5E_STATS_GRP(channels), 1844 + &MLX5E_STATS_GRP(per_port_buff_congest), 1845 + }; 1846 + 1847 + static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv) 1848 + { 1849 + return ARRAY_SIZE(mlx5e_ul_rep_stats_grps); 1850 + } 1851 + 1869 1852 static const struct mlx5e_profile mlx5e_rep_profile = { 1870 1853 .init = mlx5e_init_rep, 1871 1854 .cleanup = mlx5e_cleanup_rep, ··· 1912 1821 .cleanup_tx = mlx5e_cleanup_rep_tx, 1913 1822 .enable = mlx5e_rep_enable, 1914 1823 .update_rx = mlx5e_update_rep_rx, 1915 - .update_stats = mlx5e_rep_update_hw_counters, 1824 + .update_stats = mlx5e_update_ndo_stats, 1916 1825 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, 1917 1826 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, 1918 1827 .max_tc = 1, 1919 1828 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), 1829 + .stats_grps = mlx5e_rep_stats_grps, 1830 + .stats_grps_num = mlx5e_rep_stats_grps_num, 1920 1831 }; 1921 1832 1922 1833 static const struct mlx5e_profile mlx5e_uplink_rep_profile = { 1923 1834 .init = mlx5e_init_rep, 1924 1835 .cleanup = mlx5e_cleanup_rep, 1925 - .init_rx = mlx5e_init_rep_rx, 1926 - .cleanup_rx = mlx5e_cleanup_rep_rx, 1836 + .init_rx = mlx5e_init_ul_rep_rx, 1837 + .cleanup_rx = mlx5e_cleanup_ul_rep_rx, 1927 1838 .init_tx = mlx5e_init_rep_tx, 1928 1839 .cleanup_tx = mlx5e_cleanup_rep_tx, 1929 1840 .enable = mlx5e_uplink_rep_enable, 1930 1841 .disable = 
mlx5e_uplink_rep_disable, 1931 1842 .update_rx = mlx5e_update_rep_rx, 1932 - .update_stats = mlx5e_uplink_rep_update_hw_counters, 1843 + .update_stats = mlx5e_update_ndo_stats, 1933 1844 .update_carrier = mlx5e_update_carrier, 1934 1845 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, 1935 1846 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, 1936 1847 .max_tc = MLX5E_MAX_NUM_TC, 1937 1848 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), 1849 + .stats_grps = mlx5e_ul_rep_stats_grps, 1850 + .stats_grps_num = mlx5e_ul_rep_stats_grps_num, 1938 1851 }; 1939 1852 1940 1853 static bool
+157 -185
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
··· 35 35 #include "en_accel/ipsec.h" 36 36 #include "en_accel/tls.h" 37 37 38 + static unsigned int stats_grps_num(struct mlx5e_priv *priv) 39 + { 40 + return !priv->profile->stats_grps_num ? 0 : 41 + priv->profile->stats_grps_num(priv); 42 + } 43 + 44 + unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv) 45 + { 46 + mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; 47 + const unsigned int num_stats_grps = stats_grps_num(priv); 48 + unsigned int total = 0; 49 + int i; 50 + 51 + for (i = 0; i < num_stats_grps; i++) 52 + total += stats_grps[i]->get_num_stats(priv); 53 + 54 + return total; 55 + } 56 + 57 + void mlx5e_stats_update(struct mlx5e_priv *priv) 58 + { 59 + mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; 60 + const unsigned int num_stats_grps = stats_grps_num(priv); 61 + int i; 62 + 63 + for (i = num_stats_grps - 1; i >= 0; i--) 64 + if (stats_grps[i]->update_stats) 65 + stats_grps[i]->update_stats(priv); 66 + } 67 + 68 + void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx) 69 + { 70 + mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; 71 + const unsigned int num_stats_grps = stats_grps_num(priv); 72 + int i; 73 + 74 + for (i = 0; i < num_stats_grps; i++) 75 + idx = stats_grps[i]->fill_stats(priv, data, idx); 76 + } 77 + 78 + void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data) 79 + { 80 + mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; 81 + const unsigned int num_stats_grps = stats_grps_num(priv); 82 + int i, idx = 0; 83 + 84 + for (i = 0; i < num_stats_grps; i++) 85 + idx = stats_grps[i]->fill_strings(priv, data, idx); 86 + } 87 + 88 + /* Concrete NIC Stats */ 89 + 38 90 static const struct counter_desc sw_stats_desc[] = { 39 91 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, 40 92 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) }, ··· 198 146 199 147 #define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) 200 148 201 - static int mlx5e_grp_sw_get_num_stats(struct 
mlx5e_priv *priv) 149 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw) 202 150 { 203 151 return NUM_SW_COUNTERS; 204 152 } 205 153 206 - static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) 154 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw) 207 155 { 208 156 int i; 209 157 ··· 212 160 return idx; 213 161 } 214 162 215 - static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) 163 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw) 216 164 { 217 165 int i; 218 166 ··· 221 169 return idx; 222 170 } 223 171 224 - static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) 172 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) 225 173 { 226 174 struct mlx5e_sw_stats *s = &priv->stats.sw; 227 175 int i; ··· 367 315 #define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc) 368 316 #define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc) 369 317 370 - static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv) 318 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt) 371 319 { 372 320 int num_stats = 0; 373 321 ··· 380 328 return num_stats; 381 329 } 382 330 383 - static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) 331 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt) 384 332 { 385 333 int i; 386 334 ··· 395 343 return idx; 396 344 } 397 345 398 - static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) 346 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt) 399 347 { 400 348 int i; 401 349 ··· 408 356 return idx; 409 357 } 410 358 411 - static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv) 359 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt) 412 360 { 413 361 struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; 414 362 u32 out[MLX5_ST_SZ_DW(query_q_counter_out)]; ··· 443 391 (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? 
\ 444 392 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0) 445 393 446 - static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv) 394 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env) 447 395 { 448 396 return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) + 449 397 NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); 450 398 } 451 399 452 - static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data, 453 - int idx) 400 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env) 454 401 { 455 402 int i; 456 403 ··· 463 412 return idx; 464 413 } 465 414 466 - static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data, 467 - int idx) 415 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env) 468 416 { 469 417 int i; 470 418 ··· 477 427 return idx; 478 428 } 479 429 480 - static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv) 430 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env) 481 431 { 482 432 u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out; 483 433 int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out); ··· 540 490 541 491 #define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc) 542 492 543 - static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv) 493 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport) 544 494 { 545 495 return NUM_VPORT_COUNTERS; 546 496 } 547 497 548 - static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data, 549 - int idx) 498 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport) 550 499 { 551 500 int i; 552 501 ··· 554 505 return idx; 555 506 } 556 507 557 - static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data, 558 - int idx) 508 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport) 559 509 { 560 510 int i; 561 511 ··· 564 516 return idx; 565 517 } 566 518 567 - static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv) 519 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport) 568 520 { 569 521 int outlen = 
MLX5_ST_SZ_BYTES(query_vport_counter_out); 570 522 u32 *out = (u32 *)priv->stats.vport.query_vport_out; ··· 603 555 604 556 #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) 605 557 606 - static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv) 558 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3) 607 559 { 608 560 return NUM_PPORT_802_3_COUNTERS; 609 561 } 610 562 611 - static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data, 612 - int idx) 563 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3) 613 564 { 614 565 int i; 615 566 ··· 617 570 return idx; 618 571 } 619 572 620 - static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data, 621 - int idx) 573 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3) 622 574 { 623 575 int i; 624 576 ··· 630 584 #define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \ 631 585 (MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1) 632 586 633 - void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv) 587 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3) 634 588 { 635 589 struct mlx5e_pport_stats *pstats = &priv->stats.pport; 636 590 struct mlx5_core_dev *mdev = priv->mdev; ··· 658 612 659 613 #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) 660 614 661 - static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv) 615 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863) 662 616 { 663 617 return NUM_PPORT_2863_COUNTERS; 664 618 } 665 619 666 - static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data, 667 - int idx) 620 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863) 668 621 { 669 622 int i; 670 623 ··· 672 627 return idx; 673 628 } 674 629 675 - static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data, 676 - int idx) 630 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863) 677 631 { 678 632 int i; 679 633 ··· 682 638 return idx; 683 639 } 684 640 685 - static void mlx5e_grp_2863_update_stats(struct 
mlx5e_priv *priv) 641 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863) 686 642 { 687 643 struct mlx5e_pport_stats *pstats = &priv->stats.pport; 688 644 struct mlx5_core_dev *mdev = priv->mdev; ··· 717 673 718 674 #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) 719 675 720 - static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv) 676 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819) 721 677 { 722 678 return NUM_PPORT_2819_COUNTERS; 723 679 } 724 680 725 - static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data, 726 - int idx) 681 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819) 727 682 { 728 683 int i; 729 684 ··· 731 688 return idx; 732 689 } 733 690 734 - static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data, 735 - int idx) 691 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819) 736 692 { 737 693 int i; 738 694 ··· 741 699 return idx; 742 700 } 743 701 744 - static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv) 702 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819) 745 703 { 746 704 struct mlx5e_pport_stats *pstats = &priv->stats.pport; 747 705 struct mlx5_core_dev *mdev = priv->mdev; ··· 779 737 #define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \ 780 738 ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc) 781 739 782 - static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv) 740 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy) 783 741 { 784 742 struct mlx5_core_dev *mdev = priv->mdev; 785 743 int num_stats; ··· 796 754 return num_stats; 797 755 } 798 756 799 - static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data, 800 - int idx) 757 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy) 801 758 { 802 759 struct mlx5_core_dev *mdev = priv->mdev; 803 760 int i; ··· 818 777 return idx; 819 778 } 820 779 821 - static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) 780 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy) 822 781 
{ 823 782 struct mlx5_core_dev *mdev = priv->mdev; 824 783 int i; ··· 844 803 return idx; 845 804 } 846 805 847 - static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv) 806 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy) 848 807 { 849 808 struct mlx5e_pport_stats *pstats = &priv->stats.pport; 850 809 struct mlx5_core_dev *mdev = priv->mdev; ··· 874 833 875 834 #define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc) 876 835 877 - static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv) 836 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext) 878 837 { 879 838 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) 880 839 return NUM_PPORT_ETH_EXT_COUNTERS; ··· 882 841 return 0; 883 842 } 884 843 885 - static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data, 886 - int idx) 844 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext) 887 845 { 888 846 int i; 889 847 ··· 893 853 return idx; 894 854 } 895 855 896 - static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data, 897 - int idx) 856 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext) 898 857 { 899 858 int i; 900 859 ··· 905 866 return idx; 906 867 } 907 868 908 - static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv) 869 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext) 909 870 { 910 871 struct mlx5e_pport_stats *pstats = &priv->stats.pport; 911 872 struct mlx5_core_dev *mdev = priv->mdev; ··· 946 907 #define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64) 947 908 #define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc) 948 909 949 - static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv) 910 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie) 950 911 { 951 912 int num_stats = 0; 952 913 ··· 962 923 return num_stats; 963 924 } 964 925 965 - static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data, 966 - int idx) 926 + static 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie) 967 927 { 968 928 int i; 969 929 ··· 983 945 return idx; 984 946 } 985 947 986 - static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data, 987 - int idx) 948 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie) 988 949 { 989 950 int i; 990 951 ··· 1007 970 return idx; 1008 971 } 1009 972 1010 - static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv) 973 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie) 1011 974 { 1012 975 struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie; 1013 976 struct mlx5_core_dev *mdev = priv->mdev; ··· 1055 1018 return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO; 1056 1019 } 1057 1020 1058 - static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *priv, 1059 - u8 *data, int idx) 1021 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest) 1060 1022 { 1061 1023 struct mlx5_core_dev *mdev = priv->mdev; 1062 1024 int i, prio; ··· 1075 1039 return idx; 1076 1040 } 1077 1041 1078 - static int mlx5e_grp_per_port_buffer_congest_fill_stats(struct mlx5e_priv *priv, 1079 - u64 *data, int idx) 1042 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest) 1080 1043 { 1081 1044 struct mlx5e_pport_stats *pport = &priv->stats.pport; 1082 1045 struct mlx5_core_dev *mdev = priv->mdev; ··· 1150 1115 } 1151 1116 } 1152 1117 1153 - static int mlx5e_grp_per_port_buffer_congest_get_num_stats(struct mlx5e_priv *priv) 1118 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest) 1154 1119 { 1155 1120 return mlx5e_grp_per_tc_prio_get_num_stats(priv) + 1156 1121 mlx5e_grp_per_tc_congest_prio_get_num_stats(priv); 1157 1122 } 1158 1123 1159 - static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *priv) 1124 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest) 1160 1125 { 1161 1126 mlx5e_grp_per_tc_prio_update_stats(priv); 1162 1127 mlx5e_grp_per_tc_congest_prio_update_stats(priv); ··· 1331 
1296 return idx; 1332 1297 } 1333 1298 1334 - static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv) 1299 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio) 1335 1300 { 1336 1301 return mlx5e_grp_per_prio_traffic_get_num_stats() + 1337 1302 mlx5e_grp_per_prio_pfc_get_num_stats(priv); 1338 1303 } 1339 1304 1340 - static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data, 1341 - int idx) 1305 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio) 1342 1306 { 1343 1307 idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx); 1344 1308 idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx); 1345 1309 return idx; 1346 1310 } 1347 1311 1348 - static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data, 1349 - int idx) 1312 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio) 1350 1313 { 1351 1314 idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx); 1352 1315 idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx); 1353 1316 return idx; 1354 1317 } 1355 1318 1356 - static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv) 1319 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio) 1357 1320 { 1358 1321 struct mlx5e_pport_stats *pstats = &priv->stats.pport; 1359 1322 struct mlx5_core_dev *mdev = priv->mdev; ··· 1386 1353 #define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc) 1387 1354 #define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc) 1388 1355 1389 - static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv) 1356 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme) 1390 1357 { 1391 1358 return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS; 1392 1359 } 1393 1360 1394 - static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data, 1395 - int idx) 1361 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme) 1396 1362 { 1397 1363 int i; 1398 1364 ··· 1404 1372 return idx; 1405 1373 } 1406 1374 1407 - static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, 
u64 *data, 1408 - int idx) 1375 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme) 1409 1376 { 1410 1377 struct mlx5_pme_stats pme_stats; 1411 1378 int i; ··· 1422 1391 return idx; 1423 1392 } 1424 1393 1425 - static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv) 1394 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; } 1395 + 1396 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec) 1426 1397 { 1427 1398 return mlx5e_ipsec_get_count(priv); 1428 1399 } 1429 1400 1430 - static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data, 1431 - int idx) 1401 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec) 1432 1402 { 1433 1403 return idx + mlx5e_ipsec_get_strings(priv, 1434 1404 data + idx * ETH_GSTRING_LEN); 1435 1405 } 1436 1406 1437 - static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data, 1438 - int idx) 1407 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec) 1439 1408 { 1440 1409 return idx + mlx5e_ipsec_get_stats(priv, data + idx); 1441 1410 } 1442 1411 1443 - static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv) 1412 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec) 1444 1413 { 1445 1414 mlx5e_ipsec_update_stats(priv); 1446 1415 } 1447 1416 1448 - static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv) 1417 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls) 1449 1418 { 1450 1419 return mlx5e_tls_get_count(priv); 1451 1420 } 1452 1421 1453 - static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data, 1454 - int idx) 1422 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls) 1455 1423 { 1456 1424 return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN); 1457 1425 } 1458 1426 1459 - static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) 1427 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls) 1460 1428 { 1461 1429 return idx + mlx5e_tls_get_stats(priv, data + idx); 1462 1430 } 1431 + 1432 + static 
MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; } 1463 1433 1464 1434 static const struct counter_desc rq_stats_desc[] = { 1465 1435 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, ··· 1595 1563 #define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc) 1596 1564 #define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc) 1597 1565 1598 - static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) 1566 + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels) 1599 1567 { 1600 1568 int max_nch = priv->max_nch; 1601 1569 ··· 1608 1576 (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used); 1609 1577 } 1610 1578 1611 - static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, 1612 - int idx) 1579 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels) 1613 1580 { 1614 1581 bool is_xsk = priv->xsk.ever_used; 1615 1582 int max_nch = priv->max_nch; ··· 1650 1619 return idx; 1651 1620 } 1652 1621 1653 - static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, 1654 - int idx) 1622 + static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels) 1655 1623 { 1656 1624 bool is_xsk = priv->xsk.ever_used; 1657 1625 int max_nch = priv->max_nch; ··· 1698 1668 return idx; 1699 1669 } 1700 1670 1671 + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; } 1672 + 1673 + MLX5E_DEFINE_STATS_GRP(sw, 0); 1674 + MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS); 1675 + MLX5E_DEFINE_STATS_GRP(vnic_env, 0); 1676 + MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS); 1677 + MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS); 1678 + MLX5E_DEFINE_STATS_GRP(2863, 0); 1679 + MLX5E_DEFINE_STATS_GRP(2819, 0); 1680 + MLX5E_DEFINE_STATS_GRP(phy, 0); 1681 + MLX5E_DEFINE_STATS_GRP(pcie, 0); 1682 + MLX5E_DEFINE_STATS_GRP(per_prio, 0); 1683 + MLX5E_DEFINE_STATS_GRP(pme, 0); 1684 + MLX5E_DEFINE_STATS_GRP(channels, 0); 1685 + MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0); 1686 + MLX5E_DEFINE_STATS_GRP(eth_ext, 0); 1687 + static 
MLX5E_DEFINE_STATS_GRP(ipsec, 0); 1688 + static MLX5E_DEFINE_STATS_GRP(tls, 0); 1689 + 1701 1690 /* The stats groups order is opposite to the update_stats() order calls */ 1702 - const struct mlx5e_stats_grp mlx5e_stats_grps[] = { 1703 - { 1704 - .get_num_stats = mlx5e_grp_sw_get_num_stats, 1705 - .fill_strings = mlx5e_grp_sw_fill_strings, 1706 - .fill_stats = mlx5e_grp_sw_fill_stats, 1707 - .update_stats = mlx5e_grp_sw_update_stats, 1708 - }, 1709 - { 1710 - .get_num_stats = mlx5e_grp_q_get_num_stats, 1711 - .fill_strings = mlx5e_grp_q_fill_strings, 1712 - .fill_stats = mlx5e_grp_q_fill_stats, 1713 - .update_stats_mask = MLX5E_NDO_UPDATE_STATS, 1714 - .update_stats = mlx5e_grp_q_update_stats, 1715 - }, 1716 - { 1717 - .get_num_stats = mlx5e_grp_vnic_env_get_num_stats, 1718 - .fill_strings = mlx5e_grp_vnic_env_fill_strings, 1719 - .fill_stats = mlx5e_grp_vnic_env_fill_stats, 1720 - .update_stats = mlx5e_grp_vnic_env_update_stats, 1721 - }, 1722 - { 1723 - .get_num_stats = mlx5e_grp_vport_get_num_stats, 1724 - .fill_strings = mlx5e_grp_vport_fill_strings, 1725 - .fill_stats = mlx5e_grp_vport_fill_stats, 1726 - .update_stats_mask = MLX5E_NDO_UPDATE_STATS, 1727 - .update_stats = mlx5e_grp_vport_update_stats, 1728 - }, 1729 - { 1730 - .get_num_stats = mlx5e_grp_802_3_get_num_stats, 1731 - .fill_strings = mlx5e_grp_802_3_fill_strings, 1732 - .fill_stats = mlx5e_grp_802_3_fill_stats, 1733 - .update_stats_mask = MLX5E_NDO_UPDATE_STATS, 1734 - .update_stats = mlx5e_grp_802_3_update_stats, 1735 - }, 1736 - { 1737 - .get_num_stats = mlx5e_grp_2863_get_num_stats, 1738 - .fill_strings = mlx5e_grp_2863_fill_strings, 1739 - .fill_stats = mlx5e_grp_2863_fill_stats, 1740 - .update_stats = mlx5e_grp_2863_update_stats, 1741 - }, 1742 - { 1743 - .get_num_stats = mlx5e_grp_2819_get_num_stats, 1744 - .fill_strings = mlx5e_grp_2819_fill_strings, 1745 - .fill_stats = mlx5e_grp_2819_fill_stats, 1746 - .update_stats = mlx5e_grp_2819_update_stats, 1747 - }, 1748 - { 1749 - .get_num_stats = 
mlx5e_grp_phy_get_num_stats, 1750 - .fill_strings = mlx5e_grp_phy_fill_strings, 1751 - .fill_stats = mlx5e_grp_phy_fill_stats, 1752 - .update_stats = mlx5e_grp_phy_update_stats, 1753 - }, 1754 - { 1755 - .get_num_stats = mlx5e_grp_eth_ext_get_num_stats, 1756 - .fill_strings = mlx5e_grp_eth_ext_fill_strings, 1757 - .fill_stats = mlx5e_grp_eth_ext_fill_stats, 1758 - .update_stats = mlx5e_grp_eth_ext_update_stats, 1759 - }, 1760 - { 1761 - .get_num_stats = mlx5e_grp_pcie_get_num_stats, 1762 - .fill_strings = mlx5e_grp_pcie_fill_strings, 1763 - .fill_stats = mlx5e_grp_pcie_fill_stats, 1764 - .update_stats = mlx5e_grp_pcie_update_stats, 1765 - }, 1766 - { 1767 - .get_num_stats = mlx5e_grp_per_prio_get_num_stats, 1768 - .fill_strings = mlx5e_grp_per_prio_fill_strings, 1769 - .fill_stats = mlx5e_grp_per_prio_fill_stats, 1770 - .update_stats = mlx5e_grp_per_prio_update_stats, 1771 - }, 1772 - { 1773 - .get_num_stats = mlx5e_grp_pme_get_num_stats, 1774 - .fill_strings = mlx5e_grp_pme_fill_strings, 1775 - .fill_stats = mlx5e_grp_pme_fill_stats, 1776 - }, 1777 - { 1778 - .get_num_stats = mlx5e_grp_ipsec_get_num_stats, 1779 - .fill_strings = mlx5e_grp_ipsec_fill_strings, 1780 - .fill_stats = mlx5e_grp_ipsec_fill_stats, 1781 - .update_stats = mlx5e_grp_ipsec_update_stats, 1782 - }, 1783 - { 1784 - .get_num_stats = mlx5e_grp_tls_get_num_stats, 1785 - .fill_strings = mlx5e_grp_tls_fill_strings, 1786 - .fill_stats = mlx5e_grp_tls_fill_stats, 1787 - }, 1788 - { 1789 - .get_num_stats = mlx5e_grp_channels_get_num_stats, 1790 - .fill_strings = mlx5e_grp_channels_fill_strings, 1791 - .fill_stats = mlx5e_grp_channels_fill_stats, 1792 - }, 1793 - { 1794 - .get_num_stats = mlx5e_grp_per_port_buffer_congest_get_num_stats, 1795 - .fill_strings = mlx5e_grp_per_port_buffer_congest_fill_strings, 1796 - .fill_stats = mlx5e_grp_per_port_buffer_congest_fill_stats, 1797 - .update_stats = mlx5e_grp_per_port_buffer_congest_update_stats, 1798 - }, 1691 + mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { 
1692 + &MLX5E_STATS_GRP(sw), 1693 + &MLX5E_STATS_GRP(qcnt), 1694 + &MLX5E_STATS_GRP(vnic_env), 1695 + &MLX5E_STATS_GRP(vport), 1696 + &MLX5E_STATS_GRP(802_3), 1697 + &MLX5E_STATS_GRP(2863), 1698 + &MLX5E_STATS_GRP(2819), 1699 + &MLX5E_STATS_GRP(phy), 1700 + &MLX5E_STATS_GRP(eth_ext), 1701 + &MLX5E_STATS_GRP(pcie), 1702 + &MLX5E_STATS_GRP(per_prio), 1703 + &MLX5E_STATS_GRP(pme), 1704 + &MLX5E_STATS_GRP(ipsec), 1705 + &MLX5E_STATS_GRP(tls), 1706 + &MLX5E_STATS_GRP(channels), 1707 + &MLX5E_STATS_GRP(per_port_buff_congest), 1799 1708 }; 1800 1709 1801 - const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); 1710 + unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv) 1711 + { 1712 + return ARRAY_SIZE(mlx5e_nic_stats_grps); 1713 + }
+67 -16
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
··· 29 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 30 * SOFTWARE. 31 31 */ 32 + 32 33 #ifndef __MLX5_EN_STATS_H__ 33 34 #define __MLX5_EN_STATS_H__ 34 35 ··· 55 54 char format[ETH_GSTRING_LEN]; 56 55 size_t offset; /* Byte offset */ 57 56 }; 57 + 58 + enum { 59 + MLX5E_NDO_UPDATE_STATS = BIT(0x1), 60 + }; 61 + 62 + struct mlx5e_priv; 63 + struct mlx5e_stats_grp { 64 + u16 update_stats_mask; 65 + int (*get_num_stats)(struct mlx5e_priv *priv); 66 + int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx); 67 + int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx); 68 + void (*update_stats)(struct mlx5e_priv *priv); 69 + }; 70 + 71 + typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t; 72 + 73 + #define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name 74 + 75 + #define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \ 76 + int MLX5E_STATS_GRP_OP(grp, num_stats)(struct mlx5e_priv *priv) 77 + 78 + #define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \ 79 + void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv) 80 + 81 + #define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \ 82 + int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx) 83 + 84 + #define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \ 85 + int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx) 86 + 87 + #define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp 88 + 89 + #define MLX5E_DECLARE_STATS_GRP(grp) \ 90 + const struct mlx5e_stats_grp MLX5E_STATS_GRP(grp) 91 + 92 + #define MLX5E_DEFINE_STATS_GRP(grp, mask) \ 93 + MLX5E_DECLARE_STATS_GRP(grp) = { \ 94 + .get_num_stats = MLX5E_STATS_GRP_OP(grp, num_stats), \ 95 + .fill_stats = MLX5E_STATS_GRP_OP(grp, fill_stats), \ 96 + .fill_strings = MLX5E_STATS_GRP_OP(grp, fill_strings), \ 97 + .update_stats = MLX5E_STATS_GRP_OP(grp, update_stats), \ 98 + .update_stats_mask = mask, \ 99 + } 100 + 101 + unsigned int 
mlx5e_stats_total_num(struct mlx5e_priv *priv); 102 + void mlx5e_stats_update(struct mlx5e_priv *priv); 103 + void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx); 104 + void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data); 105 + 106 + /* Concrete NIC Stats */ 58 107 59 108 struct mlx5e_sw_stats { 60 109 u64 rx_packets; ··· 373 322 struct mlx5e_pcie_stats pcie; 374 323 }; 375 324 376 - enum { 377 - MLX5E_NDO_UPDATE_STATS = BIT(0x1), 378 - }; 325 + extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[]; 326 + unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv); 379 327 380 - struct mlx5e_priv; 381 - struct mlx5e_stats_grp { 382 - u16 update_stats_mask; 383 - int (*get_num_stats)(struct mlx5e_priv *priv); 384 - int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx); 385 - int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx); 386 - void (*update_stats)(struct mlx5e_priv *priv); 387 - }; 388 - 389 - extern const struct mlx5e_stats_grp mlx5e_stats_grps[]; 390 - extern const int mlx5e_num_stats_grps; 391 - 392 - void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv); 328 + extern MLX5E_DECLARE_STATS_GRP(sw); 329 + extern MLX5E_DECLARE_STATS_GRP(qcnt); 330 + extern MLX5E_DECLARE_STATS_GRP(vnic_env); 331 + extern MLX5E_DECLARE_STATS_GRP(vport); 332 + extern MLX5E_DECLARE_STATS_GRP(802_3); 333 + extern MLX5E_DECLARE_STATS_GRP(2863); 334 + extern MLX5E_DECLARE_STATS_GRP(2819); 335 + extern MLX5E_DECLARE_STATS_GRP(phy); 336 + extern MLX5E_DECLARE_STATS_GRP(eth_ext); 337 + extern MLX5E_DECLARE_STATS_GRP(pcie); 338 + extern MLX5E_DECLARE_STATS_GRP(per_prio); 339 + extern MLX5E_DECLARE_STATS_GRP(pme); 340 + extern MLX5E_DECLARE_STATS_GRP(channels); 341 + extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest); 393 342 394 343 #endif /* __MLX5_EN_STATS_H__ */
+39
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1810 1810 outer_headers); 1811 1811 } 1812 1812 1813 + static int mlx5e_flower_parse_meta(struct net_device *filter_dev, 1814 + struct flow_cls_offload *f) 1815 + { 1816 + struct flow_rule *rule = flow_cls_offload_flow_rule(f); 1817 + struct netlink_ext_ack *extack = f->common.extack; 1818 + struct net_device *ingress_dev; 1819 + struct flow_match_meta match; 1820 + 1821 + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) 1822 + return 0; 1823 + 1824 + flow_rule_match_meta(rule, &match); 1825 + if (match.mask->ingress_ifindex != 0xFFFFFFFF) { 1826 + NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); 1827 + return -EINVAL; 1828 + } 1829 + 1830 + ingress_dev = __dev_get_by_index(dev_net(filter_dev), 1831 + match.key->ingress_ifindex); 1832 + if (!ingress_dev) { 1833 + NL_SET_ERR_MSG_MOD(extack, 1834 + "Can't find the ingress port to match on"); 1835 + return -EINVAL; 1836 + } 1837 + 1838 + if (ingress_dev != filter_dev) { 1839 + NL_SET_ERR_MSG_MOD(extack, 1840 + "Can't match on the ingress filter port"); 1841 + return -EINVAL; 1842 + } 1843 + 1844 + return 0; 1845 + } 1846 + 1813 1847 static int __parse_cls_flower(struct mlx5e_priv *priv, 1814 1848 struct mlx5_flow_spec *spec, 1815 1849 struct flow_cls_offload *f, ··· 1864 1830 u16 addr_type = 0; 1865 1831 u8 ip_proto = 0; 1866 1832 u8 *match_level; 1833 + int err; 1867 1834 1868 1835 match_level = outer_match_level; 1869 1836 ··· 1907 1872 headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP, 1908 1873 spec); 1909 1874 } 1875 + 1876 + err = mlx5e_flower_parse_meta(filter_dev, f); 1877 + if (err) 1878 + return err; 1910 1879 1911 1880 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 1912 1881 struct flow_match_basic match;
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
··· 32 32 * pools. 33 33 */ 34 34 #define ESW_SIZE (16 * 1024 * 1024) 35 - const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024, 36 - 1 * 1024 * 1024, 37 - 64 * 1024, 38 - 4 * 1024, }; 35 + static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024, 36 + 1 * 1024 * 1024, 37 + 64 * 1024, 38 + 4 * 1024, }; 39 39 40 40 struct mlx5_esw_chains_priv { 41 41 struct rhashtable chains_ht;
+24
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 419 419 mlx5e_destroy_q_counters(priv); 420 420 } 421 421 422 + /* The stats groups order is opposite to the update_stats() order calls */ 423 + static mlx5e_stats_grp_t mlx5i_stats_grps[] = { 424 + &MLX5E_STATS_GRP(sw), 425 + &MLX5E_STATS_GRP(qcnt), 426 + &MLX5E_STATS_GRP(vnic_env), 427 + &MLX5E_STATS_GRP(vport), 428 + &MLX5E_STATS_GRP(802_3), 429 + &MLX5E_STATS_GRP(2863), 430 + &MLX5E_STATS_GRP(2819), 431 + &MLX5E_STATS_GRP(phy), 432 + &MLX5E_STATS_GRP(pcie), 433 + &MLX5E_STATS_GRP(per_prio), 434 + &MLX5E_STATS_GRP(pme), 435 + &MLX5E_STATS_GRP(channels), 436 + &MLX5E_STATS_GRP(per_port_buff_congest), 437 + }; 438 + 439 + static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv) 440 + { 441 + return ARRAY_SIZE(mlx5i_stats_grps); 442 + } 443 + 422 444 static const struct mlx5e_profile mlx5i_nic_profile = { 423 445 .init = mlx5i_init, 424 446 .cleanup = mlx5i_cleanup, ··· 457 435 .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ 458 436 .max_tc = MLX5I_MAX_NUM_TC, 459 437 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), 438 + .stats_grps = mlx5i_stats_grps, 439 + .stats_grps_num = mlx5i_stats_grps_num, 460 440 }; 461 441 462 442 /* mlx5i netdev NDos */
+305 -78
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
··· 677 677 goto out_invalid_arg; 678 678 } 679 679 if (action->dest_tbl.tbl->level <= matcher->tbl->level) { 680 + mlx5_core_warn_once(dmn->mdev, 681 + "Connecting table to a lower/same level destination table\n"); 680 682 mlx5dr_dbg(dmn, 681 - "Destination table level should be higher than source table\n"); 682 - goto out_invalid_arg; 683 + "Connecting table at level %d to a destination table at level %d\n", 684 + matcher->tbl->level, 685 + action->dest_tbl.tbl->level); 683 686 } 684 687 attr.final_icm_addr = rx_rule ? 685 688 action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr : ··· 1317 1314 } 1318 1315 1319 1316 static int 1320 - dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn, 1321 - __be64 *sw_action, 1322 - __be64 *hw_action, 1323 - const struct dr_action_modify_field_conv **ret_hw_info) 1317 + dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn, 1318 + __be64 *sw_action, 1319 + __be64 *hw_action, 1320 + const struct dr_action_modify_field_conv **ret_hw_info) 1324 1321 { 1325 1322 const struct dr_action_modify_field_conv *hw_action_info; 1326 - u8 offset, length, max_length, action; 1323 + u8 max_length; 1327 1324 u16 sw_field; 1328 - u8 hw_opcode; 1329 1325 u32 data; 1330 1326 1331 1327 /* Get SW modify action data */ 1332 - action = MLX5_GET(set_action_in, sw_action, action_type); 1328 + sw_field = MLX5_GET(set_action_in, sw_action, field); 1329 + data = MLX5_GET(set_action_in, sw_action, data); 1330 + 1331 + /* Convert SW data to HW modify action format */ 1332 + hw_action_info = dr_action_modify_get_hw_info(sw_field); 1333 + if (!hw_action_info) { 1334 + mlx5dr_dbg(dmn, "Modify add action invalid field given\n"); 1335 + return -EINVAL; 1336 + } 1337 + 1338 + max_length = hw_action_info->end - hw_action_info->start + 1; 1339 + 1340 + MLX5_SET(dr_action_hw_set, hw_action, 1341 + opcode, MLX5DR_ACTION_MDFY_HW_OP_ADD); 1342 + 1343 + MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, 1344 + hw_action_info->hw_field); 1345 + 1346 + 
MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 1347 + hw_action_info->start); 1348 + 1349 + /* PRM defines that length zero specific length of 32bits */ 1350 + MLX5_SET(dr_action_hw_set, hw_action, destination_length, 1351 + max_length == 32 ? 0 : max_length); 1352 + 1353 + MLX5_SET(dr_action_hw_set, hw_action, inline_data, data); 1354 + 1355 + *ret_hw_info = hw_action_info; 1356 + 1357 + return 0; 1358 + } 1359 + 1360 + static int 1361 + dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn, 1362 + __be64 *sw_action, 1363 + __be64 *hw_action, 1364 + const struct dr_action_modify_field_conv **ret_hw_info) 1365 + { 1366 + const struct dr_action_modify_field_conv *hw_action_info; 1367 + u8 offset, length, max_length; 1368 + u16 sw_field; 1369 + u32 data; 1370 + 1371 + /* Get SW modify action data */ 1333 1372 length = MLX5_GET(set_action_in, sw_action, length); 1334 1373 offset = MLX5_GET(set_action_in, sw_action, offset); 1335 1374 sw_field = MLX5_GET(set_action_in, sw_action, field); ··· 1380 1335 /* Convert SW data to HW modify action format */ 1381 1336 hw_action_info = dr_action_modify_get_hw_info(sw_field); 1382 1337 if (!hw_action_info) { 1383 - mlx5dr_dbg(dmn, "Modify action invalid field given\n"); 1338 + mlx5dr_dbg(dmn, "Modify set action invalid field given\n"); 1384 1339 return -EINVAL; 1385 1340 } 1386 1341 1342 + /* PRM defines that length zero specific length of 32bits */ 1343 + length = length ? 
length : 32; 1344 + 1387 1345 max_length = hw_action_info->end - hw_action_info->start + 1; 1388 1346 1389 - switch (action) { 1390 - case MLX5_ACTION_TYPE_SET: 1391 - hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_SET; 1392 - /* PRM defines that length zero specific length of 32bits */ 1393 - if (!length) 1394 - length = 32; 1395 - 1396 - if (length + offset > max_length) { 1397 - mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n"); 1398 - return -EINVAL; 1399 - } 1400 - break; 1401 - 1402 - case MLX5_ACTION_TYPE_ADD: 1403 - hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_ADD; 1404 - offset = 0; 1405 - length = max_length; 1406 - break; 1407 - 1408 - default: 1409 - mlx5dr_info(dmn, "Unsupported action_type for modify action\n"); 1410 - return -EOPNOTSUPP; 1347 + if (length + offset > max_length) { 1348 + mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n"); 1349 + return -EINVAL; 1411 1350 } 1412 1351 1413 - MLX5_SET(dr_action_hw_set, hw_action, opcode, hw_opcode); 1352 + MLX5_SET(dr_action_hw_set, hw_action, 1353 + opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); 1414 1354 1415 1355 MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, 1416 1356 hw_action_info->hw_field); ··· 1414 1384 } 1415 1385 1416 1386 static int 1417 - dr_action_modify_check_field_limitation(struct mlx5dr_domain *dmn, 1418 - const __be64 *sw_action) 1387 + dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn, 1388 + __be64 *sw_action, 1389 + __be64 *hw_action, 1390 + const struct dr_action_modify_field_conv **ret_dst_hw_info, 1391 + const struct dr_action_modify_field_conv **ret_src_hw_info) 1419 1392 { 1420 - u16 sw_field; 1421 - u8 action; 1393 + u8 src_offset, dst_offset, src_max_length, dst_max_length, length; 1394 + const struct dr_action_modify_field_conv *hw_dst_action_info; 1395 + const struct dr_action_modify_field_conv *hw_src_action_info; 1396 + u16 src_field, dst_field; 1422 1397 1423 - sw_field = MLX5_GET(set_action_in, sw_action, field); 1398 + /* Get SW modify action 
data */ 1399 + src_field = MLX5_GET(copy_action_in, sw_action, src_field); 1400 + dst_field = MLX5_GET(copy_action_in, sw_action, dst_field); 1401 + src_offset = MLX5_GET(copy_action_in, sw_action, src_offset); 1402 + dst_offset = MLX5_GET(copy_action_in, sw_action, dst_offset); 1403 + length = MLX5_GET(copy_action_in, sw_action, length); 1404 + 1405 + /* Convert SW data to HW modify action format */ 1406 + hw_src_action_info = dr_action_modify_get_hw_info(src_field); 1407 + hw_dst_action_info = dr_action_modify_get_hw_info(dst_field); 1408 + if (!hw_src_action_info || !hw_dst_action_info) { 1409 + mlx5dr_dbg(dmn, "Modify copy action invalid field given\n"); 1410 + return -EINVAL; 1411 + } 1412 + 1413 + /* PRM defines that length zero specific length of 32bits */ 1414 + length = length ? length : 32; 1415 + 1416 + src_max_length = hw_src_action_info->end - 1417 + hw_src_action_info->start + 1; 1418 + dst_max_length = hw_dst_action_info->end - 1419 + hw_dst_action_info->start + 1; 1420 + 1421 + if (length + src_offset > src_max_length || 1422 + length + dst_offset > dst_max_length) { 1423 + mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n"); 1424 + return -EINVAL; 1425 + } 1426 + 1427 + MLX5_SET(dr_action_hw_copy, hw_action, 1428 + opcode, MLX5DR_ACTION_MDFY_HW_OP_COPY); 1429 + 1430 + MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, 1431 + hw_dst_action_info->hw_field); 1432 + 1433 + MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, 1434 + hw_dst_action_info->start + dst_offset); 1435 + 1436 + MLX5_SET(dr_action_hw_copy, hw_action, destination_length, 1437 + length == 32 ? 
0 : length); 1438 + 1439 + MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, 1440 + hw_src_action_info->hw_field); 1441 + 1442 + MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, 1443 + hw_src_action_info->start + dst_offset); 1444 + 1445 + *ret_dst_hw_info = hw_dst_action_info; 1446 + *ret_src_hw_info = hw_src_action_info; 1447 + 1448 + return 0; 1449 + } 1450 + 1451 + static int 1452 + dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn, 1453 + __be64 *sw_action, 1454 + __be64 *hw_action, 1455 + const struct dr_action_modify_field_conv **ret_dst_hw_info, 1456 + const struct dr_action_modify_field_conv **ret_src_hw_info) 1457 + { 1458 + u8 action; 1459 + int ret; 1460 + 1461 + *hw_action = 0; 1462 + *ret_src_hw_info = NULL; 1463 + 1464 + /* Get SW modify action type */ 1424 1465 action = MLX5_GET(set_action_in, sw_action, action_type); 1425 1466 1426 - /* Check if SW field is supported in current domain (RX/TX) */ 1427 - if (action == MLX5_ACTION_TYPE_SET) { 1428 - if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) { 1429 - if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) { 1430 - mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n", 1431 - sw_field); 1432 - return -EINVAL; 1433 - } 1434 - } 1467 + switch (action) { 1468 + case MLX5_ACTION_TYPE_SET: 1469 + ret = dr_action_modify_sw_to_hw_set(dmn, sw_action, 1470 + hw_action, 1471 + ret_dst_hw_info); 1472 + break; 1435 1473 1436 - if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) { 1437 - if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) { 1438 - mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n", 1439 - sw_field); 1440 - return -EINVAL; 1441 - } 1442 - } 1443 - } else if (action == MLX5_ACTION_TYPE_ADD) { 1444 - if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL && 1445 - sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT && 1446 - sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM && 1447 - sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) { 1448 - mlx5dr_dbg(dmn, "Unsupported field 
%d for add action\n", sw_field); 1474 + case MLX5_ACTION_TYPE_ADD: 1475 + ret = dr_action_modify_sw_to_hw_add(dmn, sw_action, 1476 + hw_action, 1477 + ret_dst_hw_info); 1478 + break; 1479 + 1480 + case MLX5_ACTION_TYPE_COPY: 1481 + ret = dr_action_modify_sw_to_hw_copy(dmn, sw_action, 1482 + hw_action, 1483 + ret_dst_hw_info, 1484 + ret_src_hw_info); 1485 + break; 1486 + 1487 + default: 1488 + mlx5dr_info(dmn, "Unsupported action_type for modify action\n"); 1489 + ret = -EOPNOTSUPP; 1490 + } 1491 + 1492 + return ret; 1493 + } 1494 + 1495 + static int 1496 + dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action, 1497 + const __be64 *sw_action) 1498 + { 1499 + u16 sw_field = MLX5_GET(set_action_in, sw_action, field); 1500 + struct mlx5dr_domain *dmn = action->rewrite.dmn; 1501 + 1502 + if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) { 1503 + action->rewrite.allow_rx = 0; 1504 + if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) { 1505 + mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n", 1506 + sw_field); 1449 1507 return -EINVAL; 1450 1508 } 1451 - } else { 1452 - mlx5dr_info(dmn, "Unsupported action %d modify action\n", action); 1453 - return -EOPNOTSUPP; 1509 + } else if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) { 1510 + action->rewrite.allow_tx = 0; 1511 + if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) { 1512 + mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n", 1513 + sw_field); 1514 + return -EINVAL; 1515 + } 1516 + } 1517 + 1518 + if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) { 1519 + mlx5dr_dbg(dmn, "Modify SET actions not supported on both RX and TX\n"); 1520 + return -EINVAL; 1454 1521 } 1455 1522 1456 1523 return 0; 1524 + } 1525 + 1526 + static int 1527 + dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action, 1528 + const __be64 *sw_action) 1529 + { 1530 + u16 sw_field = MLX5_GET(set_action_in, sw_action, field); 1531 + struct mlx5dr_domain *dmn = action->rewrite.dmn; 1532 + 
1533 + if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL && 1534 + sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT && 1535 + sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM && 1536 + sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) { 1537 + mlx5dr_dbg(dmn, "Unsupported field %d for add action\n", 1538 + sw_field); 1539 + return -EINVAL; 1540 + } 1541 + 1542 + return 0; 1543 + } 1544 + 1545 + static int 1546 + dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action, 1547 + const __be64 *sw_action) 1548 + { 1549 + struct mlx5dr_domain *dmn = action->rewrite.dmn; 1550 + u16 sw_fields[2]; 1551 + int i; 1552 + 1553 + sw_fields[0] = MLX5_GET(copy_action_in, sw_action, src_field); 1554 + sw_fields[1] = MLX5_GET(copy_action_in, sw_action, dst_field); 1555 + 1556 + for (i = 0; i < 2; i++) { 1557 + if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_A) { 1558 + action->rewrite.allow_rx = 0; 1559 + if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) { 1560 + mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n", 1561 + sw_fields[i]); 1562 + return -EINVAL; 1563 + } 1564 + } else if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_B) { 1565 + action->rewrite.allow_tx = 0; 1566 + if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) { 1567 + mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n", 1568 + sw_fields[i]); 1569 + return -EINVAL; 1570 + } 1571 + } 1572 + } 1573 + 1574 + if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) { 1575 + mlx5dr_dbg(dmn, "Modify copy actions not supported on both RX and TX\n"); 1576 + return -EINVAL; 1577 + } 1578 + 1579 + return 0; 1580 + } 1581 + 1582 + static int 1583 + dr_action_modify_check_field_limitation(struct mlx5dr_action *action, 1584 + const __be64 *sw_action) 1585 + { 1586 + struct mlx5dr_domain *dmn = action->rewrite.dmn; 1587 + u8 action_type; 1588 + int ret; 1589 + 1590 + action_type = MLX5_GET(set_action_in, sw_action, action_type); 1591 + 1592 + switch (action_type) { 1593 + case 
MLX5_ACTION_TYPE_SET: 1594 + ret = dr_action_modify_check_set_field_limitation(action, 1595 + sw_action); 1596 + break; 1597 + 1598 + case MLX5_ACTION_TYPE_ADD: 1599 + ret = dr_action_modify_check_add_field_limitation(action, 1600 + sw_action); 1601 + break; 1602 + 1603 + case MLX5_ACTION_TYPE_COPY: 1604 + ret = dr_action_modify_check_copy_field_limitation(action, 1605 + sw_action); 1606 + break; 1607 + 1608 + default: 1609 + mlx5dr_info(dmn, "Unsupported action %d modify action\n", 1610 + action_type); 1611 + ret = -EOPNOTSUPP; 1612 + } 1613 + 1614 + return ret; 1457 1615 } 1458 1616 1459 1617 static bool ··· 1652 1434 return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL; 1653 1435 } 1654 1436 1655 - static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn, 1437 + static int dr_actions_convert_modify_header(struct mlx5dr_action *action, 1656 1438 u32 max_hw_actions, 1657 1439 u32 num_sw_actions, 1658 1440 __be64 sw_actions[], ··· 1660 1442 u32 *num_hw_actions, 1661 1443 bool *modify_ttl) 1662 1444 { 1663 - const struct dr_action_modify_field_conv *hw_action_info; 1445 + const struct dr_action_modify_field_conv *hw_dst_action_info; 1446 + const struct dr_action_modify_field_conv *hw_src_action_info; 1664 1447 u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED; 1665 1448 u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE; 1666 1449 u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE; 1450 + struct mlx5dr_domain *dmn = action->rewrite.dmn; 1667 1451 int ret, i, hw_idx = 0; 1668 1452 __be64 *sw_action; 1669 1453 __be64 hw_action; 1670 1454 1671 1455 *modify_ttl = false; 1672 1456 1457 + action->rewrite.allow_rx = 1; 1458 + action->rewrite.allow_tx = 1; 1459 + 1673 1460 for (i = 0; i < num_sw_actions; i++) { 1674 1461 sw_action = &sw_actions[i]; 1675 1462 1676 - ret = dr_action_modify_check_field_limitation(dmn, sw_action); 1463 + ret = dr_action_modify_check_field_limitation(action, 1464 + sw_action); 1677 1465 if (ret) 1678 1466 return ret; 1679 1467 ··· 1690 1466 
ret = dr_action_modify_sw_to_hw(dmn, 1691 1467 sw_action, 1692 1468 &hw_action, 1693 - &hw_action_info); 1469 + &hw_dst_action_info, 1470 + &hw_src_action_info); 1694 1471 if (ret) 1695 1472 return ret; 1696 1473 1697 1474 /* Due to a HW limitation we cannot modify 2 different L3 types */ 1698 - if (l3_type && hw_action_info->l3_type && 1699 - hw_action_info->l3_type != l3_type) { 1475 + if (l3_type && hw_dst_action_info->l3_type && 1476 + hw_dst_action_info->l3_type != l3_type) { 1700 1477 mlx5dr_dbg(dmn, "Action list can't support two different L3 types\n"); 1701 1478 return -EINVAL; 1702 1479 } 1703 - if (hw_action_info->l3_type) 1704 - l3_type = hw_action_info->l3_type; 1480 + if (hw_dst_action_info->l3_type) 1481 + l3_type = hw_dst_action_info->l3_type; 1705 1482 1706 1483 /* Due to a HW limitation we cannot modify two different L4 types */ 1707 - if (l4_type && hw_action_info->l4_type && 1708 - hw_action_info->l4_type != l4_type) { 1484 + if (l4_type && hw_dst_action_info->l4_type && 1485 + hw_dst_action_info->l4_type != l4_type) { 1709 1486 mlx5dr_dbg(dmn, "Action list can't support two different L4 types\n"); 1710 1487 return -EINVAL; 1711 1488 } 1712 - if (hw_action_info->l4_type) 1713 - l4_type = hw_action_info->l4_type; 1489 + if (hw_dst_action_info->l4_type) 1490 + l4_type = hw_dst_action_info->l4_type; 1714 1491 1715 1492 /* HW reads and executes two actions at once this means we 1716 1493 * need to create a gap if two actions access the same field 1717 1494 */ 1718 - if ((hw_idx % 2) && hw_field == hw_action_info->hw_field) { 1495 + if ((hw_idx % 2) && (hw_field == hw_dst_action_info->hw_field || 1496 + (hw_src_action_info && 1497 + hw_field == hw_src_action_info->hw_field))) { 1719 1498 /* Check if after gap insertion the total number of HW 1720 1499 * modify actions doesn't exceeds the limit 1721 1500 */ ··· 1728 1501 return -EINVAL; 1729 1502 } 1730 1503 } 1731 - hw_field = hw_action_info->hw_field; 1504 + hw_field = hw_dst_action_info->hw_field; 
1732 1505 1733 1506 hw_actions[hw_idx] = hw_action; 1734 1507 hw_idx++; ··· 1771 1544 goto free_chunk; 1772 1545 } 1773 1546 1774 - ret = dr_actions_convert_modify_header(dmn, 1547 + ret = dr_actions_convert_modify_header(action, 1775 1548 max_hw_actions, 1776 1549 num_sw_actions, 1777 1550 actions,
+16
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
··· 32 32 }; 33 33 34 34 enum { 35 + MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1, 35 36 MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2, 36 37 MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3, 37 38 }; ··· 624 623 u8 destination_length[0x5]; 625 624 626 625 u8 inline_data[0x20]; 626 + }; 627 + 628 + struct mlx5_ifc_dr_action_hw_copy_bits { 629 + u8 opcode[0x8]; 630 + u8 destination_field_code[0x8]; 631 + u8 reserved_at_10[0x2]; 632 + u8 destination_left_shifter[0x6]; 633 + u8 reserved_at_18[0x2]; 634 + u8 destination_length[0x6]; 635 + 636 + u8 reserved_at_20[0x8]; 637 + u8 source_field_code[0x8]; 638 + u8 reserved_at_30[0x2]; 639 + u8 source_left_shifter[0x6]; 640 + u8 reserved_at_38[0x8]; 627 641 }; 628 642 629 643 #endif /* MLX5_IFC_DR_H */
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/wq.c
··· 89 89 len = nstrides << wq->fbc.log_stride; 90 90 wqe = mlx5_wq_cyc_get_wqe(wq, ix); 91 91 92 - pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %ld\n", 92 + pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %zu\n", 93 93 mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len); 94 94 print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false); 95 95 }