Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-updates-2021-09-30' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-09-30

1) From Yevgeny Kliteynik:

This patch series deals with vport handling in SW steering.

For every vport, SW steering queries FW for this vport's properties,
such as RX/TX ICM addresses to be able to add this vport as dest action.
The following patches rework vport capabilities management and add support
for Scalable Functions (SFs).

- Patch 1 fixes the vport number data type all over the DR code to 16 bits
in accordance with HW spec.
- Patch 2 replaces local SW steering WIRE_PORT macro with the existing
mlx5 define.
- Patch 3 adds missing query for vport 0 and handles eswitch manager
capabilities for ECPF (BlueField in embedded CPU mode).
- Patch 4 fixes error messages for failure to obtain vport caps from
different locations in the code to have the same verbosity level and
similar wording.
- Patch 5 adds support for csum recalculation flow tables on SFs: it
implements these FTs management in XArray instead of the fixed size array,
thus adding support for csum recalculation table for any valid vport.
- Patch 6 is the main patch of this whole series: it refactors vports
capabilities handling and adds SFs support.

2) Minor and trivial updates and cleanups

* tag 'mlx5-updates-2021-09-30' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
net/mlx5e: Use array_size() helper
net/mlx5: Use struct_size() helper in kvzalloc()
net/mlx5: Use kvcalloc() instead of kvzalloc()
net/mlx5: Tolerate failures in debug features while driver load
net/mlx5: Warn for devlink reload when there are VFs alive
net/mlx5: DR, Add missing string for action type SAMPLER
net/mlx5: DR, init_next_match only if needed
net/mlx5: DR, Fix typo 'offeset' to 'offset'
net/mlx5: DR, Increase supported num of actions to 32
net/mlx5: DR, Add support for SF vports
net/mlx5: DR, Support csum recalculation flow table on SFs
net/mlx5: DR, Align error messages for failure to obtain vport caps
net/mlx5: DR, Add missing query for vport 0
net/mlx5: DR, Replace local WIRE_PORT macro with the existing MLX5_VPORT_UPLINK
net/mlx5: DR, Fix vport number data type to u16
====================

Link: https://lore.kernel.org/r/20210930232050.41779-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+222 -153
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 2058 2058 return -EINVAL; 2059 2059 } 2060 2060 2061 - cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL); 2061 + cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL); 2062 2062 if (!cmd->stats) 2063 2063 return -ENOMEM; 2064 2064
+5
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
··· 136 136 struct netlink_ext_ack *extack) 137 137 { 138 138 struct mlx5_core_dev *dev = devlink_priv(devlink); 139 + struct pci_dev *pdev = dev->pdev; 139 140 bool sf_dev_allocated; 140 141 141 142 sf_dev_allocated = mlx5_sf_dev_allocated(dev); ··· 152 151 if (mlx5_lag_is_active(dev)) { 153 152 NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode"); 154 153 return -EOPNOTSUPP; 154 + } 155 + 156 + if (pci_num_vf(pdev)) { 157 + NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable"); 155 158 } 156 159 157 160 switch (action) {
+6 -4
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 930 930 struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; 931 931 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); 932 932 int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS; 933 + size_t size; 933 934 934 - xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq, 935 - GFP_KERNEL, numa); 935 + size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq); 936 + xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa); 936 937 if (!xdpi_fifo->xi) 937 938 return -ENOMEM; 938 939 ··· 947 946 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) 948 947 { 949 948 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); 949 + size_t size; 950 950 int err; 951 951 952 - sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz, 953 - GFP_KERNEL, numa); 952 + size = array_size(sizeof(*sq->db.wqe_info), wq_sz); 953 + sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa); 954 954 if (!sq->db.wqe_info) 955 955 return -ENOMEM; 956 956
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 1009 1009 u16 vport_num; 1010 1010 1011 1011 num_vfs = esw->esw_funcs.num_vfs; 1012 - flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL); 1012 + flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL); 1013 1013 if (!flows) 1014 1014 return -ENOMEM; 1015 1015 ··· 1188 1188 1189 1189 peer_miss_rules_setup(esw, peer_dev, spec, &dest); 1190 1190 1191 - flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL); 1191 + flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL); 1192 1192 if (!flows) { 1193 1193 err = -ENOMEM; 1194 1194 goto alloc_flows_err;
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
··· 497 497 alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc); 498 498 bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1; 499 499 500 - bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc), 501 - GFP_KERNEL); 500 + bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL); 502 501 if (!bulk) 503 502 goto err_alloc_bulk; 504 503
+6 -6
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1112 1112 1113 1113 err = mlx5_fw_tracer_init(dev->tracer); 1114 1114 if (err) { 1115 - mlx5_core_err(dev, "Failed to init FW tracer\n"); 1116 - goto err_fw_tracer; 1115 + mlx5_core_err(dev, "Failed to init FW tracer %d\n", err); 1116 + mlx5_fw_tracer_destroy(dev->tracer); 1117 + dev->tracer = NULL; 1117 1118 } 1118 1119 1119 1120 mlx5_fw_reset_events_start(dev); ··· 1122 1121 1123 1122 err = mlx5_rsc_dump_init(dev); 1124 1123 if (err) { 1125 - mlx5_core_err(dev, "Failed to init Resource dump\n"); 1126 - goto err_rsc_dump; 1124 + mlx5_core_err(dev, "Failed to init Resource dump %d\n", err); 1125 + mlx5_rsc_dump_destroy(dev); 1126 + dev->rsc_dump = NULL; 1127 1127 } 1128 1128 1129 1129 err = mlx5_fpga_device_start(dev); ··· 1194 1192 mlx5_fpga_device_stop(dev); 1195 1193 err_fpga_start: 1196 1194 mlx5_rsc_dump_cleanup(dev); 1197 - err_rsc_dump: 1198 1195 mlx5_hv_vhca_cleanup(dev->hv_vhca); 1199 1196 mlx5_fw_reset_events_stop(dev); 1200 1197 mlx5_fw_tracer_cleanup(dev->tracer); 1201 - err_fw_tracer: 1202 1198 mlx5_eq_table_destroy(dev); 1203 1199 err_eq_table: 1204 1200 mlx5_irq_table_destroy(dev);
+11 -8
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
··· 39 39 [DR_ACTION_TYP_VPORT] = "DR_ACTION_TYP_VPORT", 40 40 [DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN", 41 41 [DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN", 42 + [DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER", 42 43 [DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR", 43 44 [DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR", 44 45 [DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN", ··· 514 513 /* If destination is vport we will get the FW flow table 515 514 * that recalculates the CS and forwards to the vport. 516 515 */ 517 - ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn, 518 - dest_action->vport->caps->num, 519 - final_icm_addr); 516 + ret = mlx5dr_domain_get_recalc_cs_ft_addr(dest_action->vport->dmn, 517 + dest_action->vport->caps->num, 518 + final_icm_addr); 520 519 if (ret) { 521 520 mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n"); 522 521 return ret; ··· 633 632 return -EOPNOTSUPP; 634 633 case DR_ACTION_TYP_CTR: 635 634 attr.ctr_id = action->ctr->ctr_id + 636 - action->ctr->offeset; 635 + action->ctr->offset; 637 636 break; 638 637 case DR_ACTION_TYP_TAG: 639 638 attr.flow_tag = action->flow_tag->flow_tag; ··· 670 669 attr.hit_gvmi = action->vport->caps->vhca_gvmi; 671 670 dest_action = action; 672 671 if (rx_rule) { 673 - if (action->vport->caps->num == WIRE_PORT) { 672 + if (action->vport->caps->num == MLX5_VPORT_UPLINK) { 674 673 mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n"); 675 674 return -EOPNOTSUPP; 676 675 } ··· 1748 1747 1749 1748 struct mlx5dr_action * 1750 1749 mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn, 1751 - u32 vport, u8 vhca_id_valid, 1750 + u16 vport, u8 vhca_id_valid, 1752 1751 u16 vhca_id) 1753 1752 { 1754 1753 struct mlx5dr_cmd_vport_cap *vport_cap; ··· 1768 1767 return NULL; 1769 1768 } 1770 1769 1771 - vport_cap = mlx5dr_get_vport_cap(&vport_dmn->info.caps, vport); 1770 + vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, vport); 1772 1771 if 
(!vport_cap) { 1773 - mlx5dr_dbg(dmn, "Failed to get vport %d caps\n", vport); 1772 + mlx5dr_err(dmn, 1773 + "Failed to get vport 0x%x caps - vport is disabled or invalid\n", 1774 + vport); 1774 1775 return NULL; 1775 1776 } 1776 1777
+4 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
··· 195 195 196 196 caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port); 197 197 198 + caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev); 199 + 198 200 return 0; 199 201 } 200 202 ··· 274 272 u32 table_id, 275 273 u32 group_id, 276 274 u32 modify_header_id, 277 - u32 vport_id) 275 + u16 vport) 278 276 { 279 277 u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {}; 280 278 void *in_flow_context; ··· 305 303 in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); 306 304 MLX5_SET(dest_format_struct, in_dests, destination_type, 307 305 MLX5_FLOW_DESTINATION_TYPE_VPORT); 308 - MLX5_SET(dest_format_struct, in_dests, destination_id, vport_id); 306 + MLX5_SET(dest_format_struct, in_dests, destination_id, vport); 309 307 310 308 err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); 311 309 kvfree(in);
+146 -80
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
··· 9 9 ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \ 10 10 (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX)) 11 11 12 - static int dr_domain_init_cache(struct mlx5dr_domain *dmn) 12 + static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn) 13 13 { 14 14 /* Per vport cached FW FT for checksum recalculation, this 15 - * recalculation is needed due to a HW bug. 15 + * recalculation is needed due to a HW bug in STEv0. 16 16 */ 17 - dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports, 18 - sizeof(dmn->cache.recalc_cs_ft[0]), 19 - GFP_KERNEL); 20 - if (!dmn->cache.recalc_cs_ft) 21 - return -ENOMEM; 22 - 23 - return 0; 17 + xa_init(&dmn->csum_fts_xa); 24 18 } 25 19 26 - static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn) 27 - { 28 - int i; 29 - 30 - for (i = 0; i < dmn->info.caps.num_vports; i++) { 31 - if (!dmn->cache.recalc_cs_ft[i]) 32 - continue; 33 - 34 - mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]); 35 - } 36 - 37 - kfree(dmn->cache.recalc_cs_ft); 38 - } 39 - 40 - int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, 41 - u32 vport_num, 42 - u64 *rx_icm_addr) 20 + static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn) 43 21 { 44 22 struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft; 23 + unsigned long i; 45 24 46 - recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num]; 25 + xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) { 26 + if (recalc_cs_ft) 27 + mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft); 28 + } 29 + 30 + xa_destroy(&dmn->csum_fts_xa); 31 + } 32 + 33 + int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, 34 + u16 vport_num, 35 + u64 *rx_icm_addr) 36 + { 37 + struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft; 38 + int ret; 39 + 40 + recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num); 47 41 if (!recalc_cs_ft) { 48 - /* Table not in cache, need to allocate a new one */ 42 + /* Table hasn't been created yet */ 49 43 recalc_cs_ft = 
mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num); 50 44 if (!recalc_cs_ft) 51 45 return -EINVAL; 52 46 53 - dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft; 47 + ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num, 48 + recalc_cs_ft, GFP_KERNEL)); 49 + if (ret) 50 + return ret; 54 51 } 55 52 56 53 *rx_icm_addr = recalc_cs_ft->rx_icm_addr; ··· 121 124 mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn); 122 125 } 123 126 124 - static int dr_domain_query_vport(struct mlx5dr_domain *dmn, 125 - bool other_vport, 126 - u16 vport_number) 127 + static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn, 128 + struct mlx5dr_cmd_vport_cap *uplink_vport) 127 129 { 128 - struct mlx5dr_cmd_vport_cap *vport_caps; 130 + struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps; 131 + 132 + uplink_vport->num = MLX5_VPORT_UPLINK; 133 + uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx; 134 + uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx; 135 + uplink_vport->vport_gvmi = 0; 136 + uplink_vport->vhca_gvmi = dmn->info.caps.gvmi; 137 + } 138 + 139 + static int dr_domain_query_vport(struct mlx5dr_domain *dmn, 140 + u16 vport_number, 141 + struct mlx5dr_cmd_vport_cap *vport_caps) 142 + { 143 + u16 cmd_vport = vport_number; 144 + bool other_vport = true; 129 145 int ret; 130 146 131 - vport_caps = &dmn->info.caps.vports_caps[vport_number]; 147 + if (vport_number == MLX5_VPORT_UPLINK) { 148 + dr_domain_fill_uplink_caps(dmn, vport_caps); 149 + return 0; 150 + } 151 + 152 + if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) { 153 + other_vport = false; 154 + cmd_vport = 0; 155 + } 132 156 133 157 ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev, 134 158 other_vport, 135 - vport_number, 159 + cmd_vport, 136 160 &vport_caps->icm_address_rx, 137 161 &vport_caps->icm_address_tx); 138 162 if (ret) ··· 161 143 162 144 ret = mlx5dr_cmd_query_gvmi(dmn->mdev, 163 145 other_vport, 164 - vport_number, 146 + cmd_vport, 165 147 &vport_caps->vport_gvmi); 166 148 if 
(ret) 167 149 return ret; ··· 172 154 return 0; 173 155 } 174 156 175 - static int dr_domain_query_vports(struct mlx5dr_domain *dmn) 157 + static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn) 176 158 { 177 - struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps; 178 - struct mlx5dr_cmd_vport_cap *wire_vport; 179 - int vport; 159 + return dr_domain_query_vport(dmn, 160 + dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0, 161 + &dmn->info.caps.vports.esw_manager_caps); 162 + } 163 + 164 + static struct mlx5dr_cmd_vport_cap * 165 + dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) 166 + { 167 + struct mlx5dr_cmd_caps *caps = &dmn->info.caps; 168 + struct mlx5dr_cmd_vport_cap *vport_caps; 180 169 int ret; 181 170 182 - /* Query vports (except wire vport) */ 183 - for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) { 184 - ret = dr_domain_query_vport(dmn, !!vport, vport); 185 - if (ret) 186 - return ret; 171 + vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL); 172 + if (!vport_caps) 173 + return NULL; 174 + 175 + ret = dr_domain_query_vport(dmn, vport, vport_caps); 176 + if (ret) { 177 + kvfree(vport_caps); 178 + return NULL; 187 179 } 188 180 189 - /* Last vport is the wire port */ 190 - wire_vport = &dmn->info.caps.vports_caps[vport]; 191 - wire_vport->num = WIRE_PORT; 192 - wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx; 193 - wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx; 194 - wire_vport->vport_gvmi = 0; 195 - wire_vport->vhca_gvmi = dmn->info.caps.gvmi; 181 + ret = xa_insert(&caps->vports.vports_caps_xa, vport, 182 + vport_caps, GFP_KERNEL); 183 + if (ret) { 184 + mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret); 185 + kvfree(vport_caps); 186 + return ERR_PTR(ret); 187 + } 188 + 189 + return vport_caps; 190 + } 191 + 192 + struct mlx5dr_cmd_vport_cap * 193 + mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport) 194 + { 195 + struct mlx5dr_cmd_caps *caps = 
&dmn->info.caps; 196 + struct mlx5dr_cmd_vport_cap *vport_caps; 197 + 198 + if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) || 199 + (!caps->is_ecpf && vport == 0)) 200 + return &caps->vports.esw_manager_caps; 201 + 202 + vport_load: 203 + vport_caps = xa_load(&caps->vports.vports_caps_xa, vport); 204 + if (vport_caps) 205 + return vport_caps; 206 + 207 + vport_caps = dr_domain_add_vport_cap(dmn, vport); 208 + if (PTR_ERR(vport_caps) == -EBUSY) 209 + /* caps were already stored by another thread */ 210 + goto vport_load; 211 + 212 + return vport_caps; 213 + } 214 + 215 + static void dr_domain_clear_vports(struct mlx5dr_domain *dmn) 216 + { 217 + struct mlx5dr_cmd_vport_cap *vport_caps; 218 + unsigned long i; 219 + 220 + xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) { 221 + vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i); 222 + kvfree(vport_caps); 223 + } 224 + } 225 + 226 + static int dr_domain_query_uplink(struct mlx5dr_domain *dmn) 227 + { 228 + struct mlx5dr_cmd_vport_cap *vport_caps; 229 + 230 + vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK); 231 + if (!vport_caps) 232 + return -EINVAL; 196 233 197 234 return 0; 198 235 } ··· 269 196 dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx; 270 197 dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx; 271 198 272 - dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports, 273 - sizeof(dmn->info.caps.vports_caps[0]), 274 - GFP_KERNEL); 275 - if (!dmn->info.caps.vports_caps) 276 - return -ENOMEM; 199 + xa_init(&dmn->info.caps.vports.vports_caps_xa); 277 200 278 - ret = dr_domain_query_vports(dmn); 201 + /* Query eswitch manager and uplink vports only. Rest of the 202 + * vports (vport 0, VFs and SFs) will be queried dynamically. 
203 + */ 204 + 205 + ret = dr_domain_query_esw_mngr(dmn); 279 206 if (ret) { 280 - mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret); 281 - goto free_vports_caps; 207 + mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret); 208 + goto free_vports_caps_xa; 282 209 } 283 210 284 - dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1; 211 + ret = dr_domain_query_uplink(dmn); 212 + if (ret) { 213 + mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret); 214 + goto free_vports_caps_xa; 215 + } 285 216 286 217 return 0; 287 218 288 - free_vports_caps: 289 - kfree(dmn->info.caps.vports_caps); 290 - dmn->info.caps.vports_caps = NULL; 219 + free_vports_caps_xa: 220 + xa_destroy(&dmn->info.caps.vports.vports_caps_xa); 221 + 291 222 return ret; 292 223 } 293 224 ··· 305 228 mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n"); 306 229 return -EOPNOTSUPP; 307 230 } 308 - 309 - dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev); 310 231 311 232 ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps); 312 233 if (ret) ··· 342 267 343 268 dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX; 344 269 dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX; 345 - vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0); 346 - if (!vport_cap) { 347 - mlx5dr_err(dmn, "Failed to get esw manager vport\n"); 348 - return -ENOENT; 349 - } 270 + vport_cap = &dmn->info.caps.vports.esw_manager_caps; 350 271 351 272 dmn->info.supp_sw_steering = true; 352 273 dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx; ··· 361 290 362 291 static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn) 363 292 { 364 - kfree(dmn->info.caps.vports_caps); 293 + dr_domain_clear_vports(dmn); 294 + xa_destroy(&dmn->info.caps.vports.vports_caps_xa); 365 295 } 366 296 367 297 struct mlx5dr_domain * ··· 405 333 goto uninit_caps; 406 334 } 407 335 408 - ret = dr_domain_init_cache(dmn); 409 - if (ret) { 410 - mlx5dr_err(dmn, "Failed initialize domain cache\n"); 
411 - goto uninit_resourses; 412 - } 336 + dr_domain_init_csum_recalc_fts(dmn); 413 337 414 338 return dmn; 415 339 416 - uninit_resourses: 417 - dr_domain_uninit_resources(dmn); 418 340 uninit_caps: 419 341 dr_domain_caps_uninit(dmn); 420 342 free_domain: ··· 447 381 448 382 /* make sure resources are not used by the hardware */ 449 383 mlx5dr_cmd_sync_steering(dmn->mdev); 450 - dr_domain_uninit_cache(dmn); 384 + dr_domain_uninit_csum_recalc_fts(dmn); 451 385 dr_domain_uninit_resources(dmn); 452 386 dr_domain_caps_uninit(dmn); 453 387 mutex_destroy(&dmn->info.tx.mutex);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
··· 5 5 #include "dr_types.h" 6 6 7 7 struct mlx5dr_fw_recalc_cs_ft * 8 - mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num) 8 + mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num) 9 9 { 10 10 struct mlx5dr_cmd_create_flow_table_attr ft_attr = {}; 11 11 struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
··· 1042 1042 return false; 1043 1043 1044 1044 if (mask->misc.source_port) { 1045 - if (rx && value->misc.source_port != WIRE_PORT) 1045 + if (rx && value->misc.source_port != MLX5_VPORT_UPLINK) 1046 1046 return true; 1047 1047 1048 - if (!rx && value->misc.source_port == WIRE_PORT) 1048 + if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK) 1049 1049 return true; 1050 1050 } 1051 1051
+7 -6
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
··· 1645 1645 struct mlx5dr_match_misc *misc = &value->misc; 1646 1646 struct mlx5dr_cmd_vport_cap *vport_cap; 1647 1647 struct mlx5dr_domain *dmn = sb->dmn; 1648 - struct mlx5dr_cmd_caps *caps; 1648 + struct mlx5dr_domain *vport_dmn; 1649 1649 u8 *bit_mask = sb->bit_mask; 1650 1650 bool source_gvmi_set; 1651 1651 ··· 1654 1654 if (sb->vhca_id_valid) { 1655 1655 /* Find port GVMI based on the eswitch_owner_vhca_id */ 1656 1656 if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi) 1657 - caps = &dmn->info.caps; 1657 + vport_dmn = dmn; 1658 1658 else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id == 1659 1659 dmn->peer_dmn->info.caps.gvmi)) 1660 - caps = &dmn->peer_dmn->info.caps; 1660 + vport_dmn = dmn->peer_dmn; 1661 1661 else 1662 1662 return -EINVAL; 1663 1663 1664 1664 misc->source_eswitch_owner_vhca_id = 0; 1665 1665 } else { 1666 - caps = &dmn->info.caps; 1666 + vport_dmn = dmn; 1667 1667 } 1668 1668 1669 1669 source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi); 1670 1670 if (source_gvmi_set) { 1671 - vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port); 1671 + vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, 1672 + misc->source_port); 1672 1673 if (!vport_cap) { 1673 - mlx5dr_err(dmn, "Vport 0x%x is invalid\n", 1674 + mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n", 1674 1675 misc->source_port); 1675 1676 return -EINVAL; 1676 1677 }
+10 -8
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
··· 586 586 } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) { 587 587 u8 *d_action; 588 588 589 - dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); 590 - action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); 591 - action_sz = DR_STE_ACTION_TRIPLE_SZ; 589 + if (action_sz < DR_STE_ACTION_TRIPLE_SZ) { 590 + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); 591 + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); 592 + action_sz = DR_STE_ACTION_TRIPLE_SZ; 593 + } 592 594 d_action = action + DR_STE_ACTION_SINGLE_SZ; 593 595 594 596 dr_ste_v1_set_encap_l3(last_ste, ··· 1778 1776 struct mlx5dr_match_misc *misc = &value->misc; 1779 1777 struct mlx5dr_cmd_vport_cap *vport_cap; 1780 1778 struct mlx5dr_domain *dmn = sb->dmn; 1781 - struct mlx5dr_cmd_caps *caps; 1779 + struct mlx5dr_domain *vport_dmn; 1782 1780 u8 *bit_mask = sb->bit_mask; 1783 1781 1784 1782 DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn); ··· 1786 1784 if (sb->vhca_id_valid) { 1787 1785 /* Find port GVMI based on the eswitch_owner_vhca_id */ 1788 1786 if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi) 1789 - caps = &dmn->info.caps; 1787 + vport_dmn = dmn; 1790 1788 else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id == 1791 1789 dmn->peer_dmn->info.caps.gvmi)) 1792 - caps = &dmn->peer_dmn->info.caps; 1790 + vport_dmn = dmn->peer_dmn; 1793 1791 else 1794 1792 return -EINVAL; 1795 1793 1796 1794 misc->source_eswitch_owner_vhca_id = 0; 1797 1795 } else { 1798 - caps = &dmn->info.caps; 1796 + vport_dmn = dmn; 1799 1797 } 1800 1798 1801 1799 if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi)) 1802 1800 return 0; 1803 1801 1804 - vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port); 1802 + vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port); 1805 1803 if (!vport_cap) { 1806 1804 mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n", 1807 1805 misc->source_port);
+18 -29
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
··· 4 4 #ifndef _DR_TYPES_ 5 5 #define _DR_TYPES_ 6 6 7 - #include <linux/mlx5/driver.h> 7 + #include <linux/mlx5/vport.h> 8 8 #include <linux/refcount.h> 9 9 #include "fs_core.h" 10 10 #include "wq.h" ··· 14 14 15 15 #define DR_RULE_MAX_STES 18 16 16 #define DR_ACTION_MAX_STES 5 17 - #define WIRE_PORT 0xFFFF 18 17 #define DR_STE_SVLAN 0x1 19 18 #define DR_STE_CVLAN 0x2 20 19 #define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4) ··· 751 752 struct mlx5dr_cmd_vport_cap { 752 753 u16 vport_gvmi; 753 754 u16 vhca_gvmi; 755 + u16 num; 754 756 u64 icm_address_rx; 755 757 u64 icm_address_tx; 756 - u32 num; 757 758 }; 758 759 759 760 struct mlx5dr_roce_cap { 760 761 u8 roce_en:1; 761 762 u8 fl_rc_qp_when_roce_disabled:1; 762 763 u8 fl_rc_qp_when_roce_enabled:1; 764 + }; 765 + 766 + struct mlx5dr_vports { 767 + struct mlx5dr_cmd_vport_cap esw_manager_caps; 768 + struct xarray vports_caps_xa; 763 769 }; 764 770 765 771 struct mlx5dr_cmd_caps { ··· 790 786 u8 flex_parser_id_gtpu_first_ext_dw_0; 791 787 u8 max_ft_level; 792 788 u16 roce_min_src_udp; 793 - u8 num_esw_ports; 794 789 u8 sw_format_ver; 795 790 bool eswitch_manager; 796 791 bool rx_sw_owner; ··· 798 795 u8 rx_sw_owner_v2:1; 799 796 u8 tx_sw_owner_v2:1; 800 797 u8 fdb_sw_owner_v2:1; 801 - u32 num_vports; 802 798 struct mlx5dr_esw_caps esw_caps; 803 - struct mlx5dr_cmd_vport_cap *vports_caps; 799 + struct mlx5dr_vports vports; 804 800 bool prio_tag_required; 805 801 struct mlx5dr_roce_cap roce_caps; 802 + u8 is_ecpf:1; 806 803 u8 isolate_vl_tc:1; 807 804 }; 808 805 ··· 829 826 struct mlx5dr_cmd_caps caps; 830 827 }; 831 828 832 - struct mlx5dr_domain_cache { 833 - struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft; 834 - }; 835 - 836 829 struct mlx5dr_domain { 837 830 struct mlx5dr_domain *peer_dmn; 838 831 struct mlx5_core_dev *mdev; ··· 840 841 struct mlx5dr_icm_pool *action_icm_pool; 841 842 struct mlx5dr_send_ring *send_ring; 842 843 struct mlx5dr_domain_info info; 843 - struct mlx5dr_domain_cache cache; 844 + 
struct xarray csum_fts_xa; 844 845 struct mlx5dr_ste_ctx *ste_ctx; 845 846 }; 846 847 ··· 941 942 942 943 struct mlx5dr_action_ctr { 943 944 u32 ctr_id; 944 - u32 offeset; 945 + u32 offset; 945 946 }; 946 947 947 948 struct mlx5dr_action_vport { ··· 1101 1102 return true; 1102 1103 } 1103 1104 1104 - static inline struct mlx5dr_cmd_vport_cap * 1105 - mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport) 1106 - { 1107 - if (!caps->vports_caps || 1108 - (vport >= caps->num_vports && vport != WIRE_PORT)) 1109 - return NULL; 1110 - 1111 - if (vport == WIRE_PORT) 1112 - vport = caps->num_vports; 1113 - 1114 - return &caps->vports_caps[vport]; 1115 - } 1105 + struct mlx5dr_cmd_vport_cap * 1106 + mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport); 1116 1107 1117 1108 struct mlx5dr_cmd_query_flow_table_details { 1118 1109 u8 status; ··· 1143 1154 u32 table_id, 1144 1155 u32 group_id, 1145 1156 u32 modify_header_id, 1146 - u32 vport_id); 1157 + u16 vport_id); 1147 1158 int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev, 1148 1159 u32 table_type, 1149 1160 u32 table_id); ··· 1361 1372 }; 1362 1373 1363 1374 struct mlx5dr_fw_recalc_cs_ft * 1364 - mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num); 1375 + mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num); 1365 1376 void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn, 1366 1377 struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft); 1367 - int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, 1368 - u32 vport_num, 1369 - u64 *rx_icm_addr); 1378 + int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, 1379 + u16 vport_num, 1380 + u64 *rx_icm_addr); 1370 1381 int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, 1371 1382 struct mlx5dr_cmd_flow_destination_hw_info *dest, 1372 1383 int num_dest,
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
··· 222 222 dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID; 223 223 } 224 224 225 - #define MLX5_FLOW_CONTEXT_ACTION_MAX 20 225 + #define MLX5_FLOW_CONTEXT_ACTION_MAX 32 226 226 static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, 227 227 struct mlx5_flow_table *ft, 228 228 struct mlx5_flow_group *group,
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
··· 89 89 90 90 struct mlx5dr_action * 91 91 mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain, 92 - u32 vport, u8 vhca_id_valid, 92 + u16 vport, u8 vhca_id_valid, 93 93 u16 vhca_id); 94 94 95 95 struct mlx5dr_action *