Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-next-vhca-id' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Saeed Mahameed says:

====================
mlx5-next-vhca-id

A preparation patchset for adjacent function vports.

Adjacent functions can delegate their SR-IOV VFs to sibling PFs,
allowing for more flexible and scalable management in multi-host and
ECPF-to-host scenarios. Adjacent vports can be managed by the management
PF via their unique vhca id and can't be managed by function index as the
index can conflict with the local vports/vfs.

This series provides:

- Use the cached vhca id instead of querying it every time from fw
- Query hca cap using vhca id instead of function id when FW supports it
- Add HW capabilities and required definitions for adjacent function vports

* tag 'mlx5-next-vhca-id' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
{rdma,net}/mlx5: export mlx5_vport_get_vhca_id
net/mlx5: E-Switch, Set/Query hca cap via vhca id
net/mlx5: E-Switch, Cache vport vhca id on first cap query
net/mlx5: mlx5_ifc, Add hardware definitions needed for adjacent vports
====================

Link: https://patch.msgid.link/20250815194901.298689-1-saeed@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

+263 -65
+4 -23
drivers/infiniband/hw/mlx5/std_types.c
··· 83 83 static int fill_vport_vhca_id(struct mlx5_core_dev *mdev, u16 vport, 84 84 struct mlx5_ib_uapi_query_port *info) 85 85 { 86 - size_t out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 87 - u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {}; 88 - void *out; 89 - int err; 86 + int err = mlx5_vport_get_vhca_id(mdev, vport, &info->vport_vhca_id); 90 87 91 - out = kzalloc(out_sz, GFP_KERNEL); 92 - if (!out) 93 - return -ENOMEM; 94 - 95 - MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); 96 - MLX5_SET(query_hca_cap_in, in, other_function, true); 97 - MLX5_SET(query_hca_cap_in, in, function_id, vport); 98 - MLX5_SET(query_hca_cap_in, in, op_mod, 99 - MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE | 100 - HCA_CAP_OPMOD_GET_CUR); 101 - 102 - err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_sz); 103 88 if (err) 104 - goto out; 105 - 106 - info->vport_vhca_id = MLX5_GET(query_hca_cap_out, out, 107 - capability.cmd_hca_cap.vhca_id); 89 + return err; 108 90 109 91 info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID; 110 - out: 111 - kfree(out); 112 - return err; 92 + 93 + return 0; 113 94 } 114 95 115 96 static int fill_multiport_info(struct mlx5_ib_dev *dev, u32 port_num,
+2
drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 2 /* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */ 3 3 4 + #include <linux/mlx5/vport.h> 5 + 4 6 #include "reporter_vnic.h" 5 7 #include "en_stats.h" 6 8 #include "devlink.h"
+16 -2
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 820 820 821 821 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 822 822 vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce); 823 + vport->vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id); 823 824 824 825 if (!MLX5_CAP_GEN_MAX(esw->dev, hca_cap_2)) 825 826 goto out_free; ··· 838 837 out_free: 839 838 kfree(query_ctx); 840 839 return err; 840 + } 841 + 842 + bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id) 843 + { 844 + struct mlx5_vport *vport; 845 + 846 + vport = mlx5_eswitch_get_vport(esw, vportn); 847 + if (IS_ERR(vport) || MLX5_VPORT_INVAL_VHCA_ID(vport)) 848 + return false; 849 + 850 + *vhca_id = vport->vhca_id; 851 + return true; 841 852 } 842 853 843 854 static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) ··· 942 929 943 930 if (!mlx5_esw_is_manager_vport(esw, vport_num) && 944 931 MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { 945 - ret = mlx5_esw_vport_vhca_id_set(esw, vport_num); 932 + ret = mlx5_esw_vport_vhca_id_map(esw, vport); 946 933 if (ret) 947 934 goto err_vhca_mapping; 948 935 } ··· 986 973 987 974 if (!mlx5_esw_is_manager_vport(esw, vport_num) && 988 975 MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) 989 - mlx5_esw_vport_vhca_id_clear(esw, vport_num); 976 + mlx5_esw_vport_vhca_id_unmap(esw, vport); 990 977 991 978 if (vport->vport != MLX5_VPORT_PF && 992 979 (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled)) ··· 1748 1735 vport->vport = vport_num; 1749 1736 vport->index = index; 1750 1737 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; 1738 + vport->vhca_id = MLX5_VHCA_ID_INVALID; 1751 1739 INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler); 1752 1740 err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL); 1753 1741 if (err)
+18 -2
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 197 197 return mlx5_devlink_port_get(dl_port)->vport; 198 198 } 199 199 200 + #define MLX5_VHCA_ID_INVALID (-1) 201 + 202 + #define MLX5_VPORT_INVAL_VHCA_ID(vport) \ 203 + ((vport)->vhca_id == MLX5_VHCA_ID_INVALID) 204 + 200 205 struct mlx5_vport { 201 206 struct mlx5_core_dev *dev; 202 207 struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE]; ··· 214 209 struct vport_egress egress; 215 210 u32 default_metadata; 216 211 u32 metadata; 212 + int vhca_id; 217 213 218 214 struct mlx5_vport_info info; 219 215 ··· 829 823 830 824 int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id); 831 825 832 - int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num); 833 - void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num); 826 + int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw, 827 + struct mlx5_vport *vport); 828 + void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw, 829 + struct mlx5_vport *vport); 834 830 int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num); 831 + bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id); 835 832 836 833 /** 837 834 * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing. ··· 982 973 { 983 974 return true; 984 975 } 976 + 977 + static inline bool 978 + mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id) 979 + { 980 + return -EOPNOTSUPP; 981 + } 982 + 985 983 #endif /* CONFIG_MLX5_ESWITCH */ 986 984 987 985 #endif /* __MLX5_ESWITCH_H__ */
+17 -17
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 4167 4167 } 4168 4168 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); 4169 4169 4170 - int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num) 4170 + int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw, 4171 + struct mlx5_vport *vport) 4171 4172 { 4172 4173 u16 *old_entry, *vhca_map_entry, vhca_id; 4173 - int err; 4174 4174 4175 - err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id); 4176 - if (err) { 4177 - esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n", 4178 - vport_num, err); 4179 - return err; 4175 + if (WARN_ONCE(MLX5_VPORT_INVAL_VHCA_ID(vport), 4176 + "vport %d vhca_id is not set", vport->vport)) { 4177 + int err; 4178 + 4179 + err = mlx5_vport_get_vhca_id(vport->dev, vport->vport, 4180 + &vhca_id); 4181 + if (err) 4182 + return err; 4183 + vport->vhca_id = vhca_id; 4180 4184 } 4181 4185 4186 + vhca_id = vport->vhca_id; 4182 4187 vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL); 4183 4188 if (!vhca_map_entry) 4184 4189 return -ENOMEM; 4185 4190 4186 - *vhca_map_entry = vport_num; 4191 + *vhca_map_entry = vport->vport; 4187 4192 old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL); 4188 4193 if (xa_is_err(old_entry)) { 4189 4194 kfree(vhca_map_entry); ··· 4198 4193 return 0; 4199 4194 } 4200 4195 4201 - void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num) 4196 + void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw, 4197 + struct mlx5_vport *vport) 4202 4198 { 4203 - u16 *vhca_map_entry, vhca_id; 4204 - int err; 4199 + u16 *vhca_map_entry; 4205 4200 4206 - err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id); 4207 - if (err) 4208 - esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n", 4209 - vport_num, err); 4210 - 4211 - vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id); 4201 + vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vport->vhca_id); 4212 4202 kfree(vhca_map_entry); 4213 
4203 } 4214 4204
-2
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 447 447 #define mlx5_vport_get_other_func_general_cap(dev, vport, out) \ 448 448 mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL) 449 449 450 - int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id); 451 - 452 450 static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev) 453 451 { 454 452 struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+11 -5
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
··· 1199 1199 int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function, 1200 1200 u16 vport_number, u16 *gvmi) 1201 1201 { 1202 - bool ec_vf_func = other_function ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false; 1203 1202 u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {}; 1204 1203 int out_size; 1205 1204 void *out; 1206 1205 int err; 1207 1206 1207 + if (other_function) { 1208 + err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi); 1209 + if (!err) 1210 + return 0; 1211 + 1212 + mlx5_core_err(mdev, "Failed to get vport vhca id for vport %d\n", 1213 + vport_number); 1214 + return err; 1215 + } 1216 + 1217 + /* get vhca_id for `this` function */ 1208 1218 out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out); 1209 1219 out = kzalloc(out_size, GFP_KERNEL); 1210 1220 if (!out) 1211 1221 return -ENOMEM; 1212 1222 1213 1223 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); 1214 - MLX5_SET(query_hca_cap_in, in, other_function, other_function); 1215 - MLX5_SET(query_hca_cap_in, in, function_id, 1216 - mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func)); 1217 - MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func); 1218 1224 MLX5_SET(query_hca_cap_in, in, op_mod, 1219 1225 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | HCA_CAP_OPMOD_GET_CUR); 1220 1226
+12 -4
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
··· 2 2 /* Copyright (c) 2019 Mellanox Technologies. */ 3 3 4 4 #include "dr_types.h" 5 + #include "eswitch.h" 5 6 6 7 int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev, 7 8 bool other_vport, ··· 35 34 int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport, 36 35 u16 vport_number, u16 *gvmi) 37 36 { 38 - bool ec_vf_func = other_vport ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false; 39 37 u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {}; 40 38 int out_size; 41 39 void *out; 42 40 int err; 43 41 42 + if (other_vport) { 43 + err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi); 44 + if (!err) 45 + return 0; 46 + 47 + mlx5_core_err(mdev, "Failed to get vport vhca id for vport %d\n", 48 + vport_number); 49 + return err; 50 + } 51 + 52 + /* get vhca_id for `this` function */ 44 53 out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out); 45 54 out = kzalloc(out_size, GFP_KERNEL); 46 55 if (!out) 47 56 return -ENOMEM; 48 57 49 58 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); 50 - MLX5_SET(query_hca_cap_in, in, other_function, other_vport); 51 - MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func)); 52 - MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func); 53 59 MLX5_SET(query_hca_cap_in, in, op_mod, 54 60 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | 55 61 HCA_CAP_OPMOD_GET_CUR);
+52 -6
drivers/net/ethernet/mellanox/mlx5/core/vport.c
··· 36 36 #include <linux/mlx5/vport.h> 37 37 #include <linux/mlx5/eswitch.h> 38 38 #include "mlx5_core.h" 39 + #include "eswitch.h" 39 40 #include "sf/sf.h" 40 41 41 42 /* Mutex to hold while enabling or disabling RoCE */ ··· 1190 1189 } 1191 1190 EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid); 1192 1191 1192 + static bool mlx5_vport_use_vhca_id_as_func_id(struct mlx5_core_dev *dev, 1193 + u16 vport_num, u16 *vhca_id) 1194 + { 1195 + if (!MLX5_CAP_GEN_2(dev, function_id_type_vhca_id)) 1196 + return false; 1197 + 1198 + return mlx5_esw_vport_vhca_id(dev->priv.eswitch, vport_num, vhca_id); 1199 + } 1200 + 1193 1201 int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out, 1194 1202 u16 opmod) 1195 1203 { 1196 - bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport); 1197 1204 u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {}; 1205 + u16 vhca_id = 0, function_id = 0; 1206 + bool ec_vf_func = false; 1207 + 1208 + /* if this vport is referring to a vport on the ec PF (embedded cpu ) 1209 + * let the FW know which domain we are querying since vport numbers or 1210 + * function_ids are not unique across the different PF domains, 1211 + * unless we use vhca_id as the function_id below. 
1212 + */ 1213 + ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport); 1214 + function_id = mlx5_vport_to_func_id(dev, vport, ec_vf_func); 1215 + 1216 + if (mlx5_vport_use_vhca_id_as_func_id(dev, vport, &vhca_id)) { 1217 + MLX5_SET(query_hca_cap_in, in, function_id_type, 1); 1218 + function_id = vhca_id; 1219 + ec_vf_func = false; 1220 + mlx5_core_dbg(dev, "%s using vhca_id as function_id for vport %d vhca_id 0x%x\n", 1221 + __func__, vport, vhca_id); 1222 + } 1198 1223 1199 1224 opmod = (opmod << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01); 1200 1225 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); 1201 1226 MLX5_SET(query_hca_cap_in, in, op_mod, opmod); 1202 - MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(dev, vport, ec_vf_func)); 1203 1227 MLX5_SET(query_hca_cap_in, in, other_function, true); 1204 1228 MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func); 1229 + MLX5_SET(query_hca_cap_in, in, function_id, function_id); 1205 1230 return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out); 1206 1231 } 1207 1232 EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap); ··· 1239 1212 void *hca_caps; 1240 1213 int err; 1241 1214 1242 - *vhca_id = 0; 1215 + /* try get vhca_id via eswitch */ 1216 + if (mlx5_esw_vport_vhca_id(dev->priv.eswitch, vport, vhca_id)) 1217 + return 0; 1243 1218 1244 1219 query_ctx = kzalloc(query_out_sz, GFP_KERNEL); 1245 1220 if (!query_ctx) ··· 1258 1229 kfree(query_ctx); 1259 1230 return err; 1260 1231 } 1232 + EXPORT_SYMBOL_GPL(mlx5_vport_get_vhca_id); 1261 1233 1262 1234 int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, 1263 1235 u16 vport, u16 opmod) 1264 1236 { 1265 - bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport); 1266 1237 int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); 1238 + u16 vhca_id = 0, function_id = 0; 1239 + bool ec_vf_func = false; 1267 1240 void *set_hca_cap; 1268 1241 void *set_ctx; 1269 1242 int ret; ··· 1274 1243 if (!set_ctx) 1275 1244 return -ENOMEM; 1276 
1245 1246 + /* if this vport is referring to a vport on the ec PF (embedded cpu ) 1247 + * let the FW know which domain we are querying since vport numbers or 1248 + * function_ids are not unique across the different PF domains, 1249 + * unless we use vhca_id as the function_id below. 1250 + */ 1251 + ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport); 1252 + function_id = mlx5_vport_to_func_id(dev, vport, ec_vf_func); 1253 + 1254 + if (mlx5_vport_use_vhca_id_as_func_id(dev, vport, &vhca_id)) { 1255 + MLX5_SET(set_hca_cap_in, set_ctx, function_id_type, 1); 1256 + function_id = vhca_id; 1257 + ec_vf_func = false; 1258 + mlx5_core_dbg(dev, "%s using vhca_id as function_id for vport %d vhca_id 0x%x\n", 1259 + __func__, vport, vhca_id); 1260 + } 1261 + 1277 1262 MLX5_SET(set_hca_cap_in, set_ctx, opcode, MLX5_CMD_OP_SET_HCA_CAP); 1278 1263 MLX5_SET(set_hca_cap_in, set_ctx, op_mod, opmod << 1); 1279 1264 set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); 1280 1265 memcpy(set_hca_cap, hca_cap, MLX5_ST_SZ_BYTES(cmd_hca_cap)); 1281 - MLX5_SET(set_hca_cap_in, set_ctx, function_id, 1282 - mlx5_vport_to_func_id(dev, vport, ec_vf_func)); 1283 1266 MLX5_SET(set_hca_cap_in, set_ctx, other_function, true); 1284 1267 MLX5_SET(set_hca_cap_in, set_ctx, ec_vf_function, ec_vf_func); 1268 + MLX5_SET(set_hca_cap_in, set_ctx, function_id, function_id); 1285 1269 ret = mlx5_cmd_exec_in(dev, set_hca_cap, set_ctx); 1286 1270 1287 1271 kfree(set_ctx);
+129 -4
include/linux/mlx5/mlx5_ifc.h
··· 189 189 MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727, 190 190 MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729, 191 191 MLX5_CMD_OP_MODIFY_XRQ = 0x72a, 192 + MLX5_CMD_OPCODE_QUERY_DELEGATED_VHCA = 0x732, 193 + MLX5_CMD_OPCODE_CREATE_ESW_VPORT = 0x733, 194 + MLX5_CMD_OPCODE_DESTROY_ESW_VPORT = 0x734, 192 195 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740, 193 196 MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, 194 197 MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, ··· 2210 2207 2211 2208 u8 reserved_at_440[0x8]; 2212 2209 u8 max_num_eqs_24b[0x18]; 2213 - u8 reserved_at_460[0x3a0]; 2210 + 2211 + u8 reserved_at_460[0x160]; 2212 + 2213 + u8 query_adjacent_functions_id[0x1]; 2214 + u8 ingress_egress_esw_vport_connect[0x1]; 2215 + u8 function_id_type_vhca_id[0x1]; 2216 + u8 reserved_at_5c3[0xd]; 2217 + u8 delegate_vhca_management_profiles[0x10]; 2218 + 2219 + u8 delegated_vhca_max[0x10]; 2220 + u8 delegate_vhca_max[0x10]; 2221 + 2222 + u8 reserved_at_600[0x200]; 2214 2223 }; 2215 2224 2216 2225 enum mlx5_ifc_flow_destination_type { ··· 5174 5159 5175 5160 u8 other_function[0x1]; 5176 5161 u8 ec_vf_function[0x1]; 5177 - u8 reserved_at_42[0xe]; 5162 + u8 reserved_at_42[0x1]; 5163 + u8 function_id_type[0x1]; 5164 + u8 reserved_at_44[0xc]; 5178 5165 u8 function_id[0x10]; 5179 5166 5180 5167 u8 reserved_at_60[0x20]; ··· 6374 6357 6375 6358 u8 other_function[0x1]; 6376 6359 u8 ec_vf_function[0x1]; 6377 - u8 reserved_at_42[0xe]; 6360 + u8 reserved_at_42[0x1]; 6361 + u8 function_id_type[0x1]; 6362 + u8 reserved_at_44[0xc]; 6378 6363 u8 function_id[0x10]; 6379 6364 6380 6365 u8 reserved_at_60[0x20]; ··· 7002 6983 u8 reserved_at_60[0x20]; 7003 6984 }; 7004 6985 6986 + struct mlx5_ifc_destroy_esw_vport_out_bits { 6987 + u8 status[0x8]; 6988 + u8 reserved_at_8[0x18]; 6989 + 6990 + u8 syndrome[0x20]; 6991 + 6992 + u8 reserved_at_40[0x20]; 6993 + }; 6994 + 6995 + struct mlx5_ifc_destroy_esw_vport_in_bits { 6996 + u8 opcode[0x10]; 6997 + u8 uid[0x10]; 6998 + 6999 + u8 reserved_at_20[0x10]; 7000 + u8 op_mod[0x10]; 
7001 + 7002 + u8 reserved_at_40[0x10]; 7003 + u8 vport_num[0x10]; 7004 + 7005 + u8 reserved_at_60[0x20]; 7006 + }; 7007 + 7005 7008 struct mlx5_ifc_modify_esw_vport_context_out_bits { 7006 7009 u8 status[0x8]; 7007 7010 u8 reserved_at_8[0x18]; ··· 7525 7484 u8 reserved_at_40[0x40]; 7526 7485 }; 7527 7486 7487 + struct mlx5_ifc_function_vhca_rid_info_reg_bits { 7488 + u8 host_number[0x8]; 7489 + u8 host_pci_device_function[0x8]; 7490 + u8 host_pci_bus[0x8]; 7491 + u8 reserved_at_18[0x3]; 7492 + u8 pci_bus_assigned[0x1]; 7493 + u8 function_type[0x4]; 7494 + 7495 + u8 parent_pci_device_function[0x8]; 7496 + u8 parent_pci_bus[0x8]; 7497 + u8 vhca_id[0x10]; 7498 + 7499 + u8 reserved_at_40[0x10]; 7500 + u8 function_id[0x10]; 7501 + 7502 + u8 reserved_at_60[0x20]; 7503 + }; 7504 + 7505 + struct mlx5_ifc_delegated_function_vhca_rid_info_bits { 7506 + struct mlx5_ifc_function_vhca_rid_info_reg_bits function_vhca_rid_info; 7507 + 7508 + u8 reserved_at_80[0x18]; 7509 + u8 manage_profile[0x8]; 7510 + 7511 + u8 reserved_at_a0[0x60]; 7512 + }; 7513 + 7514 + struct mlx5_ifc_query_delegated_vhca_out_bits { 7515 + u8 status[0x8]; 7516 + u8 reserved_at_8[0x18]; 7517 + 7518 + u8 syndrome[0x20]; 7519 + 7520 + u8 reserved_at_40[0x20]; 7521 + 7522 + u8 reserved_at_60[0x10]; 7523 + u8 functions_count[0x10]; 7524 + 7525 + u8 reserved_at_80[0x80]; 7526 + 7527 + struct mlx5_ifc_delegated_function_vhca_rid_info_bits 7528 + delegated_function_vhca_rid_info[]; 7529 + }; 7530 + 7531 + struct mlx5_ifc_query_delegated_vhca_in_bits { 7532 + u8 opcode[0x10]; 7533 + u8 uid[0x10]; 7534 + 7535 + u8 reserved_at_20[0x10]; 7536 + u8 op_mod[0x10]; 7537 + 7538 + u8 reserved_at_40[0x40]; 7539 + }; 7540 + 7541 + struct mlx5_ifc_create_esw_vport_out_bits { 7542 + u8 status[0x8]; 7543 + u8 reserved_at_8[0x18]; 7544 + 7545 + u8 syndrome[0x20]; 7546 + 7547 + u8 reserved_at_40[0x20]; 7548 + 7549 + u8 reserved_at_60[0x10]; 7550 + u8 vport_num[0x10]; 7551 + }; 7552 + 7553 + struct mlx5_ifc_create_esw_vport_in_bits 
{ 7554 + u8 opcode[0x10]; 7555 + u8 reserved_at_10[0x10]; 7556 + 7557 + u8 reserved_at_20[0x10]; 7558 + u8 op_mod[0x10]; 7559 + 7560 + u8 reserved_at_40[0x10]; 7561 + u8 managed_vhca_id[0x10]; 7562 + 7563 + u8 reserved_at_60[0x20]; 7564 + }; 7565 + 7528 7566 struct mlx5_ifc_qp_2rst_out_bits { 7529 7567 u8 status[0x8]; 7530 7568 u8 reserved_at_8[0x18]; ··· 7731 7611 u8 reserved_at_41[0xf]; 7732 7612 u8 vport_number[0x10]; 7733 7613 7734 - u8 reserved_at_60[0x18]; 7614 + u8 reserved_at_60[0x10]; 7615 + u8 ingress_connect[0x1]; 7616 + u8 egress_connect[0x1]; 7617 + u8 ingress_connect_valid[0x1]; 7618 + u8 egress_connect_valid[0x1]; 7619 + u8 reserved_at_74[0x4]; 7735 7620 u8 admin_state[0x4]; 7736 7621 u8 reserved_at_7c[0x4]; 7737 7622 };
+2
include/linux/mlx5/vport.h
··· 135 135 u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev); 136 136 int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out, 137 137 u16 opmod); 138 + int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id); 139 + 138 140 #endif /* __MLX5_VPORT_H__ */