Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Merge mlx5-next patches needed for upcoming mlx5 software steering.

1) Alex adds HW bits and definitions required for SW steering
2) Ariel moves device memory management to mlx5_core (From mlx5_ib)
3) Maor adds cleanups and fixups for eswitch mode and RoCE
4) Mark sets only stag for matching untagged packets

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>

+497 -251
-130
drivers/infiniband/hw/mlx5/cmd.c
··· 186 186 return err; 187 187 } 188 188 189 - int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length, 190 - u16 uid, phys_addr_t *addr, u32 *obj_id) 191 - { 192 - struct mlx5_core_dev *dev = dm->dev; 193 - u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; 194 - u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {}; 195 - unsigned long *block_map; 196 - u64 icm_start_addr; 197 - u32 log_icm_size; 198 - u32 num_blocks; 199 - u32 max_blocks; 200 - u64 block_idx; 201 - void *sw_icm; 202 - int ret; 203 - 204 - MLX5_SET(general_obj_in_cmd_hdr, in, opcode, 205 - MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 206 - MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM); 207 - MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid); 208 - 209 - switch (type) { 210 - case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 211 - icm_start_addr = MLX5_CAP64_DEV_MEM(dev, 212 - steering_sw_icm_start_address); 213 - log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size); 214 - block_map = dm->steering_sw_icm_alloc_blocks; 215 - break; 216 - case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: 217 - icm_start_addr = MLX5_CAP64_DEV_MEM(dev, 218 - header_modify_sw_icm_start_address); 219 - log_icm_size = MLX5_CAP_DEV_MEM(dev, 220 - log_header_modify_sw_icm_size); 221 - block_map = dm->header_modify_sw_icm_alloc_blocks; 222 - break; 223 - default: 224 - return -EINVAL; 225 - } 226 - 227 - num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >> 228 - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); 229 - max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); 230 - spin_lock(&dm->lock); 231 - block_idx = bitmap_find_next_zero_area(block_map, 232 - max_blocks, 233 - 0, 234 - num_blocks, 0); 235 - 236 - if (block_idx < max_blocks) 237 - bitmap_set(block_map, 238 - block_idx, num_blocks); 239 - 240 - spin_unlock(&dm->lock); 241 - 242 - if (block_idx >= max_blocks) 243 - return -ENOMEM; 244 - 245 - sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm); 246 - icm_start_addr += block_idx << 
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); 247 - MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr, 248 - icm_start_addr); 249 - MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length)); 250 - 251 - ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 252 - if (ret) { 253 - spin_lock(&dm->lock); 254 - bitmap_clear(block_map, 255 - block_idx, num_blocks); 256 - spin_unlock(&dm->lock); 257 - 258 - return ret; 259 - } 260 - 261 - *addr = icm_start_addr; 262 - *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 263 - 264 - return 0; 265 - } 266 - 267 - int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length, 268 - u16 uid, phys_addr_t addr, u32 obj_id) 269 - { 270 - struct mlx5_core_dev *dev = dm->dev; 271 - u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; 272 - u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; 273 - unsigned long *block_map; 274 - u32 num_blocks; 275 - u64 start_idx; 276 - int err; 277 - 278 - num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >> 279 - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); 280 - 281 - switch (type) { 282 - case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 283 - start_idx = 284 - (addr - MLX5_CAP64_DEV_MEM( 285 - dev, steering_sw_icm_start_address)) >> 286 - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); 287 - block_map = dm->steering_sw_icm_alloc_blocks; 288 - break; 289 - case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: 290 - start_idx = 291 - (addr - 292 - MLX5_CAP64_DEV_MEM( 293 - dev, header_modify_sw_icm_start_address)) >> 294 - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); 295 - block_map = dm->header_modify_sw_icm_alloc_blocks; 296 - break; 297 - default: 298 - return -EINVAL; 299 - } 300 - 301 - MLX5_SET(general_obj_in_cmd_hdr, in, opcode, 302 - MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); 303 - MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM); 304 - MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id); 305 - MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid); 306 - 307 - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 308 
- if (err) 309 - return err; 310 - 311 - spin_lock(&dm->lock); 312 - bitmap_clear(block_map, 313 - start_idx, num_blocks); 314 - spin_unlock(&dm->lock); 315 - 316 - return 0; 317 - } 318 - 319 189 int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out) 320 190 { 321 191 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
-4
drivers/infiniband/hw/mlx5/cmd.h
··· 65 65 u16 uid); 66 66 int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, 67 67 u16 opmod, u8 port); 68 - int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length, 69 - u16 uid, phys_addr_t *addr, u32 *obj_id); 70 - int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length, 71 - u16 uid, phys_addr_t addr, u32 obj_id); 72 68 #endif /* MLX5_IB_CMD_H */
+30 -72
drivers/infiniband/hw/mlx5/main.c
··· 2280 2280 return -EOPNOTSUPP; 2281 2281 break; 2282 2282 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 2283 + case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: 2283 2284 if (!capable(CAP_SYS_RAWIO) || 2284 2285 !capable(CAP_NET_RAW)) 2285 2286 return -EPERM; ··· 2345 2344 struct uverbs_attr_bundle *attrs, 2346 2345 int type) 2347 2346 { 2348 - struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm; 2347 + struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev; 2349 2348 u64 act_size; 2350 2349 int err; 2351 2350 2352 2351 /* Allocation size must a multiple of the basic block size 2353 2352 * and a power of 2. 2354 2353 */ 2355 - act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev)); 2354 + act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev)); 2356 2355 act_size = roundup_pow_of_two(act_size); 2357 2356 2358 2357 dm->size = act_size; 2359 - err = mlx5_cmd_alloc_sw_icm(dm_db, type, act_size, 2360 - to_mucontext(ctx)->devx_uid, &dm->dev_addr, 2361 - &dm->icm_dm.obj_id); 2358 + err = mlx5_dm_sw_icm_alloc(dev, type, act_size, 2359 + to_mucontext(ctx)->devx_uid, &dm->dev_addr, 2360 + &dm->icm_dm.obj_id); 2362 2361 if (err) 2363 2362 return err; 2364 2363 ··· 2366 2365 MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET, 2367 2366 &dm->dev_addr, sizeof(dm->dev_addr)); 2368 2367 if (err) 2369 - mlx5_cmd_dealloc_sw_icm(dm_db, type, dm->size, 2370 - to_mucontext(ctx)->devx_uid, 2371 - dm->dev_addr, dm->icm_dm.obj_id); 2368 + mlx5_dm_sw_icm_dealloc(dev, type, dm->size, 2369 + to_mucontext(ctx)->devx_uid, dm->dev_addr, 2370 + dm->icm_dm.obj_id); 2372 2371 2373 2372 return err; 2374 2373 } ··· 2408 2407 attrs); 2409 2408 break; 2410 2409 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 2410 + err = handle_alloc_dm_sw_icm(context, dm, 2411 + attr, attrs, 2412 + MLX5_SW_ICM_TYPE_STEERING); 2413 + break; 2411 2414 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: 2412 - err = handle_alloc_dm_sw_icm(context, dm, attr, attrs, type); 2415 + err = handle_alloc_dm_sw_icm(context, dm, 
2416 + attr, attrs, 2417 + MLX5_SW_ICM_TYPE_HEADER_MODIFY); 2413 2418 break; 2414 2419 default: 2415 2420 err = -EOPNOTSUPP; ··· 2435 2428 { 2436 2429 struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context( 2437 2430 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); 2431 + struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev; 2438 2432 struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm; 2439 2433 struct mlx5_ib_dm *dm = to_mdm(ibdm); 2440 2434 u32 page_idx; ··· 2447 2439 if (ret) 2448 2440 return ret; 2449 2441 2450 - page_idx = (dm->dev_addr - 2451 - pci_resource_start(dm_db->dev->pdev, 0) - 2452 - MLX5_CAP64_DEV_MEM(dm_db->dev, 2453 - memic_bar_start_addr)) >> 2454 - PAGE_SHIFT; 2442 + page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) - 2443 + MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >> 2444 + PAGE_SHIFT; 2455 2445 bitmap_clear(ctx->dm_pages, page_idx, 2456 2446 DIV_ROUND_UP(dm->size, PAGE_SIZE)); 2457 2447 break; 2458 2448 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 2449 + ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING, 2450 + dm->size, ctx->devx_uid, dm->dev_addr, 2451 + dm->icm_dm.obj_id); 2452 + if (ret) 2453 + return ret; 2454 + break; 2459 2455 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: 2460 - ret = mlx5_cmd_dealloc_sw_icm(dm_db, dm->type, dm->size, 2461 - ctx->devx_uid, dm->dev_addr, 2462 - dm->icm_dm.obj_id); 2456 + ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY, 2457 + dm->size, ctx->devx_uid, dm->dev_addr, 2458 + dm->icm_dm.obj_id); 2463 2459 if (ret) 2464 2460 return ret; 2465 2461 break; ··· 6108 6096 6109 6097 static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) 6110 6098 { 6111 - struct mlx5_core_dev *mdev = dev->mdev; 6112 - 6113 6099 mlx5_ib_cleanup_multiport_master(dev); 6114 6100 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { 6115 6101 srcu_barrier(&dev->mr_srcu); ··· 6115 6105 } 6116 6106 6117 6107 WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, 
MLX5_MAX_MEMIC_PAGES)); 6118 - 6119 - WARN_ON(dev->dm.steering_sw_icm_alloc_blocks && 6120 - !bitmap_empty( 6121 - dev->dm.steering_sw_icm_alloc_blocks, 6122 - BIT(MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size) - 6123 - MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev)))); 6124 - 6125 - kfree(dev->dm.steering_sw_icm_alloc_blocks); 6126 - 6127 - WARN_ON(dev->dm.header_modify_sw_icm_alloc_blocks && 6128 - !bitmap_empty(dev->dm.header_modify_sw_icm_alloc_blocks, 6129 - BIT(MLX5_CAP_DEV_MEM( 6130 - mdev, log_header_modify_sw_icm_size) - 6131 - MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev)))); 6132 - 6133 - kfree(dev->dm.header_modify_sw_icm_alloc_blocks); 6134 6108 } 6135 6109 6136 6110 static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) 6137 6111 { 6138 6112 struct mlx5_core_dev *mdev = dev->mdev; 6139 - u64 header_modify_icm_blocks = 0; 6140 - u64 steering_icm_blocks = 0; 6141 6113 int err; 6142 6114 int i; 6143 6115 ··· 6166 6174 INIT_LIST_HEAD(&dev->qp_list); 6167 6175 spin_lock_init(&dev->reset_flow_resource_lock); 6168 6176 6169 - if (MLX5_CAP_GEN_64(mdev, general_obj_types) & 6170 - MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) { 6171 - if (MLX5_CAP64_DEV_MEM(mdev, steering_sw_icm_start_address)) { 6172 - steering_icm_blocks = 6173 - BIT(MLX5_CAP_DEV_MEM(mdev, 6174 - log_steering_sw_icm_size) - 6175 - MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev)); 6176 - 6177 - dev->dm.steering_sw_icm_alloc_blocks = 6178 - kcalloc(BITS_TO_LONGS(steering_icm_blocks), 6179 - sizeof(unsigned long), GFP_KERNEL); 6180 - if (!dev->dm.steering_sw_icm_alloc_blocks) 6181 - goto err_mp; 6182 - } 6183 - 6184 - if (MLX5_CAP64_DEV_MEM(mdev, 6185 - header_modify_sw_icm_start_address)) { 6186 - header_modify_icm_blocks = BIT( 6187 - MLX5_CAP_DEV_MEM( 6188 - mdev, log_header_modify_sw_icm_size) - 6189 - MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev)); 6190 - 6191 - dev->dm.header_modify_sw_icm_alloc_blocks = 6192 - kcalloc(BITS_TO_LONGS(header_modify_icm_blocks), 6193 - sizeof(unsigned long), GFP_KERNEL); 6194 - if 
(!dev->dm.header_modify_sw_icm_alloc_blocks) 6195 - goto err_dm; 6196 - } 6197 - } 6198 - 6199 6177 spin_lock_init(&dev->dm.lock); 6200 6178 dev->dm.dev = mdev; 6201 6179 6202 6180 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { 6203 6181 err = init_srcu_struct(&dev->mr_srcu); 6204 6182 if (err) 6205 - goto err_dm; 6183 + goto err_mp; 6206 6184 } 6207 6185 6208 6186 return 0; 6209 - 6210 - err_dm: 6211 - kfree(dev->dm.steering_sw_icm_alloc_blocks); 6212 - kfree(dev->dm.header_modify_sw_icm_alloc_blocks); 6213 6187 6214 6188 err_mp: 6215 6189 mlx5_ib_cleanup_multiport_master(dev);
-2
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 881 881 */ 882 882 spinlock_t lock; 883 883 DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES); 884 - unsigned long *steering_sw_icm_alloc_blocks; 885 - unsigned long *header_modify_sw_icm_alloc_blocks; 886 884 }; 887 885 888 886 struct mlx5_read_counters_attr {
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/Makefile
··· 15 15 health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \ 16 16 transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \ 17 17 fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \ 18 - lib/devcom.o lib/pci_vsc.o diag/fs_tracepoint.o \ 18 + lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \ 19 19 diag/fw_tracer.o diag/crdump.o devlink.o 20 20 21 21 #
+4 -1
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1896 1896 *match_level = MLX5_MATCH_L2; 1897 1897 } 1898 1898 } else if (*match_level != MLX5_MATCH_NONE) { 1899 - MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1); 1899 + /* cvlan_tag enabled in match criteria and 1900 + * disabled in match value means both S & C tags 1901 + * don't exist (untagged of both) 1902 + */ 1900 1903 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); 1901 1904 *match_level = MLX5_MATCH_L2; 1902 1905 }
+223
drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 + // Copyright (c) 2019 Mellanox Technologies 3 + 4 + #include <linux/mlx5/driver.h> 5 + #include <linux/mlx5/device.h> 6 + 7 + #include "mlx5_core.h" 8 + #include "lib/mlx5.h" 9 + 10 + struct mlx5_dm { 11 + /* protect access to icm bitmask */ 12 + spinlock_t lock; 13 + unsigned long *steering_sw_icm_alloc_blocks; 14 + unsigned long *header_modify_sw_icm_alloc_blocks; 15 + }; 16 + 17 + struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev) 18 + { 19 + u64 header_modify_icm_blocks = 0; 20 + u64 steering_icm_blocks = 0; 21 + struct mlx5_dm *dm; 22 + 23 + if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)) 24 + return 0; 25 + 26 + dm = kzalloc(sizeof(*dm), GFP_KERNEL); 27 + if (!dm) 28 + return ERR_PTR(-ENOMEM); 29 + 30 + spin_lock_init(&dm->lock); 31 + 32 + if (MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address)) { 33 + steering_icm_blocks = 34 + BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) - 35 + MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); 36 + 37 + dm->steering_sw_icm_alloc_blocks = 38 + kcalloc(BITS_TO_LONGS(steering_icm_blocks), 39 + sizeof(unsigned long), GFP_KERNEL); 40 + if (!dm->steering_sw_icm_alloc_blocks) 41 + goto err_steering; 42 + } 43 + 44 + if (MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address)) { 45 + header_modify_icm_blocks = 46 + BIT(MLX5_CAP_DEV_MEM(dev, log_header_modify_sw_icm_size) - 47 + MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); 48 + 49 + dm->header_modify_sw_icm_alloc_blocks = 50 + kcalloc(BITS_TO_LONGS(header_modify_icm_blocks), 51 + sizeof(unsigned long), GFP_KERNEL); 52 + if (!dm->header_modify_sw_icm_alloc_blocks) 53 + goto err_modify_hdr; 54 + } 55 + 56 + return dm; 57 + 58 + err_modify_hdr: 59 + kfree(dm->steering_sw_icm_alloc_blocks); 60 + 61 + err_steering: 62 + kfree(dm); 63 + 64 + return ERR_PTR(-ENOMEM); 65 + } 66 + 67 + void mlx5_dm_cleanup(struct mlx5_core_dev *dev) 68 + { 69 + struct mlx5_dm *dm = dev->dm; 70 + 71 + if (!dev->dm) 
72 + return; 73 + 74 + if (dm->steering_sw_icm_alloc_blocks) { 75 + WARN_ON(!bitmap_empty(dm->steering_sw_icm_alloc_blocks, 76 + BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) - 77 + MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)))); 78 + kfree(dm->steering_sw_icm_alloc_blocks); 79 + } 80 + 81 + if (dm->header_modify_sw_icm_alloc_blocks) { 82 + WARN_ON(!bitmap_empty(dm->header_modify_sw_icm_alloc_blocks, 83 + BIT(MLX5_CAP_DEV_MEM(dev, 84 + log_header_modify_sw_icm_size) - 85 + MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)))); 86 + kfree(dm->header_modify_sw_icm_alloc_blocks); 87 + } 88 + 89 + kfree(dm); 90 + } 91 + 92 + int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, 93 + u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id) 94 + { 95 + u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev)); 96 + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; 97 + u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {}; 98 + struct mlx5_dm *dm = dev->dm; 99 + unsigned long *block_map; 100 + u64 icm_start_addr; 101 + u32 log_icm_size; 102 + u32 max_blocks; 103 + u64 block_idx; 104 + void *sw_icm; 105 + int ret; 106 + 107 + if (!dev->dm) 108 + return -EOPNOTSUPP; 109 + 110 + if (!length || (length & (length - 1)) || 111 + length & (MLX5_SW_ICM_BLOCK_SIZE(dev) - 1)) 112 + return -EINVAL; 113 + 114 + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, 115 + MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 116 + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM); 117 + MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid); 118 + 119 + switch (type) { 120 + case MLX5_SW_ICM_TYPE_STEERING: 121 + icm_start_addr = MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address); 122 + log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size); 123 + block_map = dm->steering_sw_icm_alloc_blocks; 124 + break; 125 + case MLX5_SW_ICM_TYPE_HEADER_MODIFY: 126 + icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address); 127 + log_icm_size = MLX5_CAP_DEV_MEM(dev, 128 + 
log_header_modify_sw_icm_size); 129 + block_map = dm->header_modify_sw_icm_alloc_blocks; 130 + break; 131 + default: 132 + return -EINVAL; 133 + } 134 + 135 + if (!block_map) 136 + return -EOPNOTSUPP; 137 + 138 + max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); 139 + spin_lock(&dm->lock); 140 + block_idx = bitmap_find_next_zero_area(block_map, 141 + max_blocks, 142 + 0, 143 + num_blocks, 0); 144 + 145 + if (block_idx < max_blocks) 146 + bitmap_set(block_map, 147 + block_idx, num_blocks); 148 + 149 + spin_unlock(&dm->lock); 150 + 151 + if (block_idx >= max_blocks) 152 + return -ENOMEM; 153 + 154 + sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm); 155 + icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); 156 + MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr, 157 + icm_start_addr); 158 + MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length)); 159 + 160 + ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 161 + if (ret) { 162 + spin_lock(&dm->lock); 163 + bitmap_clear(block_map, 164 + block_idx, num_blocks); 165 + spin_unlock(&dm->lock); 166 + 167 + return ret; 168 + } 169 + 170 + *addr = icm_start_addr; 171 + *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 172 + 173 + return 0; 174 + } 175 + EXPORT_SYMBOL_GPL(mlx5_dm_sw_icm_alloc); 176 + 177 + int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, 178 + u64 length, u16 uid, phys_addr_t addr, u32 obj_id) 179 + { 180 + u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev)); 181 + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; 182 + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; 183 + struct mlx5_dm *dm = dev->dm; 184 + unsigned long *block_map; 185 + u64 icm_start_addr; 186 + u64 start_idx; 187 + int err; 188 + 189 + if (!dev->dm) 190 + return -EOPNOTSUPP; 191 + 192 + switch (type) { 193 + case MLX5_SW_ICM_TYPE_STEERING: 194 + icm_start_addr = MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address); 195 + block_map = 
dm->steering_sw_icm_alloc_blocks; 196 + break; 197 + case MLX5_SW_ICM_TYPE_HEADER_MODIFY: 198 + icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address); 199 + block_map = dm->header_modify_sw_icm_alloc_blocks; 200 + break; 201 + default: 202 + return -EINVAL; 203 + } 204 + 205 + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, 206 + MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); 207 + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM); 208 + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id); 209 + MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid); 210 + 211 + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 212 + if (err) 213 + return err; 214 + 215 + start_idx = (addr - icm_start_addr) >> MLX5_LOG_SW_ICM_BLOCK_SIZE(dev); 216 + spin_lock(&dm->lock); 217 + bitmap_clear(block_map, 218 + start_idx, num_blocks); 219 + spin_unlock(&dm->lock); 220 + 221 + return 0; 222 + } 223 + EXPORT_SYMBOL_GPL(mlx5_dm_sw_icm_dealloc);
+5
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 876 876 goto err_eswitch_cleanup; 877 877 } 878 878 879 + dev->dm = mlx5_dm_create(dev); 880 + if (IS_ERR(dev->dm)) 881 + mlx5_core_warn(dev, "Failed to init device memory%d\n", err); 882 + 879 883 dev->tracer = mlx5_fw_tracer_create(dev); 880 884 dev->hv_vhca = mlx5_hv_vhca_create(dev); 881 885 ··· 914 910 { 915 911 mlx5_hv_vhca_destroy(dev->hv_vhca); 916 912 mlx5_fw_tracer_destroy(dev->tracer); 913 + mlx5_dm_cleanup(dev); 917 914 mlx5_fpga_cleanup(dev); 918 915 mlx5_eswitch_cleanup(dev->priv.eswitch); 919 916 mlx5_sriov_cleanup(dev);
+3
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 198 198 int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); 199 199 int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); 200 200 201 + struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev); 202 + void mlx5_dm_cleanup(struct mlx5_core_dev *dev); 203 + 201 204 #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \ 202 205 MLX5_CAP_GEN((mdev), pps_modify) && \ 203 206 MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
+5 -3
drivers/net/ethernet/mellanox/mlx5/core/rdma.c
··· 14 14 { 15 15 struct mlx5_core_roce *roce = &dev->priv.roce; 16 16 17 - if (!roce->ft) 18 - return; 19 - 20 17 mlx5_del_flow_rules(roce->allow_rule); 21 18 mlx5_destroy_flow_group(roce->fg); 22 19 mlx5_destroy_flow_table(roce->ft); ··· 142 145 143 146 void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) 144 147 { 148 + struct mlx5_core_roce *roce = &dev->priv.roce; 149 + 150 + if (!roce->ft) 151 + return; 152 + 145 153 mlx5_rdma_disable_roce_steering(dev); 146 154 mlx5_rdma_del_roce_addr(dev); 147 155 mlx5_nic_vport_disable_roce(dev);
+7
include/linux/mlx5/device.h
··· 1162 1162 #define MLX5_CAP_FLOWTABLE(mdev, cap) \ 1163 1163 MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) 1164 1164 1165 + #define MLX5_CAP64_FLOWTABLE(mdev, cap) \ 1166 + MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) 1167 + 1165 1168 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ 1166 1169 MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap) 1167 1170 ··· 1227 1224 #define MLX5_CAP_ESW(mdev, cap) \ 1228 1225 MLX5_GET(e_switch_cap, \ 1229 1226 mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap) 1227 + 1228 + #define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \ 1229 + MLX5_GET64(flow_table_eswitch_cap, \ 1230 + (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) 1230 1231 1231 1232 #define MLX5_CAP_ESW_MAX(mdev, cap) \ 1232 1233 MLX5_GET(e_switch_cap, \
+14
include/linux/mlx5/driver.h
··· 626 626 struct mlx5_sq_bfreg bfreg; 627 627 }; 628 628 629 + enum mlx5_sw_icm_type { 630 + MLX5_SW_ICM_TYPE_STEERING, 631 + MLX5_SW_ICM_TYPE_HEADER_MODIFY, 632 + }; 633 + 629 634 #define MLX5_MAX_RESERVED_GIDS 8 630 635 631 636 struct mlx5_rsvd_gids { ··· 662 657 struct mlx5_pps pps_info; 663 658 }; 664 659 660 + struct mlx5_dm; 665 661 struct mlx5_fw_tracer; 666 662 struct mlx5_vxlan; 667 663 struct mlx5_geneve; 668 664 struct mlx5_hv_vhca; 665 + 666 + #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) 667 + #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) 669 668 670 669 struct mlx5_core_dev { 671 670 struct device *device; ··· 704 695 atomic_t num_qps; 705 696 u32 issi; 706 697 struct mlx5e_resources mlx5e_res; 698 + struct mlx5_dm *dm; 707 699 struct mlx5_vxlan *vxlan; 708 700 struct mlx5_geneve *geneve; 709 701 struct { ··· 1088 1078 size_t *offsets); 1089 1079 struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); 1090 1080 void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); 1081 + int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, 1082 + u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id); 1083 + int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, 1084 + u64 length, u16 uid, phys_addr_t addr, u32 obj_id); 1091 1085 1092 1086 #ifdef CONFIG_MLX5_CORE_IPOIB 1093 1087 struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
+7 -1
include/linux/mlx5/eswitch.h
··· 60 60 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, 61 61 u16 vport_num); 62 62 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); 63 - u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); 64 63 struct mlx5_flow_handle * 65 64 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, 66 65 u16 vport_num, u32 sqn); ··· 73 74 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw); 74 75 u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, 75 76 u16 vport_num); 77 + u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); 76 78 #else /* CONFIG_MLX5_ESWITCH */ 79 + 80 + static inline u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) 81 + { 82 + return MLX5_ESWITCH_NONE; 83 + } 84 + 77 85 static inline enum devlink_eswitch_encap_mode 78 86 mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) 79 87 {
+198 -37
include/linux/mlx5/mlx5_ifc.h
··· 282 282 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, 283 283 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, 284 284 MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942, 285 + MLX5_CMD_OP_SYNC_STEERING = 0xb00, 285 286 MLX5_CMD_OP_FPGA_CREATE_QP = 0x960, 286 287 MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961, 287 288 MLX5_CMD_OP_FPGA_QUERY_QP = 0x962, ··· 486 485 }; 487 486 488 487 struct mlx5_ifc_fte_match_set_misc_bits { 489 - u8 reserved_at_0[0x8]; 488 + u8 gre_c_present[0x1]; 489 + u8 reserved_auto1[0x1]; 490 + u8 gre_k_present[0x1]; 491 + u8 gre_s_present[0x1]; 492 + u8 source_vhca_port[0x4]; 490 493 u8 source_sqn[0x18]; 491 494 492 495 u8 source_eswitch_owner_vhca_id[0x10]; ··· 570 565 571 566 u8 metadata_reg_a[0x20]; 572 567 573 - u8 reserved_at_1a0[0x60]; 568 + u8 metadata_reg_b[0x20]; 569 + 570 + u8 reserved_at_1c0[0x40]; 574 571 }; 575 572 576 573 struct mlx5_ifc_fte_match_set_misc3_bits { 577 - u8 reserved_at_0[0x120]; 574 + u8 inner_tcp_seq_num[0x20]; 575 + 576 + u8 outer_tcp_seq_num[0x20]; 577 + 578 + u8 inner_tcp_ack_num[0x20]; 579 + 580 + u8 outer_tcp_ack_num[0x20]; 581 + 582 + u8 reserved_at_80[0x8]; 583 + u8 outer_vxlan_gpe_vni[0x18]; 584 + 585 + u8 outer_vxlan_gpe_next_protocol[0x8]; 586 + u8 outer_vxlan_gpe_flags[0x8]; 587 + u8 reserved_at_b0[0x10]; 588 + 589 + u8 icmp_header_data[0x20]; 590 + 591 + u8 icmpv6_header_data[0x20]; 592 + 593 + u8 icmp_type[0x8]; 594 + u8 icmp_code[0x8]; 595 + u8 icmpv6_type[0x8]; 596 + u8 icmpv6_code[0x8]; 597 + 578 598 u8 geneve_tlv_option_0_data[0x20]; 599 + 579 600 u8 reserved_at_140[0xc0]; 580 601 }; 581 602 ··· 697 666 698 667 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; 699 668 700 - u8 reserved_at_e00[0x7200]; 669 + u8 reserved_at_e00[0x1200]; 670 + 671 + u8 sw_steering_nic_rx_action_drop_icm_address[0x40]; 672 + 673 + u8 sw_steering_nic_tx_action_drop_icm_address[0x40]; 674 + 675 + u8 sw_steering_nic_tx_action_allow_icm_address[0x40]; 676 + 677 + u8 
reserved_at_20c0[0x5f40]; 701 678 }; 702 679 703 680 enum { ··· 737 698 738 699 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; 739 700 740 - u8 reserved_at_800[0x7800]; 701 + u8 reserved_at_800[0x1000]; 702 + 703 + u8 sw_steering_fdb_action_drop_icm_address_rx[0x40]; 704 + 705 + u8 sw_steering_fdb_action_drop_icm_address_tx[0x40]; 706 + 707 + u8 sw_steering_uplink_icm_address_rx[0x40]; 708 + 709 + u8 sw_steering_uplink_icm_address_tx[0x40]; 710 + 711 + u8 reserved_at_1900[0x6700]; 741 712 }; 742 713 743 714 enum { ··· 896 847 u8 roce_address_table_size[0x10]; 897 848 898 849 u8 reserved_at_100[0x700]; 850 + }; 851 + 852 + struct mlx5_ifc_sync_steering_in_bits { 853 + u8 opcode[0x10]; 854 + u8 uid[0x10]; 855 + 856 + u8 reserved_at_20[0x10]; 857 + u8 op_mod[0x10]; 858 + 859 + u8 reserved_at_40[0xc0]; 860 + }; 861 + 862 + struct mlx5_ifc_sync_steering_out_bits { 863 + u8 status[0x8]; 864 + u8 reserved_at_8[0x18]; 865 + 866 + u8 syndrome[0x20]; 867 + 868 + u8 reserved_at_40[0x40]; 899 869 }; 900 870 901 871 struct mlx5_ifc_device_mem_cap_bits { ··· 1107 1039 MLX5_CAP_UMR_FENCE_STRONG = 0x0, 1108 1040 MLX5_CAP_UMR_FENCE_SMALL = 0x1, 1109 1041 MLX5_CAP_UMR_FENCE_NONE = 0x2, 1042 + }; 1043 + 1044 + enum { 1045 + MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7, 1046 + MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8, 1047 + MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9, 1110 1048 }; 1111 1049 1112 1050 enum { ··· 1488 1414 1489 1415 u8 reserved_at_6c0[0x4]; 1490 1416 u8 flex_parser_id_geneve_tlv_option_0[0x4]; 1491 - u8 reserved_at_6c8[0x28]; 1417 + u8 flex_parser_id_icmp_dw1[0x4]; 1418 + u8 flex_parser_id_icmp_dw0[0x4]; 1419 + u8 flex_parser_id_icmpv6_dw1[0x4]; 1420 + u8 flex_parser_id_icmpv6_dw0[0x4]; 1421 + u8 flex_parser_id_outer_first_mpls_over_gre[0x4]; 1422 + u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4]; 1423 + 1424 + u8 reserved_at_6e0[0x10]; 1492 1425 u8 sf_base_id[0x10]; 1493 1426 1494 1427 u8 reserved_at_700[0x80]; ··· 2733 2652 
struct mlx5_ifc_debug_cap_bits debug_cap; 2734 2653 struct mlx5_ifc_fpga_cap_bits fpga_cap; 2735 2654 struct mlx5_ifc_tls_cap_bits tls_cap; 2655 + struct mlx5_ifc_device_mem_cap_bits device_mem_cap; 2736 2656 u8 reserved_at_0[0x8000]; 2737 2657 }; 2738 2658 ··· 3337 3255 u8 cvlan_pcp[0x3]; 3338 3256 u8 cvlan_id[0xc]; 3339 3257 3340 - u8 reserved_at_60[0x7a0]; 3258 + u8 reserved_at_60[0x720]; 3259 + 3260 + u8 sw_steering_vport_icm_address_rx[0x40]; 3261 + 3262 + u8 sw_steering_vport_icm_address_tx[0x40]; 3341 3263 }; 3342 3264 3343 3265 enum { ··· 5027 4941 u8 reserved_at_20[0x10]; 5028 4942 u8 op_mod[0x10]; 5029 4943 5030 - u8 reserved_at_40[0x40]; 4944 + u8 other_function[0x1]; 4945 + u8 reserved_at_41[0xf]; 4946 + u8 function_id[0x10]; 4947 + 4948 + u8 reserved_at_60[0x20]; 4949 + }; 4950 + 4951 + struct mlx5_ifc_other_hca_cap_bits { 4952 + u8 roce[0x1]; 4953 + u8 reserved_0[0x27f]; 4954 + }; 4955 + 4956 + struct mlx5_ifc_query_other_hca_cap_out_bits { 4957 + u8 status[0x8]; 4958 + u8 reserved_0[0x18]; 4959 + 4960 + u8 syndrome[0x20]; 4961 + 4962 + u8 reserved_1[0x40]; 4963 + 4964 + struct mlx5_ifc_other_hca_cap_bits other_capability; 4965 + }; 4966 + 4967 + struct mlx5_ifc_query_other_hca_cap_in_bits { 4968 + u8 opcode[0x10]; 4969 + u8 reserved_0[0x10]; 4970 + 4971 + u8 reserved_1[0x10]; 4972 + u8 op_mod[0x10]; 4973 + 4974 + u8 reserved_2[0x10]; 4975 + u8 function_id[0x10]; 4976 + 4977 + u8 reserved_3[0x20]; 4978 + }; 4979 + 4980 + struct mlx5_ifc_modify_other_hca_cap_out_bits { 4981 + u8 status[0x8]; 4982 + u8 reserved_0[0x18]; 4983 + 4984 + u8 syndrome[0x20]; 4985 + 4986 + u8 reserved_1[0x40]; 4987 + }; 4988 + 4989 + struct mlx5_ifc_modify_other_hca_cap_in_bits { 4990 + u8 opcode[0x10]; 4991 + u8 reserved_0[0x10]; 4992 + 4993 + u8 reserved_1[0x10]; 4994 + u8 op_mod[0x10]; 4995 + 4996 + u8 reserved_2[0x10]; 4997 + u8 function_id[0x10]; 4998 + u8 field_select[0x20]; 4999 + 5000 + struct mlx5_ifc_other_hca_cap_bits other_capability; 5001 + }; 5002 + 5003 + struct 
mlx5_ifc_flow_table_context_bits { 5004 + u8 reformat_en[0x1]; 5005 + u8 decap_en[0x1]; 5006 + u8 sw_owner[0x1]; 5007 + u8 termination_table[0x1]; 5008 + u8 table_miss_action[0x4]; 5009 + u8 level[0x8]; 5010 + u8 reserved_at_10[0x8]; 5011 + u8 log_size[0x8]; 5012 + 5013 + u8 reserved_at_20[0x8]; 5014 + u8 table_miss_id[0x18]; 5015 + 5016 + u8 reserved_at_40[0x8]; 5017 + u8 lag_master_next_table_id[0x18]; 5018 + 5019 + u8 reserved_at_60[0x60]; 5020 + 5021 + u8 sw_owner_icm_root_1[0x40]; 5022 + 5023 + u8 sw_owner_icm_root_0[0x40]; 5024 + 5031 5025 }; 5032 5026 5033 5027 struct mlx5_ifc_query_flow_table_out_bits { ··· 5118 4952 5119 4953 u8 reserved_at_40[0x80]; 5120 4954 5121 - u8 reserved_at_c0[0x8]; 5122 - u8 level[0x8]; 5123 - u8 reserved_at_d0[0x8]; 5124 - u8 log_size[0x8]; 5125 - 5126 - u8 reserved_at_e0[0x120]; 4955 + struct mlx5_ifc_flow_table_context_bits flow_table_context; 5127 4956 }; 5128 4957 5129 4958 struct mlx5_ifc_query_flow_table_in_bits { ··· 5388 5227 u8 reserved_at_60[0x20]; 5389 5228 }; 5390 5229 5391 - enum { 5230 + enum mlx5_reformat_ctx_type { 5392 5231 MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0, 5393 5232 MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1, 5394 5233 MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2, ··· 5484 5323 MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16, 5485 5324 MLX5_ACTION_IN_FIELD_OUT_FIRST_VID = 0x17, 5486 5325 MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47, 5326 + MLX5_ACTION_IN_FIELD_METADATA_REG_A = 0x49, 5327 + MLX5_ACTION_IN_FIELD_METADATA_REG_B = 0x50, 5487 5328 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0 = 0x51, 5329 + MLX5_ACTION_IN_FIELD_METADATA_REG_C_1 = 0x52, 5330 + MLX5_ACTION_IN_FIELD_METADATA_REG_C_2 = 0x53, 5331 + MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54, 5332 + MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55, 5333 + MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56, 5334 + MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59, 5335 + MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B, 5488 5336 }; 5489 5337 5490 5338 struct 
mlx5_ifc_alloc_modify_header_context_out_bits { ··· 7541 7371 u8 klm_pas_mtt[0][0x20]; 7542 7372 }; 7543 7373 7374 + enum { 7375 + MLX5_FLOW_TABLE_TYPE_NIC_RX = 0x0, 7376 + MLX5_FLOW_TABLE_TYPE_NIC_TX = 0x1, 7377 + MLX5_FLOW_TABLE_TYPE_ESW_EGRESS_ACL = 0x2, 7378 + MLX5_FLOW_TABLE_TYPE_ESW_INGRESS_ACL = 0x3, 7379 + MLX5_FLOW_TABLE_TYPE_FDB = 0X4, 7380 + MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 0X5, 7381 + MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 0X6, 7382 + }; 7383 + 7544 7384 struct mlx5_ifc_create_flow_table_out_bits { 7545 7385 u8 status[0x8]; 7546 - u8 reserved_at_8[0x18]; 7386 + u8 icm_address_63_40[0x18]; 7547 7387 7548 7388 u8 syndrome[0x20]; 7549 7389 7550 - u8 reserved_at_40[0x8]; 7390 + u8 icm_address_39_32[0x8]; 7551 7391 u8 table_id[0x18]; 7552 7392 7553 - u8 reserved_at_60[0x20]; 7554 - }; 7555 - 7556 - struct mlx5_ifc_flow_table_context_bits { 7557 - u8 reformat_en[0x1]; 7558 - u8 decap_en[0x1]; 7559 - u8 reserved_at_2[0x1]; 7560 - u8 termination_table[0x1]; 7561 - u8 table_miss_action[0x4]; 7562 - u8 level[0x8]; 7563 - u8 reserved_at_10[0x8]; 7564 - u8 log_size[0x8]; 7565 - 7566 - u8 reserved_at_20[0x8]; 7567 - u8 table_miss_id[0x18]; 7568 - 7569 - u8 reserved_at_40[0x8]; 7570 - u8 lag_master_next_table_id[0x18]; 7571 - 7572 - u8 reserved_at_60[0xe0]; 7393 + u8 icm_address_31_0[0x20]; 7573 7394 }; 7574 7395 7575 7396 struct mlx5_ifc_create_flow_table_in_bits {