Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-next-2020-12-02' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Saeed Mahameed says:

====================
mlx5-next-2020-12-02

Low level mlx5 updates required by both netdev and rdma trees.

* tag 'mlx5-next-2020-12-02' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
net/mlx5: Treat host PF vport as other (non eswitch manager) vport
net/mlx5: Enable host PF HCA after eswitch is initialized
net/mlx5: Rename peer_pf to host_pf
net/mlx5: Make API mlx5_core_is_ecpf accept const pointer
net/mlx5: Export steering related functions
net/mlx5: Expose other function ifc bits
net/mlx5: Expose IP-in-IP TX and RX capability bits
net/mlx5: Update the hardware interface definition for vhca state
net/mlx5: Update the list of the PCI supported devices
net/mlx5: Avoid exposing driver internal command helpers
net/mlx5: Add ts_cqe_to_dest_cqn related bits
net/mlx5: Add misc4 to mlx5_ifc_fte_match_param_bits
net/mlx5: Check dr mask size against mlx5_match_param size
net/mlx5: Add sampler destination type
net/mlx5: Add sample offload hardware bits and structures
====================

Link: https://lore.kernel.org/r/20201203011010.213440-1-saeedm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+250 -104
-3
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 2142 2142 kvfree(cmd->stats); 2143 2143 return err; 2144 2144 } 2145 - EXPORT_SYMBOL(mlx5_cmd_init); 2146 2145 2147 2146 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) 2148 2147 { ··· 2154 2155 dma_pool_destroy(cmd->pool); 2155 2156 kvfree(cmd->stats); 2156 2157 } 2157 - EXPORT_SYMBOL(mlx5_cmd_cleanup); 2158 2158 2159 2159 void mlx5_cmd_set_state(struct mlx5_core_dev *dev, 2160 2160 enum mlx5_cmdif_state cmdif_state) 2161 2161 { 2162 2162 dev->cmd.state = cmdif_state; 2163 2163 } 2164 - EXPORT_SYMBOL(mlx5_cmd_set_state);
+3
drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
··· 247 247 case MLX5_FLOW_DESTINATION_TYPE_TIR: 248 248 trace_seq_printf(p, "tir=%u\n", dst->tir_num); 249 249 break; 250 + case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER: 251 + trace_seq_printf(p, "sampler_id=%u\n", dst->sampler_id); 252 + break; 250 253 case MLX5_FLOW_DESTINATION_TYPE_COUNTER: 251 254 trace_seq_printf(p, "counter_id=%u\n", counter_id); 252 255 break;
+54 -22
drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
··· 8 8 return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1; 9 9 } 10 10 11 - static int mlx5_peer_pf_init(struct mlx5_core_dev *dev) 11 + static bool mlx5_ecpf_esw_admins_host_pf(const struct mlx5_core_dev *dev) 12 12 { 13 - u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {}; 14 - int err; 13 + /* In separate host mode, PF enables itself. 14 + * When ECPF is eswitch manager, eswitch enables host PF after 15 + * eswitch is setup. 16 + */ 17 + return mlx5_core_is_ecpf_esw_manager(dev); 18 + } 19 + 20 + int mlx5_cmd_host_pf_enable_hca(struct mlx5_core_dev *dev) 21 + { 22 + u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {}; 23 + u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {}; 15 24 16 25 MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); 17 - err = mlx5_cmd_exec_in(dev, enable_hca, in); 26 + MLX5_SET(enable_hca_in, in, function_id, 0); 27 + MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0); 28 + return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 29 + } 30 + 31 + int mlx5_cmd_host_pf_disable_hca(struct mlx5_core_dev *dev) 32 + { 33 + u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {}; 34 + u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {}; 35 + 36 + MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); 37 + MLX5_SET(disable_hca_in, in, function_id, 0); 38 + MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0); 39 + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 40 + } 41 + 42 + static int mlx5_host_pf_init(struct mlx5_core_dev *dev) 43 + { 44 + int err; 45 + 46 + if (mlx5_ecpf_esw_admins_host_pf(dev)) 47 + return 0; 48 + 49 + /* ECPF shall enable HCA for host PF in the same way a PF 50 + * does this for its VFs when ECPF is not a eswitch manager. 
51 + */ 52 + err = mlx5_cmd_host_pf_enable_hca(dev); 18 53 if (err) 19 - mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n", 20 - err); 54 + mlx5_core_err(dev, "Failed to enable external host PF HCA err(%d)\n", err); 21 55 22 56 return err; 23 57 } 24 58 25 - static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev) 59 + static void mlx5_host_pf_cleanup(struct mlx5_core_dev *dev) 26 60 { 27 - u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {}; 28 61 int err; 29 62 30 - MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); 31 - err = mlx5_cmd_exec_in(dev, disable_hca, in); 63 + if (mlx5_ecpf_esw_admins_host_pf(dev)) 64 + return; 65 + 66 + err = mlx5_cmd_host_pf_disable_hca(dev); 32 67 if (err) { 33 - mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n", 34 - err); 68 + mlx5_core_err(dev, "Failed to disable external host PF HCA err(%d)\n", err); 35 69 return; 36 70 } 37 - 38 - err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages); 39 - if (err) 40 - mlx5_core_warn(dev, "Timeout reclaiming peer PF pages err(%d)\n", 41 - err); 42 71 } 43 72 44 73 int mlx5_ec_init(struct mlx5_core_dev *dev) ··· 75 46 if (!mlx5_core_is_ecpf(dev)) 76 47 return 0; 77 48 78 - /* ECPF shall enable HCA for peer PF in the same way a PF 79 - * does this for its VFs. 80 - */ 81 - return mlx5_peer_pf_init(dev); 49 + return mlx5_host_pf_init(dev); 82 50 } 83 51 84 52 void mlx5_ec_cleanup(struct mlx5_core_dev *dev) 85 53 { 54 + int err; 55 + 86 56 if (!mlx5_core_is_ecpf(dev)) 87 57 return; 88 58 89 - mlx5_peer_pf_cleanup(dev); 59 + mlx5_host_pf_cleanup(dev); 60 + 61 + err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages); 62 + if (err) 63 + mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err); 90 64 }
+3
drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
··· 17 17 int mlx5_ec_init(struct mlx5_core_dev *dev); 18 18 void mlx5_ec_cleanup(struct mlx5_core_dev *dev); 19 19 20 + int mlx5_cmd_host_pf_enable_hca(struct mlx5_core_dev *dev); 21 + int mlx5_cmd_host_pf_disable_hca(struct mlx5_core_dev *dev); 22 + 20 23 #else /* CONFIG_MLX5_ESWITCH */ 21 24 22 25 static inline bool
+4 -1
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
··· 8 8 struct mlx5_flow_table * 9 9 esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size) 10 10 { 11 + struct mlx5_flow_table_attr ft_attr = {}; 11 12 struct mlx5_core_dev *dev = esw->dev; 12 13 struct mlx5_flow_namespace *root_ns; 13 14 struct mlx5_flow_table *acl; ··· 34 33 return ERR_PTR(-EOPNOTSUPP); 35 34 } 36 35 37 - acl = mlx5_create_vport_flow_table(root_ns, 0, size, 0, vport_num); 36 + ft_attr.max_fte = size; 37 + ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT; 38 + acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport_num); 38 39 if (IS_ERR(acl)) { 39 40 err = PTR_ERR(acl); 40 41 esw_warn(dev, "vport[%d] create %s ACL table, err(%d)\n", vport_num,
+28 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 1474 1474 return err; 1475 1475 } 1476 1476 1477 + static int host_pf_enable_hca(struct mlx5_core_dev *dev) 1478 + { 1479 + if (!mlx5_core_is_ecpf(dev)) 1480 + return 0; 1481 + 1482 + /* Once vport and representor are ready, take out the external host PF 1483 + * out of initializing state. Enabling HCA clears the iseg->initializing 1484 + * bit and host PF driver loading can progress. 1485 + */ 1486 + return mlx5_cmd_host_pf_enable_hca(dev); 1487 + } 1488 + 1489 + static void host_pf_disable_hca(struct mlx5_core_dev *dev) 1490 + { 1491 + if (!mlx5_core_is_ecpf(dev)) 1492 + return; 1493 + 1494 + mlx5_cmd_host_pf_disable_hca(dev); 1495 + } 1496 + 1477 1497 /* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs 1478 1498 * whichever are present on the eswitch. 1479 1499 */ ··· 1507 1487 ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events); 1508 1488 if (ret) 1509 1489 return ret; 1490 + 1491 + /* Enable external host PF HCA */ 1492 + ret = host_pf_enable_hca(esw->dev); 1493 + if (ret) 1494 + goto pf_hca_err; 1510 1495 1511 1496 /* Enable ECPF vport */ 1512 1497 if (mlx5_ecpf_vport_exists(esw->dev)) { ··· 1530 1505 vf_err: 1531 1506 if (mlx5_ecpf_vport_exists(esw->dev)) 1532 1507 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); 1533 - 1534 1508 ecpf_err: 1509 + host_pf_disable_hca(esw->dev); 1510 + pf_hca_err: 1535 1511 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); 1536 1512 return ret; 1537 1513 } ··· 1547 1521 if (mlx5_ecpf_vport_exists(esw->dev)) 1548 1522 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); 1549 1523 1524 + host_pf_disable_hca(esw->dev); 1550 1525 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); 1551 1526 }
+27 -30
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
··· 172 172 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); 173 173 174 174 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn); 175 - if (ft->vport) { 176 - MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); 177 - MLX5_SET(set_flow_table_root_in, in, other_vport, 1); 178 - } 175 + MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); 176 + MLX5_SET(set_flow_table_root_in, in, other_vport, 177 + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); 179 178 180 179 return mlx5_cmd_exec_in(dev, set_flow_table_root, in); 181 180 } ··· 198 199 MLX5_SET(create_flow_table_in, in, table_type, ft->type); 199 200 MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level); 200 201 MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size); 201 - if (ft->vport) { 202 - MLX5_SET(create_flow_table_in, in, vport_number, ft->vport); 203 - MLX5_SET(create_flow_table_in, in, other_vport, 1); 204 - } 202 + MLX5_SET(create_flow_table_in, in, vport_number, ft->vport); 203 + MLX5_SET(create_flow_table_in, in, other_vport, 204 + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); 205 205 206 206 MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en, 207 207 en_decap); ··· 250 252 MLX5_CMD_OP_DESTROY_FLOW_TABLE); 251 253 MLX5_SET(destroy_flow_table_in, in, table_type, ft->type); 252 254 MLX5_SET(destroy_flow_table_in, in, table_id, ft->id); 253 - if (ft->vport) { 254 - MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport); 255 - MLX5_SET(destroy_flow_table_in, in, other_vport, 1); 256 - } 255 + MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport); 256 + MLX5_SET(destroy_flow_table_in, in, other_vport, 257 + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); 257 258 258 259 return mlx5_cmd_exec_in(dev, destroy_flow_table, in); 259 260 } ··· 280 283 flow_table_context.lag_master_next_table_id, 0); 281 284 } 282 285 } else { 283 - if (ft->vport) { 284 - MLX5_SET(modify_flow_table_in, in, vport_number, 285 - 
ft->vport); 286 - MLX5_SET(modify_flow_table_in, in, other_vport, 1); 287 - } 286 + MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport); 287 + MLX5_SET(modify_flow_table_in, in, other_vport, 288 + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); 288 289 MLX5_SET(modify_flow_table_in, in, modify_field_select, 289 290 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); 290 291 if (next_ft) { ··· 320 325 MLX5_SET(create_flow_group_in, in, other_vport, 1); 321 326 } 322 327 328 + MLX5_SET(create_flow_group_in, in, vport_number, ft->vport); 329 + MLX5_SET(create_flow_group_in, in, other_vport, 330 + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); 323 331 err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out); 324 332 if (!err) 325 333 fg->id = MLX5_GET(create_flow_group_out, out, ··· 342 344 MLX5_SET(destroy_flow_group_in, in, table_type, ft->type); 343 345 MLX5_SET(destroy_flow_group_in, in, table_id, ft->id); 344 346 MLX5_SET(destroy_flow_group_in, in, group_id, fg->id); 345 - if (ft->vport) { 346 - MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport); 347 - MLX5_SET(destroy_flow_group_in, in, other_vport, 1); 348 - } 349 - 347 + MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport); 348 + MLX5_SET(destroy_flow_group_in, in, other_vport, 349 + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); 350 350 return mlx5_cmd_exec_in(dev, destroy_flow_group, in); 351 351 } 352 352 ··· 423 427 MLX5_SET(set_fte_in, in, ignore_flow_level, 424 428 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL)); 425 429 426 - if (ft->vport) { 427 - MLX5_SET(set_fte_in, in, vport_number, ft->vport); 428 - MLX5_SET(set_fte_in, in, other_vport, 1); 429 - } 430 + MLX5_SET(set_fte_in, in, vport_number, ft->vport); 431 + MLX5_SET(set_fte_in, in, other_vport, 432 + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); 430 433 431 434 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); 432 435 MLX5_SET(flow_context, in_flow_context, group_id, group_id); ··· 509 514 packet_reformat_id, 510 515 
dst->dest_attr.vport.pkt_reformat->id); 511 516 } 517 + break; 518 + case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER: 519 + id = dst->dest_attr.sampler_id; 512 520 break; 513 521 default: 514 522 id = dst->dest_attr.tir_num; ··· 599 601 MLX5_SET(delete_fte_in, in, table_type, ft->type); 600 602 MLX5_SET(delete_fte_in, in, table_id, ft->id); 601 603 MLX5_SET(delete_fte_in, in, flow_index, fte->index); 602 - if (ft->vport) { 603 - MLX5_SET(delete_fte_in, in, vport_number, ft->vport); 604 - MLX5_SET(delete_fte_in, in, other_vport, 1); 605 - } 604 + MLX5_SET(delete_fte_in, in, vport_number, ft->vport); 605 + MLX5_SET(delete_fte_in, in, other_vport, 606 + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); 606 607 607 608 return mlx5_cmd_exec_in(dev, delete_fte, in); 608 609 }
+7 -10
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1152 1152 { 1153 1153 return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0); 1154 1154 } 1155 + EXPORT_SYMBOL(mlx5_create_flow_table); 1155 1156 1156 - struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, 1157 - int prio, int max_fte, 1158 - u32 level, u16 vport) 1157 + struct mlx5_flow_table * 1158 + mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, 1159 + struct mlx5_flow_table_attr *ft_attr, u16 vport) 1159 1160 { 1160 - struct mlx5_flow_table_attr ft_attr = {}; 1161 - 1162 - ft_attr.max_fte = max_fte; 1163 - ft_attr.level = level; 1164 - ft_attr.prio = prio; 1165 - 1166 - return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport); 1161 + return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport); 1167 1162 } 1168 1163 1169 1164 struct mlx5_flow_table* ··· 1238 1243 1239 1244 return fg; 1240 1245 } 1246 + EXPORT_SYMBOL(mlx5_create_flow_group); 1241 1247 1242 1248 static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest) 1243 1249 { ··· 2142 2146 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n", 2143 2147 fg->id); 2144 2148 } 2149 + EXPORT_SYMBOL(mlx5_destroy_flow_group); 2145 2150 2146 2151 struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, 2147 2152 int n)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
··· 194 194 u32 qpn; 195 195 }; 196 196 197 - #define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_a00 197 + #define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_c00 198 198 /* Calculate the fte_match_param length and without the reserved length. 199 199 * Make sure the reserved field is the last. 200 200 */
+10 -9
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1126 1126 goto err_sriov; 1127 1127 } 1128 1128 1129 - err = mlx5_sriov_attach(dev); 1130 - if (err) { 1131 - mlx5_core_err(dev, "sriov init failed %d\n", err); 1132 - goto err_sriov; 1133 - } 1134 - 1135 1129 err = mlx5_ec_init(dev); 1136 1130 if (err) { 1137 1131 mlx5_core_err(dev, "Failed to init embedded CPU\n"); 1138 1132 goto err_ec; 1139 1133 } 1140 1134 1135 + err = mlx5_sriov_attach(dev); 1136 + if (err) { 1137 + mlx5_core_err(dev, "sriov init failed %d\n", err); 1138 + goto err_sriov; 1139 + } 1140 + 1141 1141 return 0; 1142 1142 1143 - err_ec: 1144 - mlx5_sriov_detach(dev); 1145 1143 err_sriov: 1144 + mlx5_ec_cleanup(dev); 1145 + err_ec: 1146 1146 mlx5_cleanup_fs(dev); 1147 1147 err_fs: 1148 1148 mlx5_accel_tls_cleanup(dev); ··· 1168 1168 1169 1169 static void mlx5_unload(struct mlx5_core_dev *dev) 1170 1170 { 1171 - mlx5_ec_cleanup(dev); 1172 1171 mlx5_sriov_detach(dev); 1172 + mlx5_ec_cleanup(dev); 1173 1173 mlx5_cleanup_fs(dev); 1174 1174 mlx5_accel_ipsec_cleanup(dev); 1175 1175 mlx5_accel_tls_cleanup(dev); ··· 1594 1594 { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ 1595 1595 { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ 1596 1596 { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ 1597 + { PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */ 1597 1598 { 0, } 1598 1599 }; 1599 1600
+4
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 122 122 123 123 int mlx5_query_hca_caps(struct mlx5_core_dev *dev); 124 124 int mlx5_query_board_id(struct mlx5_core_dev *dev); 125 + int mlx5_cmd_init(struct mlx5_core_dev *dev); 126 + void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); 127 + void mlx5_cmd_set_state(struct mlx5_core_dev *dev, 128 + enum mlx5_cmdif_state cmdif_state); 125 129 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id); 126 130 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); 127 131 int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
+6 -6
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
··· 374 374 if (func_id) 375 375 dev->priv.vfs_pages += npages; 376 376 else if (mlx5_core_is_ecpf(dev) && !ec_function) 377 - dev->priv.peer_pf_pages += npages; 377 + dev->priv.host_pf_pages += npages; 378 378 379 379 mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n", 380 380 npages, ec_function, func_id, err); ··· 416 416 if (func_id) 417 417 dev->priv.vfs_pages -= npages; 418 418 else if (mlx5_core_is_ecpf(dev) && !ec_function) 419 - dev->priv.peer_pf_pages -= npages; 419 + dev->priv.host_pf_pages -= npages; 420 420 421 421 mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n", 422 422 npages, ec_function, func_id); ··· 506 506 if (func_id) 507 507 dev->priv.vfs_pages -= num_claimed; 508 508 else if (mlx5_core_is_ecpf(dev) && !ec_function) 509 - dev->priv.peer_pf_pages -= num_claimed; 509 + dev->priv.host_pf_pages -= num_claimed; 510 510 511 511 out_free: 512 512 kvfree(out); ··· 661 661 WARN(dev->priv.vfs_pages, 662 662 "VFs FW pages counter is %d after reclaiming all pages\n", 663 663 dev->priv.vfs_pages); 664 - WARN(dev->priv.peer_pf_pages, 665 - "Peer PF FW pages counter is %d after reclaiming all pages\n", 666 - dev->priv.peer_pf_pages); 664 + WARN(dev->priv.host_pf_pages, 665 + "External host PF FW pages counter is %d after reclaiming all pages\n", 666 + dev->priv.host_pf_pages); 667 667 668 668 return 0; 669 669 }
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
··· 643 643 } 644 644 645 645 if (mask) { 646 - if (mask->match_sz > sizeof(struct mlx5dr_match_param)) { 646 + if (mask->match_sz > DR_SZ_MATCH_PARAM) { 647 647 mlx5dr_err(dmn, "Invalid match size attribute\n"); 648 648 return -EINVAL; 649 649 }
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
··· 874 874 u32 s_idx, e_idx; 875 875 876 876 if (!value_size || 877 - (value_size > sizeof(struct mlx5dr_match_param) || 878 - (value_size % sizeof(u32)))) { 877 + (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) { 879 878 mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n"); 880 879 return false; 881 880 }
+1
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
··· 17 17 #define WIRE_PORT 0xFFFF 18 18 #define DR_STE_SVLAN 0x1 19 19 #define DR_STE_CVLAN 0x2 20 + #define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4) 20 21 21 22 #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg) 22 23 #define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
+8
include/linux/mlx5/device.h
··· 346 346 MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, 347 347 348 348 MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe, 349 + MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf, 349 350 350 351 MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, 351 352 MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d, ··· 718 717 u8 sync_rst_state; 719 718 }; 720 719 720 + struct mlx5_eqe_vhca_state { 721 + __be16 ec_function; 722 + __be16 function_id; 723 + } __packed; 724 + 721 725 union ev_data { 722 726 __be32 raw[7]; 723 727 struct mlx5_eqe_cmd cmd; ··· 742 736 struct mlx5_eqe_temp_warning temp_warning; 743 737 struct mlx5_eqe_xrq_err xrq_err; 744 738 struct mlx5_eqe_sync_fw_update sync_fw_update; 739 + struct mlx5_eqe_vhca_state vhca_state; 745 740 } __packed; 746 741 747 742 struct mlx5_eqe { ··· 1083 1076 MLX5_MATCH_INNER_HEADERS = 1 << 2, 1084 1077 MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3, 1085 1078 MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4, 1079 + MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5, 1086 1080 }; 1087 1081 1088 1082 enum {
+2 -6
include/linux/mlx5/driver.h
··· 547 547 atomic_t reg_pages; 548 548 struct list_head free_list; 549 549 int vfs_pages; 550 - int peer_pf_pages; 550 + int host_pf_pages; 551 551 552 552 struct mlx5_core_health health; 553 553 ··· 888 888 CMD_ALLOWED_OPCODE_ALL, 889 889 }; 890 890 891 - int mlx5_cmd_init(struct mlx5_core_dev *dev); 892 - void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); 893 - void mlx5_cmd_set_state(struct mlx5_core_dev *dev, 894 - enum mlx5_cmdif_state cmdif_state); 895 891 void mlx5_cmd_use_events(struct mlx5_core_dev *dev); 896 892 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); 897 893 void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); ··· 1133 1137 return dev->coredev_type == MLX5_COREDEV_VF; 1134 1138 } 1135 1139 1136 - static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev) 1140 + static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev) 1137 1141 { 1138 1142 return dev->caps.embedded_cpu; 1139 1143 }
+3 -3
include/linux/mlx5/fs.h
··· 50 50 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1), 51 51 MLX5_FLOW_TABLE_TERMINATION = BIT(2), 52 52 MLX5_FLOW_TABLE_UNMANAGED = BIT(3), 53 + MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4), 53 54 }; 54 55 55 56 #define LEFTOVERS_RULE_NUM 2 ··· 133 132 struct mlx5_pkt_reformat *pkt_reformat; 134 133 u8 flags; 135 134 } vport; 135 + u32 sampler_id; 136 136 }; 137 137 }; 138 138 ··· 175 173 176 174 struct mlx5_flow_table * 177 175 mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, 178 - int prio, 179 - int num_flow_table_entries, 180 - u32 level, u16 vport); 176 + struct mlx5_flow_table_attr *ft_attr, u16 vport); 181 177 struct mlx5_flow_table *mlx5_create_lag_demux_flow_table( 182 178 struct mlx5_flow_namespace *ns, 183 179 int prio, u32 level);
+86 -8
include/linux/mlx5/mlx5_ifc.h
··· 299 299 MLX5_CMD_OP_CREATE_UMEM = 0xa08, 300 300 MLX5_CMD_OP_DESTROY_UMEM = 0xa0a, 301 301 MLX5_CMD_OP_SYNC_STEERING = 0xb00, 302 + MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d, 303 + MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e, 302 304 MLX5_CMD_OP_MAX 303 305 }; 304 306 ··· 625 623 u8 reserved_at_140[0xc0]; 626 624 }; 627 625 626 + struct mlx5_ifc_fte_match_set_misc4_bits { 627 + u8 prog_sample_field_value_0[0x20]; 628 + 629 + u8 prog_sample_field_id_0[0x20]; 630 + 631 + u8 prog_sample_field_value_1[0x20]; 632 + 633 + u8 prog_sample_field_id_1[0x20]; 634 + 635 + u8 prog_sample_field_value_2[0x20]; 636 + 637 + u8 prog_sample_field_id_2[0x20]; 638 + 639 + u8 prog_sample_field_value_3[0x20]; 640 + 641 + u8 prog_sample_field_id_3[0x20]; 642 + 643 + u8 reserved_at_100[0x100]; 644 + }; 645 + 628 646 struct mlx5_ifc_cmd_pas_bits { 629 647 u8 pa_h[0x20]; 630 648 ··· 913 891 u8 tunnel_stateless_ipv4_over_vxlan[0x1]; 914 892 u8 tunnel_stateless_ip_over_ip[0x1]; 915 893 u8 insert_trailer[0x1]; 916 - u8 reserved_at_2b[0x5]; 894 + u8 reserved_at_2b[0x1]; 895 + u8 tunnel_stateless_ip_over_ip_rx[0x1]; 896 + u8 tunnel_stateless_ip_over_ip_tx[0x1]; 897 + u8 reserved_at_2e[0x2]; 917 898 u8 max_vxlan_udp_ports[0x8]; 918 899 u8 reserved_at_38[0x6]; 919 900 u8 max_geneve_opt_len[0x1]; ··· 1249 1224 #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum)) 1250 1225 1251 1226 struct mlx5_ifc_cmd_hca_cap_bits { 1252 - u8 reserved_at_0[0x30]; 1227 + u8 reserved_at_0[0x1f]; 1228 + u8 vhca_resource_manager[0x1]; 1229 + 1230 + u8 reserved_at_20[0x3]; 1231 + u8 event_on_vhca_state_teardown_request[0x1]; 1232 + u8 event_on_vhca_state_in_use[0x1]; 1233 + u8 event_on_vhca_state_active[0x1]; 1234 + u8 event_on_vhca_state_allocated[0x1]; 1235 + u8 event_on_vhca_state_invalid[0x1]; 1236 + u8 reserved_at_28[0x8]; 1253 1237 u8 vhca_id[0x10]; 1254 1238 1255 1239 u8 reserved_at_40[0x40]; ··· 1275 1241 u8 ece_support[0x1]; 1276 1242 u8 reserved_at_a4[0x7]; 1277 1243 u8 log_max_srq[0x5]; 1278 - 
u8 reserved_at_b0[0x10]; 1244 + u8 reserved_at_b0[0x2]; 1245 + u8 ts_cqe_to_dest_cqn[0x1]; 1246 + u8 reserved_at_b3[0xd]; 1279 1247 1280 1248 u8 max_sgl_for_optimized_performance[0x8]; 1281 1249 u8 log_max_cq_sz[0x8]; ··· 1548 1512 u8 disable_local_lb_uc[0x1]; 1549 1513 u8 disable_local_lb_mc[0x1]; 1550 1514 u8 log_min_hairpin_wq_data_sz[0x5]; 1551 - u8 reserved_at_3e8[0x3]; 1515 + u8 reserved_at_3e8[0x2]; 1516 + u8 vhca_state[0x1]; 1552 1517 u8 log_max_vlan_list[0x5]; 1553 1518 u8 reserved_at_3f0[0x3]; 1554 1519 u8 log_max_current_mc_list[0x5]; ··· 1617 1580 u8 max_num_of_monitor_counters[0x10]; 1618 1581 u8 num_ppcnt_monitor_counters[0x10]; 1619 1582 1620 - u8 reserved_at_640[0x10]; 1583 + u8 max_num_sf[0x10]; 1621 1584 u8 num_q_monitor_counters[0x10]; 1622 1585 1623 1586 u8 reserved_at_660[0x20]; ··· 1653 1616 MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, 1654 1617 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, 1655 1618 MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, 1619 + MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6, 1656 1620 1657 1621 MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99, 1658 1622 MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, ··· 1706 1668 1707 1669 struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3; 1708 1670 1709 - u8 reserved_at_a00[0x600]; 1671 + struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4; 1672 + 1673 + u8 reserved_at_c00[0x400]; 1710 1674 }; 1711 1675 1712 1676 enum { ··· 3329 3289 u8 reserved_at_80[0x10]; 3330 3290 u8 hairpin_peer_vhca[0x10]; 3331 3291 3332 - u8 reserved_at_a0[0x50]; 3292 + u8 reserved_at_a0[0x20]; 3333 3293 3294 + u8 reserved_at_c0[0x8]; 3295 + u8 ts_cqe_to_dest_cqn[0x18]; 3296 + 3297 + u8 reserved_at_e0[0x10]; 3334 3298 u8 packet_pacing_rate_limit_index[0x10]; 3335 3299 u8 tis_lst_sz[0x10]; 3336 3300 u8 reserved_at_110[0x10]; ··· 4248 4204 u8 reserved_at_20[0x10]; 4249 4205 u8 op_mod[0x10]; 4250 4206 4251 - u8 reserved_at_40[0x40]; 4207 + u8 other_function[0x1]; 4208 + u8 reserved_at_41[0xf]; 4209 + u8 function_id[0x10]; 
4210 + 4211 + u8 reserved_at_60[0x20]; 4252 4212 4253 4213 union mlx5_ifc_hca_cap_union_bits capability; 4254 4214 }; ··· 5509 5461 MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, 5510 5462 MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3, 5511 5463 MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4, 5464 + MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_4 = 0x5, 5512 5465 }; 5513 5466 5514 5467 struct mlx5_ifc_query_flow_group_out_bits { ··· 10706 10657 enum { 10707 10658 MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc), 10708 10659 MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT(0x13), 10660 + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT(0x20), 10709 10661 }; 10710 10662 10711 10663 enum { 10712 10664 MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, 10713 10665 MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13, 10666 + MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20, 10714 10667 }; 10715 10668 10716 10669 enum { ··· 10785 10734 struct mlx5_ifc_create_encryption_key_in_bits { 10786 10735 struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; 10787 10736 struct mlx5_ifc_encryption_key_obj_bits encryption_key_object; 10737 + }; 10738 + 10739 + struct mlx5_ifc_sampler_obj_bits { 10740 + u8 modify_field_select[0x40]; 10741 + 10742 + u8 table_type[0x8]; 10743 + u8 level[0x8]; 10744 + u8 reserved_at_50[0xf]; 10745 + u8 ignore_flow_level[0x1]; 10746 + 10747 + u8 sample_ratio[0x20]; 10748 + 10749 + u8 reserved_at_80[0x8]; 10750 + u8 sample_table_id[0x18]; 10751 + 10752 + u8 reserved_at_a0[0x8]; 10753 + u8 default_table_id[0x18]; 10754 + 10755 + u8 sw_steering_icm_address_rx[0x40]; 10756 + u8 sw_steering_icm_address_tx[0x40]; 10757 + 10758 + u8 reserved_at_140[0xa0]; 10759 + }; 10760 + 10761 + struct mlx5_ifc_create_sampler_obj_in_bits { 10762 + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; 10763 + struct mlx5_ifc_sampler_obj_bits sampler_object; 10788 10764 }; 10789 
10765 10790 10766 enum {
+1 -1
include/uapi/rdma/mlx5_user_ioctl_cmds.h
··· 232 232 MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT), 233 233 }; 234 234 235 - #define MLX5_IB_DW_MATCH_PARAM 0x80 235 + #define MLX5_IB_DW_MATCH_PARAM 0x90 236 236 237 237 struct mlx5_ib_match_params { 238 238 __u32 match_params[MLX5_IB_DW_MATCH_PARAM];