Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

1) New generic devlink param "enable_roce", for downstream devlink
reload support

2) Do vport ACL configuration on a per-vport basis when
enabling/disabling a vport. This makes it possible to enable/disable
vports outside of eswitch config in the future

3) Split the code for legacy vs offloads mode and make it clear

4) Tidy up vport locking and workqueue usage

5) Fix metadata enablement for ECPF

6) Make explicit use of VF property to publish IB_DEVICE_VIRTUAL_FUNCTION

7) E-Switch and flow steering core low level support and refactoring for
netfilter flowtables offload

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>

+765 -450
+21
Documentation/networking/device_drivers/mellanox/mlx5.rst
··· 154 154 values: 155 155 cmode runtime value smfs 156 156 157 + enable_roce: RoCE enablement state 158 + ---------------------------------- 159 + RoCE enablement state controls driver support for RoCE traffic. 160 + When RoCE is disabled, there is no gid table, only raw ethernet QPs are supported and traffic on the well known UDP RoCE port is handled as raw ethernet traffic. 161 + 162 + To change RoCE enablement state a user must change the driverinit cmode value and run devlink reload. 163 + 164 + User command examples: 165 + 166 + - Disable RoCE:: 167 + 168 + $ devlink dev param set pci/0000:06:00.0 name enable_roce value false cmode driverinit 169 + $ devlink dev reload pci/0000:06:00.0 170 + 171 + - Read RoCE enablement state:: 172 + 173 + $ devlink dev param show pci/0000:06:00.0 name enable_roce 174 + pci/0000:06:00.0: 175 + name enable_roce type generic 176 + values: 177 + cmode driverinit value true 157 178 158 179 Devlink health reporters 159 180 ========================
+1 -1
drivers/infiniband/hw/mlx5/ib_rep.c
··· 35 35 int vport_index; 36 36 37 37 if (rep->vport == MLX5_VPORT_UPLINK) 38 - profile = &uplink_rep_profile; 38 + profile = &raw_eth_profile; 39 39 else 40 40 return mlx5_ib_set_vport_rep(dev, rep); 41 41
+1 -1
drivers/infiniband/hw/mlx5/ib_rep.h
··· 10 10 #include "mlx5_ib.h" 11 11 12 12 #ifdef CONFIG_MLX5_ESWITCH 13 - extern const struct mlx5_ib_profile uplink_rep_profile; 13 + extern const struct mlx5_ib_profile raw_eth_profile; 14 14 15 15 u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw); 16 16 struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
+21 -20
drivers/infiniband/hw/mlx5/main.c
··· 1031 1031 if (MLX5_CAP_GEN(mdev, cd)) 1032 1032 props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL; 1033 1033 1034 - if (!mlx5_core_is_pf(mdev)) 1034 + if (mlx5_core_is_vf(mdev)) 1035 1035 props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION; 1036 1036 1037 1037 if (mlx5_ib_port_link_layer(ibdev, 1) == ··· 5145 5145 immutable->pkey_tbl_len = attr.pkey_tbl_len; 5146 5146 immutable->gid_tbl_len = attr.gid_tbl_len; 5147 5147 immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep); 5148 - if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce)) 5149 - immutable->max_mad_size = IB_MGMT_MAD_SIZE; 5148 + immutable->max_mad_size = IB_MGMT_MAD_SIZE; 5150 5149 5151 5150 return 0; 5152 5151 } ··· 5248 5249 { 5249 5250 int err; 5250 5251 5251 - if (MLX5_CAP_GEN(dev->mdev, roce)) { 5252 - err = mlx5_nic_vport_enable_roce(dev->mdev); 5253 - if (err) 5254 - return err; 5255 - } 5252 + err = mlx5_nic_vport_enable_roce(dev->mdev); 5253 + if (err) 5254 + return err; 5256 5255 5257 5256 err = mlx5_eth_lag_init(dev); 5258 5257 if (err) ··· 5259 5262 return 0; 5260 5263 5261 5264 err_disable_roce: 5262 - if (MLX5_CAP_GEN(dev->mdev, roce)) 5263 - mlx5_nic_vport_disable_roce(dev->mdev); 5265 + mlx5_nic_vport_disable_roce(dev->mdev); 5264 5266 5265 5267 return err; 5266 5268 } ··· 5267 5271 static void mlx5_disable_eth(struct mlx5_ib_dev *dev) 5268 5272 { 5269 5273 mlx5_eth_lag_cleanup(dev); 5270 - if (MLX5_CAP_GEN(dev->mdev, roce)) 5271 - mlx5_nic_vport_disable_roce(dev->mdev); 5274 + mlx5_nic_vport_disable_roce(dev->mdev); 5272 5275 } 5273 5276 5274 5277 struct mlx5_ib_counter { ··· 6439 6444 .query_port = mlx5_ib_rep_query_port, 6440 6445 }; 6441 6446 6442 - static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev) 6447 + static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev) 6443 6448 { 6444 6449 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops); 6445 6450 return 0; ··· 6479 6484 mlx5_remove_netdev_notifier(dev, port_num); 
6480 6485 } 6481 6486 6482 - static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev) 6487 + static int mlx5_ib_stage_raw_eth_roce_init(struct mlx5_ib_dev *dev) 6483 6488 { 6484 6489 struct mlx5_core_dev *mdev = dev->mdev; 6485 6490 enum rdma_link_layer ll; ··· 6495 6500 return err; 6496 6501 } 6497 6502 6498 - static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev) 6503 + static void mlx5_ib_stage_raw_eth_roce_cleanup(struct mlx5_ib_dev *dev) 6499 6504 { 6500 6505 mlx5_ib_stage_common_roce_cleanup(dev); 6501 6506 } ··· 6802 6807 mlx5_ib_stage_delay_drop_cleanup), 6803 6808 }; 6804 6809 6805 - const struct mlx5_ib_profile uplink_rep_profile = { 6810 + const struct mlx5_ib_profile raw_eth_profile = { 6806 6811 STAGE_CREATE(MLX5_IB_STAGE_INIT, 6807 6812 mlx5_ib_stage_init_init, 6808 6813 mlx5_ib_stage_init_cleanup), ··· 6813 6818 mlx5_ib_stage_caps_init, 6814 6819 NULL), 6815 6820 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, 6816 - mlx5_ib_stage_rep_non_default_cb, 6821 + mlx5_ib_stage_raw_eth_non_default_cb, 6817 6822 NULL), 6818 6823 STAGE_CREATE(MLX5_IB_STAGE_ROCE, 6819 - mlx5_ib_stage_rep_roce_init, 6820 - mlx5_ib_stage_rep_roce_cleanup), 6824 + mlx5_ib_stage_raw_eth_roce_init, 6825 + mlx5_ib_stage_raw_eth_roce_cleanup), 6821 6826 STAGE_CREATE(MLX5_IB_STAGE_SRQ, 6822 6827 mlx5_init_srq_table, 6823 6828 mlx5_cleanup_srq_table), ··· 6893 6898 6894 6899 static void *mlx5_ib_add(struct mlx5_core_dev *mdev) 6895 6900 { 6901 + const struct mlx5_ib_profile *profile; 6896 6902 enum rdma_link_layer ll; 6897 6903 struct mlx5_ib_dev *dev; 6898 6904 int port_type_cap; ··· 6929 6933 dev->mdev = mdev; 6930 6934 dev->num_ports = num_ports; 6931 6935 6932 - return __mlx5_ib_add(dev, &pf_profile); 6936 + if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev)) 6937 + profile = &raw_eth_profile; 6938 + else 6939 + profile = &pf_profile; 6940 + 6941 + return __mlx5_ib_add(dev, profile); 6933 6942 } 6934 6943 6935 6944 static void mlx5_ib_remove(struct 
mlx5_core_dev *mdev, void *context)
+22
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
··· 177 177 MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE, 178 178 }; 179 179 180 + static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id, 181 + union devlink_param_value val, 182 + struct netlink_ext_ack *extack) 183 + { 184 + struct mlx5_core_dev *dev = devlink_priv(devlink); 185 + bool new_state = val.vbool; 186 + 187 + if (new_state && !MLX5_CAP_GEN(dev, roce)) { 188 + NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE"); 189 + return -EOPNOTSUPP; 190 + } 191 + 192 + return 0; 193 + } 194 + 180 195 static const struct devlink_param mlx5_devlink_params[] = { 181 196 DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE, 182 197 "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING, 183 198 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 184 199 mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set, 185 200 mlx5_devlink_fs_mode_validate), 201 + DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), 202 + NULL, NULL, mlx5_devlink_enable_roce_validate), 186 203 }; 187 204 188 205 static void mlx5_devlink_set_params_init_values(struct devlink *devlink) ··· 213 196 strcpy(value.vstr, "smfs"); 214 197 devlink_param_driverinit_value_set(devlink, 215 198 MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE, 199 + value); 200 + 201 + value.vbool = MLX5_CAP_GEN(dev, roce); 202 + devlink_param_driverinit_value_set(devlink, 203 + DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, 216 204 value); 217 205 } 218 206
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1074 1074 memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); 1075 1075 slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1076 1076 slow_attr->split_count = 0; 1077 - slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN; 1077 + slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN; 1078 1078 1079 1079 rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr); 1080 1080 if (!IS_ERR(rule)) ··· 1091 1091 memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); 1092 1092 slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1093 1093 slow_attr->split_count = 0; 1094 - slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN; 1094 + slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN; 1095 1095 mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); 1096 1096 flow_flag_clear(flow, SLOW); 1097 1097 }
+348 -231
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 111 111 } 112 112 113 113 /* E-Switch vport context HW commands */ 114 - static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, 115 - void *in, int inlen) 114 + int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, 115 + bool other_vport, 116 + void *in, int inlen) 116 117 { 117 118 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0}; 118 119 119 120 MLX5_SET(modify_esw_vport_context_in, in, opcode, 120 121 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); 121 122 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); 122 - MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); 123 + MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport); 123 124 return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); 124 125 } 125 126 126 - int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport, 127 - void *in, int inlen) 128 - { 129 - return modify_esw_vport_context_cmd(esw->dev, vport, in, inlen); 130 - } 131 - 132 - static int query_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, 133 - void *out, int outlen) 127 + int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, 128 + bool other_vport, 129 + void *out, int outlen) 134 130 { 135 131 u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {}; 136 132 137 133 MLX5_SET(query_esw_vport_context_in, in, opcode, 138 134 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT); 139 135 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); 140 - MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); 136 + MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport); 141 137 return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); 142 - } 143 - 144 - int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport, 145 - void *out, int outlen) 146 - { 147 - return query_esw_vport_context_cmd(esw->dev, vport, out, outlen); 148 138 } 149 139 150 140 static int modify_esw_vport_cvlan(struct 
mlx5_core_dev *dev, u16 vport, ··· 169 179 MLX5_SET(modify_esw_vport_context_in, in, 170 180 field_select.vport_cvlan_insert, 1); 171 181 172 - return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in)); 182 + return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, 183 + in, sizeof(in)); 173 184 } 174 185 175 186 /* E-Switch FDB */ ··· 443 452 return err; 444 453 } 445 454 455 + static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) 456 + { 457 + esw_cleanup_vepa_rules(esw); 458 + esw_destroy_legacy_fdb_table(esw); 459 + esw_destroy_legacy_vepa_table(esw); 460 + } 461 + 446 462 #define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \ 447 463 MLX5_VPORT_MC_ADDR_CHANGE | \ 448 464 MLX5_VPORT_PROMISC_CHANGE) ··· 462 464 if (ret) 463 465 return ret; 464 466 465 - mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); 466 - return 0; 467 - } 468 - 469 - static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) 470 - { 471 - esw_cleanup_vepa_rules(esw); 472 - esw_destroy_legacy_fdb_table(esw); 473 - esw_destroy_legacy_vepa_table(esw); 467 + ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); 468 + if (ret) 469 + esw_destroy_legacy_table(esw); 470 + return ret; 474 471 } 475 472 476 473 static void esw_legacy_disable(struct mlx5_eswitch *esw) ··· 494 501 /* Skip mlx5_mpfs_add_mac for eswitch_managers, 495 502 * it is already done by its netdev in mlx5e_execute_l2_action 496 503 */ 497 - if (esw->manager_vport == vport) 504 + if (mlx5_esw_is_manager_vport(esw, vport)) 498 505 goto fdb_add; 499 506 500 507 err = mlx5_mpfs_add_mac(esw->dev, mac); ··· 523 530 u16 vport = vaddr->vport; 524 531 int err = 0; 525 532 526 - /* Skip mlx5_mpfs_del_mac for eswitch managerss, 533 + /* Skip mlx5_mpfs_del_mac for eswitch managers, 527 534 * it is already done by its netdev in mlx5e_execute_l2_action 528 535 */ 529 - if (!vaddr->mpfs || esw->manager_vport == vport) 536 + if (!vaddr->mpfs || 
mlx5_esw_is_manager_vport(esw, vport)) 530 537 goto fdb_del; 531 538 532 539 err = mlx5_mpfs_del_mac(esw->dev, mac); ··· 1033 1040 void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, 1034 1041 struct mlx5_vport *vport) 1035 1042 { 1036 - if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) 1043 + if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { 1037 1044 mlx5_del_flow_rules(vport->egress.allowed_vlan); 1045 + vport->egress.allowed_vlan = NULL; 1046 + } 1038 1047 1039 - if (!IS_ERR_OR_NULL(vport->egress.drop_rule)) 1040 - mlx5_del_flow_rules(vport->egress.drop_rule); 1041 - 1042 - vport->egress.allowed_vlan = NULL; 1043 - vport->egress.drop_rule = NULL; 1048 + if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) { 1049 + mlx5_del_flow_rules(vport->egress.legacy.drop_rule); 1050 + vport->egress.legacy.drop_rule = NULL; 1051 + } 1044 1052 } 1045 1053 1046 1054 void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, ··· 1061 1067 vport->egress.acl = NULL; 1062 1068 } 1063 1069 1064 - int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, 1065 - struct mlx5_vport *vport) 1070 + static int 1071 + esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw, 1072 + struct mlx5_vport *vport) 1066 1073 { 1067 1074 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1068 1075 struct mlx5_core_dev *dev = esw->dev; 1069 - struct mlx5_flow_namespace *root_ns; 1070 - struct mlx5_flow_table *acl; 1071 1076 struct mlx5_flow_group *g; 1072 1077 void *match_criteria; 1073 1078 u32 *flow_group_in; 1074 - /* The ingress acl table contains 4 groups 1075 - * (2 active rules at the same time - 1076 - * 1 allow rule from one of the first 3 groups. 1077 - * 1 drop rule from the last group): 1078 - * 1)Allow untagged traffic with smac=original mac. 1079 - * 2)Allow untagged traffic. 1080 - * 3)Allow traffic with smac=original mac. 1081 - * 4)Drop all other traffic. 
1082 - */ 1083 - int table_size = 4; 1084 - int err = 0; 1085 - 1086 - if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) 1087 - return -EOPNOTSUPP; 1088 - 1089 - if (!IS_ERR_OR_NULL(vport->ingress.acl)) 1090 - return 0; 1091 - 1092 - esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", 1093 - vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); 1094 - 1095 - root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, 1096 - mlx5_eswitch_vport_num_to_index(esw, vport->vport)); 1097 - if (!root_ns) { 1098 - esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport); 1099 - return -EOPNOTSUPP; 1100 - } 1079 + int err; 1101 1080 1102 1081 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 1103 1082 if (!flow_group_in) 1104 1083 return -ENOMEM; 1105 - 1106 - acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); 1107 - if (IS_ERR(acl)) { 1108 - err = PTR_ERR(acl); 1109 - esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", 1110 - vport->vport, err); 1111 - goto out; 1112 - } 1113 - vport->ingress.acl = acl; 1114 1084 1115 1085 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); 1116 1086 ··· 1085 1127 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 1086 1128 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); 1087 1129 1088 - g = mlx5_create_flow_group(acl, flow_group_in); 1130 + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); 1089 1131 if (IS_ERR(g)) { 1090 1132 err = PTR_ERR(g); 1091 - esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", 1133 + esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n", 1092 1134 vport->vport, err); 1093 - goto out; 1135 + goto spoof_err; 1094 1136 } 1095 - vport->ingress.allow_untagged_spoofchk_grp = g; 1137 + vport->ingress.legacy.allow_untagged_spoofchk_grp = g; 
1096 1138 1097 1139 memset(flow_group_in, 0, inlen); 1098 1140 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); ··· 1100 1142 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); 1101 1143 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); 1102 1144 1103 - g = mlx5_create_flow_group(acl, flow_group_in); 1145 + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); 1104 1146 if (IS_ERR(g)) { 1105 1147 err = PTR_ERR(g); 1106 - esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", 1148 + esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n", 1107 1149 vport->vport, err); 1108 - goto out; 1150 + goto untagged_err; 1109 1151 } 1110 - vport->ingress.allow_untagged_only_grp = g; 1152 + vport->ingress.legacy.allow_untagged_only_grp = g; 1111 1153 1112 1154 memset(flow_group_in, 0, inlen); 1113 1155 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); ··· 1116 1158 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2); 1117 1159 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); 1118 1160 1119 - g = mlx5_create_flow_group(acl, flow_group_in); 1161 + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); 1120 1162 if (IS_ERR(g)) { 1121 1163 err = PTR_ERR(g); 1122 - esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", 1164 + esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n", 1123 1165 vport->vport, err); 1124 - goto out; 1166 + goto allow_spoof_err; 1125 1167 } 1126 - vport->ingress.allow_spoofchk_only_grp = g; 1168 + vport->ingress.legacy.allow_spoofchk_only_grp = g; 1127 1169 1128 1170 memset(flow_group_in, 0, inlen); 1129 1171 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3); 1130 1172 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); 1131 1173 1132 - g = 
mlx5_create_flow_group(acl, flow_group_in); 1174 + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); 1133 1175 if (IS_ERR(g)) { 1134 1176 err = PTR_ERR(g); 1135 - esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", 1177 + esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n", 1136 1178 vport->vport, err); 1137 - goto out; 1179 + goto drop_err; 1138 1180 } 1139 - vport->ingress.drop_grp = g; 1181 + vport->ingress.legacy.drop_grp = g; 1182 + kvfree(flow_group_in); 1183 + return 0; 1140 1184 1141 - out: 1142 - if (err) { 1143 - if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp)) 1144 - mlx5_destroy_flow_group( 1145 - vport->ingress.allow_spoofchk_only_grp); 1146 - if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp)) 1147 - mlx5_destroy_flow_group( 1148 - vport->ingress.allow_untagged_only_grp); 1149 - if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp)) 1150 - mlx5_destroy_flow_group( 1151 - vport->ingress.allow_untagged_spoofchk_grp); 1152 - if (!IS_ERR_OR_NULL(vport->ingress.acl)) 1153 - mlx5_destroy_flow_table(vport->ingress.acl); 1185 + drop_err: 1186 + if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) { 1187 + mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp); 1188 + vport->ingress.legacy.allow_spoofchk_only_grp = NULL; 1154 1189 } 1155 - 1190 + allow_spoof_err: 1191 + if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) { 1192 + mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp); 1193 + vport->ingress.legacy.allow_untagged_only_grp = NULL; 1194 + } 1195 + untagged_err: 1196 + if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) { 1197 + mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp); 1198 + vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL; 1199 + } 1200 + spoof_err: 1156 1201 kvfree(flow_group_in); 1157 1202 return err; 1203 + } 1204 + 1205 + 
int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw, 1206 + struct mlx5_vport *vport, int table_size) 1207 + { 1208 + struct mlx5_core_dev *dev = esw->dev; 1209 + struct mlx5_flow_namespace *root_ns; 1210 + struct mlx5_flow_table *acl; 1211 + int vport_index; 1212 + int err; 1213 + 1214 + if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) 1215 + return -EOPNOTSUPP; 1216 + 1217 + esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", 1218 + vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); 1219 + 1220 + vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport); 1221 + root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, 1222 + vport_index); 1223 + if (!root_ns) { 1224 + esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", 1225 + vport->vport); 1226 + return -EOPNOTSUPP; 1227 + } 1228 + 1229 + acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); 1230 + if (IS_ERR(acl)) { 1231 + err = PTR_ERR(acl); 1232 + esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n", 1233 + vport->vport, err); 1234 + return err; 1235 + } 1236 + vport->ingress.acl = acl; 1237 + return 0; 1238 + } 1239 + 1240 + void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport) 1241 + { 1242 + if (!vport->ingress.acl) 1243 + return; 1244 + 1245 + mlx5_destroy_flow_table(vport->ingress.acl); 1246 + vport->ingress.acl = NULL; 1158 1247 } 1159 1248 1160 1249 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, 1161 1250 struct mlx5_vport *vport) 1162 1251 { 1163 - if (!IS_ERR_OR_NULL(vport->ingress.drop_rule)) 1164 - mlx5_del_flow_rules(vport->ingress.drop_rule); 1252 + if (vport->ingress.legacy.drop_rule) { 1253 + mlx5_del_flow_rules(vport->ingress.legacy.drop_rule); 1254 + vport->ingress.legacy.drop_rule = NULL; 1255 + } 1165 1256 1166 - if (!IS_ERR_OR_NULL(vport->ingress.allow_rule)) 1257 + if (vport->ingress.allow_rule) { 1167 1258 
mlx5_del_flow_rules(vport->ingress.allow_rule); 1168 - 1169 - vport->ingress.drop_rule = NULL; 1170 - vport->ingress.allow_rule = NULL; 1171 - 1172 - esw_vport_del_ingress_acl_modify_metadata(esw, vport); 1259 + vport->ingress.allow_rule = NULL; 1260 + } 1173 1261 } 1174 1262 1175 - void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, 1176 - struct mlx5_vport *vport) 1263 + static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw, 1264 + struct mlx5_vport *vport) 1177 1265 { 1178 - if (IS_ERR_OR_NULL(vport->ingress.acl)) 1266 + if (!vport->ingress.acl) 1179 1267 return; 1180 1268 1181 1269 esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport); 1182 1270 1183 1271 esw_vport_cleanup_ingress_rules(esw, vport); 1184 - mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp); 1185 - mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp); 1186 - mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp); 1187 - mlx5_destroy_flow_group(vport->ingress.drop_grp); 1188 - mlx5_destroy_flow_table(vport->ingress.acl); 1189 - vport->ingress.acl = NULL; 1190 - vport->ingress.drop_grp = NULL; 1191 - vport->ingress.allow_spoofchk_only_grp = NULL; 1192 - vport->ingress.allow_untagged_only_grp = NULL; 1193 - vport->ingress.allow_untagged_spoofchk_grp = NULL; 1272 + if (vport->ingress.legacy.allow_spoofchk_only_grp) { 1273 + mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp); 1274 + vport->ingress.legacy.allow_spoofchk_only_grp = NULL; 1275 + } 1276 + if (vport->ingress.legacy.allow_untagged_only_grp) { 1277 + mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp); 1278 + vport->ingress.legacy.allow_untagged_only_grp = NULL; 1279 + } 1280 + if (vport->ingress.legacy.allow_untagged_spoofchk_grp) { 1281 + mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp); 1282 + vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL; 1283 + } 1284 + if 
(vport->ingress.legacy.drop_grp) { 1285 + mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp); 1286 + vport->ingress.legacy.drop_grp = NULL; 1287 + } 1288 + esw_vport_destroy_ingress_acl_table(vport); 1194 1289 } 1195 1290 1196 1291 static int esw_vport_ingress_config(struct mlx5_eswitch *esw, 1197 1292 struct mlx5_vport *vport) 1198 1293 { 1199 - struct mlx5_fc *counter = vport->ingress.drop_counter; 1294 + struct mlx5_fc *counter = vport->ingress.legacy.drop_counter; 1200 1295 struct mlx5_flow_destination drop_ctr_dst = {0}; 1201 1296 struct mlx5_flow_destination *dst = NULL; 1202 1297 struct mlx5_flow_act flow_act = {0}; 1203 - struct mlx5_flow_spec *spec; 1298 + struct mlx5_flow_spec *spec = NULL; 1204 1299 int dest_num = 0; 1205 1300 int err = 0; 1206 1301 u8 *smac_v; 1207 1302 1303 + /* The ingress acl table contains 4 groups 1304 + * (2 active rules at the same time - 1305 + * 1 allow rule from one of the first 3 groups. 1306 + * 1 drop rule from the last group): 1307 + * 1)Allow untagged traffic with smac=original mac. 1308 + * 2)Allow untagged traffic. 1309 + * 3)Allow traffic with smac=original mac. 1310 + * 4)Drop all other traffic. 
1311 + */ 1312 + int table_size = 4; 1313 + 1208 1314 esw_vport_cleanup_ingress_rules(esw, vport); 1209 1315 1210 1316 if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { 1211 - esw_vport_disable_ingress_acl(esw, vport); 1317 + esw_vport_disable_legacy_ingress_acl(esw, vport); 1212 1318 return 0; 1213 1319 } 1214 1320 1215 - err = esw_vport_enable_ingress_acl(esw, vport); 1216 - if (err) { 1217 - mlx5_core_warn(esw->dev, 1218 - "failed to enable ingress acl (%d) on vport[%d]\n", 1219 - err, vport->vport); 1220 - return err; 1321 + if (!vport->ingress.acl) { 1322 + err = esw_vport_create_ingress_acl_table(esw, vport, table_size); 1323 + if (err) { 1324 + esw_warn(esw->dev, 1325 + "vport[%d] enable ingress acl err (%d)\n", 1326 + err, vport->vport); 1327 + return err; 1328 + } 1329 + 1330 + err = esw_vport_create_legacy_ingress_acl_groups(esw, vport); 1331 + if (err) 1332 + goto out; 1221 1333 } 1222 1334 1223 1335 esw_debug(esw->dev, ··· 1337 1309 dst = &drop_ctr_dst; 1338 1310 dest_num++; 1339 1311 } 1340 - vport->ingress.drop_rule = 1312 + vport->ingress.legacy.drop_rule = 1341 1313 mlx5_add_flow_rules(vport->ingress.acl, spec, 1342 1314 &flow_act, dst, dest_num); 1343 - if (IS_ERR(vport->ingress.drop_rule)) { 1344 - err = PTR_ERR(vport->ingress.drop_rule); 1315 + if (IS_ERR(vport->ingress.legacy.drop_rule)) { 1316 + err = PTR_ERR(vport->ingress.legacy.drop_rule); 1345 1317 esw_warn(esw->dev, 1346 1318 "vport[%d] configure ingress drop rule, err(%d)\n", 1347 1319 vport->vport, err); 1348 - vport->ingress.drop_rule = NULL; 1320 + vport->ingress.legacy.drop_rule = NULL; 1349 1321 goto out; 1350 1322 } 1323 + kvfree(spec); 1324 + return 0; 1351 1325 1352 1326 out: 1353 - if (err) 1354 - esw_vport_cleanup_ingress_rules(esw, vport); 1327 + esw_vport_disable_legacy_ingress_acl(esw, vport); 1328 + kvfree(spec); 1329 + return err; 1330 + } 1331 + 1332 + int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw, 1333 + struct mlx5_vport *vport, 
1334 + u16 vlan_id, u32 flow_action) 1335 + { 1336 + struct mlx5_flow_act flow_act = {}; 1337 + struct mlx5_flow_spec *spec; 1338 + int err = 0; 1339 + 1340 + if (vport->egress.allowed_vlan) 1341 + return -EEXIST; 1342 + 1343 + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 1344 + if (!spec) 1345 + return -ENOMEM; 1346 + 1347 + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); 1348 + MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); 1349 + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); 1350 + MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id); 1351 + 1352 + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 1353 + flow_act.action = flow_action; 1354 + vport->egress.allowed_vlan = 1355 + mlx5_add_flow_rules(vport->egress.acl, spec, 1356 + &flow_act, NULL, 0); 1357 + if (IS_ERR(vport->egress.allowed_vlan)) { 1358 + err = PTR_ERR(vport->egress.allowed_vlan); 1359 + esw_warn(esw->dev, 1360 + "vport[%d] configure egress vlan rule failed, err(%d)\n", 1361 + vport->vport, err); 1362 + vport->egress.allowed_vlan = NULL; 1363 + } 1364 + 1355 1365 kvfree(spec); 1356 1366 return err; 1357 1367 } ··· 1397 1331 static int esw_vport_egress_config(struct mlx5_eswitch *esw, 1398 1332 struct mlx5_vport *vport) 1399 1333 { 1400 - struct mlx5_fc *counter = vport->egress.drop_counter; 1334 + struct mlx5_fc *counter = vport->egress.legacy.drop_counter; 1401 1335 struct mlx5_flow_destination drop_ctr_dst = {0}; 1402 1336 struct mlx5_flow_destination *dst = NULL; 1403 1337 struct mlx5_flow_act flow_act = {0}; ··· 1424 1358 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", 1425 1359 vport->vport, vport->info.vlan, vport->info.qos); 1426 1360 1427 - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 1428 - if (!spec) { 1429 - err = -ENOMEM; 1430 - goto out; 1431 - } 1432 - 1433 1361 /* Allowed vlan rule */ 1434 - MLX5_SET_TO_ONES(fte_match_param, 
spec->match_criteria, outer_headers.cvlan_tag); 1435 - MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); 1436 - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); 1437 - MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan); 1438 - 1439 - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 1440 - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; 1441 - vport->egress.allowed_vlan = 1442 - mlx5_add_flow_rules(vport->egress.acl, spec, 1443 - &flow_act, NULL, 0); 1444 - if (IS_ERR(vport->egress.allowed_vlan)) { 1445 - err = PTR_ERR(vport->egress.allowed_vlan); 1446 - esw_warn(esw->dev, 1447 - "vport[%d] configure egress allowed vlan rule failed, err(%d)\n", 1448 - vport->vport, err); 1449 - vport->egress.allowed_vlan = NULL; 1450 - goto out; 1451 - } 1362 + err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan, 1363 + MLX5_FLOW_CONTEXT_ACTION_ALLOW); 1364 + if (err) 1365 + return err; 1452 1366 1453 1367 /* Drop others rule (star rule) */ 1454 - memset(spec, 0, sizeof(*spec)); 1368 + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 1369 + if (!spec) 1370 + goto out; 1371 + 1455 1372 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; 1456 1373 1457 1374 /* Attach egress drop flow counter */ ··· 1445 1396 dst = &drop_ctr_dst; 1446 1397 dest_num++; 1447 1398 } 1448 - vport->egress.drop_rule = 1399 + vport->egress.legacy.drop_rule = 1449 1400 mlx5_add_flow_rules(vport->egress.acl, spec, 1450 1401 &flow_act, dst, dest_num); 1451 - if (IS_ERR(vport->egress.drop_rule)) { 1452 - err = PTR_ERR(vport->egress.drop_rule); 1402 + if (IS_ERR(vport->egress.legacy.drop_rule)) { 1403 + err = PTR_ERR(vport->egress.legacy.drop_rule); 1453 1404 esw_warn(esw->dev, 1454 1405 "vport[%d] configure egress drop rule failed, err(%d)\n", 1455 1406 vport->vport, err); 1456 - vport->egress.drop_rule = NULL; 1407 + vport->egress.legacy.drop_rule = NULL; 1457 1408 } 1458 1409 out: 1459 1410 
kvfree(spec); ··· 1668 1619 u16 vport_num = vport->vport; 1669 1620 int flags; 1670 1621 1671 - if (esw->manager_vport == vport_num) 1622 + if (mlx5_esw_is_manager_vport(esw, vport_num)) 1672 1623 return; 1673 1624 1674 1625 mlx5_modify_vport_admin_state(esw->dev, ··· 1688 1639 SET_VLAN_STRIP | SET_VLAN_INSERT : 0; 1689 1640 modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos, 1690 1641 flags); 1691 - 1692 - /* Only legacy mode needs ACLs */ 1693 - if (esw->mode == MLX5_ESWITCH_LEGACY) { 1694 - esw_vport_ingress_config(esw, vport); 1695 - esw_vport_egress_config(esw, vport); 1696 - } 1697 1642 } 1698 1643 1699 - static void esw_vport_create_drop_counters(struct mlx5_vport *vport) 1644 + static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw, 1645 + struct mlx5_vport *vport) 1700 1646 { 1701 - struct mlx5_core_dev *dev = vport->dev; 1647 + int ret; 1702 1648 1703 - if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) { 1704 - vport->ingress.drop_counter = mlx5_fc_create(dev, false); 1705 - if (IS_ERR(vport->ingress.drop_counter)) { 1706 - esw_warn(dev, 1649 + /* Only non manager vports need ACL in legacy mode */ 1650 + if (mlx5_esw_is_manager_vport(esw, vport->vport)) 1651 + return 0; 1652 + 1653 + if (!mlx5_esw_is_manager_vport(esw, vport->vport) && 1654 + MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) { 1655 + vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false); 1656 + if (IS_ERR(vport->ingress.legacy.drop_counter)) { 1657 + esw_warn(esw->dev, 1707 1658 "vport[%d] configure ingress drop rule counter failed\n", 1708 1659 vport->vport); 1709 - vport->ingress.drop_counter = NULL; 1660 + vport->ingress.legacy.drop_counter = NULL; 1710 1661 } 1711 1662 } 1712 1663 1713 - if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) { 1714 - vport->egress.drop_counter = mlx5_fc_create(dev, false); 1715 - if (IS_ERR(vport->egress.drop_counter)) { 1716 - esw_warn(dev, 1664 + ret = esw_vport_ingress_config(esw, vport); 1665 + if 
(ret) 1666 + goto ingress_err; 1667 + 1668 + if (!mlx5_esw_is_manager_vport(esw, vport->vport) && 1669 + MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) { 1670 + vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false); 1671 + if (IS_ERR(vport->egress.legacy.drop_counter)) { 1672 + esw_warn(esw->dev, 1717 1673 "vport[%d] configure egress drop rule counter failed\n", 1718 1674 vport->vport); 1719 - vport->egress.drop_counter = NULL; 1675 + vport->egress.legacy.drop_counter = NULL; 1720 1676 } 1721 1677 } 1678 + 1679 + ret = esw_vport_egress_config(esw, vport); 1680 + if (ret) 1681 + goto egress_err; 1682 + 1683 + return 0; 1684 + 1685 + egress_err: 1686 + esw_vport_disable_legacy_ingress_acl(esw, vport); 1687 + mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter); 1688 + vport->egress.legacy.drop_counter = NULL; 1689 + 1690 + ingress_err: 1691 + mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter); 1692 + vport->ingress.legacy.drop_counter = NULL; 1693 + return ret; 1722 1694 } 1723 1695 1724 - static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport) 1696 + static int esw_vport_setup_acl(struct mlx5_eswitch *esw, 1697 + struct mlx5_vport *vport) 1725 1698 { 1726 - struct mlx5_core_dev *dev = vport->dev; 1727 - 1728 - if (vport->ingress.drop_counter) 1729 - mlx5_fc_destroy(dev, vport->ingress.drop_counter); 1730 - if (vport->egress.drop_counter) 1731 - mlx5_fc_destroy(dev, vport->egress.drop_counter); 1699 + if (esw->mode == MLX5_ESWITCH_LEGACY) 1700 + return esw_vport_create_legacy_acl_tables(esw, vport); 1701 + else 1702 + return esw_vport_create_offloads_acl_tables(esw, vport); 1732 1703 } 1733 1704 1734 - static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, 1735 - enum mlx5_eswitch_vport_event enabled_events) 1705 + static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw, 1706 + struct mlx5_vport *vport) 1707 + 1708 + { 1709 + if (mlx5_esw_is_manager_vport(esw, vport->vport)) 
1710 + return; 1711 + 1712 + esw_vport_disable_egress_acl(esw, vport); 1713 + mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter); 1714 + vport->egress.legacy.drop_counter = NULL; 1715 + 1716 + esw_vport_disable_legacy_ingress_acl(esw, vport); 1717 + mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter); 1718 + vport->ingress.legacy.drop_counter = NULL; 1719 + } 1720 + 1721 + static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw, 1722 + struct mlx5_vport *vport) 1723 + { 1724 + if (esw->mode == MLX5_ESWITCH_LEGACY) 1725 + esw_vport_destroy_legacy_acl_tables(esw, vport); 1726 + else 1727 + esw_vport_destroy_offloads_acl_tables(esw, vport); 1728 + } 1729 + 1730 + static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, 1731 + enum mlx5_eswitch_vport_event enabled_events) 1736 1732 { 1737 1733 u16 vport_num = vport->vport; 1734 + int ret; 1738 1735 1739 1736 mutex_lock(&esw->state_lock); 1740 1737 WARN_ON(vport->enabled); 1741 1738 1742 1739 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); 1743 1740 1744 - /* Create steering drop counters for ingress and egress ACLs */ 1745 - if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY) 1746 - esw_vport_create_drop_counters(vport); 1747 - 1748 1741 /* Restore old vport configuration */ 1749 1742 esw_apply_vport_conf(esw, vport); 1743 + 1744 + ret = esw_vport_setup_acl(esw, vport); 1745 + if (ret) 1746 + goto done; 1750 1747 1751 1748 /* Attach vport to the eswitch rate limiter */ 1752 1749 if (esw_vport_enable_qos(esw, vport, vport->info.max_rate, ··· 1806 1711 /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well 1807 1712 * in smartNIC as it's a vport group manager. 
1808 1713 */ 1809 - if (esw->manager_vport == vport_num || 1714 + if (mlx5_esw_is_manager_vport(esw, vport_num) || 1810 1715 (!vport_num && mlx5_core_is_ecpf(esw->dev))) 1811 1716 vport->info.trusted = true; 1812 1717 ··· 1814 1719 1815 1720 esw->enabled_vports++; 1816 1721 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); 1722 + done: 1817 1723 mutex_unlock(&esw->state_lock); 1724 + return ret; 1818 1725 } 1819 1726 1820 1727 static void esw_disable_vport(struct mlx5_eswitch *esw, ··· 1824 1727 { 1825 1728 u16 vport_num = vport->vport; 1826 1729 1730 + mutex_lock(&esw->state_lock); 1827 1731 if (!vport->enabled) 1828 - return; 1732 + goto done; 1829 1733 1830 1734 esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num); 1831 1735 /* Mark this vport as disabled to discard new events */ 1832 1736 vport->enabled = false; 1833 1737 1834 - /* Wait for current already scheduled events to complete */ 1835 - flush_workqueue(esw->work_queue); 1836 1738 /* Disable events from this vport */ 1837 1739 arm_vport_context_events_cmd(esw->dev, vport->vport, 0); 1838 - mutex_lock(&esw->state_lock); 1839 1740 /* We don't assume VFs will cleanup after themselves. 1840 1741 * Calling vport change handler while vport is disabled will cleanup 1841 1742 * the vport resources. 
··· 1841 1746 esw_vport_change_handle_locked(vport); 1842 1747 vport->enabled_events = 0; 1843 1748 esw_vport_disable_qos(esw, vport); 1844 - if (esw->manager_vport != vport_num && 1845 - esw->mode == MLX5_ESWITCH_LEGACY) { 1749 + 1750 + if (!mlx5_esw_is_manager_vport(esw, vport->vport) && 1751 + esw->mode == MLX5_ESWITCH_LEGACY) 1846 1752 mlx5_modify_vport_admin_state(esw->dev, 1847 1753 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, 1848 1754 vport_num, 1, 1849 1755 MLX5_VPORT_ADMIN_STATE_DOWN); 1850 - esw_vport_disable_egress_acl(esw, vport); 1851 - esw_vport_disable_ingress_acl(esw, vport); 1852 - esw_vport_destroy_drop_counters(vport); 1853 - } 1756 + 1757 + esw_vport_cleanup_acl(esw, vport); 1854 1758 esw->enabled_vports--; 1759 + 1760 + done: 1855 1761 mutex_unlock(&esw->state_lock); 1856 1762 } 1857 1763 ··· 1866 1770 1867 1771 vport_num = be16_to_cpu(eqe->data.vport_change.vport_num); 1868 1772 vport = mlx5_eswitch_get_vport(esw, vport_num); 1869 - if (IS_ERR(vport)) 1870 - return NOTIFY_OK; 1871 - 1872 - if (vport->enabled) 1773 + if (!IS_ERR(vport)) 1873 1774 queue_work(esw->work_queue, &vport->vport_change_handler); 1874 - 1875 1775 return NOTIFY_OK; 1876 1776 } 1877 1777 ··· 1938 1846 /* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs 1939 1847 * whichever are present on the eswitch. 
1940 1848 */ 1941 - void 1849 + int 1942 1850 mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, 1943 1851 enum mlx5_eswitch_vport_event enabled_events) 1944 1852 { 1945 1853 struct mlx5_vport *vport; 1854 + int num_vfs; 1855 + int ret; 1946 1856 int i; 1947 1857 1948 1858 /* Enable PF vport */ 1949 1859 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); 1950 - esw_enable_vport(esw, vport, enabled_events); 1860 + ret = esw_enable_vport(esw, vport, enabled_events); 1861 + if (ret) 1862 + return ret; 1951 1863 1952 - /* Enable ECPF vports */ 1864 + /* Enable ECPF vport */ 1953 1865 if (mlx5_ecpf_vport_exists(esw->dev)) { 1954 1866 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); 1955 - esw_enable_vport(esw, vport, enabled_events); 1867 + ret = esw_enable_vport(esw, vport, enabled_events); 1868 + if (ret) 1869 + goto ecpf_err; 1956 1870 } 1957 1871 1958 1872 /* Enable VF vports */ 1959 - mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) 1960 - esw_enable_vport(esw, vport, enabled_events); 1873 + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 1874 + ret = esw_enable_vport(esw, vport, enabled_events); 1875 + if (ret) 1876 + goto vf_err; 1877 + } 1878 + return 0; 1879 + 1880 + vf_err: 1881 + num_vfs = i - 1; 1882 + mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs) 1883 + esw_disable_vport(esw, vport); 1884 + 1885 + if (mlx5_ecpf_vport_exists(esw->dev)) { 1886 + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); 1887 + esw_disable_vport(esw, vport); 1888 + } 1889 + 1890 + ecpf_err: 1891 + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); 1892 + esw_disable_vport(esw, vport); 1893 + return ret; 1961 1894 } 1962 1895 1963 1896 /* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs ··· 2602 2485 if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY) 2603 2486 return 0; 2604 2487 2605 - if (vport->egress.drop_counter) 2606 - mlx5_fc_query(dev, vport->egress.drop_counter, 2488 + if 
(vport->egress.legacy.drop_counter) 2489 + mlx5_fc_query(dev, vport->egress.legacy.drop_counter, 2607 2490 &stats->rx_dropped, &bytes); 2608 2491 2609 - if (vport->ingress.drop_counter) 2610 - mlx5_fc_query(dev, vport->ingress.drop_counter, 2492 + if (vport->ingress.legacy.drop_counter) 2493 + mlx5_fc_query(dev, vport->ingress.legacy.drop_counter, 2611 2494 &stats->tx_dropped, &bytes); 2612 2495 2613 2496 if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
+56 -31
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 43 43 #include <linux/mlx5/fs.h> 44 44 #include "lib/mpfs.h" 45 45 46 + #define FDB_TC_MAX_CHAIN 3 47 + #define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1) 48 + #define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1) 49 + 50 + /* The index of the last real chain (FT) + 1 as chain zero is valid as well */ 51 + #define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1) 52 + 53 + #define FDB_TC_MAX_PRIO 16 54 + #define FDB_TC_LEVELS_PER_PRIO 2 55 + 46 56 #ifdef CONFIG_MLX5_ESWITCH 47 57 48 58 #define MLX5_MAX_UC_PER_VPORT(dev) \ ··· 69 59 #define mlx5_esw_has_fwd_fdb(dev) \ 70 60 MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table) 71 61 72 - #define FDB_MAX_CHAIN 3 73 - #define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1) 74 - #define FDB_MAX_PRIO 16 75 - 76 62 struct vport_ingress { 77 63 struct mlx5_flow_table *acl; 78 - struct mlx5_flow_group *allow_untagged_spoofchk_grp; 79 - struct mlx5_flow_group *allow_spoofchk_only_grp; 80 - struct mlx5_flow_group *allow_untagged_only_grp; 81 - struct mlx5_flow_group *drop_grp; 82 - struct mlx5_modify_hdr *modify_metadata; 83 - struct mlx5_flow_handle *modify_metadata_rule; 84 - struct mlx5_flow_handle *allow_rule; 85 - struct mlx5_flow_handle *drop_rule; 86 - struct mlx5_fc *drop_counter; 64 + struct mlx5_flow_handle *allow_rule; 65 + struct { 66 + struct mlx5_flow_group *allow_spoofchk_only_grp; 67 + struct mlx5_flow_group *allow_untagged_spoofchk_grp; 68 + struct mlx5_flow_group *allow_untagged_only_grp; 69 + struct mlx5_flow_group *drop_grp; 70 + struct mlx5_flow_handle *drop_rule; 71 + struct mlx5_fc *drop_counter; 72 + } legacy; 73 + struct { 74 + struct mlx5_flow_group *metadata_grp; 75 + struct mlx5_modify_hdr *modify_metadata; 76 + struct mlx5_flow_handle *modify_metadata_rule; 77 + } offloads; 87 78 }; 88 79 89 80 struct vport_egress { ··· 92 81 struct mlx5_flow_group *allowed_vlans_grp; 93 82 struct mlx5_flow_group *drop_grp; 94 83 struct mlx5_flow_handle *allowed_vlan; 95 - struct mlx5_flow_handle *drop_rule; 96 - struct mlx5_fc *drop_counter; 
84 + struct { 85 + struct mlx5_flow_handle *drop_rule; 86 + struct mlx5_fc *drop_counter; 87 + } legacy; 97 88 }; 98 89 99 90 struct mlx5_vport_drop_stats { ··· 152 139 153 140 extern const unsigned int ESW_POOLS[4]; 154 141 155 - #define PRIO_LEVELS 2 156 142 struct mlx5_eswitch_fdb { 157 143 union { 158 144 struct legacy_fdb { ··· 178 166 struct { 179 167 struct mlx5_flow_table *fdb; 180 168 u32 num_rules; 181 - } fdb_prio[FDB_MAX_CHAIN + 1][FDB_MAX_PRIO + 1][PRIO_LEVELS]; 169 + } fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO]; 182 170 /* Protects fdb_prio table */ 183 171 struct mutex fdb_prio_lock; 184 172 ··· 229 217 struct mlx5_eswitch { 230 218 struct mlx5_core_dev *dev; 231 219 struct mlx5_nb nb; 232 - /* legacy data structures */ 233 220 struct mlx5_eswitch_fdb fdb_table; 221 + /* legacy data structures */ 234 222 struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE]; 235 223 struct esw_mc_addr mc_promisc; 236 224 /* end of legacy */ ··· 263 251 int esw_offloads_init_reps(struct mlx5_eswitch *esw); 264 252 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, 265 253 struct mlx5_vport *vport); 266 - int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, 267 - struct mlx5_vport *vport); 254 + int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw, 255 + struct mlx5_vport *vport, 256 + int table_size); 257 + void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport); 268 258 void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, 269 259 struct mlx5_vport *vport); 270 260 int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, 271 261 struct mlx5_vport *vport); 272 262 void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, 273 263 struct mlx5_vport *vport); 274 - void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, 275 - struct mlx5_vport *vport); 276 - void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, 277 - struct mlx5_vport *vport); 278 264 int 
mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, 279 265 u32 rate_mbps); 280 266 ··· 302 292 struct ifla_vf_stats *vf_stats); 303 293 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule); 304 294 305 - int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport, 295 + int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, 296 + bool other_vport, 306 297 void *in, int inlen); 307 - int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport, 298 + int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, 299 + bool other_vport, 308 300 void *out, int outlen); 309 301 310 302 struct mlx5_flow_spec; ··· 433 421 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, 434 422 u16 vport, u16 vlan, u8 qos, u8 set_flags); 435 423 424 + int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw, 425 + struct mlx5_vport *vport, 426 + u16 vlan_id, u32 flow_action); 427 + 436 428 static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev, 437 429 u8 vlan_depth) 438 430 { ··· 473 457 { 474 458 return mlx5_core_is_ecpf_esw_manager(dev) ? 
475 459 MLX5_VPORT_ECPF : MLX5_VPORT_PF; 460 + } 461 + 462 + static inline bool 463 + mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num) 464 + { 465 + return esw->manager_vport == vport_num; 476 466 } 477 467 478 468 static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev) ··· 615 593 void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs); 616 594 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data); 617 595 618 - void 596 + int 619 597 mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, 620 598 enum mlx5_eswitch_vport_event enabled_events); 621 599 void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw); 600 + 601 + int 602 + esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, 603 + struct mlx5_vport *vport); 604 + void 605 + esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, 606 + struct mlx5_vport *vport); 622 607 623 608 #else /* CONFIG_MLX5_ESWITCH */ 624 609 /* eswitch API stubs */ ··· 641 612 } 642 613 643 614 static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {} 644 - 645 - #define FDB_MAX_CHAIN 1 646 - #define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1) 647 - #define FDB_MAX_PRIO 1 648 615 649 616 #endif /* CONFIG_MLX5_ESWITCH */ 650 617
+150 -127
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 75 75 u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw) 76 76 { 77 77 if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) 78 - return FDB_MAX_CHAIN; 78 + return FDB_TC_MAX_CHAIN; 79 79 80 80 return 0; 81 81 } ··· 83 83 u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw) 84 84 { 85 85 if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) 86 - return FDB_MAX_PRIO; 86 + return FDB_TC_MAX_PRIO; 87 87 88 88 return 1; 89 89 } ··· 599 599 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) 600 600 return 0; 601 601 602 - err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport, 602 + err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false, 603 603 out, sizeof(out)); 604 604 if (err) 605 605 return err; ··· 618 618 MLX5_SET(modify_esw_vport_context_in, in, 619 619 field_select.fdb_to_vport_reg_c_id, 1); 620 620 621 - return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport, 621 + return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, 622 622 in, sizeof(in)); 623 623 } 624 624 ··· 927 927 int table_prio, l = 0; 928 928 u32 flags = 0; 929 929 930 - if (chain == FDB_SLOW_PATH_CHAIN) 930 + if (chain == FDB_TC_SLOW_PATH_CHAIN) 931 931 return esw->fdb_table.offloads.slow_fdb; 932 932 933 933 mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock); ··· 952 952 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | 953 953 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); 954 954 955 - table_prio = (chain * FDB_MAX_PRIO) + prio - 1; 955 + table_prio = prio - 1; 956 956 957 957 /* create earlier levels for correct fs_core lookup when 958 958 * connecting tables ··· 989 989 { 990 990 int l; 991 991 992 - if (chain == FDB_SLOW_PATH_CHAIN) 992 + if (chain == FDB_TC_SLOW_PATH_CHAIN) 993 993 return; 994 994 995 995 mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock); ··· 1777 1777 flow_act.vlan[0].vid = 0; 1778 1778 flow_act.vlan[0].prio = 0; 1779 1779 1780 - if (vport->ingress.modify_metadata_rule) { 1780 + if 
(vport->ingress.offloads.modify_metadata_rule) { 1781 1781 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 1782 - flow_act.modify_hdr = vport->ingress.modify_metadata; 1782 + flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; 1783 1783 } 1784 1784 1785 1785 vport->ingress.allow_rule = ··· 1815 1815 MLX5_SET(set_action_in, action, data, 1816 1816 mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport)); 1817 1817 1818 - vport->ingress.modify_metadata = 1818 + vport->ingress.offloads.modify_metadata = 1819 1819 mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, 1820 1820 1, action); 1821 - if (IS_ERR(vport->ingress.modify_metadata)) { 1822 - err = PTR_ERR(vport->ingress.modify_metadata); 1821 + if (IS_ERR(vport->ingress.offloads.modify_metadata)) { 1822 + err = PTR_ERR(vport->ingress.offloads.modify_metadata); 1823 1823 esw_warn(esw->dev, 1824 1824 "failed to alloc modify header for vport %d ingress acl (%d)\n", 1825 1825 vport->vport, err); ··· 1827 1827 } 1828 1828 1829 1829 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW; 1830 - flow_act.modify_hdr = vport->ingress.modify_metadata; 1831 - vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl, 1832 - &spec, &flow_act, NULL, 0); 1833 - if (IS_ERR(vport->ingress.modify_metadata_rule)) { 1834 - err = PTR_ERR(vport->ingress.modify_metadata_rule); 1830 + flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; 1831 + vport->ingress.offloads.modify_metadata_rule = 1832 + mlx5_add_flow_rules(vport->ingress.acl, 1833 + &spec, &flow_act, NULL, 0); 1834 + if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) { 1835 + err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule); 1835 1836 esw_warn(esw->dev, 1836 1837 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n", 1837 1838 vport->vport, err); 1838 - vport->ingress.modify_metadata_rule = NULL; 1839 + vport->ingress.offloads.modify_metadata_rule 
= NULL; 1839 1840 goto out; 1840 1841 } 1841 1842 1842 1843 out: 1843 1844 if (err) 1844 - mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata); 1845 + mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); 1845 1846 return err; 1846 1847 } 1847 1848 1848 - void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, 1849 - struct mlx5_vport *vport) 1849 + static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, 1850 + struct mlx5_vport *vport) 1850 1851 { 1851 - if (vport->ingress.modify_metadata_rule) { 1852 - mlx5_del_flow_rules(vport->ingress.modify_metadata_rule); 1853 - mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata); 1852 + if (vport->ingress.offloads.modify_metadata_rule) { 1853 + mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule); 1854 + mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); 1854 1855 1855 - vport->ingress.modify_metadata_rule = NULL; 1856 + vport->ingress.offloads.modify_metadata_rule = NULL; 1856 1857 } 1857 1858 } 1858 1859 1859 - static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw, 1860 - struct mlx5_vport *vport) 1860 + static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw, 1861 + struct mlx5_vport *vport) 1861 1862 { 1862 - struct mlx5_flow_act flow_act = {0}; 1863 - struct mlx5_flow_spec *spec; 1864 - int err = 0; 1863 + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1864 + struct mlx5_flow_group *g; 1865 + u32 *flow_group_in; 1866 + int ret = 0; 1865 1867 1866 - if (!MLX5_CAP_GEN(esw->dev, prio_tag_required)) 1867 - return 0; 1868 + flow_group_in = kvzalloc(inlen, GFP_KERNEL); 1869 + if (!flow_group_in) 1870 + return -ENOMEM; 1868 1871 1869 - /* For prio tag mode, there is only 1 FTEs: 1870 - * 1) prio tag packets - pop the prio tag VLAN, allow 1871 - * Unmatched traffic is allowed by default 1872 - */ 1872 + memset(flow_group_in, 0, inlen); 1873 + 
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 1874 + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); 1873 1875 1874 - esw_vport_cleanup_egress_rules(esw, vport); 1875 - 1876 - err = esw_vport_enable_egress_acl(esw, vport); 1877 - if (err) { 1878 - mlx5_core_warn(esw->dev, 1879 - "failed to enable egress acl (%d) on vport[%d]\n", 1880 - err, vport->vport); 1881 - return err; 1882 - } 1883 - 1884 - esw_debug(esw->dev, 1885 - "vport[%d] configure prio tag egress rules\n", vport->vport); 1886 - 1887 - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 1888 - if (!spec) { 1889 - err = -ENOMEM; 1890 - goto out_no_mem; 1891 - } 1892 - 1893 - /* prio tag vlan rule - pop it so VF receives untagged packets */ 1894 - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); 1895 - MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); 1896 - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); 1897 - MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0); 1898 - 1899 - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 1900 - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | 1901 - MLX5_FLOW_CONTEXT_ACTION_ALLOW; 1902 - vport->egress.allowed_vlan = 1903 - mlx5_add_flow_rules(vport->egress.acl, spec, 1904 - &flow_act, NULL, 0); 1905 - if (IS_ERR(vport->egress.allowed_vlan)) { 1906 - err = PTR_ERR(vport->egress.allowed_vlan); 1876 + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); 1877 + if (IS_ERR(g)) { 1878 + ret = PTR_ERR(g); 1907 1879 esw_warn(esw->dev, 1908 - "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n", 1909 - vport->vport, err); 1910 - vport->egress.allowed_vlan = NULL; 1911 - goto out; 1880 + "Failed to create vport[%d] ingress metadata group, err(%d)\n", 1881 + vport->vport, ret); 1882 + goto grp_err; 1912 1883 } 1913 - 1914 - out: 1915 - kvfree(spec); 1916 - out_no_mem: 1917 - if (err) 1918 - 
esw_vport_cleanup_egress_rules(esw, vport); 1919 - return err; 1884 + vport->ingress.offloads.metadata_grp = g; 1885 + grp_err: 1886 + kvfree(flow_group_in); 1887 + return ret; 1920 1888 } 1921 1889 1922 - static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw, 1923 - struct mlx5_vport *vport) 1890 + static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport) 1891 + { 1892 + if (vport->ingress.offloads.metadata_grp) { 1893 + mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp); 1894 + vport->ingress.offloads.metadata_grp = NULL; 1895 + } 1896 + } 1897 + 1898 + static int esw_vport_ingress_config(struct mlx5_eswitch *esw, 1899 + struct mlx5_vport *vport) 1924 1900 { 1925 1901 int err; 1926 1902 ··· 1905 1929 return 0; 1906 1930 1907 1931 esw_vport_cleanup_ingress_rules(esw, vport); 1908 - 1909 - err = esw_vport_enable_ingress_acl(esw, vport); 1932 + err = esw_vport_create_ingress_acl_table(esw, vport, 1); 1910 1933 if (err) { 1911 1934 esw_warn(esw->dev, 1912 1935 "failed to enable ingress acl (%d) on vport[%d]\n", ··· 1913 1938 return err; 1914 1939 } 1915 1940 1941 + err = esw_vport_create_ingress_acl_group(esw, vport); 1942 + if (err) 1943 + goto group_err; 1944 + 1916 1945 esw_debug(esw->dev, 1917 1946 "vport[%d] configure ingress rules\n", vport->vport); 1918 1947 1919 1948 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 1920 1949 err = esw_vport_add_ingress_acl_modify_metadata(esw, vport); 1921 1950 if (err) 1922 - goto out; 1951 + goto metadata_err; 1923 1952 } 1924 1953 1925 1954 if (MLX5_CAP_GEN(esw->dev, prio_tag_required) && 1926 1955 mlx5_eswitch_is_vf_vport(esw, vport->vport)) { 1927 1956 err = esw_vport_ingress_prio_tag_config(esw, vport); 1928 1957 if (err) 1929 - goto out; 1958 + goto prio_tag_err; 1930 1959 } 1960 + return 0; 1931 1961 1932 - out: 1962 + prio_tag_err: 1963 + esw_vport_del_ingress_acl_modify_metadata(esw, vport); 1964 + metadata_err: 1965 + esw_vport_cleanup_ingress_rules(esw, vport); 1966 + 
esw_vport_destroy_ingress_acl_group(vport); 1967 + group_err: 1968 + esw_vport_destroy_ingress_acl_table(vport); 1969 + return err; 1970 + } 1971 + 1972 + static int esw_vport_egress_config(struct mlx5_eswitch *esw, 1973 + struct mlx5_vport *vport) 1974 + { 1975 + int err; 1976 + 1977 + if (!MLX5_CAP_GEN(esw->dev, prio_tag_required)) 1978 + return 0; 1979 + 1980 + esw_vport_cleanup_egress_rules(esw, vport); 1981 + 1982 + err = esw_vport_enable_egress_acl(esw, vport); 1933 1983 if (err) 1934 - esw_vport_disable_ingress_acl(esw, vport); 1984 + return err; 1985 + 1986 + /* For prio tag mode, there is only 1 FTEs: 1987 + * 1) prio tag packets - pop the prio tag VLAN, allow 1988 + * Unmatched traffic is allowed by default 1989 + */ 1990 + esw_debug(esw->dev, 1991 + "vport[%d] configure prio tag egress rules\n", vport->vport); 1992 + 1993 + /* prio tag vlan rule - pop it so VF receives untagged packets */ 1994 + err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0, 1995 + MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | 1996 + MLX5_FLOW_CONTEXT_ACTION_ALLOW); 1997 + if (err) 1998 + esw_vport_disable_egress_acl(esw, vport); 1999 + 1935 2000 return err; 1936 2001 } 1937 2002 ··· 1995 1980 return true; 1996 1981 } 1997 1982 1998 - static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw) 1983 + int 1984 + esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, 1985 + struct mlx5_vport *vport) 1986 + { 1987 + int err; 1988 + 1989 + err = esw_vport_ingress_config(esw, vport); 1990 + if (err) 1991 + return err; 1992 + 1993 + if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) { 1994 + err = esw_vport_egress_config(esw, vport); 1995 + if (err) { 1996 + esw_vport_del_ingress_acl_modify_metadata(esw, vport); 1997 + esw_vport_cleanup_ingress_rules(esw, vport); 1998 + esw_vport_destroy_ingress_acl_table(vport); 1999 + } 2000 + } 2001 + return err; 2002 + } 2003 + 2004 + void 2005 + esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, 2006 + struct mlx5_vport 
*vport) 2007 + { 2008 + esw_vport_disable_egress_acl(esw, vport); 2009 + esw_vport_del_ingress_acl_modify_metadata(esw, vport); 2010 + esw_vport_cleanup_ingress_rules(esw, vport); 2011 + esw_vport_destroy_ingress_acl_group(vport); 2012 + esw_vport_destroy_ingress_acl_table(vport); 2013 + } 2014 + 2015 + static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) 1999 2016 { 2000 2017 struct mlx5_vport *vport; 2001 - int i, j; 2002 2018 int err; 2003 2019 2004 2020 if (esw_check_vport_match_metadata_supported(esw)) 2005 2021 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; 2006 2022 2007 - mlx5_esw_for_all_vports(esw, i, vport) { 2008 - err = esw_vport_ingress_common_config(esw, vport); 2009 - if (err) 2010 - goto err_ingress; 2011 - 2012 - if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) { 2013 - err = esw_vport_egress_prio_tag_config(esw, vport); 2014 - if (err) 2015 - goto err_egress; 2016 - } 2017 - } 2018 - 2019 - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) 2020 - esw_info(esw->dev, "Use metadata reg_c as source vport to match\n"); 2021 - 2022 - return 0; 2023 - 2024 - err_egress: 2025 - esw_vport_disable_ingress_acl(esw, vport); 2026 - err_ingress: 2027 - for (j = MLX5_VPORT_PF; j < i; j++) { 2028 - vport = &esw->vports[j]; 2029 - esw_vport_disable_egress_acl(esw, vport); 2030 - esw_vport_disable_ingress_acl(esw, vport); 2031 - } 2032 - 2023 + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); 2024 + err = esw_vport_create_offloads_acl_tables(esw, vport); 2025 + if (err) 2026 + esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; 2033 2027 return err; 2034 2028 } 2035 2029 2036 - static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw) 2030 + static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) 2037 2031 { 2038 2032 struct mlx5_vport *vport; 2039 - int i; 2040 2033 2041 - mlx5_esw_for_all_vports(esw, i, vport) { 2042 - esw_vport_disable_egress_acl(esw, vport); 2043 - 
esw_vport_disable_ingress_acl(esw, vport); 2044 - } 2045 - 2034 + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); 2035 + esw_vport_destroy_offloads_acl_tables(esw, vport); 2046 2036 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; 2047 2037 } 2048 2038 ··· 2065 2045 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); 2066 2046 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); 2067 2047 2068 - err = esw_create_offloads_acl_tables(esw); 2048 + err = esw_create_uplink_offloads_acl_tables(esw); 2069 2049 if (err) 2070 2050 return err; 2071 2051 ··· 2090 2070 esw_destroy_offloads_fdb_tables(esw); 2091 2071 2092 2072 create_fdb_err: 2093 - esw_destroy_offloads_acl_tables(esw); 2073 + esw_destroy_uplink_offloads_acl_tables(esw); 2094 2074 2095 2075 return err; 2096 2076 } ··· 2100 2080 esw_destroy_vport_rx_group(esw); 2101 2081 esw_destroy_offloads_table(esw); 2102 2082 esw_destroy_offloads_fdb_tables(esw); 2103 - esw_destroy_offloads_acl_tables(esw); 2083 + esw_destroy_uplink_offloads_acl_tables(esw); 2104 2084 } 2105 2085 2106 2086 static void ··· 2189 2169 if (err) 2190 2170 goto err_vport_metadata; 2191 2171 2192 - mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); 2172 + err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); 2173 + if (err) 2174 + goto err_vports; 2193 2175 2194 2176 err = esw_offloads_load_all_reps(esw); 2195 2177 if (err) ··· 2204 2182 2205 2183 err_reps: 2206 2184 mlx5_eswitch_disable_pf_vf_vports(esw); 2185 + err_vports: 2207 2186 esw_set_passing_vport_metadata(esw, false); 2208 2187 err_vport_metadata: 2209 2188 esw_offloads_steering_cleanup(esw);
+94 -35
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 2400 2400 int acc_level_ns = acc_level; 2401 2401 2402 2402 prio->start_level = acc_level; 2403 - fs_for_each_ns(ns, prio) 2403 + fs_for_each_ns(ns, prio) { 2404 2404 /* This updates start_level and num_levels of ns's priority descendants */ 2405 2405 acc_level_ns = set_prio_attrs_in_ns(ns, acc_level); 2406 + 2407 + /* If this a prio with chains, and we can jump from one chain 2408 + * (namepsace) to another, so we accumulate the levels 2409 + */ 2410 + if (prio->node.type == FS_TYPE_PRIO_CHAINS) 2411 + acc_level = acc_level_ns; 2412 + } 2413 + 2406 2414 if (!prio->num_levels) 2407 2415 prio->num_levels = acc_level_ns - prio->start_level; 2408 2416 WARN_ON(prio->num_levels < acc_level_ns - prio->start_level); ··· 2599 2591 steering->rdma_rx_root_ns = NULL; 2600 2592 return err; 2601 2593 } 2602 - static int init_fdb_root_ns(struct mlx5_flow_steering *steering) 2594 + 2595 + /* FT and tc chains are stored in the same array so we can re-use the 2596 + * mlx5_get_fdb_sub_ns() and tc api for FT chains. 2597 + * When creating a new ns for each chain store it in the first available slot. 2598 + * Assume tc chains are created and stored first and only then the FT chain. 
2599 + */ 2600 + static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering, 2601 + struct mlx5_flow_namespace *ns) 2602 + { 2603 + int chain = 0; 2604 + 2605 + while (steering->fdb_sub_ns[chain]) 2606 + ++chain; 2607 + 2608 + steering->fdb_sub_ns[chain] = ns; 2609 + } 2610 + 2611 + static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering, 2612 + struct fs_prio *maj_prio) 2603 2613 { 2604 2614 struct mlx5_flow_namespace *ns; 2605 - struct fs_prio *maj_prio; 2606 2615 struct fs_prio *min_prio; 2616 + int prio; 2617 + 2618 + ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF); 2619 + if (IS_ERR(ns)) 2620 + return PTR_ERR(ns); 2621 + 2622 + for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) { 2623 + min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO); 2624 + if (IS_ERR(min_prio)) 2625 + return PTR_ERR(min_prio); 2626 + } 2627 + 2628 + store_fdb_sub_ns_prio_chain(steering, ns); 2629 + 2630 + return 0; 2631 + } 2632 + 2633 + static int create_fdb_chains(struct mlx5_flow_steering *steering, 2634 + int fs_prio, 2635 + int chains) 2636 + { 2637 + struct fs_prio *maj_prio; 2607 2638 int levels; 2608 2639 int chain; 2609 - int prio; 2640 + int err; 2641 + 2642 + levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains; 2643 + maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, 2644 + fs_prio, 2645 + levels); 2646 + if (IS_ERR(maj_prio)) 2647 + return PTR_ERR(maj_prio); 2648 + 2649 + for (chain = 0; chain < chains; chain++) { 2650 + err = create_fdb_sub_ns_prio_chain(steering, maj_prio); 2651 + if (err) 2652 + return err; 2653 + } 2654 + 2655 + return 0; 2656 + } 2657 + 2658 + static int create_fdb_fast_path(struct mlx5_flow_steering *steering) 2659 + { 2660 + int err; 2661 + 2662 + steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS, 2663 + sizeof(*steering->fdb_sub_ns), 2664 + GFP_KERNEL); 2665 + if (!steering->fdb_sub_ns) 2666 + return -ENOMEM; 2667 + 2668 + err = create_fdb_chains(steering, FDB_TC_OFFLOAD, 
FDB_TC_MAX_CHAIN + 1); 2669 + if (err) 2670 + return err; 2671 + 2672 + err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1); 2673 + if (err) 2674 + return err; 2675 + 2676 + return 0; 2677 + } 2678 + 2679 + static int init_fdb_root_ns(struct mlx5_flow_steering *steering) 2680 + { 2681 + struct fs_prio *maj_prio; 2610 2682 int err; 2611 2683 2612 2684 steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB); 2613 2685 if (!steering->fdb_root_ns) 2614 - return -ENOMEM; 2615 - 2616 - steering->fdb_sub_ns = kzalloc(sizeof(steering->fdb_sub_ns) * 2617 - (FDB_MAX_CHAIN + 1), GFP_KERNEL); 2618 - if (!steering->fdb_sub_ns) 2619 2686 return -ENOMEM; 2620 2687 2621 2688 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, ··· 2699 2616 err = PTR_ERR(maj_prio); 2700 2617 goto out_err; 2701 2618 } 2702 - 2703 - levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1); 2704 - maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, 2705 - FDB_FAST_PATH, 2706 - levels); 2707 - if (IS_ERR(maj_prio)) { 2708 - err = PTR_ERR(maj_prio); 2619 + err = create_fdb_fast_path(steering); 2620 + if (err) 2709 2621 goto out_err; 2710 - } 2711 - 2712 - for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) { 2713 - ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF); 2714 - if (IS_ERR(ns)) { 2715 - err = PTR_ERR(ns); 2716 - goto out_err; 2717 - } 2718 - 2719 - for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) { 2720 - min_prio = fs_create_prio(ns, prio, 2); 2721 - if (IS_ERR(min_prio)) { 2722 - err = PTR_ERR(min_prio); 2723 - goto out_err; 2724 - } 2725 - } 2726 - 2727 - steering->fdb_sub_ns[chain] = ns; 2728 - } 2729 2622 2730 2623 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1); 2731 2624 if (IS_ERR(maj_prio)) {
+16
include/linux/mlx5/driver.h
··· 1121 1121 return dev->coredev_type == MLX5_COREDEV_PF; 1122 1122 } 1123 1123 1124 + static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev) 1125 + { 1126 + return dev->coredev_type == MLX5_COREDEV_VF; 1127 + } 1128 + 1124 1129 static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev) 1125 1130 { 1126 1131 return dev->caps.embedded_cpu; ··· 1190 1185 enum { 1191 1186 MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, 1192 1187 }; 1188 + 1189 + static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev) 1190 + { 1191 + struct devlink *devlink = priv_to_devlink(dev); 1192 + union devlink_param_value val; 1193 + 1194 + devlink_param_driverinit_value_get(devlink, 1195 + DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, 1196 + &val); 1197 + return val.vbool; 1198 + } 1193 1199 1194 1200 #endif /* MLX5_DRIVER_H */
+2 -1
include/linux/mlx5/fs.h
··· 80 80 81 81 enum { 82 82 FDB_BYPASS_PATH, 83 - FDB_FAST_PATH, 83 + FDB_TC_OFFLOAD, 84 + FDB_FT_OFFLOAD, 84 85 FDB_SLOW_PATH, 85 86 }; 86 87
+1 -1
include/linux/mlx5/mlx5_ifc.h
··· 1153 1153 u8 log_max_srq[0x5]; 1154 1154 u8 reserved_at_b0[0x10]; 1155 1155 1156 - u8 reserved_at_c0[0x8]; 1156 + u8 max_sgl_for_optimized_performance[0x8]; 1157 1157 u8 log_max_cq_sz[0x8]; 1158 1158 u8 reserved_at_d0[0xb]; 1159 1159 u8 log_max_cq[0x5];
+4
include/net/devlink.h
··· 402 402 DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, 403 403 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, 404 404 DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE, 405 + DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, 405 406 406 407 /* add new param generic ids above here*/ 407 408 __DEVLINK_PARAM_GENERIC_ID_MAX, ··· 436 435 #define DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME \ 437 436 "reset_dev_on_drv_probe" 438 437 #define DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE DEVLINK_PARAM_TYPE_U8 438 + 439 + #define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME "enable_roce" 440 + #define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE DEVLINK_PARAM_TYPE_BOOL 439 441 440 442 #define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \ 441 443 { \
+5
net/core/devlink.c
··· 3006 3006 .name = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME, 3007 3007 .type = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE, 3008 3008 }, 3009 + { 3010 + .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, 3011 + .name = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME, 3012 + .type = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE, 3013 + }, 3009 3014 }; 3010 3015 3011 3016 static int devlink_param_generic_verify(const struct devlink_param *param)