Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/mlx5e: Support IPsec packet offload for RX in switchdev mode

As decryption must be done first, add new prio for IPsec offload in
FDB, and put it just lower than BYPASS prio and higher than TC prio.
Three levels are added for RX. The first one is for ip xfrm policy. SA
table is created in the second level for ip xfrm state. The status
table is created in the last level to check the decryption result. On
success, packets continue with the next stage of processing; otherwise they are dropped.
For now, the set of reg c1 is removed for switchdev mode, and the
datapath process will be added in the next patch.

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Link: https://lore.kernel.org/r/c91063554cf643fb50b99cf093e8a9bf11729de5.1690802064.git.leon@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Jianbo Liu and committed by
Jakub Kicinski
1762f132 6e125265

+313 -47
+4
drivers/net/ethernet/mellanox/mlx5/core/Makefile
··· 75 75 esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \ 76 76 esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o 77 77 78 + ifneq ($(CONFIG_MLX5_EN_IPSEC),) 79 + mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/ipsec_fs.o 80 + endif 81 + 78 82 mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o esw/bridge_debugfs.o \ 79 83 en/rep/bridge.o 80 84
+33 -8
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
··· 143 143 atomic64_t ipsec_tx_drop_trailer; 144 144 }; 145 145 146 - struct mlx5e_ipsec_rx; 146 + struct mlx5e_ipsec_fc; 147 147 struct mlx5e_ipsec_tx; 148 148 149 149 struct mlx5e_ipsec_work { ··· 180 180 enum mlx5_flow_namespace_type chains_ns; 181 181 }; 182 182 183 + struct mlx5e_ipsec_ft { 184 + struct mutex mutex; /* Protect changes to this struct */ 185 + struct mlx5_flow_table *pol; 186 + struct mlx5_flow_table *sa; 187 + struct mlx5_flow_table *status; 188 + u32 refcnt; 189 + }; 190 + 191 + struct mlx5e_ipsec_rule { 192 + struct mlx5_flow_handle *rule; 193 + struct mlx5_modify_hdr *modify_hdr; 194 + struct mlx5_pkt_reformat *pkt_reformat; 195 + struct mlx5_fc *fc; 196 + }; 197 + 198 + struct mlx5e_ipsec_miss { 199 + struct mlx5_flow_group *group; 200 + struct mlx5_flow_handle *rule; 201 + }; 202 + 203 + struct mlx5e_ipsec_rx { 204 + struct mlx5e_ipsec_ft ft; 205 + struct mlx5e_ipsec_miss pol; 206 + struct mlx5e_ipsec_miss sa; 207 + struct mlx5e_ipsec_rule status; 208 + struct mlx5e_ipsec_miss status_drop; 209 + struct mlx5_fc *status_drop_cnt; 210 + struct mlx5e_ipsec_fc *fc; 211 + struct mlx5_fs_chains *chains; 212 + u8 allow_tunnel_mode : 1; 213 + }; 214 + 183 215 struct mlx5e_ipsec { 184 216 struct mlx5_core_dev *mdev; 185 217 struct xarray sadb; ··· 235 203 u32 esn; 236 204 u32 esn_msb; 237 205 u8 overlap: 1; 238 - }; 239 - 240 - struct mlx5e_ipsec_rule { 241 - struct mlx5_flow_handle *rule; 242 - struct mlx5_modify_hdr *modify_hdr; 243 - struct mlx5_pkt_reformat *pkt_reformat; 244 - struct mlx5_fc *fc; 245 206 }; 246 207 247 208 struct mlx5e_ipsec_limits {
+43 -37
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
··· 9 9 #include "fs_core.h" 10 10 #include "lib/ipsec_fs_roce.h" 11 11 #include "lib/fs_chains.h" 12 + #include "esw/ipsec_fs.h" 12 13 13 14 #define NUM_IPSEC_FTE BIT(15) 14 15 #define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16 ··· 18 17 struct mlx5e_ipsec_fc { 19 18 struct mlx5_fc *cnt; 20 19 struct mlx5_fc *drop; 21 - }; 22 - 23 - struct mlx5e_ipsec_ft { 24 - struct mutex mutex; /* Protect changes to this struct */ 25 - struct mlx5_flow_table *pol; 26 - struct mlx5_flow_table *sa; 27 - struct mlx5_flow_table *status; 28 - u32 refcnt; 29 - }; 30 - 31 - struct mlx5e_ipsec_miss { 32 - struct mlx5_flow_group *group; 33 - struct mlx5_flow_handle *rule; 34 - }; 35 - 36 - struct mlx5e_ipsec_rx { 37 - struct mlx5e_ipsec_ft ft; 38 - struct mlx5e_ipsec_miss pol; 39 - struct mlx5e_ipsec_miss sa; 40 - struct mlx5e_ipsec_rule status; 41 - struct mlx5e_ipsec_fc *fc; 42 - struct mlx5_fs_chains *chains; 43 - u8 allow_tunnel_mode : 1; 44 20 }; 45 21 46 22 struct mlx5e_ipsec_tx { ··· 237 259 static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, 238 260 struct mlx5e_ipsec_rx *rx, u32 family) 239 261 { 240 - 241 262 /* disconnect */ 242 - ipsec_rx_ft_disconnect(ipsec, family); 263 + if (rx != ipsec->rx_esw) 264 + ipsec_rx_ft_disconnect(ipsec, family); 243 265 244 266 if (rx->chains) { 245 267 ipsec_chains_destroy(rx->chains); ··· 254 276 mlx5_destroy_flow_table(rx->ft.sa); 255 277 if (rx->allow_tunnel_mode) 256 278 mlx5_eswitch_unblock_encap(mdev); 257 - mlx5_del_flow_rules(rx->status.rule); 258 - mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr); 279 + if (rx == ipsec->rx_esw) { 280 + mlx5_esw_ipsec_rx_status_destroy(ipsec, rx); 281 + } else { 282 + mlx5_del_flow_rules(rx->status.rule); 283 + mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr); 284 + } 259 285 mlx5_destroy_flow_table(rx->ft.status); 260 286 261 287 mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family); ··· 270 288 u32 family, 271 289 struct mlx5e_ipsec_rx_create_attr *attr) 272 290 
{ 291 + if (rx == ipsec->rx_esw) { 292 + /* For packet offload in switchdev mode, RX & TX use FDB namespace */ 293 + attr->ns = ipsec->tx_esw->ns; 294 + mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr); 295 + return; 296 + } 297 + 273 298 attr->ns = mlx5e_fs_get_ns(ipsec->fs, false); 274 299 attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false); 275 300 attr->family = family; ··· 294 305 { 295 306 struct mlx5_flow_table *ft; 296 307 int err; 308 + 309 + if (rx == ipsec->rx_esw) 310 + return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest); 297 311 298 312 *dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family)); 299 313 err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest, ··· 349 357 350 358 dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 351 359 dest[1].counter_id = mlx5_fc_id(rx->fc->cnt); 352 - err = ipsec_status_rule(mdev, rx, dest); 360 + if (rx == ipsec->rx_esw) 361 + err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest); 362 + else 363 + err = ipsec_status_rule(mdev, rx, dest); 353 364 if (err) 354 365 goto err_add; 355 366 ··· 401 406 402 407 connect: 403 408 /* connect */ 404 - ipsec_rx_ft_connect(ipsec, rx, &attr); 409 + if (rx != ipsec->rx_esw) 410 + ipsec_rx_ft_connect(ipsec, rx, &attr); 405 411 return 0; 406 412 407 413 err_pol_miss: ··· 860 864 } 861 865 } 862 866 863 - static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec, u8 dir) 867 + static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec, 868 + int type, u8 dir) 864 869 { 870 + if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET) 871 + return MLX5_FLOW_NAMESPACE_FDB; 872 + 865 873 if (dir == XFRM_DEV_OFFLOAD_IN) 866 874 return MLX5_FLOW_NAMESPACE_KERNEL; 867 875 868 876 return MLX5_FLOW_NAMESPACE_EGRESS; 869 877 } 870 878 871 - static int setup_modify_header(struct mlx5e_ipsec *ipsec, u32 val, u8 dir, 879 + static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir, 872 880 struct 
mlx5_flow_act *flow_act) 873 881 { 874 - enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, dir); 882 + enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir); 875 883 u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; 876 884 struct mlx5_core_dev *mdev = ipsec->mdev; 877 885 struct mlx5_modify_hdr *modify_hdr; ··· 1085 1085 struct mlx5_accel_esp_xfrm_attrs *attrs, 1086 1086 struct mlx5_flow_act *flow_act) 1087 1087 { 1088 - enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->dir); 1088 + enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type, 1089 + attrs->dir); 1089 1090 struct mlx5_pkt_reformat_params reformat_params = {}; 1090 1091 struct mlx5_core_dev *mdev = ipsec->mdev; 1091 1092 struct mlx5_pkt_reformat *pkt_reformat; ··· 1128 1127 struct mlx5_flow_spec *spec; 1129 1128 struct mlx5e_ipsec_rx *rx; 1130 1129 struct mlx5_fc *counter; 1131 - int err; 1130 + int err = 0; 1132 1131 1133 1132 rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type); 1134 1133 if (IS_ERR(rx)) ··· 1149 1148 setup_fte_esp(spec); 1150 1149 setup_fte_no_frags(spec); 1151 1150 1152 - err = setup_modify_header(ipsec, sa_entry->ipsec_obj_id | BIT(31), 1153 - XFRM_DEV_OFFLOAD_IN, &flow_act); 1151 + if (rx != ipsec->rx_esw) 1152 + err = setup_modify_header(ipsec, attrs->type, 1153 + sa_entry->ipsec_obj_id | BIT(31), 1154 + XFRM_DEV_OFFLOAD_IN, &flow_act); 1154 1155 if (err) 1155 1156 goto err_mod_header; 1156 1157 ··· 1343 1340 if (!attrs->reqid) 1344 1341 break; 1345 1342 1346 - err = setup_modify_header(ipsec, attrs->reqid, 1343 + err = setup_modify_header(ipsec, attrs->type, attrs->reqid, 1347 1344 XFRM_DEV_OFFLOAD_OUT, &flow_act); 1348 1345 if (err) 1349 1346 goto err_mod_header; ··· 1391 1388 { 1392 1389 struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs; 1393 1390 struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry); 1391 + struct mlx5e_ipsec *ipsec = pol_entry->ipsec; 1394 1392 struct 
mlx5_flow_destination dest[2]; 1395 1393 struct mlx5_flow_act flow_act = {}; 1396 1394 struct mlx5_flow_handle *rule; ··· 1437 1433 } 1438 1434 1439 1435 flow_act.flags |= FLOW_ACT_NO_APPEND; 1436 + if (rx == ipsec->rx_esw && rx->chains) 1437 + flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; 1440 1438 dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 1441 1439 dest[dstn].ft = rx->ft.sa; 1442 1440 dstn++;
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
··· 45 45 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap)) 46 46 caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD; 47 47 48 - if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) && 49 - MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) 48 + if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) && 49 + MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) || 50 + MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)) 50 51 caps |= MLX5_IPSEC_CAP_PRIO; 51 52 52 53 if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
+184
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 + // Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 + 4 + #include "fs_core.h" 5 + #include "eswitch.h" 6 + #include "en_accel/ipsec.h" 7 + #include "esw/ipsec_fs.h" 8 + 9 + enum { 10 + MLX5_ESW_IPSEC_RX_POL_FT_LEVEL, 11 + MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL, 12 + MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL, 13 + }; 14 + 15 + static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec, 16 + struct mlx5e_ipsec_rx *rx) 17 + { 18 + mlx5_del_flow_rules(rx->status_drop.rule); 19 + mlx5_destroy_flow_group(rx->status_drop.group); 20 + mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt); 21 + } 22 + 23 + static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec, 24 + struct mlx5e_ipsec_rx *rx) 25 + { 26 + mlx5_del_flow_rules(rx->status.rule); 27 + mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0); 28 + } 29 + 30 + static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec, 31 + struct mlx5e_ipsec_rx *rx) 32 + { 33 + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 34 + struct mlx5_flow_table *ft = rx->ft.status; 35 + struct mlx5_core_dev *mdev = ipsec->mdev; 36 + struct mlx5_flow_destination dest = {}; 37 + struct mlx5_flow_act flow_act = {}; 38 + struct mlx5_flow_handle *rule; 39 + struct mlx5_fc *flow_counter; 40 + struct mlx5_flow_spec *spec; 41 + struct mlx5_flow_group *g; 42 + u32 *flow_group_in; 43 + int err = 0; 44 + 45 + flow_group_in = kvzalloc(inlen, GFP_KERNEL); 46 + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 47 + if (!flow_group_in || !spec) { 48 + err = -ENOMEM; 49 + goto err_out; 50 + } 51 + 52 + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1); 53 + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1); 54 + g = mlx5_create_flow_group(ft, flow_group_in); 55 + if (IS_ERR(g)) { 56 + err = PTR_ERR(g); 57 + mlx5_core_err(mdev, 58 + "Failed to add ipsec rx status drop flow 
group, err=%d\n", err); 59 + goto err_out; 60 + } 61 + 62 + flow_counter = mlx5_fc_create(mdev, false); 63 + if (IS_ERR(flow_counter)) { 64 + err = PTR_ERR(flow_counter); 65 + mlx5_core_err(mdev, 66 + "Failed to add ipsec rx status drop rule counter, err=%d\n", err); 67 + goto err_cnt; 68 + } 69 + 70 + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; 71 + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 72 + dest.counter_id = mlx5_fc_id(flow_counter); 73 + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; 74 + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); 75 + if (IS_ERR(rule)) { 76 + err = PTR_ERR(rule); 77 + mlx5_core_err(mdev, 78 + "Failed to add ipsec rx status drop rule, err=%d\n", err); 79 + goto err_rule; 80 + } 81 + 82 + rx->status_drop.group = g; 83 + rx->status_drop.rule = rule; 84 + rx->status_drop_cnt = flow_counter; 85 + 86 + kvfree(flow_group_in); 87 + kvfree(spec); 88 + return 0; 89 + 90 + err_rule: 91 + mlx5_fc_destroy(mdev, flow_counter); 92 + err_cnt: 93 + mlx5_destroy_flow_group(g); 94 + err_out: 95 + kvfree(flow_group_in); 96 + kvfree(spec); 97 + return err; 98 + } 99 + 100 + static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec, 101 + struct mlx5e_ipsec_rx *rx, 102 + struct mlx5_flow_destination *dest) 103 + { 104 + struct mlx5_flow_act flow_act = {}; 105 + struct mlx5_flow_handle *rule; 106 + struct mlx5_flow_spec *spec; 107 + int err; 108 + 109 + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 110 + if (!spec) 111 + return -ENOMEM; 112 + 113 + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 114 + misc_parameters_2.ipsec_syndrome); 115 + MLX5_SET(fte_match_param, spec->match_value, 116 + misc_parameters_2.ipsec_syndrome, 0); 117 + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; 118 + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; 119 + flow_act.flags = FLOW_ACT_NO_APPEND; 120 + flow_act.action = 
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | 121 + MLX5_FLOW_CONTEXT_ACTION_COUNT; 122 + rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2); 123 + if (IS_ERR(rule)) { 124 + err = PTR_ERR(rule); 125 + mlx5_core_warn(ipsec->mdev, 126 + "Failed to add ipsec rx status pass rule, err=%d\n", err); 127 + goto err_rule; 128 + } 129 + 130 + rx->status.rule = rule; 131 + kvfree(spec); 132 + return 0; 133 + 134 + err_rule: 135 + kvfree(spec); 136 + return err; 137 + } 138 + 139 + void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec, 140 + struct mlx5e_ipsec_rx *rx) 141 + { 142 + esw_ipsec_rx_status_pass_destroy(ipsec, rx); 143 + esw_ipsec_rx_status_drop_destroy(ipsec, rx); 144 + } 145 + 146 + int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec, 147 + struct mlx5e_ipsec_rx *rx, 148 + struct mlx5_flow_destination *dest) 149 + { 150 + int err; 151 + 152 + err = esw_ipsec_rx_status_drop_create(ipsec, rx); 153 + if (err) 154 + return err; 155 + 156 + err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest); 157 + if (err) 158 + goto err_pass_create; 159 + 160 + return 0; 161 + 162 + err_pass_create: 163 + esw_ipsec_rx_status_drop_destroy(ipsec, rx); 164 + return err; 165 + } 166 + 167 + void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, 168 + struct mlx5e_ipsec_rx_create_attr *attr) 169 + { 170 + attr->prio = FDB_CRYPTO_INGRESS; 171 + attr->pol_level = MLX5_ESW_IPSEC_RX_POL_FT_LEVEL; 172 + attr->sa_level = MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL; 173 + attr->status_level = MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL; 174 + attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB; 175 + } 176 + 177 + int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec, 178 + struct mlx5_flow_destination *dest) 179 + { 180 + dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 181 + dest->ft = mlx5_chains_get_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0); 182 + 183 + return 0; 184 + }
+39
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ 2 + /* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ 3 + 4 + #ifndef __MLX5_ESW_IPSEC_FS_H__ 5 + #define __MLX5_ESW_IPSEC_FS_H__ 6 + 7 + struct mlx5e_ipsec; 8 + 9 + #ifdef CONFIG_MLX5_ESWITCH 10 + void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec, 11 + struct mlx5e_ipsec_rx *rx); 12 + int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec, 13 + struct mlx5e_ipsec_rx *rx, 14 + struct mlx5_flow_destination *dest); 15 + void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, 16 + struct mlx5e_ipsec_rx_create_attr *attr); 17 + int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec, 18 + struct mlx5_flow_destination *dest); 19 + #else 20 + static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec, 21 + struct mlx5e_ipsec_rx *rx) {} 22 + 23 + static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec, 24 + struct mlx5e_ipsec_rx *rx, 25 + struct mlx5_flow_destination *dest) 26 + { 27 + return -EINVAL; 28 + } 29 + 30 + static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, 31 + struct mlx5e_ipsec_rx_create_attr *attr) {} 32 + 33 + static inline int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec, 34 + struct mlx5_flow_destination *dest) 35 + { 36 + return -EINVAL; 37 + } 38 + #endif /* CONFIG_MLX5_ESWITCH */ 39 + #endif /* __MLX5_ESW_IPSEC_FS_H__ */
+6
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 2987 2987 if (err) 2988 2988 goto out_err; 2989 2989 2990 + maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_CRYPTO_INGRESS, 3); 2991 + if (IS_ERR(maj_prio)) { 2992 + err = PTR_ERR(maj_prio); 2993 + goto out_err; 2994 + } 2995 + 2990 2996 err = create_fdb_fast_path(steering); 2991 2997 if (err) 2992 2998 goto out_err;
+1
include/linux/mlx5/fs.h
··· 109 109 110 110 enum { 111 111 FDB_BYPASS_PATH, 112 + FDB_CRYPTO_INGRESS, 112 113 FDB_TC_OFFLOAD, 113 114 FDB_FT_OFFLOAD, 114 115 FDB_TC_MISS,