Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/mlx5e: Handle IPsec offload for RX datapath in switchdev mode

Reuse the tun opts bits in reg c1 to pass the IPsec obj id to the datapath.
As this is only for RX SAs and only 11 bits are available, an xarray is
used to map each IPsec obj id to an index between 1 and 0x7ff, and that
index replaces the obj id when writing to reg c1.

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Link: https://lore.kernel.org/r/43d60fbcc9cd672a97d7e2a2f7fe6a3d9e9a776d.1690802064.git.leon@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Jianbo Liu and committed by
Jakub Kicinski
91bafc63 1762f132

+139 -3
+14 -3
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
··· 715 715 uplink_priv = &uplink_rpriv->uplink_priv; 716 716 ct_priv = uplink_priv->ct_priv; 717 717 718 - if (!mlx5_ipsec_is_rx_flow(cqe) && 719 - !mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv, zone_restore_id, tunnel_id, 720 - &tc_priv)) 718 + #ifdef CONFIG_MLX5_EN_IPSEC 719 + if (!(tunnel_id >> ESW_TUN_OPTS_BITS)) { 720 + u32 mapped_id; 721 + u32 metadata; 722 + 723 + mapped_id = tunnel_id & ESW_IPSEC_RX_MAPPED_ID_MASK; 724 + if (mapped_id && 725 + !mlx5_esw_ipsec_rx_make_metadata(priv, mapped_id, &metadata)) 726 + mlx5e_ipsec_offload_handle_rx_skb(priv->netdev, skb, metadata); 727 + } 728 + #endif 729 + 730 + if (!mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv, 731 + zone_restore_id, tunnel_id, &tc_priv)) 721 732 goto free_skb; 722 733 723 734 forward:
+2
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
··· 210 210 struct mlx5e_ipsec_fc *fc; 211 211 struct mlx5_fs_chains *chains; 212 212 u8 allow_tunnel_mode : 1; 213 + struct xarray ipsec_obj_id_map; 213 214 }; 214 215 215 216 struct mlx5e_ipsec { ··· 257 256 struct mlx5e_ipsec_work *work; 258 257 struct mlx5e_ipsec_dwork *dwork; 259 258 struct mlx5e_ipsec_limits limits; 259 + u32 rx_mapped_id; 260 260 }; 261 261 262 262 struct mlx5_accel_pol_xfrm_attrs {
+7
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
··· 1153 1153 err = setup_modify_header(ipsec, attrs->type, 1154 1154 sa_entry->ipsec_obj_id | BIT(31), 1155 1155 XFRM_DEV_OFFLOAD_IN, &flow_act); 1156 + else 1157 + err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act); 1158 + 1156 1159 if (err) 1157 1160 goto err_mod_header; 1158 1161 ··· 1644 1641 } 1645 1642 1646 1643 mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr); 1644 + mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry); 1647 1645 rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type); 1648 1646 } 1649 1647 ··· 1697 1693 kfree(ipsec->rx_ipv6); 1698 1694 1699 1695 if (ipsec->is_uplink_rep) { 1696 + xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map); 1697 + 1700 1698 mutex_destroy(&ipsec->tx_esw->ft.mutex); 1701 1699 WARN_ON(ipsec->tx_esw->ft.refcnt); 1702 1700 kfree(ipsec->tx_esw); ··· 1759 1753 mutex_init(&ipsec->tx_esw->ft.mutex); 1760 1754 mutex_init(&ipsec->rx_esw->ft.mutex); 1761 1755 ipsec->tx_esw->ns = ns_esw; 1756 + xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1); 1762 1757 } else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) { 1763 1758 ipsec->roce = mlx5_ipsec_fs_roce_init(mdev); 1764 1759 }
+22
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
··· 37 37 #include "ipsec.h" 38 38 #include "ipsec_rxtx.h" 39 39 #include "en.h" 40 + #include "esw/ipsec_fs.h" 40 41 41 42 enum { 42 43 MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8, ··· 355 354 default: 356 355 atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome); 357 356 } 357 + } 358 + 359 + int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata) 360 + { 361 + struct mlx5e_ipsec *ipsec = priv->ipsec; 362 + u32 ipsec_obj_id; 363 + int err; 364 + 365 + if (!ipsec || !ipsec->is_uplink_rep) 366 + return -EINVAL; 367 + 368 + err = mlx5_esw_ipsec_rx_ipsec_obj_id_search(priv, id, &ipsec_obj_id); 369 + if (err) { 370 + atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss); 371 + return err; 372 + } 373 + 374 + *metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id, 375 + MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED); 376 + 377 + return 0; 358 378 }
+2
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
··· 43 43 #define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1) 44 44 #define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0)) 45 45 #define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0)) 46 + #define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24)) 46 47 47 48 struct mlx5e_accel_tx_ipsec_state { 48 49 struct xfrm_offload *xo; ··· 68 67 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, 69 68 struct sk_buff *skb, 70 69 u32 ipsec_meta_data); 70 + int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata); 71 71 static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_state *ipsec_st) 72 72 { 73 73 return ipsec_st->tailen;
+69
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
··· 182 182 183 183 return 0; 184 184 } 185 + 186 + int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry, 187 + struct mlx5_flow_act *flow_act) 188 + { 189 + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; 190 + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; 191 + struct mlx5_core_dev *mdev = ipsec->mdev; 192 + struct mlx5_modify_hdr *modify_hdr; 193 + u32 mapped_id; 194 + int err; 195 + 196 + err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id, 197 + xa_mk_value(sa_entry->ipsec_obj_id), 198 + XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0); 199 + if (err) 200 + return err; 201 + 202 + /* reuse tunnel bits for ipsec, 203 + * tun_id is always 0 and tun_opts is mapped to ipsec_obj_id. 204 + */ 205 + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); 206 + MLX5_SET(set_action_in, action, field, 207 + MLX5_ACTION_IN_FIELD_METADATA_REG_C_1); 208 + MLX5_SET(set_action_in, action, offset, ESW_ZONE_ID_BITS); 209 + MLX5_SET(set_action_in, action, length, 210 + ESW_TUN_ID_BITS + ESW_TUN_OPTS_BITS); 211 + MLX5_SET(set_action_in, action, data, mapped_id); 212 + 213 + modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB, 214 + 1, action); 215 + if (IS_ERR(modify_hdr)) { 216 + err = PTR_ERR(modify_hdr); 217 + goto err_header_alloc; 218 + } 219 + 220 + sa_entry->rx_mapped_id = mapped_id; 221 + flow_act->modify_hdr = modify_hdr; 222 + flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 223 + 224 + return 0; 225 + 226 + err_header_alloc: 227 + xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id); 228 + return err; 229 + } 230 + 231 + void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) 232 + { 233 + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; 234 + 235 + if (sa_entry->rx_mapped_id) 236 + xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, 237 + sa_entry->rx_mapped_id); 238 + } 239 + 240 + int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id, 241 + u32 
*ipsec_obj_id) 242 + { 243 + struct mlx5e_ipsec *ipsec = priv->ipsec; 244 + void *val; 245 + 246 + val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id); 247 + if (!val) 248 + return -ENOENT; 249 + 250 + *ipsec_obj_id = xa_to_value(val); 251 + 252 + return 0; 253 + }
+20
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
··· 5 5 #define __MLX5_ESW_IPSEC_FS_H__ 6 6 7 7 struct mlx5e_ipsec; 8 + struct mlx5e_ipsec_sa_entry; 8 9 9 10 #ifdef CONFIG_MLX5_ESWITCH 10 11 void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec, ··· 17 16 struct mlx5e_ipsec_rx_create_attr *attr); 18 17 int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec, 19 18 struct mlx5_flow_destination *dest); 19 + int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry, 20 + struct mlx5_flow_act *flow_act); 21 + void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry); 22 + int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id, 23 + u32 *ipsec_obj_id); 20 24 #else 21 25 static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec, 22 26 struct mlx5e_ipsec_rx *rx) {} ··· 38 32 39 33 static inline int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec, 40 34 struct mlx5_flow_destination *dest) 35 + { 36 + return -EINVAL; 37 + } 38 + 39 + static inline int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry, 40 + struct mlx5_flow_act *flow_act) 41 + { 42 + return -EINVAL; 43 + } 44 + 45 + static inline void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) {} 46 + 47 + static inline int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id, 48 + u32 *ipsec_obj_id) 41 49 { 42 50 return -EINVAL; 43 51 }
+3
include/linux/mlx5/eswitch.h
··· 144 144 GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \ 145 145 ESW_TUN_OPTS_OFFSET + 1) 146 146 147 + /* reuse tun_opts for the mapped ipsec obj id when tun_id is 0 (invalid) */ 148 + #define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0) 149 + 147 150 u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev); 148 151 u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); 149 152 struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);