Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlx5-next' of https://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Leon Romanovsky says:

====================
Mellanox shared branch that includes:

* Removal of FPGA TLS code https://lore.kernel.org/all/cover.1649073691.git.leonro@nvidia.com

Mellanox INNOVA TLS cards are EOL in May, 2018 [1]. As such, the code
is unmaintained, untested and not in-use by any upstream/distro oriented
customers. In order to reduce code complexity, drop the kernel code,
clean build config options and delete useless kTLS vs. TLS separation.

[1] https://network.nvidia.com/related-docs/eol/LCR-000286.pdf

* Removal of FPGA IPsec code https://lore.kernel.org/all/cover.1649232994.git.leonro@nvidia.com

Together with FPGA TLS, the IPsec went to EOL state in November of
2019 [1]. Exactly like FPGA TLS, no active customers exist for this
upstream code and all the complexity around that area can be deleted.

[1] https://network.nvidia.com/related-docs/eol/LCR-000535.pdf

* Fix to undefined behavior from Borislav https://lore.kernel.org/all/20220405151517.29753-11-bp@alien8.de

* 'mlx5-next' of https://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux: (23 commits)
net/mlx5: Remove not-implemented IPsec capabilities
net/mlx5: Remove ipsec_ops function table
net/mlx5: Reduce kconfig complexity while building crypto support
net/mlx5: Move IPsec file to relevant directory
net/mlx5: Remove not-needed IPsec config
net/mlx5: Align flow steering allocation namespace to common style
net/mlx5: Unify device IPsec capabilities check
net/mlx5: Remove useless IPsec device checks
net/mlx5: Remove ipsec vs. ipsec offload file separation
RDMA/core: Delete IPsec flow action logic from the core
RDMA/mlx5: Drop crypto flow steering API
RDMA/mlx5: Delete never supported IPsec flow action
net/mlx5: Remove FPGA ipsec specific statistics
net/mlx5: Remove XFRM no_trailer flag
net/mlx5: Remove not-used IDA field from IPsec struct
net/mlx5: Delete metadata handling logic
net/mlx5_fpga: Drop INNOVA IPsec support
IB/mlx5: Fix undefined behavior due to shift overflowing the constant
net/mlx5: Cleanup kTLS function names and their exposure
net/mlx5: Remove tls vs. ktls separation as it is the same
...
====================

Link: https://lore.kernel.org/r/20220409055303.1223644-1-leon@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+365 -5355
-2
drivers/infiniband/core/device.c
··· 2613 2613 SET_DEVICE_OP(dev_ops, create_counters); 2614 2614 SET_DEVICE_OP(dev_ops, create_cq); 2615 2615 SET_DEVICE_OP(dev_ops, create_flow); 2616 - SET_DEVICE_OP(dev_ops, create_flow_action_esp); 2617 2616 SET_DEVICE_OP(dev_ops, create_qp); 2618 2617 SET_DEVICE_OP(dev_ops, create_rwq_ind_table); 2619 2618 SET_DEVICE_OP(dev_ops, create_srq); ··· 2675 2676 SET_DEVICE_OP(dev_ops, modify_ah); 2676 2677 SET_DEVICE_OP(dev_ops, modify_cq); 2677 2678 SET_DEVICE_OP(dev_ops, modify_device); 2678 - SET_DEVICE_OP(dev_ops, modify_flow_action_esp); 2679 2679 SET_DEVICE_OP(dev_ops, modify_hw_stat); 2680 2680 SET_DEVICE_OP(dev_ops, modify_port); 2681 2681 SET_DEVICE_OP(dev_ops, modify_qp);
+1 -382
drivers/infiniband/core/uverbs_std_types_flow_action.c
··· 46 46 return action->device->ops.destroy_flow_action(action); 47 47 } 48 48 49 - static u64 esp_flags_uverbs_to_verbs(struct uverbs_attr_bundle *attrs, 50 - u32 flags, bool is_modify) 51 - { 52 - u64 verbs_flags = flags; 53 - 54 - if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ESN)) 55 - verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED; 56 - 57 - if (is_modify && uverbs_attr_is_valid(attrs, 58 - UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS)) 59 - verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS; 60 - 61 - return verbs_flags; 62 - }; 63 - 64 - static int validate_flow_action_esp_keymat_aes_gcm(struct ib_flow_action_attrs_esp_keymats *keymat) 65 - { 66 - struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm = 67 - &keymat->keymat.aes_gcm; 68 - 69 - if (aes_gcm->iv_algo > IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ) 70 - return -EOPNOTSUPP; 71 - 72 - if (aes_gcm->key_len != 32 && 73 - aes_gcm->key_len != 24 && 74 - aes_gcm->key_len != 16) 75 - return -EINVAL; 76 - 77 - if (aes_gcm->icv_len != 16 && 78 - aes_gcm->icv_len != 8 && 79 - aes_gcm->icv_len != 12) 80 - return -EINVAL; 81 - 82 - return 0; 83 - } 84 - 85 - static int (* const flow_action_esp_keymat_validate[])(struct ib_flow_action_attrs_esp_keymats *keymat) = { 86 - [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = validate_flow_action_esp_keymat_aes_gcm, 87 - }; 88 - 89 - static int flow_action_esp_replay_none(struct ib_flow_action_attrs_esp_replays *replay, 90 - bool is_modify) 91 - { 92 - /* This is used in order to modify an esp flow action with an enabled 93 - * replay protection to a disabled one. This is only supported via 94 - * modify, as in create verb we can simply drop the REPLAY attribute and 95 - * achieve the same thing. 96 - */ 97 - return is_modify ? 
0 : -EINVAL; 98 - } 99 - 100 - static int flow_action_esp_replay_def_ok(struct ib_flow_action_attrs_esp_replays *replay, 101 - bool is_modify) 102 - { 103 - /* Some replay protections could always be enabled without validating 104 - * anything. 105 - */ 106 - return 0; 107 - } 108 - 109 - static int (* const flow_action_esp_replay_validate[])(struct ib_flow_action_attrs_esp_replays *replay, 110 - bool is_modify) = { 111 - [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = flow_action_esp_replay_none, 112 - [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = flow_action_esp_replay_def_ok, 113 - }; 114 - 115 - static int parse_esp_ip(enum ib_flow_spec_type proto, 116 - const void __user *val_ptr, 117 - size_t len, union ib_flow_spec *out) 118 - { 119 - int ret; 120 - const struct ib_uverbs_flow_ipv4_filter ipv4 = { 121 - .src_ip = cpu_to_be32(0xffffffffUL), 122 - .dst_ip = cpu_to_be32(0xffffffffUL), 123 - .proto = 0xff, 124 - .tos = 0xff, 125 - .ttl = 0xff, 126 - .flags = 0xff, 127 - }; 128 - const struct ib_uverbs_flow_ipv6_filter ipv6 = { 129 - .src_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 130 - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, 131 - .dst_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 132 - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, 133 - .flow_label = cpu_to_be32(0xffffffffUL), 134 - .next_hdr = 0xff, 135 - .traffic_class = 0xff, 136 - .hop_limit = 0xff, 137 - }; 138 - union { 139 - struct ib_uverbs_flow_ipv4_filter ipv4; 140 - struct ib_uverbs_flow_ipv6_filter ipv6; 141 - } user_val = {}; 142 - const void *user_pmask; 143 - size_t val_len; 144 - 145 - /* If the flow IPv4/IPv6 flow specifications are extended, the mask 146 - * should be changed as well. 
147 - */ 148 - BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv4_filter, flags) + 149 - sizeof(ipv4.flags) != sizeof(ipv4)); 150 - BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv6_filter, reserved) + 151 - sizeof(ipv6.reserved) != sizeof(ipv6)); 152 - 153 - switch (proto) { 154 - case IB_FLOW_SPEC_IPV4: 155 - if (len > sizeof(user_val.ipv4) && 156 - !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv4), 157 - len - sizeof(user_val.ipv4))) 158 - return -EOPNOTSUPP; 159 - 160 - val_len = min_t(size_t, len, sizeof(user_val.ipv4)); 161 - ret = copy_from_user(&user_val.ipv4, val_ptr, 162 - val_len); 163 - if (ret) 164 - return -EFAULT; 165 - 166 - user_pmask = &ipv4; 167 - break; 168 - case IB_FLOW_SPEC_IPV6: 169 - if (len > sizeof(user_val.ipv6) && 170 - !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv6), 171 - len - sizeof(user_val.ipv6))) 172 - return -EOPNOTSUPP; 173 - 174 - val_len = min_t(size_t, len, sizeof(user_val.ipv6)); 175 - ret = copy_from_user(&user_val.ipv6, val_ptr, 176 - val_len); 177 - if (ret) 178 - return -EFAULT; 179 - 180 - user_pmask = &ipv6; 181 - break; 182 - default: 183 - return -EOPNOTSUPP; 184 - } 185 - 186 - return ib_uverbs_kern_spec_to_ib_spec_filter(proto, user_pmask, 187 - &user_val, 188 - val_len, out); 189 - } 190 - 191 - static int flow_action_esp_get_encap(struct ib_flow_spec_list *out, 192 - struct uverbs_attr_bundle *attrs) 193 - { 194 - struct ib_uverbs_flow_action_esp_encap uverbs_encap; 195 - int ret; 196 - 197 - ret = uverbs_copy_from(&uverbs_encap, attrs, 198 - UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP); 199 - if (ret) 200 - return ret; 201 - 202 - /* We currently support only one encap */ 203 - if (uverbs_encap.next_ptr) 204 - return -EOPNOTSUPP; 205 - 206 - if (uverbs_encap.type != IB_FLOW_SPEC_IPV4 && 207 - uverbs_encap.type != IB_FLOW_SPEC_IPV6) 208 - return -EOPNOTSUPP; 209 - 210 - return parse_esp_ip(uverbs_encap.type, 211 - u64_to_user_ptr(uverbs_encap.val_ptr), 212 - uverbs_encap.len, 213 - &out->spec); 214 - } 215 - 216 - 
struct ib_flow_action_esp_attr { 217 - struct ib_flow_action_attrs_esp hdr; 218 - struct ib_flow_action_attrs_esp_keymats keymat; 219 - struct ib_flow_action_attrs_esp_replays replay; 220 - /* We currently support only one spec */ 221 - struct ib_flow_spec_list encap; 222 - }; 223 - 224 - #define ESP_LAST_SUPPORTED_FLAG IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW 225 - static int parse_flow_action_esp(struct ib_device *ib_dev, 226 - struct uverbs_attr_bundle *attrs, 227 - struct ib_flow_action_esp_attr *esp_attr, 228 - bool is_modify) 229 - { 230 - struct ib_uverbs_flow_action_esp uverbs_esp = {}; 231 - int ret; 232 - 233 - /* Optional param, if it doesn't exist, we get -ENOENT and skip it */ 234 - ret = uverbs_copy_from(&esp_attr->hdr.esn, attrs, 235 - UVERBS_ATTR_FLOW_ACTION_ESP_ESN); 236 - if (IS_UVERBS_COPY_ERR(ret)) 237 - return ret; 238 - 239 - /* This can be called from FLOW_ACTION_ESP_MODIFY where 240 - * UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS is optional 241 - */ 242 - if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS)) { 243 - ret = uverbs_copy_from_or_zero(&uverbs_esp, attrs, 244 - UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS); 245 - if (ret) 246 - return ret; 247 - 248 - if (uverbs_esp.flags & ~((ESP_LAST_SUPPORTED_FLAG << 1) - 1)) 249 - return -EOPNOTSUPP; 250 - 251 - esp_attr->hdr.spi = uverbs_esp.spi; 252 - esp_attr->hdr.seq = uverbs_esp.seq; 253 - esp_attr->hdr.tfc_pad = uverbs_esp.tfc_pad; 254 - esp_attr->hdr.hard_limit_pkts = uverbs_esp.hard_limit_pkts; 255 - } 256 - esp_attr->hdr.flags = esp_flags_uverbs_to_verbs(attrs, uverbs_esp.flags, 257 - is_modify); 258 - 259 - if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT)) { 260 - esp_attr->keymat.protocol = 261 - uverbs_attr_get_enum_id(attrs, 262 - UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT); 263 - ret = uverbs_copy_from_or_zero(&esp_attr->keymat.keymat, 264 - attrs, 265 - UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT); 266 - if (ret) 267 - return ret; 268 - 269 - ret = 
flow_action_esp_keymat_validate[esp_attr->keymat.protocol](&esp_attr->keymat); 270 - if (ret) 271 - return ret; 272 - 273 - esp_attr->hdr.keymat = &esp_attr->keymat; 274 - } 275 - 276 - if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY)) { 277 - esp_attr->replay.protocol = 278 - uverbs_attr_get_enum_id(attrs, 279 - UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY); 280 - 281 - ret = uverbs_copy_from_or_zero(&esp_attr->replay.replay, 282 - attrs, 283 - UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY); 284 - if (ret) 285 - return ret; 286 - 287 - ret = flow_action_esp_replay_validate[esp_attr->replay.protocol](&esp_attr->replay, 288 - is_modify); 289 - if (ret) 290 - return ret; 291 - 292 - esp_attr->hdr.replay = &esp_attr->replay; 293 - } 294 - 295 - if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP)) { 296 - ret = flow_action_esp_get_encap(&esp_attr->encap, attrs); 297 - if (ret) 298 - return ret; 299 - 300 - esp_attr->hdr.encap = &esp_attr->encap; 301 - } 302 - 303 - return 0; 304 - } 305 - 306 - static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)( 307 - struct uverbs_attr_bundle *attrs) 308 - { 309 - struct ib_uobject *uobj = uverbs_attr_get_uobject( 310 - attrs, UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE); 311 - struct ib_device *ib_dev = attrs->context->device; 312 - int ret; 313 - struct ib_flow_action *action; 314 - struct ib_flow_action_esp_attr esp_attr = {}; 315 - 316 - if (!ib_dev->ops.create_flow_action_esp) 317 - return -EOPNOTSUPP; 318 - 319 - ret = parse_flow_action_esp(ib_dev, attrs, &esp_attr, false); 320 - if (ret) 321 - return ret; 322 - 323 - /* No need to check as this attribute is marked as MANDATORY */ 324 - action = ib_dev->ops.create_flow_action_esp(ib_dev, &esp_attr.hdr, 325 - attrs); 326 - if (IS_ERR(action)) 327 - return PTR_ERR(action); 328 - 329 - uverbs_flow_action_fill_action(action, uobj, ib_dev, 330 - IB_FLOW_ACTION_ESP); 331 - 332 - return 0; 333 - } 334 - 335 - static int 
UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)( 336 - struct uverbs_attr_bundle *attrs) 337 - { 338 - struct ib_uobject *uobj = uverbs_attr_get_uobject( 339 - attrs, UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE); 340 - struct ib_flow_action *action = uobj->object; 341 - int ret; 342 - struct ib_flow_action_esp_attr esp_attr = {}; 343 - 344 - if (!action->device->ops.modify_flow_action_esp) 345 - return -EOPNOTSUPP; 346 - 347 - ret = parse_flow_action_esp(action->device, attrs, &esp_attr, true); 348 - if (ret) 349 - return ret; 350 - 351 - if (action->type != IB_FLOW_ACTION_ESP) 352 - return -EINVAL; 353 - 354 - return action->device->ops.modify_flow_action_esp(action, 355 - &esp_attr.hdr, 356 - attrs); 357 - } 358 - 359 - static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = { 360 - [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = { 361 - .type = UVERBS_ATTR_TYPE_PTR_IN, 362 - UVERBS_ATTR_STRUCT( 363 - struct ib_uverbs_flow_action_esp_keymat_aes_gcm, 364 - aes_key), 365 - }, 366 - }; 367 - 368 - static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = { 369 - [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = { 370 - .type = UVERBS_ATTR_TYPE_PTR_IN, 371 - UVERBS_ATTR_NO_DATA(), 372 - }, 373 - [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = { 374 - .type = UVERBS_ATTR_TYPE_PTR_IN, 375 - UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp, 376 - size), 377 - }, 378 - }; 379 - 380 - DECLARE_UVERBS_NAMED_METHOD( 381 - UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, 382 - UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE, 383 - UVERBS_OBJECT_FLOW_ACTION, 384 - UVERBS_ACCESS_NEW, 385 - UA_MANDATORY), 386 - UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS, 387 - UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp, 388 - hard_limit_pkts), 389 - UA_MANDATORY), 390 - UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN, 391 - UVERBS_ATTR_TYPE(__u32), 392 - UA_OPTIONAL), 393 - UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT, 394 - 
uverbs_flow_action_esp_keymat, 395 - UA_MANDATORY), 396 - UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY, 397 - uverbs_flow_action_esp_replay, 398 - UA_OPTIONAL), 399 - UVERBS_ATTR_PTR_IN( 400 - UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP, 401 - UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap), 402 - UA_OPTIONAL)); 403 - 404 - DECLARE_UVERBS_NAMED_METHOD( 405 - UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY, 406 - UVERBS_ATTR_IDR(UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE, 407 - UVERBS_OBJECT_FLOW_ACTION, 408 - UVERBS_ACCESS_WRITE, 409 - UA_MANDATORY), 410 - UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS, 411 - UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp, 412 - hard_limit_pkts), 413 - UA_OPTIONAL), 414 - UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN, 415 - UVERBS_ATTR_TYPE(__u32), 416 - UA_OPTIONAL), 417 - UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT, 418 - uverbs_flow_action_esp_keymat, 419 - UA_OPTIONAL), 420 - UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY, 421 - uverbs_flow_action_esp_replay, 422 - UA_OPTIONAL), 423 - UVERBS_ATTR_PTR_IN( 424 - UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP, 425 - UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap), 426 - UA_OPTIONAL)); 427 - 428 49 DECLARE_UVERBS_NAMED_METHOD_DESTROY( 429 50 UVERBS_METHOD_FLOW_ACTION_DESTROY, 430 51 UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE, ··· 56 435 DECLARE_UVERBS_NAMED_OBJECT( 57 436 UVERBS_OBJECT_FLOW_ACTION, 58 437 UVERBS_TYPE_ALLOC_IDR(uverbs_free_flow_action), 59 - &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE), 60 - &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY), 61 - &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)); 438 + &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY)); 62 439 63 440 const struct uapi_definition uverbs_def_obj_flow_action[] = { 64 441 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+2 -221
drivers/infiniband/hw/mlx5/fs.c
··· 15 15 #include <linux/mlx5/driver.h> 16 16 #include <linux/mlx5/fs.h> 17 17 #include <linux/mlx5/fs_helpers.h> 18 - #include <linux/mlx5/accel.h> 19 18 #include <linux/mlx5/eswitch.h> 20 19 #include <net/inet_ecn.h> 21 20 #include "mlx5_ib.h" ··· 147 148 { 148 149 149 150 switch (maction->ib_action.type) { 150 - case IB_FLOW_ACTION_ESP: 151 - if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 152 - MLX5_FLOW_CONTEXT_ACTION_DECRYPT)) 153 - return -EINVAL; 154 - /* Currently only AES_GCM keymat is supported by the driver */ 155 - action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx; 156 - action->action |= is_egress ? 157 - MLX5_FLOW_CONTEXT_ACTION_ENCRYPT : 158 - MLX5_FLOW_CONTEXT_ACTION_DECRYPT; 159 - return 0; 160 151 case IB_FLOW_ACTION_UNSPECIFIED: 161 152 if (maction->flow_action_raw.sub_type == 162 153 MLX5_IB_FLOW_ACTION_MODIFY_HEADER) { ··· 357 368 ib_spec->type & IB_FLOW_SPEC_INNER); 358 369 break; 359 370 case IB_FLOW_SPEC_ESP: 360 - if (ib_spec->esp.mask.seq) 361 - return -EOPNOTSUPP; 362 - 363 - MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 364 - ntohl(ib_spec->esp.mask.spi)); 365 - MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 366 - ntohl(ib_spec->esp.val.spi)); 367 - break; 371 + return -EOPNOTSUPP; 368 372 case IB_FLOW_SPEC_TCP: 369 373 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 370 374 LAST_TCP_UDP_FIELD)) ··· 567 585 } 568 586 569 587 return false; 570 - } 571 - 572 - enum valid_spec { 573 - VALID_SPEC_INVALID, 574 - VALID_SPEC_VALID, 575 - VALID_SPEC_NA, 576 - }; 577 - 578 - static enum valid_spec 579 - is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev, 580 - const struct mlx5_flow_spec *spec, 581 - const struct mlx5_flow_act *flow_act, 582 - bool egress) 583 - { 584 - const u32 *match_c = spec->match_criteria; 585 - bool is_crypto = 586 - (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 587 - MLX5_FLOW_CONTEXT_ACTION_DECRYPT)); 588 - bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c); 589 - bool is_drop = 
flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP; 590 - 591 - /* 592 - * Currently only crypto is supported in egress, when regular egress 593 - * rules would be supported, always return VALID_SPEC_NA. 594 - */ 595 - if (!is_crypto) 596 - return VALID_SPEC_NA; 597 - 598 - return is_crypto && is_ipsec && 599 - (!egress || (!is_drop && 600 - !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ? 601 - VALID_SPEC_VALID : VALID_SPEC_INVALID; 602 - } 603 - 604 - static bool is_valid_spec(struct mlx5_core_dev *mdev, 605 - const struct mlx5_flow_spec *spec, 606 - const struct mlx5_flow_act *flow_act, 607 - bool egress) 608 - { 609 - /* We curretly only support ipsec egress flow */ 610 - return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID; 611 588 } 612 589 613 590 static bool is_valid_ethertype(struct mlx5_core_dev *mdev, ··· 1095 1154 1096 1155 spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); 1097 1156 1098 - if (is_egress && 1099 - !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) { 1157 + if (is_egress) { 1100 1158 err = -EINVAL; 1101 1159 goto free; 1102 1160 } ··· 1680 1740 return ERR_PTR(err); 1681 1741 } 1682 1742 1683 - static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags) 1684 - { 1685 - u32 flags = 0; 1686 - 1687 - if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA) 1688 - flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA; 1689 - 1690 - return flags; 1691 - } 1692 - 1693 - #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED \ 1694 - MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA 1695 - static struct ib_flow_action * 1696 - mlx5_ib_create_flow_action_esp(struct ib_device *device, 1697 - const struct ib_flow_action_attrs_esp *attr, 1698 - struct uverbs_attr_bundle *attrs) 1699 - { 1700 - struct mlx5_ib_dev *mdev = to_mdev(device); 1701 - struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm; 1702 - struct mlx5_accel_esp_xfrm_attrs accel_attrs = {}; 1703 - struct 
mlx5_ib_flow_action *action; 1704 - u64 action_flags; 1705 - u64 flags; 1706 - int err = 0; 1707 - 1708 - err = uverbs_get_flags64( 1709 - &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, 1710 - ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1)); 1711 - if (err) 1712 - return ERR_PTR(err); 1713 - 1714 - flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags); 1715 - 1716 - /* We current only support a subset of the standard features. Only a 1717 - * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn 1718 - * (with overlap). Full offload mode isn't supported. 1719 - */ 1720 - if (!attr->keymat || attr->replay || attr->encap || 1721 - attr->spi || attr->seq || attr->tfc_pad || 1722 - attr->hard_limit_pkts || 1723 - (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 1724 - IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT))) 1725 - return ERR_PTR(-EOPNOTSUPP); 1726 - 1727 - if (attr->keymat->protocol != 1728 - IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM) 1729 - return ERR_PTR(-EOPNOTSUPP); 1730 - 1731 - aes_gcm = &attr->keymat->keymat.aes_gcm; 1732 - 1733 - if (aes_gcm->icv_len != 16 || 1734 - aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ) 1735 - return ERR_PTR(-EOPNOTSUPP); 1736 - 1737 - action = kmalloc(sizeof(*action), GFP_KERNEL); 1738 - if (!action) 1739 - return ERR_PTR(-ENOMEM); 1740 - 1741 - action->esp_aes_gcm.ib_flags = attr->flags; 1742 - memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key, 1743 - sizeof(accel_attrs.keymat.aes_gcm.aes_key)); 1744 - accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8; 1745 - memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt, 1746 - sizeof(accel_attrs.keymat.aes_gcm.salt)); 1747 - memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv, 1748 - sizeof(accel_attrs.keymat.aes_gcm.seq_iv)); 1749 - accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8; 1750 - accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ; 1751 - 
accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM; 1752 - 1753 - accel_attrs.esn = attr->esn; 1754 - if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) 1755 - accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED; 1756 - if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) 1757 - accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 1758 - 1759 - if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT) 1760 - accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT; 1761 - 1762 - action->esp_aes_gcm.ctx = 1763 - mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags); 1764 - if (IS_ERR(action->esp_aes_gcm.ctx)) { 1765 - err = PTR_ERR(action->esp_aes_gcm.ctx); 1766 - goto err_parse; 1767 - } 1768 - 1769 - action->esp_aes_gcm.ib_flags = attr->flags; 1770 - 1771 - return &action->ib_action; 1772 - 1773 - err_parse: 1774 - kfree(action); 1775 - return ERR_PTR(err); 1776 - } 1777 - 1778 - static int 1779 - mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action, 1780 - const struct ib_flow_action_attrs_esp *attr, 1781 - struct uverbs_attr_bundle *attrs) 1782 - { 1783 - struct mlx5_ib_flow_action *maction = to_mflow_act(action); 1784 - struct mlx5_accel_esp_xfrm_attrs accel_attrs; 1785 - int err = 0; 1786 - 1787 - if (attr->keymat || attr->replay || attr->encap || 1788 - attr->spi || attr->seq || attr->tfc_pad || 1789 - attr->hard_limit_pkts || 1790 - (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 1791 - IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS | 1792 - IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))) 1793 - return -EOPNOTSUPP; 1794 - 1795 - /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can 1796 - * be modified. 
1797 - */ 1798 - if (!(maction->esp_aes_gcm.ib_flags & 1799 - IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) && 1800 - attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 1801 - IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)) 1802 - return -EINVAL; 1803 - 1804 - memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs, 1805 - sizeof(accel_attrs)); 1806 - 1807 - accel_attrs.esn = attr->esn; 1808 - if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) 1809 - accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 1810 - else 1811 - accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 1812 - 1813 - err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx, 1814 - &accel_attrs); 1815 - if (err) 1816 - return err; 1817 - 1818 - maction->esp_aes_gcm.ib_flags &= 1819 - ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; 1820 - maction->esp_aes_gcm.ib_flags |= 1821 - attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; 1822 - 1823 - return 0; 1824 - } 1825 - 1826 1743 static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction) 1827 1744 { 1828 1745 switch (maction->flow_action_raw.sub_type) { ··· 1703 1906 struct mlx5_ib_flow_action *maction = to_mflow_act(action); 1704 1907 1705 1908 switch (action->type) { 1706 - case IB_FLOW_ACTION_ESP: 1707 - /* 1708 - * We only support aes_gcm by now, so we implicitly know this is 1709 - * the underline crypto. 
1710 - */ 1711 - mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx); 1712 - break; 1713 1909 case IB_FLOW_ACTION_UNSPECIFIED: 1714 1910 destroy_flow_action_raw(maction); 1715 1911 break; ··· 2499 2709 .destroy_flow_action = mlx5_ib_destroy_flow_action, 2500 2710 }; 2501 2711 2502 - static const struct ib_device_ops flow_ipsec_ops = { 2503 - .create_flow_action_esp = mlx5_ib_create_flow_action_esp, 2504 - .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp, 2505 - }; 2506 - 2507 2712 int mlx5_ib_fs_init(struct mlx5_ib_dev *dev) 2508 2713 { 2509 2714 dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL); ··· 2509 2724 mutex_init(&dev->flow_db->lock); 2510 2725 2511 2726 ib_set_device_ops(&dev->ib_dev, &flow_ops); 2512 - if (mlx5_accel_ipsec_device_caps(dev->mdev) & 2513 - MLX5_ACCEL_IPSEC_CAP_DEVICE) 2514 - ib_set_device_ops(&dev->ib_dev, &flow_ipsec_ops); 2515 - 2516 2727 return 0; 2517 2728 }
-31
drivers/infiniband/hw/mlx5/main.c
··· 41 41 #include "wr.h" 42 42 #include "restrack.h" 43 43 #include "counters.h" 44 - #include <linux/mlx5/accel.h> 45 44 #include <rdma/uverbs_std_types.h> 46 45 #include <rdma/uverbs_ioctl.h> 47 46 #include <rdma/mlx5_user_ioctl_verbs.h> ··· 905 906 MLX5_RX_HASH_SRC_PORT_UDP | 906 907 MLX5_RX_HASH_DST_PORT_UDP | 907 908 MLX5_RX_HASH_INNER; 908 - if (mlx5_accel_ipsec_device_caps(dev->mdev) & 909 - MLX5_ACCEL_IPSEC_CAP_DEVICE) 910 - resp.rss_caps.rx_hash_fields_mask |= 911 - MLX5_RX_HASH_IPSEC_SPI; 912 909 resp.response_length += sizeof(resp.rss_caps); 913 910 } 914 911 } else { ··· 1786 1791 resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? 1787 1792 MLX5_CAP_GEN(dev->mdev, 1788 1793 num_of_uars_per_page) : 1; 1789 - 1790 - if (mlx5_accel_ipsec_device_caps(dev->mdev) & 1791 - MLX5_ACCEL_IPSEC_CAP_DEVICE) { 1792 - if (mlx5_get_flow_namespace(dev->mdev, 1793 - MLX5_FLOW_NAMESPACE_EGRESS)) 1794 - resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM; 1795 - if (mlx5_accel_ipsec_device_caps(dev->mdev) & 1796 - MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA) 1797 - resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA; 1798 - if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) 1799 - resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING; 1800 - if (mlx5_accel_ipsec_device_caps(dev->mdev) & 1801 - MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN) 1802 - resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN; 1803 - /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */ 1804 - } 1805 - 1806 1794 resp->tot_bfregs = bfregi->lib_uar_dyn ? 
0 : 1807 1795 bfregi->total_num_bfregs - bfregi->num_dyn_bfregs; 1808 1796 resp->num_ports = dev->num_ports; ··· 3583 3605 &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY)); 3584 3606 3585 3607 ADD_UVERBS_ATTRIBUTES_SIMPLE( 3586 - mlx5_ib_flow_action, 3587 - UVERBS_OBJECT_FLOW_ACTION, 3588 - UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, 3589 - UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, 3590 - enum mlx5_ib_uapi_flow_action_flags)); 3591 - 3592 - ADD_UVERBS_ATTRIBUTES_SIMPLE( 3593 3608 mlx5_ib_query_context, 3594 3609 UVERBS_OBJECT_DEVICE, 3595 3610 UVERBS_METHOD_QUERY_CONTEXT, ··· 3599 3628 UAPI_DEF_CHAIN(mlx5_ib_std_types_defs), 3600 3629 UAPI_DEF_CHAIN(mlx5_ib_dm_defs), 3601 3630 3602 - UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, 3603 - &mlx5_ib_flow_action), 3604 3631 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context), 3605 3632 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR, 3606 3633 UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
+2 -56
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
··· 16 16 Core driver for low level functionality of the ConnectX-4 and 17 17 Connect-IB cards by Mellanox Technologies. 18 18 19 - config MLX5_ACCEL 20 - bool 21 - 22 19 config MLX5_FPGA 23 20 bool "Mellanox Technologies Innova support" 24 21 depends on MLX5_CORE 25 - select MLX5_ACCEL 26 22 help 27 23 Build support for the Innova family of network cards by Mellanox 28 24 Technologies. Innova network cards are comprised of a ConnectX chip ··· 139 143 help 140 144 MLX5 IPoIB offloads & acceleration support. 141 145 142 - config MLX5_FPGA_IPSEC 143 - bool "Mellanox Technologies IPsec Innova support" 144 - depends on MLX5_CORE 145 - depends on MLX5_FPGA 146 - help 147 - Build IPsec support for the Innova family of network cards by Mellanox 148 - Technologies. Innova network cards are comprised of a ConnectX chip 149 - and an FPGA chip on one board. If you select this option, the 150 - mlx5_core driver will include the Innova FPGA core and allow building 151 - sandbox-specific client drivers. 152 - 153 - config MLX5_IPSEC 146 + config MLX5_EN_IPSEC 154 147 bool "Mellanox Technologies IPsec Connect-X support" 155 148 depends on MLX5_CORE_EN 156 149 depends on XFRM_OFFLOAD 157 150 depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD 158 - select MLX5_ACCEL 159 - help 160 - Build IPsec support for the Connect-X family of network cards by Mellanox 161 - Technologies. 162 - Note: If you select this option, the mlx5_core driver will include 163 - IPsec support for the Connect-X family. 164 - 165 - config MLX5_EN_IPSEC 166 - bool "IPSec XFRM cryptography-offload acceleration" 167 - depends on MLX5_CORE_EN 168 - depends on XFRM_OFFLOAD 169 - depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD 170 - depends on MLX5_FPGA_IPSEC || MLX5_IPSEC 171 151 help 172 152 Build support for IPsec cryptography-offload acceleration in the NIC. 173 - Note: Support for hardware with this capability needs to be selected 174 - for this option to become available. 
175 153 176 - config MLX5_FPGA_TLS 177 - bool "Mellanox Technologies TLS Innova support" 178 - depends on TLS_DEVICE 179 - depends on TLS=y || MLX5_CORE=m 180 - depends on MLX5_CORE_EN 181 - depends on MLX5_FPGA 182 - select MLX5_EN_TLS 183 - help 184 - Build TLS support for the Innova family of network cards by Mellanox 185 - Technologies. Innova network cards are comprised of a ConnectX chip 186 - and an FPGA chip on one board. If you select this option, the 187 - mlx5_core driver will include the Innova FPGA core and allow building 188 - sandbox-specific client drivers. 189 - 190 - config MLX5_TLS 154 + config MLX5_EN_TLS 191 155 bool "Mellanox Technologies TLS Connect-X support" 192 156 depends on TLS_DEVICE 193 157 depends on TLS=y || MLX5_CORE=m 194 158 depends on MLX5_CORE_EN 195 - select MLX5_ACCEL 196 - select MLX5_EN_TLS 197 - help 198 - Build TLS support for the Connect-X family of network cards by Mellanox 199 - Technologies. 200 - 201 - config MLX5_EN_TLS 202 - bool 203 159 help 204 160 Build support for TLS cryptography-offload acceleration in the NIC. 205 - Note: Support for hardware with this capability needs to be selected 206 - for this option to become available. 207 161 208 162 config MLX5_SW_STEERING 209 163 bool "Mellanox Technologies software-managed steering"
+4 -8
drivers/net/ethernet/mellanox/mlx5/core/Makefile
··· 28 28 en_selftest.o en/port.o en/monitor_stats.o en/health.o \ 29 29 en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \ 30 30 en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \ 31 - en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o 31 + en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o lib/crypto.o 32 32 33 33 # 34 34 # Netdev extra ··· 88 88 # 89 89 # Accelerations & FPGA 90 90 # 91 - mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o 92 - mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o 93 - mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o 94 - mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o 95 - 96 91 mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o 97 92 98 93 mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ 99 - en_accel/ipsec_stats.o en_accel/ipsec_fs.o 94 + en_accel/ipsec_stats.o en_accel/ipsec_fs.o \ 95 + en_accel/ipsec_offload.o 100 96 101 - mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \ 97 + mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \ 102 98 en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \ 103 99 en_accel/ktls_tx.o en_accel/ktls_rx.o 104 100
-36
drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
··· 1 - #ifndef __MLX5E_ACCEL_H__ 2 - #define __MLX5E_ACCEL_H__ 3 - 4 - #ifdef CONFIG_MLX5_ACCEL 5 - 6 - #include <linux/skbuff.h> 7 - #include <linux/netdevice.h> 8 - 9 - static inline bool is_metadata_hdr_valid(struct sk_buff *skb) 10 - { 11 - __be16 *ethtype; 12 - 13 - if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)) 14 - return false; 15 - ethtype = (__be16 *)(skb->data + ETH_ALEN * 2); 16 - if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE)) 17 - return false; 18 - return true; 19 - } 20 - 21 - static inline void remove_metadata_hdr(struct sk_buff *skb) 22 - { 23 - struct ethhdr *old_eth; 24 - struct ethhdr *new_eth; 25 - 26 - /* Remove the metadata from the buffer */ 27 - old_eth = (struct ethhdr *)skb->data; 28 - new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN); 29 - memmove(new_eth, old_eth, 2 * ETH_ALEN); 30 - /* Ethertype is already in its new place */ 31 - skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN); 32 - } 33 - 34 - #endif /* CONFIG_MLX5_ACCEL */ 35 - 36 - #endif /* __MLX5E_EN_ACCEL_H__ */
-179
drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
··· 1 - /* 2 - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 31 - * 32 - */ 33 - 34 - #include <linux/mlx5/device.h> 35 - 36 - #include "accel/ipsec.h" 37 - #include "mlx5_core.h" 38 - #include "fpga/ipsec.h" 39 - #include "accel/ipsec_offload.h" 40 - 41 - void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) 42 - { 43 - const struct mlx5_accel_ipsec_ops *ipsec_ops; 44 - int err = 0; 45 - 46 - ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ? 
47 - mlx5_ipsec_offload_ops(mdev) : 48 - mlx5_fpga_ipsec_ops(mdev); 49 - 50 - if (!ipsec_ops || !ipsec_ops->init) { 51 - mlx5_core_dbg(mdev, "IPsec ops is not supported\n"); 52 - return; 53 - } 54 - 55 - err = ipsec_ops->init(mdev); 56 - if (err) { 57 - mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err); 58 - return; 59 - } 60 - 61 - mdev->ipsec_ops = ipsec_ops; 62 - } 63 - 64 - void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) 65 - { 66 - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; 67 - 68 - if (!ipsec_ops || !ipsec_ops->cleanup) 69 - return; 70 - 71 - ipsec_ops->cleanup(mdev); 72 - } 73 - 74 - u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) 75 - { 76 - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; 77 - 78 - if (!ipsec_ops || !ipsec_ops->device_caps) 79 - return 0; 80 - 81 - return ipsec_ops->device_caps(mdev); 82 - } 83 - EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps); 84 - 85 - unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev) 86 - { 87 - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; 88 - 89 - if (!ipsec_ops || !ipsec_ops->counters_count) 90 - return -EOPNOTSUPP; 91 - 92 - return ipsec_ops->counters_count(mdev); 93 - } 94 - 95 - int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, 96 - unsigned int count) 97 - { 98 - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; 99 - 100 - if (!ipsec_ops || !ipsec_ops->counters_read) 101 - return -EOPNOTSUPP; 102 - 103 - return ipsec_ops->counters_read(mdev, counters, count); 104 - } 105 - 106 - void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, 107 - struct mlx5_accel_esp_xfrm *xfrm, 108 - u32 *sa_handle) 109 - { 110 - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; 111 - __be32 saddr[4] = {}, daddr[4] = {}; 112 - 113 - if (!ipsec_ops || !ipsec_ops->create_hw_context) 114 - return ERR_PTR(-EOPNOTSUPP); 115 - 116 - if 
(!xfrm->attrs.is_ipv6) { 117 - saddr[3] = xfrm->attrs.saddr.a4; 118 - daddr[3] = xfrm->attrs.daddr.a4; 119 - } else { 120 - memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr)); 121 - memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr)); 122 - } 123 - 124 - return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi, 125 - xfrm->attrs.is_ipv6, sa_handle); 126 - } 127 - 128 - void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) 129 - { 130 - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; 131 - 132 - if (!ipsec_ops || !ipsec_ops->free_hw_context) 133 - return; 134 - 135 - ipsec_ops->free_hw_context(context); 136 - } 137 - 138 - struct mlx5_accel_esp_xfrm * 139 - mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, 140 - const struct mlx5_accel_esp_xfrm_attrs *attrs, 141 - u32 flags) 142 - { 143 - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; 144 - struct mlx5_accel_esp_xfrm *xfrm; 145 - 146 - if (!ipsec_ops || !ipsec_ops->esp_create_xfrm) 147 - return ERR_PTR(-EOPNOTSUPP); 148 - 149 - xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags); 150 - if (IS_ERR(xfrm)) 151 - return xfrm; 152 - 153 - xfrm->mdev = mdev; 154 - return xfrm; 155 - } 156 - EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm); 157 - 158 - void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) 159 - { 160 - const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; 161 - 162 - if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm) 163 - return; 164 - 165 - ipsec_ops->esp_destroy_xfrm(xfrm); 166 - } 167 - EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm); 168 - 169 - int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, 170 - const struct mlx5_accel_esp_xfrm_attrs *attrs) 171 - { 172 - const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; 173 - 174 - if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm) 175 - return -EOPNOTSUPP; 176 - 177 - return ipsec_ops->esp_modify_xfrm(xfrm, attrs); 178 - } 
179 - EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);
-96
drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
··· 1 - /* 2 - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 
31 - * 32 - */ 33 - 34 - #ifndef __MLX5_ACCEL_IPSEC_H__ 35 - #define __MLX5_ACCEL_IPSEC_H__ 36 - 37 - #include <linux/mlx5/driver.h> 38 - #include <linux/mlx5/accel.h> 39 - 40 - #ifdef CONFIG_MLX5_ACCEL 41 - 42 - #define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \ 43 - MLX5_ACCEL_IPSEC_CAP_DEVICE) 44 - 45 - unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev); 46 - int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, 47 - unsigned int count); 48 - 49 - void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, 50 - struct mlx5_accel_esp_xfrm *xfrm, 51 - u32 *sa_handle); 52 - void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); 53 - 54 - void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); 55 - void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); 56 - 57 - struct mlx5_accel_ipsec_ops { 58 - u32 (*device_caps)(struct mlx5_core_dev *mdev); 59 - unsigned int (*counters_count)(struct mlx5_core_dev *mdev); 60 - int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count); 61 - void* (*create_hw_context)(struct mlx5_core_dev *mdev, 62 - struct mlx5_accel_esp_xfrm *xfrm, 63 - const __be32 saddr[4], const __be32 daddr[4], 64 - const __be32 spi, bool is_ipv6, u32 *sa_handle); 65 - void (*free_hw_context)(void *context); 66 - int (*init)(struct mlx5_core_dev *mdev); 67 - void (*cleanup)(struct mlx5_core_dev *mdev); 68 - struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev, 69 - const struct mlx5_accel_esp_xfrm_attrs *attrs, 70 - u32 flags); 71 - int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm, 72 - const struct mlx5_accel_esp_xfrm_attrs *attrs); 73 - void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm); 74 - }; 75 - 76 - #else 77 - 78 - #define MLX5_IPSEC_DEV(mdev) false 79 - 80 - static inline void * 81 - mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, 82 - struct mlx5_accel_esp_xfrm *xfrm, 83 - u32 
*sa_handle) 84 - { 85 - return NULL; 86 - } 87 - 88 - static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {} 89 - 90 - static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {} 91 - 92 - static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {} 93 - 94 - #endif /* CONFIG_MLX5_ACCEL */ 95 - 96 - #endif /* __MLX5_ACCEL_IPSEC_H__ */
+66 -31
drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 - /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ 2 + /* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */ 3 3 4 4 #include "mlx5_core.h" 5 5 #include "ipsec_offload.h" 6 6 #include "lib/mlx5.h" 7 7 #include "en_accel/ipsec_fs.h" 8 - 9 - #define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \ 10 - MLX5_ACCEL_IPSEC_CAP_LSO) 11 8 12 9 struct mlx5_ipsec_sa_ctx { 13 10 struct rhash_head hash; ··· 22 25 struct mlx5_accel_esp_xfrm accel_xfrm; 23 26 }; 24 27 25 - static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev) 28 + u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) 26 29 { 27 - u32 caps = MLX5_IPSEC_DEV_BASIC_CAPS; 30 + u32 caps; 28 31 29 - if (!mlx5_is_ipsec_device(mdev)) 32 + if (!MLX5_CAP_GEN(mdev, ipsec_offload)) 33 + return 0; 34 + 35 + if (!MLX5_CAP_GEN(mdev, log_max_dek)) 36 + return 0; 37 + 38 + if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & 39 + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC)) 40 + return 0; 41 + 42 + if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) || 43 + !MLX5_CAP_ETH(mdev, insert_trailer)) 30 44 return 0; 31 45 32 46 if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) || 33 47 !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt)) 34 48 return 0; 35 49 50 + caps = MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | 51 + MLX5_ACCEL_IPSEC_CAP_LSO; 52 + 36 53 if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) && 37 54 MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt)) 38 55 caps |= MLX5_ACCEL_IPSEC_CAP_ESP; 39 56 40 - if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) { 57 + if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) 41 58 caps |= MLX5_ACCEL_IPSEC_CAP_ESN; 42 - caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN; 43 - } 44 59 45 60 /* We can accommodate up to 2^24 different IPsec objects 46 61 * because we use up to 24 bit in flow table metadata ··· 61 52 WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, 
log_max_ipsec_offload) > 24); 62 53 return caps; 63 54 } 55 + EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps); 64 56 65 57 static int 66 58 mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, ··· 104 94 105 95 static struct mlx5_accel_esp_xfrm * 106 96 mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev, 107 - const struct mlx5_accel_esp_xfrm_attrs *attrs, 108 - u32 flags) 97 + const struct mlx5_accel_esp_xfrm_attrs *attrs) 109 98 { 110 99 struct mlx5_ipsec_esp_xfrm *mxfrm; 111 100 int err = 0; ··· 283 274 mutex_unlock(&mxfrm->lock); 284 275 } 285 276 286 - static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev) 287 - { 288 - return 0; 289 - } 290 - 291 277 static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, 292 278 struct mlx5_ipsec_obj_attrs *attrs, 293 279 u32 ipsec_id) ··· 370 366 return err; 371 367 } 372 368 373 - static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = { 374 - .device_caps = mlx5_ipsec_offload_device_caps, 375 - .create_hw_context = mlx5_ipsec_offload_create_sa_ctx, 376 - .free_hw_context = mlx5_ipsec_offload_delete_sa_ctx, 377 - .init = mlx5_ipsec_offload_init, 378 - .esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm, 379 - .esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm, 380 - .esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm, 381 - }; 382 - 383 - const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) 369 + void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, 370 + struct mlx5_accel_esp_xfrm *xfrm, 371 + u32 *sa_handle) 384 372 { 385 - if (!mlx5_ipsec_offload_device_caps(mdev)) 386 - return NULL; 373 + __be32 saddr[4] = {}, daddr[4] = {}; 387 374 388 - return &ipsec_offload_ops; 375 + if (!xfrm->attrs.is_ipv6) { 376 + saddr[3] = xfrm->attrs.saddr.a4; 377 + daddr[3] = xfrm->attrs.daddr.a4; 378 + } else { 379 + memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr)); 380 + memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr)); 381 + } 382 + 383 + 
return mlx5_ipsec_offload_create_sa_ctx(mdev, xfrm, saddr, daddr, 384 + xfrm->attrs.spi, 385 + xfrm->attrs.is_ipv6, sa_handle); 386 + } 387 + 388 + void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) 389 + { 390 + mlx5_ipsec_offload_delete_sa_ctx(context); 391 + } 392 + 393 + struct mlx5_accel_esp_xfrm * 394 + mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, 395 + const struct mlx5_accel_esp_xfrm_attrs *attrs) 396 + { 397 + struct mlx5_accel_esp_xfrm *xfrm; 398 + 399 + xfrm = mlx5_ipsec_offload_esp_create_xfrm(mdev, attrs); 400 + if (IS_ERR(xfrm)) 401 + return xfrm; 402 + 403 + xfrm->mdev = mdev; 404 + return xfrm; 405 + } 406 + 407 + void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) 408 + { 409 + mlx5_ipsec_offload_esp_destroy_xfrm(xfrm); 410 + } 411 + 412 + int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, 413 + const struct mlx5_accel_esp_xfrm_attrs *attrs) 414 + { 415 + return mlx5_ipsec_offload_esp_modify_xfrm(xfrm, attrs); 389 416 }
-38
drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ 2 - /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ 3 - 4 - #ifndef __MLX5_IPSEC_OFFLOAD_H__ 5 - #define __MLX5_IPSEC_OFFLOAD_H__ 6 - 7 - #include <linux/mlx5/driver.h> 8 - #include "accel/ipsec.h" 9 - 10 - #ifdef CONFIG_MLX5_IPSEC 11 - 12 - const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev); 13 - static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev) 14 - { 15 - if (!MLX5_CAP_GEN(mdev, ipsec_offload)) 16 - return false; 17 - 18 - if (!MLX5_CAP_GEN(mdev, log_max_dek)) 19 - return false; 20 - 21 - if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & 22 - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC)) 23 - return false; 24 - 25 - return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) && 26 - MLX5_CAP_ETH(mdev, insert_trailer); 27 - } 28 - 29 - #else 30 - static inline const struct mlx5_accel_ipsec_ops * 31 - mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; } 32 - static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev) 33 - { 34 - return false; 35 - } 36 - 37 - #endif /* CONFIG_MLX5_IPSEC */ 38 - #endif /* __MLX5_IPSEC_OFFLOAD_H__ */
-125
drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
··· 1 - /* 2 - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 
31 - * 32 - */ 33 - 34 - #include <linux/mlx5/device.h> 35 - 36 - #include "accel/tls.h" 37 - #include "mlx5_core.h" 38 - #include "lib/mlx5.h" 39 - 40 - #ifdef CONFIG_MLX5_FPGA_TLS 41 - #include "fpga/tls.h" 42 - 43 - int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 44 - struct tls_crypto_info *crypto_info, 45 - u32 start_offload_tcp_sn, u32 *p_swid, 46 - bool direction_sx) 47 - { 48 - return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, 49 - start_offload_tcp_sn, p_swid, 50 - direction_sx); 51 - } 52 - 53 - void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, 54 - bool direction_sx) 55 - { 56 - mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx); 57 - } 58 - 59 - int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, 60 - u32 seq, __be64 rcd_sn) 61 - { 62 - return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn); 63 - } 64 - 65 - bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) 66 - { 67 - return mlx5_fpga_is_tls_device(mdev) || 68 - mlx5_accel_is_ktls_device(mdev); 69 - } 70 - 71 - u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) 72 - { 73 - return mlx5_fpga_tls_device_caps(mdev); 74 - } 75 - 76 - int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) 77 - { 78 - return mlx5_fpga_tls_init(mdev); 79 - } 80 - 81 - void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) 82 - { 83 - mlx5_fpga_tls_cleanup(mdev); 84 - } 85 - #endif 86 - 87 - #ifdef CONFIG_MLX5_TLS 88 - int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, 89 - struct tls_crypto_info *crypto_info, 90 - u32 *p_key_id) 91 - { 92 - u32 sz_bytes; 93 - void *key; 94 - 95 - switch (crypto_info->cipher_type) { 96 - case TLS_CIPHER_AES_GCM_128: { 97 - struct tls12_crypto_info_aes_gcm_128 *info = 98 - (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; 99 - 100 - key = info->key; 101 - sz_bytes = sizeof(info->key); 102 - break; 103 - } 104 - case TLS_CIPHER_AES_GCM_256: { 105 - struct tls12_crypto_info_aes_gcm_256 *info = 106 - 
(struct tls12_crypto_info_aes_gcm_256 *)crypto_info; 107 - 108 - key = info->key; 109 - sz_bytes = sizeof(info->key); 110 - break; 111 - } 112 - default: 113 - return -EINVAL; 114 - } 115 - 116 - return mlx5_create_encryption_key(mdev, key, sz_bytes, 117 - MLX5_ACCEL_OBJ_TLS_KEY, 118 - p_key_id); 119 - } 120 - 121 - void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) 122 - { 123 - mlx5_destroy_encryption_key(mdev, key_id); 124 - } 125 - #endif
-156
drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
··· 1 - /* 2 - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 
31 - * 32 - */ 33 - 34 - #ifndef __MLX5_ACCEL_TLS_H__ 35 - #define __MLX5_ACCEL_TLS_H__ 36 - 37 - #include <linux/mlx5/driver.h> 38 - #include <linux/tls.h> 39 - 40 - #ifdef CONFIG_MLX5_TLS 41 - int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, 42 - struct tls_crypto_info *crypto_info, 43 - u32 *p_key_id); 44 - void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id); 45 - 46 - static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev) 47 - { 48 - return MLX5_CAP_GEN(mdev, tls_tx); 49 - } 50 - 51 - static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev) 52 - { 53 - return MLX5_CAP_GEN(mdev, tls_rx); 54 - } 55 - 56 - static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) 57 - { 58 - if (!mlx5_accel_is_ktls_tx(mdev) && 59 - !mlx5_accel_is_ktls_rx(mdev)) 60 - return false; 61 - 62 - if (!MLX5_CAP_GEN(mdev, log_max_dek)) 63 - return false; 64 - 65 - return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); 66 - } 67 - 68 - static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, 69 - struct tls_crypto_info *crypto_info) 70 - { 71 - switch (crypto_info->cipher_type) { 72 - case TLS_CIPHER_AES_GCM_128: 73 - if (crypto_info->version == TLS_1_2_VERSION) 74 - return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); 75 - break; 76 - } 77 - 78 - return false; 79 - } 80 - #else 81 - static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev) 82 - { return false; } 83 - 84 - static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev) 85 - { return false; } 86 - 87 - static inline int 88 - mlx5_ktls_create_key(struct mlx5_core_dev *mdev, 89 - struct tls_crypto_info *crypto_info, 90 - u32 *p_key_id) { return -ENOTSUPP; } 91 - static inline void 92 - mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {} 93 - 94 - static inline bool 95 - mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; } 96 - static inline bool 97 - mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, 98 - struct 
tls_crypto_info *crypto_info) { return false; } 99 - #endif 100 - 101 - enum { 102 - MLX5_ACCEL_TLS_TX = BIT(0), 103 - MLX5_ACCEL_TLS_RX = BIT(1), 104 - MLX5_ACCEL_TLS_V12 = BIT(2), 105 - MLX5_ACCEL_TLS_V13 = BIT(3), 106 - MLX5_ACCEL_TLS_LRO = BIT(4), 107 - MLX5_ACCEL_TLS_IPV6 = BIT(5), 108 - MLX5_ACCEL_TLS_AES_GCM128 = BIT(30), 109 - MLX5_ACCEL_TLS_AES_GCM256 = BIT(31), 110 - }; 111 - 112 - struct mlx5_ifc_tls_flow_bits { 113 - u8 src_port[0x10]; 114 - u8 dst_port[0x10]; 115 - union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; 116 - union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; 117 - u8 ipv6[0x1]; 118 - u8 direction_sx[0x1]; 119 - u8 reserved_at_2[0x1e]; 120 - }; 121 - 122 - #ifdef CONFIG_MLX5_FPGA_TLS 123 - int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 124 - struct tls_crypto_info *crypto_info, 125 - u32 start_offload_tcp_sn, u32 *p_swid, 126 - bool direction_sx); 127 - void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, 128 - bool direction_sx); 129 - int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, 130 - u32 seq, __be64 rcd_sn); 131 - bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev); 132 - u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev); 133 - int mlx5_accel_tls_init(struct mlx5_core_dev *mdev); 134 - void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev); 135 - 136 - #else 137 - 138 - static inline int 139 - mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 140 - struct tls_crypto_info *crypto_info, 141 - u32 start_offload_tcp_sn, u32 *p_swid, 142 - bool direction_sx) { return -ENOTSUPP; } 143 - static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, 144 - bool direction_sx) { } 145 - static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, 146 - u32 seq, __be64 rcd_sn) { return 0; } 147 - static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) 148 - { 149 - 
return mlx5_accel_is_ktls_device(mdev); 150 - } 151 - static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; } 152 - static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; } 153 - static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { } 154 - #endif 155 - 156 - #endif /* __MLX5_ACCEL_TLS_H__ */
-1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 354 354 MLX5E_RQ_STATE_AM, 355 355 MLX5E_RQ_STATE_NO_CSUM_COMPLETE, 356 356 MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */ 357 - MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */ 358 357 MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */ 359 358 MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */ 360 359 };
+6 -13
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
··· 5 5 #include "en/txrx.h" 6 6 #include "en/port.h" 7 7 #include "en_accel/en_accel.h" 8 - #include "accel/ipsec.h" 9 - #include "fpga/ipsec.h" 8 + #include "en_accel/ipsec_offload.h" 10 9 11 10 static bool mlx5e_rx_is_xdp(struct mlx5e_params *params, 12 11 struct mlx5e_xsk_param *xsk) ··· 206 207 bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); 207 208 u16 stop_room; 208 209 209 - stop_room = mlx5e_tls_get_stop_room(mdev, params); 210 + stop_room = mlx5e_ktls_get_stop_room(mdev, params); 210 211 stop_room += mlx5e_stop_room_for_max_wqe(mdev); 211 212 if (is_mpwqe) 212 213 /* A MPWQE can take up to the maximum-sized WQE + all the normal ··· 326 327 if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) 327 328 return false; 328 329 329 - if (mlx5_fpga_is_ipsec_device(mdev)) 330 - return false; 331 - 332 330 if (params->xdp_prog) { 333 331 /* XSK params are not considered here. If striding RQ is in use, 334 332 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will ··· 418 422 u16 headroom; 419 423 int max_mtu; 420 424 int i; 421 - 422 - if (mlx5_fpga_is_ipsec_device(mdev)) 423 - byte_count += MLX5E_METADATA_ETHER_LEN; 424 425 425 426 if (mlx5e_rx_is_linear_skb(params, xsk)) { 426 427 int frag_stride; ··· 689 696 void *wq = MLX5_ADDR_OF(sqc, sqc, wq); 690 697 bool allow_swp; 691 698 692 - allow_swp = mlx5_geneve_tx_allowed(mdev) || 693 - !!MLX5_IPSEC_DEV(mdev); 699 + allow_swp = 700 + mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev); 694 701 mlx5e_build_sq_param_common(mdev, param); 695 702 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); 696 703 MLX5_SET(sqc, sqc, allow_swp, allow_swp); ··· 797 804 798 805 static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev) 799 806 { 800 - if (mlx5e_accel_is_ktls_rx(mdev)) 807 + if (mlx5e_is_ktls_rx(mdev)) 801 808 return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; 802 809 803 810 return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; ··· 826 833 827 834 mlx5e_build_sq_param_common(mdev, param); 
828 835 param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */ 829 - param->is_tls = mlx5e_accel_is_ktls_rx(mdev); 836 + param->is_tls = mlx5e_is_ktls_rx(mdev); 830 837 if (param->is_tls) 831 838 param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */ 832 839 MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
+6 -5
drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
··· 37 37 #include <linux/skbuff.h> 38 38 #include <linux/netdevice.h> 39 39 #include "en_accel/ipsec_rxtx.h" 40 - #include "en_accel/tls.h" 41 - #include "en_accel/tls_rxtx.h" 40 + #include "en_accel/ktls.h" 41 + #include "en_accel/ktls_txrx.h" 42 42 #include "en.h" 43 43 #include "en/txrx.h" 44 44 ··· 124 124 125 125 #ifdef CONFIG_MLX5_EN_TLS 126 126 /* May send SKBs and WQEs. */ 127 - if (mlx5e_tls_skb_offloaded(skb)) 128 - if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls))) 127 + if (mlx5e_ktls_skb_offloaded(skb)) 128 + if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb, 129 + &state->tls))) 129 130 return false; 130 131 #endif 131 132 ··· 175 174 struct mlx5_wqe_inline_seg *inlseg) 176 175 { 177 176 #ifdef CONFIG_MLX5_EN_TLS 178 - mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls); 177 + mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls); 179 178 #endif 180 179 181 180 #ifdef CONFIG_MLX5_EN_IPSEC
+7 -23
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
··· 226 226 return -EINVAL; 227 227 } 228 228 if (x->props.flags & XFRM_STATE_ESN && 229 - !(mlx5_accel_ipsec_device_caps(priv->mdev) & 230 - MLX5_ACCEL_IPSEC_CAP_ESN)) { 229 + !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_ESN)) { 231 230 netdev_info(netdev, "Cannot offload ESN xfrm states\n"); 232 231 return -EINVAL; 233 232 } ··· 274 275 return -EINVAL; 275 276 } 276 277 if (x->props.family == AF_INET6 && 277 - !(mlx5_accel_ipsec_device_caps(priv->mdev) & 278 - MLX5_ACCEL_IPSEC_CAP_IPV6)) { 278 + !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_IPV6)) { 279 279 netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n"); 280 280 return -EINVAL; 281 281 } ··· 284 286 static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv, 285 287 struct mlx5e_ipsec_sa_entry *sa_entry) 286 288 { 287 - if (!mlx5_is_ipsec_device(priv->mdev)) 288 - return 0; 289 - 290 289 return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs, 291 290 sa_entry->ipsec_obj_id, 292 291 &sa_entry->ipsec_rule); ··· 292 297 static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv, 293 298 struct mlx5e_ipsec_sa_entry *sa_entry) 294 299 { 295 - if (!mlx5_is_ipsec_device(priv->mdev)) 296 - return; 297 - 298 300 mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs, 299 301 &sa_entry->ipsec_rule); 300 302 } ··· 325 333 326 334 /* create xfrm */ 327 335 mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs); 328 - sa_entry->xfrm = 329 - mlx5_accel_esp_create_xfrm(priv->mdev, &attrs, 330 - MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA); 336 + sa_entry->xfrm = mlx5_accel_esp_create_xfrm(priv->mdev, &attrs); 331 337 if (IS_ERR(sa_entry->xfrm)) { 332 338 err = PTR_ERR(sa_entry->xfrm); 333 339 goto err_sa_entry; ··· 404 414 { 405 415 struct mlx5e_ipsec *ipsec = NULL; 406 416 407 - if (!MLX5_IPSEC_DEV(priv->mdev)) { 417 + if (!mlx5_ipsec_device_caps(priv->mdev)) { 408 418 netdev_dbg(priv->netdev, "Not an IPSec offload device\n"); 409 419 return 0; 410 420 } ··· 
415 425 416 426 hash_init(ipsec->sadb_rx); 417 427 spin_lock_init(&ipsec->sadb_rx_lock); 418 - ida_init(&ipsec->halloc); 419 428 ipsec->en_priv = priv; 420 - ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) & 421 - MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER); 422 429 ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0, 423 430 priv->netdev->name); 424 431 if (!ipsec->wq) { ··· 439 452 mlx5e_accel_ipsec_fs_cleanup(priv); 440 453 destroy_workqueue(ipsec->wq); 441 454 442 - ida_destroy(&ipsec->halloc); 443 455 kfree(ipsec); 444 456 priv->ipsec = NULL; 445 457 } ··· 517 531 struct mlx5_core_dev *mdev = priv->mdev; 518 532 struct net_device *netdev = priv->netdev; 519 533 520 - if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) || 534 + if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) || 521 535 !MLX5_CAP_ETH(mdev, swp)) { 522 536 mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n"); 523 537 return; ··· 536 550 netdev->features |= NETIF_F_HW_ESP_TX_CSUM; 537 551 netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM; 538 552 539 - if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) || 553 + if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) || 540 554 !MLX5_CAP_ETH(mdev, swp_lso)) { 541 555 mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n"); 542 556 return; 543 557 } 544 558 545 - if (mlx5_is_ipsec_device(mdev)) 546 - netdev->gso_partial_features |= NETIF_F_GSO_ESP; 547 - 559 + netdev->gso_partial_features |= NETIF_F_GSO_ESP; 548 560 mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n"); 549 561 netdev->features |= NETIF_F_GSO_ESP; 550 562 netdev->hw_features |= NETIF_F_GSO_ESP;
+2 -29
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
··· 40 40 #include <net/xfrm.h> 41 41 #include <linux/idr.h> 42 42 43 - #include "accel/ipsec.h" 43 + #include "ipsec_offload.h" 44 44 45 45 #define MLX5E_IPSEC_SADB_RX_BITS 10 46 46 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L ··· 55 55 atomic64_t ipsec_tx_drop_no_state; 56 56 atomic64_t ipsec_tx_drop_not_ip; 57 57 atomic64_t ipsec_tx_drop_trailer; 58 - atomic64_t ipsec_tx_drop_metadata; 59 - }; 60 - 61 - struct mlx5e_ipsec_stats { 62 - u64 ipsec_dec_in_packets; 63 - u64 ipsec_dec_out_packets; 64 - u64 ipsec_dec_bypass_packets; 65 - u64 ipsec_enc_in_packets; 66 - u64 ipsec_enc_out_packets; 67 - u64 ipsec_enc_bypass_packets; 68 - u64 ipsec_dec_drop_packets; 69 - u64 ipsec_dec_auth_fail_packets; 70 - u64 ipsec_enc_drop_packets; 71 - u64 ipsec_add_sa_success; 72 - u64 ipsec_add_sa_fail; 73 - u64 ipsec_del_sa_success; 74 - u64 ipsec_del_sa_fail; 75 - u64 ipsec_cmd_drop; 76 58 }; 77 59 78 60 struct mlx5e_accel_fs_esp; ··· 63 81 struct mlx5e_ipsec { 64 82 struct mlx5e_priv *en_priv; 65 83 DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS); 66 - bool no_trailer; 67 - spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */ 68 - struct ida halloc; 84 + spinlock_t sadb_rx_lock; /* Protects sadb_rx */ 69 85 struct mlx5e_ipsec_sw_stats sw_stats; 70 - struct mlx5e_ipsec_stats stats; 71 86 struct workqueue_struct *wq; 72 87 struct mlx5e_accel_fs_esp *rx_fs; 73 88 struct mlx5e_ipsec_tx *tx_fs; ··· 95 116 struct mlx5e_ipsec_rule ipsec_rule; 96 117 }; 97 118 98 - void mlx5e_ipsec_build_inverse_table(void); 99 119 int mlx5e_ipsec_init(struct mlx5e_priv *priv); 100 120 void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv); 101 121 void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv); ··· 103 125 unsigned int handle); 104 126 105 127 #else 106 - 107 - static inline void mlx5e_ipsec_build_inverse_table(void) 108 - { 109 - } 110 - 111 128 static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv) 112 129 { 113 130 return 0;
+1 -4
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
··· 2 2 /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ 3 3 4 4 #include <linux/netdevice.h> 5 - #include "accel/ipsec_offload.h" 5 + #include "ipsec_offload.h" 6 6 #include "ipsec_fs.h" 7 7 #include "fs_core.h" 8 8 ··· 699 699 int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) 700 700 { 701 701 int err; 702 - 703 - if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec) 704 - return -EOPNOTSUPP; 705 702 706 703 err = fs_init_tx(priv); 707 704 if (err)
+1 -6
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
··· 6 6 7 7 #include "en.h" 8 8 #include "ipsec.h" 9 - #include "accel/ipsec_offload.h" 9 + #include "ipsec_offload.h" 10 10 #include "en/fs.h" 11 11 12 - #ifdef CONFIG_MLX5_EN_IPSEC 13 12 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv); 14 13 int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv); 15 14 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, ··· 18 19 void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, 19 20 struct mlx5_accel_esp_xfrm_attrs *attrs, 20 21 struct mlx5e_ipsec_rule *ipsec_rule); 21 - #else 22 - static inline void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) {} 23 - static inline int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { return 0; } 24 - #endif 25 22 #endif /* __MLX5_IPSEC_STEERING_H__ */
+14
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ 2 + /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ 3 + 4 + #ifndef __MLX5_IPSEC_OFFLOAD_H__ 5 + #define __MLX5_IPSEC_OFFLOAD_H__ 6 + 7 + #include <linux/mlx5/driver.h> 8 + #include <linux/mlx5/accel.h> 9 + 10 + void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, 11 + struct mlx5_accel_esp_xfrm *xfrm, 12 + u32 *sa_handle); 13 + void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); 14 + #endif /* __MLX5_IPSEC_OFFLOAD_H__ */
+20 -225
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
··· 34 34 #include <crypto/aead.h> 35 35 #include <net/xfrm.h> 36 36 #include <net/esp.h> 37 - #include "accel/ipsec_offload.h" 37 + #include "ipsec_offload.h" 38 38 #include "en_accel/ipsec_rxtx.h" 39 39 #include "en_accel/ipsec.h" 40 - #include "accel/accel.h" 41 40 #include "en.h" 42 - 43 - enum { 44 - MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11, 45 - MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12, 46 - MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17, 47 - }; 48 - 49 - struct mlx5e_ipsec_rx_metadata { 50 - unsigned char nexthdr; 51 - __be32 sa_handle; 52 - } __packed; 53 41 54 42 enum { 55 43 MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8, 56 44 MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9, 57 45 }; 58 - 59 - struct mlx5e_ipsec_tx_metadata { 60 - __be16 mss_inv; /* 1/MSS in 16bit fixed point, only for LSO */ 61 - __be16 seq; /* LSBs of the first TCP seq, only for LSO */ 62 - u8 esp_next_proto; /* Next protocol of ESP */ 63 - } __packed; 64 - 65 - struct mlx5e_ipsec_metadata { 66 - unsigned char syndrome; 67 - union { 68 - unsigned char raw[5]; 69 - /* from FPGA to host, on successful decrypt */ 70 - struct mlx5e_ipsec_rx_metadata rx; 71 - /* from host to FPGA */ 72 - struct mlx5e_ipsec_tx_metadata tx; 73 - } __packed content; 74 - /* packet type ID field */ 75 - __be16 ethertype; 76 - } __packed; 77 - 78 - #define MAX_LSO_MSS 2048 79 - 80 - /* Pre-calculated (Q0.16) fixed-point inverse 1/x function */ 81 - static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS]; 82 - 83 - static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb) 84 - { 85 - return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size]; 86 - } 87 - 88 - static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb) 89 - { 90 - struct mlx5e_ipsec_metadata *mdata; 91 - struct ethhdr *eth; 92 - 93 - if (unlikely(skb_cow_head(skb, sizeof(*mdata)))) 94 - return ERR_PTR(-ENOMEM); 95 - 96 - eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata)); 97 - skb->mac_header -= sizeof(*mdata); 98 - mdata = 
(struct mlx5e_ipsec_metadata *)(eth + 1); 99 - 100 - memmove(skb->data, skb->data + sizeof(*mdata), 101 - 2 * ETH_ALEN); 102 - 103 - eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE); 104 - 105 - memset(mdata->content.raw, 0, sizeof(mdata->content.raw)); 106 - return mdata; 107 - } 108 46 109 47 static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x) 110 48 { ··· 182 244 skb_store_bits(skb, iv_offset, &seqno, 8); 183 245 } 184 246 185 - static void mlx5e_ipsec_set_metadata(struct sk_buff *skb, 186 - struct mlx5e_ipsec_metadata *mdata, 187 - struct xfrm_offload *xo) 188 - { 189 - struct ip_esp_hdr *esph; 190 - struct tcphdr *tcph; 191 - 192 - if (skb_is_gso(skb)) { 193 - /* Add LSO metadata indication */ 194 - esph = ip_esp_hdr(skb); 195 - tcph = inner_tcp_hdr(skb); 196 - netdev_dbg(skb->dev, " Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n", 197 - skb->network_header, 198 - skb->transport_header, 199 - skb->inner_network_header, 200 - skb->inner_transport_header); 201 - netdev_dbg(skb->dev, " Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n", 202 - skb->len, skb_shinfo(skb)->gso_size, 203 - ntohs(tcph->source), ntohs(tcph->dest), 204 - ntohl(tcph->seq), ntohl(esph->seq_no)); 205 - mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP; 206 - mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb); 207 - mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF); 208 - } else { 209 - mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD; 210 - } 211 - mdata->content.tx.esp_next_proto = xo->proto; 212 - 213 - netdev_dbg(skb->dev, " TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n", 214 - mdata->syndrome, mdata->content.tx.esp_next_proto, 215 - ntohs(mdata->content.tx.mss_inv), 216 - ntohs(mdata->content.tx.seq)); 217 - } 218 - 219 247 void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe, 220 248 struct mlx5e_accel_tx_ipsec_state *ipsec_st, 221 249 struct mlx5_wqe_inline_seg 
*inlseg) ··· 202 298 203 299 ipsec_st->x = x; 204 300 ipsec_st->xo = xo; 205 - if (mlx5_is_ipsec_device(priv->mdev)) { 206 - aead = x->data; 207 - alen = crypto_aead_authsize(aead); 208 - blksize = ALIGN(crypto_aead_blocksize(aead), 4); 209 - clen = ALIGN(skb->len + 2, blksize); 210 - plen = max_t(u32, clen - skb->len, 4); 211 - tailen = plen + alen; 212 - ipsec_st->plen = plen; 213 - ipsec_st->tailen = tailen; 214 - } 301 + aead = x->data; 302 + alen = crypto_aead_authsize(aead); 303 + blksize = ALIGN(crypto_aead_blocksize(aead), 4); 304 + clen = ALIGN(skb->len + 2, blksize); 305 + plen = max_t(u32, clen - skb->len, 4); 306 + tailen = plen + alen; 307 + ipsec_st->plen = plen; 308 + ipsec_st->tailen = tailen; 215 309 216 310 return 0; 217 311 } ··· 242 340 ((struct iphdr *)skb_network_header(skb))->protocol : 243 341 ((struct ipv6hdr *)skb_network_header(skb))->nexthdr; 244 342 245 - if (mlx5_is_ipsec_device(priv->mdev)) { 246 - eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC); 247 - eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER); 248 - encap = x->encap; 249 - if (!encap) { 250 - eseg->trailer |= (l3_proto == IPPROTO_ESP) ? 251 - cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) : 252 - cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC); 253 - } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) { 254 - eseg->trailer |= (l3_proto == IPPROTO_ESP) ? 255 - cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) : 256 - cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC); 257 - } 343 + eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC); 344 + eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER); 345 + encap = x->encap; 346 + if (!encap) { 347 + eseg->trailer |= (l3_proto == IPPROTO_ESP) ? 
348 + cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) : 349 + cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC); 350 + } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) { 351 + eseg->trailer |= (l3_proto == IPPROTO_ESP) ? 352 + cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) : 353 + cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC); 258 354 } 259 355 } 260 356 ··· 263 363 struct mlx5e_priv *priv = netdev_priv(netdev); 264 364 struct xfrm_offload *xo = xfrm_offload(skb); 265 365 struct mlx5e_ipsec_sa_entry *sa_entry; 266 - struct mlx5e_ipsec_metadata *mdata; 267 366 struct xfrm_state *x; 268 367 struct sec_path *sp; 269 368 ··· 291 392 goto drop; 292 393 } 293 394 294 - if (MLX5_CAP_GEN(priv->mdev, fpga)) { 295 - mdata = mlx5e_ipsec_add_metadata(skb); 296 - if (IS_ERR(mdata)) { 297 - atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata); 298 - goto drop; 299 - } 300 - } 301 - 302 395 sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; 303 396 sa_entry->set_iv_op(skb, x, xo); 304 - if (MLX5_CAP_GEN(priv->mdev, fpga)) 305 - mlx5e_ipsec_set_metadata(skb, mdata, xo); 306 - 307 397 mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st); 308 398 309 399 return true; ··· 300 412 drop: 301 413 kfree_skb(skb); 302 414 return false; 303 - } 304 - 305 - static inline struct xfrm_state * 306 - mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb, 307 - struct mlx5e_ipsec_metadata *mdata) 308 - { 309 - struct mlx5e_priv *priv = netdev_priv(netdev); 310 - struct xfrm_offload *xo; 311 - struct xfrm_state *xs; 312 - struct sec_path *sp; 313 - u32 sa_handle; 314 - 315 - sp = secpath_set(skb); 316 - if (unlikely(!sp)) { 317 - atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc); 318 - return NULL; 319 - } 320 - 321 - sa_handle = be32_to_cpu(mdata->content.rx.sa_handle); 322 - xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle); 323 - if (unlikely(!xs)) { 324 - atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss); 325 - 
return NULL; 326 - } 327 - 328 - sp = skb_sec_path(skb); 329 - sp->xvec[sp->len++] = xs; 330 - sp->olen++; 331 - 332 - xo = xfrm_offload(skb); 333 - xo->flags = CRYPTO_DONE; 334 - switch (mdata->syndrome) { 335 - case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED: 336 - xo->status = CRYPTO_SUCCESS; 337 - if (likely(priv->ipsec->no_trailer)) { 338 - xo->flags |= XFRM_ESP_NO_TRAILER; 339 - xo->proto = mdata->content.rx.nexthdr; 340 - } 341 - break; 342 - case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED: 343 - xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED; 344 - break; 345 - case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO: 346 - xo->status = CRYPTO_INVALID_PROTOCOL; 347 - break; 348 - default: 349 - atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome); 350 - return NULL; 351 - } 352 - return xs; 353 - } 354 - 355 - struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, 356 - struct sk_buff *skb, u32 *cqe_bcnt) 357 - { 358 - struct mlx5e_ipsec_metadata *mdata; 359 - struct xfrm_state *xs; 360 - 361 - if (!is_metadata_hdr_valid(skb)) 362 - return skb; 363 - 364 - /* Use the metadata */ 365 - mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN); 366 - xs = mlx5e_ipsec_build_sp(netdev, skb, mdata); 367 - if (unlikely(!xs)) { 368 - kfree_skb(skb); 369 - return NULL; 370 - } 371 - 372 - remove_metadata_hdr(skb); 373 - *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; 374 - 375 - return skb; 376 415 } 377 416 378 417 enum { ··· 343 528 switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) { 344 529 case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED: 345 530 xo->status = CRYPTO_SUCCESS; 346 - if (WARN_ON_ONCE(priv->ipsec->no_trailer)) 347 - xo->flags |= XFRM_ESP_NO_TRAILER; 348 531 break; 349 532 case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED: 350 533 xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED; ··· 352 539 break; 353 540 default: 354 541 atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome); 355 - } 356 - } 357 - 358 - void mlx5e_ipsec_build_inverse_table(void) 359 - { 360 - u16 
mss_inv; 361 - u32 mss; 362 - 363 - /* Calculate 1/x inverse table for use in GSO data path. 364 - * Using this table, we provide the IPSec accelerator with the value of 365 - * 1/gso_size so that it can infer the position of each segment inside 366 - * the GSO, and increment the ESP sequence number, and generate the IV. 367 - * The HW needs this value in Q0.16 fixed-point number format 368 - */ 369 - mlx5e_ipsec_inverse_table[1] = htons(0xFFFF); 370 - for (mss = 2; mss < MAX_LSO_MSS; mss++) { 371 - mss_inv = div_u64(1ULL << 32, mss) >> 16; 372 - mlx5e_ipsec_inverse_table[mss] = htons(mss_inv); 373 542 } 374 543 }
-3
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
··· 53 53 54 54 #ifdef CONFIG_MLX5_EN_IPSEC 55 55 56 - struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, 57 - struct sk_buff *skb, u32 *cqe_bcnt); 58 - 59 56 void mlx5e_ipsec_inverse_table_init(void); 60 57 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x, 61 58 struct xfrm_offload *xo);
+1 -62
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
··· 35 35 #include <net/sock.h> 36 36 37 37 #include "en.h" 38 - #include "accel/ipsec.h" 38 + #include "ipsec_offload.h" 39 39 #include "fpga/sdk.h" 40 40 #include "en_accel/ipsec.h" 41 - #include "fpga/ipsec.h" 42 - 43 - static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = { 44 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) }, 45 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_out_packets) }, 46 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_bypass_packets) }, 47 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_in_packets) }, 48 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_out_packets) }, 49 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_bypass_packets) }, 50 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_drop_packets) }, 51 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_auth_fail_packets) }, 52 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_drop_packets) }, 53 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_success) }, 54 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_fail) }, 55 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_success) }, 56 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_fail) }, 57 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_cmd_drop) }, 58 - }; 59 41 60 42 static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = { 61 43 { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) }, ··· 47 65 { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) }, 48 66 { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) }, 49 67 { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_trailer) }, 50 - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_metadata) }, 51 68 }; 52 69 53 70 #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \ 54 71 atomic64_read((atomic64_t *)((char *)(ptr) + 
(dsc)[i].offset)) 55 72 56 - #define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc) 57 73 #define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc) 58 74 59 75 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw) ··· 83 103 return idx; 84 104 } 85 105 86 - static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw) 87 - { 88 - return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0; 89 - } 90 - 91 - static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw) 92 - { 93 - int ret = 0; 94 - 95 - if (priv->ipsec) 96 - ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats, 97 - NUM_IPSEC_HW_COUNTERS); 98 - if (ret) 99 - memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats)); 100 - } 101 - 102 - static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw) 103 - { 104 - unsigned int i; 105 - 106 - if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) 107 - for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++) 108 - strcpy(data + (idx++) * ETH_GSTRING_LEN, 109 - mlx5e_ipsec_hw_stats_desc[i].format); 110 - 111 - return idx; 112 - } 113 - 114 - static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw) 115 - { 116 - int i; 117 - 118 - if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) 119 - for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++) 120 - data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats, 121 - mlx5e_ipsec_hw_stats_desc, 122 - i); 123 - return idx; 124 - } 125 - 126 106 MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0); 127 - MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
+65 -6
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
··· 2 2 // Copyright (c) 2019 Mellanox Technologies. 3 3 4 4 #include "en.h" 5 - #include "en_accel/tls.h" 5 + #include "lib/mlx5.h" 6 6 #include "en_accel/ktls.h" 7 7 #include "en_accel/ktls_utils.h" 8 8 #include "en_accel/fs_tcp.h" 9 + 10 + int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, 11 + struct tls_crypto_info *crypto_info, 12 + u32 *p_key_id) 13 + { 14 + u32 sz_bytes; 15 + void *key; 16 + 17 + switch (crypto_info->cipher_type) { 18 + case TLS_CIPHER_AES_GCM_128: { 19 + struct tls12_crypto_info_aes_gcm_128 *info = 20 + (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; 21 + 22 + key = info->key; 23 + sz_bytes = sizeof(info->key); 24 + break; 25 + } 26 + case TLS_CIPHER_AES_GCM_256: { 27 + struct tls12_crypto_info_aes_gcm_256 *info = 28 + (struct tls12_crypto_info_aes_gcm_256 *)crypto_info; 29 + 30 + key = info->key; 31 + sz_bytes = sizeof(info->key); 32 + break; 33 + } 34 + default: 35 + return -EINVAL; 36 + } 37 + 38 + return mlx5_create_encryption_key(mdev, key, sz_bytes, 39 + MLX5_ACCEL_OBJ_TLS_KEY, 40 + p_key_id); 41 + } 42 + 43 + void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) 44 + { 45 + mlx5_destroy_encryption_key(mdev, key_id); 46 + } 9 47 10 48 static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, 11 49 enum tls_offload_ctx_dir direction, ··· 97 59 struct net_device *netdev = priv->netdev; 98 60 struct mlx5_core_dev *mdev = priv->mdev; 99 61 100 - if (!mlx5e_accel_is_ktls_tx(mdev) && !mlx5e_accel_is_ktls_rx(mdev)) 62 + if (!mlx5e_is_ktls_tx(mdev) && !mlx5e_is_ktls_rx(mdev)) 101 63 return; 102 64 103 - if (mlx5e_accel_is_ktls_tx(mdev)) { 65 + if (mlx5e_is_ktls_tx(mdev)) { 104 66 netdev->hw_features |= NETIF_F_HW_TLS_TX; 105 67 netdev->features |= NETIF_F_HW_TLS_TX; 106 68 } 107 69 108 - if (mlx5e_accel_is_ktls_rx(mdev)) 70 + if (mlx5e_is_ktls_rx(mdev)) 109 71 netdev->hw_features |= NETIF_F_HW_TLS_RX; 110 72 111 73 netdev->tlsdev_ops = &mlx5e_ktls_ops; ··· 130 92 { 131 93 int err; 132 94 133 - if 
(!mlx5e_accel_is_ktls_rx(priv->mdev)) 95 + if (!mlx5e_is_ktls_rx(priv->mdev)) 134 96 return 0; 135 97 136 98 priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx"); ··· 150 112 151 113 void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv) 152 114 { 153 - if (!mlx5e_accel_is_ktls_rx(priv->mdev)) 115 + if (!mlx5e_is_ktls_rx(priv->mdev)) 154 116 return; 155 117 156 118 if (priv->netdev->features & NETIF_F_HW_TLS_RX) 157 119 mlx5e_accel_fs_tcp_destroy(priv); 158 120 159 121 destroy_workqueue(priv->tls->rx_wq); 122 + } 123 + 124 + int mlx5e_ktls_init(struct mlx5e_priv *priv) 125 + { 126 + struct mlx5e_tls *tls; 127 + 128 + if (!mlx5e_is_ktls_device(priv->mdev)) 129 + return 0; 130 + 131 + tls = kzalloc(sizeof(*tls), GFP_KERNEL); 132 + if (!tls) 133 + return -ENOMEM; 134 + 135 + priv->tls = tls; 136 + return 0; 137 + } 138 + 139 + void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) 140 + { 141 + kfree(priv->tls); 142 + priv->tls = NULL; 160 143 }
+71 -15
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
··· 4 4 #ifndef __MLX5E_KTLS_H__ 5 5 #define __MLX5E_KTLS_H__ 6 6 7 + #include <linux/tls.h> 8 + #include <net/tls.h> 7 9 #include "en.h" 8 10 9 11 #ifdef CONFIG_MLX5_EN_TLS 12 + int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, 13 + struct tls_crypto_info *crypto_info, 14 + u32 *p_key_id); 15 + void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id); 16 + 17 + static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev) 18 + { 19 + if (is_kdump_kernel()) 20 + return false; 21 + 22 + if (!MLX5_CAP_GEN(mdev, tls_tx) && !MLX5_CAP_GEN(mdev, tls_rx)) 23 + return false; 24 + 25 + if (!MLX5_CAP_GEN(mdev, log_max_dek)) 26 + return false; 27 + 28 + return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); 29 + } 30 + 31 + static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, 32 + struct tls_crypto_info *crypto_info) 33 + { 34 + switch (crypto_info->cipher_type) { 35 + case TLS_CIPHER_AES_GCM_128: 36 + if (crypto_info->version == TLS_1_2_VERSION) 37 + return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); 38 + break; 39 + } 40 + 41 + return false; 42 + } 10 43 11 44 void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv); 12 45 int mlx5e_ktls_init_rx(struct mlx5e_priv *priv); ··· 49 16 mlx5e_ktls_rx_resync_create_resp_list(void); 50 17 void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list); 51 18 52 - static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) 19 + static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev) 53 20 { 54 - return !is_kdump_kernel() && 55 - mlx5_accel_is_ktls_tx(mdev); 21 + return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx); 56 22 } 57 23 58 - static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) 24 + static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev) 59 25 { 60 - return !is_kdump_kernel() && 61 - mlx5_accel_is_ktls_rx(mdev); 26 + return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_rx); 62 27 } 63 28 64 - static inline bool 
mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) 65 - { 66 - return !is_kdump_kernel() && 67 - mlx5_accel_is_ktls_device(mdev); 68 - } 29 + struct mlx5e_tls_sw_stats { 30 + atomic64_t tx_tls_ctx; 31 + atomic64_t tx_tls_del; 32 + atomic64_t rx_tls_ctx; 33 + atomic64_t rx_tls_del; 34 + }; 35 + 36 + struct mlx5e_tls { 37 + struct mlx5e_tls_sw_stats sw_stats; 38 + struct workqueue_struct *rx_wq; 39 + }; 40 + 41 + int mlx5e_ktls_init(struct mlx5e_priv *priv); 42 + void mlx5e_ktls_cleanup(struct mlx5e_priv *priv); 43 + 44 + int mlx5e_ktls_get_count(struct mlx5e_priv *priv); 45 + int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data); 46 + int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data); 69 47 70 48 #else 71 - 72 49 static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) 73 50 { 74 51 } ··· 107 64 static inline void 108 65 mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {} 109 66 110 - static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) { return false; } 111 - static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) { return false; } 112 - static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; } 67 + static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev) 68 + { 69 + return false; 70 + } 113 71 72 + static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; } 73 + static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { } 74 + static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; } 75 + static inline int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data) 76 + { 77 + return 0; 78 + } 79 + 80 + static inline int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data) 81 + { 82 + return 0; 83 + } 114 84 #endif 115 85 116 86 #endif /* __MLX5E_TLS_H__ */
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
··· 3 3 4 4 #include <net/inet6_hashtables.h> 5 5 #include "en_accel/en_accel.h" 6 - #include "en_accel/tls.h" 6 + #include "en_accel/ktls.h" 7 7 #include "en_accel/ktls_txrx.h" 8 8 #include "en_accel/ktls_utils.h" 9 9 #include "en_accel/fs_tcp.h"
+16 -4
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 2 // Copyright (c) 2019 Mellanox Technologies. 3 3 4 - #include "en_accel/tls.h" 4 + #include "en_accel/ktls.h" 5 5 #include "en_accel/ktls_txrx.h" 6 6 #include "en_accel/ktls_utils.h" 7 7 ··· 27 27 { 28 28 u16 num_dumps, stop_room = 0; 29 29 30 - if (!mlx5e_accel_is_ktls_tx(mdev)) 30 + if (!mlx5e_is_ktls_tx(mdev)) 31 31 return 0; 32 32 33 33 num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE); ··· 448 448 return MLX5E_KTLS_SYNC_FAIL; 449 449 } 450 450 451 - bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq, 452 - struct sk_buff *skb, int datalen, 451 + bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, 452 + struct sk_buff *skb, 453 453 struct mlx5e_accel_tx_tls_state *state) 454 454 { 455 455 struct mlx5e_ktls_offload_context_tx *priv_tx; 456 456 struct mlx5e_sq_stats *stats = sq->stats; 457 + struct tls_context *tls_ctx; 458 + int datalen; 457 459 u32 seq; 460 + 461 + datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); 462 + if (!datalen) 463 + return true; 464 + 465 + mlx5e_tx_mpwqe_ensure_complete(sq); 466 + 467 + tls_ctx = tls_get_ctx(skb->sk); 468 + if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) 469 + goto err_out; 458 470 459 471 priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx); 460 472
+26 -2
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
··· 16 16 17 17 u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params); 18 18 19 - bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq, 20 - struct sk_buff *skb, int datalen, 19 + bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, 20 + struct sk_buff *skb, 21 21 struct mlx5e_accel_tx_tls_state *state); 22 22 void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, 23 23 struct mlx5_cqe64 *cqe, u32 *cqe_bcnt); ··· 48 48 { 49 49 return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state); 50 50 } 51 + 52 + static inline bool mlx5e_ktls_skb_offloaded(struct sk_buff *skb) 53 + { 54 + return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk); 55 + } 56 + 57 + static inline void 58 + mlx5e_ktls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg, 59 + struct mlx5e_accel_tx_tls_state *state) 60 + { 61 + cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); 62 + } 51 63 #else 52 64 static inline bool 53 65 mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq, ··· 81 69 return false; 82 70 } 83 71 72 + static inline u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, 73 + struct mlx5e_params *params) 74 + { 75 + return 0; 76 + } 77 + 78 + static inline void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, 79 + struct sk_buff *skb, 80 + struct mlx5_cqe64 *cqe, 81 + u32 *cqe_bcnt) 82 + { 83 + } 84 84 #endif /* CONFIG_MLX5_EN_TLS */ 85 85 86 86 #endif /* __MLX5E_TLS_TXRX_H__ */
-1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
··· 6 6 7 7 #include <net/tls.h> 8 8 #include "en.h" 9 - #include "accel/tls.h" 10 9 11 10 enum { 12 11 MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0,
-247
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
··· 1 - /* 2 - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 
31 - * 32 - */ 33 - 34 - #include <linux/netdevice.h> 35 - #include <net/ipv6.h> 36 - #include "en_accel/tls.h" 37 - #include "accel/tls.h" 38 - 39 - static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk) 40 - { 41 - struct inet_sock *inet = inet_sk(sk); 42 - 43 - MLX5_SET(tls_flow, flow, ipv6, 0); 44 - memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 45 - &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4)); 46 - memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4), 47 - &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4)); 48 - } 49 - 50 - #if IS_ENABLED(CONFIG_IPV6) 51 - static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk) 52 - { 53 - struct ipv6_pinfo *np = inet6_sk(sk); 54 - 55 - MLX5_SET(tls_flow, flow, ipv6, 1); 56 - memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 57 - &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 58 - memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6), 59 - &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); 60 - } 61 - #endif 62 - 63 - static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk) 64 - { 65 - struct inet_sock *inet = inet_sk(sk); 66 - 67 - memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport, 68 - MLX5_FLD_SZ_BYTES(tls_flow, src_port)); 69 - memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport, 70 - MLX5_FLD_SZ_BYTES(tls_flow, dst_port)); 71 - } 72 - 73 - static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps) 74 - { 75 - switch (sk->sk_family) { 76 - case AF_INET: 77 - mlx5e_tls_set_ipv4_flow(flow, sk); 78 - break; 79 - #if IS_ENABLED(CONFIG_IPV6) 80 - case AF_INET6: 81 - if (!sk->sk_ipv6only && 82 - ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { 83 - mlx5e_tls_set_ipv4_flow(flow, sk); 84 - break; 85 - } 86 - if (!(caps & MLX5_ACCEL_TLS_IPV6)) 87 - goto error_out; 88 - 89 - mlx5e_tls_set_ipv6_flow(flow, sk); 90 - break; 91 - #endif 92 - 
default: 93 - goto error_out; 94 - } 95 - 96 - mlx5e_tls_set_flow_tcp_ports(flow, sk); 97 - return 0; 98 - error_out: 99 - return -EINVAL; 100 - } 101 - 102 - static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, 103 - enum tls_offload_ctx_dir direction, 104 - struct tls_crypto_info *crypto_info, 105 - u32 start_offload_tcp_sn) 106 - { 107 - struct mlx5e_priv *priv = netdev_priv(netdev); 108 - struct tls_context *tls_ctx = tls_get_ctx(sk); 109 - struct mlx5_core_dev *mdev = priv->mdev; 110 - u32 caps = mlx5_accel_tls_device_caps(mdev); 111 - int ret = -ENOMEM; 112 - void *flow; 113 - u32 swid; 114 - 115 - flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL); 116 - if (!flow) 117 - return ret; 118 - 119 - ret = mlx5e_tls_set_flow(flow, sk, caps); 120 - if (ret) 121 - goto free_flow; 122 - 123 - ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info, 124 - start_offload_tcp_sn, &swid, 125 - direction == TLS_OFFLOAD_CTX_DIR_TX); 126 - if (ret < 0) 127 - goto free_flow; 128 - 129 - if (direction == TLS_OFFLOAD_CTX_DIR_TX) { 130 - struct mlx5e_tls_offload_context_tx *tx_ctx = 131 - mlx5e_get_tls_tx_context(tls_ctx); 132 - 133 - tx_ctx->swid = htonl(swid); 134 - tx_ctx->expected_seq = start_offload_tcp_sn; 135 - } else { 136 - struct mlx5e_tls_offload_context_rx *rx_ctx = 137 - mlx5e_get_tls_rx_context(tls_ctx); 138 - 139 - rx_ctx->handle = htonl(swid); 140 - } 141 - 142 - return 0; 143 - free_flow: 144 - kfree(flow); 145 - return ret; 146 - } 147 - 148 - static void mlx5e_tls_del(struct net_device *netdev, 149 - struct tls_context *tls_ctx, 150 - enum tls_offload_ctx_dir direction) 151 - { 152 - struct mlx5e_priv *priv = netdev_priv(netdev); 153 - unsigned int handle; 154 - 155 - handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ? 
156 - mlx5e_get_tls_tx_context(tls_ctx)->swid : 157 - mlx5e_get_tls_rx_context(tls_ctx)->handle); 158 - 159 - mlx5_accel_tls_del_flow(priv->mdev, handle, 160 - direction == TLS_OFFLOAD_CTX_DIR_TX); 161 - } 162 - 163 - static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk, 164 - u32 seq, u8 *rcd_sn_data, 165 - enum tls_offload_ctx_dir direction) 166 - { 167 - struct tls_context *tls_ctx = tls_get_ctx(sk); 168 - struct mlx5e_priv *priv = netdev_priv(netdev); 169 - struct mlx5e_tls_offload_context_rx *rx_ctx; 170 - __be64 rcd_sn = *(__be64 *)rcd_sn_data; 171 - 172 - if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX)) 173 - return -EINVAL; 174 - rx_ctx = mlx5e_get_tls_rx_context(tls_ctx); 175 - 176 - netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq, 177 - be64_to_cpu(rcd_sn)); 178 - mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn); 179 - atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply); 180 - 181 - return 0; 182 - } 183 - 184 - static const struct tlsdev_ops mlx5e_tls_ops = { 185 - .tls_dev_add = mlx5e_tls_add, 186 - .tls_dev_del = mlx5e_tls_del, 187 - .tls_dev_resync = mlx5e_tls_resync, 188 - }; 189 - 190 - void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) 191 - { 192 - struct net_device *netdev = priv->netdev; 193 - u32 caps; 194 - 195 - if (mlx5e_accel_is_ktls_device(priv->mdev)) { 196 - mlx5e_ktls_build_netdev(priv); 197 - return; 198 - } 199 - 200 - /* FPGA */ 201 - if (!mlx5e_accel_is_tls_device(priv->mdev)) 202 - return; 203 - 204 - caps = mlx5_accel_tls_device_caps(priv->mdev); 205 - if (caps & MLX5_ACCEL_TLS_TX) { 206 - netdev->features |= NETIF_F_HW_TLS_TX; 207 - netdev->hw_features |= NETIF_F_HW_TLS_TX; 208 - } 209 - 210 - if (caps & MLX5_ACCEL_TLS_RX) { 211 - netdev->features |= NETIF_F_HW_TLS_RX; 212 - netdev->hw_features |= NETIF_F_HW_TLS_RX; 213 - } 214 - 215 - if (!(caps & MLX5_ACCEL_TLS_LRO)) { 216 - netdev->features &= ~NETIF_F_LRO; 217 - netdev->hw_features &= ~NETIF_F_LRO; 218 - } 219 - 220 - 
netdev->tlsdev_ops = &mlx5e_tls_ops; 221 - } 222 - 223 - int mlx5e_tls_init(struct mlx5e_priv *priv) 224 - { 225 - struct mlx5e_tls *tls; 226 - 227 - if (!mlx5e_accel_is_tls_device(priv->mdev)) 228 - return 0; 229 - 230 - tls = kzalloc(sizeof(*tls), GFP_KERNEL); 231 - if (!tls) 232 - return -ENOMEM; 233 - 234 - priv->tls = tls; 235 - return 0; 236 - } 237 - 238 - void mlx5e_tls_cleanup(struct mlx5e_priv *priv) 239 - { 240 - struct mlx5e_tls *tls = priv->tls; 241 - 242 - if (!tls) 243 - return; 244 - 245 - kfree(tls); 246 - priv->tls = NULL; 247 - }
-132
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
··· 1 - /* 2 - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 
31 - * 32 - */ 33 - #ifndef __MLX5E_TLS_H__ 34 - #define __MLX5E_TLS_H__ 35 - 36 - #include "accel/tls.h" 37 - #include "en_accel/ktls.h" 38 - 39 - #ifdef CONFIG_MLX5_EN_TLS 40 - #include <net/tls.h> 41 - #include "en.h" 42 - 43 - struct mlx5e_tls_sw_stats { 44 - atomic64_t tx_tls_ctx; 45 - atomic64_t tx_tls_del; 46 - atomic64_t tx_tls_drop_metadata; 47 - atomic64_t tx_tls_drop_resync_alloc; 48 - atomic64_t tx_tls_drop_no_sync_data; 49 - atomic64_t tx_tls_drop_bypass_required; 50 - atomic64_t rx_tls_ctx; 51 - atomic64_t rx_tls_del; 52 - atomic64_t rx_tls_drop_resync_request; 53 - atomic64_t rx_tls_resync_request; 54 - atomic64_t rx_tls_resync_reply; 55 - atomic64_t rx_tls_auth_fail; 56 - }; 57 - 58 - struct mlx5e_tls { 59 - struct mlx5e_tls_sw_stats sw_stats; 60 - struct workqueue_struct *rx_wq; 61 - }; 62 - 63 - struct mlx5e_tls_offload_context_tx { 64 - struct tls_offload_context_tx base; 65 - u32 expected_seq; 66 - __be32 swid; 67 - }; 68 - 69 - static inline struct mlx5e_tls_offload_context_tx * 70 - mlx5e_get_tls_tx_context(struct tls_context *tls_ctx) 71 - { 72 - BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) > 73 - TLS_OFFLOAD_CONTEXT_SIZE_TX); 74 - return container_of(tls_offload_ctx_tx(tls_ctx), 75 - struct mlx5e_tls_offload_context_tx, 76 - base); 77 - } 78 - 79 - struct mlx5e_tls_offload_context_rx { 80 - struct tls_offload_context_rx base; 81 - __be32 handle; 82 - }; 83 - 84 - static inline struct mlx5e_tls_offload_context_rx * 85 - mlx5e_get_tls_rx_context(struct tls_context *tls_ctx) 86 - { 87 - BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) > 88 - TLS_OFFLOAD_CONTEXT_SIZE_RX); 89 - return container_of(tls_offload_ctx_rx(tls_ctx), 90 - struct mlx5e_tls_offload_context_rx, 91 - base); 92 - } 93 - 94 - static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) 95 - { 96 - return priv->tls; 97 - } 98 - 99 - void mlx5e_tls_build_netdev(struct mlx5e_priv *priv); 100 - int mlx5e_tls_init(struct mlx5e_priv *priv); 101 - void 
mlx5e_tls_cleanup(struct mlx5e_priv *priv); 102 - 103 - int mlx5e_tls_get_count(struct mlx5e_priv *priv); 104 - int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data); 105 - int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data); 106 - 107 - static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) 108 - { 109 - return !is_kdump_kernel() && 110 - mlx5_accel_is_tls_device(mdev); 111 - } 112 - 113 - #else 114 - 115 - static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) 116 - { 117 - if (!is_kdump_kernel() && 118 - mlx5_accel_is_ktls_device(priv->mdev)) 119 - mlx5e_ktls_build_netdev(priv); 120 - } 121 - 122 - static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) { return false; } 123 - static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; } 124 - static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { } 125 - static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; } 126 - static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; } 127 - static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; } 128 - static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; } 129 - 130 - #endif 131 - 132 - #endif /* __MLX5E_TLS_H__ */
-390
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
··· 1 - /* 2 - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 
31 - * 32 - */ 33 - 34 - #include "en_accel/tls.h" 35 - #include "en_accel/tls_rxtx.h" 36 - #include "accel/accel.h" 37 - 38 - #include <net/inet6_hashtables.h> 39 - #include <linux/ipv6.h> 40 - 41 - #define SYNDROM_DECRYPTED 0x30 42 - #define SYNDROM_RESYNC_REQUEST 0x31 43 - #define SYNDROM_AUTH_FAILED 0x32 44 - 45 - #define SYNDROME_OFFLOAD_REQUIRED 32 46 - #define SYNDROME_SYNC 33 47 - 48 - struct sync_info { 49 - u64 rcd_sn; 50 - s32 sync_len; 51 - int nr_frags; 52 - skb_frag_t frags[MAX_SKB_FRAGS]; 53 - }; 54 - 55 - struct recv_metadata_content { 56 - u8 syndrome; 57 - u8 reserved; 58 - __be32 sync_seq; 59 - } __packed; 60 - 61 - struct send_metadata_content { 62 - /* One byte of syndrome followed by 3 bytes of swid */ 63 - __be32 syndrome_swid; 64 - __be16 first_seq; 65 - } __packed; 66 - 67 - struct mlx5e_tls_metadata { 68 - union { 69 - /* from fpga to host */ 70 - struct recv_metadata_content recv; 71 - /* from host to fpga */ 72 - struct send_metadata_content send; 73 - unsigned char raw[6]; 74 - } __packed content; 75 - /* packet type ID field */ 76 - __be16 ethertype; 77 - } __packed; 78 - 79 - static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid) 80 - { 81 - struct mlx5e_tls_metadata *pet; 82 - struct ethhdr *eth; 83 - 84 - if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata))) 85 - return -ENOMEM; 86 - 87 - eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata)); 88 - skb->mac_header -= sizeof(struct mlx5e_tls_metadata); 89 - pet = (struct mlx5e_tls_metadata *)(eth + 1); 90 - 91 - memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata), 92 - 2 * ETH_ALEN); 93 - 94 - eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE); 95 - pet->content.send.syndrome_swid = 96 - htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid; 97 - 98 - return 0; 99 - } 100 - 101 - static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context, 102 - u32 tcp_seq, struct sync_info *info) 103 - { 104 - int remaining, i = 0, ret = 
-EINVAL; 105 - struct tls_record_info *record; 106 - unsigned long flags; 107 - s32 sync_size; 108 - 109 - spin_lock_irqsave(&context->base.lock, flags); 110 - record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn); 111 - 112 - if (unlikely(!record)) 113 - goto out; 114 - 115 - sync_size = tcp_seq - tls_record_start_seq(record); 116 - info->sync_len = sync_size; 117 - if (unlikely(sync_size < 0)) { 118 - if (tls_record_is_start_marker(record)) 119 - goto done; 120 - 121 - goto out; 122 - } 123 - 124 - remaining = sync_size; 125 - while (remaining > 0) { 126 - info->frags[i] = record->frags[i]; 127 - __skb_frag_ref(&info->frags[i]); 128 - remaining -= skb_frag_size(&info->frags[i]); 129 - 130 - if (remaining < 0) 131 - skb_frag_size_add(&info->frags[i], remaining); 132 - 133 - i++; 134 - } 135 - info->nr_frags = i; 136 - done: 137 - ret = 0; 138 - out: 139 - spin_unlock_irqrestore(&context->base.lock, flags); 140 - return ret; 141 - } 142 - 143 - static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, 144 - struct sk_buff *nskb, u32 tcp_seq, 145 - int headln, __be64 rcd_sn) 146 - { 147 - struct mlx5e_tls_metadata *pet; 148 - u8 syndrome = SYNDROME_SYNC; 149 - struct iphdr *iph; 150 - struct tcphdr *th; 151 - int data_len, mss; 152 - 153 - nskb->dev = skb->dev; 154 - skb_reset_mac_header(nskb); 155 - skb_set_network_header(nskb, skb_network_offset(skb)); 156 - skb_set_transport_header(nskb, skb_transport_offset(skb)); 157 - memcpy(nskb->data, skb->data, headln); 158 - memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn)); 159 - 160 - iph = ip_hdr(nskb); 161 - iph->tot_len = htons(nskb->len - skb_network_offset(nskb)); 162 - th = tcp_hdr(nskb); 163 - data_len = nskb->len - headln; 164 - tcp_seq -= data_len; 165 - th->seq = htonl(tcp_seq); 166 - 167 - mss = nskb->dev->mtu - (headln - skb_network_offset(nskb)); 168 - skb_shinfo(nskb)->gso_size = 0; 169 - if (data_len > mss) { 170 - skb_shinfo(nskb)->gso_size = mss; 171 - skb_shinfo(nskb)->gso_segs = 
DIV_ROUND_UP(data_len, mss); 172 - } 173 - skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; 174 - 175 - pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr)); 176 - memcpy(pet, &syndrome, sizeof(syndrome)); 177 - pet->content.send.first_seq = htons(tcp_seq); 178 - 179 - /* MLX5 devices don't care about the checksum partial start, offset 180 - * and pseudo header 181 - */ 182 - nskb->ip_summed = CHECKSUM_PARTIAL; 183 - 184 - nskb->queue_mapping = skb->queue_mapping; 185 - } 186 - 187 - static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context, 188 - struct mlx5e_txqsq *sq, struct sk_buff *skb, 189 - struct mlx5e_tls *tls) 190 - { 191 - u32 tcp_seq = ntohl(tcp_hdr(skb)->seq); 192 - struct sync_info info; 193 - struct sk_buff *nskb; 194 - int linear_len = 0; 195 - int headln; 196 - int i; 197 - 198 - sq->stats->tls_ooo++; 199 - 200 - if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) { 201 - /* We might get here if a retransmission reaches the driver 202 - * after the relevant record is acked. 
203 - * It should be safe to drop the packet in this case 204 - */ 205 - atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data); 206 - goto err_out; 207 - } 208 - 209 - if (unlikely(info.sync_len < 0)) { 210 - u32 payload; 211 - 212 - headln = skb_transport_offset(skb) + tcp_hdrlen(skb); 213 - payload = skb->len - headln; 214 - if (likely(payload <= -info.sync_len)) 215 - /* SKB payload doesn't require offload 216 - */ 217 - return true; 218 - 219 - atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required); 220 - goto err_out; 221 - } 222 - 223 - if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) { 224 - atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata); 225 - goto err_out; 226 - } 227 - 228 - headln = skb_transport_offset(skb) + tcp_hdrlen(skb); 229 - linear_len += headln + sizeof(info.rcd_sn); 230 - nskb = alloc_skb(linear_len, GFP_ATOMIC); 231 - if (unlikely(!nskb)) { 232 - atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc); 233 - goto err_out; 234 - } 235 - 236 - context->expected_seq = tcp_seq + skb->len - headln; 237 - skb_put(nskb, linear_len); 238 - for (i = 0; i < info.nr_frags; i++) 239 - skb_shinfo(nskb)->frags[i] = info.frags[i]; 240 - 241 - skb_shinfo(nskb)->nr_frags = info.nr_frags; 242 - nskb->data_len = info.sync_len; 243 - nskb->len += info.sync_len; 244 - sq->stats->tls_resync_bytes += nskb->len; 245 - mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln, 246 - cpu_to_be64(info.rcd_sn)); 247 - mlx5e_sq_xmit_simple(sq, nskb, true); 248 - 249 - return true; 250 - 251 - err_out: 252 - dev_kfree_skb_any(skb); 253 - return false; 254 - } 255 - 256 - bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, 257 - struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state) 258 - { 259 - struct mlx5e_priv *priv = netdev_priv(netdev); 260 - struct mlx5e_tls_offload_context_tx *context; 261 - struct tls_context *tls_ctx; 262 - u32 expected_seq; 263 - int datalen; 264 - u32 skb_seq; 265 - 266 - datalen = skb->len - 
(skb_transport_offset(skb) + tcp_hdrlen(skb)); 267 - if (!datalen) 268 - return true; 269 - 270 - mlx5e_tx_mpwqe_ensure_complete(sq); 271 - 272 - tls_ctx = tls_get_ctx(skb->sk); 273 - if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) 274 - goto err_out; 275 - 276 - if (mlx5e_accel_is_ktls_tx(sq->mdev)) 277 - return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state); 278 - 279 - /* FPGA */ 280 - skb_seq = ntohl(tcp_hdr(skb)->seq); 281 - context = mlx5e_get_tls_tx_context(tls_ctx); 282 - expected_seq = context->expected_seq; 283 - 284 - if (unlikely(expected_seq != skb_seq)) 285 - return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls); 286 - 287 - if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) { 288 - atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata); 289 - dev_kfree_skb_any(skb); 290 - return false; 291 - } 292 - 293 - context->expected_seq = skb_seq + datalen; 294 - return true; 295 - 296 - err_out: 297 - dev_kfree_skb_any(skb); 298 - return false; 299 - } 300 - 301 - static int tls_update_resync_sn(struct net_device *netdev, 302 - struct sk_buff *skb, 303 - struct mlx5e_tls_metadata *mdata) 304 - { 305 - struct sock *sk = NULL; 306 - struct iphdr *iph; 307 - struct tcphdr *th; 308 - __be32 seq; 309 - 310 - if (mdata->ethertype != htons(ETH_P_IP)) 311 - return -EINVAL; 312 - 313 - iph = (struct iphdr *)(mdata + 1); 314 - 315 - th = ((void *)iph) + iph->ihl * 4; 316 - 317 - if (iph->version == 4) { 318 - sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo, 319 - iph->saddr, th->source, iph->daddr, 320 - th->dest, netdev->ifindex); 321 - #if IS_ENABLED(CONFIG_IPV6) 322 - } else { 323 - struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph; 324 - 325 - sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo, 326 - &ipv6h->saddr, th->source, 327 - &ipv6h->daddr, ntohs(th->dest), 328 - netdev->ifindex, 0); 329 - #endif 330 - } 331 - if (!sk || sk->sk_state == TCP_TIME_WAIT) { 332 - struct mlx5e_priv *priv = netdev_priv(netdev); 333 - 334 - 
atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request); 335 - goto out; 336 - } 337 - 338 - skb->sk = sk; 339 - skb->destructor = sock_edemux; 340 - 341 - memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq)); 342 - tls_offload_rx_resync_request(sk, seq); 343 - out: 344 - return 0; 345 - } 346 - 347 - /* FPGA tls rx handler */ 348 - void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb, 349 - u32 *cqe_bcnt) 350 - { 351 - struct mlx5e_tls_metadata *mdata; 352 - struct mlx5e_priv *priv; 353 - 354 - /* Use the metadata */ 355 - mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN); 356 - switch (mdata->content.recv.syndrome) { 357 - case SYNDROM_DECRYPTED: 358 - skb->decrypted = 1; 359 - break; 360 - case SYNDROM_RESYNC_REQUEST: 361 - tls_update_resync_sn(rq->netdev, skb, mdata); 362 - priv = netdev_priv(rq->netdev); 363 - atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request); 364 - break; 365 - case SYNDROM_AUTH_FAILED: 366 - /* Authentication failure will be observed and verified by kTLS */ 367 - priv = netdev_priv(rq->netdev); 368 - atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail); 369 - break; 370 - default: 371 - /* Bypass the metadata header to others */ 372 - return; 373 - } 374 - 375 - remove_metadata_hdr(skb); 376 - *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; 377 - } 378 - 379 - u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) 380 - { 381 - if (!mlx5e_accel_is_tls_device(mdev)) 382 - return 0; 383 - 384 - if (mlx5e_accel_is_ktls_device(mdev)) 385 - return mlx5e_ktls_get_stop_room(mdev, params); 386 - 387 - /* FPGA */ 388 - /* Resync SKB. */ 389 - return mlx5e_stop_room_for_max_wqe(mdev); 390 - }
-91
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
··· 1 - /* 2 - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 
31 - * 32 - */ 33 - 34 - #ifndef __MLX5E_TLS_RXTX_H__ 35 - #define __MLX5E_TLS_RXTX_H__ 36 - 37 - #include "accel/accel.h" 38 - #include "en_accel/ktls_txrx.h" 39 - 40 - #ifdef CONFIG_MLX5_EN_TLS 41 - 42 - #include <linux/skbuff.h> 43 - #include "en.h" 44 - #include "en/txrx.h" 45 - 46 - u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params); 47 - 48 - bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, 49 - struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state); 50 - 51 - static inline bool mlx5e_tls_skb_offloaded(struct sk_buff *skb) 52 - { 53 - return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk); 54 - } 55 - 56 - static inline void 57 - mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg, 58 - struct mlx5e_accel_tx_tls_state *state) 59 - { 60 - cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); 61 - } 62 - 63 - void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb, 64 - u32 *cqe_bcnt); 65 - 66 - static inline void 67 - mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, 68 - struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) 69 - { 70 - if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */ 71 - return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt); 72 - 73 - if (unlikely(test_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state) && is_metadata_hdr_valid(skb))) 74 - return mlx5e_tls_handle_rx_skb_metadata(rq, skb, cqe_bcnt); 75 - } 76 - 77 - #else 78 - 79 - static inline bool 80 - mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; } 81 - static inline void 82 - mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, 83 - struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {} 84 - static inline u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) 85 - { 86 - return 0; 87 - } 88 - 89 - #endif /* CONFIG_MLX5_EN_TLS */ 90 - 91 - #endif /* __MLX5E_TLS_RXTX_H__ */
+18 -33
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
··· 36 36 37 37 #include "en.h" 38 38 #include "fpga/sdk.h" 39 - #include "en_accel/tls.h" 40 - 41 - static const struct counter_desc mlx5e_tls_sw_stats_desc[] = { 42 - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) }, 43 - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) }, 44 - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) }, 45 - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) }, 46 - }; 39 + #include "en_accel/ktls.h" 47 40 48 41 static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = { 49 42 { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) }, ··· 48 55 #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \ 49 56 atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset)) 50 57 51 - static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv) 52 - { 53 - if (!priv->tls) 54 - return NULL; 55 - if (mlx5e_accel_is_ktls_device(priv->mdev)) 56 - return mlx5e_ktls_sw_stats_desc; 57 - return mlx5e_tls_sw_stats_desc; 58 - } 59 - 60 - int mlx5e_tls_get_count(struct mlx5e_priv *priv) 58 + int mlx5e_ktls_get_count(struct mlx5e_priv *priv) 61 59 { 62 60 if (!priv->tls) 63 61 return 0; 64 - if (mlx5e_accel_is_ktls_device(priv->mdev)) 65 - return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc); 66 - return ARRAY_SIZE(mlx5e_tls_sw_stats_desc); 62 + 63 + return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc); 67 64 } 68 65 69 - int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) 66 + int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data) 70 67 { 71 - const struct counter_desc *stats_desc; 72 68 unsigned int i, n, idx = 0; 73 69 74 - stats_desc = get_tls_atomic_stats(priv); 75 - n = mlx5e_tls_get_count(priv); 70 + if (!priv->tls) 71 + return 0; 72 + 73 + n = mlx5e_ktls_get_count(priv); 76 74 77 75 for (i = 0; i < n; i++) 78 76 strcpy(data + (idx++) * ETH_GSTRING_LEN, 79 - stats_desc[i].format); 77 + mlx5e_ktls_sw_stats_desc[i].format); 80 
78 81 79 return n; 82 80 } 83 81 84 - int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) 82 + int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data) 85 83 { 86 - const struct counter_desc *stats_desc; 87 84 unsigned int i, n, idx = 0; 88 85 89 - stats_desc = get_tls_atomic_stats(priv); 90 - n = mlx5e_tls_get_count(priv); 86 + if (!priv->tls) 87 + return 0; 88 + 89 + n = mlx5e_ktls_get_count(priv); 91 90 92 91 for (i = 0; i < n; i++) 93 - data[idx++] = 94 - MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats, 95 - stats_desc, i); 92 + data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats, 93 + mlx5e_ktls_sw_stats_desc, 94 + i); 96 95 97 96 return n; 98 97 }
+6 -18
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 47 47 #include "en_rep.h" 48 48 #include "en_accel/ipsec.h" 49 49 #include "en_accel/en_accel.h" 50 - #include "en_accel/tls.h" 51 - #include "accel/ipsec.h" 52 - #include "accel/tls.h" 50 + #include "en_accel/ktls.h" 51 + #include "en_accel/ipsec_offload.h" 53 52 #include "lib/vxlan.h" 54 53 #include "lib/clock.h" 55 54 #include "en/port.h" ··· 67 68 #include "en/ptp.h" 68 69 #include "qos.h" 69 70 #include "en/trap.h" 70 - #include "fpga/ipsec.h" 71 71 72 72 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) 73 73 { ··· 1034 1036 if (err) 1035 1037 goto err_destroy_rq; 1036 1038 1037 - if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev)) 1038 - __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */ 1039 - 1040 1039 if (MLX5_CAP_ETH(mdev, cqe_checksum_full)) 1041 1040 __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state); 1042 1041 ··· 1329 1334 INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); 1330 1335 if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert)) 1331 1336 set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); 1332 - if (MLX5_IPSEC_DEV(c->priv->mdev)) 1337 + if (mlx5_ipsec_device_caps(c->priv->mdev)) 1333 1338 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); 1334 1339 if (param->is_mpw) 1335 1340 set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state); ··· 4466 4471 return -EINVAL; 4467 4472 } 4468 4473 4469 - if (mlx5_fpga_is_ipsec_device(priv->mdev)) { 4470 - netdev_warn(netdev, 4471 - "XDP is not available on Innova cards with IPsec support\n"); 4472 - return -EINVAL; 4473 - } 4474 - 4475 4474 new_params = priv->channels.params; 4476 4475 new_params.xdp_prog = prog; 4477 4476 ··· 4923 4934 4924 4935 mlx5e_set_netdev_dev_addr(netdev); 4925 4936 mlx5e_ipsec_build_netdev(priv); 4926 - mlx5e_tls_build_netdev(priv); 4937 + mlx5e_ktls_build_netdev(priv); 4927 4938 } 4928 4939 4929 4940 void mlx5e_create_q_counters(struct mlx5e_priv *priv) ··· 4985 4996 if (err) 4986 4997 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err); 4987 
4998 4988 - err = mlx5e_tls_init(priv); 4999 + err = mlx5e_ktls_init(priv); 4989 5000 if (err) 4990 5001 mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); 4991 5002 ··· 4996 5007 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) 4997 5008 { 4998 5009 mlx5e_health_destroy_reporters(priv); 4999 - mlx5e_tls_cleanup(priv); 5010 + mlx5e_ktls_cleanup(priv); 5000 5011 mlx5e_ipsec_cleanup(priv); 5001 5012 mlx5e_fs_cleanup(priv); 5002 5013 } ··· 5693 5704 { 5694 5705 int ret; 5695 5706 5696 - mlx5e_ipsec_build_inverse_table(); 5697 5707 mlx5e_build_ptys2ethtool_map(); 5698 5708 ret = auxiliary_driver_register(&mlx5e_driver); 5699 5709 if (ret)
-1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 1112 1112 &MLX5E_STATS_GRP(per_port_buff_congest), 1113 1113 #ifdef CONFIG_MLX5_EN_IPSEC 1114 1114 &MLX5E_STATS_GRP(ipsec_sw), 1115 - &MLX5E_STATS_GRP(ipsec_hw), 1116 1115 #endif 1117 1116 &MLX5E_STATS_GRP(ptp), 1118 1117 };
+5 -56
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 48 48 #include "en_rep.h" 49 49 #include "en/rep/tc.h" 50 50 #include "ipoib/ipoib.h" 51 - #include "accel/ipsec.h" 52 - #include "fpga/ipsec.h" 51 + #include "en_accel/ipsec_offload.h" 53 52 #include "en_accel/ipsec_rxtx.h" 54 - #include "en_accel/tls_rxtx.h" 53 + #include "en_accel/ktls_txrx.h" 55 54 #include "en/xdp.h" 56 55 #include "en/xsk/rx.h" 57 56 #include "en/health.h" ··· 1415 1416 1416 1417 skb->mac_len = ETH_HLEN; 1417 1418 1418 - mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt); 1419 + if (unlikely(get_cqe_tls_offload(cqe))) 1420 + mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt); 1419 1421 1420 1422 if (unlikely(mlx5_ipsec_is_rx_flow(cqe))) 1421 1423 mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe); ··· 2383 2383 }; 2384 2384 #endif /* CONFIG_MLX5_CORE_IPOIB */ 2385 2385 2386 - #ifdef CONFIG_MLX5_EN_IPSEC 2387 - 2388 - static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) 2389 - { 2390 - struct mlx5_wq_cyc *wq = &rq->wqe.wq; 2391 - struct mlx5e_wqe_frag_info *wi; 2392 - struct sk_buff *skb; 2393 - u32 cqe_bcnt; 2394 - u16 ci; 2395 - 2396 - ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); 2397 - wi = get_frag(rq, ci); 2398 - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); 2399 - 2400 - if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { 2401 - rq->stats->wqe_err++; 2402 - goto wq_free_wqe; 2403 - } 2404 - 2405 - skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, 2406 - mlx5e_skb_from_cqe_linear, 2407 - mlx5e_skb_from_cqe_nonlinear, 2408 - rq, cqe, wi, cqe_bcnt); 2409 - if (unlikely(!skb)) /* a DROP, save the page-reuse checks */ 2410 - goto wq_free_wqe; 2411 - 2412 - skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt); 2413 - if (unlikely(!skb)) 2414 - goto wq_free_wqe; 2415 - 2416 - mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); 2417 - napi_gro_receive(rq->cq.napi, skb); 2418 - 2419 - wq_free_wqe: 2420 - mlx5e_free_rx_wqe(rq, wi, true); 2421 - mlx5_wq_cyc_pop(wq); 2422 - } 2423 - 2424 - #endif /* CONFIG_MLX5_EN_IPSEC */ 
2425 - 2426 2386 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk) 2427 2387 { 2428 2388 struct net_device *netdev = rq->netdev; ··· 2399 2439 rq->post_wqes = mlx5e_post_rx_mpwqes; 2400 2440 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; 2401 2441 2402 - if (mlx5_fpga_is_ipsec_device(mdev)) { 2403 - netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n"); 2404 - return -EINVAL; 2405 - } 2406 2442 if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) { 2407 2443 rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo; 2408 2444 if (!rq->handle_rx_cqe) { ··· 2422 2466 mlx5e_skb_from_cqe_nonlinear; 2423 2467 rq->post_wqes = mlx5e_post_rx_wqes; 2424 2468 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; 2425 - 2426 - #ifdef CONFIG_MLX5_EN_IPSEC 2427 - if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) && 2428 - priv->ipsec) 2429 - rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe; 2430 - else 2431 - #endif 2432 - rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe; 2469 + rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe; 2433 2470 if (!rq->handle_rx_cqe) { 2434 2471 netdev_err(netdev, "RX handler of RQ is not set\n"); 2435 2472 return -EINVAL;
+4 -5
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
··· 32 32 33 33 #include "lib/mlx5.h" 34 34 #include "en.h" 35 - #include "en_accel/tls.h" 35 + #include "en_accel/ktls.h" 36 36 #include "en_accel/en_accel.h" 37 37 #include "en/ptp.h" 38 38 #include "en/port.h" ··· 1900 1900 1901 1901 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls) 1902 1902 { 1903 - return mlx5e_tls_get_count(priv); 1903 + return mlx5e_ktls_get_count(priv); 1904 1904 } 1905 1905 1906 1906 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls) 1907 1907 { 1908 - return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN); 1908 + return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN); 1909 1909 } 1910 1910 1911 1911 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls) 1912 1912 { 1913 - return idx + mlx5e_tls_get_stats(priv, data + idx); 1913 + return idx + mlx5e_ktls_get_stats(priv, data + idx); 1914 1914 } 1915 1915 1916 1916 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; } ··· 2443 2443 &MLX5E_STATS_GRP(pme), 2444 2444 #ifdef CONFIG_MLX5_EN_IPSEC 2445 2445 &MLX5E_STATS_GRP(ipsec_sw), 2446 - &MLX5E_STATS_GRP(ipsec_hw), 2447 2446 #endif 2448 2447 &MLX5E_STATS_GRP(tls), 2449 2448 &MLX5E_STATS_GRP(channels),
-1
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
··· 482 482 extern MLX5E_DECLARE_STATS_GRP(pme); 483 483 extern MLX5E_DECLARE_STATS_GRP(channels); 484 484 extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest); 485 - extern MLX5E_DECLARE_STATS_GRP(ipsec_hw); 486 485 extern MLX5E_DECLARE_STATS_GRP(ipsec_sw); 487 486 extern MLX5E_DECLARE_STATS_GRP(ptp); 488 487
-3
drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
··· 57 57 u32 mkey; 58 58 struct mlx5_uars_page *uar; 59 59 } conn_res; 60 - 61 - struct mlx5_fpga_ipsec *ipsec; 62 - struct mlx5_fpga_tls *tls; 63 60 }; 64 61 65 62 #define mlx5_fpga_dbg(__adev, format, ...) \
-1582
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
··· 1 - /* 2 - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 
31 - * 32 - */ 33 - 34 - #include <linux/rhashtable.h> 35 - #include <linux/mlx5/driver.h> 36 - #include <linux/mlx5/fs_helpers.h> 37 - #include <linux/mlx5/fs.h> 38 - #include <linux/rbtree.h> 39 - 40 - #include "mlx5_core.h" 41 - #include "fs_cmd.h" 42 - #include "fpga/ipsec.h" 43 - #include "fpga/sdk.h" 44 - #include "fpga/core.h" 45 - 46 - enum mlx5_fpga_ipsec_cmd_status { 47 - MLX5_FPGA_IPSEC_CMD_PENDING, 48 - MLX5_FPGA_IPSEC_CMD_SEND_FAIL, 49 - MLX5_FPGA_IPSEC_CMD_COMPLETE, 50 - }; 51 - 52 - struct mlx5_fpga_ipsec_cmd_context { 53 - struct mlx5_fpga_dma_buf buf; 54 - enum mlx5_fpga_ipsec_cmd_status status; 55 - struct mlx5_ifc_fpga_ipsec_cmd_resp resp; 56 - int status_code; 57 - struct completion complete; 58 - struct mlx5_fpga_device *dev; 59 - struct list_head list; /* Item in pending_cmds */ 60 - u8 command[]; 61 - }; 62 - 63 - struct mlx5_fpga_esp_xfrm; 64 - 65 - struct mlx5_fpga_ipsec_sa_ctx { 66 - struct rhash_head hash; 67 - struct mlx5_ifc_fpga_ipsec_sa hw_sa; 68 - u32 sa_handle; 69 - struct mlx5_core_dev *dev; 70 - struct mlx5_fpga_esp_xfrm *fpga_xfrm; 71 - }; 72 - 73 - struct mlx5_fpga_esp_xfrm { 74 - unsigned int num_rules; 75 - struct mlx5_fpga_ipsec_sa_ctx *sa_ctx; 76 - struct mutex lock; /* xfrm lock */ 77 - struct mlx5_accel_esp_xfrm accel_xfrm; 78 - }; 79 - 80 - struct mlx5_fpga_ipsec_rule { 81 - struct rb_node node; 82 - struct fs_fte *fte; 83 - struct mlx5_fpga_ipsec_sa_ctx *ctx; 84 - }; 85 - 86 - static const struct rhashtable_params rhash_sa = { 87 - /* Keep out "cmd" field from the key as it's 88 - * value is not constant during the lifetime 89 - * of the key object. 
90 - */ 91 - .key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - 92 - sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), 93 - .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) + 94 - sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), 95 - .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), 96 - .automatic_shrinking = true, 97 - .min_size = 1, 98 - }; 99 - 100 - struct mlx5_fpga_ipsec { 101 - struct mlx5_fpga_device *fdev; 102 - struct list_head pending_cmds; 103 - spinlock_t pending_cmds_lock; /* Protects pending_cmds */ 104 - u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)]; 105 - struct mlx5_fpga_conn *conn; 106 - 107 - struct notifier_block fs_notifier_ingress_bypass; 108 - struct notifier_block fs_notifier_egress; 109 - 110 - /* Map hardware SA --> SA context 111 - * (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx) 112 - * We will use this hash to avoid SAs duplication in fpga which 113 - * aren't allowed 114 - */ 115 - struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */ 116 - struct mutex sa_hash_lock; 117 - 118 - /* Tree holding all rules for this fpga device 119 - * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id) 120 - */ 121 - struct rb_root rules_rb; 122 - struct mutex rules_rb_lock; /* rules lock */ 123 - 124 - struct ida halloc; 125 - }; 126 - 127 - bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) 128 - { 129 - if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga)) 130 - return false; 131 - 132 - if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) != 133 - MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX) 134 - return false; 135 - 136 - if (MLX5_CAP_FPGA(mdev, sandbox_product_id) != 137 - MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC) 138 - return false; 139 - 140 - return true; 141 - } 142 - 143 - static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn, 144 - struct mlx5_fpga_device *fdev, 145 - struct mlx5_fpga_dma_buf *buf, 146 - u8 status) 147 - { 148 - struct mlx5_fpga_ipsec_cmd_context *context; 149 - 150 - if 
(status) { 151 - context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context, 152 - buf); 153 - mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n", 154 - status); 155 - context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL; 156 - complete(&context->complete); 157 - } 158 - } 159 - 160 - static inline 161 - int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome) 162 - { 163 - switch (syndrome) { 164 - case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS: 165 - return 0; 166 - case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE: 167 - return -EEXIST; 168 - case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST: 169 - return -EINVAL; 170 - case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE: 171 - return -EIO; 172 - } 173 - return -EIO; 174 - } 175 - 176 - static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) 177 - { 178 - struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data; 179 - struct mlx5_fpga_ipsec_cmd_context *context; 180 - enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome; 181 - struct mlx5_fpga_device *fdev = cb_arg; 182 - unsigned long flags; 183 - 184 - if (buf->sg[0].size < sizeof(*resp)) { 185 - mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n", 186 - buf->sg[0].size, sizeof(*resp)); 187 - return; 188 - } 189 - 190 - mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n", 191 - ntohl(resp->syndrome)); 192 - 193 - spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); 194 - context = list_first_entry_or_null(&fdev->ipsec->pending_cmds, 195 - struct mlx5_fpga_ipsec_cmd_context, 196 - list); 197 - if (context) 198 - list_del(&context->list); 199 - spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); 200 - 201 - if (!context) { 202 - mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n"); 203 - return; 204 - } 205 - mlx5_fpga_dbg(fdev, "Handling response for %p\n", context); 206 - 207 - syndrome = ntohl(resp->syndrome); 208 - context->status_code = 
syndrome_to_errno(syndrome); 209 - context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE; 210 - memcpy(&context->resp, resp, sizeof(*resp)); 211 - 212 - if (context->status_code) 213 - mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n", 214 - syndrome); 215 - 216 - complete(&context->complete); 217 - } 218 - 219 - static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev, 220 - const void *cmd, int cmd_size) 221 - { 222 - struct mlx5_fpga_ipsec_cmd_context *context; 223 - struct mlx5_fpga_device *fdev = mdev->fpga; 224 - unsigned long flags; 225 - int res; 226 - 227 - if (!fdev || !fdev->ipsec) 228 - return ERR_PTR(-EOPNOTSUPP); 229 - 230 - if (cmd_size & 3) 231 - return ERR_PTR(-EINVAL); 232 - 233 - context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC); 234 - if (!context) 235 - return ERR_PTR(-ENOMEM); 236 - 237 - context->status = MLX5_FPGA_IPSEC_CMD_PENDING; 238 - context->dev = fdev; 239 - context->buf.complete = mlx5_fpga_ipsec_send_complete; 240 - init_completion(&context->complete); 241 - memcpy(&context->command, cmd, cmd_size); 242 - context->buf.sg[0].size = cmd_size; 243 - context->buf.sg[0].data = &context->command; 244 - 245 - spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); 246 - res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf); 247 - if (!res) 248 - list_add_tail(&context->list, &fdev->ipsec->pending_cmds); 249 - spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); 250 - 251 - if (res) { 252 - mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res); 253 - kfree(context); 254 - return ERR_PTR(res); 255 - } 256 - 257 - /* Context should be freed by the caller after completion. 
*/ 258 - return context; 259 - } 260 - 261 - static int mlx5_fpga_ipsec_cmd_wait(void *ctx) 262 - { 263 - struct mlx5_fpga_ipsec_cmd_context *context = ctx; 264 - unsigned long timeout = 265 - msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC); 266 - int res; 267 - 268 - res = wait_for_completion_timeout(&context->complete, timeout); 269 - if (!res) { 270 - mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n"); 271 - return -ETIMEDOUT; 272 - } 273 - 274 - if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE) 275 - res = context->status_code; 276 - else 277 - res = -EIO; 278 - 279 - return res; 280 - } 281 - 282 - static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec) 283 - { 284 - if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command)) 285 - return true; 286 - return false; 287 - } 288 - 289 - static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev, 290 - struct mlx5_ifc_fpga_ipsec_sa *hw_sa, 291 - int opcode) 292 - { 293 - struct mlx5_core_dev *dev = fdev->mdev; 294 - struct mlx5_ifc_fpga_ipsec_sa *sa; 295 - struct mlx5_fpga_ipsec_cmd_context *cmd_context; 296 - size_t sa_cmd_size; 297 - int err; 298 - 299 - hw_sa->ipsec_sa_v1.cmd = htonl(opcode); 300 - if (is_v2_sadb_supported(fdev->ipsec)) 301 - sa_cmd_size = sizeof(*hw_sa); 302 - else 303 - sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1); 304 - 305 - cmd_context = (struct mlx5_fpga_ipsec_cmd_context *) 306 - mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size); 307 - if (IS_ERR(cmd_context)) 308 - return PTR_ERR(cmd_context); 309 - 310 - err = mlx5_fpga_ipsec_cmd_wait(cmd_context); 311 - if (err) 312 - goto out; 313 - 314 - sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command; 315 - if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) { 316 - mlx5_fpga_err(fdev, "mismatch SA handle. 
cmd 0x%08x vs resp 0x%08x\n", 317 - ntohl(sa->ipsec_sa_v1.sw_sa_handle), 318 - ntohl(cmd_context->resp.sw_sa_handle)); 319 - err = -EIO; 320 - } 321 - 322 - out: 323 - kfree(cmd_context); 324 - return err; 325 - } 326 - 327 - u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) 328 - { 329 - struct mlx5_fpga_device *fdev = mdev->fpga; 330 - u32 ret = 0; 331 - 332 - if (mlx5_fpga_is_ipsec_device(mdev)) { 333 - ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE; 334 - ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA; 335 - } else { 336 - return ret; 337 - } 338 - 339 - if (!fdev->ipsec) 340 - return ret; 341 - 342 - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp)) 343 - ret |= MLX5_ACCEL_IPSEC_CAP_ESP; 344 - 345 - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6)) 346 - ret |= MLX5_ACCEL_IPSEC_CAP_IPV6; 347 - 348 - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso)) 349 - ret |= MLX5_ACCEL_IPSEC_CAP_LSO; 350 - 351 - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer)) 352 - ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER; 353 - 354 - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) { 355 - ret |= MLX5_ACCEL_IPSEC_CAP_ESN; 356 - ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN; 357 - } 358 - 359 - return ret; 360 - } 361 - 362 - static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev) 363 - { 364 - struct mlx5_fpga_device *fdev = mdev->fpga; 365 - 366 - if (!fdev || !fdev->ipsec) 367 - return 0; 368 - 369 - return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, 370 - number_of_ipsec_counters); 371 - } 372 - 373 - static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, 374 - unsigned int counters_count) 375 - { 376 - struct mlx5_fpga_device *fdev = mdev->fpga; 377 - unsigned int i; 378 - __be32 *data; 379 - u32 count; 380 - u64 addr; 381 - int ret; 382 - 383 - if (!fdev || !fdev->ipsec) 384 - return 0; 385 - 386 - addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, 387 - 
ipsec_counters_addr_low) + 388 - ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, 389 - ipsec_counters_addr_high) << 32); 390 - 391 - count = mlx5_fpga_ipsec_counters_count(mdev); 392 - 393 - data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL); 394 - if (!data) { 395 - ret = -ENOMEM; 396 - goto out; 397 - } 398 - 399 - ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data, 400 - MLX5_FPGA_ACCESS_TYPE_DONTCARE); 401 - if (ret < 0) { 402 - mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n", 403 - ret); 404 - goto out; 405 - } 406 - ret = 0; 407 - 408 - if (count > counters_count) 409 - count = counters_count; 410 - 411 - /* Each counter is low word, then high. But each word is big-endian */ 412 - for (i = 0; i < count; i++) 413 - counters[i] = (u64)ntohl(data[i * 2]) | 414 - ((u64)ntohl(data[i * 2 + 1]) << 32); 415 - 416 - out: 417 - kfree(data); 418 - return ret; 419 - } 420 - 421 - static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags) 422 - { 423 - struct mlx5_fpga_ipsec_cmd_context *context; 424 - struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0}; 425 - int err; 426 - 427 - cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP); 428 - cmd.flags = htonl(flags); 429 - context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd)); 430 - if (IS_ERR(context)) 431 - return PTR_ERR(context); 432 - 433 - err = mlx5_fpga_ipsec_cmd_wait(context); 434 - if (err) 435 - goto out; 436 - 437 - if ((context->resp.flags & cmd.flags) != cmd.flags) { 438 - mlx5_fpga_err(context->dev, "Failed to set capabilities. 
cmd 0x%08x vs resp 0x%08x\n", 439 - cmd.flags, 440 - context->resp.flags); 441 - err = -EIO; 442 - } 443 - 444 - out: 445 - kfree(context); 446 - return err; 447 - } 448 - 449 - static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev) 450 - { 451 - u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev); 452 - u32 flags = 0; 453 - 454 - if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER) 455 - flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER; 456 - 457 - return mlx5_fpga_ipsec_set_caps(mdev, flags); 458 - } 459 - 460 - static void 461 - mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev, 462 - const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs, 463 - struct mlx5_ifc_fpga_ipsec_sa *hw_sa) 464 - { 465 - const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm; 466 - 467 - /* key */ 468 - memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key, 469 - aes_gcm->key_len / 8); 470 - /* Duplicate 128 bit key twice according to HW layout */ 471 - if (aes_gcm->key_len == 128) 472 - memcpy(&hw_sa->ipsec_sa_v1.key_enc[16], 473 - aes_gcm->aes_key, aes_gcm->key_len / 8); 474 - 475 - /* salt and seq_iv */ 476 - memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv, 477 - sizeof(aes_gcm->seq_iv)); 478 - memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt, 479 - sizeof(aes_gcm->salt)); 480 - 481 - /* esn */ 482 - if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) { 483 - hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN; 484 - hw_sa->ipsec_sa_v1.flags |= 485 - (xfrm_attrs->flags & 486 - MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ? 487 - MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0; 488 - hw_sa->esn = htonl(xfrm_attrs->esn); 489 - } else { 490 - hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN; 491 - hw_sa->ipsec_sa_v1.flags &= 492 - ~(xfrm_attrs->flags & 493 - MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ? 
494 - MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0; 495 - hw_sa->esn = 0; 496 - } 497 - 498 - /* rx handle */ 499 - hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle); 500 - 501 - /* enc mode */ 502 - switch (aes_gcm->key_len) { 503 - case 128: 504 - hw_sa->ipsec_sa_v1.enc_mode = 505 - MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128; 506 - break; 507 - case 256: 508 - hw_sa->ipsec_sa_v1.enc_mode = 509 - MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128; 510 - break; 511 - } 512 - 513 - /* flags */ 514 - hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID | 515 - MLX5_FPGA_IPSEC_SA_SPI_EN | 516 - MLX5_FPGA_IPSEC_SA_IP_ESP; 517 - 518 - if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT) 519 - hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX; 520 - else 521 - hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX; 522 - } 523 - 524 - static void 525 - mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev, 526 - struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs, 527 - const __be32 saddr[4], 528 - const __be32 daddr[4], 529 - const __be32 spi, bool is_ipv6, 530 - struct mlx5_ifc_fpga_ipsec_sa *hw_sa) 531 - { 532 - mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa); 533 - 534 - /* IPs */ 535 - memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip)); 536 - memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip)); 537 - 538 - /* SPI */ 539 - hw_sa->ipsec_sa_v1.spi = spi; 540 - 541 - /* flags */ 542 - if (is_ipv6) 543 - hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6; 544 - } 545 - 546 - static bool is_full_mask(const void *p, size_t len) 547 - { 548 - WARN_ON(len % 4); 549 - 550 - return !memchr_inv(p, 0xff, len); 551 - } 552 - 553 - static bool validate_fpga_full_mask(struct mlx5_core_dev *dev, 554 - const u32 *match_c, 555 - const u32 *match_v) 556 - { 557 - const void *misc_params_c = MLX5_ADDR_OF(fte_match_param, 558 - match_c, 559 - misc_parameters); 560 - const void *headers_c = MLX5_ADDR_OF(fte_match_param, 561 - 
match_c, 562 - outer_headers); 563 - const void *headers_v = MLX5_ADDR_OF(fte_match_param, 564 - match_v, 565 - outer_headers); 566 - 567 - if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) { 568 - const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, 569 - headers_c, 570 - src_ipv4_src_ipv6.ipv4_layout.ipv4); 571 - const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, 572 - headers_c, 573 - dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 574 - 575 - if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout, 576 - ipv4)) || 577 - !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout, 578 - ipv4))) 579 - return false; 580 - } else { 581 - const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, 582 - headers_c, 583 - src_ipv4_src_ipv6.ipv6_layout.ipv6); 584 - const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, 585 - headers_c, 586 - dst_ipv4_dst_ipv6.ipv6_layout.ipv6); 587 - 588 - if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout, 589 - ipv6)) || 590 - !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout, 591 - ipv6))) 592 - return false; 593 - } 594 - 595 - if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c, 596 - outer_esp_spi), 597 - MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi))) 598 - return false; 599 - 600 - return true; 601 - } 602 - 603 - static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev, 604 - u8 match_criteria_enable, 605 - const u32 *match_c, 606 - const u32 *match_v) 607 - { 608 - u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev); 609 - bool ipv6_flow; 610 - 611 - ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v); 612 - 613 - if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) || 614 - mlx5_fs_is_outer_udp_flow(match_c, match_v) || 615 - mlx5_fs_is_outer_tcp_flow(match_c, match_v) || 616 - mlx5_fs_is_vxlan_flow(match_c) || 617 - !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) || 618 - ipv6_flow)) 619 - return false; 620 - 621 - if (!(ipsec_dev_caps & 
MLX5_ACCEL_IPSEC_CAP_DEVICE)) 622 - return false; 623 - 624 - if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) && 625 - mlx5_fs_is_outer_ipsec_flow(match_c)) 626 - return false; 627 - 628 - if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) && 629 - ipv6_flow) 630 - return false; 631 - 632 - if (!validate_fpga_full_mask(dev, match_c, match_v)) 633 - return false; 634 - 635 - return true; 636 - } 637 - 638 - static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev, 639 - u8 match_criteria_enable, 640 - const u32 *match_c, 641 - const u32 *match_v, 642 - struct mlx5_flow_act *flow_act, 643 - struct mlx5_flow_context *flow_context) 644 - { 645 - const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c, 646 - outer_headers); 647 - bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) || 648 - MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0); 649 - bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) || 650 - MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0); 651 - int ret; 652 - 653 - ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c, 654 - match_v); 655 - if (!ret) 656 - return ret; 657 - 658 - if (is_dmac || is_smac || 659 - (match_criteria_enable & 660 - ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) || 661 - (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) || 662 - (flow_context->flags & FLOW_CONTEXT_HAS_TAG)) 663 - return false; 664 - 665 - return true; 666 - } 667 - 668 - static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, 669 - struct mlx5_accel_esp_xfrm *accel_xfrm, 670 - const __be32 saddr[4], const __be32 daddr[4], 671 - const __be32 spi, bool is_ipv6, u32 *sa_handle) 672 - { 673 - struct mlx5_fpga_ipsec_sa_ctx *sa_ctx; 674 - struct mlx5_fpga_esp_xfrm *fpga_xfrm = 675 - container_of(accel_xfrm, typeof(*fpga_xfrm), 676 - accel_xfrm); 677 - struct mlx5_fpga_device *fdev = mdev->fpga; 678 - struct mlx5_fpga_ipsec *fipsec = 
fdev->ipsec; 679 - int opcode, err; 680 - void *context; 681 - 682 - /* alloc SA */ 683 - sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL); 684 - if (!sa_ctx) 685 - return ERR_PTR(-ENOMEM); 686 - 687 - sa_ctx->dev = mdev; 688 - 689 - /* build candidate SA */ 690 - mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs, 691 - saddr, daddr, spi, is_ipv6, 692 - &sa_ctx->hw_sa); 693 - 694 - mutex_lock(&fpga_xfrm->lock); 695 - 696 - if (fpga_xfrm->sa_ctx) { /* multiple rules for same accel_xfrm */ 697 - /* all rules must be with same IPs and SPI */ 698 - if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa, 699 - sizeof(sa_ctx->hw_sa))) { 700 - context = ERR_PTR(-EINVAL); 701 - goto exists; 702 - } 703 - 704 - ++fpga_xfrm->num_rules; 705 - context = fpga_xfrm->sa_ctx; 706 - goto exists; 707 - } 708 - 709 - if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) { 710 - err = ida_alloc_min(&fipsec->halloc, 1, GFP_KERNEL); 711 - if (err < 0) { 712 - context = ERR_PTR(err); 713 - goto exists; 714 - } 715 - 716 - sa_ctx->sa_handle = err; 717 - if (sa_handle) 718 - *sa_handle = sa_ctx->sa_handle; 719 - } 720 - /* This is unbounded fpga_xfrm, try to add to hash */ 721 - mutex_lock(&fipsec->sa_hash_lock); 722 - 723 - err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash, 724 - rhash_sa); 725 - if (err) { 726 - /* Can't bound different accel_xfrm to already existing sa_ctx. 727 - * This is because we can't support multiple ketmats for 728 - * same IPs and SPI 729 - */ 730 - context = ERR_PTR(-EEXIST); 731 - goto unlock_hash; 732 - } 733 - 734 - /* Bound accel_xfrm to sa_ctx */ 735 - opcode = is_v2_sadb_supported(fdev->ipsec) ? 
736 - MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 : 737 - MLX5_FPGA_IPSEC_CMD_OP_ADD_SA; 738 - err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode); 739 - sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; 740 - if (err) { 741 - context = ERR_PTR(err); 742 - goto delete_hash; 743 - } 744 - 745 - mutex_unlock(&fipsec->sa_hash_lock); 746 - 747 - ++fpga_xfrm->num_rules; 748 - fpga_xfrm->sa_ctx = sa_ctx; 749 - sa_ctx->fpga_xfrm = fpga_xfrm; 750 - 751 - mutex_unlock(&fpga_xfrm->lock); 752 - 753 - return sa_ctx; 754 - 755 - delete_hash: 756 - WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash, 757 - rhash_sa)); 758 - unlock_hash: 759 - mutex_unlock(&fipsec->sa_hash_lock); 760 - if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) 761 - ida_free(&fipsec->halloc, sa_ctx->sa_handle); 762 - exists: 763 - mutex_unlock(&fpga_xfrm->lock); 764 - kfree(sa_ctx); 765 - return context; 766 - } 767 - 768 - static void * 769 - mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev, 770 - struct fs_fte *fte, 771 - bool is_egress) 772 - { 773 - struct mlx5_accel_esp_xfrm *accel_xfrm; 774 - __be32 saddr[4], daddr[4], spi; 775 - struct mlx5_flow_group *fg; 776 - bool is_ipv6 = false; 777 - 778 - fs_get_obj(fg, fte->node.parent); 779 - /* validate */ 780 - if (is_egress && 781 - !mlx5_is_fpga_egress_ipsec_rule(mdev, 782 - fg->mask.match_criteria_enable, 783 - fg->mask.match_criteria, 784 - fte->val, 785 - &fte->action, 786 - &fte->flow_context)) 787 - return ERR_PTR(-EINVAL); 788 - else if (!mlx5_is_fpga_ipsec_rule(mdev, 789 - fg->mask.match_criteria_enable, 790 - fg->mask.match_criteria, 791 - fte->val)) 792 - return ERR_PTR(-EINVAL); 793 - 794 - /* get xfrm context */ 795 - accel_xfrm = 796 - (struct mlx5_accel_esp_xfrm *)fte->action.esp_id; 797 - 798 - /* IPs */ 799 - if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria, 800 - fte->val)) { 801 - memcpy(&saddr[3], 802 - MLX5_ADDR_OF(fte_match_set_lyr_2_4, 803 - fte->val, 804 - src_ipv4_src_ipv6.ipv4_layout.ipv4), 805 
- sizeof(saddr[3])); 806 - memcpy(&daddr[3], 807 - MLX5_ADDR_OF(fte_match_set_lyr_2_4, 808 - fte->val, 809 - dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 810 - sizeof(daddr[3])); 811 - } else { 812 - memcpy(saddr, 813 - MLX5_ADDR_OF(fte_match_param, 814 - fte->val, 815 - outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 816 - sizeof(saddr)); 817 - memcpy(daddr, 818 - MLX5_ADDR_OF(fte_match_param, 819 - fte->val, 820 - outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 821 - sizeof(daddr)); 822 - is_ipv6 = true; 823 - } 824 - 825 - /* SPI */ 826 - spi = MLX5_GET_BE(typeof(spi), 827 - fte_match_param, fte->val, 828 - misc_parameters.outer_esp_spi); 829 - 830 - /* create */ 831 - return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm, 832 - saddr, daddr, 833 - spi, is_ipv6, NULL); 834 - } 835 - 836 - static void 837 - mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx) 838 - { 839 - struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga; 840 - struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; 841 - int opcode = is_v2_sadb_supported(fdev->ipsec) ? 
842 - MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 : 843 - MLX5_FPGA_IPSEC_CMD_OP_DEL_SA; 844 - int err; 845 - 846 - err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode); 847 - sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; 848 - if (err) { 849 - WARN_ON(err); 850 - return; 851 - } 852 - 853 - if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action == 854 - MLX5_ACCEL_ESP_ACTION_DECRYPT) 855 - ida_free(&fipsec->halloc, sa_ctx->sa_handle); 856 - 857 - mutex_lock(&fipsec->sa_hash_lock); 858 - WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash, 859 - rhash_sa)); 860 - mutex_unlock(&fipsec->sa_hash_lock); 861 - } 862 - 863 - static void mlx5_fpga_ipsec_delete_sa_ctx(void *context) 864 - { 865 - struct mlx5_fpga_esp_xfrm *fpga_xfrm = 866 - ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm; 867 - 868 - mutex_lock(&fpga_xfrm->lock); 869 - if (!--fpga_xfrm->num_rules) { 870 - mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx); 871 - kfree(fpga_xfrm->sa_ctx); 872 - fpga_xfrm->sa_ctx = NULL; 873 - } 874 - mutex_unlock(&fpga_xfrm->lock); 875 - } 876 - 877 - static inline struct mlx5_fpga_ipsec_rule * 878 - _rule_search(struct rb_root *root, struct fs_fte *fte) 879 - { 880 - struct rb_node *node = root->rb_node; 881 - 882 - while (node) { 883 - struct mlx5_fpga_ipsec_rule *rule = 884 - container_of(node, struct mlx5_fpga_ipsec_rule, 885 - node); 886 - 887 - if (rule->fte < fte) 888 - node = node->rb_left; 889 - else if (rule->fte > fte) 890 - node = node->rb_right; 891 - else 892 - return rule; 893 - } 894 - return NULL; 895 - } 896 - 897 - static struct mlx5_fpga_ipsec_rule * 898 - rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte) 899 - { 900 - struct mlx5_fpga_ipsec_rule *rule; 901 - 902 - mutex_lock(&ipsec_dev->rules_rb_lock); 903 - rule = _rule_search(&ipsec_dev->rules_rb, fte); 904 - mutex_unlock(&ipsec_dev->rules_rb_lock); 905 - 906 - return rule; 907 - } 908 - 909 - static inline int _rule_insert(struct rb_root *root, 910 - struct mlx5_fpga_ipsec_rule *rule) 
911 - { 912 - struct rb_node **new = &root->rb_node, *parent = NULL; 913 - 914 - /* Figure out where to put new node */ 915 - while (*new) { 916 - struct mlx5_fpga_ipsec_rule *this = 917 - container_of(*new, struct mlx5_fpga_ipsec_rule, 918 - node); 919 - 920 - parent = *new; 921 - if (rule->fte < this->fte) 922 - new = &((*new)->rb_left); 923 - else if (rule->fte > this->fte) 924 - new = &((*new)->rb_right); 925 - else 926 - return -EEXIST; 927 - } 928 - 929 - /* Add new node and rebalance tree. */ 930 - rb_link_node(&rule->node, parent, new); 931 - rb_insert_color(&rule->node, root); 932 - 933 - return 0; 934 - } 935 - 936 - static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev, 937 - struct mlx5_fpga_ipsec_rule *rule) 938 - { 939 - int ret; 940 - 941 - mutex_lock(&ipsec_dev->rules_rb_lock); 942 - ret = _rule_insert(&ipsec_dev->rules_rb, rule); 943 - mutex_unlock(&ipsec_dev->rules_rb_lock); 944 - 945 - return ret; 946 - } 947 - 948 - static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev, 949 - struct mlx5_fpga_ipsec_rule *rule) 950 - { 951 - struct rb_root *root = &ipsec_dev->rules_rb; 952 - 953 - mutex_lock(&ipsec_dev->rules_rb_lock); 954 - rb_erase(&rule->node, root); 955 - mutex_unlock(&ipsec_dev->rules_rb_lock); 956 - } 957 - 958 - static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev, 959 - struct mlx5_fpga_ipsec_rule *rule) 960 - { 961 - _rule_delete(ipsec_dev, rule); 962 - kfree(rule); 963 - } 964 - 965 - struct mailbox_mod { 966 - uintptr_t saved_esp_id; 967 - u32 saved_action; 968 - u32 saved_outer_esp_spi_value; 969 - }; 970 - 971 - static void restore_spec_mailbox(struct fs_fte *fte, 972 - struct mailbox_mod *mbox_mod) 973 - { 974 - char *misc_params_v = MLX5_ADDR_OF(fte_match_param, 975 - fte->val, 976 - misc_parameters); 977 - 978 - MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 979 - mbox_mod->saved_outer_esp_spi_value); 980 - fte->action.action |= mbox_mod->saved_action; 981 - fte->action.esp_id = 
(uintptr_t)mbox_mod->saved_esp_id; 982 - } 983 - 984 - static void modify_spec_mailbox(struct mlx5_core_dev *mdev, 985 - struct fs_fte *fte, 986 - struct mailbox_mod *mbox_mod) 987 - { 988 - char *misc_params_v = MLX5_ADDR_OF(fte_match_param, 989 - fte->val, 990 - misc_parameters); 991 - 992 - mbox_mod->saved_esp_id = fte->action.esp_id; 993 - mbox_mod->saved_action = fte->action.action & 994 - (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 995 - MLX5_FLOW_CONTEXT_ACTION_DECRYPT); 996 - mbox_mod->saved_outer_esp_spi_value = 997 - MLX5_GET(fte_match_set_misc, misc_params_v, 998 - outer_esp_spi); 999 - 1000 - fte->action.esp_id = 0; 1001 - fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 1002 - MLX5_FLOW_CONTEXT_ACTION_DECRYPT); 1003 - if (!MLX5_CAP_FLOWTABLE(mdev, 1004 - flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) 1005 - MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0); 1006 - } 1007 - 1008 - static enum fs_flow_table_type egress_to_fs_ft(bool egress) 1009 - { 1010 - return egress ? 
FS_FT_NIC_TX : FS_FT_NIC_RX; 1011 - } 1012 - 1013 - static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns, 1014 - struct mlx5_flow_table *ft, 1015 - u32 *in, 1016 - struct mlx5_flow_group *fg, 1017 - bool is_egress) 1018 - { 1019 - int (*create_flow_group)(struct mlx5_flow_root_namespace *ns, 1020 - struct mlx5_flow_table *ft, u32 *in, 1021 - struct mlx5_flow_group *fg) = 1022 - mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group; 1023 - char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in, 1024 - match_criteria.misc_parameters); 1025 - struct mlx5_core_dev *dev = ns->dev; 1026 - u32 saved_outer_esp_spi_mask; 1027 - u8 match_criteria_enable; 1028 - int ret; 1029 - 1030 - if (MLX5_CAP_FLOWTABLE(dev, 1031 - flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) 1032 - return create_flow_group(ns, ft, in, fg); 1033 - 1034 - match_criteria_enable = 1035 - MLX5_GET(create_flow_group_in, in, match_criteria_enable); 1036 - saved_outer_esp_spi_mask = 1037 - MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); 1038 - if (!match_criteria_enable || !saved_outer_esp_spi_mask) 1039 - return create_flow_group(ns, ft, in, fg); 1040 - 1041 - MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0); 1042 - 1043 - if (!(*misc_params_c) && 1044 - !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1)) 1045 - MLX5_SET(create_flow_group_in, in, match_criteria_enable, 1046 - match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS); 1047 - 1048 - ret = create_flow_group(ns, ft, in, fg); 1049 - 1050 - MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask); 1051 - MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable); 1052 - 1053 - return ret; 1054 - } 1055 - 1056 - static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns, 1057 - struct mlx5_flow_table *ft, 1058 - struct mlx5_flow_group *fg, 1059 - struct fs_fte *fte, 
1060 - bool is_egress) 1061 - { 1062 - int (*create_fte)(struct mlx5_flow_root_namespace *ns, 1063 - struct mlx5_flow_table *ft, 1064 - struct mlx5_flow_group *fg, 1065 - struct fs_fte *fte) = 1066 - mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte; 1067 - struct mlx5_core_dev *dev = ns->dev; 1068 - struct mlx5_fpga_device *fdev = dev->fpga; 1069 - struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; 1070 - struct mlx5_fpga_ipsec_rule *rule; 1071 - bool is_esp = fte->action.esp_id; 1072 - struct mailbox_mod mbox_mod; 1073 - int ret; 1074 - 1075 - if (!is_esp || 1076 - !(fte->action.action & 1077 - (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 1078 - MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) 1079 - return create_fte(ns, ft, fg, fte); 1080 - 1081 - rule = kzalloc(sizeof(*rule), GFP_KERNEL); 1082 - if (!rule) 1083 - return -ENOMEM; 1084 - 1085 - rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress); 1086 - if (IS_ERR(rule->ctx)) { 1087 - int err = PTR_ERR(rule->ctx); 1088 - 1089 - kfree(rule); 1090 - return err; 1091 - } 1092 - 1093 - rule->fte = fte; 1094 - WARN_ON(rule_insert(fipsec, rule)); 1095 - 1096 - modify_spec_mailbox(dev, fte, &mbox_mod); 1097 - ret = create_fte(ns, ft, fg, fte); 1098 - restore_spec_mailbox(fte, &mbox_mod); 1099 - if (ret) { 1100 - _rule_delete(fipsec, rule); 1101 - mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx); 1102 - kfree(rule); 1103 - } 1104 - 1105 - return ret; 1106 - } 1107 - 1108 - static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns, 1109 - struct mlx5_flow_table *ft, 1110 - struct mlx5_flow_group *fg, 1111 - int modify_mask, 1112 - struct fs_fte *fte, 1113 - bool is_egress) 1114 - { 1115 - int (*update_fte)(struct mlx5_flow_root_namespace *ns, 1116 - struct mlx5_flow_table *ft, 1117 - struct mlx5_flow_group *fg, 1118 - int modify_mask, 1119 - struct fs_fte *fte) = 1120 - mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte; 1121 - struct mlx5_core_dev *dev = ns->dev; 1122 - bool is_esp = 
fte->action.esp_id; 1123 - struct mailbox_mod mbox_mod; 1124 - int ret; 1125 - 1126 - if (!is_esp || 1127 - !(fte->action.action & 1128 - (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 1129 - MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) 1130 - return update_fte(ns, ft, fg, modify_mask, fte); 1131 - 1132 - modify_spec_mailbox(dev, fte, &mbox_mod); 1133 - ret = update_fte(ns, ft, fg, modify_mask, fte); 1134 - restore_spec_mailbox(fte, &mbox_mod); 1135 - 1136 - return ret; 1137 - } 1138 - 1139 - static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns, 1140 - struct mlx5_flow_table *ft, 1141 - struct fs_fte *fte, 1142 - bool is_egress) 1143 - { 1144 - int (*delete_fte)(struct mlx5_flow_root_namespace *ns, 1145 - struct mlx5_flow_table *ft, 1146 - struct fs_fte *fte) = 1147 - mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte; 1148 - struct mlx5_core_dev *dev = ns->dev; 1149 - struct mlx5_fpga_device *fdev = dev->fpga; 1150 - struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; 1151 - struct mlx5_fpga_ipsec_rule *rule; 1152 - bool is_esp = fte->action.esp_id; 1153 - struct mailbox_mod mbox_mod; 1154 - int ret; 1155 - 1156 - if (!is_esp || 1157 - !(fte->action.action & 1158 - (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 1159 - MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) 1160 - return delete_fte(ns, ft, fte); 1161 - 1162 - rule = rule_search(fipsec, fte); 1163 - if (!rule) 1164 - return -ENOENT; 1165 - 1166 - mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx); 1167 - rule_delete(fipsec, rule); 1168 - 1169 - modify_spec_mailbox(dev, fte, &mbox_mod); 1170 - ret = delete_fte(ns, ft, fte); 1171 - restore_spec_mailbox(fte, &mbox_mod); 1172 - 1173 - return ret; 1174 - } 1175 - 1176 - static int 1177 - mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns, 1178 - struct mlx5_flow_table *ft, 1179 - u32 *in, 1180 - struct mlx5_flow_group *fg) 1181 - { 1182 - return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true); 1183 - } 1184 - 1185 - static int 1186 - 
mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns, 1187 - struct mlx5_flow_table *ft, 1188 - struct mlx5_flow_group *fg, 1189 - struct fs_fte *fte) 1190 - { 1191 - return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true); 1192 - } 1193 - 1194 - static int 1195 - mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns, 1196 - struct mlx5_flow_table *ft, 1197 - struct mlx5_flow_group *fg, 1198 - int modify_mask, 1199 - struct fs_fte *fte) 1200 - { 1201 - return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte, 1202 - true); 1203 - } 1204 - 1205 - static int 1206 - mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns, 1207 - struct mlx5_flow_table *ft, 1208 - struct fs_fte *fte) 1209 - { 1210 - return fpga_ipsec_fs_delete_fte(ns, ft, fte, true); 1211 - } 1212 - 1213 - static int 1214 - mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns, 1215 - struct mlx5_flow_table *ft, 1216 - u32 *in, 1217 - struct mlx5_flow_group *fg) 1218 - { 1219 - return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false); 1220 - } 1221 - 1222 - static int 1223 - mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns, 1224 - struct mlx5_flow_table *ft, 1225 - struct mlx5_flow_group *fg, 1226 - struct fs_fte *fte) 1227 - { 1228 - return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false); 1229 - } 1230 - 1231 - static int 1232 - mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns, 1233 - struct mlx5_flow_table *ft, 1234 - struct mlx5_flow_group *fg, 1235 - int modify_mask, 1236 - struct fs_fte *fte) 1237 - { 1238 - return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte, 1239 - false); 1240 - } 1241 - 1242 - static int 1243 - mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns, 1244 - struct mlx5_flow_table *ft, 1245 - struct fs_fte *fte) 1246 - { 1247 - return fpga_ipsec_fs_delete_fte(ns, ft, fte, false); 1248 - } 1249 - 1250 - static 
struct mlx5_flow_cmds fpga_ipsec_ingress; 1251 - static struct mlx5_flow_cmds fpga_ipsec_egress; 1252 - 1253 - const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type) 1254 - { 1255 - switch (type) { 1256 - case FS_FT_NIC_RX: 1257 - return &fpga_ipsec_ingress; 1258 - case FS_FT_NIC_TX: 1259 - return &fpga_ipsec_egress; 1260 - default: 1261 - WARN_ON(true); 1262 - return NULL; 1263 - } 1264 - } 1265 - 1266 - static int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) 1267 - { 1268 - struct mlx5_fpga_conn_attr init_attr = {0}; 1269 - struct mlx5_fpga_device *fdev = mdev->fpga; 1270 - struct mlx5_fpga_conn *conn; 1271 - int err; 1272 - 1273 - if (!mlx5_fpga_is_ipsec_device(mdev)) 1274 - return 0; 1275 - 1276 - fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL); 1277 - if (!fdev->ipsec) 1278 - return -ENOMEM; 1279 - 1280 - fdev->ipsec->fdev = fdev; 1281 - 1282 - err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps), 1283 - fdev->ipsec->caps); 1284 - if (err) { 1285 - mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n", 1286 - err); 1287 - goto error; 1288 - } 1289 - 1290 - INIT_LIST_HEAD(&fdev->ipsec->pending_cmds); 1291 - spin_lock_init(&fdev->ipsec->pending_cmds_lock); 1292 - 1293 - init_attr.rx_size = SBU_QP_QUEUE_SIZE; 1294 - init_attr.tx_size = SBU_QP_QUEUE_SIZE; 1295 - init_attr.recv_cb = mlx5_fpga_ipsec_recv; 1296 - init_attr.cb_arg = fdev; 1297 - conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr); 1298 - if (IS_ERR(conn)) { 1299 - err = PTR_ERR(conn); 1300 - mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n", 1301 - err); 1302 - goto error; 1303 - } 1304 - fdev->ipsec->conn = conn; 1305 - 1306 - err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa); 1307 - if (err) 1308 - goto err_destroy_conn; 1309 - mutex_init(&fdev->ipsec->sa_hash_lock); 1310 - 1311 - fdev->ipsec->rules_rb = RB_ROOT; 1312 - mutex_init(&fdev->ipsec->rules_rb_lock); 1313 - 1314 - err = 
mlx5_fpga_ipsec_enable_supported_caps(mdev); 1315 - if (err) { 1316 - mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n", 1317 - err); 1318 - goto err_destroy_hash; 1319 - } 1320 - 1321 - ida_init(&fdev->ipsec->halloc); 1322 - 1323 - return 0; 1324 - 1325 - err_destroy_hash: 1326 - rhashtable_destroy(&fdev->ipsec->sa_hash); 1327 - 1328 - err_destroy_conn: 1329 - mlx5_fpga_sbu_conn_destroy(conn); 1330 - 1331 - error: 1332 - kfree(fdev->ipsec); 1333 - fdev->ipsec = NULL; 1334 - return err; 1335 - } 1336 - 1337 - static void destroy_rules_rb(struct rb_root *root) 1338 - { 1339 - struct mlx5_fpga_ipsec_rule *r, *tmp; 1340 - 1341 - rbtree_postorder_for_each_entry_safe(r, tmp, root, node) { 1342 - rb_erase(&r->node, root); 1343 - mlx5_fpga_ipsec_delete_sa_ctx(r->ctx); 1344 - kfree(r); 1345 - } 1346 - } 1347 - 1348 - static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) 1349 - { 1350 - struct mlx5_fpga_device *fdev = mdev->fpga; 1351 - 1352 - if (!mlx5_fpga_is_ipsec_device(mdev)) 1353 - return; 1354 - 1355 - ida_destroy(&fdev->ipsec->halloc); 1356 - destroy_rules_rb(&fdev->ipsec->rules_rb); 1357 - rhashtable_destroy(&fdev->ipsec->sa_hash); 1358 - 1359 - mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn); 1360 - kfree(fdev->ipsec); 1361 - fdev->ipsec = NULL; 1362 - } 1363 - 1364 - void mlx5_fpga_ipsec_build_fs_cmds(void) 1365 - { 1366 - /* ingress */ 1367 - fpga_ipsec_ingress.create_flow_table = 1368 - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table; 1369 - fpga_ipsec_ingress.destroy_flow_table = 1370 - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table; 1371 - fpga_ipsec_ingress.modify_flow_table = 1372 - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table; 1373 - fpga_ipsec_ingress.create_flow_group = 1374 - mlx5_fpga_ipsec_fs_create_flow_group_ingress; 1375 - fpga_ipsec_ingress.destroy_flow_group = 1376 - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group; 1377 - 
fpga_ipsec_ingress.create_fte = 1378 - mlx5_fpga_ipsec_fs_create_fte_ingress; 1379 - fpga_ipsec_ingress.update_fte = 1380 - mlx5_fpga_ipsec_fs_update_fte_ingress; 1381 - fpga_ipsec_ingress.delete_fte = 1382 - mlx5_fpga_ipsec_fs_delete_fte_ingress; 1383 - fpga_ipsec_ingress.update_root_ft = 1384 - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft; 1385 - 1386 - /* egress */ 1387 - fpga_ipsec_egress.create_flow_table = 1388 - mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table; 1389 - fpga_ipsec_egress.destroy_flow_table = 1390 - mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table; 1391 - fpga_ipsec_egress.modify_flow_table = 1392 - mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table; 1393 - fpga_ipsec_egress.create_flow_group = 1394 - mlx5_fpga_ipsec_fs_create_flow_group_egress; 1395 - fpga_ipsec_egress.destroy_flow_group = 1396 - mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group; 1397 - fpga_ipsec_egress.create_fte = 1398 - mlx5_fpga_ipsec_fs_create_fte_egress; 1399 - fpga_ipsec_egress.update_fte = 1400 - mlx5_fpga_ipsec_fs_update_fte_egress; 1401 - fpga_ipsec_egress.delete_fte = 1402 - mlx5_fpga_ipsec_fs_delete_fte_egress; 1403 - fpga_ipsec_egress.update_root_ft = 1404 - mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft; 1405 - } 1406 - 1407 - static int 1408 - mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, 1409 - const struct mlx5_accel_esp_xfrm_attrs *attrs) 1410 - { 1411 - if (attrs->tfc_pad) { 1412 - mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n"); 1413 - return -EOPNOTSUPP; 1414 - } 1415 - 1416 - if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) { 1417 - mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n"); 1418 - return -EOPNOTSUPP; 1419 - } 1420 - 1421 - if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) { 1422 - mlx5_core_err(mdev, "Only aes gcm keymat is supported\n"); 1423 - return -EOPNOTSUPP; 1424 - } 1425 
- 1426 - if (attrs->keymat.aes_gcm.iv_algo != 1427 - MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) { 1428 - mlx5_core_err(mdev, "Only iv sequence algo is supported\n"); 1429 - return -EOPNOTSUPP; 1430 - } 1431 - 1432 - if (attrs->keymat.aes_gcm.icv_len != 128) { 1433 - mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n"); 1434 - return -EOPNOTSUPP; 1435 - } 1436 - 1437 - if (attrs->keymat.aes_gcm.key_len != 128 && 1438 - attrs->keymat.aes_gcm.key_len != 256) { 1439 - mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n"); 1440 - return -EOPNOTSUPP; 1441 - } 1442 - 1443 - if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) && 1444 - (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps, 1445 - v2_command))) { 1446 - mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n"); 1447 - return -EOPNOTSUPP; 1448 - } 1449 - 1450 - return 0; 1451 - } 1452 - 1453 - static struct mlx5_accel_esp_xfrm * 1454 - mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, 1455 - const struct mlx5_accel_esp_xfrm_attrs *attrs, 1456 - u32 flags) 1457 - { 1458 - struct mlx5_fpga_esp_xfrm *fpga_xfrm; 1459 - 1460 - if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) { 1461 - mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n"); 1462 - return ERR_PTR(-EINVAL); 1463 - } 1464 - 1465 - if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { 1466 - mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n"); 1467 - return ERR_PTR(-EOPNOTSUPP); 1468 - } 1469 - 1470 - fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL); 1471 - if (!fpga_xfrm) 1472 - return ERR_PTR(-ENOMEM); 1473 - 1474 - mutex_init(&fpga_xfrm->lock); 1475 - memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs, 1476 - sizeof(fpga_xfrm->accel_xfrm.attrs)); 1477 - 1478 - return &fpga_xfrm->accel_xfrm; 1479 - } 1480 - 1481 - static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) 1482 - { 
1483 - struct mlx5_fpga_esp_xfrm *fpga_xfrm = 1484 - container_of(xfrm, struct mlx5_fpga_esp_xfrm, 1485 - accel_xfrm); 1486 - /* assuming no sa_ctx are connected to this xfrm_ctx */ 1487 - kfree(fpga_xfrm); 1488 - } 1489 - 1490 - static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, 1491 - const struct mlx5_accel_esp_xfrm_attrs *attrs) 1492 - { 1493 - struct mlx5_core_dev *mdev = xfrm->mdev; 1494 - struct mlx5_fpga_device *fdev = mdev->fpga; 1495 - struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; 1496 - struct mlx5_fpga_esp_xfrm *fpga_xfrm; 1497 - struct mlx5_ifc_fpga_ipsec_sa org_hw_sa; 1498 - 1499 - int err = 0; 1500 - 1501 - if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs))) 1502 - return 0; 1503 - 1504 - if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { 1505 - mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n"); 1506 - return -EOPNOTSUPP; 1507 - } 1508 - 1509 - if (is_v2_sadb_supported(fipsec)) { 1510 - mlx5_core_warn(mdev, "Modify esp is not supported\n"); 1511 - return -EOPNOTSUPP; 1512 - } 1513 - 1514 - fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm); 1515 - 1516 - mutex_lock(&fpga_xfrm->lock); 1517 - 1518 - if (!fpga_xfrm->sa_ctx) 1519 - /* Unbounded xfrm, change only sw attrs */ 1520 - goto change_sw_xfrm_attrs; 1521 - 1522 - /* copy original hw sa */ 1523 - memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa)); 1524 - mutex_lock(&fipsec->sa_hash_lock); 1525 - /* remove original hw sa from hash */ 1526 - WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, 1527 - &fpga_xfrm->sa_ctx->hash, rhash_sa)); 1528 - /* update hw_sa with new xfrm attrs*/ 1529 - mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs, 1530 - &fpga_xfrm->sa_ctx->hw_sa); 1531 - /* try to insert new hw_sa to hash */ 1532 - err = rhashtable_insert_fast(&fipsec->sa_hash, 1533 - &fpga_xfrm->sa_ctx->hash, rhash_sa); 1534 - if (err) 1535 - goto rollback_sa; 1536 - 1537 - /* modify device with new hw_sa */ 1538 - err = 
mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa, 1539 - MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2); 1540 - fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; 1541 - if (err) 1542 - WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, 1543 - &fpga_xfrm->sa_ctx->hash, 1544 - rhash_sa)); 1545 - rollback_sa: 1546 - if (err) { 1547 - /* return original hw_sa to hash */ 1548 - memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa, 1549 - sizeof(org_hw_sa)); 1550 - WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash, 1551 - &fpga_xfrm->sa_ctx->hash, 1552 - rhash_sa)); 1553 - } 1554 - mutex_unlock(&fipsec->sa_hash_lock); 1555 - 1556 - change_sw_xfrm_attrs: 1557 - if (!err) 1558 - memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs)); 1559 - mutex_unlock(&fpga_xfrm->lock); 1560 - return err; 1561 - } 1562 - 1563 - static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = { 1564 - .device_caps = mlx5_fpga_ipsec_device_caps, 1565 - .counters_count = mlx5_fpga_ipsec_counters_count, 1566 - .counters_read = mlx5_fpga_ipsec_counters_read, 1567 - .create_hw_context = mlx5_fpga_ipsec_create_sa_ctx, 1568 - .free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx, 1569 - .init = mlx5_fpga_ipsec_init, 1570 - .cleanup = mlx5_fpga_ipsec_cleanup, 1571 - .esp_create_xfrm = mlx5_fpga_esp_create_xfrm, 1572 - .esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm, 1573 - .esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm, 1574 - }; 1575 - 1576 - const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev) 1577 - { 1578 - if (!mlx5_fpga_is_ipsec_device(mdev)) 1579 - return NULL; 1580 - 1581 - return &fpga_ipsec_ops; 1582 - }
-62
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
··· 1 - /* 2 - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 
31 - * 32 - */ 33 - 34 - #ifndef __MLX5_FPGA_IPSEC_H__ 35 - #define __MLX5_FPGA_IPSEC_H__ 36 - 37 - #include "accel/ipsec.h" 38 - #include "fs_cmd.h" 39 - 40 - #ifdef CONFIG_MLX5_FPGA_IPSEC 41 - const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev); 42 - u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev); 43 - const struct mlx5_flow_cmds * 44 - mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type); 45 - void mlx5_fpga_ipsec_build_fs_cmds(void); 46 - bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev); 47 - #else 48 - static inline 49 - const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev) 50 - { return NULL; } 51 - static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } 52 - static inline const struct mlx5_flow_cmds * 53 - mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type) 54 - { 55 - return mlx5_fs_cmd_get_default(type); 56 - } 57 - 58 - static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {}; 59 - static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; } 60 - 61 - #endif /* CONFIG_MLX5_FPGA_IPSEC */ 62 - #endif /* __MLX5_FPGA_IPSEC_H__ */
-622
drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
··· 1 - /* 2 - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 
31 - * 32 - */ 33 - 34 - #include <linux/mlx5/device.h> 35 - #include "fpga/tls.h" 36 - #include "fpga/cmd.h" 37 - #include "fpga/sdk.h" 38 - #include "fpga/core.h" 39 - #include "accel/tls.h" 40 - 41 - struct mlx5_fpga_tls_command_context; 42 - 43 - typedef void (*mlx5_fpga_tls_command_complete) 44 - (struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev, 45 - struct mlx5_fpga_tls_command_context *ctx, 46 - struct mlx5_fpga_dma_buf *resp); 47 - 48 - struct mlx5_fpga_tls_command_context { 49 - struct list_head list; 50 - /* There is no guarantee on the order between the TX completion 51 - * and the command response. 52 - * The TX completion is going to touch cmd->buf even in 53 - * the case of successful transmission. 54 - * So instead of requiring separate allocations for cmd 55 - * and cmd->buf we've decided to use a reference counter 56 - */ 57 - refcount_t ref; 58 - struct mlx5_fpga_dma_buf buf; 59 - mlx5_fpga_tls_command_complete complete; 60 - }; 61 - 62 - static void 63 - mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx) 64 - { 65 - if (refcount_dec_and_test(&ctx->ref)) 66 - kfree(ctx); 67 - } 68 - 69 - static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev, 70 - struct mlx5_fpga_dma_buf *resp) 71 - { 72 - struct mlx5_fpga_conn *conn = fdev->tls->conn; 73 - struct mlx5_fpga_tls_command_context *ctx; 74 - struct mlx5_fpga_tls *tls = fdev->tls; 75 - unsigned long flags; 76 - 77 - spin_lock_irqsave(&tls->pending_cmds_lock, flags); 78 - ctx = list_first_entry(&tls->pending_cmds, 79 - struct mlx5_fpga_tls_command_context, list); 80 - list_del(&ctx->list); 81 - spin_unlock_irqrestore(&tls->pending_cmds_lock, flags); 82 - ctx->complete(conn, fdev, ctx, resp); 83 - } 84 - 85 - static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn, 86 - struct mlx5_fpga_device *fdev, 87 - struct mlx5_fpga_dma_buf *buf, 88 - u8 status) 89 - { 90 - struct mlx5_fpga_tls_command_context *ctx = 91 - container_of(buf, struct 
mlx5_fpga_tls_command_context, buf); 92 - 93 - mlx5_fpga_tls_put_command_ctx(ctx); 94 - 95 - if (unlikely(status)) 96 - mlx5_fpga_tls_cmd_complete(fdev, NULL); 97 - } 98 - 99 - static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev, 100 - struct mlx5_fpga_tls_command_context *cmd, 101 - mlx5_fpga_tls_command_complete complete) 102 - { 103 - struct mlx5_fpga_tls *tls = fdev->tls; 104 - unsigned long flags; 105 - int ret; 106 - 107 - refcount_set(&cmd->ref, 2); 108 - cmd->complete = complete; 109 - cmd->buf.complete = mlx5_fpga_cmd_send_complete; 110 - 111 - spin_lock_irqsave(&tls->pending_cmds_lock, flags); 112 - /* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock 113 - * to make sure commands are inserted to the tls->pending_cmds list 114 - * and the command QP in the same order. 115 - */ 116 - ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf); 117 - if (likely(!ret)) 118 - list_add_tail(&cmd->list, &tls->pending_cmds); 119 - else 120 - complete(tls->conn, fdev, cmd, NULL); 121 - spin_unlock_irqrestore(&tls->pending_cmds_lock, flags); 122 - } 123 - 124 - /* Start of context identifiers range (inclusive) */ 125 - #define SWID_START 0 126 - /* End of context identifiers range (exclusive) */ 127 - #define SWID_END BIT(24) 128 - 129 - static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, 130 - void *ptr) 131 - { 132 - unsigned long flags; 133 - int ret; 134 - 135 - /* TLS metadata format is 1 byte for syndrome followed 136 - * by 3 bytes of swid (software ID) 137 - * swid must not exceed 3 bytes. 
138 - * See tls_rxtx.c:insert_pet() for details 139 - */ 140 - BUILD_BUG_ON((SWID_END - 1) & 0xFF000000); 141 - 142 - idr_preload(GFP_KERNEL); 143 - spin_lock_irqsave(idr_spinlock, flags); 144 - ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC); 145 - spin_unlock_irqrestore(idr_spinlock, flags); 146 - idr_preload_end(); 147 - 148 - return ret; 149 - } 150 - 151 - static void *mlx5_fpga_tls_release_swid(struct idr *idr, 152 - spinlock_t *idr_spinlock, u32 swid) 153 - { 154 - unsigned long flags; 155 - void *ptr; 156 - 157 - spin_lock_irqsave(idr_spinlock, flags); 158 - ptr = idr_remove(idr, swid); 159 - spin_unlock_irqrestore(idr_spinlock, flags); 160 - return ptr; 161 - } 162 - 163 - static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, 164 - struct mlx5_fpga_device *fdev, 165 - struct mlx5_fpga_dma_buf *buf, u8 status) 166 - { 167 - kfree(buf); 168 - } 169 - 170 - static void 171 - mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, 172 - struct mlx5_fpga_device *fdev, 173 - struct mlx5_fpga_tls_command_context *cmd, 174 - struct mlx5_fpga_dma_buf *resp) 175 - { 176 - if (resp) { 177 - u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); 178 - 179 - if (syndrome) 180 - mlx5_fpga_err(fdev, 181 - "Teardown stream failed with syndrome = %d", 182 - syndrome); 183 - } 184 - mlx5_fpga_tls_put_command_ctx(cmd); 185 - } 186 - 187 - static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd) 188 - { 189 - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow, 190 - MLX5_BYTE_OFF(tls_flow, ipv6)); 191 - 192 - MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6)); 193 - MLX5_SET(tls_cmd, cmd, direction_sx, 194 - MLX5_GET(tls_flow, flow, direction_sx)); 195 - } 196 - 197 - int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, 198 - u32 seq, __be64 rcd_sn) 199 - { 200 - struct mlx5_fpga_dma_buf *buf; 201 - int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE; 202 - void *flow; 203 - void *cmd; 204 - int ret; 205 - 206 - 
buf = kzalloc(size, GFP_ATOMIC); 207 - if (!buf) 208 - return -ENOMEM; 209 - 210 - cmd = (buf + 1); 211 - 212 - rcu_read_lock(); 213 - flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); 214 - if (unlikely(!flow)) { 215 - rcu_read_unlock(); 216 - WARN_ONCE(1, "Received NULL pointer for handle\n"); 217 - kfree(buf); 218 - return -EINVAL; 219 - } 220 - mlx5_fpga_tls_flow_to_cmd(flow, cmd); 221 - rcu_read_unlock(); 222 - 223 - MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); 224 - MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); 225 - MLX5_SET(tls_cmd, cmd, tcp_sn, seq); 226 - MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX); 227 - 228 - buf->sg[0].data = cmd; 229 - buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; 230 - buf->complete = mlx_tls_kfree_complete; 231 - 232 - ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); 233 - if (ret < 0) 234 - kfree(buf); 235 - 236 - return ret; 237 - } 238 - 239 - static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, 240 - void *flow, u32 swid, gfp_t flags) 241 - { 242 - struct mlx5_fpga_tls_command_context *ctx; 243 - struct mlx5_fpga_dma_buf *buf; 244 - void *cmd; 245 - 246 - ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags); 247 - if (!ctx) 248 - return; 249 - 250 - buf = &ctx->buf; 251 - cmd = (ctx + 1); 252 - MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); 253 - MLX5_SET(tls_cmd, cmd, swid, swid); 254 - 255 - mlx5_fpga_tls_flow_to_cmd(flow, cmd); 256 - kfree(flow); 257 - 258 - buf->sg[0].data = cmd; 259 - buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; 260 - 261 - mlx5_fpga_tls_cmd_send(mdev->fpga, ctx, 262 - mlx5_fpga_tls_teardown_completion); 263 - } 264 - 265 - void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, 266 - gfp_t flags, bool direction_sx) 267 - { 268 - struct mlx5_fpga_tls *tls = mdev->fpga->tls; 269 - void *flow; 270 - 271 - if (direction_sx) 272 - flow = mlx5_fpga_tls_release_swid(&tls->tx_idr, 273 - &tls->tx_idr_spinlock, 274 - swid); 275 - else 
276 - flow = mlx5_fpga_tls_release_swid(&tls->rx_idr, 277 - &tls->rx_idr_spinlock, 278 - swid); 279 - 280 - if (!flow) { 281 - mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n", 282 - swid); 283 - return; 284 - } 285 - 286 - synchronize_rcu(); /* before kfree(flow) */ 287 - mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); 288 - } 289 - 290 - enum mlx5_fpga_setup_stream_status { 291 - MLX5_FPGA_CMD_PENDING, 292 - MLX5_FPGA_CMD_SEND_FAILED, 293 - MLX5_FPGA_CMD_RESPONSE_RECEIVED, 294 - MLX5_FPGA_CMD_ABANDONED, 295 - }; 296 - 297 - struct mlx5_setup_stream_context { 298 - struct mlx5_fpga_tls_command_context cmd; 299 - atomic_t status; 300 - u32 syndrome; 301 - struct completion comp; 302 - }; 303 - 304 - static void 305 - mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn, 306 - struct mlx5_fpga_device *fdev, 307 - struct mlx5_fpga_tls_command_context *cmd, 308 - struct mlx5_fpga_dma_buf *resp) 309 - { 310 - struct mlx5_setup_stream_context *ctx = 311 - container_of(cmd, struct mlx5_setup_stream_context, cmd); 312 - int status = MLX5_FPGA_CMD_SEND_FAILED; 313 - void *tls_cmd = ctx + 1; 314 - 315 - /* If we failed to send to command resp == NULL */ 316 - if (resp) { 317 - ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); 318 - status = MLX5_FPGA_CMD_RESPONSE_RECEIVED; 319 - } 320 - 321 - status = atomic_xchg_release(&ctx->status, status); 322 - if (likely(status != MLX5_FPGA_CMD_ABANDONED)) { 323 - complete(&ctx->comp); 324 - return; 325 - } 326 - 327 - mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n", 328 - ctx->syndrome); 329 - 330 - if (!ctx->syndrome) { 331 - /* The process was killed while waiting for the context to be 332 - * added, and the add completed successfully. 333 - * We need to destroy the HW context, and we can't can't reuse 334 - * the command context because we might not have received 335 - * the tx completion yet. 
336 - */ 337 - mlx5_fpga_tls_del_flow(fdev->mdev, 338 - MLX5_GET(tls_cmd, tls_cmd, swid), 339 - GFP_ATOMIC, 340 - MLX5_GET(tls_cmd, tls_cmd, 341 - direction_sx)); 342 - } 343 - 344 - mlx5_fpga_tls_put_command_ctx(cmd); 345 - } 346 - 347 - static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev, 348 - struct mlx5_setup_stream_context *ctx) 349 - { 350 - struct mlx5_fpga_dma_buf *buf; 351 - void *cmd = ctx + 1; 352 - int status, ret = 0; 353 - 354 - buf = &ctx->cmd.buf; 355 - buf->sg[0].data = cmd; 356 - buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; 357 - MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM); 358 - 359 - init_completion(&ctx->comp); 360 - atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING); 361 - ctx->syndrome = -1; 362 - 363 - mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd, 364 - mlx5_fpga_tls_setup_completion); 365 - wait_for_completion_killable(&ctx->comp); 366 - 367 - status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED); 368 - if (unlikely(status == MLX5_FPGA_CMD_PENDING)) 369 - /* ctx is going to be released in mlx5_fpga_tls_setup_completion */ 370 - return -EINTR; 371 - 372 - if (unlikely(ctx->syndrome)) 373 - ret = -ENOMEM; 374 - 375 - mlx5_fpga_tls_put_command_ctx(&ctx->cmd); 376 - return ret; 377 - } 378 - 379 - static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg, 380 - struct mlx5_fpga_dma_buf *buf) 381 - { 382 - struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg; 383 - 384 - mlx5_fpga_tls_cmd_complete(fdev, buf); 385 - } 386 - 387 - bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev) 388 - { 389 - if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga)) 390 - return false; 391 - 392 - if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) != 393 - MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX) 394 - return false; 395 - 396 - if (MLX5_CAP_FPGA(mdev, sandbox_product_id) != 397 - MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS) 398 - return false; 399 - 400 - if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0) 401 - return false; 402 - 403 - 
return true; 404 - } 405 - 406 - static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev, 407 - u32 *p_caps) 408 - { 409 - int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap); 410 - u32 caps = 0; 411 - void *buf; 412 - 413 - buf = kzalloc(cap_size, GFP_KERNEL); 414 - if (!buf) 415 - return -ENOMEM; 416 - 417 - err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf); 418 - if (err) 419 - goto out; 420 - 421 - if (MLX5_GET(tls_extended_cap, buf, tx)) 422 - caps |= MLX5_ACCEL_TLS_TX; 423 - if (MLX5_GET(tls_extended_cap, buf, rx)) 424 - caps |= MLX5_ACCEL_TLS_RX; 425 - if (MLX5_GET(tls_extended_cap, buf, tls_v12)) 426 - caps |= MLX5_ACCEL_TLS_V12; 427 - if (MLX5_GET(tls_extended_cap, buf, tls_v13)) 428 - caps |= MLX5_ACCEL_TLS_V13; 429 - if (MLX5_GET(tls_extended_cap, buf, lro)) 430 - caps |= MLX5_ACCEL_TLS_LRO; 431 - if (MLX5_GET(tls_extended_cap, buf, ipv6)) 432 - caps |= MLX5_ACCEL_TLS_IPV6; 433 - 434 - if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128)) 435 - caps |= MLX5_ACCEL_TLS_AES_GCM128; 436 - if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256)) 437 - caps |= MLX5_ACCEL_TLS_AES_GCM256; 438 - 439 - *p_caps = caps; 440 - err = 0; 441 - out: 442 - kfree(buf); 443 - return err; 444 - } 445 - 446 - int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev) 447 - { 448 - struct mlx5_fpga_device *fdev = mdev->fpga; 449 - struct mlx5_fpga_conn_attr init_attr = {0}; 450 - struct mlx5_fpga_conn *conn; 451 - struct mlx5_fpga_tls *tls; 452 - int err = 0; 453 - 454 - if (!mlx5_fpga_is_tls_device(mdev) || !fdev) 455 - return 0; 456 - 457 - tls = kzalloc(sizeof(*tls), GFP_KERNEL); 458 - if (!tls) 459 - return -ENOMEM; 460 - 461 - err = mlx5_fpga_tls_get_caps(fdev, &tls->caps); 462 - if (err) 463 - goto error; 464 - 465 - if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) { 466 - err = -ENOTSUPP; 467 - goto error; 468 - } 469 - 470 - init_attr.rx_size = SBU_QP_QUEUE_SIZE; 471 - init_attr.tx_size = SBU_QP_QUEUE_SIZE; 472 - init_attr.recv_cb = 
mlx5_fpga_tls_hw_qp_recv_cb; 473 - init_attr.cb_arg = fdev; 474 - conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr); 475 - if (IS_ERR(conn)) { 476 - err = PTR_ERR(conn); 477 - mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n", 478 - err); 479 - goto error; 480 - } 481 - 482 - tls->conn = conn; 483 - spin_lock_init(&tls->pending_cmds_lock); 484 - INIT_LIST_HEAD(&tls->pending_cmds); 485 - 486 - idr_init(&tls->tx_idr); 487 - idr_init(&tls->rx_idr); 488 - spin_lock_init(&tls->tx_idr_spinlock); 489 - spin_lock_init(&tls->rx_idr_spinlock); 490 - fdev->tls = tls; 491 - return 0; 492 - 493 - error: 494 - kfree(tls); 495 - return err; 496 - } 497 - 498 - void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev) 499 - { 500 - struct mlx5_fpga_device *fdev = mdev->fpga; 501 - 502 - if (!fdev || !fdev->tls) 503 - return; 504 - 505 - mlx5_fpga_sbu_conn_destroy(fdev->tls->conn); 506 - kfree(fdev->tls); 507 - fdev->tls = NULL; 508 - } 509 - 510 - static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd, 511 - struct tls_crypto_info *info, 512 - __be64 *rcd_sn) 513 - { 514 - struct tls12_crypto_info_aes_gcm_128 *crypto_info = 515 - (struct tls12_crypto_info_aes_gcm_128 *)info; 516 - 517 - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq, 518 - TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); 519 - 520 - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv), 521 - crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); 522 - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key), 523 - crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); 524 - 525 - /* in AES-GCM 128 we need to write the key twice */ 526 - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) + 527 - TLS_CIPHER_AES_GCM_128_KEY_SIZE, 528 - crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); 529 - 530 - MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128); 531 - } 532 - 533 - static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps, 534 - struct tls_crypto_info *crypto_info) 535 - { 536 - __be64 rcd_sn; 537 
- 538 - switch (crypto_info->cipher_type) { 539 - case TLS_CIPHER_AES_GCM_128: 540 - if (!(caps & MLX5_ACCEL_TLS_AES_GCM128)) 541 - return -EINVAL; 542 - mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn); 543 - break; 544 - default: 545 - return -EINVAL; 546 - } 547 - 548 - return 0; 549 - } 550 - 551 - static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 552 - struct tls_crypto_info *crypto_info, 553 - u32 swid, u32 tcp_sn) 554 - { 555 - u32 caps = mlx5_fpga_tls_device_caps(mdev); 556 - struct mlx5_setup_stream_context *ctx; 557 - int ret = -ENOMEM; 558 - size_t cmd_size; 559 - void *cmd; 560 - 561 - cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx); 562 - ctx = kzalloc(cmd_size, GFP_KERNEL); 563 - if (!ctx) 564 - goto out; 565 - 566 - cmd = ctx + 1; 567 - ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info); 568 - if (ret) 569 - goto free_ctx; 570 - 571 - mlx5_fpga_tls_flow_to_cmd(flow, cmd); 572 - 573 - MLX5_SET(tls_cmd, cmd, swid, swid); 574 - MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn); 575 - 576 - return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx); 577 - 578 - free_ctx: 579 - kfree(ctx); 580 - out: 581 - return ret; 582 - } 583 - 584 - int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 585 - struct tls_crypto_info *crypto_info, 586 - u32 start_offload_tcp_sn, u32 *p_swid, 587 - bool direction_sx) 588 - { 589 - struct mlx5_fpga_tls *tls = mdev->fpga->tls; 590 - int ret = -ENOMEM; 591 - u32 swid; 592 - 593 - if (direction_sx) 594 - ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, 595 - &tls->tx_idr_spinlock, flow); 596 - else 597 - ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr, 598 - &tls->rx_idr_spinlock, flow); 599 - 600 - if (ret < 0) 601 - return ret; 602 - 603 - swid = ret; 604 - MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 
1 : 0); 605 - 606 - ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, 607 - start_offload_tcp_sn); 608 - if (ret && ret != -EINTR) 609 - goto free_swid; 610 - 611 - *p_swid = swid; 612 - return 0; 613 - free_swid: 614 - if (direction_sx) 615 - mlx5_fpga_tls_release_swid(&tls->tx_idr, 616 - &tls->tx_idr_spinlock, swid); 617 - else 618 - mlx5_fpga_tls_release_swid(&tls->rx_idr, 619 - &tls->rx_idr_spinlock, swid); 620 - 621 - return ret; 622 - }
-74
drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
··· 1 - /* 2 - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 
31 - * 32 - */ 33 - 34 - #ifndef __MLX5_FPGA_TLS_H__ 35 - #define __MLX5_FPGA_TLS_H__ 36 - 37 - #include <linux/mlx5/driver.h> 38 - 39 - #include <net/tls.h> 40 - #include "fpga/core.h" 41 - 42 - struct mlx5_fpga_tls { 43 - struct list_head pending_cmds; 44 - spinlock_t pending_cmds_lock; /* Protects pending_cmds */ 45 - u32 caps; 46 - struct mlx5_fpga_conn *conn; 47 - 48 - struct idr tx_idr; 49 - struct idr rx_idr; 50 - spinlock_t tx_idr_spinlock; /* protects the IDR */ 51 - spinlock_t rx_idr_spinlock; /* protects the IDR */ 52 - }; 53 - 54 - int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 55 - struct tls_crypto_info *crypto_info, 56 - u32 start_offload_tcp_sn, u32 *p_swid, 57 - bool direction_sx); 58 - 59 - void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, 60 - gfp_t flags, bool direction_sx); 61 - 62 - bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev); 63 - int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev); 64 - void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev); 65 - 66 - static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev) 67 - { 68 - return mdev->fpga->tls->caps; 69 - } 70 - 71 - int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, 72 - u32 seq, __be64 rcd_sn); 73 - 74 - #endif /* __MLX5_FPGA_TLS_H__ */
-2
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
··· 878 878 table_type = FS_FT_NIC_RX; 879 879 break; 880 880 case MLX5_FLOW_NAMESPACE_EGRESS: 881 - #ifdef CONFIG_MLX5_IPSEC 882 881 case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL: 883 - #endif 884 882 max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions); 885 883 table_type = FS_FT_NIC_TX; 886 884 break;
+1 -14
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 40 40 #include "fs_cmd.h" 41 41 #include "fs_ft_pool.h" 42 42 #include "diag/fs_tracepoint.h" 43 - #include "accel/ipsec.h" 44 - #include "fpga/ipsec.h" 45 43 46 44 #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ 47 45 sizeof(struct init_tree_node)) ··· 186 188 187 189 static struct init_tree_node egress_root_fs = { 188 190 .type = FS_TYPE_NAMESPACE, 189 - #ifdef CONFIG_MLX5_IPSEC 190 191 .ar_size = 2, 191 - #else 192 - .ar_size = 1, 193 - #endif 194 192 .children = (struct init_tree_node[]) { 195 193 ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, 196 194 FS_CHAINING_CAPS_EGRESS, 197 195 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, 198 196 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, 199 197 BY_PASS_PRIO_NUM_LEVELS))), 200 - #ifdef CONFIG_MLX5_IPSEC 201 198 ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0, 202 199 FS_CHAINING_CAPS_EGRESS, 203 200 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, 204 201 ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS, 205 202 KERNEL_TX_IPSEC_NUM_LEVELS))), 206 - #endif 207 203 } 208 204 }; 209 205 ··· 2511 2519 struct mlx5_flow_root_namespace *root_ns; 2512 2520 struct mlx5_flow_namespace *ns; 2513 2521 2514 - if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE && 2515 - (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX)) 2516 - cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type); 2517 - 2518 2522 /* Create the root namespace */ 2519 2523 root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL); 2520 2524 if (!root_ns) ··· 3160 3172 goto err; 3161 3173 } 3162 3174 3163 - if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE || 3164 - MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) { 3175 + if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) { 3165 3176 err = init_egress_root_ns(steering); 3166 3177 if (err) 3167 3178 goto err;
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/fw.c
··· 35 35 #include "mlx5_core.h" 36 36 #include "../../mlxfw/mlxfw.h" 37 37 #include "lib/tout.h" 38 - #include "accel/tls.h" 39 38 40 39 enum { 41 40 MCQS_IDENTIFIER_BOOT_IMG = 0x1, ··· 248 249 return err; 249 250 } 250 251 251 - if (mlx5_accel_is_ktls_tx(dev) || mlx5_accel_is_ktls_rx(dev)) { 252 + if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) { 252 253 err = mlx5_core_get_caps(dev, MLX5_CAP_TLS); 253 254 if (err) 254 255 return err;
+1 -17
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 62 62 #include "lib/mlx5.h" 63 63 #include "lib/tout.h" 64 64 #include "fpga/core.h" 65 - #include "fpga/ipsec.h" 66 - #include "accel/ipsec.h" 67 - #include "accel/tls.h" 65 + #include "en_accel/ipsec_offload.h" 68 66 #include "lib/clock.h" 69 67 #include "lib/vxlan.h" 70 68 #include "lib/geneve.h" ··· 1181 1183 goto err_fpga_start; 1182 1184 } 1183 1185 1184 - mlx5_accel_ipsec_init(dev); 1185 - 1186 - err = mlx5_accel_tls_init(dev); 1187 - if (err) { 1188 - mlx5_core_err(dev, "TLS device start failed %d\n", err); 1189 - goto err_tls_start; 1190 - } 1191 - 1192 1186 err = mlx5_init_fs(dev); 1193 1187 if (err) { 1194 1188 mlx5_core_err(dev, "Failed to init flow steering\n"); ··· 1228 1238 err_set_hca: 1229 1239 mlx5_cleanup_fs(dev); 1230 1240 err_fs: 1231 - mlx5_accel_tls_cleanup(dev); 1232 - err_tls_start: 1233 - mlx5_accel_ipsec_cleanup(dev); 1234 1241 mlx5_fpga_device_stop(dev); 1235 1242 err_fpga_start: 1236 1243 mlx5_rsc_dump_cleanup(dev); ··· 1253 1266 mlx5_sf_hw_table_destroy(dev); 1254 1267 mlx5_vhca_event_stop(dev); 1255 1268 mlx5_cleanup_fs(dev); 1256 - mlx5_accel_ipsec_cleanup(dev); 1257 - mlx5_accel_tls_cleanup(dev); 1258 1269 mlx5_fpga_device_stop(dev); 1259 1270 mlx5_rsc_dump_cleanup(dev); 1260 1271 mlx5_hv_vhca_cleanup(dev->hv_vhca); ··· 1932 1947 get_random_bytes(&sw_owner_id, sizeof(sw_owner_id)); 1933 1948 1934 1949 mlx5_core_verify_params(); 1935 - mlx5_fpga_ipsec_build_fs_cmds(); 1936 1950 mlx5_register_debugfs(); 1937 1951 1938 1952 err = pci_register_driver(&mlx5_core_driver);
+16 -19
include/linux/mlx5/accel.h
··· 111 111 struct mlx5_accel_esp_xfrm_attrs attrs; 112 112 }; 113 113 114 - enum { 115 - MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0, 116 - }; 117 - 118 114 enum mlx5_accel_ipsec_cap { 119 115 MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, 120 - MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1, 121 - MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2, 122 - MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3, 123 - MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4, 124 - MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5, 125 - MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6, 126 - MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7, 116 + MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 1, 117 + MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 2, 118 + MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 3, 119 + MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 4, 127 120 }; 128 121 129 - #ifdef CONFIG_MLX5_ACCEL 122 + #ifdef CONFIG_MLX5_EN_IPSEC 130 123 131 - u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); 124 + u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev); 132 125 133 126 struct mlx5_accel_esp_xfrm * 134 127 mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, 135 - const struct mlx5_accel_esp_xfrm_attrs *attrs, 136 - u32 flags); 128 + const struct mlx5_accel_esp_xfrm_attrs *attrs); 137 129 void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); 138 130 int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, 139 131 const struct mlx5_accel_esp_xfrm_attrs *attrs); 140 132 141 133 #else 142 134 143 - static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } 135 + static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) 136 + { 137 + return 0; 138 + } 144 139 145 140 static inline struct mlx5_accel_esp_xfrm * 146 141 mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, 147 - const struct mlx5_accel_esp_xfrm_attrs *attrs, 148 - u32 flags) { return ERR_PTR(-EOPNOTSUPP); } 142 + const struct mlx5_accel_esp_xfrm_attrs *attrs) 143 + { 144 + return ERR_PTR(-EOPNOTSUPP); 145 + } 149 146 static inline void 150 147 
mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {} 151 148 static inline int 152 149 mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, 153 150 const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; } 154 151 155 - #endif /* CONFIG_MLX5_ACCEL */ 152 + #endif /* CONFIG_MLX5_EN_IPSEC */ 156 153 #endif /* __MLX5_ACCEL_H__ */
-3
include/linux/mlx5/driver.h
··· 778 778 #ifdef CONFIG_MLX5_FPGA 779 779 struct mlx5_fpga_device *fpga; 780 780 #endif 781 - #ifdef CONFIG_MLX5_ACCEL 782 - const struct mlx5_accel_ipsec_ops *ipsec_ops; 783 - #endif 784 781 struct mlx5_clock clock; 785 782 struct mlx5_ib_clock_info *clock_info; 786 783 struct mlx5_fw_tracer *tracer;
-211
include/linux/mlx5/mlx5_ifc_fpga.h
··· 54 54 55 55 enum { 56 56 MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2, 57 - MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS = 0x3, 58 57 }; 59 58 60 59 struct mlx5_ifc_fpga_shell_caps_bits { ··· 386 387 u8 reserved_at_40[0x40]; 387 388 }; 388 389 389 - struct mlx5_ifc_tls_extended_cap_bits { 390 - u8 aes_gcm_128[0x1]; 391 - u8 aes_gcm_256[0x1]; 392 - u8 reserved_at_2[0x1e]; 393 - u8 reserved_at_20[0x20]; 394 - u8 context_capacity_total[0x20]; 395 - u8 context_capacity_rx[0x20]; 396 - u8 context_capacity_tx[0x20]; 397 - u8 reserved_at_a0[0x10]; 398 - u8 tls_counter_size[0x10]; 399 - u8 tls_counters_addr_low[0x20]; 400 - u8 tls_counters_addr_high[0x20]; 401 - u8 rx[0x1]; 402 - u8 tx[0x1]; 403 - u8 tls_v12[0x1]; 404 - u8 tls_v13[0x1]; 405 - u8 lro[0x1]; 406 - u8 ipv6[0x1]; 407 - u8 reserved_at_106[0x1a]; 408 - }; 409 - 410 - struct mlx5_ifc_ipsec_extended_cap_bits { 411 - u8 encapsulation[0x20]; 412 - 413 - u8 reserved_0[0x12]; 414 - u8 v2_command[0x1]; 415 - u8 udp_encap[0x1]; 416 - u8 rx_no_trailer[0x1]; 417 - u8 ipv4_fragment[0x1]; 418 - u8 ipv6[0x1]; 419 - u8 esn[0x1]; 420 - u8 lso[0x1]; 421 - u8 transport_and_tunnel_mode[0x1]; 422 - u8 tunnel_mode[0x1]; 423 - u8 transport_mode[0x1]; 424 - u8 ah_esp[0x1]; 425 - u8 esp[0x1]; 426 - u8 ah[0x1]; 427 - u8 ipv4_options[0x1]; 428 - 429 - u8 auth_alg[0x20]; 430 - 431 - u8 enc_alg[0x20]; 432 - 433 - u8 sa_cap[0x20]; 434 - 435 - u8 reserved_1[0x10]; 436 - u8 number_of_ipsec_counters[0x10]; 437 - 438 - u8 ipsec_counters_addr_low[0x20]; 439 - u8 ipsec_counters_addr_high[0x20]; 440 - }; 441 - 442 - struct mlx5_ifc_ipsec_counters_bits { 443 - u8 dec_in_packets[0x40]; 444 - 445 - u8 dec_out_packets[0x40]; 446 - 447 - u8 dec_bypass_packets[0x40]; 448 - 449 - u8 enc_in_packets[0x40]; 450 - 451 - u8 enc_out_packets[0x40]; 452 - 453 - u8 enc_bypass_packets[0x40]; 454 - 455 - u8 drop_dec_packets[0x40]; 456 - 457 - u8 failed_auth_dec_packets[0x40]; 458 - 459 - u8 drop_enc_packets[0x40]; 460 - 461 - u8 success_add_sa[0x40]; 462 - 463 - u8 
fail_add_sa[0x40]; 464 - 465 - u8 success_delete_sa[0x40]; 466 - 467 - u8 fail_delete_sa[0x40]; 468 - 469 - u8 dropped_cmd[0x40]; 470 - }; 471 - 472 390 enum { 473 391 MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1, 474 392 MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2, ··· 402 486 u8 reserved_at_c0[0x8]; 403 487 u8 fpga_qpn[0x18]; 404 488 }; 405 - enum mlx5_ifc_fpga_ipsec_response_syndrome { 406 - MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0, 407 - MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1, 408 - MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2, 409 - MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3, 410 - }; 411 - 412 - struct mlx5_ifc_fpga_ipsec_cmd_resp { 413 - __be32 syndrome; 414 - union { 415 - __be32 sw_sa_handle; 416 - __be32 flags; 417 - }; 418 - u8 reserved[24]; 419 - } __packed; 420 - 421 - enum mlx5_ifc_fpga_ipsec_cmd_opcode { 422 - MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0, 423 - MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1, 424 - MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2, 425 - MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3, 426 - MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4, 427 - MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5, 428 - }; 429 - 430 - enum mlx5_ifc_fpga_ipsec_cap { 431 - MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0), 432 - }; 433 - 434 - struct mlx5_ifc_fpga_ipsec_cmd_cap { 435 - __be32 cmd; 436 - __be32 flags; 437 - u8 reserved[24]; 438 - } __packed; 439 - 440 - enum mlx5_ifc_fpga_ipsec_sa_flags { 441 - MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0), 442 - MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1), 443 - MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2), 444 - MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3), 445 - MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4), 446 - MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5), 447 - MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6), 448 - MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7), 449 - }; 450 - 451 - enum mlx5_ifc_fpga_ipsec_sa_enc_mode { 452 - MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0, 453 - MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1, 454 - MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3, 455 - }; 456 - 457 - struct 
mlx5_ifc_fpga_ipsec_sa_v1 { 458 - __be32 cmd; 459 - u8 key_enc[32]; 460 - u8 key_auth[32]; 461 - __be32 sip[4]; 462 - __be32 dip[4]; 463 - union { 464 - struct { 465 - __be32 reserved; 466 - u8 salt_iv[8]; 467 - __be32 salt; 468 - } __packed gcm; 469 - struct { 470 - u8 salt[16]; 471 - } __packed cbc; 472 - }; 473 - __be32 spi; 474 - __be32 sw_sa_handle; 475 - __be16 tfclen; 476 - u8 enc_mode; 477 - u8 reserved1[2]; 478 - u8 flags; 479 - u8 reserved2[2]; 480 - }; 481 - 482 - struct mlx5_ifc_fpga_ipsec_sa { 483 - struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1; 484 - __be16 udp_sp; 485 - __be16 udp_dp; 486 - u8 reserved1[4]; 487 - __be32 esn; 488 - __be16 vid; /* only 12 bits, rest is reserved */ 489 - __be16 reserved2; 490 - } __packed; 491 - 492 - enum fpga_tls_cmds { 493 - CMD_SETUP_STREAM = 0x1001, 494 - CMD_TEARDOWN_STREAM = 0x1002, 495 - CMD_RESYNC_RX = 0x1003, 496 - }; 497 - 498 - #define MLX5_TLS_1_2 (0) 499 - 500 - #define MLX5_TLS_ALG_AES_GCM_128 (0) 501 - #define MLX5_TLS_ALG_AES_GCM_256 (1) 502 - 503 - struct mlx5_ifc_tls_cmd_bits { 504 - u8 command_type[0x20]; 505 - u8 ipv6[0x1]; 506 - u8 direction_sx[0x1]; 507 - u8 tls_version[0x2]; 508 - u8 reserved[0x1c]; 509 - u8 swid[0x20]; 510 - u8 src_port[0x10]; 511 - u8 dst_port[0x10]; 512 - union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; 513 - union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; 514 - u8 tls_rcd_sn[0x40]; 515 - u8 tcp_sn[0x20]; 516 - u8 tls_implicit_iv[0x20]; 517 - u8 tls_xor_iv[0x40]; 518 - u8 encryption_key[0x100]; 519 - u8 alg[4]; 520 - u8 reserved2[0x1c]; 521 - u8 reserved3[0x4a0]; 522 - }; 523 - 524 - struct mlx5_ifc_tls_resp_bits { 525 - u8 syndrome[0x20]; 526 - u8 stream_id[0x20]; 527 - u8 reserved[0x40]; 528 - }; 529 - 530 - #define MLX5_TLS_COMMAND_SIZE (0x100) 531 - 532 489 #endif /* MLX5_IFC_FPGA_H */
+1 -1
include/linux/mlx5/port.h
··· 141 141 MLX5_PTYS_WIDTH_12X = 1 << 4, 142 142 }; 143 143 144 - #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) 144 + #define MLX5E_PROT_MASK(link_mode) (1U << link_mode) 145 145 #define MLX5_GET_ETH_PROTO(reg, out, ext, field) \ 146 146 (ext ? MLX5_GET(reg, out, ext_##field) : \ 147 147 MLX5_GET(reg, out, field))
-8
include/rdma/ib_verbs.h
··· 2497 2497 struct ib_flow_attr *flow_attr, 2498 2498 struct ib_udata *udata); 2499 2499 int (*destroy_flow)(struct ib_flow *flow_id); 2500 - struct ib_flow_action *(*create_flow_action_esp)( 2501 - struct ib_device *device, 2502 - const struct ib_flow_action_attrs_esp *attr, 2503 - struct uverbs_attr_bundle *attrs); 2504 2500 int (*destroy_flow_action)(struct ib_flow_action *action); 2505 - int (*modify_flow_action_esp)( 2506 - struct ib_flow_action *action, 2507 - const struct ib_flow_action_attrs_esp *attr, 2508 - struct uverbs_attr_bundle *attrs); 2509 2501 int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port, 2510 2502 int state); 2511 2503 int (*get_vf_config)(struct ib_device *device, int vf, u32 port,