Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/mlx5: Accel, add TLS rx offload routines

In Innova TLS, TLS contexts are added or deleted
via a command message over the SBU connection.
The HW then sends a response message over the same connection.

Complete the implementation for Innova TLS (FPGA-based) hardware by
adding support for rx inline crypto offload.

Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Ilya Lesokhin <ilyal@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Boris Pismenny and committed by David S. Miller

Commit: ab412e1d  Parent: 0aadb2fc

+135 -46
+16 -7
drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
··· 37 37 #include "mlx5_core.h" 38 38 #include "fpga/tls.h" 39 39 40 - int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, 41 - struct tls_crypto_info *crypto_info, 42 - u32 start_offload_tcp_sn, u32 *p_swid) 40 + int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 41 + struct tls_crypto_info *crypto_info, 42 + u32 start_offload_tcp_sn, u32 *p_swid, 43 + bool direction_sx) 43 44 { 44 - return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info, 45 - start_offload_tcp_sn, p_swid); 45 + return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, 46 + start_offload_tcp_sn, p_swid, 47 + direction_sx); 46 48 } 47 49 48 - void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) 50 + void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, 51 + bool direction_sx) 49 52 { 50 - mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL); 53 + mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx); 54 + } 55 + 56 + int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, 57 + u64 rcd_sn) 58 + { 59 + return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn); 51 60 } 52 61 53 62 bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
+17 -9
drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
··· 60 60 u8 reserved_at_2[0x1e]; 61 61 }; 62 62 63 - int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, 64 - struct tls_crypto_info *crypto_info, 65 - u32 start_offload_tcp_sn, u32 *p_swid); 66 - void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid); 63 + int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 64 + struct tls_crypto_info *crypto_info, 65 + u32 start_offload_tcp_sn, u32 *p_swid, 66 + bool direction_sx); 67 + void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, 68 + bool direction_sx); 69 + int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, 70 + u64 rcd_sn); 67 71 bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev); 68 72 u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev); 69 73 int mlx5_accel_tls_init(struct mlx5_core_dev *mdev); ··· 75 71 76 72 #else 77 73 78 - static inline int 79 - mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, 80 - struct tls_crypto_info *crypto_info, 81 - u32 start_offload_tcp_sn, u32 *p_swid) { return 0; } 82 - static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { } 74 + static int 75 + mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 76 + struct tls_crypto_info *crypto_info, 77 + u32 start_offload_tcp_sn, u32 *p_swid, 78 + bool direction_sx) { return -ENOTSUPP; } 79 + static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, 80 + bool direction_sx) { } 81 + static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, 82 + u32 seq, u64 rcd_sn) { return 0; } 83 83 static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; } 84 84 static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; } 85 85 static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
+89 -24
drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
··· 129 129 static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, 130 130 void *ptr) 131 131 { 132 + unsigned long flags; 132 133 int ret; 133 134 134 135 /* TLS metadata format is 1 byte for syndrome followed ··· 140 139 BUILD_BUG_ON((SWID_END - 1) & 0xFF000000); 141 140 142 141 idr_preload(GFP_KERNEL); 143 - spin_lock_irq(idr_spinlock); 142 + spin_lock_irqsave(idr_spinlock, flags); 144 143 ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC); 145 - spin_unlock_irq(idr_spinlock); 144 + spin_unlock_irqrestore(idr_spinlock, flags); 146 145 idr_preload_end(); 147 146 148 147 return ret; ··· 156 155 spin_lock_irqsave(idr_spinlock, flags); 157 156 idr_remove(idr, swid); 158 157 spin_unlock_irqrestore(idr_spinlock, flags); 158 + } 159 + 160 + static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, 161 + struct mlx5_fpga_device *fdev, 162 + struct mlx5_fpga_dma_buf *buf, u8 status) 163 + { 164 + kfree(buf); 159 165 } 160 166 161 167 struct mlx5_teardown_stream_context { ··· 186 178 mlx5_fpga_err(fdev, 187 179 "Teardown stream failed with syndrome = %d", 188 180 syndrome); 189 - else 181 + else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx)) 190 182 mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr, 191 - &fdev->tls->idr_spinlock, 183 + &fdev->tls->tx_idr_spinlock, 184 + ctx->swid); 185 + else 186 + mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr, 187 + &fdev->tls->rx_idr_spinlock, 192 188 ctx->swid); 193 189 } 194 190 mlx5_fpga_tls_put_command_ctx(cmd); ··· 206 194 MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6)); 207 195 MLX5_SET(tls_cmd, cmd, direction_sx, 208 196 MLX5_GET(tls_flow, flow, direction_sx)); 197 + } 198 + 199 + int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, 200 + u64 rcd_sn) 201 + { 202 + struct mlx5_fpga_dma_buf *buf; 203 + int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE; 204 + void *flow; 205 + void *cmd; 206 + int ret; 207 + 208 + buf = kzalloc(size, GFP_ATOMIC); 209 + 
if (!buf) 210 + return -ENOMEM; 211 + 212 + cmd = (buf + 1); 213 + 214 + rcu_read_lock(); 215 + flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); 216 + rcu_read_unlock(); 217 + mlx5_fpga_tls_flow_to_cmd(flow, cmd); 218 + 219 + MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); 220 + MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); 221 + MLX5_SET(tls_cmd, cmd, tcp_sn, seq); 222 + MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX); 223 + 224 + buf->sg[0].data = cmd; 225 + buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; 226 + buf->complete = mlx_tls_kfree_complete; 227 + 228 + ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); 229 + 230 + return ret; 209 231 } 210 232 211 233 static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, ··· 269 223 mlx5_fpga_tls_teardown_completion); 270 224 } 271 225 272 - void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, 273 - gfp_t flags) 226 + void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, 227 + gfp_t flags, bool direction_sx) 274 228 { 275 229 struct mlx5_fpga_tls *tls = mdev->fpga->tls; 276 230 void *flow; 277 231 278 232 rcu_read_lock(); 279 - flow = idr_find(&tls->tx_idr, swid); 233 + if (direction_sx) 234 + flow = idr_find(&tls->tx_idr, swid); 235 + else 236 + flow = idr_find(&tls->rx_idr, swid); 237 + 280 238 rcu_read_unlock(); 281 239 282 240 if (!flow) { ··· 339 289 * the command context because we might not have received 340 290 * the tx completion yet. 
341 291 */ 342 - mlx5_fpga_tls_del_tx_flow(fdev->mdev, 343 - MLX5_GET(tls_cmd, tls_cmd, swid), 344 - GFP_ATOMIC); 292 + mlx5_fpga_tls_del_flow(fdev->mdev, 293 + MLX5_GET(tls_cmd, tls_cmd, swid), 294 + GFP_ATOMIC, 295 + MLX5_GET(tls_cmd, tls_cmd, 296 + direction_sx)); 345 297 } 346 298 347 299 mlx5_fpga_tls_put_command_ctx(cmd); ··· 467 415 if (err) 468 416 goto error; 469 417 470 - if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 | 471 - MLX5_ACCEL_TLS_AES_GCM128))) { 418 + if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) { 472 419 err = -ENOTSUPP; 473 420 goto error; 474 421 } ··· 489 438 INIT_LIST_HEAD(&tls->pending_cmds); 490 439 491 440 idr_init(&tls->tx_idr); 492 - spin_lock_init(&tls->idr_spinlock); 441 + idr_init(&tls->rx_idr); 442 + spin_lock_init(&tls->tx_idr_spinlock); 443 + spin_lock_init(&tls->rx_idr_spinlock); 493 444 fdev->tls = tls; 494 445 return 0; 495 446 ··· 553 500 return 0; 554 501 } 555 502 556 - static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 557 - struct tls_crypto_info *crypto_info, u32 swid, 558 - u32 tcp_sn) 503 + static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 504 + struct tls_crypto_info *crypto_info, 505 + u32 swid, u32 tcp_sn) 559 506 { 560 507 u32 caps = mlx5_fpga_tls_device_caps(mdev); 561 508 struct mlx5_setup_stream_context *ctx; ··· 586 533 return ret; 587 534 } 588 535 589 - int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, 590 - struct tls_crypto_info *crypto_info, 591 - u32 start_offload_tcp_sn, u32 *p_swid) 536 + int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 537 + struct tls_crypto_info *crypto_info, 538 + u32 start_offload_tcp_sn, u32 *p_swid, 539 + bool direction_sx) 592 540 { 593 541 struct mlx5_fpga_tls *tls = mdev->fpga->tls; 594 542 int ret = -ENOMEM; 595 543 u32 swid; 596 544 597 - ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow); 545 + if (direction_sx) 546 + ret = 
mlx5_fpga_tls_alloc_swid(&tls->tx_idr, 547 + &tls->tx_idr_spinlock, flow); 548 + else 549 + ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr, 550 + &tls->rx_idr_spinlock, flow); 551 + 598 552 if (ret < 0) 599 553 return ret; 600 554 601 555 swid = ret; 602 - MLX5_SET(tls_flow, flow, direction_sx, 1); 556 + MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0); 603 557 604 - ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, 605 - start_offload_tcp_sn); 558 + ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, 559 + start_offload_tcp_sn); 606 560 if (ret && ret != -EINTR) 607 561 goto free_swid; 608 562 609 563 *p_swid = swid; 610 564 return 0; 611 565 free_swid: 612 - mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid); 566 + if (direction_sx) 567 + mlx5_fpga_tls_release_swid(&tls->tx_idr, 568 + &tls->tx_idr_spinlock, swid); 569 + else 570 + mlx5_fpga_tls_release_swid(&tls->rx_idr, 571 + &tls->rx_idr_spinlock, swid); 613 572 614 573 return ret; 615 574 }
+12 -6
drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
··· 46 46 struct mlx5_fpga_conn *conn; 47 47 48 48 struct idr tx_idr; 49 - spinlock_t idr_spinlock; /* protects the IDR */ 49 + struct idr rx_idr; 50 + spinlock_t tx_idr_spinlock; /* protects the IDR */ 51 + spinlock_t rx_idr_spinlock; /* protects the IDR */ 50 52 }; 51 53 52 - int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, 53 - struct tls_crypto_info *crypto_info, 54 - u32 start_offload_tcp_sn, u32 *p_swid); 54 + int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, 55 + struct tls_crypto_info *crypto_info, 56 + u32 start_offload_tcp_sn, u32 *p_swid, 57 + bool direction_sx); 55 58 56 - void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, 57 - gfp_t flags); 59 + void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, 60 + gfp_t flags, bool direction_sx); 58 61 59 62 bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev); 60 63 int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev); ··· 67 64 { 68 65 return mdev->fpga->tls->caps; 69 66 } 67 + 68 + int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, 69 + u64 rcd_sn); 70 70 71 71 #endif /* __MLX5_FPGA_TLS_H__ */
+1
include/linux/mlx5/mlx5_ifc_fpga.h
··· 576 576 enum fpga_tls_cmds { 577 577 CMD_SETUP_STREAM = 0x1001, 578 578 CMD_TEARDOWN_STREAM = 0x1002, 579 + CMD_RESYNC_RX = 0x1003, 579 580 }; 580 581 581 582 #define MLX5_TLS_1_2 (0)