Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: tls: Change async resync helpers argument

Update tls_offload_rx_resync_async_request_start() and
tls_offload_rx_resync_async_request_end() to get a struct
tls_offload_resync_async parameter directly, rather than
extracting it from struct sock.

This change aligns the function signatures with the upcoming
tls_offload_rx_resync_async_request_cancel() helper, which
will be introduced in a subsequent patch.

Signed-off-by: Shahar Shitrit <shshitrit@nvidia.com>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/1761508983-937977-2-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Shahar Shitrit; committed by Jakub Kicinski.
34892cfe e98cda76

Total: 15 insertions(+), 15 deletions(-)

drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c (+7 -2)

@@ -425,11 +425,13 @@
 {
 	struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
 	struct mlx5e_ktls_offload_context_rx *priv_rx;
+	struct tls_offload_context_rx *rx_ctx;
 	u8 tracker_state, auth_state, *ctx;
 	struct device *dev;
 	u32 hw_seq;
 
 	priv_rx = buf->priv_rx;
 	dev = mlx5_core_dma_dev(sq->channel->mdev);
+	rx_ctx = tls_offload_ctx_rx(tls_get_ctx(priv_rx->sk));
 	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
 		goto out;
@@ -449,7 +447,8 @@
 	}
 
 	hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
-	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
+	tls_offload_rx_resync_async_request_end(rx_ctx->resync_async,
+						cpu_to_be32(hw_seq));
 	priv_rx->rq_stats->tls_resync_req_end++;
 out:
 	mlx5e_ktls_priv_rx_put(priv_rx);
@@ -485,6 +482,7 @@
 static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
 {
 	struct ethhdr *eth = (struct ethhdr *)(skb->data);
+	struct tls_offload_resync_async *resync_async;
 	struct net_device *netdev = rq->netdev;
 	struct net *net = dev_net(netdev);
 	struct sock *sk = NULL;
@@ -531,7 +527,8 @@
 
 	seq = th->seq;
 	datalen = skb->len - depth;
-	tls_offload_rx_resync_async_request_start(sk, seq, datalen);
+	resync_async = tls_offload_ctx_rx(tls_get_ctx(sk))->resync_async;
+	tls_offload_rx_resync_async_request_start(resync_async, seq, datalen);
 	rq->stats->tls_resync_req_start++;
 
 unref:
include/net/tls.h (+8 -13)

@@ -451,25 +451,20 @@
 
 /* Log all TLS record header TCP sequences in [seq, seq+len] */
 static inline void
-tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
+tls_offload_rx_resync_async_request_start(struct tls_offload_resync_async *resync_async,
+					  __be32 seq, u16 len)
 {
-	struct tls_context *tls_ctx = tls_get_ctx(sk);
-	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
-
-	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
+	atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) |
 		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
-	rx_ctx->resync_async->loglen = 0;
-	rx_ctx->resync_async->rcd_delta = 0;
+	resync_async->loglen = 0;
+	resync_async->rcd_delta = 0;
 }
 
 static inline void
-tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
+tls_offload_rx_resync_async_request_end(struct tls_offload_resync_async *resync_async,
+					__be32 seq)
 {
-	struct tls_context *tls_ctx = tls_get_ctx(sk);
-	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
-
-	atomic64_set(&rx_ctx->resync_async->req,
-		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+	atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
 }
 
 static inline void