Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/tls: Add force_resync for driver resync

This patch adds a field to the tls rx offload context which enables
drivers to force a send_resync call.

This field can be used by drivers to request a resync at the next
possible tls record. It is beneficial for hardware that provides the
resync sequence number asynchronously. In such cases, the packet that
triggered the resync does not contain the information required for a
resync. Instead, the driver requests resync for all the following
TLS records until the asynchronous notification carrying the resync
request's TCP sequence number arrives.

A follow-up series adding mlx5e ConnectX-6DX TLS RX offload support
will use this mechanism.

Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Reviewed-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Reviewed-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Tariq Toukan and committed by
David S. Miller
b3ae2459 bdad7f94

+17 -4
+11 -1
include/net/tls.h
··· 594 594 #endif 595 595 596 596 /* The TLS context is valid until sk_destruct is called */ 597 + #define RESYNC_REQ (1 << 0) 598 + #define RESYNC_REQ_FORCE (1 << 1) 597 599 static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) 598 600 { 599 601 struct tls_context *tls_ctx = tls_get_ctx(sk); 600 602 struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); 601 603 602 - atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1); 604 + atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ); 605 + } 606 + 607 + static inline void tls_offload_rx_force_resync_request(struct sock *sk) 608 + { 609 + struct tls_context *tls_ctx = tls_get_ctx(sk); 610 + struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); 611 + 612 + atomic64_set(&rx_ctx->resync_req, RESYNC_REQ | RESYNC_REQ_FORCE); 603 613 } 604 614 605 615 static inline void
+6 -3
net/tls/tls_device.c
··· 694 694 { 695 695 struct tls_context *tls_ctx = tls_get_ctx(sk); 696 696 struct tls_offload_context_rx *rx_ctx; 697 + bool is_req_pending, is_force_resync; 697 698 u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE]; 698 - u32 sock_data, is_req_pending; 699 699 struct tls_prot_info *prot; 700 700 s64 resync_req; 701 + u32 sock_data; 701 702 u32 req_seq; 702 703 703 704 if (tls_ctx->rx_conf != TLS_HW) ··· 713 712 resync_req = atomic64_read(&rx_ctx->resync_req); 714 713 req_seq = resync_req >> 32; 715 714 seq += TLS_HEADER_SIZE - 1; 716 - is_req_pending = resync_req; 715 + is_req_pending = resync_req & RESYNC_REQ; 716 + is_force_resync = resync_req & RESYNC_REQ_FORCE; 717 717 718 - if (likely(!is_req_pending) || req_seq != seq || 718 + if (likely(!is_req_pending) || 719 + (!is_force_resync && req_seq != seq) || 719 720 !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) 720 721 return; 721 722 break;