Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tls: rx: async: hold onto the input skb

Async crypto currently benefits from the fact that we decrypt
in place. When we allow input and output to be different skbs
we will have to hang onto the input while we move to the next
record. Clone the inputs and keep them on a list.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Jakub Kicinski; committed by David S. Miller.
Commit c618db2a (parent 6ececdc5)

+39 -10
+1
include/net/tls.h
@@ -123,6 +123,7 @@
 	atomic_t decrypt_pending;
 	/* protect crypto_wait with decrypt_pending*/
 	spinlock_t decrypt_compl_lock;
+	struct sk_buff_head async_hold;
 	struct wait_queue_head wq;
 };
 
+1 -1
net/tls/Makefile
@@ -7,7 +7,7 @@
 
 obj-$(CONFIG_TLS) += tls.o
 
-tls-y := tls_main.o tls_sw.o tls_proc.o trace.o
+tls-y := tls_main.o tls_sw.o tls_proc.o trace.o tls_strp.o
 
 tls-$(CONFIG_TLS_TOE) += tls_toe.o
 tls-$(CONFIG_TLS_DEVICE) += tls_device.o tls_device_fallback.o
+3
net/tls/tls.h
@@ -124,6 +124,9 @@
 		     struct tls_offload_context_tx *offload_ctx,
 		     struct tls_crypto_info *crypto_info);
 
+int tls_strp_msg_hold(struct sock *sk, struct sk_buff *skb,
+		      struct sk_buff_head *dst);
+
 static inline struct tls_msg *tls_msg(struct sk_buff *skb)
 {
 	struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb;
+17
net/tls/tls_strp.c
new file mode 100644
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/skbuff.h>
+
+#include "tls.h"
+
+int tls_strp_msg_hold(struct sock *sk, struct sk_buff *skb,
+		      struct sk_buff_head *dst)
+{
+	struct sk_buff *clone;
+
+	clone = skb_clone(skb, sk->sk_allocation);
+	if (!clone)
+		return -ENOMEM;
+	__skb_queue_tail(dst, clone);
+	return 0;
+}
+17 -9
net/tls/tls_sw.c
@@ -1535,8 +1535,13 @@
 		goto exit_free_pages;
 
 	darg->skb = tls_strp_msg(ctx);
-	if (darg->async)
-		return 0;
+
+	if (unlikely(darg->async)) {
+		err = tls_strp_msg_hold(sk, skb, &ctx->async_hold);
+		if (err)
+			__skb_queue_tail(&ctx->async_hold, darg->skb);
+		return err;
+	}
 
 	if (prot->tail_size)
 		darg->tail = dctx->tail;
@@ -2003,14 +1998,16 @@
 	reinit_completion(&ctx->async_wait.completion);
 	pending = atomic_read(&ctx->decrypt_pending);
 	spin_unlock_bh(&ctx->decrypt_compl_lock);
-	if (pending) {
+	ret = 0;
+	if (pending)
 		ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
-		if (ret) {
-			if (err >= 0 || err == -EINPROGRESS)
-				err = ret;
-			decrypted = 0;
-			goto end;
-		}
+	__skb_queue_purge(&ctx->async_hold);
+
+	if (ret) {
+		if (err >= 0 || err == -EINPROGRESS)
+			err = ret;
+		decrypted = 0;
+		goto end;
 	}
 
 	/* Drain records from the rx_list & copy if required */
@@ -2447,6 +2440,7 @@
 		crypto_info = &ctx->crypto_recv.info;
 		cctx = &ctx->rx;
 		skb_queue_head_init(&sw_ctx_rx->rx_list);
+		skb_queue_head_init(&sw_ctx_rx->async_hold);
 		aead = &sw_ctx_rx->aead_recv;
 	}
 