Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'tls-rx-decrypt-from-the-tcp-queue'

Jakub Kicinski says:

====================
tls: rx: decrypt from the TCP queue

This is the final part of my TLS Rx rework. It switches from
strparser to decrypting data from skbs queued in TCP. We don't
need the full strparser for TLS; its needs are very basic.
This set gives us a small but measurable (6%) performance
improvement (continuous stream).
====================

Link: https://lore.kernel.org/r/20220722235033.2594446-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+736 -137
+2
include/net/tcp.h
··· 673 673 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 674 674 sk_read_actor_t recv_actor); 675 675 int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor); 676 + struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off); 677 + void tcp_read_done(struct sock *sk, size_t len); 676 678 677 679 void tcp_initialize_rcv_mss(struct sock *sk); 678 680
+17 -2
include/net/tls.h
··· 108 108 unsigned long tx_bitmask; 109 109 }; 110 110 111 + struct tls_strparser { 112 + struct sock *sk; 113 + 114 + u32 mark : 8; 115 + u32 stopped : 1; 116 + u32 copy_mode : 1; 117 + u32 msg_ready : 1; 118 + 119 + struct strp_msg stm; 120 + 121 + struct sk_buff *anchor; 122 + struct work_struct work; 123 + }; 124 + 111 125 struct tls_sw_context_rx { 112 126 struct crypto_aead *aead_recv; 113 127 struct crypto_wait async_wait; 114 - struct strparser strp; 115 128 struct sk_buff_head rx_list; /* list of decrypted 'data' records */ 116 129 void (*saved_data_ready)(struct sock *sk); 117 130 118 - struct sk_buff *recv_pkt; 119 131 u8 reader_present; 120 132 u8 async_capable:1; 121 133 u8 zc_capable:1; 122 134 u8 reader_contended:1; 135 + 136 + struct tls_strparser strp; 137 + 123 138 atomic_t decrypt_pending; 124 139 /* protect crypto_wait with decrypt_pending*/ 125 140 spinlock_t decrypt_compl_lock;
+41 -1
net/ipv4/tcp.c
··· 1635 1635 __kfree_skb(skb); 1636 1636 } 1637 1637 1638 - static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1638 + struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1639 1639 { 1640 1640 struct sk_buff *skb; 1641 1641 u32 offset; ··· 1658 1658 } 1659 1659 return NULL; 1660 1660 } 1661 + EXPORT_SYMBOL(tcp_recv_skb); 1661 1662 1662 1663 /* 1663 1664 * This routine provides an alternative to tcp_recvmsg() for routines ··· 1788 1787 return copied; 1789 1788 } 1790 1789 EXPORT_SYMBOL(tcp_read_skb); 1790 + 1791 + void tcp_read_done(struct sock *sk, size_t len) 1792 + { 1793 + struct tcp_sock *tp = tcp_sk(sk); 1794 + u32 seq = tp->copied_seq; 1795 + struct sk_buff *skb; 1796 + size_t left; 1797 + u32 offset; 1798 + 1799 + if (sk->sk_state == TCP_LISTEN) 1800 + return; 1801 + 1802 + left = len; 1803 + while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1804 + int used; 1805 + 1806 + used = min_t(size_t, skb->len - offset, left); 1807 + seq += used; 1808 + left -= used; 1809 + 1810 + if (skb->len > offset + used) 1811 + break; 1812 + 1813 + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 1814 + tcp_eat_recv_skb(sk, skb); 1815 + ++seq; 1816 + break; 1817 + } 1818 + tcp_eat_recv_skb(sk, skb); 1819 + } 1820 + WRITE_ONCE(tp->copied_seq, seq); 1821 + 1822 + tcp_rcv_space_adjust(sk); 1823 + 1824 + /* Clean up data we have read: This will do ACK frames. */ 1825 + if (left != len) 1826 + tcp_cleanup_rbuf(sk, len - left); 1827 + } 1828 + EXPORT_SYMBOL(tcp_read_done); 1791 1829 1792 1830 int tcp_peek_len(struct socket *sock) 1793 1831 {
+26 -3
net/tls/tls.h
··· 1 1 /* 2 + * Copyright (c) 2016 Tom Herbert <tom@herbertland.com> 2 3 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved. 3 4 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved. 4 5 * ··· 128 127 struct tls_offload_context_tx *offload_ctx, 129 128 struct tls_crypto_info *crypto_info); 130 129 131 - int tls_strp_msg_hold(struct sock *sk, struct sk_buff *skb, 132 - struct sk_buff_head *dst); 130 + int tls_strp_dev_init(void); 131 + void tls_strp_dev_exit(void); 132 + 133 + void tls_strp_done(struct tls_strparser *strp); 134 + void tls_strp_stop(struct tls_strparser *strp); 135 + int tls_strp_init(struct tls_strparser *strp, struct sock *sk); 136 + void tls_strp_data_ready(struct tls_strparser *strp); 137 + 138 + void tls_strp_check_rcv(struct tls_strparser *strp); 139 + void tls_strp_msg_done(struct tls_strparser *strp); 140 + 141 + int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb); 142 + void tls_rx_msg_ready(struct tls_strparser *strp); 143 + 144 + void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh); 145 + int tls_strp_msg_cow(struct tls_sw_context_rx *ctx); 146 + struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx); 147 + int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst); 133 148 134 149 static inline struct tls_msg *tls_msg(struct sk_buff *skb) 135 150 { ··· 156 139 157 140 static inline struct sk_buff *tls_strp_msg(struct tls_sw_context_rx *ctx) 158 141 { 159 - return ctx->recv_pkt; 142 + DEBUG_NET_WARN_ON_ONCE(!ctx->strp.msg_ready || !ctx->strp.anchor->len); 143 + return ctx->strp.anchor; 144 + } 145 + 146 + static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx) 147 + { 148 + return ctx->strp.msg_ready; 160 149 } 161 150 162 151 #ifdef CONFIG_TLS_DEVICE
+9 -10
net/tls/tls_device.c
··· 894 894 static int 895 895 tls_device_reencrypt(struct sock *sk, struct tls_sw_context_rx *sw_ctx) 896 896 { 897 - int err = 0, offset, copy, nsg, data_len, pos; 898 - struct sk_buff *skb, *skb_iter, *unused; 897 + int err, offset, copy, data_len, pos; 898 + struct sk_buff *skb, *skb_iter; 899 899 struct scatterlist sg[1]; 900 900 struct strp_msg *rxm; 901 901 char *orig_buf, *buf; 902 902 903 - skb = tls_strp_msg(sw_ctx); 904 - rxm = strp_msg(skb); 905 - offset = rxm->offset; 906 - 903 + rxm = strp_msg(tls_strp_msg(sw_ctx)); 907 904 orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + 908 905 TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation); 909 906 if (!orig_buf) 910 907 return -ENOMEM; 911 908 buf = orig_buf; 912 909 913 - nsg = skb_cow_data(skb, 0, &unused); 914 - if (unlikely(nsg < 0)) { 915 - err = nsg; 910 + err = tls_strp_msg_cow(sw_ctx); 911 + if (unlikely(err)) 916 912 goto free_buf; 917 - } 913 + 914 + skb = tls_strp_msg(sw_ctx); 915 + rxm = strp_msg(skb); 916 + offset = rxm->offset; 918 917 919 918 sg_init_table(sg, 1); 920 919 sg_set_buf(&sg[0], buf,
+16 -4
net/tls/tls_main.c
··· 725 725 if (tx) { 726 726 ctx->sk_write_space = sk->sk_write_space; 727 727 sk->sk_write_space = tls_write_space; 728 + } else { 729 + struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(ctx); 730 + 731 + tls_strp_check_rcv(&rx_ctx->strp); 728 732 } 729 733 return 0; 730 734 ··· 1145 1141 if (err) 1146 1142 return err; 1147 1143 1144 + err = tls_strp_dev_init(); 1145 + if (err) 1146 + goto err_pernet; 1147 + 1148 1148 err = tls_device_init(); 1149 - if (err) { 1150 - unregister_pernet_subsys(&tls_proc_ops); 1151 - return err; 1152 - } 1149 + if (err) 1150 + goto err_strp; 1153 1151 1154 1152 tcp_register_ulp(&tcp_tls_ulp_ops); 1155 1153 1156 1154 return 0; 1155 + err_strp: 1156 + tls_strp_dev_exit(); 1157 + err_pernet: 1158 + unregister_pernet_subsys(&tls_proc_ops); 1159 + return err; 1157 1160 } 1158 1161 1159 1162 static void __exit tls_unregister(void) 1160 1163 { 1161 1164 tcp_unregister_ulp(&tcp_tls_ulp_ops); 1165 + tls_strp_dev_exit(); 1162 1166 tls_device_cleanup(); 1163 1167 unregister_pernet_subsys(&tls_proc_ops); 1164 1168 }
+483 -7
net/tls/tls_strp.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright (c) 2016 Tom Herbert <tom@herbertland.com> */ 2 3 3 4 #include <linux/skbuff.h> 5 + #include <linux/workqueue.h> 6 + #include <net/strparser.h> 7 + #include <net/tcp.h> 8 + #include <net/sock.h> 9 + #include <net/tls.h> 4 10 5 11 #include "tls.h" 6 12 7 - int tls_strp_msg_hold(struct sock *sk, struct sk_buff *skb, 8 - struct sk_buff_head *dst) 9 - { 10 - struct sk_buff *clone; 13 + static struct workqueue_struct *tls_strp_wq; 11 14 12 - clone = skb_clone(skb, sk->sk_allocation); 13 - if (!clone) 15 + static void tls_strp_abort_strp(struct tls_strparser *strp, int err) 16 + { 17 + if (strp->stopped) 18 + return; 19 + 20 + strp->stopped = 1; 21 + 22 + /* Report an error on the lower socket */ 23 + strp->sk->sk_err = -err; 24 + sk_error_report(strp->sk); 25 + } 26 + 27 + static void tls_strp_anchor_free(struct tls_strparser *strp) 28 + { 29 + struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); 30 + 31 + DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1); 32 + shinfo->frag_list = NULL; 33 + consume_skb(strp->anchor); 34 + strp->anchor = NULL; 35 + } 36 + 37 + /* Create a new skb with the contents of input copied to its page frags */ 38 + static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp) 39 + { 40 + struct strp_msg *rxm; 41 + struct sk_buff *skb; 42 + int i, err, offset; 43 + 44 + skb = alloc_skb_with_frags(0, strp->anchor->len, TLS_PAGE_ORDER, 45 + &err, strp->sk->sk_allocation); 46 + if (!skb) 47 + return NULL; 48 + 49 + offset = strp->stm.offset; 50 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 51 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 52 + 53 + WARN_ON_ONCE(skb_copy_bits(strp->anchor, offset, 54 + skb_frag_address(frag), 55 + skb_frag_size(frag))); 56 + offset += skb_frag_size(frag); 57 + } 58 + 59 + skb_copy_header(skb, strp->anchor); 60 + rxm = strp_msg(skb); 61 + rxm->offset = 0; 62 + return skb; 63 + } 64 + 65 + /* Steal the input skb, input 
msg is invalid after calling this function */ 66 + struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx) 67 + { 68 + struct tls_strparser *strp = &ctx->strp; 69 + 70 + #ifdef CONFIG_TLS_DEVICE 71 + DEBUG_NET_WARN_ON_ONCE(!strp->anchor->decrypted); 72 + #else 73 + /* This function turns an input into an output, 74 + * that can only happen if we have offload. 75 + */ 76 + WARN_ON(1); 77 + #endif 78 + 79 + if (strp->copy_mode) { 80 + struct sk_buff *skb; 81 + 82 + /* Replace anchor with an empty skb, this is a little 83 + * dangerous but __tls_cur_msg() warns on empty skbs 84 + * so hopefully we'll catch abuses. 85 + */ 86 + skb = alloc_skb(0, strp->sk->sk_allocation); 87 + if (!skb) 88 + return NULL; 89 + 90 + swap(strp->anchor, skb); 91 + return skb; 92 + } 93 + 94 + return tls_strp_msg_make_copy(strp); 95 + } 96 + 97 + /* Force the input skb to be in copy mode. The data ownership remains 98 + * with the input skb itself (meaning unpause will wipe it) but it can 99 + * be modified. 100 + */ 101 + int tls_strp_msg_cow(struct tls_sw_context_rx *ctx) 102 + { 103 + struct tls_strparser *strp = &ctx->strp; 104 + struct sk_buff *skb; 105 + 106 + if (strp->copy_mode) 107 + return 0; 108 + 109 + skb = tls_strp_msg_make_copy(strp); 110 + if (!skb) 14 111 return -ENOMEM; 15 - __skb_queue_tail(dst, clone); 112 + 113 + tls_strp_anchor_free(strp); 114 + strp->anchor = skb; 115 + 116 + tcp_read_done(strp->sk, strp->stm.full_len); 117 + strp->copy_mode = 1; 118 + 16 119 return 0; 120 + } 121 + 122 + /* Make a clone (in the skb sense) of the input msg to keep a reference 123 + * to the underlying data. The reference-holding skbs get placed on 124 + * @dst. 
125 + */ 126 + int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst) 127 + { 128 + struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); 129 + 130 + if (strp->copy_mode) { 131 + struct sk_buff *skb; 132 + 133 + WARN_ON_ONCE(!shinfo->nr_frags); 134 + 135 + /* We can't skb_clone() the anchor, it gets wiped by unpause */ 136 + skb = alloc_skb(0, strp->sk->sk_allocation); 137 + if (!skb) 138 + return -ENOMEM; 139 + 140 + __skb_queue_tail(dst, strp->anchor); 141 + strp->anchor = skb; 142 + } else { 143 + struct sk_buff *iter, *clone; 144 + int chunk, len, offset; 145 + 146 + offset = strp->stm.offset; 147 + len = strp->stm.full_len; 148 + iter = shinfo->frag_list; 149 + 150 + while (len > 0) { 151 + if (iter->len <= offset) { 152 + offset -= iter->len; 153 + goto next; 154 + } 155 + 156 + chunk = iter->len - offset; 157 + offset = 0; 158 + 159 + clone = skb_clone(iter, strp->sk->sk_allocation); 160 + if (!clone) 161 + return -ENOMEM; 162 + __skb_queue_tail(dst, clone); 163 + 164 + len -= chunk; 165 + next: 166 + iter = iter->next; 167 + } 168 + } 169 + 170 + return 0; 171 + } 172 + 173 + static void tls_strp_flush_anchor_copy(struct tls_strparser *strp) 174 + { 175 + struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); 176 + int i; 177 + 178 + DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1); 179 + 180 + for (i = 0; i < shinfo->nr_frags; i++) 181 + __skb_frag_unref(&shinfo->frags[i], false); 182 + shinfo->nr_frags = 0; 183 + strp->copy_mode = 0; 184 + } 185 + 186 + static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb, 187 + unsigned int offset, size_t in_len) 188 + { 189 + struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data; 190 + size_t sz, len, chunk; 191 + struct sk_buff *skb; 192 + skb_frag_t *frag; 193 + 194 + if (strp->msg_ready) 195 + return 0; 196 + 197 + skb = strp->anchor; 198 + frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE]; 199 + 200 + len = in_len; 201 + /* First make 
sure we got the header */ 202 + if (!strp->stm.full_len) { 203 + /* Assume one page is more than enough for headers */ 204 + chunk = min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag)); 205 + WARN_ON_ONCE(skb_copy_bits(in_skb, offset, 206 + skb_frag_address(frag) + 207 + skb_frag_size(frag), 208 + chunk)); 209 + 210 + sz = tls_rx_msg_size(strp, strp->anchor); 211 + if (sz < 0) { 212 + desc->error = sz; 213 + return 0; 214 + } 215 + 216 + /* We may have over-read, sz == 0 is guaranteed under-read */ 217 + if (sz > 0) 218 + chunk = min_t(size_t, chunk, sz - skb->len); 219 + 220 + skb->len += chunk; 221 + skb->data_len += chunk; 222 + skb_frag_size_add(frag, chunk); 223 + frag++; 224 + len -= chunk; 225 + offset += chunk; 226 + 227 + strp->stm.full_len = sz; 228 + if (!strp->stm.full_len) 229 + goto read_done; 230 + } 231 + 232 + /* Load up more data */ 233 + while (len && strp->stm.full_len > skb->len) { 234 + chunk = min_t(size_t, len, strp->stm.full_len - skb->len); 235 + chunk = min_t(size_t, chunk, PAGE_SIZE - skb_frag_size(frag)); 236 + WARN_ON_ONCE(skb_copy_bits(in_skb, offset, 237 + skb_frag_address(frag) + 238 + skb_frag_size(frag), 239 + chunk)); 240 + 241 + skb->len += chunk; 242 + skb->data_len += chunk; 243 + skb_frag_size_add(frag, chunk); 244 + frag++; 245 + len -= chunk; 246 + offset += chunk; 247 + } 248 + 249 + if (strp->stm.full_len == skb->len) { 250 + desc->count = 0; 251 + 252 + strp->msg_ready = 1; 253 + tls_rx_msg_ready(strp); 254 + } 255 + 256 + read_done: 257 + return in_len - len; 258 + } 259 + 260 + static int tls_strp_read_copyin(struct tls_strparser *strp) 261 + { 262 + struct socket *sock = strp->sk->sk_socket; 263 + read_descriptor_t desc; 264 + 265 + desc.arg.data = strp; 266 + desc.error = 0; 267 + desc.count = 1; /* give more than one skb per call */ 268 + 269 + /* sk should be locked here, so okay to do read_sock */ 270 + sock->ops->read_sock(strp->sk, &desc, tls_strp_copyin); 271 + 272 + return desc.error; 273 + } 274 + 275 + 
static int tls_strp_read_short(struct tls_strparser *strp) 276 + { 277 + struct skb_shared_info *shinfo; 278 + struct page *page; 279 + int need_spc, len; 280 + 281 + /* If the rbuf is small or rcv window has collapsed to 0 we need 282 + * to read the data out. Otherwise the connection will stall. 283 + * Without pressure threshold of INT_MAX will never be ready. 284 + */ 285 + if (likely(!tcp_epollin_ready(strp->sk, INT_MAX))) 286 + return 0; 287 + 288 + shinfo = skb_shinfo(strp->anchor); 289 + shinfo->frag_list = NULL; 290 + 291 + /* If we don't know the length go max plus page for cipher overhead */ 292 + need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE; 293 + 294 + for (len = need_spc; len > 0; len -= PAGE_SIZE) { 295 + page = alloc_page(strp->sk->sk_allocation); 296 + if (!page) { 297 + tls_strp_flush_anchor_copy(strp); 298 + return -ENOMEM; 299 + } 300 + 301 + skb_fill_page_desc(strp->anchor, shinfo->nr_frags++, 302 + page, 0, 0); 303 + } 304 + 305 + strp->copy_mode = 1; 306 + strp->stm.offset = 0; 307 + 308 + strp->anchor->len = 0; 309 + strp->anchor->data_len = 0; 310 + strp->anchor->truesize = round_up(need_spc, PAGE_SIZE); 311 + 312 + tls_strp_read_copyin(strp); 313 + 314 + return 0; 315 + } 316 + 317 + static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len) 318 + { 319 + struct tcp_sock *tp = tcp_sk(strp->sk); 320 + struct sk_buff *first; 321 + u32 offset; 322 + 323 + first = tcp_recv_skb(strp->sk, tp->copied_seq, &offset); 324 + if (WARN_ON_ONCE(!first)) 325 + return; 326 + 327 + /* Bestow the state onto the anchor */ 328 + strp->anchor->len = offset + len; 329 + strp->anchor->data_len = offset + len; 330 + strp->anchor->truesize = offset + len; 331 + 332 + skb_shinfo(strp->anchor)->frag_list = first; 333 + 334 + skb_copy_header(strp->anchor, first); 335 + strp->anchor->destructor = NULL; 336 + 337 + strp->stm.offset = offset; 338 + } 339 + 340 + void tls_strp_msg_load(struct tls_strparser *strp, bool 
force_refresh) 341 + { 342 + struct strp_msg *rxm; 343 + struct tls_msg *tlm; 344 + 345 + DEBUG_NET_WARN_ON_ONCE(!strp->msg_ready); 346 + DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len); 347 + 348 + if (!strp->copy_mode && force_refresh) { 349 + if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len)) 350 + return; 351 + 352 + tls_strp_load_anchor_with_queue(strp, strp->stm.full_len); 353 + } 354 + 355 + rxm = strp_msg(strp->anchor); 356 + rxm->full_len = strp->stm.full_len; 357 + rxm->offset = strp->stm.offset; 358 + tlm = tls_msg(strp->anchor); 359 + tlm->control = strp->mark; 360 + } 361 + 362 + /* Called with lock held on lower socket */ 363 + static int tls_strp_read_sock(struct tls_strparser *strp) 364 + { 365 + int sz, inq; 366 + 367 + inq = tcp_inq(strp->sk); 368 + if (inq < 1) 369 + return 0; 370 + 371 + if (unlikely(strp->copy_mode)) 372 + return tls_strp_read_copyin(strp); 373 + 374 + if (inq < strp->stm.full_len) 375 + return tls_strp_read_short(strp); 376 + 377 + if (!strp->stm.full_len) { 378 + tls_strp_load_anchor_with_queue(strp, inq); 379 + 380 + sz = tls_rx_msg_size(strp, strp->anchor); 381 + if (sz < 0) { 382 + tls_strp_abort_strp(strp, sz); 383 + return sz; 384 + } 385 + 386 + strp->stm.full_len = sz; 387 + 388 + if (!strp->stm.full_len || inq < strp->stm.full_len) 389 + return tls_strp_read_short(strp); 390 + } 391 + 392 + strp->msg_ready = 1; 393 + tls_rx_msg_ready(strp); 394 + 395 + return 0; 396 + } 397 + 398 + void tls_strp_check_rcv(struct tls_strparser *strp) 399 + { 400 + if (unlikely(strp->stopped) || strp->msg_ready) 401 + return; 402 + 403 + if (tls_strp_read_sock(strp) == -ENOMEM) 404 + queue_work(tls_strp_wq, &strp->work); 405 + } 406 + 407 + /* Lower sock lock held */ 408 + void tls_strp_data_ready(struct tls_strparser *strp) 409 + { 410 + /* This check is needed to synchronize with do_tls_strp_work. 411 + * do_tls_strp_work acquires a process lock (lock_sock) whereas 412 + * the lock held here is bh_lock_sock. 
The two locks can be 413 + * held by different threads at the same time, but bh_lock_sock 414 + * allows a thread in BH context to safely check if the process 415 + * lock is held. In this case, if the lock is held, queue work. 416 + */ 417 + if (sock_owned_by_user_nocheck(strp->sk)) { 418 + queue_work(tls_strp_wq, &strp->work); 419 + return; 420 + } 421 + 422 + tls_strp_check_rcv(strp); 423 + } 424 + 425 + static void tls_strp_work(struct work_struct *w) 426 + { 427 + struct tls_strparser *strp = 428 + container_of(w, struct tls_strparser, work); 429 + 430 + lock_sock(strp->sk); 431 + tls_strp_check_rcv(strp); 432 + release_sock(strp->sk); 433 + } 434 + 435 + void tls_strp_msg_done(struct tls_strparser *strp) 436 + { 437 + WARN_ON(!strp->stm.full_len); 438 + 439 + if (likely(!strp->copy_mode)) 440 + tcp_read_done(strp->sk, strp->stm.full_len); 441 + else 442 + tls_strp_flush_anchor_copy(strp); 443 + 444 + strp->msg_ready = 0; 445 + memset(&strp->stm, 0, sizeof(strp->stm)); 446 + 447 + tls_strp_check_rcv(strp); 448 + } 449 + 450 + void tls_strp_stop(struct tls_strparser *strp) 451 + { 452 + strp->stopped = 1; 453 + } 454 + 455 + int tls_strp_init(struct tls_strparser *strp, struct sock *sk) 456 + { 457 + memset(strp, 0, sizeof(*strp)); 458 + 459 + strp->sk = sk; 460 + 461 + strp->anchor = alloc_skb(0, GFP_KERNEL); 462 + if (!strp->anchor) 463 + return -ENOMEM; 464 + 465 + INIT_WORK(&strp->work, tls_strp_work); 466 + 467 + return 0; 468 + } 469 + 470 + /* strp must already be stopped so that tls_strp_recv will no longer be called. 471 + * Note that tls_strp_done is not called with the lower socket held. 
472 + */ 473 + void tls_strp_done(struct tls_strparser *strp) 474 + { 475 + WARN_ON(!strp->stopped); 476 + 477 + cancel_work_sync(&strp->work); 478 + tls_strp_anchor_free(strp); 479 + } 480 + 481 + int __init tls_strp_dev_init(void) 482 + { 483 + tls_strp_wq = create_singlethread_workqueue("kstrp"); 484 + if (unlikely(!tls_strp_wq)) 485 + return -ENOMEM; 486 + 487 + return 0; 488 + } 489 + 490 + void tls_strp_dev_exit(void) 491 + { 492 + destroy_workqueue(tls_strp_wq); 17 493 }
+142 -110
net/tls/tls_sw.c
··· 1283 1283 1284 1284 static int 1285 1285 tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock, 1286 - long timeo) 1286 + bool released, long timeo) 1287 1287 { 1288 1288 struct tls_context *tls_ctx = tls_get_ctx(sk); 1289 1289 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1290 1290 DEFINE_WAIT_FUNC(wait, woken_wake_function); 1291 1291 1292 - while (!ctx->recv_pkt) { 1292 + while (!tls_strp_msg_ready(ctx)) { 1293 1293 if (!sk_psock_queue_empty(psock)) 1294 1294 return 0; 1295 1295 ··· 1297 1297 return sock_error(sk); 1298 1298 1299 1299 if (!skb_queue_empty(&sk->sk_receive_queue)) { 1300 - __strp_unpause(&ctx->strp); 1301 - if (ctx->recv_pkt) 1300 + tls_strp_check_rcv(&ctx->strp); 1301 + if (tls_strp_msg_ready(ctx)) 1302 1302 break; 1303 1303 } 1304 1304 ··· 1311 1311 if (nonblock || !timeo) 1312 1312 return -EAGAIN; 1313 1313 1314 + released = true; 1314 1315 add_wait_queue(sk_sleep(sk), &wait); 1315 1316 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1316 1317 sk_wait_event(sk, &timeo, 1317 - ctx->recv_pkt || !sk_psock_queue_empty(psock), 1318 + tls_strp_msg_ready(ctx) || 1319 + !sk_psock_queue_empty(psock), 1318 1320 &wait); 1319 1321 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1320 1322 remove_wait_queue(sk_sleep(sk), &wait); ··· 1325 1323 if (signal_pending(current)) 1326 1324 return sock_intr_errno(timeo); 1327 1325 } 1326 + 1327 + tls_strp_msg_load(&ctx->strp, released); 1328 1328 1329 1329 return 1; 1330 1330 } ··· 1412 1408 1413 1409 /* Decrypt handlers 1414 1410 * 1415 - * tls_decrypt_sg() and tls_decrypt_device() are decrypt handlers. 1411 + * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers. 
1416 1412 * They must transform the darg in/out argument are as follows: 1417 1413 * | Input | Output 1418 1414 * ------------------------------------------------------------------- 1419 1415 * zc | Zero-copy decrypt allowed | Zero-copy performed 1420 1416 * async | Async decrypt allowed | Async crypto used / in progress 1421 1417 * skb | * | Output skb 1418 + * 1419 + * If ZC decryption was performed darg.skb will point to the input skb. 1422 1420 */ 1423 1421 1424 1422 /* This function decrypts the input skb into either out_iov or in out_sg ··· 1573 1567 clear_skb = NULL; 1574 1568 1575 1569 if (unlikely(darg->async)) { 1576 - err = tls_strp_msg_hold(sk, skb, &ctx->async_hold); 1570 + err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold); 1577 1571 if (err) 1578 1572 __skb_queue_tail(&ctx->async_hold, darg->skb); 1579 1573 return err; ··· 1594 1588 } 1595 1589 1596 1590 static int 1597 - tls_decrypt_device(struct sock *sk, struct tls_context *tls_ctx, 1598 - struct tls_decrypt_arg *darg) 1591 + tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx, 1592 + struct msghdr *msg, struct tls_decrypt_arg *darg) 1599 1593 { 1600 1594 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1601 - int err; 1595 + struct tls_prot_info *prot = &tls_ctx->prot_info; 1596 + struct strp_msg *rxm; 1597 + int pad, err; 1598 + 1599 + err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg); 1600 + if (err < 0) { 1601 + if (err == -EBADMSG) 1602 + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR); 1603 + return err; 1604 + } 1605 + /* keep going even for ->async, the code below is TLS 1.3 */ 1606 + 1607 + /* If opportunistic TLS 1.3 ZC failed retry without ZC */ 1608 + if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION && 1609 + darg->tail != TLS_RECORD_TYPE_DATA)) { 1610 + darg->zc = false; 1611 + if (!darg->tail) 1612 + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL); 1613 + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY); 1614 + return 
tls_decrypt_sw(sk, tls_ctx, msg, darg); 1615 + } 1616 + 1617 + pad = tls_padding_length(prot, darg->skb, darg); 1618 + if (pad < 0) { 1619 + if (darg->skb != tls_strp_msg(ctx)) 1620 + consume_skb(darg->skb); 1621 + return pad; 1622 + } 1623 + 1624 + rxm = strp_msg(darg->skb); 1625 + rxm->full_len -= pad; 1626 + 1627 + return 0; 1628 + } 1629 + 1630 + static int 1631 + tls_decrypt_device(struct sock *sk, struct msghdr *msg, 1632 + struct tls_context *tls_ctx, struct tls_decrypt_arg *darg) 1633 + { 1634 + struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1635 + struct tls_prot_info *prot = &tls_ctx->prot_info; 1636 + struct strp_msg *rxm; 1637 + int pad, err; 1602 1638 1603 1639 if (tls_ctx->rx_conf != TLS_HW) 1604 1640 return 0; ··· 1649 1601 if (err <= 0) 1650 1602 return err; 1651 1603 1652 - darg->zc = false; 1604 + pad = tls_padding_length(prot, tls_strp_msg(ctx), darg); 1605 + if (pad < 0) 1606 + return pad; 1607 + 1653 1608 darg->async = false; 1654 1609 darg->skb = tls_strp_msg(ctx); 1655 - ctx->recv_pkt = NULL; 1656 - return 1; 1657 - } 1658 - 1659 - static int tls_rx_one_record(struct sock *sk, struct iov_iter *dest, 1660 - struct tls_decrypt_arg *darg) 1661 - { 1662 - struct tls_context *tls_ctx = tls_get_ctx(sk); 1663 - struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1664 - struct tls_prot_info *prot = &tls_ctx->prot_info; 1665 - struct strp_msg *rxm; 1666 - int pad, err; 1667 - 1668 - err = tls_decrypt_device(sk, tls_ctx, darg); 1669 - if (err < 0) 1670 - return err; 1671 - if (err) 1672 - goto decrypt_done; 1673 - 1674 - err = tls_decrypt_sg(sk, dest, NULL, darg); 1675 - if (err < 0) { 1676 - if (err == -EBADMSG) 1677 - TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR); 1678 - return err; 1679 - } 1680 - if (darg->async) 1681 - goto decrypt_done; 1682 - /* If opportunistic TLS 1.3 ZC failed retry without ZC */ 1683 - if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION && 1684 - darg->tail != TLS_RECORD_TYPE_DATA)) { 1685 - 
darg->zc = false; 1686 - if (!darg->tail) 1687 - TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL); 1688 - TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY); 1689 - return tls_rx_one_record(sk, dest, darg); 1690 - } 1691 - 1692 - decrypt_done: 1693 - if (darg->skb == ctx->recv_pkt) 1694 - ctx->recv_pkt = NULL; 1695 - 1696 - pad = tls_padding_length(prot, darg->skb, darg); 1697 - if (pad < 0) { 1698 - consume_skb(darg->skb); 1699 - return pad; 1700 - } 1610 + /* ->zc downgrade check, in case TLS 1.3 gets here */ 1611 + darg->zc &= !(prot->version == TLS_1_3_VERSION && 1612 + tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA); 1701 1613 1702 1614 rxm = strp_msg(darg->skb); 1703 1615 rxm->full_len -= pad; 1616 + 1617 + if (!darg->zc) { 1618 + /* Non-ZC case needs a real skb */ 1619 + darg->skb = tls_strp_msg_detach(ctx); 1620 + if (!darg->skb) 1621 + return -ENOMEM; 1622 + } else { 1623 + unsigned int off, len; 1624 + 1625 + /* In ZC case nobody cares about the output skb. 1626 + * Just copy the data here. Note the skb is not fully trimmed. 
1627 + */ 1628 + off = rxm->offset + prot->prepend_size; 1629 + len = rxm->full_len - prot->overhead_size; 1630 + 1631 + err = skb_copy_datagram_msg(darg->skb, off, msg, len); 1632 + if (err) 1633 + return err; 1634 + } 1635 + return 1; 1636 + } 1637 + 1638 + static int tls_rx_one_record(struct sock *sk, struct msghdr *msg, 1639 + struct tls_decrypt_arg *darg) 1640 + { 1641 + struct tls_context *tls_ctx = tls_get_ctx(sk); 1642 + struct tls_prot_info *prot = &tls_ctx->prot_info; 1643 + struct strp_msg *rxm; 1644 + int err; 1645 + 1646 + err = tls_decrypt_device(sk, msg, tls_ctx, darg); 1647 + if (!err) 1648 + err = tls_decrypt_sw(sk, tls_ctx, msg, darg); 1649 + if (err < 0) 1650 + return err; 1651 + 1652 + rxm = strp_msg(darg->skb); 1704 1653 rxm->offset += prot->prepend_size; 1705 1654 rxm->full_len -= prot->overhead_size; 1706 1655 tls_advance_record_sn(sk, prot, &tls_ctx->rx); ··· 1737 1692 1738 1693 static void tls_rx_rec_done(struct tls_sw_context_rx *ctx) 1739 1694 { 1740 - consume_skb(ctx->recv_pkt); 1741 - ctx->recv_pkt = NULL; 1742 - __strp_unpause(&ctx->strp); 1695 + tls_strp_msg_done(&ctx->strp); 1743 1696 } 1744 1697 1745 1698 /* This function traverses the rx_list in tls receive context to copies the ··· 1824 1781 return copied ? 
: err; 1825 1782 } 1826 1783 1827 - static void 1784 + static bool 1828 1785 tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot, 1829 1786 size_t len_left, size_t decrypted, ssize_t done, 1830 1787 size_t *flushed_at) ··· 1832 1789 size_t max_rec; 1833 1790 1834 1791 if (len_left <= decrypted) 1835 - return; 1792 + return false; 1836 1793 1837 1794 max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE; 1838 1795 if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec) 1839 - return; 1796 + return false; 1840 1797 1841 1798 *flushed_at = done; 1842 - sk_flush_backlog(sk); 1799 + return sk_flush_backlog(sk); 1843 1800 } 1844 1801 1845 1802 static long tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx, ··· 1911 1868 size_t flushed_at = 0; 1912 1869 struct strp_msg *rxm; 1913 1870 struct tls_msg *tlm; 1914 - struct sk_buff *skb; 1915 1871 ssize_t copied = 0; 1916 1872 bool async = false; 1917 1873 int target, err = 0; 1918 1874 long timeo; 1919 1875 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 1920 1876 bool is_peek = flags & MSG_PEEK; 1877 + bool released = true; 1921 1878 bool bpf_strp_enabled; 1922 1879 bool zc_capable; 1923 1880 ··· 1950 1907 zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek && 1951 1908 ctx->zc_capable; 1952 1909 decrypted = 0; 1953 - while (len && (decrypted + copied < target || ctx->recv_pkt)) { 1910 + while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) { 1954 1911 struct tls_decrypt_arg darg; 1955 1912 int to_decrypt, chunk; 1956 1913 1957 - err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT, timeo); 1914 + err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT, released, 1915 + timeo); 1958 1916 if (err <= 0) { 1959 1917 if (psock) { 1960 1918 chunk = sk_msg_recvmsg(sk, psock, msg, len, ··· 1971 1927 1972 1928 memset(&darg.inargs, 0, sizeof(darg.inargs)); 1973 1929 1974 - rxm = strp_msg(ctx->recv_pkt); 1975 - tlm = tls_msg(ctx->recv_pkt); 1930 + rxm = 
strp_msg(tls_strp_msg(ctx)); 1931 + tlm = tls_msg(tls_strp_msg(ctx)); 1976 1932 1977 1933 to_decrypt = rxm->full_len - prot->overhead_size; 1978 1934 ··· 1986 1942 else 1987 1943 darg.async = false; 1988 1944 1989 - err = tls_rx_one_record(sk, &msg->msg_iter, &darg); 1945 + err = tls_rx_one_record(sk, msg, &darg); 1990 1946 if (err < 0) { 1991 1947 tls_err_abort(sk, -EBADMSG); 1992 1948 goto recv_end; 1993 1949 } 1994 - 1995 - skb = darg.skb; 1996 - rxm = strp_msg(skb); 1997 - tlm = tls_msg(skb); 1998 1950 1999 1951 async |= darg.async; 2000 1952 ··· 2001 1961 * is known just after record is dequeued from stream parser. 2002 1962 * For tls1.3, we disable async. 2003 1963 */ 2004 - err = tls_record_content_type(msg, tlm, &control); 1964 + err = tls_record_content_type(msg, tls_msg(darg.skb), &control); 2005 1965 if (err <= 0) { 1966 + DEBUG_NET_WARN_ON_ONCE(darg.zc); 2006 1967 tls_rx_rec_done(ctx); 2007 1968 put_on_rx_list_err: 2008 - __skb_queue_tail(&ctx->rx_list, skb); 1969 + __skb_queue_tail(&ctx->rx_list, darg.skb); 2009 1970 goto recv_end; 2010 1971 } 2011 1972 2012 1973 /* periodically flush backlog, and feed strparser */ 2013 - tls_read_flush_backlog(sk, prot, len, to_decrypt, 2014 - decrypted + copied, &flushed_at); 1974 + released = tls_read_flush_backlog(sk, prot, len, to_decrypt, 1975 + decrypted + copied, 1976 + &flushed_at); 2015 1977 2016 1978 /* TLS 1.3 may have updated the length by more than overhead */ 1979 + rxm = strp_msg(darg.skb); 2017 1980 chunk = rxm->full_len; 2018 1981 tls_rx_rec_done(ctx); 2019 1982 2020 1983 if (!darg.zc) { 2021 1984 bool partially_consumed = chunk > len; 1985 + struct sk_buff *skb = darg.skb; 1986 + 1987 + DEBUG_NET_WARN_ON_ONCE(darg.skb == tls_strp_msg(ctx)); 2022 1988 2023 1989 if (async) { 2024 1990 /* TLS 1.2-only, to_decrypt must be text len */ ··· 2038 1992 } 2039 1993 2040 1994 if (bpf_strp_enabled) { 1995 + released = true; 2041 1996 err = sk_psock_tls_strp_read(psock, skb); 2042 1997 if (err != __SK_PASS) { 
2043 1998 rxm->offset = rxm->offset + rxm->full_len; ··· 2065 2018 rxm->full_len -= chunk; 2066 2019 goto put_on_rx_list; 2067 2020 } 2021 + 2022 + consume_skb(skb); 2068 2023 } 2069 2024 2070 2025 decrypted += chunk; 2071 2026 len -= chunk; 2072 - 2073 - consume_skb(skb); 2074 2027 2075 2028 /* Return full control message to userspace before trying 2076 2029 * to parse another message type ··· 2145 2098 struct tls_decrypt_arg darg; 2146 2099 2147 2100 err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK, 2148 - timeo); 2101 + true, timeo); 2149 2102 if (err <= 0) 2150 2103 goto splice_read_end; 2151 2104 ··· 2205 2158 ingress_empty = list_empty(&psock->ingress_msg); 2206 2159 rcu_read_unlock(); 2207 2160 2208 - return !ingress_empty || ctx->recv_pkt || 2161 + return !ingress_empty || tls_strp_msg_ready(ctx) || 2209 2162 !skb_queue_empty(&ctx->rx_list); 2210 2163 } 2211 2164 2212 - static int tls_read_size(struct strparser *strp, struct sk_buff *skb) 2165 + int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb) 2213 2166 { 2214 2167 struct tls_context *tls_ctx = tls_get_ctx(strp->sk); 2215 2168 struct tls_prot_info *prot = &tls_ctx->prot_info; 2216 2169 char header[TLS_HEADER_SIZE + MAX_IV_SIZE]; 2217 - struct strp_msg *rxm = strp_msg(skb); 2218 - struct tls_msg *tlm = tls_msg(skb); 2219 2170 size_t cipher_overhead; 2220 2171 size_t data_len = 0; 2221 2172 int ret; 2222 2173 2223 2174 /* Verify that we have a full TLS header, or wait for more data */ 2224 - if (rxm->offset + prot->prepend_size > skb->len) 2175 + if (strp->stm.offset + prot->prepend_size > skb->len) 2225 2176 return 0; 2226 2177 2227 2178 /* Sanity-check size of on-stack buffer. 
*/ ··· 2229 2184 } 2230 2185 2231 2186 /* Linearize header to local buffer */ 2232 - ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size); 2187 + ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size); 2233 2188 if (ret < 0) 2234 2189 goto read_failure; 2235 2190 2236 - tlm->control = header[0]; 2191 + strp->mark = header[0]; 2237 2192 2238 2193 data_len = ((header[4] & 0xFF) | (header[3] << 8)); 2239 2194 ··· 2260 2215 } 2261 2216 2262 2217 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE, 2263 - TCP_SKB_CB(skb)->seq + rxm->offset); 2218 + TCP_SKB_CB(skb)->seq + strp->stm.offset); 2264 2219 return data_len + TLS_HEADER_SIZE; 2265 2220 2266 2221 read_failure: ··· 2269 2224 return ret; 2270 2225 } 2271 2226 2272 - static void tls_queue(struct strparser *strp, struct sk_buff *skb) 2227 + void tls_rx_msg_ready(struct tls_strparser *strp) 2273 2228 { 2274 - struct tls_context *tls_ctx = tls_get_ctx(strp->sk); 2275 - struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2229 + struct tls_sw_context_rx *ctx; 2276 2230 2277 - ctx->recv_pkt = skb; 2278 - strp_pause(strp); 2279 - 2231 + ctx = container_of(strp, struct tls_sw_context_rx, strp); 2280 2232 ctx->saved_data_ready(strp->sk); 2281 2233 } 2282 2234 ··· 2283 2241 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2284 2242 struct sk_psock *psock; 2285 2243 2286 - strp_data_ready(&ctx->strp); 2244 + tls_strp_data_ready(&ctx->strp); 2287 2245 2288 2246 psock = sk_psock_get(sk); 2289 2247 if (psock) { ··· 2359 2317 kfree(tls_ctx->rx.iv); 2360 2318 2361 2319 if (ctx->aead_recv) { 2362 - kfree_skb(ctx->recv_pkt); 2363 - ctx->recv_pkt = NULL; 2364 2320 __skb_queue_purge(&ctx->rx_list); 2365 2321 crypto_free_aead(ctx->aead_recv); 2366 - strp_stop(&ctx->strp); 2322 + tls_strp_stop(&ctx->strp); 2367 2323 /* If tls_sw_strparser_arm() was not called (cleanup paths) 2368 - * we still want to strp_stop(), but sk->sk_data_ready was 2324 + * we still want to tls_strp_stop(), but 
sk->sk_data_ready was 2369 2325 * never swapped. 2370 2326 */ 2371 2327 if (ctx->saved_data_ready) { ··· 2378 2338 { 2379 2339 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2380 2340 2381 - strp_done(&ctx->strp); 2341 + tls_strp_done(&ctx->strp); 2382 2342 } 2383 2343 2384 2344 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx) ··· 2451 2411 rx_ctx->saved_data_ready = sk->sk_data_ready; 2452 2412 sk->sk_data_ready = tls_data_ready; 2453 2413 write_unlock_bh(&sk->sk_callback_lock); 2454 - 2455 - strp_check_rcv(&rx_ctx->strp); 2456 2414 } 2457 2415 2458 2416 void tls_update_rx_zc_capable(struct tls_context *tls_ctx) ··· 2470 2432 struct tls_sw_context_rx *sw_ctx_rx = NULL; 2471 2433 struct cipher_context *cctx; 2472 2434 struct crypto_aead **aead; 2473 - struct strp_callbacks cb; 2474 2435 u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size; 2475 2436 struct crypto_tfm *tfm; 2476 2437 char *iv, *rec_seq, *key, *salt, *cipher_name; ··· 2703 2666 crypto_info->version != TLS_1_3_VERSION && 2704 2667 !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC); 2705 2668 2706 - /* Set up strparser */ 2707 - memset(&cb, 0, sizeof(cb)); 2708 - cb.rcv_msg = tls_queue; 2709 - cb.parse_msg = tls_read_size; 2710 - 2711 - strp_init(&sw_ctx_rx->strp, sk, &cb); 2669 + tls_strp_init(&sw_ctx_rx->strp, sk); 2712 2670 } 2713 2671 2714 2672 goto out;