Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tls: rx: async: adjust record geometry immediately

Async crypto TLS Rx currently waits for crypto to be done
in order to strip the TLS header and trailer. Simplify
the code by moving the pointers immediately; since only
TLS 1.2 is supported here, there is no message padding.

This simplifies the decryption into a new skb in the next
patch as we don't have to worry about input vs output
skb in the decrypt_done() handler any more.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Jakub Kicinski and committed by
David S. Miller
6ececdc5 6bd116c8

+10 -39
+10 -39
net/tls/tls_sw.c
··· 184 184 struct scatterlist *sgin = aead_req->src; 185 185 struct tls_sw_context_rx *ctx; 186 186 struct tls_context *tls_ctx; 187 - struct tls_prot_info *prot; 188 187 struct scatterlist *sg; 189 - struct sk_buff *skb; 190 188 unsigned int pages; 189 + struct sock *sk; 191 190 192 - skb = (struct sk_buff *)req->data; 193 - tls_ctx = tls_get_ctx(skb->sk); 191 + sk = (struct sock *)req->data; 192 + tls_ctx = tls_get_ctx(sk); 194 193 ctx = tls_sw_ctx_rx(tls_ctx); 195 - prot = &tls_ctx->prot_info; 196 194 197 195 /* Propagate if there was an err */ 198 196 if (err) { 199 197 if (err == -EBADMSG) 200 - TLS_INC_STATS(sock_net(skb->sk), 201 - LINUX_MIB_TLSDECRYPTERROR); 198 + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR); 202 199 ctx->async_wait.err = err; 203 - tls_err_abort(skb->sk, err); 204 - } else { 205 - struct strp_msg *rxm = strp_msg(skb); 206 - 207 - /* No TLS 1.3 support with async crypto */ 208 - WARN_ON(prot->tail_size); 209 - 210 - rxm->offset += prot->prepend_size; 211 - rxm->full_len -= prot->overhead_size; 200 + tls_err_abort(sk, err); 212 201 } 213 - 214 - /* After using skb->sk to propagate sk through crypto async callback 215 - * we need to NULL it again. 216 - */ 217 - skb->sk = NULL; 218 - 219 202 220 203 /* Free the destination pages if skb was not decrypted inplace */ 221 204 if (sgout != sgin) { ··· 219 236 } 220 237 221 238 static int tls_do_decryption(struct sock *sk, 222 - struct sk_buff *skb, 223 239 struct scatterlist *sgin, 224 240 struct scatterlist *sgout, 225 241 char *iv_recv, ··· 238 256 (u8 *)iv_recv); 239 257 240 258 if (darg->async) { 241 - /* Using skb->sk to push sk through to crypto async callback 242 - * handler. This allows propagating errors up to the socket 243 - * if needed. It _must_ be cleared in the async handler 244 - * before consume_skb is called. We _know_ skb->sk is NULL 245 - * because it is a clone from strparser. 
246 - */ 247 - skb->sk = sk; 248 259 aead_request_set_callback(aead_req, 249 260 CRYPTO_TFM_REQ_MAY_BACKLOG, 250 - tls_decrypt_done, skb); 261 + tls_decrypt_done, sk); 251 262 atomic_inc(&ctx->decrypt_pending); 252 263 } else { 253 264 aead_request_set_callback(aead_req, ··· 1529 1554 } 1530 1555 1531 1556 /* Prepare and submit AEAD request */ 1532 - err = tls_do_decryption(sk, skb, sgin, sgout, dctx->iv, 1557 + err = tls_do_decryption(sk, sgin, sgout, dctx->iv, 1533 1558 data_len + prot->tail_size, aead_req, darg); 1534 1559 if (err) 1535 1560 goto exit_free_pages; ··· 1592 1617 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR); 1593 1618 return err; 1594 1619 } 1595 - if (darg->async) { 1596 - if (darg->skb == ctx->recv_pkt) 1597 - ctx->recv_pkt = NULL; 1598 - goto decrypt_next; 1599 - } 1620 + if (darg->async) 1621 + goto decrypt_done; 1600 1622 /* If opportunistic TLS 1.3 ZC failed retry without ZC */ 1601 1623 if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION && 1602 1624 darg->tail != TLS_RECORD_TYPE_DATA)) { ··· 1604 1632 return tls_rx_one_record(sk, dest, darg); 1605 1633 } 1606 1634 1635 + decrypt_done: 1607 1636 if (darg->skb == ctx->recv_pkt) 1608 1637 ctx->recv_pkt = NULL; 1609 1638 1610 - decrypt_done: 1611 1639 pad = tls_padding_length(prot, darg->skb, darg); 1612 1640 if (pad < 0) { 1613 1641 consume_skb(darg->skb); ··· 1618 1646 rxm->full_len -= pad; 1619 1647 rxm->offset += prot->prepend_size; 1620 1648 rxm->full_len -= prot->overhead_size; 1621 - decrypt_next: 1622 1649 tls_advance_record_sn(sk, prot, &tls_ctx->rx); 1623 1650 1624 1651 return 0;