Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tls: rx: return the decrypted skb via darg

Instead of using ctx->recv_pkt after decryption, read the skb
from darg.skb. This moves the decision of what the "output skb"
is into the decrypt handlers. For now, after a decrypt handler
returns successfully, ctx->recv_pkt is simply moved to darg.skb,
but this will change soon.

Note that tls_decrypt_sg() cannot clear ctx->recv_pkt itself,
because it also gets called to re-encrypt (i.e. by the device offload).
So we need an awkward temporary if () in tls_rx_one_record().

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Jakub Kicinski and committed by
David S. Miller
6bd116c8 541cc48b

+41 -12
+41 -12
net/tls/tls_sw.c
··· 47 47 #include "tls.h" 48 48 49 49 struct tls_decrypt_arg { 50 + struct_group(inargs, 50 51 bool zc; 51 52 bool async; 52 53 u8 tail; 54 + ); 55 + 56 + struct sk_buff *skb; 53 57 }; 54 58 55 59 struct tls_decrypt_ctx { ··· 1416 1412 * ------------------------------------------------------------------- 1417 1413 * zc | Zero-copy decrypt allowed | Zero-copy performed 1418 1414 * async | Async decrypt allowed | Async crypto used / in progress 1415 + * skb | * | Output skb 1419 1416 */ 1420 1417 1421 1418 /* This function decrypts the input skb into either out_iov or in out_sg ··· 1556 1551 /* Prepare and submit AEAD request */ 1557 1552 err = tls_do_decryption(sk, skb, sgin, sgout, dctx->iv, 1558 1553 data_len + prot->tail_size, aead_req, darg); 1554 + if (err) 1555 + goto exit_free_pages; 1556 + 1557 + darg->skb = tls_strp_msg(ctx); 1559 1558 if (darg->async) 1560 1559 return 0; 1561 1560 1562 1561 if (prot->tail_size) 1563 1562 darg->tail = dctx->tail; 1564 1563 1564 + exit_free_pages: 1565 1565 /* Release the pages in case iov was mapped to pages */ 1566 1566 for (; pages > 0; pages--) 1567 1567 put_page(sg_page(&sgout[pages])); ··· 1579 1569 tls_decrypt_device(struct sock *sk, struct tls_context *tls_ctx, 1580 1570 struct tls_decrypt_arg *darg) 1581 1571 { 1572 + struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1582 1573 int err; 1583 1574 1584 1575 if (tls_ctx->rx_conf != TLS_HW) ··· 1591 1580 1592 1581 darg->zc = false; 1593 1582 darg->async = false; 1583 + darg->skb = tls_strp_msg(ctx); 1584 + ctx->recv_pkt = NULL; 1594 1585 return 1; 1595 1586 } 1596 1587 ··· 1617 1604 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR); 1618 1605 return err; 1619 1606 } 1620 - if (darg->async) 1607 + if (darg->async) { 1608 + if (darg->skb == ctx->recv_pkt) 1609 + ctx->recv_pkt = NULL; 1621 1610 goto decrypt_next; 1611 + } 1622 1612 /* If opportunistic TLS 1.3 ZC failed retry without ZC */ 1623 1613 if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION && 
1624 1614 darg->tail != TLS_RECORD_TYPE_DATA)) { ··· 1632 1616 return tls_rx_one_record(sk, dest, darg); 1633 1617 } 1634 1618 1635 - decrypt_done: 1636 - pad = tls_padding_length(prot, ctx->recv_pkt, darg); 1637 - if (pad < 0) 1638 - return pad; 1619 + if (darg->skb == ctx->recv_pkt) 1620 + ctx->recv_pkt = NULL; 1639 1621 1640 - rxm = strp_msg(ctx->recv_pkt); 1622 + decrypt_done: 1623 + pad = tls_padding_length(prot, darg->skb, darg); 1624 + if (pad < 0) { 1625 + consume_skb(darg->skb); 1626 + return pad; 1627 + } 1628 + 1629 + rxm = strp_msg(darg->skb); 1641 1630 rxm->full_len -= pad; 1642 1631 rxm->offset += prot->prepend_size; 1643 1632 rxm->full_len -= prot->overhead_size; ··· 1684 1663 1685 1664 static void tls_rx_rec_done(struct tls_sw_context_rx *ctx) 1686 1665 { 1666 + consume_skb(ctx->recv_pkt); 1687 1667 ctx->recv_pkt = NULL; 1688 1668 __strp_unpause(&ctx->strp); 1689 1669 } ··· 1894 1872 ctx->zc_capable; 1895 1873 decrypted = 0; 1896 1874 while (len && (decrypted + copied < target || ctx->recv_pkt)) { 1897 - struct tls_decrypt_arg darg = {}; 1875 + struct tls_decrypt_arg darg; 1898 1876 int to_decrypt, chunk; 1899 1877 1900 1878 err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT, timeo); ··· 1911 1889 goto recv_end; 1912 1890 } 1913 1891 1914 - skb = ctx->recv_pkt; 1915 - rxm = strp_msg(skb); 1916 - tlm = tls_msg(skb); 1892 + memset(&darg.inargs, 0, sizeof(darg.inargs)); 1893 + 1894 + rxm = strp_msg(ctx->recv_pkt); 1895 + tlm = tls_msg(ctx->recv_pkt); 1917 1896 1918 1897 to_decrypt = rxm->full_len - prot->overhead_size; 1919 1898 ··· 1933 1910 tls_err_abort(sk, -EBADMSG); 1934 1911 goto recv_end; 1935 1912 } 1913 + 1914 + skb = darg.skb; 1915 + rxm = strp_msg(skb); 1916 + tlm = tls_msg(skb); 1936 1917 1937 1918 async |= darg.async; 1938 1919 ··· 2078 2051 if (!skb_queue_empty(&ctx->rx_list)) { 2079 2052 skb = __skb_dequeue(&ctx->rx_list); 2080 2053 } else { 2081 - struct tls_decrypt_arg darg = {}; 2054 + struct tls_decrypt_arg darg; 2082 2055 2083 
2056 err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK, 2084 2057 timeo); 2085 2058 if (err <= 0) 2086 2059 goto splice_read_end; 2060 + 2061 + memset(&darg.inargs, 0, sizeof(darg.inargs)); 2087 2062 2088 2063 err = tls_rx_one_record(sk, NULL, &darg); 2089 2064 if (err < 0) { ··· 2093 2064 goto splice_read_end; 2094 2065 } 2095 2066 2096 - skb = ctx->recv_pkt; 2097 2067 tls_rx_rec_done(ctx); 2068 + skb = darg.skb; 2098 2069 } 2099 2070 2100 2071 rxm = strp_msg(skb);