Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-tls-minor-cleanups'

Jakub Kicinski says:

====================
net/tls: minor cleanups

This set is a grab bag of TLS cleanups accumulated in my tree
in an attempt to avoid merge problems with net. Nothing stands
out. First patch dedups context information. Next control path
locking is very slightly optimized. Fourth patch cleans up
ugly #ifdefs.
====================

Reviewed-by: Boris Pismenny <borisp@mellanox.com>
Reviewed-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

+85 -99
+4 -2
drivers/crypto/chelsio/chtls/chtls_main.c
··· 474 474 struct tls_context *ctx = tls_get_ctx(sk); 475 475 476 476 if (level != SOL_TLS) 477 - return ctx->getsockopt(sk, level, optname, optval, optlen); 477 + return ctx->sk_proto->getsockopt(sk, level, 478 + optname, optval, optlen); 478 479 479 480 return do_chtls_getsockopt(sk, optval, optlen); 480 481 } ··· 542 541 struct tls_context *ctx = tls_get_ctx(sk); 543 542 544 543 if (level != SOL_TLS) 545 - return ctx->setsockopt(sk, level, optname, optval, optlen); 544 + return ctx->sk_proto->setsockopt(sk, level, 545 + optname, optval, optlen); 546 546 547 547 return do_chtls_setsockopt(sk, optname, optval, optlen); 548 548 }
+32 -16
include/net/tls.h
··· 275 275 struct proto *sk_proto; 276 276 277 277 void (*sk_destruct)(struct sock *sk); 278 - void (*sk_proto_close)(struct sock *sk, long timeout); 279 - 280 - int (*setsockopt)(struct sock *sk, int level, 281 - int optname, char __user *optval, 282 - unsigned int optlen); 283 - int (*getsockopt)(struct sock *sk, int level, 284 - int optname, char __user *optval, 285 - int __user *optlen); 286 - int (*hash)(struct sock *sk); 287 - void (*unhash)(struct sock *sk); 288 278 289 279 union tls_crypto_context crypto_send; 290 280 union tls_crypto_context crypto_recv;
··· 366 376 struct pipe_inode_info *pipe, 367 377 size_t len, unsigned int flags); 368 378 369 - int tls_set_device_offload(struct sock *sk, struct tls_context *ctx); 370 379 int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); 371 380 int tls_device_sendpage(struct sock *sk, struct page *page, 372 381 int offset, size_t size, int flags); 373 - void tls_device_free_resources_tx(struct sock *sk); 374 - void tls_device_init(void); 375 - void tls_device_cleanup(void); 376 382 int tls_tx_records(struct sock *sk, int flags); 377 383 378 384 struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
··· 645 659 unsigned char *record_type); 646 660 void tls_register_device(struct tls_device *device); 647 661 void tls_unregister_device(struct tls_device *device); 648 - int tls_device_decrypted(struct sock *sk, struct sk_buff *skb); 649 662 int decrypt_skb(struct sock *sk, struct sk_buff *skb, 650 663 struct scatterlist *sgout); 651 664 struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
··· 657 672 struct tls_offload_context_tx *offload_ctx, 658 673 struct tls_crypto_info *crypto_info); 659 674 675 + #ifdef CONFIG_TLS_DEVICE 676 + void tls_device_init(void); 677 + void tls_device_cleanup(void); 678 + int tls_set_device_offload(struct sock *sk, struct tls_context *ctx); 679 + void tls_device_free_resources_tx(struct sock *sk); 660 680 int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx); 661 - 662 681 void tls_device_offload_cleanup_rx(struct sock *sk); 663 682 void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq); 683 + int tls_device_decrypted(struct sock *sk, struct sk_buff *skb); 684 + #else 685 + static inline void tls_device_init(void) {} 686 + static inline void tls_device_cleanup(void) {} 664 687 688 + static inline int 689 + tls_set_device_offload(struct sock *sk, struct tls_context *ctx) 690 + { 691 + return -EOPNOTSUPP; 692 + } 693 + 694 + static inline void tls_device_free_resources_tx(struct sock *sk) {} 695 + 696 + static inline int 697 + tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) 698 + { 699 + return -EOPNOTSUPP; 700 + } 701 + 702 + static inline void tls_device_offload_cleanup_rx(struct sock *sk) {} 703 + static inline void 704 + tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {} 705 + 706 + static inline int tls_device_decrypted(struct sock *sk, struct sk_buff *skb) 707 + { 708 + return 0; 709 + } 710 + #endif 665 711 #endif /* _TLS_OFFLOAD_H */
+36 -42
net/tls/tls_device.c
··· 159 159 160 160 spin_lock_irqsave(&ctx->lock, flags); 161 161 info = ctx->retransmit_hint; 162 - if (info && !before(acked_seq, info->end_seq)) { 162 + if (info && !before(acked_seq, info->end_seq)) 163 163 ctx->retransmit_hint = NULL; 164 - list_del(&info->list); 165 - destroy_record(info); 166 - deleted_records++; 167 - } 168 164 169 165 list_for_each_entry_safe(info, temp, &ctx->records_list, list) { 170 166 if (before(acked_seq, info->end_seq)) ··· 834 838 struct net_device *netdev; 835 839 char *iv, *rec_seq; 836 840 struct sk_buff *skb; 837 - int rc = -EINVAL; 838 841 __be64 rcd_sn; 842 + int rc; 839 843 840 844 if (!ctx) 841 - goto out; 845 + return -EINVAL; 842 846 843 - if (ctx->priv_ctx_tx) { 844 - rc = -EEXIST; 845 - goto out; 846 - } 847 + if (ctx->priv_ctx_tx) 848 + return -EEXIST; 847 849 848 850 start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL); 849 - if (!start_marker_record) { 850 - rc = -ENOMEM; 851 - goto out; 852 - } 851 + if (!start_marker_record) 852 + return -ENOMEM; 853 853 854 854 offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL); 855 855 if (!offload_ctx) { ··· 931 939 if (skb) 932 940 TCP_SKB_CB(skb)->eor = 1; 933 941 934 - /* We support starting offload on multiple sockets 935 - * concurrently, so we only need a read lock here. 936 - * This lock must precede get_netdev_for_sock to prevent races between 937 - * NETDEV_DOWN and setsockopt. 
938 - */ 939 - down_read(&device_offload_lock); 940 942 netdev = get_netdev_for_sock(sk); 941 943 if (!netdev) { 942 944 pr_err_ratelimited("%s: netdev not found\n", __func__); 943 945 rc = -EINVAL; 944 - goto release_lock; 946 + goto disable_cad; 945 947 } 946 948 947 949 if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
··· 946 960 /* Avoid offloading if the device is down 947 961 * We don't want to offload new flows after 948 962 * the NETDEV_DOWN event 963 + * 964 + * device_offload_lock is taken in tls_devices's NETDEV_DOWN 965 + * handler thus protecting from the device going down before 966 + * ctx was added to tls_device_list. 949 967 */ 968 + down_read(&device_offload_lock); 950 969 if (!(netdev->flags & IFF_UP)) { 951 970 rc = -EINVAL; 952 - goto release_netdev; 971 + goto release_lock; 953 972 } 954 973 955 974 ctx->priv_ctx_tx = offload_ctx;
··· 962 971 &ctx->crypto_send.info, 963 972 tcp_sk(sk)->write_seq); 964 973 if (rc) 965 - goto release_netdev; 974 + goto release_lock; 966 975 967 976 tls_device_attach(ctx, sk, netdev); 977 + up_read(&device_offload_lock); 968 978 969 979 /* following this assignment tls_is_sk_tx_device_offloaded 970 980 * will return true and the context might be accessed
··· 973 981 */ 974 982 smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb); 975 983 dev_put(netdev); 976 - up_read(&device_offload_lock); 977 - goto out; 978 984 979 - release_netdev: 980 - dev_put(netdev); 985 + return 0; 986 + 981 987 release_lock: 982 988 up_read(&device_offload_lock); 989 + release_netdev: 990 + dev_put(netdev); 991 + disable_cad: 983 992 clean_acked_data_disable(inet_csk(sk)); 984 993 crypto_free_aead(offload_ctx->aead_send); 985 994 free_rec_seq:
··· 992 999 ctx->priv_ctx_tx = NULL; 993 1000 free_marker_record: 994 1001 kfree(start_marker_record); 995 - out: 996 1002 return rc; 997 1003 }
··· 1004 1012 if (ctx->crypto_recv.info.version != TLS_1_2_VERSION) 1005 1013 return -EOPNOTSUPP; 1006 1014 1007 - /* We support starting offload on multiple sockets 1008 - * concurrently, so we only need a read lock here. 1009 - * This lock must precede get_netdev_for_sock to prevent races between 1010 - * NETDEV_DOWN and setsockopt. 1011 - */ 1012 - down_read(&device_offload_lock); 1013 1015 netdev = get_netdev_for_sock(sk); 1014 1016 if (!netdev) { 1015 1017 pr_err_ratelimited("%s: netdev not found\n", __func__); 1016 - rc = -EINVAL; 1017 - goto release_lock; 1018 + return -EINVAL; 1018 1019 } 1019 1020 1020 1021 if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
··· 1018 1033 /* Avoid offloading if the device is down 1019 1034 * We don't want to offload new flows after 1020 1035 * the NETDEV_DOWN event 1036 + * 1037 + * device_offload_lock is taken in tls_devices's NETDEV_DOWN 1038 + * handler thus protecting from the device going down before 1039 + * ctx was added to tls_device_list. 1021 1040 */ 1041 + down_read(&device_offload_lock); 1022 1042 if (!(netdev->flags & IFF_UP)) { 1023 1043 rc = -EINVAL; 1024 - goto release_netdev; 1044 + goto release_lock; 1025 1045 } 1026 1046 1027 1047 context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL); 1028 1048 if (!context) { 1029 1049 rc = -ENOMEM; 1030 - goto release_netdev; 1050 + goto release_lock; 1031 1051 } 1032 1052 context->resync_nh_reset = 1; 1033 1053
··· 1048 1058 goto free_sw_resources; 1049 1059 1050 1060 tls_device_attach(ctx, sk, netdev); 1051 - goto release_netdev; 1061 + up_read(&device_offload_lock); 1062 + 1063 + dev_put(netdev); 1064 + 1065 + return 0; 1052 1066 1053 1067 free_sw_resources: 1054 1068 up_read(&device_offload_lock);
··· 1060 1066 down_read(&device_offload_lock); 1061 1067 release_ctx: 1062 1068 ctx->priv_ctx_rx = NULL; 1063 - release_netdev: 1064 - dev_put(netdev); 1065 1069 release_lock: 1066 1070 up_read(&device_offload_lock); 1071 + release_netdev: 1072 + dev_put(netdev); 1067 1073 return rc; 1068 1074 } 1069 1075
+11 -35
net/tls/tls_main.c
··· 286 286 kfree(ctx->tx.rec_seq); 287 287 kfree(ctx->tx.iv); 288 288 tls_sw_release_resources_tx(sk); 289 - #ifdef CONFIG_TLS_DEVICE 290 289 } else if (ctx->tx_conf == TLS_HW) { 291 290 tls_device_free_resources_tx(sk); 292 - #endif 293 291 } 294 292 295 293 if (ctx->rx_conf == TLS_SW) 296 294 tls_sw_release_resources_rx(sk); 297 - 298 - #ifdef CONFIG_TLS_DEVICE 299 - if (ctx->rx_conf == TLS_HW) 295 + else if (ctx->rx_conf == TLS_HW) 300 296 tls_device_offload_cleanup_rx(sk); 301 - #endif 302 297 } 303 298 304 299 static void tls_sk_proto_close(struct sock *sk, long timeout)
··· 326 331 tls_sw_strparser_done(ctx); 327 332 if (ctx->rx_conf == TLS_SW) 328 333 tls_sw_free_ctx_rx(ctx); 329 - ctx->sk_proto_close(sk, timeout); 334 + ctx->sk_proto->close(sk, timeout); 330 335 331 336 if (free_ctx) 332 337 tls_ctx_free(sk, ctx);
··· 446 451 struct tls_context *ctx = tls_get_ctx(sk); 447 452 448 453 if (level != SOL_TLS) 449 - return ctx->getsockopt(sk, level, optname, optval, optlen); 454 + return ctx->sk_proto->getsockopt(sk, level, 455 + optname, optval, optlen); 450 456 451 457 return do_tls_getsockopt(sk, optname, optval, optlen); 452 458 }
··· 532 536 } 533 537 534 538 if (tx) { 535 - #ifdef CONFIG_TLS_DEVICE 536 539 rc = tls_set_device_offload(sk, ctx); 537 540 conf = TLS_HW; 538 541 if (rc) { 539 - #else 540 - { 541 - #endif 542 542 rc = tls_set_sw_offload(sk, ctx, 1); 543 543 if (rc) 544 544 goto err_crypto_info; 545 545 conf = TLS_SW; 546 546 } 547 547 } else { 548 - #ifdef CONFIG_TLS_DEVICE 549 548 rc = tls_set_device_offload_rx(sk, ctx); 550 549 conf = TLS_HW; 551 550 if (rc) { 552 - #else 553 - { 554 - #endif 555 551 rc = tls_set_sw_offload(sk, ctx, 0); 556 552 if (rc) 557 553 goto err_crypto_info;
··· 597 609 struct tls_context *ctx = tls_get_ctx(sk); 598 610 599 611 if (level != SOL_TLS) 600 - return ctx->setsockopt(sk, level, optname, optval, optlen); 612 + return ctx->sk_proto->setsockopt(sk, level, optname, optval, 613 + optlen); 601 614 602 615 return do_tls_setsockopt(sk, optname, optval, optlen); 603 616 }
··· 613 624 return NULL; 614 625 615 626 rcu_assign_pointer(icsk->icsk_ulp_data, ctx); 616 - ctx->setsockopt = sk->sk_prot->setsockopt; 617 - ctx->getsockopt = sk->sk_prot->getsockopt; 618 - ctx->sk_proto_close = sk->sk_prot->close; 619 - ctx->unhash = sk->sk_prot->unhash; 627 + ctx->sk_proto = sk->sk_prot; 620 628 return ctx; 621 629 }
··· 669 683 670 684 spin_unlock_bh(&device_spinlock); 671 685 tls_build_proto(sk); 672 - ctx->hash = sk->sk_prot->hash; 673 - ctx->unhash = sk->sk_prot->unhash; 674 - ctx->sk_proto_close = sk->sk_prot->close; 675 686 ctx->sk_destruct = sk->sk_destruct; 676 687 sk->sk_destruct = tls_hw_sk_destruct; 677 688 ctx->rx_conf = TLS_HW_RECORD;
··· 700 717 } 701 718 } 702 719 spin_unlock_bh(&device_spinlock); 703 - ctx->unhash(sk); 720 + ctx->sk_proto->unhash(sk); 704 721 } 705 722 706 723 static int tls_hw_hash(struct sock *sk)
··· 709 726 struct tls_device *dev; 710 727 int err; 711 728 712 - err = ctx->hash(sk); 729 + err = ctx->sk_proto->hash(sk); 713 730 spin_lock_bh(&device_spinlock); 714 731 list_for_each_entry(dev, &device_list, dev_list) { 715 732 if (dev->hash) {
··· 799 816 800 817 ctx->tx_conf = TLS_BASE; 801 818 ctx->rx_conf = TLS_BASE; 802 - ctx->sk_proto = sk->sk_prot; 803 819 update_sk_prot(sk, ctx); 804 820 out: 805 821 write_unlock_bh(&sk->sk_callback_lock);
··· 810 828 struct tls_context *ctx; 811 829 812 830 ctx = tls_get_ctx(sk); 813 - if (likely(ctx)) { 814 - ctx->sk_proto_close = p->close; 831 + if (likely(ctx)) 815 832 ctx->sk_proto = p; 816 - } else { 833 + else 817 834 sk->sk_prot = p; 818 - } 819 835 } 820 836 821 837 static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
··· 907 927 tls_sw_proto_ops = inet_stream_ops; 908 928 tls_sw_proto_ops.splice_read = tls_sw_splice_read; 909 929 910 - #ifdef CONFIG_TLS_DEVICE 911 930 tls_device_init(); 912 - #endif 913 931 tcp_register_ulp(&tcp_tls_ulp_ops); 914 932 915 933 return 0;
··· 916 938 static void __exit tls_unregister(void) 917 939 { 918 940 tcp_unregister_ulp(&tcp_tls_ulp_ops); 919 - #ifdef CONFIG_TLS_DEVICE 920 941 tls_device_cleanup(); 921 - #endif 922 942 } 923 943 924 944 module_init(tls_register);
+2 -4
net/tls/tls_sw.c
··· 1489 1489 int pad, err = 0; 1490 1490 1491 1491 if (!ctx->decrypted) { 1492 - #ifdef CONFIG_TLS_DEVICE 1493 1492 if (tls_ctx->rx_conf == TLS_HW) { 1494 1493 err = tls_device_decrypted(sk, skb); 1495 1494 if (err < 0) 1496 1495 return err; 1497 1496 } 1498 - #endif 1497 + 1499 1498 /* Still not decrypted after tls_device */ 1500 1499 if (!ctx->decrypted) { 1501 1500 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc, ··· 2013 2014 ret = -EINVAL; 2014 2015 goto read_failure; 2015 2016 } 2016 - #ifdef CONFIG_TLS_DEVICE 2017 + 2017 2018 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE, 2018 2019 TCP_SKB_CB(skb)->seq + rxm->offset); 2019 - #endif 2020 2020 return data_len + TLS_HEADER_SIZE; 2021 2021 2022 2022 read_failure: