Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto/chtls: IPv6 support for inline TLS

Extends support to IPv6 for Inline TLS server.

Signed-off-by: Vinay Kumar Yadav <vinay.yadav@chelsio.com>

v1->v2:
- cc'd tcp folks.

v2->v3:
- changed EXPORT_SYMBOL() to EXPORT_SYMBOL_GPL()

Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Vinay Kumar Yadav; committed by David S. Miller.
6abde0b2 a56772dc

+171 -46
+158 -43
drivers/crypto/chelsio/chtls/chtls_cm.c
··· 18 18 #include <linux/kallsyms.h> 19 19 #include <linux/kprobes.h> 20 20 #include <linux/if_vlan.h> 21 + #include <linux/ipv6.h> 22 + #include <net/ipv6.h> 23 + #include <net/transp_v6.h> 24 + #include <net/ip6_route.h> 21 25 #include <net/inet_common.h> 22 26 #include <net/tcp.h> 23 27 #include <net/dst.h> 24 28 #include <net/tls.h> 29 + #include <net/addrconf.h> 30 + #include <net/secure_seq.h> 25 31 26 32 #include "chtls.h" 27 33 #include "chtls_cm.h" 34 + #include "clip_tbl.h" 28 35 29 36 /* 30 37 * State transitions and actions for close. Note that if we are in SYN_SENT ··· 89 82 kfree(csk); 90 83 } 91 84 92 - static struct net_device *chtls_ipv4_netdev(struct chtls_dev *cdev, 85 + static struct net_device *chtls_find_netdev(struct chtls_dev *cdev, 93 86 struct sock *sk) 94 87 { 95 88 struct net_device *ndev = cdev->ports[0]; 89 + struct net_device *temp; 90 + int addr_type; 96 91 97 - if (likely(!inet_sk(sk)->inet_rcv_saddr)) 98 - return ndev; 92 + switch (sk->sk_family) { 93 + case PF_INET: 94 + if (likely(!inet_sk(sk)->inet_rcv_saddr)) 95 + return ndev; 96 + ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr); 97 + break; 98 + case PF_INET6: 99 + addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); 100 + if (likely(addr_type == IPV6_ADDR_ANY)) 101 + return ndev; 99 102 100 - ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr); 103 + for_each_netdev_rcu(&init_net, temp) { 104 + if (ipv6_chk_addr(&init_net, (struct in6_addr *) 105 + &sk->sk_v6_rcv_saddr, temp, 1)) { 106 + ndev = temp; 107 + break; 108 + } 109 + } 110 + break; 111 + default: 112 + return NULL; 113 + } 114 + 101 115 if (!ndev) 102 116 return NULL; 103 117 ··· 474 446 free_tls_keyid(sk); 475 447 kref_put(&csk->kref, chtls_sock_release); 476 448 csk->cdev = NULL; 477 - sk->sk_prot = &tcp_prot; 449 + if (sk->sk_family == AF_INET) 450 + sk->sk_prot = &tcp_prot; 451 + else 452 + sk->sk_prot = &tcpv6_prot; 478 453 sk->sk_prot->destroy(sk); 479 454 } 480 455 ··· 504 473 while (*pprev) { 
505 474 struct request_sock *req = *pprev; 506 475 507 - if (req->rsk_ops == &chtls_rsk_ops) { 476 + if (req->rsk_ops == &chtls_rsk_ops || 477 + req->rsk_ops == &chtls_rsk_opsv6) { 508 478 struct sock *child = req->sk; 509 479 510 480 *pprev = req->dl_next; ··· 632 600 struct listen_ctx *ctx; 633 601 struct adapter *adap; 634 602 struct port_info *pi; 603 + bool clip_valid; 635 604 int stid; 636 605 int ret; 637 606 638 - if (sk->sk_family != PF_INET) 639 - return -EAGAIN; 640 - 607 + clip_valid = false; 641 608 rcu_read_lock(); 642 - ndev = chtls_ipv4_netdev(cdev, sk); 609 + ndev = chtls_find_netdev(cdev, sk); 643 610 rcu_read_unlock(); 644 611 if (!ndev) 645 612 return -EBADF; ··· 669 638 if (!listen_hash_add(cdev, sk, stid)) 670 639 goto free_stid; 671 640 672 - ret = cxgb4_create_server(ndev, stid, 673 - inet_sk(sk)->inet_rcv_saddr, 674 - inet_sk(sk)->inet_sport, 0, 675 - cdev->lldi->rxq_ids[0]); 641 + if (sk->sk_family == PF_INET) { 642 + ret = cxgb4_create_server(ndev, stid, 643 + inet_sk(sk)->inet_rcv_saddr, 644 + inet_sk(sk)->inet_sport, 0, 645 + cdev->lldi->rxq_ids[0]); 646 + } else { 647 + int addr_type; 648 + 649 + addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); 650 + if (addr_type != IPV6_ADDR_ANY) { 651 + ret = cxgb4_clip_get(ndev, (const u32 *) 652 + &sk->sk_v6_rcv_saddr, 1); 653 + if (ret) 654 + goto del_hash; 655 + clip_valid = true; 656 + } 657 + ret = cxgb4_create_server6(ndev, stid, 658 + &sk->sk_v6_rcv_saddr, 659 + inet_sk(sk)->inet_sport, 660 + cdev->lldi->rxq_ids[0]); 661 + } 676 662 if (ret > 0) 677 663 ret = net_xmit_errno(ret); 678 664 if (ret) 679 665 goto del_hash; 680 666 return 0; 681 667 del_hash: 668 + if (clip_valid) 669 + cxgb4_clip_release(ndev, (const u32 *)&sk->sk_v6_rcv_saddr, 1); 682 670 listen_hash_del(cdev, sk); 683 671 free_stid: 684 672 cxgb4_free_stid(cdev->tids, stid, sk->sk_family); ··· 711 661 void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk) 712 662 { 713 663 struct listen_ctx *listen_ctx; 664 + struct 
chtls_sock *csk; 665 + int addr_type = 0; 714 666 int stid; 715 667 716 668 stid = listen_hash_del(cdev, sk); ··· 723 671 chtls_reset_synq(listen_ctx); 724 672 725 673 cxgb4_remove_server(cdev->lldi->ports[0], stid, 726 - cdev->lldi->rxq_ids[0], 0); 674 + cdev->lldi->rxq_ids[0], sk->sk_family == PF_INET6); 675 + 676 + if (sk->sk_family == PF_INET6) { 677 + csk = rcu_dereference_sk_user_data(sk); 678 + addr_type = ipv6_addr_type((const struct in6_addr *) 679 + &sk->sk_v6_rcv_saddr); 680 + if (addr_type != IPV6_ADDR_ANY) 681 + cxgb4_clip_release(csk->egress_dev, (const u32 *) 682 + &sk->sk_v6_rcv_saddr, 1); 683 + } 727 684 chtls_disconnect_acceptq(sk); 728 685 } 729 686 ··· 941 880 tp = tcp_sk(sk); 942 881 tcpoptsz = 0; 943 882 944 - iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr); 883 + if (sk->sk_family == AF_INET6) 884 + iphdrsz = sizeof(struct ipv6hdr) + sizeof(struct tcphdr); 885 + else 886 + iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr); 945 887 if (req->tcpopt.tstamp) 946 888 tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4); 947 889 ··· 1109 1045 if (!newsk) 1110 1046 goto free_oreq; 1111 1047 1112 - dst = inet_csk_route_child_sock(lsk, newsk, oreq); 1113 - if (!dst) 1114 - goto free_sk; 1048 + if (lsk->sk_family == AF_INET) { 1049 + dst = inet_csk_route_child_sock(lsk, newsk, oreq); 1050 + if (!dst) 1051 + goto free_sk; 1115 1052 1116 - n = dst_neigh_lookup(dst, &iph->saddr); 1053 + n = dst_neigh_lookup(dst, &iph->saddr); 1054 + } else { 1055 + const struct ipv6hdr *ip6h; 1056 + struct flowi6 fl6; 1057 + 1058 + ip6h = (const struct ipv6hdr *)network_hdr; 1059 + memset(&fl6, 0, sizeof(fl6)); 1060 + fl6.flowi6_proto = IPPROTO_TCP; 1061 + fl6.saddr = ip6h->daddr; 1062 + fl6.daddr = ip6h->saddr; 1063 + fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port; 1064 + fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num); 1065 + security_req_classify_flow(oreq, flowi6_to_flowi(&fl6)); 1066 + dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL); 1067 + if (IS_ERR(dst)) 
1068 + goto free_sk; 1069 + n = dst_neigh_lookup(dst, &ip6h->saddr); 1070 + } 1117 1071 if (!n) 1118 1072 goto free_sk; 1119 1073 ··· 1154 1072 tp = tcp_sk(newsk); 1155 1073 newinet = inet_sk(newsk); 1156 1074 1157 - newinet->inet_daddr = iph->saddr; 1158 - newinet->inet_rcv_saddr = iph->daddr; 1159 - newinet->inet_saddr = iph->daddr; 1075 + if (iph->version == 0x4) { 1076 + newinet->inet_daddr = iph->saddr; 1077 + newinet->inet_rcv_saddr = iph->daddr; 1078 + newinet->inet_saddr = iph->daddr; 1079 + } else { 1080 + struct tcp6_sock *newtcp6sk = (struct tcp6_sock *)newsk; 1081 + struct inet_request_sock *treq = inet_rsk(oreq); 1082 + struct ipv6_pinfo *newnp = inet6_sk(newsk); 1083 + struct ipv6_pinfo *np = inet6_sk(lsk); 1084 + 1085 + inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; 1086 + memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 1087 + newsk->sk_v6_daddr = treq->ir_v6_rmt_addr; 1088 + newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr; 1089 + inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr; 1090 + newnp->ipv6_fl_list = NULL; 1091 + newnp->pktoptions = NULL; 1092 + newsk->sk_bound_dev_if = treq->ir_iif; 1093 + newinet->inet_opt = NULL; 1094 + newinet->inet_daddr = LOOPBACK4_IPV6; 1095 + newinet->inet_saddr = LOOPBACK4_IPV6; 1096 + } 1160 1097 1161 1098 oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid)); 1162 1099 sk_setup_caps(newsk, dst); ··· 1257 1156 struct sk_buff *reply_skb; 1258 1157 struct chtls_sock *csk; 1259 1158 struct chtls_dev *cdev; 1159 + struct ipv6hdr *ip6h; 1260 1160 struct tcphdr *tcph; 1261 1161 struct sock *newsk; 1262 1162 struct ethhdr *eh; ··· 1298 1196 if (sk_acceptq_is_full(sk)) 1299 1197 goto reject; 1300 1198 1301 - oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true); 1199 + 1200 + eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len)); 1201 + if (eth_hdr_len == ETH_HLEN) { 1202 + eh = (struct ethhdr *)(req + 1); 1203 + iph = (struct iphdr *)(eh + 1); 1204 + ip6h = (struct ipv6hdr *)(eh + 1); 1205 + network_hdr = (void *)(eh + 1); 1206 + } else 
{ 1207 + vlan_eh = (struct vlan_ethhdr *)(req + 1); 1208 + iph = (struct iphdr *)(vlan_eh + 1); 1209 + ip6h = (struct ipv6hdr *)(vlan_eh + 1); 1210 + network_hdr = (void *)(vlan_eh + 1); 1211 + } 1212 + 1213 + if (iph->version == 0x4) { 1214 + tcph = (struct tcphdr *)(iph + 1); 1215 + skb_set_network_header(skb, (void *)iph - (void *)req); 1216 + oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true); 1217 + } else { 1218 + tcph = (struct tcphdr *)(ip6h + 1); 1219 + skb_set_network_header(skb, (void *)ip6h - (void *)req); 1220 + oreq = inet_reqsk_alloc(&chtls_rsk_opsv6, sk, false); 1221 + } 1222 + 1302 1223 if (!oreq) 1303 1224 goto reject; 1304 1225 ··· 1331 1206 oreq->mss = 0; 1332 1207 oreq->ts_recent = 0; 1333 1208 1334 - eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len)); 1335 - if (eth_hdr_len == ETH_HLEN) { 1336 - eh = (struct ethhdr *)(req + 1); 1337 - iph = (struct iphdr *)(eh + 1); 1338 - network_hdr = (void *)(eh + 1); 1339 - } else { 1340 - vlan_eh = (struct vlan_ethhdr *)(req + 1); 1341 - iph = (struct iphdr *)(vlan_eh + 1); 1342 - network_hdr = (void *)(vlan_eh + 1); 1343 - } 1344 - if (iph->version != 0x4) 1345 - goto free_oreq; 1346 - 1347 - tcph = (struct tcphdr *)(iph + 1); 1348 - skb_set_network_header(skb, (void *)iph - (void *)req); 1349 - 1350 1209 tcp_rsk(oreq)->tfo_listener = false; 1351 1210 tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq); 1352 1211 chtls_set_req_port(oreq, tcph->source, tcph->dest); 1353 - chtls_set_req_addr(oreq, iph->daddr, iph->saddr); 1354 - ip_dsfield = ipv4_get_dsfield(iph); 1212 + if (iph->version == 0x4) { 1213 + chtls_set_req_addr(oreq, iph->daddr, iph->saddr); 1214 + ip_dsfield = ipv4_get_dsfield(iph); 1215 + } else { 1216 + inet_rsk(oreq)->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; 1217 + inet_rsk(oreq)->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; 1218 + ip_dsfield = ipv6_get_dsfield(ipv6_hdr(skb)); 1219 + } 1355 1220 if (req->tcpopt.wsf <= 14 && 1356 1221 sock_net(sk)->ipv4.sysctl_tcp_window_scaling) { 1357 1222 
inet_rsk(oreq)->wscale_ok = 1; ··· 1358 1243 1359 1244 newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev); 1360 1245 if (!newsk) 1361 - goto reject; 1246 + goto free_oreq; 1362 1247 1363 1248 if (chtls_get_module(newsk)) 1364 1249 goto reject;
+1
drivers/crypto/chelsio/chtls/chtls_cm.h
··· 79 79 80 80 typedef void (*defer_handler_t)(struct chtls_dev *dev, struct sk_buff *skb); 81 81 extern struct request_sock_ops chtls_rsk_ops; 82 + extern struct request_sock_ops chtls_rsk_opsv6; 82 83 83 84 struct deferred_skb_cb { 84 85 defer_handler_t handler;
+11 -3
drivers/crypto/chelsio/chtls/chtls_main.c
··· 13 13 #include <linux/net.h> 14 14 #include <linux/ip.h> 15 15 #include <linux/tcp.h> 16 + #include <net/ipv6.h> 17 + #include <net/transp_v6.h> 16 18 #include <net/tcp.h> 17 19 #include <net/tls.h> 18 20 ··· 32 30 33 31 static DEFINE_MUTEX(notify_mutex); 34 32 static RAW_NOTIFIER_HEAD(listen_notify_list); 35 - static struct proto chtls_cpl_prot; 36 - struct request_sock_ops chtls_rsk_ops; 33 + static struct proto chtls_cpl_prot, chtls_cpl_protv6; 34 + struct request_sock_ops chtls_rsk_ops, chtls_rsk_opsv6; 37 35 static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT; 38 36 39 37 static void register_listen_notifier(struct notifier_block *nb) ··· 588 586 589 587 void chtls_install_cpl_ops(struct sock *sk) 590 588 { 591 - sk->sk_prot = &chtls_cpl_prot; 589 + if (sk->sk_family == AF_INET) 590 + sk->sk_prot = &chtls_cpl_prot; 591 + else 592 + sk->sk_prot = &chtls_cpl_protv6; 592 593 } 593 594 594 595 static void __init chtls_init_ulp_ops(void) ··· 608 603 chtls_cpl_prot.recvmsg = chtls_recvmsg; 609 604 chtls_cpl_prot.setsockopt = chtls_setsockopt; 610 605 chtls_cpl_prot.getsockopt = chtls_getsockopt; 606 + chtls_cpl_protv6 = chtls_cpl_prot; 607 + chtls_init_rsk_ops(&chtls_cpl_protv6, &chtls_rsk_opsv6, 608 + &tcpv6_prot, PF_INET6); 611 609 } 612 610 613 611 static int __init chtls_register(void)
+1
net/ipv6/tcp_ipv6.c
··· 2121 2121 #endif 2122 2122 .diag_destroy = tcp_abort, 2123 2123 }; 2124 + EXPORT_SYMBOL_GPL(tcpv6_prot); 2124 2125 2125 2126 /* thinking of making this const? Don't. 2126 2127 * early_demux can change based on sysctl.