Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
tcp_ipv6.c at v2.6.25-rc5 (2199 lines, 55 kB)
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Socket used for sending RSTs and ACKs */
static struct socket *tcp6_socket;

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void	tcp_v6_send_check(struct sock *sk, int len,
				  struct sk_buff *skb);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#endif

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock
*icsk = inet_csk(sk); 124 struct ipv6_pinfo *np = inet6_sk(sk); 125 struct tcp_sock *tp = tcp_sk(sk); 126 struct in6_addr *saddr = NULL, *final_p = NULL, final; 127 struct flowi fl; 128 struct dst_entry *dst; 129 int addr_type; 130 int err; 131 132 if (addr_len < SIN6_LEN_RFC2133) 133 return -EINVAL; 134 135 if (usin->sin6_family != AF_INET6) 136 return(-EAFNOSUPPORT); 137 138 memset(&fl, 0, sizeof(fl)); 139 140 if (np->sndflow) { 141 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK; 142 IP6_ECN_flow_init(fl.fl6_flowlabel); 143 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) { 144 struct ip6_flowlabel *flowlabel; 145 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel); 146 if (flowlabel == NULL) 147 return -EINVAL; 148 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst); 149 fl6_sock_release(flowlabel); 150 } 151 } 152 153 /* 154 * connect() to INADDR_ANY means loopback (BSD'ism). 155 */ 156 157 if(ipv6_addr_any(&usin->sin6_addr)) 158 usin->sin6_addr.s6_addr[15] = 0x1; 159 160 addr_type = ipv6_addr_type(&usin->sin6_addr); 161 162 if(addr_type & IPV6_ADDR_MULTICAST) 163 return -ENETUNREACH; 164 165 if (addr_type&IPV6_ADDR_LINKLOCAL) { 166 if (addr_len >= sizeof(struct sockaddr_in6) && 167 usin->sin6_scope_id) { 168 /* If interface is set while binding, indices 169 * must coincide. 170 */ 171 if (sk->sk_bound_dev_if && 172 sk->sk_bound_dev_if != usin->sin6_scope_id) 173 return -EINVAL; 174 175 sk->sk_bound_dev_if = usin->sin6_scope_id; 176 } 177 178 /* Connect to link-local address requires an interface */ 179 if (!sk->sk_bound_dev_if) 180 return -EINVAL; 181 } 182 183 if (tp->rx_opt.ts_recent_stamp && 184 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) { 185 tp->rx_opt.ts_recent = 0; 186 tp->rx_opt.ts_recent_stamp = 0; 187 tp->write_seq = 0; 188 } 189 190 ipv6_addr_copy(&np->daddr, &usin->sin6_addr); 191 np->flow_label = fl.fl6_flowlabel; 192 193 /* 194 * TCP over IPv4 195 */ 196 197 if (addr_type == IPV6_ADDR_MAPPED) { 198 u32 exthdrlen = icsk->icsk_ext_hdr_len; 199 struct sockaddr_in sin; 200 201 SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); 202 203 if (__ipv6_only_sock(sk)) 204 return -ENETUNREACH; 205 206 sin.sin_family = AF_INET; 207 sin.sin_port = usin->sin6_port; 208 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; 209 210 icsk->icsk_af_ops = &ipv6_mapped; 211 sk->sk_backlog_rcv = tcp_v4_do_rcv; 212#ifdef CONFIG_TCP_MD5SIG 213 tp->af_specific = &tcp_sock_ipv6_mapped_specific; 214#endif 215 216 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); 217 218 if (err) { 219 icsk->icsk_ext_hdr_len = exthdrlen; 220 icsk->icsk_af_ops = &ipv6_specific; 221 sk->sk_backlog_rcv = tcp_v6_do_rcv; 222#ifdef CONFIG_TCP_MD5SIG 223 tp->af_specific = &tcp_sock_ipv6_specific; 224#endif 225 goto failure; 226 } else { 227 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), 228 inet->saddr); 229 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF), 230 inet->rcv_saddr); 231 } 232 233 return err; 234 } 235 236 if (!ipv6_addr_any(&np->rcv_saddr)) 237 saddr = &np->rcv_saddr; 238 239 fl.proto = IPPROTO_TCP; 240 ipv6_addr_copy(&fl.fl6_dst, &np->daddr); 241 ipv6_addr_copy(&fl.fl6_src, 242 (saddr ? 
saddr : &np->saddr)); 243 fl.oif = sk->sk_bound_dev_if; 244 fl.fl_ip_dport = usin->sin6_port; 245 fl.fl_ip_sport = inet->sport; 246 247 if (np->opt && np->opt->srcrt) { 248 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; 249 ipv6_addr_copy(&final, &fl.fl6_dst); 250 ipv6_addr_copy(&fl.fl6_dst, rt0->addr); 251 final_p = &final; 252 } 253 254 security_sk_classify_flow(sk, &fl); 255 256 err = ip6_dst_lookup(sk, &dst, &fl); 257 if (err) 258 goto failure; 259 if (final_p) 260 ipv6_addr_copy(&fl.fl6_dst, final_p); 261 262 if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) { 263 if (err == -EREMOTE) 264 err = ip6_dst_blackhole(sk, &dst, &fl); 265 if (err < 0) 266 goto failure; 267 } 268 269 if (saddr == NULL) { 270 saddr = &fl.fl6_src; 271 ipv6_addr_copy(&np->rcv_saddr, saddr); 272 } 273 274 /* set the source address */ 275 ipv6_addr_copy(&np->saddr, saddr); 276 inet->rcv_saddr = LOOPBACK4_IPV6; 277 278 sk->sk_gso_type = SKB_GSO_TCPV6; 279 __ip6_dst_store(sk, dst, NULL, NULL); 280 281 icsk->icsk_ext_hdr_len = 0; 282 if (np->opt) 283 icsk->icsk_ext_hdr_len = (np->opt->opt_flen + 284 np->opt->opt_nflen); 285 286 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 287 288 inet->dport = usin->sin6_port; 289 290 tcp_set_state(sk, TCP_SYN_SENT); 291 err = inet6_hash_connect(&tcp_death_row, sk); 292 if (err) 293 goto late_failure; 294 295 if (!tp->write_seq) 296 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, 297 np->daddr.s6_addr32, 298 inet->sport, 299 inet->dport); 300 301 err = tcp_connect(sk); 302 if (err) 303 goto late_failure; 304 305 return 0; 306 307late_failure: 308 tcp_set_state(sk, TCP_CLOSE); 309 __sk_dst_reset(sk); 310failure: 311 inet->dport = 0; 312 sk->sk_route_caps = 0; 313 return err; 314} 315 316static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 317 int type, int code, int offset, __be32 info) 318{ 319 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data; 320 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset); 321 struct ipv6_pinfo *np; 322 struct sock *sk; 323 int err; 324 struct tcp_sock *tp; 325 __u32 seq; 326 327 sk = inet6_lookup(skb->dev->nd_net, &tcp_hashinfo, &hdr->daddr, 328 th->dest, &hdr->saddr, th->source, skb->dev->ifindex); 329 330 if (sk == NULL) { 331 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); 332 return; 333 } 334 335 if (sk->sk_state == TCP_TIME_WAIT) { 336 inet_twsk_put(inet_twsk(sk)); 337 return; 338 } 339 340 bh_lock_sock(sk); 341 if (sock_owned_by_user(sk)) 342 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); 343 344 if (sk->sk_state == TCP_CLOSE) 345 goto out; 346 347 tp = tcp_sk(sk); 348 seq = ntohl(th->seq); 349 if (sk->sk_state != TCP_LISTEN && 350 !between(seq, tp->snd_una, tp->snd_nxt)) { 351 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); 352 goto out; 353 } 354 355 np = inet6_sk(sk); 356 357 if (type == ICMPV6_PKT_TOOBIG) { 358 struct dst_entry *dst = NULL; 359 360 if (sock_owned_by_user(sk)) 361 goto out; 362 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) 363 goto out; 364 365 /* icmp should have updated the destination cache entry */ 366 dst = __sk_dst_check(sk, np->dst_cookie); 367 368 if (dst == NULL) { 369 struct inet_sock *inet = inet_sk(sk); 370 struct flowi fl; 371 372 /* BUGGG_FUTURE: Again, it is not clear how 373 to handle rthdr case. Ignore this complexity 374 for now. 
375 */ 376 memset(&fl, 0, sizeof(fl)); 377 fl.proto = IPPROTO_TCP; 378 ipv6_addr_copy(&fl.fl6_dst, &np->daddr); 379 ipv6_addr_copy(&fl.fl6_src, &np->saddr); 380 fl.oif = sk->sk_bound_dev_if; 381 fl.fl_ip_dport = inet->dport; 382 fl.fl_ip_sport = inet->sport; 383 security_skb_classify_flow(skb, &fl); 384 385 if ((err = ip6_dst_lookup(sk, &dst, &fl))) { 386 sk->sk_err_soft = -err; 387 goto out; 388 } 389 390 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) { 391 sk->sk_err_soft = -err; 392 goto out; 393 } 394 395 } else 396 dst_hold(dst); 397 398 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { 399 tcp_sync_mss(sk, dst_mtu(dst)); 400 tcp_simple_retransmit(sk); 401 } /* else let the usual retransmit timer handle it */ 402 dst_release(dst); 403 goto out; 404 } 405 406 icmpv6_err_convert(type, code, &err); 407 408 /* Might be for an request_sock */ 409 switch (sk->sk_state) { 410 struct request_sock *req, **prev; 411 case TCP_LISTEN: 412 if (sock_owned_by_user(sk)) 413 goto out; 414 415 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr, 416 &hdr->saddr, inet6_iif(skb)); 417 if (!req) 418 goto out; 419 420 /* ICMPs are not backlogged, hence we cannot get 421 * an established socket here. 422 */ 423 BUG_TRAP(req->sk == NULL); 424 425 if (seq != tcp_rsk(req)->snt_isn) { 426 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); 427 goto out; 428 } 429 430 inet_csk_reqsk_queue_drop(sk, req, prev); 431 goto out; 432 433 case TCP_SYN_SENT: 434 case TCP_SYN_RECV: /* Cannot happen. 435 It can, it SYNs are crossed. --ANK */ 436 if (!sock_owned_by_user(sk)) { 437 sk->sk_err = err; 438 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ 439 440 tcp_done(sk); 441 } else 442 sk->sk_err_soft = err; 443 goto out; 444 } 445 446 if (!sock_owned_by_user(sk) && np->recverr) { 447 sk->sk_err = err; 448 sk->sk_error_report(sk); 449 } else 450 sk->sk_err_soft = err; 451 452out: 453 bh_unlock_sock(sk); 454 sock_put(sk); 455} 456 457 458static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, 459 struct dst_entry *dst) 460{ 461 struct inet6_request_sock *treq = inet6_rsk(req); 462 struct ipv6_pinfo *np = inet6_sk(sk); 463 struct sk_buff * skb; 464 struct ipv6_txoptions *opt = NULL; 465 struct in6_addr * final_p = NULL, final; 466 struct flowi fl; 467 int err = -1; 468 469 memset(&fl, 0, sizeof(fl)); 470 fl.proto = IPPROTO_TCP; 471 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); 472 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); 473 fl.fl6_flowlabel = 0; 474 fl.oif = treq->iif; 475 fl.fl_ip_dport = inet_rsk(req)->rmt_port; 476 fl.fl_ip_sport = inet_sk(sk)->sport; 477 security_req_classify_flow(req, &fl); 478 479 if (dst == NULL) { 480 opt = np->opt; 481 if (opt && opt->srcrt) { 482 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; 483 ipv6_addr_copy(&final, &fl.fl6_dst); 484 ipv6_addr_copy(&fl.fl6_dst, rt0->addr); 485 final_p = &final; 486 } 487 488 err = ip6_dst_lookup(sk, &dst, &fl); 489 if (err) 490 goto done; 491 if (final_p) 492 ipv6_addr_copy(&fl.fl6_dst, final_p); 493 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) 494 goto done; 495 } 496 497 skb = tcp_make_synack(sk, dst, req); 498 if (skb) { 499 struct tcphdr *th = tcp_hdr(skb); 500 501 th->check = tcp_v6_check(th, skb->len, 502 &treq->loc_addr, &treq->rmt_addr, 503 csum_partial((char *)th, skb->len, skb->csum)); 504 505 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); 506 err = ip6_xmit(sk, skb, &fl, opt, 0); 507 err = net_xmit_eval(err); 508 } 509 510done: 511 if (opt && opt != np->opt) 512 sock_kfree_s(sk, 
opt, opt->tot_len); 513 dst_release(dst); 514 return err; 515} 516 517static void tcp_v6_reqsk_destructor(struct request_sock *req) 518{ 519 if (inet6_rsk(req)->pktopts) 520 kfree_skb(inet6_rsk(req)->pktopts); 521} 522 523#ifdef CONFIG_TCP_MD5SIG 524static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, 525 struct in6_addr *addr) 526{ 527 struct tcp_sock *tp = tcp_sk(sk); 528 int i; 529 530 BUG_ON(tp == NULL); 531 532 if (!tp->md5sig_info || !tp->md5sig_info->entries6) 533 return NULL; 534 535 for (i = 0; i < tp->md5sig_info->entries6; i++) { 536 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0) 537 return &tp->md5sig_info->keys6[i].base; 538 } 539 return NULL; 540} 541 542static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk, 543 struct sock *addr_sk) 544{ 545 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr); 546} 547 548static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk, 549 struct request_sock *req) 550{ 551 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr); 552} 553 554static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, 555 char *newkey, u8 newkeylen) 556{ 557 /* Add key to the list */ 558 struct tcp_md5sig_key *key; 559 struct tcp_sock *tp = tcp_sk(sk); 560 struct tcp6_md5sig_key *keys; 561 562 key = tcp_v6_md5_do_lookup(sk, peer); 563 if (key) { 564 /* modify existing entry - just update that one */ 565 kfree(key->key); 566 key->key = newkey; 567 key->keylen = newkeylen; 568 } else { 569 /* reallocate new list if current one is full. */ 570 if (!tp->md5sig_info) { 571 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC); 572 if (!tp->md5sig_info) { 573 kfree(newkey); 574 return -ENOMEM; 575 } 576 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 577 } 578 if (tcp_alloc_md5sig_pool() == NULL) { 579 kfree(newkey); 580 return -ENOMEM; 581 } 582 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) { 583 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) * 584 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC); 585 586 if (!keys) { 587 tcp_free_md5sig_pool(); 588 kfree(newkey); 589 return -ENOMEM; 590 } 591 592 if (tp->md5sig_info->entries6) 593 memmove(keys, tp->md5sig_info->keys6, 594 (sizeof (tp->md5sig_info->keys6[0]) * 595 tp->md5sig_info->entries6)); 596 597 kfree(tp->md5sig_info->keys6); 598 tp->md5sig_info->keys6 = keys; 599 tp->md5sig_info->alloced6++; 600 } 601 602 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr, 603 peer); 604 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey; 605 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen; 606 607 tp->md5sig_info->entries6++; 608 } 609 return 0; 610} 611 612static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk, 613 u8 *newkey, __u8 newkeylen) 614{ 615 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr, 616 newkey, newkeylen); 617} 618 619static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer) 620{ 621 struct tcp_sock *tp = tcp_sk(sk); 622 int i; 623 624 for (i = 0; i < tp->md5sig_info->entries6; i++) { 625 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) { 626 /* Free the key */ 627 kfree(tp->md5sig_info->keys6[i].base.key); 628 tp->md5sig_info->entries6--; 629 630 if (tp->md5sig_info->entries6 == 0) { 631 kfree(tp->md5sig_info->keys6); 632 tp->md5sig_info->keys6 = NULL; 633 tp->md5sig_info->alloced6 = 0; 634 } else { 635 /* shrink the database */ 636 if (tp->md5sig_info->entries6 != i) 637 memmove(&tp->md5sig_info->keys6[i], 638 
&tp->md5sig_info->keys6[i+1], 639 (tp->md5sig_info->entries6 - i) 640 * sizeof (tp->md5sig_info->keys6[0])); 641 } 642 tcp_free_md5sig_pool(); 643 return 0; 644 } 645 } 646 return -ENOENT; 647} 648 649static void tcp_v6_clear_md5_list (struct sock *sk) 650{ 651 struct tcp_sock *tp = tcp_sk(sk); 652 int i; 653 654 if (tp->md5sig_info->entries6) { 655 for (i = 0; i < tp->md5sig_info->entries6; i++) 656 kfree(tp->md5sig_info->keys6[i].base.key); 657 tp->md5sig_info->entries6 = 0; 658 tcp_free_md5sig_pool(); 659 } 660 661 kfree(tp->md5sig_info->keys6); 662 tp->md5sig_info->keys6 = NULL; 663 tp->md5sig_info->alloced6 = 0; 664 665 if (tp->md5sig_info->entries4) { 666 for (i = 0; i < tp->md5sig_info->entries4; i++) 667 kfree(tp->md5sig_info->keys4[i].base.key); 668 tp->md5sig_info->entries4 = 0; 669 tcp_free_md5sig_pool(); 670 } 671 672 kfree(tp->md5sig_info->keys4); 673 tp->md5sig_info->keys4 = NULL; 674 tp->md5sig_info->alloced4 = 0; 675} 676 677static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, 678 int optlen) 679{ 680 struct tcp_md5sig cmd; 681 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr; 682 u8 *newkey; 683 684 if (optlen < sizeof(cmd)) 685 return -EINVAL; 686 687 if (copy_from_user(&cmd, optval, sizeof(cmd))) 688 return -EFAULT; 689 690 if (sin6->sin6_family != AF_INET6) 691 return -EINVAL; 692 693 if (!cmd.tcpm_keylen) { 694 if (!tcp_sk(sk)->md5sig_info) 695 return -ENOENT; 696 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) 697 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]); 698 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr); 699 } 700 701 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) 702 return -EINVAL; 703 704 if (!tcp_sk(sk)->md5sig_info) { 705 struct tcp_sock *tp = tcp_sk(sk); 706 struct tcp_md5sig_info *p; 707 708 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL); 709 if (!p) 710 return -ENOMEM; 711 712 tp->md5sig_info = p; 713 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 714 } 715 716 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); 717 if (!newkey) 718 return -ENOMEM; 719 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) { 720 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3], 721 newkey, cmd.tcpm_keylen); 722 } 723 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen); 724} 725 726static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, 727 struct in6_addr *saddr, 728 struct in6_addr *daddr, 729 struct tcphdr *th, int protocol, 730 unsigned int tcplen) 731{ 732 struct scatterlist sg[4]; 733 __u16 data_len; 734 int block = 0; 735 __sum16 cksum; 736 struct tcp_md5sig_pool *hp; 737 struct tcp6_pseudohdr *bp; 738 struct hash_desc *desc; 739 int err; 740 unsigned int nbytes = 0; 741 742 hp = tcp_get_md5sig_pool(); 743 if (!hp) { 744 printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__); 745 goto clear_hash_noput; 746 } 747 bp = &hp->md5_blk.ip6; 748 desc = &hp->md5_desc; 749 750 /* 1. TCP pseudo-header (RFC2460) */ 751 ipv6_addr_copy(&bp->saddr, saddr); 752 ipv6_addr_copy(&bp->daddr, daddr); 753 bp->len = htonl(tcplen); 754 bp->protocol = htonl(protocol); 755 756 sg_init_table(sg, 4); 757 758 sg_set_buf(&sg[block++], bp, sizeof(*bp)); 759 nbytes += sizeof(*bp); 760 761 /* 2. TCP header, excluding options */ 762 cksum = th->check; 763 th->check = 0; 764 sg_set_buf(&sg[block++], th, sizeof(*th)); 765 nbytes += sizeof(*th); 766 767 /* 3. 
TCP segment data (if any) */ 768 data_len = tcplen - (th->doff << 2); 769 if (data_len > 0) { 770 u8 *data = (u8 *)th + (th->doff << 2); 771 sg_set_buf(&sg[block++], data, data_len); 772 nbytes += data_len; 773 } 774 775 /* 4. shared key */ 776 sg_set_buf(&sg[block++], key->key, key->keylen); 777 nbytes += key->keylen; 778 779 sg_mark_end(&sg[block - 1]); 780 781 /* Now store the hash into the packet */ 782 err = crypto_hash_init(desc); 783 if (err) { 784 printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__); 785 goto clear_hash; 786 } 787 err = crypto_hash_update(desc, sg, nbytes); 788 if (err) { 789 printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__); 790 goto clear_hash; 791 } 792 err = crypto_hash_final(desc, md5_hash); 793 if (err) { 794 printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__); 795 goto clear_hash; 796 } 797 798 /* Reset header, and free up the crypto */ 799 tcp_put_md5sig_pool(); 800 th->check = cksum; 801out: 802 return 0; 803clear_hash: 804 tcp_put_md5sig_pool(); 805clear_hash_noput: 806 memset(md5_hash, 0, 16); 807 goto out; 808} 809 810static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, 811 struct sock *sk, 812 struct dst_entry *dst, 813 struct request_sock *req, 814 struct tcphdr *th, int protocol, 815 unsigned int tcplen) 816{ 817 struct in6_addr *saddr, *daddr; 818 819 if (sk) { 820 saddr = &inet6_sk(sk)->saddr; 821 daddr = &inet6_sk(sk)->daddr; 822 } else { 823 saddr = &inet6_rsk(req)->loc_addr; 824 daddr = &inet6_rsk(req)->rmt_addr; 825 } 826 return tcp_v6_do_calc_md5_hash(md5_hash, key, 827 saddr, daddr, 828 th, protocol, tcplen); 829} 830 831static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) 832{ 833 __u8 *hash_location = NULL; 834 struct tcp_md5sig_key *hash_expected; 835 struct ipv6hdr *ip6h = ipv6_hdr(skb); 836 struct tcphdr *th = tcp_hdr(skb); 837 int length = (th->doff << 2) - sizeof (*th); 838 int genhash; 839 u8 *ptr; 840 u8 newhash[16]; 841 842 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr); 843 844 /* If the TCP option is too short, we can short cut */ 845 if (length < TCPOLEN_MD5SIG) 846 return hash_expected ? 1 : 0; 847 848 /* parse options */ 849 ptr = (u8*)(th + 1); 850 while (length > 0) { 851 int opcode = *ptr++; 852 int opsize; 853 854 switch(opcode) { 855 case TCPOPT_EOL: 856 goto done_opts; 857 case TCPOPT_NOP: 858 length--; 859 continue; 860 default: 861 opsize = *ptr++; 862 if (opsize < 2 || opsize > length) 863 goto done_opts; 864 if (opcode == TCPOPT_MD5SIG) { 865 hash_location = ptr; 866 goto done_opts; 867 } 868 } 869 ptr += opsize - 2; 870 length -= opsize; 871 } 872 873done_opts: 874 /* do we have a hash as expected? 
*/ 875 if (!hash_expected) { 876 if (!hash_location) 877 return 0; 878 if (net_ratelimit()) { 879 printk(KERN_INFO "MD5 Hash NOT expected but found " 880 "(" NIP6_FMT ", %u)->" 881 "(" NIP6_FMT ", %u)\n", 882 NIP6(ip6h->saddr), ntohs(th->source), 883 NIP6(ip6h->daddr), ntohs(th->dest)); 884 } 885 return 1; 886 } 887 888 if (!hash_location) { 889 if (net_ratelimit()) { 890 printk(KERN_INFO "MD5 Hash expected but NOT found " 891 "(" NIP6_FMT ", %u)->" 892 "(" NIP6_FMT ", %u)\n", 893 NIP6(ip6h->saddr), ntohs(th->source), 894 NIP6(ip6h->daddr), ntohs(th->dest)); 895 } 896 return 1; 897 } 898 899 /* check the signature */ 900 genhash = tcp_v6_do_calc_md5_hash(newhash, 901 hash_expected, 902 &ip6h->saddr, &ip6h->daddr, 903 th, sk->sk_protocol, 904 skb->len); 905 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 906 if (net_ratelimit()) { 907 printk(KERN_INFO "MD5 Hash %s for " 908 "(" NIP6_FMT ", %u)->" 909 "(" NIP6_FMT ", %u)\n", 910 genhash ? "failed" : "mismatch", 911 NIP6(ip6h->saddr), ntohs(th->source), 912 NIP6(ip6h->daddr), ntohs(th->dest)); 913 } 914 return 1; 915 } 916 return 0; 917} 918#endif 919 920static struct request_sock_ops tcp6_request_sock_ops __read_mostly = { 921 .family = AF_INET6, 922 .obj_size = sizeof(struct tcp6_request_sock), 923 .rtx_syn_ack = tcp_v6_send_synack, 924 .send_ack = tcp_v6_reqsk_send_ack, 925 .destructor = tcp_v6_reqsk_destructor, 926 .send_reset = tcp_v6_send_reset 927}; 928 929#ifdef CONFIG_TCP_MD5SIG 930static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { 931 .md5_lookup = tcp_v6_reqsk_md5_lookup, 932}; 933#endif 934 935static struct timewait_sock_ops tcp6_timewait_sock_ops = { 936 .twsk_obj_size = sizeof(struct tcp6_timewait_sock), 937 .twsk_unique = tcp_twsk_unique, 938 .twsk_destructor= tcp_twsk_destructor, 939}; 940 941static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) 942{ 943 struct ipv6_pinfo *np = inet6_sk(sk); 944 struct tcphdr *th = tcp_hdr(skb); 945 946 if (skb->ip_summed == CHECKSUM_PARTIAL) { 947 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0); 948 skb->csum_start = skb_transport_header(skb) - skb->head; 949 skb->csum_offset = offsetof(struct tcphdr, check); 950 } else { 951 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 952 csum_partial((char *)th, th->doff<<2, 953 skb->csum)); 954 } 955} 956 957static int tcp_v6_gso_send_check(struct sk_buff *skb) 958{ 959 struct ipv6hdr *ipv6h; 960 struct tcphdr *th; 961 962 if (!pskb_may_pull(skb, sizeof(*th))) 963 return -EINVAL; 964 965 ipv6h = ipv6_hdr(skb); 966 th = tcp_hdr(skb); 967 968 th->check = 0; 969 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, 970 IPPROTO_TCP, 0); 971 skb->csum_start = skb_transport_header(skb) - skb->head; 972 skb->csum_offset = offsetof(struct tcphdr, check); 973 skb->ip_summed = CHECKSUM_PARTIAL; 974 return 0; 975} 976 977static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) 978{ 979 struct tcphdr *th = tcp_hdr(skb), *t1; 980 struct sk_buff *buff; 981 struct flowi fl; 982 unsigned int tot_len = sizeof(*th); 983#ifdef CONFIG_TCP_MD5SIG 984 struct tcp_md5sig_key *key; 985#endif 986 987 if (th->rst) 988 return; 989 990 if (!ipv6_unicast_destination(skb)) 991 return; 992 993#ifdef CONFIG_TCP_MD5SIG 994 if (sk) 995 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr); 996 else 997 key = NULL; 998 999 if (key) 1000 tot_len += TCPOLEN_MD5SIG_ALIGNED; 1001#endif 1002 1003 /* 1004 * We need to grab some memory, and put together an RST, 1005 * and 
then put it into the queue to be sent. 1006 */ 1007 1008 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, 1009 GFP_ATOMIC); 1010 if (buff == NULL) 1011 return; 1012 1013 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); 1014 1015 t1 = (struct tcphdr *) skb_push(buff, tot_len); 1016 1017 /* Swap the send and the receive. */ 1018 memset(t1, 0, sizeof(*t1)); 1019 t1->dest = th->source; 1020 t1->source = th->dest; 1021 t1->doff = tot_len / 4; 1022 t1->rst = 1; 1023 1024 if(th->ack) { 1025 t1->seq = th->ack_seq; 1026 } else { 1027 t1->ack = 1; 1028 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin 1029 + skb->len - (th->doff<<2)); 1030 } 1031 1032#ifdef CONFIG_TCP_MD5SIG 1033 if (key) { 1034 __be32 *opt = (__be32*)(t1 + 1); 1035 opt[0] = htonl((TCPOPT_NOP << 24) | 1036 (TCPOPT_NOP << 16) | 1037 (TCPOPT_MD5SIG << 8) | 1038 TCPOLEN_MD5SIG); 1039 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key, 1040 &ipv6_hdr(skb)->daddr, 1041 &ipv6_hdr(skb)->saddr, 1042 t1, IPPROTO_TCP, tot_len); 1043 } 1044#endif 1045 1046 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0); 1047 1048 memset(&fl, 0, sizeof(fl)); 1049 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr); 1050 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr); 1051 1052 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst, 1053 sizeof(*t1), IPPROTO_TCP, 1054 buff->csum); 1055 1056 fl.proto = IPPROTO_TCP; 1057 fl.oif = inet6_iif(skb); 1058 fl.fl_ip_dport = t1->dest; 1059 fl.fl_ip_sport = t1->source; 1060 security_skb_classify_flow(skb, &fl); 1061 1062 /* sk = NULL, but it is safe for now. RST socket required. */ 1063 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) { 1064 1065 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { 1066 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0); 1067 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); 1068 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); 1069 return; 1070 } 1071 } 1072 1073 kfree_skb(buff); 1074} 1075 1076static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, 1077 struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) 1078{ 1079 struct tcphdr *th = tcp_hdr(skb), *t1; 1080 struct sk_buff *buff; 1081 struct flowi fl; 1082 unsigned int tot_len = sizeof(struct tcphdr); 1083 __be32 *topt; 1084#ifdef CONFIG_TCP_MD5SIG 1085 struct tcp_md5sig_key *key; 1086 struct tcp_md5sig_key tw_key; 1087#endif 1088 1089#ifdef CONFIG_TCP_MD5SIG 1090 if (!tw && skb->sk) { 1091 key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr); 1092 } else if (tw && tw->tw_md5_keylen) { 1093 tw_key.key = tw->tw_md5_key; 1094 tw_key.keylen = tw->tw_md5_keylen; 1095 key = &tw_key; 1096 } else { 1097 key = NULL; 1098 } 1099#endif 1100 1101 if (ts) 1102 tot_len += TCPOLEN_TSTAMP_ALIGNED; 1103#ifdef CONFIG_TCP_MD5SIG 1104 if (key) 1105 tot_len += TCPOLEN_MD5SIG_ALIGNED; 1106#endif 1107 1108 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, 1109 GFP_ATOMIC); 1110 if (buff == NULL) 1111 return; 1112 1113 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); 1114 1115 t1 = (struct tcphdr *) skb_push(buff,tot_len); 1116 1117 /* Swap the send and the receive. 
*/ 1118 memset(t1, 0, sizeof(*t1)); 1119 t1->dest = th->source; 1120 t1->source = th->dest; 1121 t1->doff = tot_len/4; 1122 t1->seq = htonl(seq); 1123 t1->ack_seq = htonl(ack); 1124 t1->ack = 1; 1125 t1->window = htons(win); 1126 1127 topt = (__be32 *)(t1 + 1); 1128 1129 if (ts) { 1130 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 1131 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); 1132 *topt++ = htonl(tcp_time_stamp); 1133 *topt = htonl(ts); 1134 } 1135 1136#ifdef CONFIG_TCP_MD5SIG 1137 if (key) { 1138 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 1139 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); 1140 tcp_v6_do_calc_md5_hash((__u8 *)topt, key, 1141 &ipv6_hdr(skb)->daddr, 1142 &ipv6_hdr(skb)->saddr, 1143 t1, IPPROTO_TCP, tot_len); 1144 } 1145#endif 1146 1147 buff->csum = csum_partial((char *)t1, tot_len, 0); 1148 1149 memset(&fl, 0, sizeof(fl)); 1150 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr); 1151 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr); 1152 1153 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst, 1154 tot_len, IPPROTO_TCP, 1155 buff->csum); 1156 1157 fl.proto = IPPROTO_TCP; 1158 fl.oif = inet6_iif(skb); 1159 fl.fl_ip_dport = t1->dest; 1160 fl.fl_ip_sport = t1->source; 1161 security_skb_classify_flow(skb, &fl); 1162 1163 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) { 1164 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { 1165 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0); 1166 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); 1167 return; 1168 } 1169 } 1170 1171 kfree_skb(buff); 1172} 1173 1174static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) 1175{ 1176 struct inet_timewait_sock *tw = inet_twsk(sk); 1177 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); 1178 1179 tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 1180 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 1181 tcptw->tw_ts_recent); 1182 1183 inet_twsk_put(tw); 1184} 1185 1186static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) 1187{ 1188 tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent); 1189} 1190 1191 1192static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) 1193{ 1194 struct request_sock *req, **prev; 1195 const struct tcphdr *th = tcp_hdr(skb); 1196 struct sock *nsk; 1197 1198 /* Find possible connection requests. */ 1199 req = inet6_csk_search_req(sk, &prev, th->source, 1200 &ipv6_hdr(skb)->saddr, 1201 &ipv6_hdr(skb)->daddr, inet6_iif(skb)); 1202 if (req) 1203 return tcp_check_req(sk, skb, req, prev); 1204 1205 nsk = __inet6_lookup_established(sk->sk_net, &tcp_hashinfo, 1206 &ipv6_hdr(skb)->saddr, th->source, 1207 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb)); 1208 1209 if (nsk) { 1210 if (nsk->sk_state != TCP_TIME_WAIT) { 1211 bh_lock_sock(nsk); 1212 return nsk; 1213 } 1214 inet_twsk_put(inet_twsk(nsk)); 1215 return NULL; 1216 } 1217 1218#if 0 /*def CONFIG_SYN_COOKIES*/ 1219 if (!th->rst && !th->syn && th->ack) 1220 sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt)); 1221#endif 1222 return sk; 1223} 1224 1225/* FIXME: this is substantially similar to the ipv4 code. 1226 * Can some kind of merge be done? 
-- erics 1227 */ 1228static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) 1229{ 1230 struct inet6_request_sock *treq; 1231 struct ipv6_pinfo *np = inet6_sk(sk); 1232 struct tcp_options_received tmp_opt; 1233 struct tcp_sock *tp = tcp_sk(sk); 1234 struct request_sock *req = NULL; 1235 __u32 isn = TCP_SKB_CB(skb)->when; 1236 1237 if (skb->protocol == htons(ETH_P_IP)) 1238 return tcp_v4_conn_request(sk, skb); 1239 1240 if (!ipv6_unicast_destination(skb)) 1241 goto drop; 1242 1243 /* 1244 * There are no SYN attacks on IPv6, yet... 1245 */ 1246 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1247 if (net_ratelimit()) 1248 printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n"); 1249 goto drop; 1250 } 1251 1252 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 1253 goto drop; 1254 1255 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 1256 if (req == NULL) 1257 goto drop; 1258 1259#ifdef CONFIG_TCP_MD5SIG 1260 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops; 1261#endif 1262 1263 tcp_clear_options(&tmp_opt); 1264 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 1265 tmp_opt.user_mss = tp->rx_opt.user_mss; 1266 1267 tcp_parse_options(skb, &tmp_opt, 0); 1268 1269 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; 1270 tcp_openreq_init(req, &tmp_opt, skb); 1271 1272 treq = inet6_rsk(req); 1273 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); 1274 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); 1275 TCP_ECN_create_request(req, tcp_hdr(skb)); 1276 treq->pktopts = NULL; 1277 if (ipv6_opt_accepted(sk, skb) || 1278 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || 1279 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { 1280 atomic_inc(&skb->users); 1281 treq->pktopts = skb; 1282 } 1283 treq->iif = sk->sk_bound_dev_if; 1284 1285 /* So that link locals have meaning */ 1286 if (!sk->sk_bound_dev_if && 1287 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) 1288 treq->iif = inet6_iif(skb); 1289 1290 if (isn == 0) 1291 isn = tcp_v6_init_sequence(skb); 1292 1293 tcp_rsk(req)->snt_isn = isn; 1294 1295 security_inet_conn_request(sk, skb, req); 1296 1297 if (tcp_v6_send_synack(sk, req, NULL)) 1298 goto drop; 1299 1300 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1301 return 0; 1302 1303drop: 1304 if (req) 1305 reqsk_free(req); 1306 1307 return 0; /* don't send reset */ 1308} 1309 1310static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, 1311 struct request_sock *req, 1312 struct dst_entry *dst) 1313{ 1314 struct inet6_request_sock *treq = inet6_rsk(req); 1315 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); 1316 struct tcp6_sock *newtcp6sk; 1317 struct inet_sock *newinet; 1318 struct tcp_sock *newtp; 1319 struct sock *newsk; 1320 struct ipv6_txoptions *opt; 1321#ifdef CONFIG_TCP_MD5SIG 1322 struct tcp_md5sig_key *key; 1323#endif 1324 1325 if (skb->protocol == htons(ETH_P_IP)) { 1326 /* 1327 * v6 mapped 1328 */ 1329 1330 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst); 1331 1332 if (newsk == NULL) 1333 return NULL; 1334 1335 newtcp6sk = (struct tcp6_sock *)newsk; 1336 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; 1337 1338 newinet = inet_sk(newsk); 1339 newnp = inet6_sk(newsk); 1340 newtp = tcp_sk(newsk); 1341 1342 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 1343 1344 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF), 1345 newinet->daddr); 1346 1347 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF), 1348 newinet->saddr); 1349 1350 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); 
1351 1352 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; 1353 newsk->sk_backlog_rcv = tcp_v4_do_rcv; 1354#ifdef CONFIG_TCP_MD5SIG 1355 newtp->af_specific = &tcp_sock_ipv6_mapped_specific; 1356#endif 1357 1358 newnp->pktoptions = NULL; 1359 newnp->opt = NULL; 1360 newnp->mcast_oif = inet6_iif(skb); 1361 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 1362 1363 /* 1364 * No need to charge this sock to the relevant IPv6 refcnt debug socks count 1365 * here, tcp_create_openreq_child now does this for us, see the comment in 1366 * that function for the gory details. -acme 1367 */ 1368 1369 /* It is tricky place. Until this moment IPv4 tcp 1370 worked with IPv6 icsk.icsk_af_ops. 1371 Sync it now. 1372 */ 1373 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); 1374 1375 return newsk; 1376 } 1377 1378 opt = np->opt; 1379 1380 if (sk_acceptq_is_full(sk)) 1381 goto out_overflow; 1382 1383 if (dst == NULL) { 1384 struct in6_addr *final_p = NULL, final; 1385 struct flowi fl; 1386 1387 memset(&fl, 0, sizeof(fl)); 1388 fl.proto = IPPROTO_TCP; 1389 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); 1390 if (opt && opt->srcrt) { 1391 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; 1392 ipv6_addr_copy(&final, &fl.fl6_dst); 1393 ipv6_addr_copy(&fl.fl6_dst, rt0->addr); 1394 final_p = &final; 1395 } 1396 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); 1397 fl.oif = sk->sk_bound_dev_if; 1398 fl.fl_ip_dport = inet_rsk(req)->rmt_port; 1399 fl.fl_ip_sport = inet_sk(sk)->sport; 1400 security_req_classify_flow(req, &fl); 1401 1402 if (ip6_dst_lookup(sk, &dst, &fl)) 1403 goto out; 1404 1405 if (final_p) 1406 ipv6_addr_copy(&fl.fl6_dst, final_p); 1407 1408 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0) 1409 goto out; 1410 } 1411 1412 newsk = tcp_create_openreq_child(sk, req, skb); 1413 if (newsk == NULL) 1414 goto out; 1415 1416 /* 1417 * No need to charge this sock to the relevant IPv6 refcnt debug socks 1418 * count here, tcp_create_openreq_child now does this for us, see the 1419 * comment in that function for the gory details. -acme 1420 */ 1421 1422 newsk->sk_gso_type = SKB_GSO_TCPV6; 1423 __ip6_dst_store(newsk, dst, NULL, NULL); 1424 1425 newtcp6sk = (struct tcp6_sock *)newsk; 1426 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; 1427 1428 newtp = tcp_sk(newsk); 1429 newinet = inet_sk(newsk); 1430 newnp = inet6_sk(newsk); 1431 1432 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 1433 1434 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr); 1435 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr); 1436 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr); 1437 newsk->sk_bound_dev_if = treq->iif; 1438 1439 /* Now IPv6 options... 1440 1441 First: no IPv4 options. 1442 */ 1443 newinet->opt = NULL; 1444 newnp->ipv6_fl_list = NULL; 1445 1446 /* Clone RX bits */ 1447 newnp->rxopt.all = np->rxopt.all; 1448 1449 /* Clone pktoptions received with SYN */ 1450 newnp->pktoptions = NULL; 1451 if (treq->pktopts != NULL) { 1452 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC); 1453 kfree_skb(treq->pktopts); 1454 treq->pktopts = NULL; 1455 if (newnp->pktoptions) 1456 skb_set_owner_r(newnp->pktoptions, newsk); 1457 } 1458 newnp->opt = NULL; 1459 newnp->mcast_oif = inet6_iif(skb); 1460 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 1461 1462 /* Clone native IPv6 options from listening socket (if any) 1463 1464 Yes, keeping reference count would be much more clever, 1465 but we make one more one thing there: reattach optmem 1466 to newsk. 
1467 */ 1468 if (opt) { 1469 newnp->opt = ipv6_dup_options(newsk, opt); 1470 if (opt != np->opt) 1471 sock_kfree_s(sk, opt, opt->tot_len); 1472 } 1473 1474 inet_csk(newsk)->icsk_ext_hdr_len = 0; 1475 if (newnp->opt) 1476 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + 1477 newnp->opt->opt_flen); 1478 1479 tcp_mtup_init(newsk); 1480 tcp_sync_mss(newsk, dst_mtu(dst)); 1481 newtp->advmss = dst_metric(dst, RTAX_ADVMSS); 1482 tcp_initialize_rcv_mss(newsk); 1483 1484 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; 1485 1486#ifdef CONFIG_TCP_MD5SIG 1487 /* Copy over the MD5 key from the original socket */ 1488 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) { 1489 /* We're using one, so create a matching key 1490 * on the newsk structure. If we fail to get 1491 * memory, then we end up not copying the key 1492 * across. Shucks. 1493 */ 1494 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); 1495 if (newkey != NULL) 1496 tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr, 1497 newkey, key->keylen); 1498 } 1499#endif 1500 1501 __inet6_hash(newsk); 1502 inet_inherit_port(sk, newsk); 1503 1504 return newsk; 1505 1506out_overflow: 1507 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS); 1508out: 1509 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS); 1510 if (opt && opt != np->opt) 1511 sock_kfree_s(sk, opt, opt->tot_len); 1512 dst_release(dst); 1513 return NULL; 1514} 1515 1516static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) 1517{ 1518 if (skb->ip_summed == CHECKSUM_COMPLETE) { 1519 if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr, 1520 &ipv6_hdr(skb)->daddr, skb->csum)) { 1521 skb->ip_summed = CHECKSUM_UNNECESSARY; 1522 return 0; 1523 } 1524 } 1525 1526 skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len, 1527 &ipv6_hdr(skb)->saddr, 1528 &ipv6_hdr(skb)->daddr, 0)); 1529 1530 if (skb->len <= 76) { 1531 return __skb_checksum_complete(skb); 1532 } 1533 return 0; 1534} 1535 1536/* The socket must have it's spinlock held when we get 1537 * here. 1538 * 1539 * We have a potential double-lock case here, so even when 1540 * doing backlog processing we use the BH locking scheme. 1541 * This is because we cannot sleep with the original spinlock 1542 * held. 1543 */ 1544static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) 1545{ 1546 struct ipv6_pinfo *np = inet6_sk(sk); 1547 struct tcp_sock *tp; 1548 struct sk_buff *opt_skb = NULL; 1549 1550 /* Imagine: socket is IPv6. IPv4 packet arrives, 1551 goes to IPv4 receive handler and backlogged. 1552 From backlog it always goes here. Kerboom... 1553 Fortunately, tcp_rcv_established and rcv_established 1554 handle them correctly, but it is not case with 1555 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK 1556 */ 1557 1558 if (skb->protocol == htons(ETH_P_IP)) 1559 return tcp_v4_do_rcv(sk, skb); 1560 1561#ifdef CONFIG_TCP_MD5SIG 1562 if (tcp_v6_inbound_md5_hash (sk, skb)) 1563 goto discard; 1564#endif 1565 1566 if (sk_filter(sk, skb)) 1567 goto discard; 1568 1569 /* 1570 * socket locking is here for SMP purposes as backlog rcv 1571 * is currently called with bh processing disabled. 1572 */ 1573 1574 /* Do Stevens' IPV6_PKTOPTIONS. 1575 1576 Yes, guys, it is the only place in our code, where we 1577 may make it not affecting IPv4. 1578 The rest of code is protocol independent, 1579 and I do not like idea to uglify IPv4. 1580 1581 Actually, all the idea behind IPV6_PKTOPTIONS 1582 looks not very well thought. For now we latch 1583 options, received in the last packet, enqueued 1584 by tcp. 
Feel free to propose better solution. 1585 --ANK (980728) 1586 */ 1587 if (np->rxopt.all) 1588 opt_skb = skb_clone(skb, GFP_ATOMIC); 1589 1590 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1591 TCP_CHECK_TIMER(sk); 1592 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) 1593 goto reset; 1594 TCP_CHECK_TIMER(sk); 1595 if (opt_skb) 1596 goto ipv6_pktoptions; 1597 return 0; 1598 } 1599 1600 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb)) 1601 goto csum_err; 1602 1603 if (sk->sk_state == TCP_LISTEN) { 1604 struct sock *nsk = tcp_v6_hnd_req(sk, skb); 1605 if (!nsk) 1606 goto discard; 1607 1608 /* 1609 * Queue it on the new socket if the new socket is active, 1610 * otherwise we just shortcircuit this and continue with 1611 * the new socket.. 1612 */ 1613 if(nsk != sk) { 1614 if (tcp_child_process(sk, nsk, skb)) 1615 goto reset; 1616 if (opt_skb) 1617 __kfree_skb(opt_skb); 1618 return 0; 1619 } 1620 } 1621 1622 TCP_CHECK_TIMER(sk); 1623 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) 1624 goto reset; 1625 TCP_CHECK_TIMER(sk); 1626 if (opt_skb) 1627 goto ipv6_pktoptions; 1628 return 0; 1629 1630reset: 1631 tcp_v6_send_reset(sk, skb); 1632discard: 1633 if (opt_skb) 1634 __kfree_skb(opt_skb); 1635 kfree_skb(skb); 1636 return 0; 1637csum_err: 1638 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1639 goto discard; 1640 1641 1642ipv6_pktoptions: 1643 /* Do you ask, what is it? 1644 1645 1. skb was enqueued by tcp. 1646 2. skb is added to tail of read queue, rather than out of order. 1647 3. socket is not in passive state. 1648 4. Finally, it really contains options, which user wants to receive. 1649 */ 1650 tp = tcp_sk(sk); 1651 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt && 1652 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { 1653 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo) 1654 np->mcast_oif = inet6_iif(opt_skb); 1655 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) 1656 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; 1657 if (ipv6_opt_accepted(sk, opt_skb)) { 1658 skb_set_owner_r(opt_skb, sk); 1659 opt_skb = xchg(&np->pktoptions, opt_skb); 1660 } else { 1661 __kfree_skb(opt_skb); 1662 opt_skb = xchg(&np->pktoptions, NULL); 1663 } 1664 } 1665 1666 if (opt_skb) 1667 kfree_skb(opt_skb); 1668 return 0; 1669} 1670 1671static int tcp_v6_rcv(struct sk_buff *skb) 1672{ 1673 struct tcphdr *th; 1674 struct sock *sk; 1675 int ret; 1676 1677 if (skb->pkt_type != PACKET_HOST) 1678 goto discard_it; 1679 1680 /* 1681 * Count it even if it's bad. 
1682 */ 1683 TCP_INC_STATS_BH(TCP_MIB_INSEGS); 1684 1685 if (!pskb_may_pull(skb, sizeof(struct tcphdr))) 1686 goto discard_it; 1687 1688 th = tcp_hdr(skb); 1689 1690 if (th->doff < sizeof(struct tcphdr)/4) 1691 goto bad_packet; 1692 if (!pskb_may_pull(skb, th->doff*4)) 1693 goto discard_it; 1694 1695 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb)) 1696 goto bad_packet; 1697 1698 th = tcp_hdr(skb); 1699 TCP_SKB_CB(skb)->seq = ntohl(th->seq); 1700 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + 1701 skb->len - th->doff*4); 1702 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); 1703 TCP_SKB_CB(skb)->when = 0; 1704 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb)); 1705 TCP_SKB_CB(skb)->sacked = 0; 1706 1707 sk = __inet6_lookup(skb->dev->nd_net, &tcp_hashinfo, 1708 &ipv6_hdr(skb)->saddr, th->source, 1709 &ipv6_hdr(skb)->daddr, ntohs(th->dest), 1710 inet6_iif(skb)); 1711 1712 if (!sk) 1713 goto no_tcp_socket; 1714 1715process: 1716 if (sk->sk_state == TCP_TIME_WAIT) 1717 goto do_time_wait; 1718 1719 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 1720 goto discard_and_relse; 1721 1722 if (sk_filter(sk, skb)) 1723 goto discard_and_relse; 1724 1725 skb->dev = NULL; 1726 1727 bh_lock_sock_nested(sk); 1728 ret = 0; 1729 if (!sock_owned_by_user(sk)) { 1730#ifdef CONFIG_NET_DMA 1731 struct tcp_sock *tp = tcp_sk(sk); 1732 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1733 tp->ucopy.dma_chan = get_softnet_dma(); 1734 if (tp->ucopy.dma_chan) 1735 ret = tcp_v6_do_rcv(sk, skb); 1736 else 1737#endif 1738 { 1739 if (!tcp_prequeue(sk, skb)) 1740 ret = tcp_v6_do_rcv(sk, skb); 1741 } 1742 } else 1743 sk_add_backlog(sk, skb); 1744 bh_unlock_sock(sk); 1745 1746 sock_put(sk); 1747 return ret ? -1 : 0; 1748 1749no_tcp_socket: 1750 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 1751 goto discard_it; 1752 1753 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { 1754bad_packet: 1755 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1756 } else { 1757 tcp_v6_send_reset(NULL, skb); 1758 } 1759 1760discard_it: 1761 1762 /* 1763 * Discard frame 1764 */ 1765 1766 kfree_skb(skb); 1767 return 0; 1768 1769discard_and_relse: 1770 sock_put(sk); 1771 goto discard_it; 1772 1773do_time_wait: 1774 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1775 inet_twsk_put(inet_twsk(sk)); 1776 goto discard_it; 1777 } 1778 1779 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { 1780 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1781 inet_twsk_put(inet_twsk(sk)); 1782 goto discard_it; 1783 } 1784 1785 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { 1786 case TCP_TW_SYN: 1787 { 1788 struct sock *sk2; 1789 1790 sk2 = inet6_lookup_listener(skb->dev->nd_net, &tcp_hashinfo, 1791 &ipv6_hdr(skb)->daddr, 1792 ntohs(th->dest), inet6_iif(skb)); 1793 if (sk2 != NULL) { 1794 struct inet_timewait_sock *tw = inet_twsk(sk); 1795 inet_twsk_deschedule(tw, &tcp_death_row); 1796 inet_twsk_put(tw); 1797 sk = sk2; 1798 goto process; 1799 } 1800 /* Fall through to ACK */ 1801 } 1802 case TCP_TW_ACK: 1803 tcp_v6_timewait_ack(sk, skb); 1804 break; 1805 case TCP_TW_RST: 1806 goto no_tcp_socket; 1807 case TCP_TW_SUCCESS:; 1808 } 1809 goto discard_it; 1810} 1811 1812static int tcp_v6_remember_stamp(struct sock *sk) 1813{ 1814 /* Alas, not yet... 
 */
	return 0;
}

static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	= tcp_v6_md5_lookup,
	.calc_md5_hash	= tcp_v6_calc_md5_hash,
	.md5_add	= tcp_v6_md5_add_func,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_calc_md5_hash,
	.md5_add	= tcp_v6_md5_add_func,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
1902 */ 1903 tp->snd_ssthresh = 0x7fffffff; 1904 tp->snd_cwnd_clamp = ~0; 1905 tp->mss_cache = 536; 1906 1907 tp->reordering = sysctl_tcp_reordering; 1908 1909 sk->sk_state = TCP_CLOSE; 1910 1911 icsk->icsk_af_ops = &ipv6_specific; 1912 icsk->icsk_ca_ops = &tcp_init_congestion_ops; 1913 icsk->icsk_sync_mss = tcp_sync_mss; 1914 sk->sk_write_space = sk_stream_write_space; 1915 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 1916 1917#ifdef CONFIG_TCP_MD5SIG 1918 tp->af_specific = &tcp_sock_ipv6_specific; 1919#endif 1920 1921 sk->sk_sndbuf = sysctl_tcp_wmem[1]; 1922 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; 1923 1924 atomic_inc(&tcp_sockets_allocated); 1925 1926 return 0; 1927} 1928 1929static int tcp_v6_destroy_sock(struct sock *sk) 1930{ 1931#ifdef CONFIG_TCP_MD5SIG 1932 /* Clean up the MD5 key list */ 1933 if (tcp_sk(sk)->md5sig_info) 1934 tcp_v6_clear_md5_list(sk); 1935#endif 1936 tcp_v4_destroy_sock(sk); 1937 return inet6_destroy_sock(sk); 1938} 1939 1940#ifdef CONFIG_PROC_FS 1941/* Proc filesystem TCPv6 sock list dumping. */ 1942static void get_openreq6(struct seq_file *seq, 1943 struct sock *sk, struct request_sock *req, int i, int uid) 1944{ 1945 int ttd = req->expires - jiffies; 1946 struct in6_addr *src = &inet6_rsk(req)->loc_addr; 1947 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr; 1948 1949 if (ttd < 0) 1950 ttd = 0; 1951 1952 seq_printf(seq, 1953 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 1954 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n", 1955 i, 1956 src->s6_addr32[0], src->s6_addr32[1], 1957 src->s6_addr32[2], src->s6_addr32[3], 1958 ntohs(inet_sk(sk)->sport), 1959 dest->s6_addr32[0], dest->s6_addr32[1], 1960 dest->s6_addr32[2], dest->s6_addr32[3], 1961 ntohs(inet_rsk(req)->rmt_port), 1962 TCP_SYN_RECV, 1963 0,0, /* could print option size, but that is af dependent. */ 1964 1, /* timers active (only the expire timer) */ 1965 jiffies_to_clock_t(ttd), 1966 req->retrans, 1967 uid, 1968 0, /* non standard timer */ 1969 0, /* open_requests have no inode */ 1970 0, req); 1971} 1972 1973static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) 1974{ 1975 struct in6_addr *dest, *src; 1976 __u16 destp, srcp; 1977 int timer_active; 1978 unsigned long timer_expires; 1979 struct inet_sock *inet = inet_sk(sp); 1980 struct tcp_sock *tp = tcp_sk(sp); 1981 const struct inet_connection_sock *icsk = inet_csk(sp); 1982 struct ipv6_pinfo *np = inet6_sk(sp); 1983 1984 dest = &np->daddr; 1985 src = &np->rcv_saddr; 1986 destp = ntohs(inet->dport); 1987 srcp = ntohs(inet->sport); 1988 1989 if (icsk->icsk_pending == ICSK_TIME_RETRANS) { 1990 timer_active = 1; 1991 timer_expires = icsk->icsk_timeout; 1992 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { 1993 timer_active = 4; 1994 timer_expires = icsk->icsk_timeout; 1995 } else if (timer_pending(&sp->sk_timer)) { 1996 timer_active = 2; 1997 timer_expires = sp->sk_timer.expires; 1998 } else { 1999 timer_active = 0; 2000 timer_expires = jiffies; 2001 } 2002 2003 seq_printf(seq, 2004 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 2005 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n", 2006 i, 2007 src->s6_addr32[0], src->s6_addr32[1], 2008 src->s6_addr32[2], src->s6_addr32[3], srcp, 2009 dest->s6_addr32[0], dest->s6_addr32[1], 2010 dest->s6_addr32[2], dest->s6_addr32[3], destp, 2011 sp->sk_state, 2012 tp->write_seq-tp->snd_una, 2013 (sp->sk_state == TCP_LISTEN) ? 
sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), 2014 timer_active, 2015 jiffies_to_clock_t(timer_expires - jiffies), 2016 icsk->icsk_retransmits, 2017 sock_i_uid(sp), 2018 icsk->icsk_probes_out, 2019 sock_i_ino(sp), 2020 atomic_read(&sp->sk_refcnt), sp, 2021 icsk->icsk_rto, 2022 icsk->icsk_ack.ato, 2023 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong, 2024 tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh 2025 ); 2026} 2027 2028static void get_timewait6_sock(struct seq_file *seq, 2029 struct inet_timewait_sock *tw, int i) 2030{ 2031 struct in6_addr *dest, *src; 2032 __u16 destp, srcp; 2033 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw); 2034 int ttd = tw->tw_ttd - jiffies; 2035 2036 if (ttd < 0) 2037 ttd = 0; 2038 2039 dest = &tw6->tw_v6_daddr; 2040 src = &tw6->tw_v6_rcv_saddr; 2041 destp = ntohs(tw->tw_dport); 2042 srcp = ntohs(tw->tw_sport); 2043 2044 seq_printf(seq, 2045 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 2046 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n", 2047 i, 2048 src->s6_addr32[0], src->s6_addr32[1], 2049 src->s6_addr32[2], src->s6_addr32[3], srcp, 2050 dest->s6_addr32[0], dest->s6_addr32[1], 2051 dest->s6_addr32[2], dest->s6_addr32[3], destp, 2052 tw->tw_substate, 0, 0, 2053 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, 2054 atomic_read(&tw->tw_refcnt), tw); 2055} 2056 2057static int tcp6_seq_show(struct seq_file *seq, void *v) 2058{ 2059 struct tcp_iter_state *st; 2060 2061 if (v == SEQ_START_TOKEN) { 2062 seq_puts(seq, 2063 " sl " 2064 "local_address " 2065 "remote_address " 2066 "st tx_queue rx_queue tr tm->when retrnsmt" 2067 " uid timeout inode\n"); 2068 goto out; 2069 } 2070 st = seq->private; 2071 2072 switch (st->state) { 2073 case TCP_SEQ_STATE_LISTENING: 2074 case TCP_SEQ_STATE_ESTABLISHED: 2075 get_tcp6_sock(seq, v, st->num); 2076 break; 2077 case TCP_SEQ_STATE_OPENREQ: 2078 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid); 2079 break; 2080 case TCP_SEQ_STATE_TIME_WAIT: 2081 get_timewait6_sock(seq, v, st->num); 2082 break; 2083 } 2084out: 2085 return 0; 2086} 2087 2088static struct file_operations tcp6_seq_fops; 2089static struct tcp_seq_afinfo tcp6_seq_afinfo = { 2090 .owner = THIS_MODULE, 2091 .name = "tcp6", 2092 .family = AF_INET6, 2093 .seq_show = tcp6_seq_show, 2094 .seq_fops = &tcp6_seq_fops, 2095}; 2096 2097int __init tcp6_proc_init(void) 2098{ 2099 return tcp_proc_register(&tcp6_seq_afinfo); 2100} 2101 2102void tcp6_proc_exit(void) 2103{ 2104 tcp_proc_unregister(&tcp6_seq_afinfo); 2105} 2106#endif 2107 2108DEFINE_PROTO_INUSE(tcpv6) 2109 2110struct proto tcpv6_prot = { 2111 .name = "TCPv6", 2112 .owner = THIS_MODULE, 2113 .close = tcp_close, 2114 .connect = tcp_v6_connect, 2115 .disconnect = tcp_disconnect, 2116 .accept = inet_csk_accept, 2117 .ioctl = tcp_ioctl, 2118 .init = tcp_v6_init_sock, 2119 .destroy = tcp_v6_destroy_sock, 2120 .shutdown = tcp_shutdown, 2121 .setsockopt = tcp_setsockopt, 2122 .getsockopt = tcp_getsockopt, 2123 .recvmsg = tcp_recvmsg, 2124 .backlog_rcv = tcp_v6_do_rcv, 2125 .hash = tcp_v6_hash, 2126 .unhash = inet_unhash, 2127 .get_port = inet_csk_get_port, 2128 .enter_memory_pressure = tcp_enter_memory_pressure, 2129 .sockets_allocated = &tcp_sockets_allocated, 2130 .memory_allocated = &tcp_memory_allocated, 2131 .memory_pressure = &tcp_memory_pressure, 2132 .orphan_count = &tcp_orphan_count, 2133 .sysctl_mem = sysctl_tcp_mem, 2134 .sysctl_wmem = sysctl_tcp_wmem, 2135 .sysctl_rmem = sysctl_tcp_rmem, 2136 .max_header = MAX_TCP_HEADER, 2137 .obj_size = sizeof(struct tcp6_sock), 
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	REF_PROTO_INUSE(tcpv6)
};

static struct inet6_protocol tcpv6_protocol = {
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.gso_send_check	= tcp_v6_gso_send_check,
	.gso_segment	= tcp_tso_segment,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_TCP,
	.prot		= &tcpv6_prot,
	.ops		= &inet6_stream_ops,
	.capability	= -1,
	.no_check	= 0,
	.flags		= INET_PROTOSW_PERMANENT |
			  INET_PROTOSW_ICSK,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6,
				       SOCK_RAW, IPPROTO_TCP);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
	goto out;
}

void tcpv6_exit(void)
{
	sock_release(tcp6_socket);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}