Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SOCK]: Introduce sk_clone

Out of tcp_create_openreq_child, will be used in
dccp_create_openreq_child, and is a nice sock function anyway.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Arnaldo Carvalho de Melo; committed by David S. Miller
87d11ceb c676270b

+79 -67
+2
include/net/sock.h
··· 737 737 unsigned int __nocast priority, 738 738 struct proto *prot, int zero_it); 739 739 extern void sk_free(struct sock *sk); 740 + extern struct sock *sk_clone(const struct sock *sk, 741 + const unsigned int __nocast priority); 740 742 741 743 extern struct sk_buff *sock_wmalloc(struct sock *sk, 742 744 unsigned long size, int force,
+74
net/core/sock.c
··· 700 700 module_put(owner); 701 701 } 702 702 703 + struct sock *sk_clone(const struct sock *sk, const unsigned int __nocast priority) 704 + { 705 + struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0); 706 + 707 + if (newsk != NULL) { 708 + struct sk_filter *filter; 709 + 710 + memcpy(newsk, sk, sk->sk_prot->obj_size); 711 + 712 + /* SANITY */ 713 + sk_node_init(&newsk->sk_node); 714 + sock_lock_init(newsk); 715 + bh_lock_sock(newsk); 716 + 717 + atomic_set(&newsk->sk_rmem_alloc, 0); 718 + atomic_set(&newsk->sk_wmem_alloc, 0); 719 + atomic_set(&newsk->sk_omem_alloc, 0); 720 + skb_queue_head_init(&newsk->sk_receive_queue); 721 + skb_queue_head_init(&newsk->sk_write_queue); 722 + 723 + rwlock_init(&newsk->sk_dst_lock); 724 + rwlock_init(&newsk->sk_callback_lock); 725 + 726 + newsk->sk_dst_cache = NULL; 727 + newsk->sk_wmem_queued = 0; 728 + newsk->sk_forward_alloc = 0; 729 + newsk->sk_send_head = NULL; 730 + newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; 731 + newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 732 + 733 + sock_reset_flag(newsk, SOCK_DONE); 734 + skb_queue_head_init(&newsk->sk_error_queue); 735 + 736 + filter = newsk->sk_filter; 737 + if (filter != NULL) 738 + sk_filter_charge(newsk, filter); 739 + 740 + if (unlikely(xfrm_sk_clone_policy(newsk))) { 741 + /* It is still raw copy of parent, so invalidate 742 + * destructor and make plain sk_free() */ 743 + newsk->sk_destruct = NULL; 744 + sk_free(newsk); 745 + newsk = NULL; 746 + goto out; 747 + } 748 + 749 + newsk->sk_err = 0; 750 + newsk->sk_priority = 0; 751 + atomic_set(&newsk->sk_refcnt, 2); 752 + 753 + /* 754 + * Increment the counter in the same struct proto as the master 755 + * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that 756 + * is the same as sk->sk_prot->socks, as this field was copied 757 + * with memcpy). 
758 + * 759 + * This _changes_ the previous behaviour, where 760 + * tcp_create_openreq_child always was incrementing the 761 + * equivalent to tcp_prot->socks (inet_sock_nr), so this have 762 + * to be taken into account in all callers. -acme 763 + */ 764 + sk_refcnt_debug_inc(newsk); 765 + newsk->sk_socket = NULL; 766 + newsk->sk_sleep = NULL; 767 + 768 + if (newsk->sk_prot->sockets_allocated) 769 + atomic_inc(newsk->sk_prot->sockets_allocated); 770 + } 771 + out: 772 + return newsk; 773 + } 774 + 775 + EXPORT_SYMBOL_GPL(sk_clone); 776 + 703 777 void __init sk_init(void) 704 778 { 705 779 if (num_physpages <= 4096) {
+3 -67
net/ipv4/tcp_minisocks.c
··· 599 599 */ 600 600 struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb) 601 601 { 602 - /* allocate the newsk from the same slab of the master sock, 603 - * if not, at sk_free time we'll try to free it from the wrong 604 - * slabcache (i.e. is it TCPv4 or v6?), this is handled thru sk->sk_prot -acme */ 605 - struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0); 602 + struct sock *newsk = sk_clone(sk, GFP_ATOMIC); 606 603 607 - if(newsk != NULL) { 604 + if (newsk != NULL) { 608 605 struct inet_request_sock *ireq = inet_rsk(req); 609 606 struct tcp_request_sock *treq = tcp_rsk(req); 610 607 struct inet_sock *newinet = inet_sk(newsk); 611 608 struct tcp_sock *newtp; 612 - struct sk_filter *filter; 613 609 614 - memcpy(newsk, sk, sizeof(struct tcp_sock)); 615 610 newsk->sk_state = TCP_SYN_RECV; 616 - 617 - /* SANITY */ 618 - sk_node_init(&newsk->sk_node); 619 611 newinet->bind_hash = NULL; 620 612 621 613 /* Clone the TCP header template */ 622 614 newinet->dport = ireq->rmt_port; 623 - 624 - sock_lock_init(newsk); 625 - bh_lock_sock(newsk); 626 - 627 - rwlock_init(&newsk->sk_dst_lock); 628 - newsk->sk_dst_cache = NULL; 629 - atomic_set(&newsk->sk_rmem_alloc, 0); 630 - skb_queue_head_init(&newsk->sk_receive_queue); 631 - atomic_set(&newsk->sk_wmem_alloc, 0); 632 - skb_queue_head_init(&newsk->sk_write_queue); 633 - atomic_set(&newsk->sk_omem_alloc, 0); 634 - newsk->sk_wmem_queued = 0; 635 - newsk->sk_forward_alloc = 0; 636 - 637 - sock_reset_flag(newsk, SOCK_DONE); 638 - newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 639 - newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; 640 - newsk->sk_send_head = NULL; 641 - rwlock_init(&newsk->sk_callback_lock); 642 - skb_queue_head_init(&newsk->sk_error_queue); 643 615 newsk->sk_write_space = sk_stream_write_space; 644 - 645 - if ((filter = newsk->sk_filter) != NULL) 646 - sk_filter_charge(newsk, filter); 647 -
648 - if (unlikely(xfrm_sk_clone_policy(newsk))) { 649 - /* It is still raw copy of parent, so invalidate 650 - * destructor and make plain sk_free() */ 651 - newsk->sk_destruct = NULL; 652 - sk_free(newsk); 653 - return NULL; 654 - } 655 616 656 617 /* Now setup tcp_sock */ 657 618 newtp = tcp_sk(newsk); 658 619 newtp->pred_flags = 0; 659 620 newtp->rcv_nxt = treq->rcv_isn + 1; 660 - newtp->snd_nxt = treq->snt_isn + 1; 661 - newtp->snd_una = treq->snt_isn + 1; 662 - newtp->snd_sml = treq->snt_isn + 1; 621 + newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1; 663 622 664 623 tcp_prequeue_init(newtp); 665 624 ··· 669 710 /* Deinitialize accept_queue to trap illegal accesses. */ 670 711 memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue)); 671 712 672 - /* Back to base struct sock members. */ 673 - newsk->sk_err = 0; 674 - newsk->sk_priority = 0; 675 - atomic_set(&newsk->sk_refcnt, 2); 676 - 677 - /* 678 - * Increment the counter in the same struct proto as the master 679 - * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that 680 - * is the same as sk->sk_prot->socks, as this field was copied 681 - * with memcpy), same rationale as the first comment in this 682 - * function. 683 - * 684 - * This _changes_ the previous behaviour, where 685 - * tcp_create_openreq_child always was incrementing the 686 - * equivalent to tcp_prot->socks (inet_sock_nr), so this have 687 - * to be taken into account in all callers. -acme 688 - */ 689 - sk_refcnt_debug_inc(newsk); 690 - 691 - atomic_inc(&tcp_sockets_allocated); 692 - 693 713 if (sock_flag(newsk, SOCK_KEEPOPEN)) 694 714 tcp_reset_keepalive_timer(newsk, 695 715 keepalive_time_when(newtp)); 696 - newsk->sk_socket = NULL; 697 - newsk->sk_sleep = NULL; 698 716 699 717 newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; 700 718 if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {