Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Phonet: allocate sock from accept syscall rather than soft IRQ

This moves most of the accept logic to process context like other
socket stacks do. Then we can use a few more common socket helpers
and simplify a bit.

Signed-off-by: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Rémi Denis-Courmont and committed by David S. Miller
f7ae8d59 44c9ab16

+127 -180
+0 -1
include/net/phonet/pep.h
··· 28 28 29 29 /* XXX: union-ify listening vs connected stuff ? */ 30 30 /* Listening socket stuff: */ 31 - struct hlist_head ackq; 32 31 struct hlist_head hlist; 33 32 34 33 /* Connected socket stuff: */
+123 -173
net/phonet/pep.c
··· 42 42 * TCP_ESTABLISHED connected pipe in enabled state 43 43 * 44 44 * pep_sock locking: 45 - * - sk_state, ackq, hlist: sock lock needed 45 + * - sk_state, hlist: sock lock needed 46 46 * - listener: read only 47 47 * - pipe_handle: read only 48 48 */ ··· 202 202 GFP_KERNEL); 203 203 } 204 204 205 - static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code) 205 + static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code, 206 + gfp_t priority) 206 207 { 207 208 static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ }; 208 209 WARN_ON(code == PN_PIPE_NO_ERROR); 209 - return pep_reply(sk, skb, code, data, sizeof(data), GFP_ATOMIC); 210 + return pep_reply(sk, skb, code, data, sizeof(data), priority); 210 211 } 211 212 212 213 /* Control requests are not sent by the pipe service and have a specific ··· 366 365 367 366 switch (hdr->message_id) { 368 367 case PNS_PEP_CONNECT_REQ: 369 - pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE); 368 + pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC); 370 369 break; 371 370 372 371 case PNS_PEP_DISCONNECT_REQ: ··· 575 574 576 575 sk->sk_state = TCP_SYN_RECV; 577 576 sk->sk_backlog_rcv = pipe_do_rcv; 578 - sk->sk_destruct = pipe_destruct; 579 577 pn->rx_credits = 0; 580 578 sk->sk_state_change(sk); 581 579 582 580 return pipe_handler_send_created_ind(sk); 583 581 } 584 582 #endif 585 - 586 - static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb) 587 - { 588 - struct sock *newsk; 589 - struct pep_sock *newpn, *pn = pep_sk(sk); 590 - struct pnpipehdr *hdr; 591 - struct sockaddr_pn dst, src; 592 - u16 peer_type; 593 - u8 pipe_handle, enabled, n_sb; 594 - u8 aligned = 0; 595 - 596 - if (!pskb_pull(skb, sizeof(*hdr) + 4)) 597 - return -EINVAL; 598 - 599 - hdr = pnp_hdr(skb); 600 - pipe_handle = hdr->pipe_handle; 601 - switch (hdr->state_after_connect) { 602 - case PN_PIPE_DISABLE: 603 - enabled = 0; 604 - break; 605 - case PN_PIPE_ENABLE: 606 - enabled = 1; 607 - break; 608 - 
default: 609 - pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM); 610 - return -EINVAL; 611 - } 612 - peer_type = hdr->other_pep_type << 8; 613 - 614 - /* Parse sub-blocks (options) */ 615 - n_sb = hdr->data[4]; 616 - while (n_sb > 0) { 617 - u8 type, buf[1], len = sizeof(buf); 618 - const u8 *data = pep_get_sb(skb, &type, &len, buf); 619 - 620 - if (data == NULL) 621 - return -EINVAL; 622 - switch (type) { 623 - case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE: 624 - if (len < 1) 625 - return -EINVAL; 626 - peer_type = (peer_type & 0xff00) | data[0]; 627 - break; 628 - case PN_PIPE_SB_ALIGNED_DATA: 629 - aligned = data[0] != 0; 630 - break; 631 - } 632 - n_sb--; 633 - } 634 - 635 - skb = skb_clone(skb, GFP_ATOMIC); 636 - if (!skb) 637 - return -ENOMEM; 638 - 639 - /* Create a new to-be-accepted sock */ 640 - newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_ATOMIC, sk->sk_prot); 641 - if (!newsk) { 642 - kfree_skb(skb); 643 - return -ENOMEM; 644 - } 645 - sock_init_data(NULL, newsk); 646 - newsk->sk_state = TCP_SYN_RECV; 647 - newsk->sk_backlog_rcv = pipe_do_rcv; 648 - newsk->sk_protocol = sk->sk_protocol; 649 - newsk->sk_destruct = pipe_destruct; 650 - 651 - newpn = pep_sk(newsk); 652 - pn_skb_get_dst_sockaddr(skb, &dst); 653 - pn_skb_get_src_sockaddr(skb, &src); 654 - newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst); 655 - newpn->pn_sk.dobject = pn_sockaddr_get_object(&src); 656 - newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst); 657 - skb_queue_head_init(&newpn->ctrlreq_queue); 658 - newpn->pipe_handle = pipe_handle; 659 - atomic_set(&newpn->tx_credits, 0); 660 - newpn->peer_type = peer_type; 661 - newpn->rx_credits = 0; 662 - newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL; 663 - newpn->init_enable = enabled; 664 - newpn->aligned = aligned; 665 - 666 - BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue)); 667 - skb_queue_head(&newsk->sk_receive_queue, skb); 668 - if (!sock_flag(sk, SOCK_DEAD)) 669 - sk->sk_data_ready(sk, 0); 670 - 671 - sk_acceptq_added(sk); 
672 - sk_add_node(newsk, &pn->ackq); 673 - return 0; 674 - } 675 583 676 584 /* Listening sock must be locked */ 677 585 static struct sock *pep_find_pipe(const struct hlist_head *hlist, ··· 636 726 if (sknode) 637 727 return sk_receive_skb(sknode, skb, 1); 638 728 639 - /* Look for a pipe handle pending accept */ 640 - sknode = pep_find_pipe(&pn->ackq, &dst, pipe_handle); 641 - if (sknode) { 642 - sock_put(sknode); 643 - if (net_ratelimit()) 644 - printk(KERN_WARNING"Phonet unconnected PEP ignored"); 645 - goto drop; 646 - } 647 - 648 729 switch (hdr->message_id) { 649 730 case PNS_PEP_CONNECT_REQ: 650 - if (sk->sk_state == TCP_LISTEN && !sk_acceptq_is_full(sk)) 651 - pep_connreq_rcv(sk, skb); 652 - else 653 - pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE); 654 - break; 731 + if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) { 732 + pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, 733 + GFP_ATOMIC); 734 + break; 735 + } 736 + skb_queue_head(&sk->sk_receive_queue, skb); 737 + sk_acceptq_added(sk); 738 + if (!sock_flag(sk, SOCK_DEAD)) 739 + sk->sk_data_ready(sk, 0); 740 + return NET_RX_SUCCESS; 655 741 656 742 #ifdef CONFIG_PHONET_PIPECTRLR 657 743 case PNS_PEP_CONNECT_RESP: ··· 705 799 sk_common_release(sk); 706 800 707 801 lock_sock(sk); 708 - if (sk->sk_state == TCP_LISTEN) { 709 - /* Destroy the listen queue */ 710 - struct sock *sknode; 711 - struct hlist_node *p, *n; 712 - 713 - sk_for_each_safe(sknode, p, n, &pn->ackq) 714 - sk_del_node_init(sknode); 715 - sk->sk_state = TCP_CLOSE; 716 - } else if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) { 802 + if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) { 717 803 #ifndef CONFIG_PHONET_PIPECTRLR 718 804 /* Forcefully remove dangling Phonet pipe */ 719 805 pipe_do_remove(sk); 720 806 #else 721 807 /* send pep disconnect request */ 722 808 pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD, NULL, 0); 723 - sk->sk_state = TCP_CLOSE; 724 809 #endif 725 810 } 811 + sk->sk_state = 
TCP_CLOSE; 726 812 727 813 ifindex = pn->ifindex; 728 814 pn->ifindex = 0; ··· 725 827 sock_put(sk); 726 828 } 727 829 728 - static int pep_wait_connreq(struct sock *sk, int noblock) 729 - { 730 - struct task_struct *tsk = current; 731 - struct pep_sock *pn = pep_sk(sk); 732 - long timeo = sock_rcvtimeo(sk, noblock); 733 - 734 - for (;;) { 735 - DEFINE_WAIT(wait); 736 - 737 - if (sk->sk_state != TCP_LISTEN) 738 - return -EINVAL; 739 - if (!hlist_empty(&pn->ackq)) 740 - break; 741 - if (!timeo) 742 - return -EWOULDBLOCK; 743 - if (signal_pending(tsk)) 744 - return sock_intr_errno(timeo); 745 - 746 - prepare_to_wait_exclusive(sk_sleep(sk), &wait, 747 - TASK_INTERRUPTIBLE); 748 - release_sock(sk); 749 - timeo = schedule_timeout(timeo); 750 - lock_sock(sk); 751 - finish_wait(sk_sleep(sk), &wait); 752 - } 753 - 754 - return 0; 755 - } 756 - 757 830 static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp) 758 831 { 759 - struct pep_sock *pn = pep_sk(sk); 832 + struct pep_sock *pn = pep_sk(sk), *newpn; 760 833 struct sock *newsk = NULL; 761 - struct sk_buff *oskb; 834 + struct sk_buff *skb; 835 + struct pnpipehdr *hdr; 836 + struct sockaddr_pn dst, src; 762 837 int err; 838 + u16 peer_type; 839 + u8 pipe_handle, enabled, n_sb; 840 + u8 aligned = 0; 841 + 842 + skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp); 843 + if (!skb) 844 + return NULL; 763 845 764 846 lock_sock(sk); 765 - err = pep_wait_connreq(sk, flags & O_NONBLOCK); 766 - if (err) 767 - goto out; 768 - 769 - newsk = __sk_head(&pn->ackq); 770 - 771 - oskb = skb_dequeue(&newsk->sk_receive_queue); 772 - err = pep_accept_conn(newsk, oskb); 773 - if (err) { 774 - skb_queue_head(&newsk->sk_receive_queue, oskb); 775 - newsk = NULL; 776 - goto out; 847 + if (sk->sk_state != TCP_LISTEN) { 848 + err = -EINVAL; 849 + goto drop; 777 850 } 778 - kfree_skb(oskb); 779 - 780 - sock_hold(sk); 781 - pep_sk(newsk)->listener = sk; 782 - 783 - sock_hold(newsk); 784 - sk_del_node_init(newsk); 785 851 
sk_acceptq_removed(sk); 786 - sk_add_node(newsk, &pn->hlist); 787 - __sock_put(newsk); 788 852 789 - out: 853 + err = -EPROTO; 854 + if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) 855 + goto drop; 856 + 857 + hdr = pnp_hdr(skb); 858 + pipe_handle = hdr->pipe_handle; 859 + switch (hdr->state_after_connect) { 860 + case PN_PIPE_DISABLE: 861 + enabled = 0; 862 + break; 863 + case PN_PIPE_ENABLE: 864 + enabled = 1; 865 + break; 866 + default: 867 + pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM, 868 + GFP_KERNEL); 869 + goto drop; 870 + } 871 + peer_type = hdr->other_pep_type << 8; 872 + 873 + /* Parse sub-blocks (options) */ 874 + n_sb = hdr->data[4]; 875 + while (n_sb > 0) { 876 + u8 type, buf[1], len = sizeof(buf); 877 + const u8 *data = pep_get_sb(skb, &type, &len, buf); 878 + 879 + if (data == NULL) 880 + goto drop; 881 + switch (type) { 882 + case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE: 883 + if (len < 1) 884 + goto drop; 885 + peer_type = (peer_type & 0xff00) | data[0]; 886 + break; 887 + case PN_PIPE_SB_ALIGNED_DATA: 888 + aligned = data[0] != 0; 889 + break; 890 + } 891 + n_sb--; 892 + } 893 + 894 + /* Check for duplicate pipe handle */ 895 + newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle); 896 + if (unlikely(newsk)) { 897 + __sock_put(newsk); 898 + newsk = NULL; 899 + pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL); 900 + goto drop; 901 + } 902 + 903 + /* Create a new to-be-accepted sock */ 904 + newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot); 905 + if (!newsk) { 906 + pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL); 907 + err = -ENOBUFS; 908 + goto drop; 909 + } 910 + 911 + sock_init_data(NULL, newsk); 912 + newsk->sk_state = TCP_SYN_RECV; 913 + newsk->sk_backlog_rcv = pipe_do_rcv; 914 + newsk->sk_protocol = sk->sk_protocol; 915 + newsk->sk_destruct = pipe_destruct; 916 + 917 + newpn = pep_sk(newsk); 918 + pn_skb_get_dst_sockaddr(skb, &dst); 919 + pn_skb_get_src_sockaddr(skb, &src); 920 + newpn->pn_sk.sobject = 
pn_sockaddr_get_object(&dst); 921 + newpn->pn_sk.dobject = pn_sockaddr_get_object(&src); 922 + newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst); 923 + sock_hold(sk); 924 + newpn->listener = sk; 925 + skb_queue_head_init(&newpn->ctrlreq_queue); 926 + newpn->pipe_handle = pipe_handle; 927 + atomic_set(&newpn->tx_credits, 0); 928 + newpn->ifindex = 0; 929 + newpn->peer_type = peer_type; 930 + newpn->rx_credits = 0; 931 + newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL; 932 + newpn->init_enable = enabled; 933 + newpn->aligned = aligned; 934 + 935 + err = pep_accept_conn(newsk, skb); 936 + if (err) { 937 + sock_put(newsk); 938 + newsk = NULL; 939 + goto drop; 940 + } 941 + sk_add_node(newsk, &pn->hlist); 942 + drop: 790 943 release_sock(sk); 944 + kfree_skb(skb); 791 945 *errp = err; 792 946 return newsk; 793 947 } ··· 887 937 { 888 938 struct pep_sock *pn = pep_sk(sk); 889 939 890 - INIT_HLIST_HEAD(&pn->ackq); 940 + sk->sk_destruct = pipe_destruct; 891 941 INIT_HLIST_HEAD(&pn->hlist); 892 942 skb_queue_head_init(&pn->ctrlreq_queue); 893 943 pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
+4 -6
net/phonet/socket.c
··· 327 327 struct sock *newsk; 328 328 int err; 329 329 330 + if (unlikely(sk->sk_state != TCP_LISTEN)) 331 + return -EINVAL; 332 + 330 333 newsk = sk->sk_prot->accept(sk, flags, &err); 331 334 if (!newsk) 332 335 return err; ··· 366 363 367 364 poll_wait(file, sk_sleep(sk), wait); 368 365 369 - switch (sk->sk_state) { 370 - case TCP_LISTEN: 371 - return hlist_empty(&pn->ackq) ? 0 : POLLIN; 372 - case TCP_CLOSE: 366 + if (sk->sk_state == TCP_CLOSE) 373 367 return POLLERR; 374 - } 375 - 376 368 if (!skb_queue_empty(&sk->sk_receive_queue)) 377 369 mask |= POLLIN | POLLRDNORM; 378 370 if (!skb_queue_empty(&pn->ctrlreq_queue))