Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Revert "net: simplify sock_poll_wait"

This reverts commit dd979b4df817e9976f18fb6f9d134d6bc4a3c317.

This broke tcp_poll for SMC fallback: An AF_SMC socket establishes an
internal TCP socket for the initial handshake with the remote peer.
Whenever the SMC connection cannot be established this TCP socket is
used as a fallback. All socket operations on the SMC socket are then
forwarded to the TCP socket. In case of poll, the file->private_data
pointer references the SMC socket because the TCP socket has no file
assigned. This causes tcp_poll to wait on the wrong socket.

Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Karsten Graul and committed by
David S. Miller
89ab066d 6b7a02f7

+22 -16
+1 -1
crypto/af_alg.c
··· 1071 1071 struct af_alg_ctx *ctx = ask->private; 1072 1072 __poll_t mask; 1073 1073 1074 - sock_poll_wait(file, wait); 1074 + sock_poll_wait(file, sock, wait); 1075 1075 mask = 0; 1076 1076 1077 1077 if (!ctx->more || ctx->used)
+9 -3
include/net/sock.h
@@ -2059,14 +2060,20 @@
 /**
  * sock_poll_wait - place memory barrier behind the poll_wait call.
  * @filp: file
+ * @sock: socket to wait on
  * @p: poll_table
  *
  * See the comments in the wq_has_sleeper function.
+ *
+ * Do not derive sock from filp->private_data here. An SMC socket establishes
+ * an internal TCP socket that is used in the fallback case. All socket
+ * operations on the SMC socket are then forwarded to the TCP socket. In case of
+ * poll, the filp->private_data pointer references the SMC socket because the
+ * TCP socket has no file assigned.
  */
-static inline void sock_poll_wait(struct file *filp, poll_table *p)
+static inline void sock_poll_wait(struct file *filp, struct socket *sock,
+				  poll_table *p)
 {
-	struct socket *sock = filp->private_data;
-
 	if (!poll_does_not_wait(p)) {
 		poll_wait(filp, &sock->wq->wait, p);
 		/* We need to be sure we are in sync with the
+1 -1
net/atm/common.c
··· 653 653 struct atm_vcc *vcc; 654 654 __poll_t mask; 655 655 656 - sock_poll_wait(file, wait); 656 + sock_poll_wait(file, sock, wait); 657 657 mask = 0; 658 658 659 659 vcc = ATM_SD(sock);
+1 -1
net/caif/caif_socket.c
··· 941 941 __poll_t mask; 942 942 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 943 943 944 - sock_poll_wait(file, wait); 944 + sock_poll_wait(file, sock, wait); 945 945 mask = 0; 946 946 947 947 /* exceptional events? */
+1 -1
net/core/datagram.c
··· 837 837 struct sock *sk = sock->sk; 838 838 __poll_t mask; 839 839 840 - sock_poll_wait(file, wait); 840 + sock_poll_wait(file, sock, wait); 841 841 mask = 0; 842 842 843 843 /* exceptional events? */
+1 -1
net/dccp/proto.c
··· 325 325 __poll_t mask; 326 326 struct sock *sk = sock->sk; 327 327 328 - sock_poll_wait(file, wait); 328 + sock_poll_wait(file, sock, wait); 329 329 if (sk->sk_state == DCCP_LISTEN) 330 330 return inet_csk_listen_poll(sk); 331 331
+1 -1
net/ipv4/tcp.c
··· 507 507 const struct tcp_sock *tp = tcp_sk(sk); 508 508 int state; 509 509 510 - sock_poll_wait(file, wait); 510 + sock_poll_wait(file, sock, wait); 511 511 512 512 state = inet_sk_state_load(sk); 513 513 if (state == TCP_LISTEN)
+1 -1
net/iucv/af_iucv.c
··· 1504 1504 struct sock *sk = sock->sk; 1505 1505 __poll_t mask = 0; 1506 1506 1507 - sock_poll_wait(file, wait); 1507 + sock_poll_wait(file, sock, wait); 1508 1508 1509 1509 if (sk->sk_state == IUCV_LISTEN) 1510 1510 return iucv_accept_poll(sk);
+1 -1
net/nfc/llcp_sock.c
··· 556 556 557 557 pr_debug("%p\n", sk); 558 558 559 - sock_poll_wait(file, wait); 559 + sock_poll_wait(file, sock, wait); 560 560 561 561 if (sk->sk_state == LLCP_LISTEN) 562 562 return llcp_accept_poll(sk);
+1 -1
net/rxrpc/af_rxrpc.c
··· 756 756 struct rxrpc_sock *rx = rxrpc_sk(sk); 757 757 __poll_t mask; 758 758 759 - sock_poll_wait(file, wait); 759 + sock_poll_wait(file, sock, wait); 760 760 mask = 0; 761 761 762 762 /* the socket is readable if there are any messages waiting on the Rx
+1 -1
net/smc/af_smc.c
··· 1543 1543 mask |= EPOLLERR; 1544 1544 } else { 1545 1545 if (sk->sk_state != SMC_CLOSED) 1546 - sock_poll_wait(file, wait); 1546 + sock_poll_wait(file, sock, wait); 1547 1547 if (sk->sk_err) 1548 1548 mask |= EPOLLERR; 1549 1549 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
+1 -1
net/tipc/socket.c
··· 717 717 struct tipc_sock *tsk = tipc_sk(sk); 718 718 __poll_t revents = 0; 719 719 720 - sock_poll_wait(file, wait); 720 + sock_poll_wait(file, sock, wait); 721 721 722 722 if (sk->sk_shutdown & RCV_SHUTDOWN) 723 723 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
+2 -2
net/unix/af_unix.c
··· 2642 2642 struct sock *sk = sock->sk; 2643 2643 __poll_t mask; 2644 2644 2645 - sock_poll_wait(file, wait); 2645 + sock_poll_wait(file, sock, wait); 2646 2646 mask = 0; 2647 2647 2648 2648 /* exceptional events? */ ··· 2679 2679 unsigned int writable; 2680 2680 __poll_t mask; 2681 2681 2682 - sock_poll_wait(file, wait); 2682 + sock_poll_wait(file, sock, wait); 2683 2683 mask = 0; 2684 2684 2685 2685 /* exceptional events? */