Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: rename SOCK_ASYNC_NOSPACE and SOCK_ASYNC_WAITDATA

This patch is a cleanup to make the following patch easier to
review.

Goal is to move SOCK_ASYNC_NOSPACE and SOCK_ASYNC_WAITDATA
from (struct socket)->flags to a (struct socket_wq)->flags
to benefit from RCU protection in sock_wake_async()

To ease backports, we rename both constants.

Two new helpers, sk_set_bit(int nr, struct sock *sk)
and sk_clear_bit(int nr, struct sock *sk) are added so that
the following patch can change their implementation.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Eric Dumazet and committed by
David S. Miller
9cd3e072 5738a09d

+60 -52
+2 -2
crypto/algif_aead.c
··· 125 125 if (flags & MSG_DONTWAIT) 126 126 return -EAGAIN; 127 127 128 - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 128 + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 129 129 130 130 for (;;) { 131 131 if (signal_pending(current)) ··· 139 139 } 140 140 finish_wait(sk_sleep(sk), &wait); 141 141 142 - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 142 + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 143 143 144 144 return err; 145 145 }
+3 -3
crypto/algif_skcipher.c
··· 212 212 if (flags & MSG_DONTWAIT) 213 213 return -EAGAIN; 214 214 215 - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 215 + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 216 216 217 217 for (;;) { 218 218 if (signal_pending(current)) ··· 258 258 return -EAGAIN; 259 259 } 260 260 261 - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 261 + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 262 262 263 263 for (;;) { 264 264 if (signal_pending(current)) ··· 272 272 } 273 273 finish_wait(sk_sleep(sk), &wait); 274 274 275 - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 275 + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 276 276 277 277 return err; 278 278 }
+2 -2
drivers/net/macvtap.c
··· 498 498 wait_queue_head_t *wqueue; 499 499 500 500 if (!sock_writeable(sk) || 501 - !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 501 + !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 502 502 return; 503 503 504 504 wqueue = sk_sleep(sk); ··· 585 585 mask |= POLLIN | POLLRDNORM; 586 586 587 587 if (sock_writeable(&q->sk) || 588 - (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) && 588 + (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) && 589 589 sock_writeable(&q->sk))) 590 590 mask |= POLLOUT | POLLWRNORM; 591 591
+2 -2
drivers/net/tun.c
··· 1040 1040 mask |= POLLIN | POLLRDNORM; 1041 1041 1042 1042 if (sock_writeable(sk) || 1043 - (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1043 + (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1044 1044 sock_writeable(sk))) 1045 1045 mask |= POLLOUT | POLLWRNORM; 1046 1046 ··· 1488 1488 if (!sock_writeable(sk)) 1489 1489 return; 1490 1490 1491 - if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 1491 + if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 1492 1492 return; 1493 1493 1494 1494 wqueue = sk_sleep(sk);
+2 -2
fs/dlm/lowcomms.c
··· 421 421 422 422 if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) { 423 423 con->sock->sk->sk_write_pending--; 424 - clear_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags); 424 + clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags); 425 425 } 426 426 427 427 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) ··· 1448 1448 msg_flags); 1449 1449 if (ret == -EAGAIN || ret == 0) { 1450 1450 if (ret == -EAGAIN && 1451 - test_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags) && 1451 + test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) && 1452 1452 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) { 1453 1453 /* Notify TCP that we're limited by the 1454 1454 * application window size.
+3 -3
include/linux/net.h
··· 34 34 struct file; 35 35 struct net; 36 36 37 - #define SOCK_ASYNC_NOSPACE 0 38 - #define SOCK_ASYNC_WAITDATA 1 37 + #define SOCKWQ_ASYNC_NOSPACE 0 38 + #define SOCKWQ_ASYNC_WAITDATA 1 39 39 #define SOCK_NOSPACE 2 40 40 #define SOCK_PASSCRED 3 41 41 #define SOCK_PASSSEC 4 ··· 96 96 * struct socket - general BSD socket 97 97 * @state: socket state (%SS_CONNECTED, etc) 98 98 * @type: socket type (%SOCK_STREAM, etc) 99 - * @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc) 99 + * @flags: socket flags (%SOCK_NOSPACE, etc) 100 100 * @ops: protocol specific socket operations 101 101 * @file: File back pointer for gc 102 102 * @sk: internal networking protocol agnostic socket representation
+10
include/net/sock.h
··· 2005 2005 return amt; 2006 2006 } 2007 2007 2008 + static inline void sk_set_bit(int nr, struct sock *sk) 2009 + { 2010 + set_bit(nr, &sk->sk_socket->flags); 2011 + } 2012 + 2013 + static inline void sk_clear_bit(int nr, struct sock *sk) 2014 + { 2015 + clear_bit(nr, &sk->sk_socket->flags); 2016 + } 2017 + 2008 2018 static inline void sk_wake_async(struct sock *sk, int how, int band) 2009 2019 { 2010 2020 if (sock_flag(sk, SOCK_FASYNC))
+3 -3
net/bluetooth/af_bluetooth.c
··· 271 271 if (signal_pending(current) || !timeo) 272 272 break; 273 273 274 - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 274 + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 275 275 release_sock(sk); 276 276 timeo = schedule_timeout(timeo); 277 277 lock_sock(sk); 278 - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 278 + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 279 279 } 280 280 281 281 __set_current_state(TASK_RUNNING); ··· 441 441 if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) 442 442 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 443 443 else 444 - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 444 + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 445 445 446 446 return mask; 447 447 }
+2 -2
net/caif/caif_socket.c
··· 323 323 !timeo) 324 324 break; 325 325 326 - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 326 + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 327 327 release_sock(sk); 328 328 timeo = schedule_timeout(timeo); 329 329 lock_sock(sk); ··· 331 331 if (sock_flag(sk, SOCK_DEAD)) 332 332 break; 333 333 334 - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 334 + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 335 335 } 336 336 337 337 finish_wait(sk_sleep(sk), &wait);
+1 -1
net/core/datagram.c
··· 785 785 if (sock_writeable(sk)) 786 786 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 787 787 else 788 - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 788 + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 789 789 790 790 return mask; 791 791 }
+4 -4
net/core/sock.c
··· 1815 1815 { 1816 1816 DEFINE_WAIT(wait); 1817 1817 1818 - clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1818 + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1819 1819 for (;;) { 1820 1820 if (!timeo) 1821 1821 break; ··· 1861 1861 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) 1862 1862 break; 1863 1863 1864 - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1864 + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1865 1865 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1866 1866 err = -EAGAIN; 1867 1867 if (!timeo) ··· 2048 2048 DEFINE_WAIT(wait); 2049 2049 2050 2050 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 2051 - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2051 + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2052 2052 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb); 2053 - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2053 + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2054 2054 finish_wait(sk_sleep(sk), &wait); 2055 2055 return rc; 2056 2056 }
+2 -2
net/core/stream.c
··· 126 126 current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2; 127 127 128 128 while (1) { 129 - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 129 + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 130 130 131 131 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 132 132 ··· 139 139 } 140 140 if (signal_pending(current)) 141 141 goto do_interrupted; 142 - clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 142 + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 143 143 if (sk_stream_memory_free(sk) && !vm_wait) 144 144 break; 145 145
+1 -2
net/dccp/proto.c
··· 339 339 if (sk_stream_is_writeable(sk)) { 340 340 mask |= POLLOUT | POLLWRNORM; 341 341 } else { /* send SIGIO later */ 342 - set_bit(SOCK_ASYNC_NOSPACE, 343 - &sk->sk_socket->flags); 342 + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 344 343 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 345 344 346 345 /* Race breaker. If space is freed after
+4 -4
net/decnet/af_decnet.c
··· 1747 1747 } 1748 1748 1749 1749 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1750 - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1750 + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1751 1751 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); 1752 - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1752 + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1753 1753 finish_wait(sk_sleep(sk), &wait); 1754 1754 } 1755 1755 ··· 2004 2004 } 2005 2005 2006 2006 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 2007 - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2007 + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2008 2008 sk_wait_event(sk, &timeo, 2009 2009 !dn_queue_too_long(scp, queue, flags)); 2010 - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2010 + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2011 2011 finish_wait(sk_sleep(sk), &wait); 2012 2012 continue; 2013 2013 }
+3 -4
net/ipv4/tcp.c
··· 517 517 if (sk_stream_is_writeable(sk)) { 518 518 mask |= POLLOUT | POLLWRNORM; 519 519 } else { /* send SIGIO later */ 520 - set_bit(SOCK_ASYNC_NOSPACE, 521 - &sk->sk_socket->flags); 520 + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 522 521 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 523 522 524 523 /* Race breaker. If space is freed after ··· 905 906 goto out_err; 906 907 } 907 908 908 - clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 909 + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 909 910 910 911 mss_now = tcp_send_mss(sk, &size_goal, flags); 911 912 copied = 0; ··· 1133 1134 } 1134 1135 1135 1136 /* This should be in poll */ 1136 - clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1137 + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1137 1138 1138 1139 mss_now = tcp_send_mss(sk, &size_goal, flags); 1139 1140
+1 -1
net/iucv/af_iucv.c
··· 1483 1483 if (sock_writeable(sk) && iucv_below_msglim(sk)) 1484 1484 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 1485 1485 else 1486 - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1486 + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1487 1487 1488 1488 return mask; 1489 1489 }
+1 -1
net/nfc/llcp_sock.c
··· 572 572 if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED) 573 573 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 574 574 else 575 - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 575 + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 576 576 577 577 pr_debug("mask 0x%x\n", mask); 578 578
+1 -1
net/rxrpc/ar-output.c
··· 531 531 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 532 532 533 533 /* this should be in poll */ 534 - clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 534 + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 535 535 536 536 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 537 537 return -EPIPE;
+1 -1
net/sctp/socket.c
··· 6458 6458 if (sctp_writeable(sk)) { 6459 6459 mask |= POLLOUT | POLLWRNORM; 6460 6460 } else { 6461 - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 6461 + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 6462 6462 /* 6463 6463 * Since the socket is not locked, the buffer 6464 6464 * might be made available after the writeable check and
+2 -2
net/socket.c
··· 1072 1072 } 1073 1073 switch (how) { 1074 1074 case SOCK_WAKE_WAITD: 1075 - if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) 1075 + if (test_bit(SOCKWQ_ASYNC_WAITDATA, &sock->flags)) 1076 1076 break; 1077 1077 goto call_kill; 1078 1078 case SOCK_WAKE_SPACE: 1079 - if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags)) 1079 + if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags)) 1080 1080 break; 1081 1081 /* fall through */ 1082 1082 case SOCK_WAKE_IO:
+7 -7
net/sunrpc/xprtsock.c
··· 398 398 if (unlikely(!sock)) 399 399 return -ENOTSOCK; 400 400 401 - clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); 401 + clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags); 402 402 if (base != 0) { 403 403 addr = NULL; 404 404 addrlen = 0; ··· 442 442 struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt); 443 443 444 444 transport->inet->sk_write_pending--; 445 - clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 445 + clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); 446 446 } 447 447 448 448 /** ··· 467 467 468 468 /* Don't race with disconnect */ 469 469 if (xprt_connected(xprt)) { 470 - if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { 470 + if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) { 471 471 /* 472 472 * Notify TCP that we're limited by the application 473 473 * window size ··· 478 478 xprt_wait_for_buffer_space(task, xs_nospace_callback); 479 479 } 480 480 } else { 481 - clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 481 + clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); 482 482 ret = -ENOTCONN; 483 483 } 484 484 ··· 626 626 case -EPERM: 627 627 /* When the server has died, an ICMP port unreachable message 628 628 * prompts ECONNREFUSED. */ 629 - clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 629 + clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); 630 630 } 631 631 632 632 return status; ··· 715 715 case -EADDRINUSE: 716 716 case -ENOBUFS: 717 717 case -EPIPE: 718 - clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 718 + clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); 719 719 } 720 720 721 721 return status; ··· 1618 1618 1619 1619 if (unlikely(!(xprt = xprt_from_sock(sk)))) 1620 1620 return; 1621 - if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0) 1621 + if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0) 1622 1622 return; 1623 1623 1624 1624 xprt_write_space(xprt);
+3 -3
net/unix/af_unix.c
··· 2191 2191 !timeo) 2192 2192 break; 2193 2193 2194 - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2194 + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2195 2195 unix_state_unlock(sk); 2196 2196 timeo = freezable_schedule_timeout(timeo); 2197 2197 unix_state_lock(sk); ··· 2199 2199 if (sock_flag(sk, SOCK_DEAD)) 2200 2200 break; 2201 2201 2202 - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2202 + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2203 2203 } 2204 2204 2205 2205 finish_wait(sk_sleep(sk), &wait); ··· 2683 2683 if (writable) 2684 2684 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 2685 2685 else 2686 - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 2686 + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2687 2687 2688 2688 return mask; 2689 2689 }