Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: include/net/sock.h cleanup

bool/const conversions where possible

__inline__ -> inline

space cleanups

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eric Dumazet; committed by David S. Miller.
dc6b9b78 1f352920

+84 -87
+74 -77
include/net/sock.h
··· 97 97 #else 98 98 /* Validate arguments and do nothing */ 99 99 static inline __printf(2, 3) 100 - void SOCK_DEBUG(struct sock *sk, const char *msg, ...) 100 + void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) 101 101 { 102 102 } 103 103 #endif ··· 372 372 void (*sk_data_ready)(struct sock *sk, int bytes); 373 373 void (*sk_write_space)(struct sock *sk); 374 374 void (*sk_error_report)(struct sock *sk); 375 - int (*sk_backlog_rcv)(struct sock *sk, 376 - struct sk_buff *skb); 375 + int (*sk_backlog_rcv)(struct sock *sk, 376 + struct sk_buff *skb); 377 377 void (*sk_destruct)(struct sock *sk); 378 378 }; 379 379 ··· 454 454 NULL; 455 455 } 456 456 457 - static inline int sk_unhashed(const struct sock *sk) 457 + static inline bool sk_unhashed(const struct sock *sk) 458 458 { 459 459 return hlist_unhashed(&sk->sk_node); 460 460 } 461 461 462 - static inline int sk_hashed(const struct sock *sk) 462 + static inline bool sk_hashed(const struct sock *sk) 463 463 { 464 464 return !sk_unhashed(sk); 465 465 } 466 466 467 - static __inline__ void sk_node_init(struct hlist_node *node) 467 + static inline void sk_node_init(struct hlist_node *node) 468 468 { 469 469 node->pprev = NULL; 470 470 } 471 471 472 - static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node) 472 + static inline void sk_nulls_node_init(struct hlist_nulls_node *node) 473 473 { 474 474 node->pprev = NULL; 475 475 } 476 476 477 - static __inline__ void __sk_del_node(struct sock *sk) 477 + static inline void __sk_del_node(struct sock *sk) 478 478 { 479 479 __hlist_del(&sk->sk_node); 480 480 } 481 481 482 482 /* NB: equivalent to hlist_del_init_rcu */ 483 - static __inline__ int __sk_del_node_init(struct sock *sk) 483 + static inline bool __sk_del_node_init(struct sock *sk) 484 484 { 485 485 if (sk_hashed(sk)) { 486 486 __sk_del_node(sk); 487 487 sk_node_init(&sk->sk_node); 488 - return 1; 488 + return true; 489 489 } 490 - return 0; 490 + return false; 491 491 } 492 492 493 493 /* 
Grab socket reference count. This operation is valid only ··· 509 509 atomic_dec(&sk->sk_refcnt); 510 510 } 511 511 512 - static __inline__ int sk_del_node_init(struct sock *sk) 512 + static inline bool sk_del_node_init(struct sock *sk) 513 513 { 514 - int rc = __sk_del_node_init(sk); 514 + bool rc = __sk_del_node_init(sk); 515 515 516 516 if (rc) { 517 517 /* paranoid for a while -acme */ ··· 522 522 } 523 523 #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) 524 524 525 - static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk) 525 + static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) 526 526 { 527 527 if (sk_hashed(sk)) { 528 528 hlist_nulls_del_init_rcu(&sk->sk_nulls_node); 529 - return 1; 529 + return true; 530 530 } 531 - return 0; 531 + return false; 532 532 } 533 533 534 - static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk) 534 + static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) 535 535 { 536 - int rc = __sk_nulls_del_node_init_rcu(sk); 536 + bool rc = __sk_nulls_del_node_init_rcu(sk); 537 537 538 538 if (rc) { 539 539 /* paranoid for a while -acme */ ··· 543 543 return rc; 544 544 } 545 545 546 - static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list) 546 + static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) 547 547 { 548 548 hlist_add_head(&sk->sk_node, list); 549 549 } 550 550 551 - static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list) 551 + static inline void sk_add_node(struct sock *sk, struct hlist_head *list) 552 552 { 553 553 sock_hold(sk); 554 554 __sk_add_node(sk, list); 555 555 } 556 556 557 - static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) 557 + static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) 558 558 { 559 559 sock_hold(sk); 560 560 hlist_add_head_rcu(&sk->sk_node, list); 561 561 } 562 562 563 - static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, 
struct hlist_nulls_head *list) 563 + static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 564 564 { 565 565 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); 566 566 } 567 567 568 - static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 568 + static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 569 569 { 570 570 sock_hold(sk); 571 571 __sk_nulls_add_node_rcu(sk, list); 572 572 } 573 573 574 - static __inline__ void __sk_del_bind_node(struct sock *sk) 574 + static inline void __sk_del_bind_node(struct sock *sk) 575 575 { 576 576 __hlist_del(&sk->sk_bind_node); 577 577 } 578 578 579 - static __inline__ void sk_add_bind_node(struct sock *sk, 579 + static inline void sk_add_bind_node(struct sock *sk, 580 580 struct hlist_head *list) 581 581 { 582 582 hlist_add_head(&sk->sk_bind_node, list); ··· 665 665 sk->sk_ack_backlog++; 666 666 } 667 667 668 - static inline int sk_acceptq_is_full(struct sock *sk) 668 + static inline bool sk_acceptq_is_full(const struct sock *sk) 669 669 { 670 670 return sk->sk_ack_backlog > sk->sk_max_ack_backlog; 671 671 } ··· 673 673 /* 674 674 * Compute minimal free write space needed to queue new packets. 
675 675 */ 676 - static inline int sk_stream_min_wspace(struct sock *sk) 676 + static inline int sk_stream_min_wspace(const struct sock *sk) 677 677 { 678 678 return sk->sk_wmem_queued >> 1; 679 679 } 680 680 681 - static inline int sk_stream_wspace(struct sock *sk) 681 + static inline int sk_stream_wspace(const struct sock *sk) 682 682 { 683 683 return sk->sk_sndbuf - sk->sk_wmem_queued; 684 684 } 685 685 686 686 extern void sk_stream_write_space(struct sock *sk); 687 687 688 - static inline int sk_stream_memory_free(struct sock *sk) 688 + static inline bool sk_stream_memory_free(const struct sock *sk) 689 689 { 690 690 return sk->sk_wmem_queued < sk->sk_sndbuf; 691 691 } ··· 809 809 * transport -> network interface is defined by struct inet_proto 810 810 */ 811 811 struct proto { 812 - void (*close)(struct sock *sk, 812 + void (*close)(struct sock *sk, 813 813 long timeout); 814 814 int (*connect)(struct sock *sk, 815 - struct sockaddr *uaddr, 815 + struct sockaddr *uaddr, 816 816 int addr_len); 817 817 int (*disconnect)(struct sock *sk, int flags); 818 818 819 - struct sock * (*accept) (struct sock *sk, int flags, int *err); 819 + struct sock * (*accept)(struct sock *sk, int flags, int *err); 820 820 821 821 int (*ioctl)(struct sock *sk, int cmd, 822 822 unsigned long arg); 823 823 int (*init)(struct sock *sk); 824 824 void (*destroy)(struct sock *sk); 825 825 void (*shutdown)(struct sock *sk, int how); 826 - int (*setsockopt)(struct sock *sk, int level, 826 + int (*setsockopt)(struct sock *sk, int level, 827 827 int optname, char __user *optval, 828 828 unsigned int optlen); 829 - int (*getsockopt)(struct sock *sk, int level, 830 - int optname, char __user *optval, 831 - int __user *option); 829 + int (*getsockopt)(struct sock *sk, int level, 830 + int optname, char __user *optval, 831 + int __user *option); 832 832 #ifdef CONFIG_COMPAT 833 833 int (*compat_setsockopt)(struct sock *sk, 834 834 int level, ··· 845 845 struct msghdr *msg, size_t len); 846 846 int 
(*recvmsg)(struct kiocb *iocb, struct sock *sk, 847 847 struct msghdr *msg, 848 - size_t len, int noblock, int flags, 849 - int *addr_len); 848 + size_t len, int noblock, int flags, 849 + int *addr_len); 850 850 int (*sendpage)(struct sock *sk, struct page *page, 851 851 int offset, size_t size, int flags); 852 - int (*bind)(struct sock *sk, 852 + int (*bind)(struct sock *sk, 853 853 struct sockaddr *uaddr, int addr_len); 854 854 855 - int (*backlog_rcv) (struct sock *sk, 855 + int (*backlog_rcv) (struct sock *sk, 856 856 struct sk_buff *skb); 857 857 858 858 /* Keeping track of sk's, looking them up, and port selection methods. */ ··· 1173 1173 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); 1174 1174 extern int sock_prot_inuse_get(struct net *net, struct proto *proto); 1175 1175 #else 1176 - static void inline sock_prot_inuse_add(struct net *net, struct proto *prot, 1176 + static inline void sock_prot_inuse_add(struct net *net, struct proto *prot, 1177 1177 int inc) 1178 1178 { 1179 1179 } ··· 1260 1260 return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT; 1261 1261 } 1262 1262 1263 - static inline int sk_has_account(struct sock *sk) 1263 + static inline bool sk_has_account(struct sock *sk) 1264 1264 { 1265 1265 /* return true if protocol supports memory accounting */ 1266 1266 return !!sk->sk_prot->memory_allocated; 1267 1267 } 1268 1268 1269 - static inline int sk_wmem_schedule(struct sock *sk, int size) 1269 + static inline bool sk_wmem_schedule(struct sock *sk, int size) 1270 1270 { 1271 1271 if (!sk_has_account(sk)) 1272 - return 1; 1272 + return true; 1273 1273 return size <= sk->sk_forward_alloc || 1274 1274 __sk_mem_schedule(sk, size, SK_MEM_SEND); 1275 1275 } 1276 1276 1277 - static inline int sk_rmem_schedule(struct sock *sk, int size) 1277 + static inline bool sk_rmem_schedule(struct sock *sk, int size) 1278 1278 { 1279 1279 if (!sk_has_account(sk)) 1280 - return 1; 1280 + return true; 1281 1281 return size <= 
sk->sk_forward_alloc || 1282 1282 __sk_mem_schedule(sk, size, SK_MEM_RECV); 1283 1283 } ··· 1342 1342 * Mark both the sk_lock and the sk_lock.slock as a 1343 1343 * per-address-family lock class. 1344 1344 */ 1345 - #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ 1345 + #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ 1346 1346 do { \ 1347 1347 sk->sk_lock.owned = 0; \ 1348 1348 init_waitqueue_head(&sk->sk_lock.wq); \ ··· 1350 1350 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ 1351 1351 sizeof((sk)->sk_lock)); \ 1352 1352 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ 1353 - (skey), (sname)); \ 1353 + (skey), (sname)); \ 1354 1354 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ 1355 1355 } while (0) 1356 1356 ··· 1410 1410 unsigned int optlen); 1411 1411 1412 1412 extern int sock_getsockopt(struct socket *sock, int level, 1413 - int op, char __user *optval, 1413 + int op, char __user *optval, 1414 1414 int __user *optlen); 1415 - extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, 1415 + extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, 1416 1416 unsigned long size, 1417 1417 int noblock, 1418 1418 int *errcode); 1419 - extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk, 1419 + extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk, 1420 1420 unsigned long header_len, 1421 1421 unsigned long data_len, 1422 1422 int noblock, ··· 1438 1438 * Functions to fill in entries in struct proto_ops when a protocol 1439 1439 * does not implement a particular function. 
1440 1440 */ 1441 - extern int sock_no_bind(struct socket *, 1441 + extern int sock_no_bind(struct socket *, 1442 1442 struct sockaddr *, int); 1443 1443 extern int sock_no_connect(struct socket *, 1444 1444 struct sockaddr *, int, int); ··· 1467 1467 struct vm_area_struct *vma); 1468 1468 extern ssize_t sock_no_sendpage(struct socket *sock, 1469 1469 struct page *page, 1470 - int offset, size_t size, 1470 + int offset, size_t size, 1471 1471 int flags); 1472 1472 1473 1473 /* ··· 1490 1490 /* 1491 1491 * Default socket callbacks and setup code 1492 1492 */ 1493 - 1493 + 1494 1494 /* Initialise core socket variables */ 1495 1495 extern void sock_init_data(struct socket *sock, struct sock *sk); 1496 1496 ··· 1690 1690 1691 1691 extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); 1692 1692 1693 - static inline int sk_can_gso(const struct sock *sk) 1693 + static inline bool sk_can_gso(const struct sock *sk) 1694 1694 { 1695 1695 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); 1696 1696 } ··· 1807 1807 * 1808 1808 * Returns true if socket has write or read allocations 1809 1809 */ 1810 - static inline int sk_has_allocations(const struct sock *sk) 1810 + static inline bool sk_has_allocations(const struct sock *sk) 1811 1811 { 1812 1812 return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); 1813 1813 } ··· 1846 1846 */ 1847 1847 static inline bool wq_has_sleeper(struct socket_wq *wq) 1848 1848 { 1849 - 1850 - /* 1851 - * We need to be sure we are in sync with the 1849 + /* We need to be sure we are in sync with the 1852 1850 * add_wait_queue modifications to the wait queue. 1853 1851 * 1854 1852 * This memory barrier is paired in the sock_poll_wait. ··· 1868 1870 { 1869 1871 if (!poll_does_not_wait(p) && wait_address) { 1870 1872 poll_wait(filp, wait_address, p); 1871 - /* 1872 - * We need to be sure we are in sync with the 1873 + /* We need to be sure we are in sync with the 1873 1874 * socket flags modification. 
1874 1875 * 1875 1876 * This memory barrier is paired in the wq_has_sleeper. 1876 - */ 1877 + */ 1877 1878 smp_mb(); 1878 1879 } 1879 1880 } 1880 1881 1881 1882 /* 1882 - * Queue a received datagram if it will fit. Stream and sequenced 1883 + * Queue a received datagram if it will fit. Stream and sequenced 1883 1884 * protocols can't normally use this as they need to fit buffers in 1884 1885 * and play with them. 1885 1886 * 1886 - * Inlined as it's very short and called for pretty much every 1887 + * Inlined as it's very short and called for pretty much every 1887 1888 * packet ever received. 1888 1889 */ 1889 1890 ··· 1908 1911 sk_mem_charge(sk, skb->truesize); 1909 1912 } 1910 1913 1911 - extern void sk_reset_timer(struct sock *sk, struct timer_list* timer, 1914 + extern void sk_reset_timer(struct sock *sk, struct timer_list *timer, 1912 1915 unsigned long expires); 1913 1916 1914 - extern void sk_stop_timer(struct sock *sk, struct timer_list* timer); 1917 + extern void sk_stop_timer(struct sock *sk, struct timer_list *timer); 1915 1918 1916 1919 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 1917 1920 ··· 1920 1923 /* 1921 1924 * Recover an error report and clear atomically 1922 1925 */ 1923 - 1926 + 1924 1927 static inline int sock_error(struct sock *sk) 1925 1928 { 1926 1929 int err; ··· 1936 1939 1937 1940 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { 1938 1941 amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); 1939 - if (amt < 0) 1942 + if (amt < 0) 1940 1943 amt = 0; 1941 1944 } 1942 1945 return amt; ··· 1980 1983 /* 1981 1984 * Default write policy as shown to user space via poll/select/SIGIO 1982 1985 */ 1983 - static inline int sock_writeable(const struct sock *sk) 1986 + static inline bool sock_writeable(const struct sock *sk) 1984 1987 { 1985 1988 return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); 1986 1989 } ··· 1990 1993 return in_softirq() ? 
GFP_ATOMIC : GFP_KERNEL; 1991 1994 } 1992 1995 1993 - static inline long sock_rcvtimeo(const struct sock *sk, int noblock) 1996 + static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) 1994 1997 { 1995 1998 return noblock ? 0 : sk->sk_rcvtimeo; 1996 1999 } 1997 2000 1998 - static inline long sock_sndtimeo(const struct sock *sk, int noblock) 2001 + static inline long sock_sndtimeo(const struct sock *sk, bool noblock) 1999 2002 { 2000 2003 return noblock ? 0 : sk->sk_sndtimeo; 2001 2004 } ··· 2018 2021 extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, 2019 2022 struct sk_buff *skb); 2020 2023 2021 - static __inline__ void 2024 + static inline void 2022 2025 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) 2023 2026 { 2024 2027 ktime_t kt = skb->tstamp; ··· 2059 2062 (1UL << SOCK_RCVTSTAMP) | \ 2060 2063 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \ 2061 2064 (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ 2062 - (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ 2065 + (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ 2063 2066 (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) 2064 2067 2065 2068 if (sk->sk_flags & FLAGS_TS_OR_DROPS) ··· 2088 2091 * locked so that the sk_buff queue operation is ok. 
2089 2092 */ 2090 2093 #ifdef CONFIG_NET_DMA 2091 - static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) 2094 + static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) 2092 2095 { 2093 2096 __skb_unlink(skb, &sk->sk_receive_queue); 2094 2097 if (!copied_early) ··· 2097 2100 __skb_queue_tail(&sk->sk_async_wait_queue, skb); 2098 2101 } 2099 2102 #else 2100 - static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) 2103 + static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) 2101 2104 { 2102 2105 __skb_unlink(skb, &sk->sk_receive_queue); 2103 2106 __kfree_skb(skb); ··· 2144 2147 extern int sock_get_timestamp(struct sock *, struct timeval __user *); 2145 2148 extern int sock_get_timestampns(struct sock *, struct timespec __user *); 2146 2149 2147 - /* 2148 - * Enable debug/info messages 2150 + /* 2151 + * Enable debug/info messages 2149 2152 */ 2150 2153 extern int net_msg_warn; 2151 2154 #define NETDEBUG(fmt, args...) \
+2 -2
net/dccp/proto.c
··· 848 848 default: 849 849 dccp_pr_debug("packet_type=%s\n", 850 850 dccp_packet_name(dh->dccph_type)); 851 - sk_eat_skb(sk, skb, 0); 851 + sk_eat_skb(sk, skb, false); 852 852 } 853 853 verify_sock_status: 854 854 if (sock_flag(sk, SOCK_DONE)) { ··· 905 905 len = skb->len; 906 906 found_fin_ok: 907 907 if (!(flags & MSG_PEEK)) 908 - sk_eat_skb(sk, skb, 0); 908 + sk_eat_skb(sk, skb, false); 909 909 break; 910 910 } while (1); 911 911 out:
+6 -6
net/ipv4/tcp.c
··· 1473 1473 break; 1474 1474 } 1475 1475 if (tcp_hdr(skb)->fin) { 1476 - sk_eat_skb(sk, skb, 0); 1476 + sk_eat_skb(sk, skb, false); 1477 1477 ++seq; 1478 1478 break; 1479 1479 } 1480 - sk_eat_skb(sk, skb, 0); 1480 + sk_eat_skb(sk, skb, false); 1481 1481 if (!desc->count) 1482 1482 break; 1483 1483 tp->copied_seq = seq; ··· 1513 1513 int target; /* Read at least this many bytes */ 1514 1514 long timeo; 1515 1515 struct task_struct *user_recv = NULL; 1516 - int copied_early = 0; 1516 + bool copied_early = false; 1517 1517 struct sk_buff *skb; 1518 1518 u32 urg_hole = 0; 1519 1519 ··· 1801 1801 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); 1802 1802 1803 1803 if ((offset + used) == skb->len) 1804 - copied_early = 1; 1804 + copied_early = true; 1805 1805 1806 1806 } else 1807 1807 #endif ··· 1835 1835 goto found_fin_ok; 1836 1836 if (!(flags & MSG_PEEK)) { 1837 1837 sk_eat_skb(sk, skb, copied_early); 1838 - copied_early = 0; 1838 + copied_early = false; 1839 1839 } 1840 1840 continue; 1841 1841 ··· 1844 1844 ++*seq; 1845 1845 if (!(flags & MSG_PEEK)) { 1846 1846 sk_eat_skb(sk, skb, copied_early); 1847 - copied_early = 0; 1847 + copied_early = false; 1848 1848 } 1849 1849 break; 1850 1850 } while (len > 0);
+2 -2
net/llc/af_llc.c
··· 838 838 839 839 if (!(flags & MSG_PEEK)) { 840 840 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); 841 - sk_eat_skb(sk, skb, 0); 841 + sk_eat_skb(sk, skb, false); 842 842 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); 843 843 *seq = 0; 844 844 } ··· 861 861 862 862 if (!(flags & MSG_PEEK)) { 863 863 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); 864 - sk_eat_skb(sk, skb, 0); 864 + sk_eat_skb(sk, skb, false); 865 865 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); 866 866 *seq = 0; 867 867 }