
net: convert sock.sk_wmem_alloc from atomic_t to refcount_t

The refcount_t type and its corresponding API should be used
instead of atomic_t when the variable is used as a reference
counter. This avoids accidental refcounter overflows that
might lead to use-after-free situations.
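
As a rough sketch of the conversion pattern applied throughout this
patch (the struct and helpers below are illustrative only, not taken
from the tree): refcount_t saturates instead of wrapping past its
maximum, and refuses to increment a counter that has already dropped
to zero.

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct foo {
		refcount_t refs;			/* was: atomic_t refs */
	};

	static void foo_init(struct foo *f)
	{
		refcount_set(&f->refs, 1);		/* was: atomic_set() */
	}

	static bool foo_get(struct foo *f)
	{
		/* fails instead of resurrecting an object already at zero */
		return refcount_inc_not_zero(&f->refs);	/* was: atomic_inc_not_zero() */
	}

	static void foo_put(struct foo *f)
	{
		if (refcount_dec_and_test(&f->refs))	/* was: atomic_dec_and_test() */
			kfree(f);
	}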

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Reshetova, Elena; committed by David S. Miller
14afee4b 2638595a

37 files changed, +74 -85
drivers/atm/fore200e.c (+1 -11)
@@ -924,12 +924,7 @@
 	else {
 		dev_kfree_skb_any(entry->skb);
 	}
-#if 1
-	/* race fixed by the above incarnation mechanism, but... */
-	if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
-		atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
-	}
-#endif
+
 	/* check error condition */
 	if (*entry->status & STATUS_ERROR)
 		atomic_inc(&vcc->stats->tx_err);
@@ -1125,12 +1130,8 @@
 		return -ENOMEM;
 	}
 
-	ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
-
 	vcc->push(vcc, skb);
 	atomic_inc(&vcc->stats->rx);
-
-	ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
 
 	return 0;
 }
@@ -1563,7 +1572,6 @@
 	unsigned long flags;
 
 	ASSERT(vcc);
-	ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
 	ASSERT(fore200e);
 	ASSERT(fore200e_vcc);
 
drivers/atm/he.c (+1 -1)
@@ -2395,7 +2395,7 @@
 	 * TBRQ, the host issues the close command to the adapter.
 	 */
 
-	while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
+	while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
 	       (retry < MAX_RETRY)) {
 		msleep(sleep);
 		if (sleep < 250)
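
The "> 1" here, rather than "> 0", reflects the +1 bias that sk_wmem_alloc
carries (see include/net/sock.h and net/core/sock.c below): the transmit
queue is fully drained once only the socket's own reference remains. A
sketch with a hypothetical helper name, not patch code:

	#include <linux/atmdev.h>
	#include <linux/delay.h>

	/* wait until only the bias reference is left, i.e. every
	 * transmitted skb has been freed and uncharged */
	static void wait_for_tx_drain(struct atm_vcc *vcc)
	{
		while (refcount_read(&sk_atm(vcc)->sk_wmem_alloc) > 1)
			msleep(10);
	}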
drivers/atm/idt77252.c (+2 -2)
@@ -724,7 +724,7 @@
 	struct sock *sk = sk_atm(vcc);
 
 	vc->estimator->cells += (skb->len + 47) / 48;
-	if (atomic_read(&sk->sk_wmem_alloc) >
+	if (refcount_read(&sk->sk_wmem_alloc) >
 	    (sk->sk_sndbuf >> 1)) {
 		u32 cps = vc->estimator->maxcps;
 
@@ -2009,7 +2009,7 @@
 		atomic_inc(&vcc->stats->tx_err);
 		return -ENOMEM;
 	}
-	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
 
 	skb_put_data(skb, cell, 52);
 
include/linux/atmdev.h (+1 -1)
@@ -254,7 +254,7 @@
 
 static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size)
 {
-	return (size + atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) <
+	return (size + refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) <
 	       sk_atm(vcc)->sk_sndbuf;
 }
 
include/net/sock.h (+4 -4)
@@ -390,7 +390,7 @@
 
 	/* ===== cache line for TX ===== */
 	int			sk_wmem_queued;
-	atomic_t		sk_wmem_alloc;
+	refcount_t		sk_wmem_alloc;
 	unsigned long		sk_tsq_flags;
 	struct sk_buff		*sk_send_head;
 	struct sk_buff_head	sk_write_queue;
@@ -1911,7 +1911,7 @@
  */
 static inline int sk_wmem_alloc_get(const struct sock *sk)
 {
-	return atomic_read(&sk->sk_wmem_alloc) - 1;
+	return refcount_read(&sk->sk_wmem_alloc) - 1;
}
 
 /**
@@ -2055,7 +2055,7 @@
 	int amt = 0;
 
 	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
-		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+		amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
 		if (amt < 0)
 			amt = 0;
 	}
@@ -2136,7 +2136,7 @@
  */
 static inline bool sock_writeable(const struct sock *sk)
 {
-	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
+	return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
 }
 
 static inline gfp_t gfp_any(void)
net/atm/br2684.c (+1 -1)
@@ -252,7 +252,7 @@
 
 	ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
 	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
-	atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = atmvcc->atm_options;
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
net/atm/clip.c (+1 -1)
@@ -381,7 +381,7 @@
 		memcpy(here, llc_oui, sizeof(llc_oui));
 		((__be16 *) here)[3] = skb->protocol;
 	}
-	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = vcc->atm_options;
 	entry->vccs->last_use = jiffies;
 	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
net/atm/common.c (+5 -5)
@@ -80,9 +80,9 @@
 		printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
 		       __func__, atomic_read(&sk->sk_rmem_alloc));
 
-	if (atomic_read(&sk->sk_wmem_alloc))
+	if (refcount_read(&sk->sk_wmem_alloc))
 		printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
-		       __func__, atomic_read(&sk->sk_wmem_alloc));
+		       __func__, refcount_read(&sk->sk_wmem_alloc));
 }
 
 static void vcc_def_wakeup(struct sock *sk)
@@ -101,7 +101,7 @@
 	struct atm_vcc *vcc = atm_sk(sk);
 
 	return (vcc->qos.txtp.max_sdu +
-		atomic_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
+		refcount_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
 }
 
 static void vcc_write_space(struct sock *sk)
@@ -156,7 +156,7 @@
 	memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
 	memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
 	vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
-	atomic_set(&sk->sk_wmem_alloc, 1);
+	refcount_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_rmem_alloc, 0);
 	vcc->push = NULL;
 	vcc->pop = NULL;
@@ -630,7 +630,7 @@
 		goto out;
 	}
 	pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
-	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 
 	skb->dev = NULL; /* for paths shared with net_device interfaces */
 	ATM_SKB(skb)->atm_options = vcc->atm_options;
net/atm/lec.c (+2 -2)
@@ -181,7 +181,7 @@
 	ATM_SKB(skb)->vcc = vcc;
 	ATM_SKB(skb)->atm_options = vcc->atm_options;
 
-	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
 	if (vcc->send(vcc, skb) < 0) {
 		dev->stats.tx_dropped++;
 		return;
@@ -345,7 +345,7 @@
 	int i;
 	char *tmp; /* FIXME */
 
-	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
 	mesg = (struct atmlec_msg *)skb->data;
 	tmp = skb->data;
 	tmp += sizeof(struct atmlec_msg);
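
Note the shape of the atomic_sub() conversions here and in the files
below: refcount_t has no bare subtract, only refcount_sub_and_test(),
which returns true when the counter reaches zero. Because the socket's
+1 bias reference should make zero unreachable at these call sites, the
return value is wrapped in WARN_ON() to flag the impossible case. A
minimal sketch with a hypothetical helper name:

	#include <net/sock.h>

	/* hypothetical helper, not in this patch: uncharge 'bytes' from
	 * sk_wmem_alloc.  Hitting zero here would mean the bias reference
	 * was lost, so it is reported rather than silently freeing the sock. */
	static void wmem_uncharge(struct sock *sk, unsigned int bytes)
	{
		WARN_ON(refcount_sub_and_test(bytes, &sk->sk_wmem_alloc));
	}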
net/atm/mpc.c (+2 -2)
@@ -555,7 +555,7 @@
 		       sizeof(struct llc_snap_hdr));
 	}
 
-	atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
 	entry->shortcut->send(entry->shortcut, skb);
 	entry->packets_fwded++;
@@ -911,7 +911,7 @@
 
 	struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
 	struct k_message *mesg = (struct k_message *)skb->data;
-	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
 
 	if (mpc == NULL) {
 		pr_info("no mpc found\n");
net/atm/pppoatm.c (+1 -1)
@@ -350,7 +350,7 @@
 		return 1;
 	}
 
-	atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
 	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
 		 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
net/atm/raw.c (+1 -1)
@@ -35,7 +35,7 @@
 
 	pr_debug("(%d) %d -= %d\n",
 		 vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
-	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
 	dev_kfree_skb_any(skb);
 	sk->sk_write_space(sk);
 }
net/atm/signaling.c (+1 -1)
@@ -67,7 +67,7 @@
 	struct sock *sk;
 
 	msg = (struct atmsvc_msg *) skb->data;
-	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
 	vcc = *(struct atm_vcc **) &msg->vcc;
 	pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
 	sk = sk_atm(vcc);
net/caif/caif_socket.c (+1 -1)
@@ -1013,7 +1013,7 @@
 static void caif_sock_destructor(struct sock *sk)
 {
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-	caif_assert(!atomic_read(&sk->sk_wmem_alloc));
+	caif_assert(!refcount_read(&sk->sk_wmem_alloc));
 	caif_assert(sk_unhashed(sk));
 	caif_assert(!sk->sk_socket);
 	if (!sock_flag(sk, SOCK_DEAD)) {
net/core/datagram.c (+1 -1)
@@ -614,7 +614,7 @@
 		skb->data_len += copied;
 		skb->len += copied;
 		skb->truesize += truesize;
-		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
 		while (copied) {
 			int size = min_t(int, copied, PAGE_SIZE - start);
 			skb_fill_page_desc(skb, frag++, pages[n], start, size);
net/core/skbuff.c (+1 -1)
@@ -3024,7 +3024,7 @@
 		get_page(pfrag->page);
 
 		skb->truesize += copy;
-		atomic_add(copy, &sk->sk_wmem_alloc);
+		refcount_add(copy, &sk->sk_wmem_alloc);
 		skb->len += copy;
 		skb->data_len += copy;
 		offset += copy;
net/core/sock.c (+13 -13)
@@ -1528,7 +1528,7 @@
 		if (likely(sk->sk_net_refcnt))
 			get_net(net);
 		sock_net_set(sk, net);
-		atomic_set(&sk->sk_wmem_alloc, 1);
+		refcount_set(&sk->sk_wmem_alloc, 1);
 
 		mem_cgroup_sk_alloc(sk);
 		cgroup_sk_alloc(&sk->sk_cgrp_data);
@@ -1552,7 +1552,7 @@
 		sk->sk_destruct(sk);
 
 	filter = rcu_dereference_check(sk->sk_filter,
-				       atomic_read(&sk->sk_wmem_alloc) == 0);
+				       refcount_read(&sk->sk_wmem_alloc) == 0);
 	if (filter) {
 		sk_filter_uncharge(sk, filter);
 		RCU_INIT_POINTER(sk->sk_filter, NULL);
@@ -1602,7 +1602,7 @@
 	 * some packets are still in some tx queue.
 	 * If not null, sock_wfree() will call __sk_free(sk) later
 	 */
-	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
+	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
 		__sk_free(sk);
 }
 EXPORT_SYMBOL(sk_free);
@@ -1659,7 +1659,7 @@
 		/*
 		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
 		 */
-		atomic_set(&newsk->sk_wmem_alloc, 1);
+		refcount_set(&newsk->sk_wmem_alloc, 1);
 		atomic_set(&newsk->sk_omem_alloc, 0);
 		sk_init_common(newsk);
 
@@ -1787,7 +1787,7 @@
 		 * Keep a reference on sk_wmem_alloc, this will be released
 		 * after sk_write_space() call
 		 */
-		atomic_sub(len - 1, &sk->sk_wmem_alloc);
+		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
 		sk->sk_write_space(sk);
 		len = 1;
 	}
@@ -1795,7 +1795,7 @@
 	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
 	 * could not do because of in-flight packets
 	 */
-	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
+	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
 		__sk_free(sk);
 }
 EXPORT_SYMBOL(sock_wfree);
@@ -1807,7 +1807,7 @@
 {
 	struct sock *sk = skb->sk;
 
-	if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
+	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
 		__sk_free(sk);
 }
 
@@ -1829,7 +1829,7 @@
 	 * is enough to guarantee sk_free() wont free this sock until
 	 * all in-flight packets are completed
 	 */
-	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 }
 EXPORT_SYMBOL(skb_set_owner_w);
 
@@ -1852,7 +1852,7 @@
 		struct sock *sk = skb->sk;
 
 		if (atomic_inc_not_zero(&sk->sk_refcnt)) {
-			atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+			WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
 			skb->destructor = sock_efree;
 		}
 	} else {
@@ -1912,7 +1912,7 @@
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
 			     gfp_t priority)
 {
-	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+	if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 		struct sk_buff *skb = alloc_skb(size, priority);
 		if (skb) {
 			skb_set_owner_w(skb, sk);
@@ -1987,7 +1987,7 @@
 			break;
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+		if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
 			break;
 		if (sk->sk_shutdown & SEND_SHUTDOWN)
 			break;
@@ -2310,7 +2310,7 @@
 		if (sk->sk_type == SOCK_STREAM) {
 			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
 				return 1;
-		} else if (atomic_read(&sk->sk_wmem_alloc) <
+		} else if (refcount_read(&sk->sk_wmem_alloc) <
 			   prot->sysctl_wmem[0])
 				return 1;
 	}
@@ -2577,7 +2577,7 @@
 	/* Do not wake up a writer until he can make "significant"
 	 * progress.  --DaveM
 	 */
-	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
 		wq = rcu_dereference(sk->sk_wq);
 		if (skwq_has_sleeper(wq))
 			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
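
Taken together, the net/core/sock.c hunks preserve the existing scheme
in which sk_wmem_alloc doubles as a transmit-side hold on the socket:
sk_free() drops only the bias reference, and the final uncharge
(typically in sock_wfree()) performs the actual __sk_free(). A condensed
sketch of that lifecycle, with hypothetical helper names that are not
part of this patch:

	#include <net/sock.h>

	static void tx_hold_init(struct sock *sk)
	{
		refcount_set(&sk->sk_wmem_alloc, 1);	/* sk_alloc(): bias reference */
	}

	static void tx_hold_charge(struct sock *sk, struct sk_buff *skb)
	{
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);	/* skb_set_owner_w() */
	}

	static void tx_hold_uncharge(struct sock *sk, unsigned int bytes)
	{
		/* sock_wfree(): whoever drops the last unit -- in-flight
		 * bytes plus the bias released by sk_free() -- frees the sock */
		if (refcount_sub_and_test(bytes, &sk->sk_wmem_alloc))
			__sk_free(sk);
	}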
net/ipv4/af_inet.c (+1 -1)
@@ -150,7 +150,7 @@
 	}
 
 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 	WARN_ON(sk->sk_wmem_queued);
 	WARN_ON(sk->sk_forward_alloc);
 
net/ipv4/esp4.c (+1 -1)
@@ -307,7 +307,7 @@
 			skb->data_len += tailen;
 			skb->truesize += tailen;
 			if (sk)
-				atomic_add(tailen, &sk->sk_wmem_alloc);
+				refcount_add(tailen, &sk->sk_wmem_alloc);
 
 			goto out;
 		}
net/ipv4/ip_output.c (+3 -3)
@@ -1037,7 +1037,7 @@
 					(flags & MSG_DONTWAIT), &err);
 			} else {
 				skb = NULL;
-				if (atomic_read(&sk->sk_wmem_alloc) <=
+				if (refcount_read(&sk->sk_wmem_alloc) <=
 				    2 * sk->sk_sndbuf)
 					skb = sock_wmalloc(sk,
 							   alloclen + hh_len + 15, 1,
@@ -1145,7 +1145,7 @@
 			skb->len += copy;
 			skb->data_len += copy;
 			skb->truesize += copy;
-			atomic_add(copy, &sk->sk_wmem_alloc);
+			refcount_add(copy, &sk->sk_wmem_alloc);
 		}
 		offset += copy;
 		length -= copy;
@@ -1369,7 +1369,7 @@
 		skb->len += len;
 		skb->data_len += len;
 		skb->truesize += len;
-		atomic_add(len, &sk->sk_wmem_alloc);
+		refcount_add(len, &sk->sk_wmem_alloc);
 		offset += len;
 		size -= len;
 	}
net/ipv4/tcp.c (+2 -2)
@@ -664,7 +664,7 @@
 	return skb->len < size_goal &&
 	       sysctl_tcp_autocorking &&
 	       skb != tcp_write_queue_head(sk) &&
-	       atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
+	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
 }
 
 static void tcp_push(struct sock *sk, int flags, int mss_now,
@@ -692,7 +692,7 @@
 		/* It is possible TX completion already happened
 		 * before we set TSQ_THROTTLED.
 		 */
-		if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
+		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
 			return;
 	}
 
net/ipv4/tcp_offload.c (+1 -1)
@@ -152,7 +152,7 @@
 		swap(gso_skb->sk, skb->sk);
 		swap(gso_skb->destructor, skb->destructor);
 		sum_truesize += skb->truesize;
-		atomic_add(sum_truesize - gso_skb->truesize,
+		refcount_add(sum_truesize - gso_skb->truesize,
 			   &skb->sk->sk_wmem_alloc);
 	}
 
net/ipv4/tcp_output.c (+7 -8)
@@ -861,12 +861,11 @@
 	struct sock *sk = skb->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned long flags, nval, oval;
-	int wmem;
 
 	/* Keep one reference on sk_wmem_alloc.
 	 * Will be released by sk_free() from here or tcp_tasklet_func()
 	 */
-	wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
 
 	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
 	 * Wait until our queues (qdisc + devices) are drained.
@@ -874,7 +875,7 @@
 	 * - chance for incoming ACK (processed by another cpu maybe)
 	 *   to migrate this flow (skb->ooo_okay will be eventually set)
 	 */
-	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
+	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
 		goto out;
 
 	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
@@ -924,7 +925,7 @@
 		if (nval != oval)
 			continue;
 
-		if (!atomic_inc_not_zero(&sk->sk_wmem_alloc))
+		if (!refcount_inc_not_zero(&sk->sk_wmem_alloc))
 			break;
 		/* queue this socket to tasklet queue */
 		tsq = this_cpu_ptr(&tsq_tasklet);
@@ -1044,7 +1045,7 @@
 	skb->sk = sk;
 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
 	skb_set_hash_from_sk(skb, sk);
-	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 
 	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
 
@@ -2175,7 +2176,7 @@
 	limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
 	limit <<= factor;
 
-	if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
 		/* Always send the 1st or 2nd skb in write queue.
 		 * No need to wait for TX completion to call us back,
 		 * after softirq/tasklet schedule.
@@ -2191,7 +2192,7 @@
 		 * test again the condition.
 		 */
 		smp_mb__after_atomic();
-		if (atomic_read(&sk->sk_wmem_alloc) > limit)
+		if (refcount_read(&sk->sk_wmem_alloc) > limit)
 			return true;
 	}
 	return false;
@@ -2811,7 +2812,7 @@
 	/* Do not sent more than we queued. 1/4 is reserved for possible
 	 * copying overhead: fragmentation, tunneling, mangling etc.
 	 */
-	if (atomic_read(&sk->sk_wmem_alloc) >
+	if (refcount_read(&sk->sk_wmem_alloc) >
 	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
 		  sk->sk_sndbuf))
 		return -EAGAIN;
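
One behavioral nuance in the tcp_wfree() hunk above: unlike
atomic_sub_return(), refcount_sub_and_test() does not hand back the
post-subtraction value, so the ksoftirqd heuristic now re-reads the
counter in a separate step. That re-read can race with concurrent
updates, but it only steers a scheduling heuristic. Side by side
(illustrative fragments, not compilable on their own):

	/* before: one atomic op yielded the new value */
	wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc);
	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
		goto out;

	/* after: subtract (asserting the bias keeps us above zero), then re-read */
	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) &&
	    this_cpu_ksoftirqd() == current)
		goto out;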
net/ipv6/esp6.c (+1 -1)
@@ -275,7 +275,7 @@
 			skb->data_len += tailen;
 			skb->truesize += tailen;
 			if (sk)
-				atomic_add(tailen, &sk->sk_wmem_alloc);
+				refcount_add(tailen, &sk->sk_wmem_alloc);
 
 			goto out;
 		}
net/ipv6/ip6_output.c (+2 -2)
@@ -1472,7 +1472,7 @@
 					(flags & MSG_DONTWAIT), &err);
 			} else {
 				skb = NULL;
-				if (atomic_read(&sk->sk_wmem_alloc) <=
+				if (refcount_read(&sk->sk_wmem_alloc) <=
 				    2 * sk->sk_sndbuf)
 					skb = sock_wmalloc(sk,
 							   alloclen + hh_len, 1,
@@ -1581,7 +1581,7 @@
 			skb->len += copy;
 			skb->data_len += copy;
 			skb->truesize += copy;
-			atomic_add(copy, &sk->sk_wmem_alloc);
+			refcount_add(copy, &sk->sk_wmem_alloc);
 		}
 		offset += copy;
 		length -= copy;
net/kcm/kcmproc.c (+1 -1)
@@ -162,7 +162,7 @@
 		   psock->sk->sk_receive_queue.qlen,
 		   atomic_read(&psock->sk->sk_rmem_alloc),
 		   psock->sk->sk_write_queue.qlen,
-		   atomic_read(&psock->sk->sk_wmem_alloc));
+		   refcount_read(&psock->sk->sk_wmem_alloc));
 
 	if (psock->done)
 		seq_puts(seq, "Done ");
net/key/af_key.c (+1 -1)
@@ -109,7 +109,7 @@
 	}
 
 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 
 	atomic_dec(&net_pfkey->socks_nr);
 }
net/netlink/af_netlink.c (+1 -1)
@@ -372,7 +372,7 @@
 	}
 
 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 	WARN_ON(nlk_sk(sk)->groups);
 }
 
net/packet/af_packet.c (+2 -2)
@@ -1317,7 +1317,7 @@
 	skb_queue_purge(&sk->sk_error_queue);
 
 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 
 	if (!sock_flag(sk, SOCK_DEAD)) {
 		pr_err("Attempt to release alive packet socket: %p\n", sk);
@@ -2523,7 +2523,7 @@
 	skb->data_len = to_write;
 	skb->len += to_write;
 	skb->truesize += to_write;
-	atomic_add(to_write, &po->sk.sk_wmem_alloc);
+	refcount_add(to_write, &po->sk.sk_wmem_alloc);
 
 	while (likely(to_write)) {
 		nr_frags = skb_shinfo(skb)->nr_frags;
net/phonet/socket.c (+1 -1)
@@ -360,7 +360,7 @@
 		return POLLHUP;
 
 	if (sk->sk_state == TCP_ESTABLISHED &&
-	    atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
+	    refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
 	    atomic_read(&pn->tx_credits))
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 
net/rds/tcp_send.c (+1 -1)
@@ -202,7 +202,7 @@
 	tc->t_last_seen_una = rds_tcp_snd_una(tc);
 	rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
-	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
 		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
 
 out:
net/rxrpc/af_rxrpc.c (+2 -2)
@@ -53,7 +53,7 @@
  */
 static inline int rxrpc_writable(struct sock *sk)
 {
-	return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
+	return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
 }
 
 /*
@@ -730,7 +730,7 @@
 
 	rxrpc_purge_queue(&sk->sk_receive_queue);
 
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 	WARN_ON(!sk_unhashed(sk));
 	WARN_ON(sk->sk_socket);
 
net/sched/sch_atm.c (+1 -1)
@@ -498,7 +498,7 @@
 		ATM_SKB(skb)->vcc = flow->vcc;
 		memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
 		       flow->hdr_len);
-		atomic_add(skb->truesize,
+		refcount_add(skb->truesize,
 			   &sk_atm(flow->vcc)->sk_wmem_alloc);
 		/* atm.atm_options are already set by atm_tc_enqueue */
 		flow->vcc->send(flow->vcc, skb);
net/sctp/output.c (+1 -1)
@@ -402,7 +402,7 @@
 	 * therefore only reserve a single byte to keep socket around until
 	 * the packet has been transmitted.
 	 */
-	atomic_inc(&sk->sk_wmem_alloc);
+	refcount_inc(&sk->sk_wmem_alloc);
 }
 
 static int sctp_packet_pack(struct sctp_packet *packet,
net/sctp/proc.c (+1 -1)
@@ -363,7 +363,7 @@
 		   assoc->stream.outcnt, assoc->max_retrans,
 		   assoc->init_retries, assoc->shutdown_retries,
 		   assoc->rtx_data_chunks,
-		   atomic_read(&sk->sk_wmem_alloc),
+		   refcount_read(&sk->sk_wmem_alloc),
 		   sk->sk_wmem_queued,
 		   sk->sk_sndbuf,
 		   sk->sk_rcvbuf);
net/sctp/socket.c (+2 -2)
@@ -164,7 +164,7 @@
 				sizeof(struct sk_buff) +
 				sizeof(struct sctp_chunk);
 
-	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+	refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
 	sk->sk_wmem_queued += chunk->skb->truesize;
 	sk_mem_charge(sk, chunk->skb->truesize);
 }
@@ -7684,7 +7684,7 @@
 				sizeof(struct sk_buff) +
 				sizeof(struct sctp_chunk);
 
-	atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc));
 
 	/*
 	 * This undoes what is done via sctp_set_owner_w and sk_mem_charge
net/unix/af_unix.c (+3 -3)
@@ -442,7 +442,7 @@
 static int unix_writable(const struct sock *sk)
 {
 	return sk->sk_state != TCP_LISTEN &&
-	       (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
 }
 
 static void unix_write_space(struct sock *sk)
@@ -487,7 +487,7 @@
 
 	skb_queue_purge(&sk->sk_receive_queue);
 
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 	WARN_ON(!sk_unhashed(sk));
 	WARN_ON(sk->sk_socket);
 	if (!sock_flag(sk, SOCK_DEAD)) {
@@ -2033,7 +2033,7 @@
 		skb->len += size;
 		skb->data_len += size;
 		skb->truesize += size;
-		atomic_add(size, &sk->sk_wmem_alloc);
+		refcount_add(size, &sk->sk_wmem_alloc);
 
 		if (newskb) {
 			err = unix_scm_to_skb(&scm, skb, false);