Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'net-data-races'

Eric Dumazet says:

====================
net: annotate data-races

This series was inspired by a syzbot/KCSAN report.

This will later also permit some optimizations,
like not having to lock the socket while reading/writing
some of its fields.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
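
For context, the annotation pattern applied throughout the series is the usual KCSAN one: the writer (typically still running under lock_sock()) stores the field with WRITE_ONCE(), and lockless readers fetch it with READ_ONCE(), which prevents load/store tearing and marks the racy access pair as intentional. Below is a minimal userspace sketch of that pattern; struct sock_sketch, set_mark() and get_mark() are hypothetical names used only for illustration, and the READ_ONCE()/WRITE_ONCE() definitions are simplified stand-ins for the kernel's <linux/compiler.h> macros.

/*
 * Minimal sketch of the READ_ONCE()/WRITE_ONCE() pairing (userspace,
 * builds with gcc or clang). The macros below are simplified stand-ins
 * for the kernel versions in <linux/compiler.h>.
 */
#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

struct sock_sketch {		/* hypothetical stand-in for struct sock */
	uint32_t sk_mark;
};

/* Writer side: e.g. a setsockopt(SO_MARK) path, run under the socket lock. */
static void set_mark(struct sock_sketch *sk, uint32_t val)
{
	if (READ_ONCE(sk->sk_mark) != val)
		WRITE_ONCE(sk->sk_mark, val);	/* single, untorn store */
}

/* Reader side: e.g. an output path that may run without the socket lock. */
static uint32_t get_mark(const struct sock_sketch *sk)
{
	return READ_ONCE(sk->sk_mark);		/* single, untorn load */
}

int main(void)
{
	struct sock_sketch sk = { .sk_mark = 0 };

	set_mark(&sk, 42);
	printf("mark=%u\n", (unsigned int)get_mark(&sk));
	return 0;
}

The patches below only change annotations; any relaxation of socket locking is left for later, as the cover letter notes.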

+94 -80
+4 -3
include/net/inet_sock.h
···
 
 static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
 {
-	if (!sk->sk_mark &&
-	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
+	u32 mark = READ_ONCE(sk->sk_mark);
+
+	if (!mark && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
 		return skb->mark;
 
-	return sk->sk_mark;
+	return mark;
 }
 
 static inline int inet_request_bound_dev_if(const struct sock *sk,
+1 -1
include/net/ip.h
···
 {
 	ipcm_init(ipcm);
 
-	ipcm->sockc.mark = inet->sk.sk_mark;
+	ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
 	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
 	ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
 	ipcm->addr = inet->inet_saddr;
+2 -2
include/net/route.h
···
 				      __be16 dport, __be16 sport,
 				      __u8 proto, __u8 tos, int oif)
 {
-	flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
+	flowi4_init_output(fl4, oif, sk ? READ_ONCE(sk->sk_mark) : 0, tos,
 			   RT_SCOPE_UNIVERSE, proto,
 			   sk ? inet_sk_flowi_flags(sk) : 0,
 			   daddr, saddr, dport, sport, sock_net_uid(net, sk));
···
 	if (inet_sk(sk)->transparent)
 		flow_flags |= FLOWI_FLAG_ANYSRC;
 
-	flowi4_init_output(fl4, oif, sk->sk_mark, ip_sock_rt_tos(sk),
+	flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk),
 			   ip_sock_rt_scope(sk), protocol, flow_flags, dst,
 			   src, dport, sport, sk->sk_uid);
 }
+1 -1
net/can/raw.c
···
 
 	skb->dev = dev;
 	skb->priority = sk->sk_priority;
-	skb->mark = sk->sk_mark;
+	skb->mark = READ_ONCE(sk->sk_mark);
 	skb->tstamp = sockc.transmit_time;
 
 	skb_setup_tx_timestamp(skb, sockc.tsflags);
+40 -29
net/core/sock.c
···
 {
 	struct __kernel_sock_timeval tv;
 	int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval);
+	long val;
 
 	if (err)
 		return err;
···
 	if (tv.tv_sec < 0) {
 		static int warned __read_mostly;
 
-		*timeo_p = 0;
+		WRITE_ONCE(*timeo_p, 0);
 		if (warned < 10 && net_ratelimit()) {
 			warned++;
 			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
···
 		}
 		return 0;
 	}
-	*timeo_p = MAX_SCHEDULE_TIMEOUT;
-	if (tv.tv_sec == 0 && tv.tv_usec == 0)
-		return 0;
-	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
-		*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
+	val = MAX_SCHEDULE_TIMEOUT;
+	if ((tv.tv_sec || tv.tv_usec) &&
+	    (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)))
+		val = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec,
+						    USEC_PER_SEC / HZ);
+	WRITE_ONCE(*timeo_p, val);
 	return 0;
 }
 
···
 void sock_set_priority(struct sock *sk, u32 priority)
 {
 	lock_sock(sk);
-	sk->sk_priority = priority;
+	WRITE_ONCE(sk->sk_priority, priority);
 	release_sock(sk);
 }
 EXPORT_SYMBOL(sock_set_priority);
···
 {
 	lock_sock(sk);
 	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
-		sk->sk_sndtimeo = secs * HZ;
+		WRITE_ONCE(sk->sk_sndtimeo, secs * HZ);
 	else
-		sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+		WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT);
 	release_sock(sk);
 }
 EXPORT_SYMBOL(sock_set_sndtimeo);
···
 static void __sock_set_mark(struct sock *sk, u32 val)
 {
 	if (val != sk->sk_mark) {
-		sk->sk_mark = val;
+		WRITE_ONCE(sk->sk_mark, val);
 		sk_dst_reset(sk);
 	}
 }
···
 	bytes = round_down(bytes, PAGE_SIZE);
 
 	WARN_ON(bytes > sk->sk_reserved_mem);
-	sk->sk_reserved_mem -= bytes;
+	WRITE_ONCE(sk->sk_reserved_mem, sk->sk_reserved_mem - bytes);
 	sk_mem_reclaim(sk);
 }
 
···
 	}
 	sk->sk_forward_alloc += pages << PAGE_SHIFT;
 
-	sk->sk_reserved_mem += pages << PAGE_SHIFT;
+	WRITE_ONCE(sk->sk_reserved_mem,
+		   sk->sk_reserved_mem + (pages << PAGE_SHIFT));
 
 	return 0;
 }
···
 		if ((val >= 0 && val <= 6) ||
 		    sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
 		    sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
-			sk->sk_priority = val;
+			WRITE_ONCE(sk->sk_priority, val);
 		else
 			ret = -EPERM;
 		break;
···
 			cmpxchg(&sk->sk_pacing_status,
 				SK_PACING_NONE,
 				SK_PACING_NEEDED);
-		sk->sk_max_pacing_rate = ulval;
+		/* Pairs with READ_ONCE() from sk_getsockopt() */
+		WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
 		sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
 		break;
 		}
···
 		}
 		if ((u8)val == SOCK_TXREHASH_DEFAULT)
 			val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
-		/* Paired with READ_ONCE() in tcp_rtx_synack() */
+		/* Paired with READ_ONCE() in tcp_rtx_synack()
+		 * and sk_getsockopt().
+		 */
 		WRITE_ONCE(sk->sk_txrehash, (u8)val);
 		break;
 
···
 		break;
 
 	case SO_SNDBUF:
-		v.val = sk->sk_sndbuf;
+		v.val = READ_ONCE(sk->sk_sndbuf);
 		break;
 
 	case SO_RCVBUF:
-		v.val = sk->sk_rcvbuf;
+		v.val = READ_ONCE(sk->sk_rcvbuf);
 		break;
 
 	case SO_REUSEADDR:
···
 		break;
 
 	case SO_PRIORITY:
-		v.val = sk->sk_priority;
+		v.val = READ_ONCE(sk->sk_priority);
 		break;
 
 	case SO_LINGER:
···
 
 	case SO_RCVTIMEO_OLD:
 	case SO_RCVTIMEO_NEW:
-		lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
+		lv = sock_get_timeout(READ_ONCE(sk->sk_rcvtimeo), &v,
+				      SO_RCVTIMEO_OLD == optname);
 		break;
 
 	case SO_SNDTIMEO_OLD:
 	case SO_SNDTIMEO_NEW:
-		lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
+		lv = sock_get_timeout(READ_ONCE(sk->sk_sndtimeo), &v,
+				      SO_SNDTIMEO_OLD == optname);
 		break;
 
 	case SO_RCVLOWAT:
-		v.val = sk->sk_rcvlowat;
+		v.val = READ_ONCE(sk->sk_rcvlowat);
 		break;
 
 	case SO_SNDLOWAT:
···
 					      optval, optlen, len);
 
 	case SO_MARK:
-		v.val = sk->sk_mark;
+		v.val = READ_ONCE(sk->sk_mark);
 		break;
 
 	case SO_RCVMARK:
···
 		if (!sock->ops->set_peek_off)
 			return -EOPNOTSUPP;
 
-		v.val = sk->sk_peek_off;
+		v.val = READ_ONCE(sk->sk_peek_off);
 		break;
 	case SO_NOFCS:
 		v.val = sock_flag(sk, SOCK_NOFCS);
···
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	case SO_BUSY_POLL:
-		v.val = sk->sk_ll_usec;
+		v.val = READ_ONCE(sk->sk_ll_usec);
 		break;
 	case SO_PREFER_BUSY_POLL:
 		v.val = READ_ONCE(sk->sk_prefer_busy_poll);
···
 #endif
 
 	case SO_MAX_PACING_RATE:
+		/* The READ_ONCE() pair with the WRITE_ONCE() in sk_setsockopt() */
 		if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
 			lv = sizeof(v.ulval);
-			v.ulval = sk->sk_max_pacing_rate;
+			v.ulval = READ_ONCE(sk->sk_max_pacing_rate);
 		} else {
 			/* 32bit version */
-			v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
+			v.val = min_t(unsigned long, ~0U,
+				      READ_ONCE(sk->sk_max_pacing_rate));
 		}
 		break;
 
···
 		break;
 
 	case SO_RESERVE_MEM:
-		v.val = sk->sk_reserved_mem;
+		v.val = READ_ONCE(sk->sk_reserved_mem);
 		break;
 
 	case SO_TXREHASH:
-		v.val = sk->sk_txrehash;
+		/* Paired with WRITE_ONCE() in sk_setsockopt() */
+		v.val = READ_ONCE(sk->sk_txrehash);
 		break;
 
 	default:
···
 
 int sk_set_peek_off(struct sock *sk, int val)
 {
-	sk->sk_peek_off = val;
+	WRITE_ONCE(sk->sk_peek_off, val);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sk_set_peek_off);
+2 -2
net/dccp/ipv6.c
···
 		opt = ireq->ipv6_opt;
 		if (!opt)
 			opt = rcu_dereference(np->opt);
-		err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass,
-			       sk->sk_priority);
+		err = ip6_xmit(sk, skb, &fl6, READ_ONCE(sk->sk_mark), opt,
+			       np->tclass, sk->sk_priority);
 		rcu_read_unlock();
 		err = net_xmit_eval(err);
 	}
+2 -2
net/ipv4/inet_diag.c
···
 	}
 #endif
 
-	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
+	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, READ_ONCE(sk->sk_mark)))
 		goto errout;
 
 	if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
···
 	entry.ifindex = sk->sk_bound_dev_if;
 	entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
 	if (sk_fullsock(sk))
-		entry.mark = sk->sk_mark;
+		entry.mark = READ_ONCE(sk->sk_mark);
 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
 		entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
 	else if (sk->sk_state == TCP_TIME_WAIT)
+4 -4
net/ipv4/ip_output.c
···
 		ip_options_build(skb, &opt->opt, daddr, rt);
 	}
 
-	skb->priority = sk->sk_priority;
+	skb->priority = READ_ONCE(sk->sk_priority);
 	if (!skb->mark)
-		skb->mark = sk->sk_mark;
+		skb->mark = READ_ONCE(sk->sk_mark);
 
 	/* Send it out. */
 	return ip_local_out(net, skb->sk, skb);
···
 		       skb_shinfo(skb)->gso_segs ?: 1);
 
 	/* TODO : should we use skb->sk here instead of sk ? */
-	skb->priority = sk->sk_priority;
-	skb->mark = sk->sk_mark;
+	skb->priority = READ_ONCE(sk->sk_priority);
+	skb->mark = READ_ONCE(sk->sk_mark);
 
 	res = ip_local_out(net, sk, skb);
 	rcu_read_unlock();
+1 -1
net/ipv4/ip_sockglue.c
···
 	}
 	if (inet_sk(sk)->tos != val) {
 		inet_sk(sk)->tos = val;
-		sk->sk_priority = rt_tos2priority(val);
+		WRITE_ONCE(sk->sk_priority, rt_tos2priority(val));
 		sk_dst_reset(sk);
 	}
 }
+1 -1
net/ipv4/raw.c
···
 		goto error;
 	skb_reserve(skb, hlen);
 
-	skb->priority = sk->sk_priority;
+	skb->priority = READ_ONCE(sk->sk_priority);
 	skb->mark = sockc->mark;
 	skb->tstamp = sockc->transmit_time;
 	skb_dst_set(skb, &rt->dst);
+2 -2
net/ipv4/route.c
···
 	const struct inet_sock *inet = inet_sk(sk);
 
 	oif = sk->sk_bound_dev_if;
-	mark = sk->sk_mark;
+	mark = READ_ONCE(sk->sk_mark);
 	tos = ip_sock_rt_tos(sk);
 	scope = ip_sock_rt_scope(sk);
 	prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
···
 	inet_opt = rcu_dereference(inet->inet_opt);
 	if (inet_opt && inet_opt->opt.srr)
 		daddr = inet_opt->opt.faddr;
-	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+	flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
 			   ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
 			   ip_sock_rt_scope(sk),
 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+2 -2
net/ipv4/tcp_ipv4.c
···
 	ctl_sk = this_cpu_read(ipv4_tcp_sk);
 	sock_net_set(ctl_sk, net);
 	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
-			   inet_twsk(sk)->tw_mark : sk->sk_mark;
+			   inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
 	ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
-			   inet_twsk(sk)->tw_priority : sk->sk_priority;
+			   inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
 	transmit_time = tcp_transmit_time(sk);
 	ip_send_unicast_reply(ctl_sk,
 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
+1 -1
net/ipv6/ping.c
···
 
 	ipcm6_init_sk(&ipc6, np);
 	ipc6.sockc.tsflags = sk->sk_tsflags;
-	ipc6.sockc.mark = sk->sk_mark;
+	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
 
 	fl6.flowi6_oif = oif;
 
+3 -3
net/ipv6/raw.c
···
 	skb_reserve(skb, hlen);
 
 	skb->protocol = htons(ETH_P_IPV6);
-	skb->priority = sk->sk_priority;
+	skb->priority = READ_ONCE(sk->sk_priority);
 	skb->mark = sockc->mark;
 	skb->tstamp = sockc->transmit_time;
 
···
 	 */
 	memset(&fl6, 0, sizeof(fl6));
 
-	fl6.flowi6_mark = sk->sk_mark;
+	fl6.flowi6_mark = READ_ONCE(sk->sk_mark);
 	fl6.flowi6_uid = sk->sk_uid;
 
 	ipcm6_init(&ipc6);
 	ipc6.sockc.tsflags = sk->sk_tsflags;
-	ipc6.sockc.mark = sk->sk_mark;
+	ipc6.sockc.mark = fl6.flowi6_mark;
 
 	if (sin6) {
 		if (addr_len < SIN6_LEN_RFC2133)
+4 -3
net/ipv6/route.c
···
 	if (!oif && skb->dev)
 		oif = l3mdev_master_ifindex(skb->dev);
 
-	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
+	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark),
+			sk->sk_uid);
 
 	dst = __sk_dst_get(sk);
 	if (!dst || !dst->obsolete ||
···
 
 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
 {
-	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
-		     sk->sk_uid);
+	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if,
+		     READ_ONCE(sk->sk_mark), sk->sk_uid);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
 
+5 -4
net/ipv6/tcp_ipv6.c
···
 		opt = ireq->ipv6_opt;
 		if (!opt)
 			opt = rcu_dereference(np->opt);
-		err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
-			       tclass, sk->sk_priority);
+		err = ip6_xmit(sk, skb, fl6, skb->mark ? : READ_ONCE(sk->sk_mark),
+			       opt, tclass, sk->sk_priority);
 		rcu_read_unlock();
 		err = net_xmit_eval(err);
 	}
···
 		if (sk->sk_state == TCP_TIME_WAIT)
 			mark = inet_twsk(sk)->tw_mark;
 		else
-			mark = sk->sk_mark;
+			mark = READ_ONCE(sk->sk_mark);
 		skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
 	}
 	if (txhash) {
···
 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
 			READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
-			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority,
+			ipv6_get_dsfield(ipv6_hdr(skb)), 0,
+			READ_ONCE(sk->sk_priority),
 			READ_ONCE(tcp_rsk(req)->txhash));
 }
 
+2 -2
net/ipv6/udp.c
···
 	if (type == NDISC_REDIRECT) {
 		if (tunnel) {
 			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
-				     sk->sk_mark, sk->sk_uid);
+				     READ_ONCE(sk->sk_mark), sk->sk_uid);
 		} else {
 			ip6_sk_redirect(skb, sk);
 		}
···
 	ipcm6_init(&ipc6);
 	ipc6.gso_size = READ_ONCE(up->gso_size);
 	ipc6.sockc.tsflags = sk->sk_tsflags;
-	ipc6.sockc.mark = sk->sk_mark;
+	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
 
 	/* destination address check */
 	if (sin6) {
+1 -1
net/l2tp/l2tp_ip6.c
···
 	/* Get and verify the address */
 	memset(&fl6, 0, sizeof(fl6));
 
-	fl6.flowi6_mark = sk->sk_mark;
+	fl6.flowi6_mark = READ_ONCE(sk->sk_mark);
 	fl6.flowi6_uid = sk->sk_uid;
 
 	ipcm6_init(&ipc6);
+1 -1
net/mptcp/sockopt.c
···
 		break;
 	case SO_MARK:
 		if (READ_ONCE(ssk->sk_mark) != sk->sk_mark) {
-			ssk->sk_mark = sk->sk_mark;
+			WRITE_ONCE(ssk->sk_mark, sk->sk_mark);
 			sk_dst_reset(ssk);
 		}
 		break;
+1 -1
net/netfilter/nft_socket.c
···
 		break;
 	case NFT_SOCKET_MARK:
 		if (sk_fullsock(sk)) {
-			*dest = sk->sk_mark;
+			*dest = READ_ONCE(sk->sk_mark);
 		} else {
 			regs->verdict.code = NFT_BREAK;
 			return;
+2 -2
net/netfilter/xt_socket.c
···
 
 	if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
 	    transparent && sk_fullsock(sk))
-		pskb->mark = sk->sk_mark;
+		pskb->mark = READ_ONCE(sk->sk_mark);
 
 	if (sk != skb->sk)
 		sock_gen_put(sk);
···
 
 	if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
 	    transparent && sk_fullsock(sk))
-		pskb->mark = sk->sk_mark;
+		pskb->mark = READ_ONCE(sk->sk_mark);
 
 	if (sk != skb->sk)
 		sock_gen_put(sk);
+6 -6
net/packet/af_packet.c
···
 
 	skb->protocol = proto;
 	skb->dev = dev;
-	skb->priority = sk->sk_priority;
-	skb->mark = sk->sk_mark;
+	skb->priority = READ_ONCE(sk->sk_priority);
+	skb->mark = READ_ONCE(sk->sk_mark);
 	skb->tstamp = sockc.transmit_time;
 
 	skb_setup_tx_timestamp(skb, sockc.tsflags);
···
 
 	skb->protocol = proto;
 	skb->dev = dev;
-	skb->priority = po->sk.sk_priority;
-	skb->mark = po->sk.sk_mark;
+	skb->priority = READ_ONCE(po->sk.sk_priority);
+	skb->mark = READ_ONCE(po->sk.sk_mark);
 	skb->tstamp = sockc->transmit_time;
 	skb_setup_tx_timestamp(skb, sockc->tsflags);
 	skb_zcopy_set_nouarg(skb, ph.raw);
···
 		goto out_unlock;
 
 	sockcm_init(&sockc, sk);
-	sockc.mark = sk->sk_mark;
+	sockc.mark = READ_ONCE(sk->sk_mark);
 	if (msg->msg_controllen) {
 		err = sock_cmsg_send(sk, msg, &sockc);
 		if (unlikely(err))
···
 
 	skb->protocol = proto;
 	skb->dev = dev;
-	skb->priority = sk->sk_priority;
+	skb->priority = READ_ONCE(sk->sk_priority);
 	skb->mark = sockc.mark;
 	skb->tstamp = sockc.transmit_time;
 
+2 -2
net/sched/em_meta.c
···
 		*err = -1;
 		return;
 	}
-	dst->value = sk->sk_rcvtimeo / HZ;
+	dst->value = READ_ONCE(sk->sk_rcvtimeo) / HZ;
 }
 
 META_COLLECTOR(int_sk_sndtimeo)
···
 		*err = -1;
 		return;
 	}
-	dst->value = sk->sk_sndtimeo / HZ;
+	dst->value = READ_ONCE(sk->sk_sndtimeo) / HZ;
 }
 
 META_COLLECTOR(int_sk_sendmsg_off)
+1 -1
net/smc/af_smc.c
···
 	nsk->sk_rcvbuf = osk->sk_rcvbuf;
 	nsk->sk_sndtimeo = osk->sk_sndtimeo;
 	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
-	nsk->sk_mark = osk->sk_mark;
+	nsk->sk_mark = READ_ONCE(osk->sk_mark);
 	nsk->sk_priority = osk->sk_priority;
 	nsk->sk_rcvlowat = osk->sk_rcvlowat;
 	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
+1 -1
net/unix/af_unix.c
···
 	if (mutex_lock_interruptible(&u->iolock))
 		return -EINTR;
 
-	sk->sk_peek_off = val;
+	WRITE_ONCE(sk->sk_peek_off, val);
 	mutex_unlock(&u->iolock);
 
 	return 0;
+1 -1
net/xdp/xsk.c
···
 
 	skb->dev = dev;
 	skb->priority = xs->sk.sk_priority;
-	skb->mark = xs->sk.sk_mark;
+	skb->mark = READ_ONCE(xs->sk.sk_mark);
 	skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
 	skb->destructor = xsk_destruct_skb;
 
+1 -1
net/xfrm/xfrm_policy.c
···
 
 	match = xfrm_selector_match(&pol->selector, fl, family);
 	if (match) {
-		if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
+		if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v ||
 		    pol->if_id != if_id) {
 			pol = NULL;
 			goto out;