Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

net: annotate races around sk->sk_uid

sk->sk_uid can be read while another thread changes its
value in sockfs_setattr().

Add sk_uid(const struct sock *sk) helper to factorize the needed
READ_ONCE() annotations, and add corresponding WRITE_ONCE()
where needed.

Fixes: 86741ec25462 ("net: core: Add a UID field to struct sock.")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Lorenzo Colitti <lorenzo@google.com>
Reviewed-by: Maciej Żenczykowski <maze@google.com>
Link: https://patch.msgid.link/20250620133001.4090592-2-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Eric Dumazet; committed by Jakub Kicinski.
Commit e84a4927 (parent b630c781)

+42 -28
+2 -2
include/net/route.h
··· 153 153 ip_sock_rt_tos(sk), ip_sock_rt_scope(sk), 154 154 sk->sk_protocol, inet_sk_flowi_flags(sk), daddr, 155 155 inet->inet_saddr, inet->inet_dport, 156 - inet->inet_sport, sk->sk_uid); 156 + inet->inet_sport, sk_uid(sk)); 157 157 security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); 158 158 } 159 159 ··· 331 331 332 332 flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk), 333 333 ip_sock_rt_scope(sk), protocol, flow_flags, dst, 334 - src, dport, sport, sk->sk_uid); 334 + src, dport, sport, sk_uid(sk)); 335 335 } 336 336 337 337 static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst,
+10 -2
include/net/sock.h
··· 2076 2076 sock_set_flag(sk, SOCK_DEAD); 2077 2077 sk_set_socket(sk, NULL); 2078 2078 sk->sk_wq = NULL; 2079 + /* Note: sk_uid is unchanged. */ 2079 2080 write_unlock_bh(&sk->sk_callback_lock); 2080 2081 } 2081 2082 ··· 2087 2086 rcu_assign_pointer(sk->sk_wq, &parent->wq); 2088 2087 parent->sk = sk; 2089 2088 sk_set_socket(sk, parent); 2090 - sk->sk_uid = SOCK_INODE(parent)->i_uid; 2089 + WRITE_ONCE(sk->sk_uid, SOCK_INODE(parent)->i_uid); 2091 2090 security_sock_graft(sk, parent); 2092 2091 write_unlock_bh(&sk->sk_callback_lock); 2093 2092 } 2094 2093 2095 2094 kuid_t sock_i_uid(struct sock *sk); 2095 + 2096 + static inline kuid_t sk_uid(const struct sock *sk) 2097 + { 2098 + /* Paired with WRITE_ONCE() in sockfs_setattr() */ 2099 + return READ_ONCE(sk->sk_uid); 2100 + } 2101 + 2096 2102 unsigned long __sock_i_ino(struct sock *sk); 2097 2103 unsigned long sock_i_ino(struct sock *sk); 2098 2104 2099 2105 static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk) 2100 2106 { 2101 - return sk ? sk->sk_uid : make_kuid(net->user_ns, 0); 2107 + return sk ? sk_uid(sk) : make_kuid(net->user_ns, 0); 2102 2108 } 2103 2109 2104 2110 static inline u32 net_tx_rndhash(void)
+2 -2
net/ipv4/inet_connection_sock.c
··· 812 812 sk->sk_protocol, inet_sk_flowi_flags(sk), 813 813 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr, 814 814 ireq->ir_loc_addr, ireq->ir_rmt_port, 815 - htons(ireq->ir_num), sk->sk_uid); 815 + htons(ireq->ir_num), sk_uid(sk)); 816 816 security_req_classify_flow(req, flowi4_to_flowi_common(fl4)); 817 817 rt = ip_route_output_flow(net, fl4, sk); 818 818 if (IS_ERR(rt)) ··· 849 849 sk->sk_protocol, inet_sk_flowi_flags(sk), 850 850 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr, 851 851 ireq->ir_loc_addr, ireq->ir_rmt_port, 852 - htons(ireq->ir_num), sk->sk_uid); 852 + htons(ireq->ir_num), sk_uid(sk)); 853 853 security_req_classify_flow(req, flowi4_to_flowi_common(fl4)); 854 854 rt = ip_route_output_flow(net, fl4, sk); 855 855 if (IS_ERR(rt))
+1 -1
net/ipv4/ping.c
··· 781 781 flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, 782 782 ipc.tos & INET_DSCP_MASK, scope, 783 783 sk->sk_protocol, inet_sk_flowi_flags(sk), faddr, 784 - saddr, 0, 0, sk->sk_uid); 784 + saddr, 0, 0, sk_uid(sk)); 785 785 786 786 fl4.fl4_icmp_type = user_icmph.type; 787 787 fl4.fl4_icmp_code = user_icmph.code;
+1 -1
net/ipv4/raw.c
··· 610 610 hdrincl ? ipc.protocol : sk->sk_protocol, 611 611 inet_sk_flowi_flags(sk) | 612 612 (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), 613 - daddr, saddr, 0, 0, sk->sk_uid); 613 + daddr, saddr, 0, 0, sk_uid(sk)); 614 614 615 615 fl4.fl4_icmp_type = 0; 616 616 fl4.fl4_icmp_code = 0;
+2 -1
net/ipv4/route.c
··· 556 556 inet_test_bit(HDRINCL, sk) ? 557 557 IPPROTO_RAW : sk->sk_protocol, 558 558 inet_sk_flowi_flags(sk), 559 - daddr, inet->inet_saddr, 0, 0, sk->sk_uid); 559 + daddr, inet->inet_saddr, 0, 0, 560 + sk_uid(sk)); 560 561 rcu_read_unlock(); 561 562 } 562 563
+2 -1
net/ipv4/syncookies.c
··· 454 454 ip_sock_rt_tos(sk), ip_sock_rt_scope(sk), 455 455 IPPROTO_TCP, inet_sk_flowi_flags(sk), 456 456 opt->srr ? opt->faddr : ireq->ir_rmt_addr, 457 - ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid); 457 + ireq->ir_loc_addr, th->source, th->dest, 458 + sk_uid(sk)); 458 459 security_req_classify_flow(req, flowi4_to_flowi_common(&fl4)); 459 460 rt = ip_route_output_key(net, &fl4); 460 461 if (IS_ERR(rt)) {
+2 -1
net/ipv4/udp.c
··· 1445 1445 flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, 1446 1446 ipc.tos & INET_DSCP_MASK, scope, 1447 1447 sk->sk_protocol, flow_flags, faddr, saddr, 1448 - dport, inet->inet_sport, sk->sk_uid); 1448 + dport, inet->inet_sport, 1449 + sk_uid(sk)); 1449 1450 1450 1451 security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); 1451 1452 rt = ip_route_output_flow(net, fl4, sk);
+1 -1
net/ipv6/af_inet6.c
··· 842 842 fl6.flowi6_mark = sk->sk_mark; 843 843 fl6.fl6_dport = inet->inet_dport; 844 844 fl6.fl6_sport = inet->inet_sport; 845 - fl6.flowi6_uid = sk->sk_uid; 845 + fl6.flowi6_uid = sk_uid(sk); 846 846 security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 847 847 848 848 rcu_read_lock();
+1 -1
net/ipv6/datagram.c
··· 53 53 fl6->fl6_dport = inet->inet_dport; 54 54 fl6->fl6_sport = inet->inet_sport; 55 55 fl6->flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label); 56 - fl6->flowi6_uid = sk->sk_uid; 56 + fl6->flowi6_uid = sk_uid(sk); 57 57 58 58 if (!oif) 59 59 oif = np->sticky_pktinfo.ipi6_ifindex;
+2 -2
net/ipv6/inet6_connection_sock.c
··· 45 45 fl6->flowi6_mark = ireq->ir_mark; 46 46 fl6->fl6_dport = ireq->ir_rmt_port; 47 47 fl6->fl6_sport = htons(ireq->ir_num); 48 - fl6->flowi6_uid = sk->sk_uid; 48 + fl6->flowi6_uid = sk_uid(sk); 49 49 security_req_classify_flow(req, flowi6_to_flowi_common(fl6)); 50 50 51 51 dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); ··· 79 79 fl6->flowi6_mark = sk->sk_mark; 80 80 fl6->fl6_sport = inet->inet_sport; 81 81 fl6->fl6_dport = inet->inet_dport; 82 - fl6->flowi6_uid = sk->sk_uid; 82 + fl6->flowi6_uid = sk_uid(sk); 83 83 security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6)); 84 84 85 85 rcu_read_lock();
+1 -1
net/ipv6/ping.c
··· 142 142 fl6.saddr = np->saddr; 143 143 fl6.daddr = *daddr; 144 144 fl6.flowi6_mark = ipc6.sockc.mark; 145 - fl6.flowi6_uid = sk->sk_uid; 145 + fl6.flowi6_uid = sk_uid(sk); 146 146 fl6.fl6_icmp_type = user_icmph.icmp6_type; 147 147 fl6.fl6_icmp_code = user_icmph.icmp6_code; 148 148 security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
+1 -1
net/ipv6/raw.c
··· 777 777 memset(&fl6, 0, sizeof(fl6)); 778 778 779 779 fl6.flowi6_mark = ipc6.sockc.mark; 780 - fl6.flowi6_uid = sk->sk_uid; 780 + fl6.flowi6_uid = sk_uid(sk); 781 781 782 782 if (sin6) { 783 783 if (addr_len < SIN6_LEN_RFC2133)
+2 -2
net/ipv6/route.c
··· 3011 3011 oif = l3mdev_master_ifindex(skb->dev); 3012 3012 3013 3013 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark), 3014 - sk->sk_uid); 3014 + sk_uid(sk)); 3015 3015 3016 3016 dst = __sk_dst_get(sk); 3017 3017 if (!dst || !dst->obsolete || ··· 3233 3233 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk) 3234 3234 { 3235 3235 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, 3236 - READ_ONCE(sk->sk_mark), sk->sk_uid); 3236 + READ_ONCE(sk->sk_mark), sk_uid(sk)); 3237 3237 } 3238 3238 EXPORT_SYMBOL_GPL(ip6_sk_redirect); 3239 3239
+1 -1
net/ipv6/syncookies.c
··· 236 236 fl6.flowi6_mark = ireq->ir_mark; 237 237 fl6.fl6_dport = ireq->ir_rmt_port; 238 238 fl6.fl6_sport = inet_sk(sk)->inet_sport; 239 - fl6.flowi6_uid = sk->sk_uid; 239 + fl6.flowi6_uid = sk_uid(sk); 240 240 security_req_classify_flow(req, flowi6_to_flowi_common(&fl6)); 241 241 242 242 dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
+1 -1
net/ipv6/tcp_ipv6.c
··· 269 269 fl6.fl6_sport = inet->inet_sport; 270 270 if (IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) && !fl6.fl6_sport) 271 271 fl6.flowi6_flags = FLOWI_FLAG_ANY_SPORT; 272 - fl6.flowi6_uid = sk->sk_uid; 272 + fl6.flowi6_uid = sk_uid(sk); 273 273 274 274 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); 275 275 final_p = fl6_update_dst(&fl6, opt, &final);
+3 -2
net/ipv6/udp.c
··· 750 750 if (type == NDISC_REDIRECT) { 751 751 if (tunnel) { 752 752 ip6_redirect(skb, sock_net(sk), inet6_iif(skb), 753 - READ_ONCE(sk->sk_mark), sk->sk_uid); 753 + READ_ONCE(sk->sk_mark), 754 + sk_uid(sk)); 754 755 } else { 755 756 ip6_sk_redirect(skb, sk); 756 757 } ··· 1621 1620 if (!fl6->flowi6_oif) 1622 1621 fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; 1623 1622 1624 - fl6->flowi6_uid = sk->sk_uid; 1623 + fl6->flowi6_uid = sk_uid(sk); 1625 1624 1626 1625 if (msg->msg_controllen) { 1627 1626 opt = &opt_space;
+1 -1
net/l2tp/l2tp_ip6.c
··· 545 545 memset(&fl6, 0, sizeof(fl6)); 546 546 547 547 fl6.flowi6_mark = READ_ONCE(sk->sk_mark); 548 - fl6.flowi6_uid = sk->sk_uid; 548 + fl6.flowi6_uid = sk_uid(sk); 549 549 550 550 ipcm6_init_sk(&ipc6, sk); 551 551
+1 -1
net/mptcp/protocol.c
··· 3503 3503 write_lock_bh(&sk->sk_callback_lock); 3504 3504 rcu_assign_pointer(sk->sk_wq, &parent->wq); 3505 3505 sk_set_socket(sk, parent); 3506 - sk->sk_uid = SOCK_INODE(parent)->i_uid; 3506 + WRITE_ONCE(sk->sk_uid, SOCK_INODE(parent)->i_uid); 3507 3507 write_unlock_bh(&sk->sk_callback_lock); 3508 3508 } 3509 3509
+5 -3
net/socket.c
··· 592 592 if (!err && (iattr->ia_valid & ATTR_UID)) { 593 593 struct socket *sock = SOCKET_I(d_inode(dentry)); 594 594 595 - if (sock->sk) 596 - sock->sk->sk_uid = iattr->ia_uid; 597 - else 595 + if (sock->sk) { 596 + /* Paired with READ_ONCE() in sk_uid() */ 597 + WRITE_ONCE(sock->sk->sk_uid, iattr->ia_uid); 598 + } else { 598 599 err = -ENOENT; 600 + } 599 601 } 600 602 601 603 return err;