Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: remove sock_i_uid()

Difference between sock_i_uid() and sk_uid() is that
after sock_orphan(), sock_i_uid() returns GLOBAL_ROOT_UID
while sk_uid() returns the last cached sk->sk_uid value.

None of sock_i_uid() callers care about this.

Use sk_uid() which is much faster and inlined.

Note that diag/dump users are calling sock_i_ino() and
cannot see the full benefit yet.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Lorenzo Colitti <lorenzo@google.com>
Reviewed-by: Maciej Żenczykowski <maze@google.com>
Link: https://patch.msgid.link/20250620133001.4090592-3-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Eric Dumazet and committed by Jakub Kicinski
c51da3f7 e84a4927

+50 -66
-2
include/net/sock.h
··· 2092 2092 write_unlock_bh(&sk->sk_callback_lock); 2093 2093 } 2094 2094 2095 - kuid_t sock_i_uid(struct sock *sk); 2096 - 2097 2095 static inline kuid_t sk_uid(const struct sock *sk) 2098 2096 { 2099 2097 /* Paired with WRITE_ONCE() in sockfs_setattr() */
+1 -1
net/appletalk/atalk_proc.c
··· 181 181 sk_wmem_alloc_get(s), 182 182 sk_rmem_alloc_get(s), 183 183 s->sk_state, 184 - from_kuid_munged(seq_user_ns(seq), sock_i_uid(s))); 184 + from_kuid_munged(seq_user_ns(seq), sk_uid(s))); 185 185 out: 186 186 return 0; 187 187 }
+1 -1
net/bluetooth/af_bluetooth.c
··· 815 815 refcount_read(&sk->sk_refcnt), 816 816 sk_rmem_alloc_get(sk), 817 817 sk_wmem_alloc_get(sk), 818 - from_kuid(seq_user_ns(seq), sock_i_uid(sk)), 818 + from_kuid(seq_user_ns(seq), sk_uid(sk)), 819 819 sock_i_ino(sk), 820 820 bt->parent ? sock_i_ino(bt->parent) : 0LU); 821 821
-11
net/core/sock.c
··· 2780 2780 EXPORT_SYMBOL(sock_pfree); 2781 2781 #endif /* CONFIG_INET */ 2782 2782 2783 - kuid_t sock_i_uid(struct sock *sk) 2784 - { 2785 - kuid_t uid; 2786 - 2787 - read_lock_bh(&sk->sk_callback_lock); 2788 - uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID; 2789 - read_unlock_bh(&sk->sk_callback_lock); 2790 - return uid; 2791 - } 2792 - EXPORT_SYMBOL(sock_i_uid); 2793 - 2794 2783 unsigned long __sock_i_ino(struct sock *sk) 2795 2784 { 2796 2785 unsigned long ino;
+12 -15
net/ipv4/inet_connection_sock.c
··· 168 168 } 169 169 170 170 static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2, 171 - kuid_t sk_uid, bool relax, 171 + kuid_t uid, bool relax, 172 172 bool reuseport_cb_ok, bool reuseport_ok) 173 173 { 174 174 int bound_dev_if2; ··· 185 185 if (!relax || (!reuseport_ok && sk->sk_reuseport && 186 186 sk2->sk_reuseport && reuseport_cb_ok && 187 187 (sk2->sk_state == TCP_TIME_WAIT || 188 - uid_eq(sk_uid, sock_i_uid(sk2))))) 188 + uid_eq(uid, sk_uid(sk2))))) 189 189 return true; 190 190 } else if (!reuseport_ok || !sk->sk_reuseport || 191 191 !sk2->sk_reuseport || !reuseport_cb_ok || 192 192 (sk2->sk_state != TCP_TIME_WAIT && 193 - !uid_eq(sk_uid, sock_i_uid(sk2)))) { 193 + !uid_eq(uid, sk_uid(sk2)))) { 194 194 return true; 195 195 } 196 196 } ··· 198 198 } 199 199 200 200 static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2, 201 - kuid_t sk_uid, bool relax, 201 + kuid_t uid, bool relax, 202 202 bool reuseport_cb_ok, bool reuseport_ok) 203 203 { 204 204 if (ipv6_only_sock(sk2)) { ··· 211 211 #endif 212 212 } 213 213 214 - return inet_bind_conflict(sk, sk2, sk_uid, relax, 214 + return inet_bind_conflict(sk, sk2, uid, relax, 215 215 reuseport_cb_ok, reuseport_ok); 216 216 } 217 217 218 218 static bool inet_bhash2_conflict(const struct sock *sk, 219 219 const struct inet_bind2_bucket *tb2, 220 - kuid_t sk_uid, 220 + kuid_t uid, 221 221 bool relax, bool reuseport_cb_ok, 222 222 bool reuseport_ok) 223 223 { 224 224 struct sock *sk2; 225 225 226 226 sk_for_each_bound(sk2, &tb2->owners) { 227 - if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax, 227 + if (__inet_bhash2_conflict(sk, sk2, uid, relax, 228 228 reuseport_cb_ok, reuseport_ok)) 229 229 return true; 230 230 } ··· 242 242 const struct inet_bind2_bucket *tb2, /* may be null */ 243 243 bool relax, bool reuseport_ok) 244 244 { 245 - kuid_t uid = sock_i_uid((struct sock *)sk); 246 245 struct sock_reuseport *reuseport_cb; 246 + kuid_t uid = sk_uid(sk); 247 247 bool 
reuseport_cb_ok; 248 248 struct sock *sk2; 249 249 ··· 287 287 static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev, 288 288 bool relax, bool reuseport_ok) 289 289 { 290 - kuid_t uid = sock_i_uid((struct sock *)sk); 291 290 const struct net *net = sock_net(sk); 292 291 struct sock_reuseport *reuseport_cb; 293 292 struct inet_bind_hashbucket *head2; 294 293 struct inet_bind2_bucket *tb2; 294 + kuid_t uid = sk_uid(sk); 295 295 bool conflict = false; 296 296 bool reuseport_cb_ok; 297 297 ··· 425 425 static inline int sk_reuseport_match(struct inet_bind_bucket *tb, 426 426 struct sock *sk) 427 427 { 428 - kuid_t uid = sock_i_uid(sk); 429 - 430 428 if (tb->fastreuseport <= 0) 431 429 return 0; 432 430 if (!sk->sk_reuseport) 433 431 return 0; 434 432 if (rcu_access_pointer(sk->sk_reuseport_cb)) 435 433 return 0; 436 - if (!uid_eq(tb->fastuid, uid)) 434 + if (!uid_eq(tb->fastuid, sk_uid(sk))) 437 435 return 0; 438 436 /* We only need to check the rcv_saddr if this tb was once marked 439 437 * without fastreuseport and then was reset, as we can only know that ··· 456 458 void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, 457 459 struct sock *sk) 458 460 { 459 - kuid_t uid = sock_i_uid(sk); 460 461 bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; 461 462 462 463 if (hlist_empty(&tb->bhash2)) { 463 464 tb->fastreuse = reuse; 464 465 if (sk->sk_reuseport) { 465 466 tb->fastreuseport = FASTREUSEPORT_ANY; 466 - tb->fastuid = uid; 467 + tb->fastuid = sk_uid(sk); 467 468 tb->fast_rcv_saddr = sk->sk_rcv_saddr; 468 469 tb->fast_ipv6_only = ipv6_only_sock(sk); 469 470 tb->fast_sk_family = sk->sk_family; ··· 489 492 */ 490 493 if (!sk_reuseport_match(tb, sk)) { 491 494 tb->fastreuseport = FASTREUSEPORT_STRICT; 492 - tb->fastuid = uid; 495 + tb->fastuid = sk_uid(sk); 493 496 tb->fast_rcv_saddr = sk->sk_rcv_saddr; 494 497 tb->fast_ipv6_only = ipv6_only_sock(sk); 495 498 tb->fast_sk_family = sk->sk_family;
+1 -1
net/ipv4/inet_diag.c
··· 181 181 goto errout; 182 182 #endif 183 183 184 - r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); 184 + r->idiag_uid = from_kuid_munged(user_ns, sk_uid(sk)); 185 185 r->idiag_inode = sock_i_ino(sk); 186 186 187 187 memset(&inet_sockopt, 0, sizeof(inet_sockopt));
+2 -2
net/ipv4/inet_hashtables.c
··· 721 721 { 722 722 struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash; 723 723 const struct hlist_nulls_node *node; 724 + kuid_t uid = sk_uid(sk); 724 725 struct sock *sk2; 725 - kuid_t uid = sock_i_uid(sk); 726 726 727 727 sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) { 728 728 if (sk2 != sk && ··· 730 730 ipv6_only_sock(sk2) == ipv6_only_sock(sk) && 731 731 sk2->sk_bound_dev_if == sk->sk_bound_dev_if && 732 732 inet_csk(sk2)->icsk_bind_hash == tb && 733 - sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) && 733 + sk2->sk_reuseport && uid_eq(uid, sk_uid(sk2)) && 734 734 inet_rcv_saddr_equal(sk, sk2, false)) 735 735 return reuseport_add_sock(sk, sk2, 736 736 inet_rcv_saddr_any(sk));
+1 -1
net/ipv4/ping.c
··· 1116 1116 sk_wmem_alloc_get(sp), 1117 1117 sk_rmem_alloc_get(sp), 1118 1118 0, 0L, 0, 1119 - from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 1119 + from_kuid_munged(seq_user_ns(f), sk_uid(sp)), 1120 1120 0, sock_i_ino(sp), 1121 1121 refcount_read(&sp->sk_refcnt), sp, 1122 1122 atomic_read(&sp->sk_drops));
+1 -1
net/ipv4/raw.c
··· 1043 1043 sk_wmem_alloc_get(sp), 1044 1044 sk_rmem_alloc_get(sp), 1045 1045 0, 0L, 0, 1046 - from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), 1046 + from_kuid_munged(seq_user_ns(seq), sk_uid(sp)), 1047 1047 0, sock_i_ino(sp), 1048 1048 refcount_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); 1049 1049 }
+4 -4
net/ipv4/tcp_ipv4.c
··· 2896 2896 jiffies_delta_to_clock_t(delta), 2897 2897 req->num_timeout, 2898 2898 from_kuid_munged(seq_user_ns(f), 2899 - sock_i_uid(req->rsk_listener)), 2899 + sk_uid(req->rsk_listener)), 2900 2900 0, /* non standard timer */ 2901 2901 0, /* open_requests have no inode */ 2902 2902 0, ··· 2954 2954 timer_active, 2955 2955 jiffies_delta_to_clock_t(timer_expires - jiffies), 2956 2956 icsk->icsk_retransmits, 2957 - from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)), 2957 + from_kuid_munged(seq_user_ns(f), sk_uid(sk)), 2958 2958 icsk->icsk_probes_out, 2959 2959 sock_i_ino(sk), 2960 2960 refcount_read(&sk->sk_refcnt), sk, ··· 3246 3246 const struct request_sock *req = v; 3247 3247 3248 3248 uid = from_kuid_munged(seq_user_ns(seq), 3249 - sock_i_uid(req->rsk_listener)); 3249 + sk_uid(req->rsk_listener)); 3250 3250 } else { 3251 - uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); 3251 + uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk)); 3252 3252 } 3253 3253 3254 3254 meta.seq = seq;
+8 -8
net/ipv4/udp.c
··· 145 145 unsigned long *bitmap, 146 146 struct sock *sk, unsigned int log) 147 147 { 148 + kuid_t uid = sk_uid(sk); 148 149 struct sock *sk2; 149 - kuid_t uid = sock_i_uid(sk); 150 150 151 151 sk_for_each(sk2, &hslot->head) { 152 152 if (net_eq(sock_net(sk2), net) && ··· 158 158 inet_rcv_saddr_equal(sk, sk2, true)) { 159 159 if (sk2->sk_reuseport && sk->sk_reuseport && 160 160 !rcu_access_pointer(sk->sk_reuseport_cb) && 161 - uid_eq(uid, sock_i_uid(sk2))) { 161 + uid_eq(uid, sk_uid(sk2))) { 162 162 if (!bitmap) 163 163 return 0; 164 164 } else { ··· 180 180 struct udp_hslot *hslot2, 181 181 struct sock *sk) 182 182 { 183 + kuid_t uid = sk_uid(sk); 183 184 struct sock *sk2; 184 - kuid_t uid = sock_i_uid(sk); 185 185 int res = 0; 186 186 187 187 spin_lock(&hslot2->lock); ··· 195 195 inet_rcv_saddr_equal(sk, sk2, true)) { 196 196 if (sk2->sk_reuseport && sk->sk_reuseport && 197 197 !rcu_access_pointer(sk->sk_reuseport_cb) && 198 - uid_eq(uid, sock_i_uid(sk2))) { 198 + uid_eq(uid, sk_uid(sk2))) { 199 199 res = 0; 200 200 } else { 201 201 res = 1; ··· 210 210 static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot) 211 211 { 212 212 struct net *net = sock_net(sk); 213 - kuid_t uid = sock_i_uid(sk); 213 + kuid_t uid = sk_uid(sk); 214 214 struct sock *sk2; 215 215 216 216 sk_for_each(sk2, &hslot->head) { ··· 220 220 ipv6_only_sock(sk2) == ipv6_only_sock(sk) && 221 221 (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) && 222 222 (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && 223 - sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) && 223 + sk2->sk_reuseport && uid_eq(uid, sk_uid(sk2)) && 224 224 inet_rcv_saddr_equal(sk, sk2, false)) { 225 225 return reuseport_add_sock(sk, sk2, 226 226 inet_rcv_saddr_any(sk)); ··· 3387 3387 sk_wmem_alloc_get(sp), 3388 3388 udp_rqueue_get(sp), 3389 3389 0, 0L, 0, 3390 - from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 3390 + from_kuid_munged(seq_user_ns(f), sk_uid(sp)), 3391 3391 0, sock_i_ino(sp), 3392 3392 
refcount_read(&sp->sk_refcnt), sp, 3393 3393 atomic_read(&sp->sk_drops)); ··· 3630 3630 goto unlock; 3631 3631 } 3632 3632 3633 - uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); 3633 + uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk)); 3634 3634 meta.seq = seq; 3635 3635 prog = bpf_iter_get_info(&meta, false); 3636 3636 ret = udp_prog_seq_show(prog, &meta, v, uid, state->bucket);
+1 -1
net/ipv6/datagram.c
··· 1064 1064 sk_wmem_alloc_get(sp), 1065 1065 rqueue, 1066 1066 0, 0L, 0, 1067 - from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), 1067 + from_kuid_munged(seq_user_ns(seq), sk_uid(sp)), 1068 1068 0, 1069 1069 sock_i_ino(sp), 1070 1070 refcount_read(&sp->sk_refcnt), sp,
+2 -2
net/ipv6/tcp_ipv6.c
··· 2168 2168 jiffies_to_clock_t(ttd), 2169 2169 req->num_timeout, 2170 2170 from_kuid_munged(seq_user_ns(seq), 2171 - sock_i_uid(req->rsk_listener)), 2171 + sk_uid(req->rsk_listener)), 2172 2172 0, /* non standard timer */ 2173 2173 0, /* open_requests have no inode */ 2174 2174 0, req); ··· 2234 2234 timer_active, 2235 2235 jiffies_delta_to_clock_t(timer_expires - jiffies), 2236 2236 icsk->icsk_retransmits, 2237 - from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), 2237 + from_kuid_munged(seq_user_ns(seq), sk_uid(sp)), 2238 2238 icsk->icsk_probes_out, 2239 2239 sock_i_ino(sp), 2240 2240 refcount_read(&sp->sk_refcnt), sp,
+1 -1
net/key/af_key.c
··· 3788 3788 refcount_read(&s->sk_refcnt), 3789 3789 sk_rmem_alloc_get(s), 3790 3790 sk_wmem_alloc_get(s), 3791 - from_kuid_munged(seq_user_ns(f), sock_i_uid(s)), 3791 + from_kuid_munged(seq_user_ns(f), sk_uid(s)), 3792 3792 sock_i_ino(s) 3793 3793 ); 3794 3794 return 0;
+1 -1
net/llc/llc_proc.c
··· 151 151 sk_wmem_alloc_get(sk), 152 152 sk_rmem_alloc_get(sk) - llc->copied_seq, 153 153 sk->sk_state, 154 - from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), 154 + from_kuid_munged(seq_user_ns(seq), sk_uid(sk)), 155 155 llc->link); 156 156 out: 157 157 return 0;
+1 -1
net/packet/af_packet.c
··· 4783 4783 READ_ONCE(po->ifindex), 4784 4784 packet_sock_flag(po, PACKET_SOCK_RUNNING), 4785 4785 atomic_read(&s->sk_rmem_alloc), 4786 - from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)), 4786 + from_kuid_munged(seq_user_ns(seq), sk_uid(s)), 4787 4787 sock_i_ino(s)); 4788 4788 } 4789 4789
+1 -1
net/packet/diag.c
··· 153 153 154 154 if ((req->pdiag_show & PACKET_SHOW_INFO) && 155 155 nla_put_u32(skb, PACKET_DIAG_UID, 156 - from_kuid_munged(user_ns, sock_i_uid(sk)))) 156 + from_kuid_munged(user_ns, sk_uid(sk)))) 157 157 goto out_nlmsg_trim; 158 158 159 159 if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
+2 -2
net/phonet/socket.c
··· 584 584 sk->sk_protocol, pn->sobject, pn->dobject, 585 585 pn->resource, sk->sk_state, 586 586 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), 587 - from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), 587 + from_kuid_munged(seq_user_ns(seq), sk_uid(sk)), 588 588 sock_i_ino(sk), 589 589 refcount_read(&sk->sk_refcnt), sk, 590 590 atomic_read(&sk->sk_drops)); ··· 755 755 756 756 seq_printf(seq, "%02X %5u %lu", 757 757 (int) (psk - pnres.sk), 758 - from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), 758 + from_kuid_munged(seq_user_ns(seq), sk_uid(sk)), 759 759 sock_i_ino(sk)); 760 760 } 761 761 seq_pad(seq, '\n');
+1 -1
net/sctp/input.c
··· 756 756 struct sock *sk2 = ep2->base.sk; 757 757 758 758 if (!net_eq(sock_net(sk2), net) || sk2 == sk || 759 - !uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) || 759 + !uid_eq(sk_uid(sk2), sk_uid(sk)) || 760 760 !sk2->sk_reuseport) 761 761 continue; 762 762
+2 -2
net/sctp/proc.c
··· 177 177 seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk, 178 178 sctp_sk(sk)->type, sk->sk_state, hash, 179 179 ep->base.bind_addr.port, 180 - from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), 180 + from_kuid_munged(seq_user_ns(seq), sk_uid(sk)), 181 181 sock_i_ino(sk)); 182 182 183 183 sctp_seq_dump_local_addrs(seq, &ep->base); ··· 267 267 assoc->assoc_id, 268 268 assoc->sndbuf_used, 269 269 atomic_read(&assoc->rmem_alloc), 270 - from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), 270 + from_kuid_munged(seq_user_ns(seq), sk_uid(sk)), 271 271 sock_i_ino(sk), 272 272 epb->bind_addr.port, 273 273 assoc->peer.port);
+2 -2
net/sctp/socket.c
··· 8345 8345 bool reuse = (sk->sk_reuse || sp->reuse); 8346 8346 struct sctp_bind_hashbucket *head; /* hash list */ 8347 8347 struct net *net = sock_net(sk); 8348 - kuid_t uid = sock_i_uid(sk); 8349 8348 struct sctp_bind_bucket *pp; 8349 + kuid_t uid = sk_uid(sk); 8350 8350 unsigned short snum; 8351 8351 int ret; 8352 8352 ··· 8444 8444 (reuse && (sk2->sk_reuse || sp2->reuse) && 8445 8445 sk2->sk_state != SCTP_SS_LISTENING) || 8446 8446 (sk->sk_reuseport && sk2->sk_reuseport && 8447 - uid_eq(uid, sock_i_uid(sk2)))) 8447 + uid_eq(uid, sk_uid(sk2)))) 8448 8448 continue; 8449 8449 8450 8450 if ((!sk->sk_bound_dev_if || !bound_dev_if2 ||
+1 -1
net/smc/smc_diag.c
··· 64 64 if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown)) 65 65 return 1; 66 66 67 - r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); 67 + r->diag_uid = from_kuid_munged(user_ns, sk_uid(sk)); 68 68 r->diag_inode = sock_i_ino(sk); 69 69 return 0; 70 70 }
+1 -1
net/tipc/socket.c
··· 3642 3642 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) || 3643 3643 nla_put_u32(skb, TIPC_NLA_SOCK_UID, 3644 3644 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk), 3645 - sock_i_uid(sk))) || 3645 + sk_uid(sk))) || 3646 3646 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE, 3647 3647 tipc_diag_gen_cookie(sk), 3648 3648 TIPC_NLA_SOCK_PAD))
+1 -1
net/unix/af_unix.c
··· 3682 3682 goto unlock; 3683 3683 } 3684 3684 3685 - uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); 3685 + uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk)); 3686 3686 meta.seq = seq; 3687 3687 prog = bpf_iter_get_info(&meta, false); 3688 3688 ret = unix_prog_seq_show(prog, &meta, v, uid);
+1 -1
net/unix/diag.c
··· 106 106 static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb, 107 107 struct user_namespace *user_ns) 108 108 { 109 - uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk)); 109 + uid_t uid = from_kuid_munged(user_ns, sk_uid(sk)); 110 110 return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid); 111 111 } 112 112
+1 -1
net/xdp/xsk_diag.c
··· 119 119 120 120 if ((req->xdiag_show & XDP_SHOW_INFO) && 121 121 nla_put_u32(nlskb, XDP_DIAG_UID, 122 - from_kuid_munged(user_ns, sock_i_uid(sk)))) 122 + from_kuid_munged(user_ns, sk_uid(sk)))) 123 123 goto out_nlmsg_trim; 124 124 125 125 if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&