Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: add sk_drops_read(), sk_drops_inc() and sk_drops_reset() helpers

We want to split sk->sk_drops in the future to reduce
potential contention on this field.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250826125031.1578842-2-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

Authored by Eric Dumazet; committed by Paolo Abeni
f86f42ed c2a75689

+57 -42
+16 -1
include/net/sock.h
··· 2682 2682 #define sock_skb_cb_check_size(size) \ 2683 2683 BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET) 2684 2684 2685 + static inline void sk_drops_inc(struct sock *sk) 2686 + { 2687 + atomic_inc(&sk->sk_drops); 2688 + } 2689 + 2690 + static inline int sk_drops_read(const struct sock *sk) 2691 + { 2692 + return atomic_read(&sk->sk_drops); 2693 + } 2694 + 2695 + static inline void sk_drops_reset(struct sock *sk) 2696 + { 2697 + atomic_set(&sk->sk_drops, 0); 2698 + } 2699 + 2685 2700 static inline void 2686 2701 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb) 2687 2702 { 2688 2703 SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ? 2689 - atomic_read(&sk->sk_drops) : 0; 2704 + sk_drops_read(sk) : 0; 2690 2705 } 2691 2706 2692 2707 static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+1 -1
include/net/tcp.h
··· 2612 2612 */ 2613 2613 static inline void tcp_listendrop(const struct sock *sk) 2614 2614 { 2615 - atomic_inc(&((struct sock *)sk)->sk_drops); 2615 + sk_drops_inc((struct sock *)sk); 2616 2616 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); 2617 2617 } 2618 2618
+1 -1
net/core/datagram.c
··· 345 345 spin_unlock_bh(&sk_queue->lock); 346 346 } 347 347 348 - atomic_inc(&sk->sk_drops); 348 + sk_drops_inc(sk); 349 349 return err; 350 350 } 351 351 EXPORT_SYMBOL(__sk_queue_drop_skb);
+7 -7
net/core/sock.c
··· 491 491 struct sk_buff_head *list = &sk->sk_receive_queue; 492 492 493 493 if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) { 494 - atomic_inc(&sk->sk_drops); 494 + sk_drops_inc(sk); 495 495 trace_sock_rcvqueue_full(sk, skb); 496 496 return -ENOMEM; 497 497 } 498 498 499 499 if (!sk_rmem_schedule(sk, skb, skb->truesize)) { 500 - atomic_inc(&sk->sk_drops); 500 + sk_drops_inc(sk); 501 501 return -ENOBUFS; 502 502 } 503 503 ··· 562 562 skb->dev = NULL; 563 563 564 564 if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) { 565 - atomic_inc(&sk->sk_drops); 565 + sk_drops_inc(sk); 566 566 reason = SKB_DROP_REASON_SOCKET_RCVBUFF; 567 567 goto discard_and_relse; 568 568 } ··· 585 585 reason = SKB_DROP_REASON_PFMEMALLOC; 586 586 if (err == -ENOBUFS) 587 587 reason = SKB_DROP_REASON_SOCKET_BACKLOG; 588 - atomic_inc(&sk->sk_drops); 588 + sk_drops_inc(sk); 589 589 goto discard_and_relse; 590 590 } 591 591 ··· 2505 2505 newsk->sk_wmem_queued = 0; 2506 2506 newsk->sk_forward_alloc = 0; 2507 2507 newsk->sk_reserved_mem = 0; 2508 - atomic_set(&newsk->sk_drops, 0); 2508 + sk_drops_reset(newsk); 2509 2509 newsk->sk_send_head = NULL; 2510 2510 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 2511 2511 atomic_set(&newsk->sk_zckey, 0); ··· 3713 3713 */ 3714 3714 smp_wmb(); 3715 3715 refcount_set(&sk->sk_refcnt, 1); 3716 - atomic_set(&sk->sk_drops, 0); 3716 + sk_drops_reset(sk); 3717 3717 } 3718 3718 EXPORT_SYMBOL(sock_init_data_uid); 3719 3719 ··· 3973 3973 mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued); 3974 3974 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); 3975 3975 mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len); 3976 - mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); 3976 + mem[SK_MEMINFO_DROPS] = sk_drops_read(sk); 3977 3977 } 3978 3978 3979 3979 #ifdef CONFIG_PROC_FS
+1 -1
net/ipv4/ping.c
··· 1119 1119 from_kuid_munged(seq_user_ns(f), sk_uid(sp)), 1120 1120 0, sock_i_ino(sp), 1121 1121 refcount_read(&sp->sk_refcnt), sp, 1122 - atomic_read(&sp->sk_drops)); 1122 + sk_drops_read(sp)); 1123 1123 } 1124 1124 1125 1125 static int ping_v4_seq_show(struct seq_file *seq, void *v)
+3 -3
net/ipv4/raw.c
··· 178 178 179 179 if (atomic_read(&sk->sk_rmem_alloc) >= 180 180 READ_ONCE(sk->sk_rcvbuf)) { 181 - atomic_inc(&sk->sk_drops); 181 + sk_drops_inc(sk); 182 182 continue; 183 183 } 184 184 ··· 311 311 int raw_rcv(struct sock *sk, struct sk_buff *skb) 312 312 { 313 313 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { 314 - atomic_inc(&sk->sk_drops); 314 + sk_drops_inc(sk); 315 315 sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY); 316 316 return NET_RX_DROP; 317 317 } ··· 1045 1045 0, 0L, 0, 1046 1046 from_kuid_munged(seq_user_ns(seq), sk_uid(sp)), 1047 1047 0, sock_i_ino(sp), 1048 - refcount_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); 1048 + refcount_read(&sp->sk_refcnt), sp, sk_drops_read(sp)); 1049 1049 } 1050 1050 1051 1051 static int raw_seq_show(struct seq_file *seq, void *v)
+7 -7
net/ipv4/udp.c
··· 1787 1787 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 1788 1788 1789 1789 drop: 1790 - atomic_inc(&sk->sk_drops); 1790 + sk_drops_inc(sk); 1791 1791 busylock_release(busy); 1792 1792 return err; 1793 1793 } ··· 1852 1852 IS_UDPLITE(sk)); 1853 1853 __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, 1854 1854 IS_UDPLITE(sk)); 1855 - atomic_inc(&sk->sk_drops); 1855 + sk_drops_inc(sk); 1856 1856 __skb_unlink(skb, rcvq); 1857 1857 *total += skb->truesize; 1858 1858 kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM); ··· 2008 2008 2009 2009 __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite); 2010 2010 __UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite); 2011 - atomic_inc(&sk->sk_drops); 2011 + sk_drops_inc(sk); 2012 2012 kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM); 2013 2013 goto try_again; 2014 2014 } ··· 2078 2078 2079 2079 if (unlikely(err)) { 2080 2080 if (!peeking) { 2081 - atomic_inc(&sk->sk_drops); 2081 + sk_drops_inc(sk); 2082 2082 UDP_INC_STATS(sock_net(sk), 2083 2083 UDP_MIB_INERRORS, is_udplite); 2084 2084 } ··· 2449 2449 __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 2450 2450 drop: 2451 2451 __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 2452 - atomic_inc(&sk->sk_drops); 2452 + sk_drops_inc(sk); 2453 2453 sk_skb_reason_drop(sk, skb, drop_reason); 2454 2454 return -1; 2455 2455 } ··· 2534 2534 nskb = skb_clone(skb, GFP_ATOMIC); 2535 2535 2536 2536 if (unlikely(!nskb)) { 2537 - atomic_inc(&sk->sk_drops); 2537 + sk_drops_inc(sk); 2538 2538 __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS, 2539 2539 IS_UDPLITE(sk)); 2540 2540 __UDP_INC_STATS(net, UDP_MIB_INERRORS, ··· 3386 3386 from_kuid_munged(seq_user_ns(f), sk_uid(sp)), 3387 3387 0, sock_i_ino(sp), 3388 3388 refcount_read(&sp->sk_refcnt), sp, 3389 - atomic_read(&sp->sk_drops)); 3389 + sk_drops_read(sp)); 3390 3390 } 3391 3391 3392 3392 int udp4_seq_show(struct seq_file *seq, void *v)
+1 -1
net/ipv6/datagram.c
··· 1068 1068 0, 1069 1069 sock_i_ino(sp), 1070 1070 refcount_read(&sp->sk_refcnt), sp, 1071 - atomic_read(&sp->sk_drops)); 1071 + sk_drops_read(sp)); 1072 1072 }
+4 -4
net/ipv6/raw.c
··· 163 163 164 164 if (atomic_read(&sk->sk_rmem_alloc) >= 165 165 READ_ONCE(sk->sk_rcvbuf)) { 166 - atomic_inc(&sk->sk_drops); 166 + sk_drops_inc(sk); 167 167 continue; 168 168 } 169 169 ··· 361 361 362 362 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) && 363 363 skb_checksum_complete(skb)) { 364 - atomic_inc(&sk->sk_drops); 364 + sk_drops_inc(sk); 365 365 sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM); 366 366 return NET_RX_DROP; 367 367 } ··· 389 389 struct raw6_sock *rp = raw6_sk(sk); 390 390 391 391 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { 392 - atomic_inc(&sk->sk_drops); 392 + sk_drops_inc(sk); 393 393 sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY); 394 394 return NET_RX_DROP; 395 395 } ··· 414 414 415 415 if (inet_test_bit(HDRINCL, sk)) { 416 416 if (skb_checksum_complete(skb)) { 417 - atomic_inc(&sk->sk_drops); 417 + sk_drops_inc(sk); 418 418 sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM); 419 419 return NET_RX_DROP; 420 420 }
+3 -3
net/ipv6/udp.c
··· 524 524 } 525 525 if (unlikely(err)) { 526 526 if (!peeking) { 527 - atomic_inc(&sk->sk_drops); 527 + sk_drops_inc(sk); 528 528 SNMP_INC_STATS(mib, UDP_MIB_INERRORS); 529 529 } 530 530 kfree_skb(skb); ··· 908 908 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 909 909 drop: 910 910 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 911 - atomic_inc(&sk->sk_drops); 911 + sk_drops_inc(sk); 912 912 sk_skb_reason_drop(sk, skb, drop_reason); 913 913 return -1; 914 914 } ··· 1013 1013 } 1014 1014 nskb = skb_clone(skb, GFP_ATOMIC); 1015 1015 if (unlikely(!nskb)) { 1016 - atomic_inc(&sk->sk_drops); 1016 + sk_drops_inc(sk); 1017 1017 __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS, 1018 1018 IS_UDPLITE(sk)); 1019 1019 __UDP6_INC_STATS(net, UDP_MIB_INERRORS,
+2 -2
net/iucv/af_iucv.c
··· 1187 1187 1188 1188 IUCV_SKB_CB(skb)->offset = 0; 1189 1189 if (sk_filter(sk, skb)) { 1190 - atomic_inc(&sk->sk_drops); /* skb rejected by filter */ 1190 + sk_drops_inc(sk); /* skb rejected by filter */ 1191 1191 kfree_skb(skb); 1192 1192 return; 1193 1193 } ··· 2011 2011 skb_reset_network_header(skb); 2012 2012 IUCV_SKB_CB(skb)->offset = 0; 2013 2013 if (sk_filter(sk, skb)) { 2014 - atomic_inc(&sk->sk_drops); /* skb rejected by filter */ 2014 + sk_drops_inc(sk); /* skb rejected by filter */ 2015 2015 kfree_skb(skb); 2016 2016 return NET_RX_SUCCESS; 2017 2017 }
+2 -2
net/netlink/af_netlink.c
··· 356 356 sk_error_report(sk); 357 357 } 358 358 } 359 - atomic_inc(&sk->sk_drops); 359 + sk_drops_inc(sk); 360 360 } 361 361 362 362 static void netlink_rcv_wake(struct sock *sk) ··· 2711 2711 sk_wmem_alloc_get(s), 2712 2712 READ_ONCE(nlk->cb_running), 2713 2713 refcount_read(&s->sk_refcnt), 2714 - atomic_read(&s->sk_drops), 2714 + sk_drops_read(s), 2715 2715 sock_i_ino(s) 2716 2716 ); 2717 2717
+1 -1
net/packet/af_packet.c
··· 2265 2265 2266 2266 drop_n_acct: 2267 2267 atomic_inc(&po->tp_drops); 2268 - atomic_inc(&sk->sk_drops); 2268 + sk_drops_inc(sk); 2269 2269 drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR; 2270 2270 2271 2271 drop_n_restore:
+3 -3
net/phonet/pep.c
··· 376 376 377 377 case PNS_PEP_CTRL_REQ: 378 378 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) { 379 - atomic_inc(&sk->sk_drops); 379 + sk_drops_inc(sk); 380 380 break; 381 381 } 382 382 __skb_pull(skb, 4); ··· 397 397 } 398 398 399 399 if (pn->rx_credits == 0) { 400 - atomic_inc(&sk->sk_drops); 400 + sk_drops_inc(sk); 401 401 err = -ENOBUFS; 402 402 break; 403 403 } ··· 567 567 } 568 568 569 569 if (pn->rx_credits == 0) { 570 - atomic_inc(&sk->sk_drops); 570 + sk_drops_inc(sk); 571 571 err = NET_RX_DROP; 572 572 break; 573 573 }
+1 -1
net/phonet/socket.c
··· 587 587 from_kuid_munged(seq_user_ns(seq), sk_uid(sk)), 588 588 sock_i_ino(sk), 589 589 refcount_read(&sk->sk_refcnt), sk, 590 - atomic_read(&sk->sk_drops)); 590 + sk_drops_read(sk)); 591 591 } 592 592 seq_pad(seq, '\n'); 593 593 return 0;
+1 -1
net/sctp/diag.c
··· 173 173 mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; 174 174 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); 175 175 mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len); 176 - mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); 176 + mem[SK_MEMINFO_DROPS] = sk_drops_read(sk); 177 177 178 178 if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0) 179 179 goto errout;
+3 -3
net/tipc/socket.c
··· 2366 2366 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) { 2367 2367 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, 2368 2368 "err_overload2!"); 2369 - atomic_inc(&sk->sk_drops); 2369 + sk_drops_inc(sk); 2370 2370 err = TIPC_ERR_OVERLOAD; 2371 2371 } 2372 2372 ··· 2458 2458 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!"); 2459 2459 /* Overload => reject message back to sender */ 2460 2460 onode = tipc_own_addr(sock_net(sk)); 2461 - atomic_inc(&sk->sk_drops); 2461 + sk_drops_inc(sk); 2462 2462 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) { 2463 2463 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL, 2464 2464 "@sk_enqueue!"); ··· 3657 3657 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ, 3658 3658 skb_queue_len(&sk->sk_write_queue)) || 3659 3659 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP, 3660 - atomic_read(&sk->sk_drops))) 3660 + sk_drops_read(sk))) 3661 3661 goto stat_msg_cancel; 3662 3662 3663 3663 if (tsk->cong_link_cnt &&