Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net_sched: act: remove tcfa_qstats

tcfa_qstats is currently only used to hold drops and overlimits counters.

tcf_action_inc_drop_qstats() and tcf_action_inc_overlimit_qstats()
currently acquire a->tcfa_lock to increment these counters.

Switch to two atomic_t to get lock-free accounting.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
Link: https://patch.msgid.link/20250901093141.2093176-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Eric Dumazet; committed by Jakub Kicinski.
5d14bbf9 3016024d

+14 -12
+6 -8
include/net/act_api.h
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -33,7 +33,10 @@
 	struct tcf_t			tcfa_tm;
 	struct gnet_stats_basic_sync	tcfa_bstats;
 	struct gnet_stats_basic_sync	tcfa_bstats_hw;
-	struct gnet_stats_queue		tcfa_qstats;
+
+	atomic_t			tcfa_drops;
+	atomic_t			tcfa_overlimits;
+
 	struct net_rate_estimator __rcu *tcfa_rate_est;
 	spinlock_t			tcfa_lock;
 	struct gnet_stats_basic_sync __percpu *cpu_bstats;
@@ -56,7 +53,6 @@
 #define tcf_action	common.tcfa_action
 #define tcf_tm	common.tcfa_tm
 #define tcf_bstats	common.tcfa_bstats
-#define tcf_qstats	common.tcfa_qstats
 #define tcf_rate_est	common.tcfa_rate_est
 #define tcf_lock	common.tcfa_lock
 
@@ -243,9 +241,7 @@
 		qstats_drop_inc(this_cpu_ptr(a->cpu_qstats));
 		return;
 	}
-	spin_lock(&a->tcfa_lock);
-	qstats_drop_inc(&a->tcfa_qstats);
-	spin_unlock(&a->tcfa_lock);
+	atomic_inc(&a->tcfa_drops);
 }
 
 static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
@@ -252,9 +252,7 @@
 		qstats_overlimit_inc(this_cpu_ptr(a->cpu_qstats));
 		return;
 	}
-	spin_lock(&a->tcfa_lock);
-	qstats_overlimit_inc(&a->tcfa_qstats);
-	spin_unlock(&a->tcfa_lock);
+	atomic_inc(&a->tcfa_overlimits);
 }
 
 void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
+8 -4
net/sched/act_api.c
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1585,7 +1585,7 @@
 	}
 
 	_bstats_update(&a->tcfa_bstats, bytes, packets);
-	a->tcfa_qstats.drops += drops;
+	atomic_add(drops, &a->tcfa_drops);
 	if (hw)
 		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
 }
@@ -1594,8 +1594,9 @@
 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
 			  int compat_mode)
 {
-	int err = 0;
+	struct gnet_stats_queue qstats = {0};
 	struct gnet_dump d;
+	int err = 0;
 
 	if (p == NULL)
 		goto errout;
@@ -1620,14 +1619,17 @@
 	if (err < 0)
 		goto errout;
 
+	qstats.drops = atomic_read(&p->tcfa_drops);
+	qstats.overlimits = atomic_read(&p->tcfa_overlimits);
+
 	if (gnet_stats_copy_basic(&d, p->cpu_bstats,
 				  &p->tcfa_bstats, false) < 0 ||
 	    gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
 				     &p->tcfa_bstats_hw, false) < 0 ||
 	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
 	    gnet_stats_copy_queue(&d, p->cpu_qstats,
-				  &p->tcfa_qstats,
-				  p->tcfa_qstats.qlen) < 0)
+				  &qstats,
+				  qstats.qlen) < 0)
 		goto errout;
 
 	if (gnet_stats_finish_copy(&d) < 0)