
net: sched: do not acquire qdisc spinlock in qdisc/class stats dump

Large tc dumps (tc -s {qdisc|class} sh dev ethX) done by the Google BwE host
agent [1] are problematic at scale:

For each qdisc/class found in the dump, we currently lock the root qdisc
spinlock in order to get stats. Sampling stats every 5 seconds from
thousands of HTB classes is a challenge when the root qdisc spinlock is
under high pressure. Not only do the dumps take time, they also slow
down the fast path (enqueue/dequeue of packets) by 10% to 20% in some cases.

An audit of existing qdiscs showed that sch_fq_codel is the only qdisc
that might need the qdisc lock in fq_codel_dump_stats() and
fq_codel_dump_class_stats().

In v2 of this patch, I now use the Qdisc running seqcount to provide
consistent reads of the packets/bytes counters, regardless of 32-bit or
64-bit architecture.

I also changed the rate estimators to use the same infrastructure
so that they no longer need to take the root qdisc lock.
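
The pattern is easy to demonstrate outside the kernel. The sketch below is a
minimal, self-contained C11 illustration of the seqcount idea, not kernel
code: every name in it (demo_stats, demo_update, demo_snapshot) is invented,
and it uses sequentially consistent atomics where the kernel's seqcount_t
uses plain fields plus lighter memory barriers. What it shows is the property
this patch relies on: the sequence counter lets a lock-free reader obtain a
consistent bytes/packets pair, even on 32-bit machines where a 64-bit counter
cannot be read atomically.

/* Minimal userspace sketch of the seqcount pattern (illustrative only:
 * the kernel's real primitives are seqcount_t with read_seqcount_begin()
 * and read_seqcount_retry(), as used by __gnet_stats_copy_basic()).
 * Build: cc -std=c11 -o demo demo.c
 */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
        atomic_uint seq;         /* odd while an update is in progress */
        _Atomic uint64_t bytes;  /* atomic here only so concurrent access is
                                  * well-defined C11; the kernel uses plain
                                  * u64 fields plus memory barriers */
        _Atomic uint64_t packets;
};

/* Writer: the dequeue fast path. It is already serialized (one writer at a
 * time, the role qdisc->running plays), so it just brackets its update with
 * two increments of seq and never takes a lock for the dump's sake. */
static void demo_update(struct demo_stats *s, uint64_t len)
{
        atomic_fetch_add(&s->seq, 1);   /* seq goes odd: update in flight */
        atomic_fetch_add(&s->bytes, len);
        atomic_fetch_add(&s->packets, 1);
        atomic_fetch_add(&s->seq, 1);   /* seq even again: state stable */
}

/* Reader: the stats dump. Lock-free; it retries until it reads the same
 * even sequence number before and after the copy, which proves the
 * bytes/packets pair belongs to a single consistent state. */
static void demo_snapshot(struct demo_stats *s, uint64_t *bytes,
                          uint64_t *packets)
{
        unsigned int start;

        do {
                do {
                        start = atomic_load(&s->seq);
                } while (start & 1);            /* writer active: wait */
                *bytes = atomic_load(&s->bytes);
                *packets = atomic_load(&s->packets);
        } while (atomic_load(&s->seq) != start); /* raced a writer: retry */
}

int main(void)
{
        struct demo_stats s = { 0 };
        uint64_t b, p;

        demo_update(&s, 1500);
        demo_snapshot(&s, &b, &p);
        printf("bytes=%" PRIu64 " packets=%" PRIu64 "\n", b, p);
        return 0;
}

Readers may occasionally retry, but they never block the writer and never
touch the root qdisc spinlock: the cost of a huge tc -s dump is paid by the
dumper, not by the packet fast path.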

[1]
http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43838.pdf

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: Kevin Athey <kda@google.com>
Cc: Xiaotian Pei <xiaotian@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eric Dumazet, committed by David S. Miller
edb09eb1 f9eb8aea

20 files changed: +126 -69
+1 -1
Documentation/networking/gen_stats.txt
···
 ...
 };

-Update statistics:
+Update statistics, in dequeue() methods only, (while owning qdisc->running)
 mystruct->tstats.packet++;
 mystruct->qstats.backlog += skb->pkt_len;

+8 -4
include/net/gen_stats.h
···
                           spinlock_t *lock, struct gnet_dump *d,
                           int padattr);

-int gnet_stats_copy_basic(struct gnet_dump *d,
+int gnet_stats_copy_basic(const seqcount_t *running,
+                          struct gnet_dump *d,
                           struct gnet_stats_basic_cpu __percpu *cpu,
                           struct gnet_stats_basic_packed *b);
-void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+void __gnet_stats_copy_basic(const seqcount_t *running,
+                             struct gnet_stats_basic_packed *bstats,
                              struct gnet_stats_basic_cpu __percpu *cpu,
                              struct gnet_stats_basic_packed *b);
 int gnet_stats_copy_rate_est(struct gnet_dump *d,
···
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
                       struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                       struct gnet_stats_rate_est64 *rate_est,
-                      spinlock_t *stats_lock, struct nlattr *opt);
+                      spinlock_t *stats_lock,
+                      seqcount_t *running, struct nlattr *opt);
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
                         struct gnet_stats_rate_est64 *rate_est);
 int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
                           struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                           struct gnet_stats_rate_est64 *rate_est,
-                          spinlock_t *stats_lock, struct nlattr *opt);
+                          spinlock_t *stats_lock,
+                          seqcount_t *running, struct nlattr *opt);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
                           const struct gnet_stats_rate_est64 *rate_est);
 #endif
+8
include/net/sch_generic.h
···
         return qdisc_lock(root);
 }

+static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
+{
+        struct Qdisc *root = qdisc_root_sleeping(qdisc);
+
+        ASSERT_RTNL();
+        return &root->running;
+}
+
 static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
 {
         return qdisc->dev_queue->dev;
+16 -8
net/core/gen_estimator.c
···
         struct gnet_stats_basic_packed  *bstats;
         struct gnet_stats_rate_est64    *rate_est;
         spinlock_t              *stats_lock;
+        seqcount_t              *running;
         int                     ewma_log;
         u32                     last_packets;
         unsigned long           avpps;
···
                 unsigned long rate;
                 u64 brate;

-                spin_lock(e->stats_lock);
+                if (e->stats_lock)
+                        spin_lock(e->stats_lock);
                 read_lock(&est_lock);
                 if (e->bstats == NULL)
                         goto skip;

-                __gnet_stats_copy_basic(&b, e->cpu_bstats, e->bstats);
+                __gnet_stats_copy_basic(e->running, &b, e->cpu_bstats, e->bstats);

                 brate = (b.bytes - e->last_bytes)<<(7 - idx);
                 e->last_bytes = b.bytes;
                 e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
-                e->rate_est->bps = (e->avbps+0xF)>>5;
+                WRITE_ONCE(e->rate_est->bps, (e->avbps + 0xF) >> 5);

                 rate = b.packets - e->last_packets;
                 rate <<= (7 - idx);
                 e->last_packets = b.packets;
                 e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
-                e->rate_est->pps = (e->avpps + 0xF) >> 5;
+                WRITE_ONCE(e->rate_est->pps, (e->avpps + 0xF) >> 5);
 skip:
                 read_unlock(&est_lock);
-                spin_unlock(e->stats_lock);
+                if (e->stats_lock)
+                        spin_unlock(e->stats_lock);
         }

         if (!list_empty(&elist[idx].list))
···
  * @cpu_bstats: bstats per cpu
  * @rate_est: rate estimator statistics
  * @stats_lock: statistics lock
+ * @running: qdisc running seqcount
  * @opt: rate estimator configuration TLV
  *
  * Creates a new rate estimator with &bstats as source and &rate_est
···
                       struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                       struct gnet_stats_rate_est64 *rate_est,
                       spinlock_t *stats_lock,
+                      seqcount_t *running,
                       struct nlattr *opt)
 {
         struct gen_estimator *est;
···
         if (est == NULL)
                 return -ENOBUFS;

-        __gnet_stats_copy_basic(&b, cpu_bstats, bstats);
+        __gnet_stats_copy_basic(running, &b, cpu_bstats, bstats);

         idx = parm->interval + 2;
         est->bstats = bstats;
         est->rate_est = rate_est;
         est->stats_lock = stats_lock;
+        est->running = running;
         est->ewma_log = parm->ewma_log;
         est->last_bytes = b.bytes;
         est->avbps = rate_est->bps<<5;
···
  * @cpu_bstats: bstats per cpu
  * @rate_est: rate estimator statistics
  * @stats_lock: statistics lock
+ * @running: qdisc running seqcount (might be NULL)
  * @opt: rate estimator configuration TLV
  *
  * Replaces the configuration of a rate estimator by calling
···
 int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
                           struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                           struct gnet_stats_rate_est64 *rate_est,
-                          spinlock_t *stats_lock, struct nlattr *opt)
+                          spinlock_t *stats_lock,
+                          seqcount_t *running, struct nlattr *opt)
 {
         gen_kill_estimator(bstats, rate_est);
-        return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, opt);
+        return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
 }
 EXPORT_SYMBOL(gen_replace_estimator);
+23 -11
net/core/gen_stats.c
···
         return 0;

 nla_put_failure:
+        if (d->lock)
+                spin_unlock_bh(d->lock);
         kfree(d->xstats);
         d->xstats = NULL;
         d->xstats_len = 0;
-        spin_unlock_bh(d->lock);
         return -1;
 }
···
 {
         memset(d, 0, sizeof(*d));

-        spin_lock_bh(lock);
-        d->lock = lock;
         if (type)
                 d->tail = (struct nlattr *)skb_tail_pointer(skb);
         d->skb = skb;
         d->compat_tc_stats = tc_stats_type;
         d->compat_xstats = xstats_type;
         d->padattr = padattr;
-
+        if (lock) {
+                d->lock = lock;
+                spin_lock_bh(lock);
+        }
         if (d->tail)
                 return gnet_stats_copy(d, type, NULL, 0, padattr);
···
 }

 void
-__gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+__gnet_stats_copy_basic(const seqcount_t *running,
+                        struct gnet_stats_basic_packed *bstats,
                         struct gnet_stats_basic_cpu __percpu *cpu,
                         struct gnet_stats_basic_packed *b)
 {
+        unsigned int seq;
+
         if (cpu) {
                 __gnet_stats_copy_basic_cpu(bstats, cpu);
-        } else {
+                return;
+        }
+        do {
+                if (running)
+                        seq = read_seqcount_begin(running);
                 bstats->bytes = b->bytes;
                 bstats->packets = b->packets;
-        }
+        } while (running && read_seqcount_retry(running, seq));
 }
 EXPORT_SYMBOL(__gnet_stats_copy_basic);
···
  * if the room in the socket buffer was not sufficient.
  */
 int
-gnet_stats_copy_basic(struct gnet_dump *d,
+gnet_stats_copy_basic(const seqcount_t *running,
+                      struct gnet_dump *d,
                       struct gnet_stats_basic_cpu __percpu *cpu,
                       struct gnet_stats_basic_packed *b)
 {
         struct gnet_stats_basic_packed bstats = {0};

-        __gnet_stats_copy_basic(&bstats, cpu, b);
+        __gnet_stats_copy_basic(running, &bstats, cpu, b);

         if (d->compat_tc_stats) {
                 d->tc_stats.bytes = bstats.bytes;
···
         return 0;

 err_out:
+        if (d->lock)
+                spin_unlock_bh(d->lock);
         d->xstats_len = 0;
-        spin_unlock_bh(d->lock);
         return -1;
 }
 EXPORT_SYMBOL(gnet_stats_copy_app);
···
                 return -1;
         }

+        if (d->lock)
+                spin_unlock_bh(d->lock);
         kfree(d->xstats);
         d->xstats = NULL;
         d->xstats_len = 0;
-        spin_unlock_bh(d->lock);
         return 0;
 }
 EXPORT_SYMBOL(gnet_stats_finish_copy);
+1 -1
net/netfilter/xt_RATEEST.c
···
         cfg.est.ewma_log        = info->ewma_log;

         ret = gen_new_estimator(&est->bstats, NULL, &est->rstats,
-                                &est->lock, &cfg.opt);
+                                &est->lock, NULL, &cfg.opt);
         if (ret < 0)
                 goto err2;
+2 -2
net/sched/act_api.c
···
         if (est) {
                 err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
                                         &p->tcfc_rate_est,
-                                        &p->tcfc_lock, est);
+                                        &p->tcfc_lock, NULL, est);
                 if (err) {
                         free_percpu(p->cpu_qstats);
                         goto err2;
···
         if (err < 0)
                 goto errout;

-        if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
+        if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
             gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
                                      &p->tcfc_rate_est) < 0 ||
             gnet_stats_copy_queue(&d, p->cpu_qstats,
+2 -1
net/sched/act_police.c
···
         if (est) {
                 err = gen_replace_estimator(&police->tcf_bstats, NULL,
                                             &police->tcf_rate_est,
-                                            &police->tcf_lock, est);
+                                            &police->tcf_lock,
+                                            NULL, est);
                 if (err)
                         goto failure_unlock;
         } else if (tb[TCA_POLICE_AVRATE] &&
+11 -10
net/sched/sch_api.c
···
                 rcu_assign_pointer(sch->stab, stab);
         }
         if (tca[TCA_RATE]) {
-                spinlock_t *root_lock;
+                seqcount_t *running;

                 err = -EOPNOTSUPP;
                 if (sch->flags & TCQ_F_MQROOT)
···
                 if ((sch->parent != TC_H_ROOT) &&
                     !(sch->flags & TCQ_F_INGRESS) &&
                     (!p || !(p->flags & TCQ_F_MQROOT)))
-                        root_lock = qdisc_root_sleeping_lock(sch);
+                        running = qdisc_root_sleeping_running(sch);
                 else
-                        root_lock = qdisc_lock(sch);
+                        running = &sch->running;

                 err = gen_new_estimator(&sch->bstats,
                                         sch->cpu_bstats,
                                         &sch->rate_est,
-                                        root_lock,
+                                        NULL,
+                                        running,
                                         tca[TCA_RATE]);
                 if (err)
                         goto err_out4;
···
                 gen_replace_estimator(&sch->bstats,
                                       sch->cpu_bstats,
                                       &sch->rate_est,
-                                      qdisc_root_sleeping_lock(sch),
+                                      NULL,
+                                      qdisc_root_sleeping_running(sch),
                                       tca[TCA_RATE]);
         }
 out:
···
                 goto nla_put_failure;

         if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
-                                         qdisc_root_sleeping_lock(q), &d,
-                                         TCA_PAD) < 0)
+                                         NULL, &d, TCA_PAD) < 0)
                 goto nla_put_failure;

         if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
···
                 cpu_qstats = q->cpu_qstats;
         }

-        if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
+        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
+                                  &d, cpu_bstats, &q->bstats) < 0 ||
             gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
             gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
                 goto nla_put_failure;
···
                 goto nla_put_failure;

         if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
-                                         qdisc_root_sleeping_lock(q), &d,
-                                         TCA_PAD) < 0)
+                                         NULL, &d, TCA_PAD) < 0)
                 goto nla_put_failure;

         if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
+2 -1
net/sched/sch_atm.c
···
 {
         struct atm_flow_data *flow = (struct atm_flow_data *)arg;

-        if (gnet_stats_copy_basic(d, NULL, &flow->bstats) < 0 ||
+        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                  d, NULL, &flow->bstats) < 0 ||
             gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
                 return -1;
+6 -3
net/sched/sch_cbq.c
···
         if (cl->undertime != PSCHED_PASTPERFECT)
                 cl->xstats.undertime = cl->undertime - q->now;

-        if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                  d, NULL, &cl->bstats) < 0 ||
             gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
             gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
                 return -1;
···
                 if (tca[TCA_RATE]) {
                         err = gen_replace_estimator(&cl->bstats, NULL,
                                                     &cl->rate_est,
-                                                    qdisc_root_sleeping_lock(sch),
+                                                    NULL,
+                                                    qdisc_root_sleeping_running(sch),
                                                     tca[TCA_RATE]);
                         if (err) {
                                 qdisc_put_rtab(rtab);
···
         if (tca[TCA_RATE]) {
                 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
-                                        qdisc_root_sleeping_lock(sch),
+                                        NULL,
+                                        qdisc_root_sleeping_running(sch),
                                         tca[TCA_RATE]);
                 if (err) {
                         kfree(cl);
+6 -3
net/sched/sch_drr.c
···
         if (tca[TCA_RATE]) {
                 err = gen_replace_estimator(&cl->bstats, NULL,
                                             &cl->rate_est,
-                                            qdisc_root_sleeping_lock(sch),
+                                            NULL,
+                                            qdisc_root_sleeping_running(sch),
                                             tca[TCA_RATE]);
                 if (err)
                         return err;
···
         if (tca[TCA_RATE]) {
                 err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
-                                            qdisc_root_sleeping_lock(sch),
+                                            NULL,
+                                            qdisc_root_sleeping_running(sch),
                                             tca[TCA_RATE]);
                 if (err) {
                         qdisc_destroy(cl->qdisc);
···
         if (qlen)
                 xstats.deficit = cl->deficit;

-        if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                  d, NULL, &cl->bstats) < 0 ||
             gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
             gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
                 return -1;
+11 -4
net/sched/sch_fq_codel.c
···
         st.qdisc_stats.memory_usage = q->memory_usage;
         st.qdisc_stats.drop_overmemory = q->drop_overmemory;

+        sch_tree_lock(sch);
         list_for_each(pos, &q->new_flows)
                 st.qdisc_stats.new_flows_len++;

         list_for_each(pos, &q->old_flows)
                 st.qdisc_stats.old_flows_len++;
+        sch_tree_unlock(sch);

         return gnet_stats_copy_app(d, &st, sizeof(st));
 }
···
         if (idx < q->flows_cnt) {
                 const struct fq_codel_flow *flow = &q->flows[idx];
-                const struct sk_buff *skb = flow->head;
+                const struct sk_buff *skb;

                 memset(&xstats, 0, sizeof(xstats));
                 xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
···
                                 codel_time_to_us(delta) :
                                 -codel_time_to_us(-delta);
                 }
-                while (skb) {
-                        qs.qlen++;
-                        skb = skb->next;
+                if (flow->head) {
+                        sch_tree_lock(sch);
+                        skb = flow->head;
+                        while (skb) {
+                                qs.qlen++;
+                                skb = skb->next;
+                        }
+                        sch_tree_unlock(sch);
                 }
                 qs.backlog = q->backlogs[idx];
                 qs.drops = flow->dropped;
+5 -5
net/sched/sch_hfsc.c
···
         cur_time = psched_get_time();

         if (tca[TCA_RATE]) {
-                spinlock_t *lock = qdisc_root_sleeping_lock(sch);
-
                 err = gen_replace_estimator(&cl->bstats, NULL,
                                             &cl->rate_est,
-                                            lock,
+                                            NULL,
+                                            qdisc_root_sleeping_running(sch),
                                             tca[TCA_RATE]);
                 if (err)
                         return err;
···
         if (tca[TCA_RATE]) {
                 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
-                                        qdisc_root_sleeping_lock(sch),
+                                        NULL,
+                                        qdisc_root_sleeping_running(sch),
                                         tca[TCA_RATE]);
                 if (err) {
                         kfree(cl);
···
         xstats.work    = cl->cl_total;
         xstats.rtwork  = cl->cl_cumul;

-        if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
             gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
             gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
                 return -1;
+6 -5
net/sched/sch_htb.c
···
         cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
         cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);

-        if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                  d, NULL, &cl->bstats) < 0 ||
             gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
             gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
                 return -1;
···
                 if (htb_rate_est || tca[TCA_RATE]) {
                         err = gen_new_estimator(&cl->bstats, NULL,
                                                 &cl->rate_est,
-                                                qdisc_root_sleeping_lock(sch),
+                                                NULL,
+                                                qdisc_root_sleeping_running(sch),
                                                 tca[TCA_RATE] ? : &est.nla);
                         if (err) {
                                 kfree(cl);
···
                 parent->children++;
         } else {
                 if (tca[TCA_RATE]) {
-                        spinlock_t *lock = qdisc_root_sleeping_lock(sch);
-
                         err = gen_replace_estimator(&cl->bstats, NULL,
                                                     &cl->rate_est,
-                                                    lock,
+                                                    NULL,
+                                                    qdisc_root_sleeping_running(sch),
                                                     tca[TCA_RATE]);
                         if (err)
                                 return err;
+1 -1
net/sched/sch_mq.c
···
         struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

         sch = dev_queue->qdisc_sleeping;
-        if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
+        if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
             gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
                 return -1;
         return 0;
+7 -4
net/sched/sch_mqprio.c
···
                  * hold here is the look on dev_queue->qdisc_sleeping
                  * also acquired below.
                  */
-                spin_unlock_bh(d->lock);
+                if (d->lock)
+                        spin_unlock_bh(d->lock);

                 for (i = tc.offset; i < tc.offset + tc.count; i++) {
                         struct netdev_queue *q = netdev_get_tx_queue(dev, i);
···
                         spin_unlock_bh(qdisc_lock(qdisc));
                 }
                 /* Reclaim root sleeping lock before completing stats */
-                spin_lock_bh(d->lock);
-                if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 ||
+                if (d->lock)
+                        spin_lock_bh(d->lock);
+                if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
                     gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
                         return -1;
         } else {
                 struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

                 sch = dev_queue->qdisc_sleeping;
-                if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
+                if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                          d, NULL, &sch->bstats) < 0 ||
                     gnet_stats_copy_queue(d, NULL,
                                           &sch->qstats, sch->q.qlen) < 0)
                         return -1;
+2 -1
net/sched/sch_multiq.c
···
         struct Qdisc *cl_q;

         cl_q = q->queues[cl - 1];
-        if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
+        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                  d, NULL, &cl_q->bstats) < 0 ||
             gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
                 return -1;
+2 -1
net/sched/sch_prio.c
···
         struct Qdisc *cl_q;

         cl_q = q->queues[cl - 1];
-        if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
+        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                  d, NULL, &cl_q->bstats) < 0 ||
             gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
                 return -1;
+6 -3
net/sched/sch_qfq.c
···
         if (tca[TCA_RATE]) {
                 err = gen_replace_estimator(&cl->bstats, NULL,
                                             &cl->rate_est,
-                                            qdisc_root_sleeping_lock(sch),
+                                            NULL,
+                                            qdisc_root_sleeping_running(sch),
                                             tca[TCA_RATE]);
                 if (err)
                         return err;
···
         if (tca[TCA_RATE]) {
                 err = gen_new_estimator(&cl->bstats, NULL,
                                         &cl->rate_est,
-                                        qdisc_root_sleeping_lock(sch),
+                                        NULL,
+                                        qdisc_root_sleeping_running(sch),
                                         tca[TCA_RATE]);
                 if (err)
                         goto destroy_class;
···
         xstats.weight = cl->agg->class_weight;
         xstats.lmax = cl->agg->lmax;

-        if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                  d, NULL, &cl->bstats) < 0 ||
             gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
             gnet_stats_copy_queue(d, NULL,
                                   &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
                 return -1;