Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

net_sched: factorize qdisc stats handling

HTB takes into account whether the skb is segmented (GSO) in its stats updates.
Generalize this to all schedulers.

They should use the qdisc_bstats_update() helper instead of manipulating
bstats.bytes and bstats.packets directly.

Also add a bstats_update() helper for classes that use
gnet_stats_basic_packed fields.

Note: right now, the TCQ_F_CAN_BYPASS shortcut can be taken only if no
stab is set up on the qdisc.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eric Dumazet, committed by David S. Miller
bfe0d029 f1593d22

24 files changed, +50 -72
include/net/sch_generic.h (+14 -6)
     return q->q.qlen;
 }
 
-static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
 {
     return (struct qdisc_skb_cb *)skb->cb;
 }
···
     return true;
 }
 
-static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
 {
     return qdisc_skb_cb(skb)->pkt_len;
 }
···
     return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 }
 
-static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
+
+static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+                                 const struct sk_buff *skb)
 {
-    sch->bstats.bytes += len;
-    sch->bstats.packets++;
+    bstats->bytes += qdisc_pkt_len(skb);
+    bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+}
+
+static inline void qdisc_bstats_update(struct Qdisc *sch,
+                                       const struct sk_buff *skb)
+{
+    bstats_update(&sch->bstats, skb);
 }
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
···
 {
     __skb_queue_tail(list, skb);
     sch->qstats.backlog += qdisc_pkt_len(skb);
-    __qdisc_update_bstats(sch, qdisc_pkt_len(skb));
+    qdisc_bstats_update(sch, skb);
 
     return NET_XMIT_SUCCESS;
 }

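To make the new accounting concrete: bstats_update() charges qdisc_pkt_len(skb) bytes and, for a GSO skb, skb_shinfo(skb)->gso_segs packets, so a hypothetical GSO skb with pkt_len 6000 and gso_segs 4 counts as 6000 bytes and 4 packets rather than 1. Below is a minimal usage sketch, not part of the patch, of how a leaf scheduler's enqueue path would use the two helpers; my_enqueue(), my_classify(), struct my_class and its fields are made-up names, and the usual in-kernel qdisc environment is assumed.

#include <net/sch_generic.h>

/* Hypothetical leaf qdisc enqueue converted to the new helpers
 * (illustration only -- not taken from this patch). */
static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
    struct my_class *cl = my_classify(skb, sch); /* hypothetical classifier */
    int ret;

    ret = qdisc_enqueue(skb, cl->qdisc);
    if (ret == NET_XMIT_SUCCESS) {
        sch->q.qlen++;
        bstats_update(&cl->bstats, skb);  /* per-class gnet_stats_basic_packed */
        qdisc_bstats_update(sch, skb);    /* qdisc-level sch->bstats */
    } else if (net_xmit_drop_count(ret)) {
        sch->qstats.drops++;
    }
    return ret;
}
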
net/core/dev.c (+4 -1)
          */
         if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
             skb_dst_force(skb);
-        __qdisc_update_bstats(q, skb->len);
+
+        qdisc_skb_cb(skb)->pkt_len = skb->len;
+        qdisc_bstats_update(q, skb);
+
         if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
             if (unlikely(contended)) {
                 spin_unlock(&q->busylock);

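This hunk is the TCQ_F_CAN_BYPASS fast path that the commit message note refers to (the surrounding function is __dev_xmit_skb()). The skb skips qdisc_enqueue() entirely here, so nothing has filled qdisc_skb_cb(skb)->pkt_len, and qdisc_bstats_update() reads the length through qdisc_pkt_len(). An annotated paraphrase of the two added lines follows; the comments are mine, not from the patch:

    /* Bypass path: the skb never goes through qdisc_enqueue(), so fill
     * in the packet length by hand before updating stats ... */
    qdisc_skb_cb(skb)->pkt_len = skb->len;
    /* ... so that qdisc_bstats_update() -> qdisc_pkt_len() sees a valid
     * value.  skb->len is the raw length with no stab correction, which
     * is why the shortcut can only be taken when no stab is set up on
     * the qdisc (the "Note" in the commit message). */
    qdisc_bstats_update(q, skb);
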
net/sched/act_csum.c (+1 -2)
 
     spin_lock(&p->tcf_lock);
     p->tcf_tm.lastuse = jiffies;
-    p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    p->tcf_bstats.packets++;
+    bstats_update(&p->tcf_bstats, skb);
     action = p->tcf_action;
     update_flags = p->update_flags;
     spin_unlock(&p->tcf_lock);

net/sched/act_ipt.c (+1 -2)
     spin_lock(&ipt->tcf_lock);
 
     ipt->tcf_tm.lastuse = jiffies;
-    ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    ipt->tcf_bstats.packets++;
+    bstats_update(&ipt->tcf_bstats, skb);
 
     /* yes, we have to worry about both in and out dev
        worry later - danger - this API seems to have changed

net/sched/act_mirred.c (+1 -2)
 
     spin_lock(&m->tcf_lock);
     m->tcf_tm.lastuse = jiffies;
-    m->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    m->tcf_bstats.packets++;
+    bstats_update(&m->tcf_bstats, skb);
 
     dev = m->tcfm_dev;
     if (!dev) {

net/sched/act_nat.c (+1 -2)
     egress = p->flags & TCA_NAT_FLAG_EGRESS;
     action = p->tcf_action;
 
-    p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    p->tcf_bstats.packets++;
+    bstats_update(&p->tcf_bstats, skb);
 
     spin_unlock(&p->tcf_lock);
 

net/sched/act_pedit.c (+1 -2)
 bad:
     p->tcf_qstats.overlimits++;
 done:
-    p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    p->tcf_bstats.packets++;
+    bstats_update(&p->tcf_bstats, skb);
     spin_unlock(&p->tcf_lock);
     return p->tcf_action;
 }

net/sched/act_police.c (+1 -2)
 
     spin_lock(&police->tcf_lock);
 
-    police->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    police->tcf_bstats.packets++;
+    bstats_update(&police->tcf_bstats, skb);
 
     if (police->tcfp_ewma_rate &&
         police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {

net/sched/act_simple.c (+1 -2)
 
     spin_lock(&d->tcf_lock);
     d->tcf_tm.lastuse = jiffies;
-    d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    d->tcf_bstats.packets++;
+    bstats_update(&d->tcf_bstats, skb);
 
     /* print policy string followed by _ then packet count
      * Example if this was the 3rd packet and the string was "hello"

net/sched/act_skbedit.c (+1 -2)
 
     spin_lock(&d->tcf_lock);
     d->tcf_tm.lastuse = jiffies;
-    d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    d->tcf_bstats.packets++;
+    bstats_update(&d->tcf_bstats, skb);
 
     if (d->flags & SKBEDIT_F_PRIORITY)
         skb->priority = d->priority;

net/sched/sch_atm.c (+2 -4)
         }
         return ret;
     }
-    sch->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
-    flow->bstats.bytes += qdisc_pkt_len(skb);
-    flow->bstats.packets++;
+    qdisc_bstats_update(sch, skb);
+    bstats_update(&flow->bstats, skb);
     /*
      * Okay, this may seem weird. We pretend we've dropped the packet if
      * it goes via ATM. The reason for this is that the outer qdisc

net/sched/sch_cbq.c (+2 -4)
     ret = qdisc_enqueue(skb, cl->q);
     if (ret == NET_XMIT_SUCCESS) {
         sch->q.qlen++;
-        sch->bstats.packets++;
-        sch->bstats.bytes += qdisc_pkt_len(skb);
+        qdisc_bstats_update(sch, skb);
         cbq_mark_toplevel(q, cl);
         if (!cl->next_alive)
             cbq_activate_class(cl);
···
     ret = qdisc_enqueue(skb, cl->q);
     if (ret == NET_XMIT_SUCCESS) {
         sch->q.qlen++;
-        sch->bstats.packets++;
-        sch->bstats.bytes += qdisc_pkt_len(skb);
+        qdisc_bstats_update(sch, skb);
         if (!cl->next_alive)
             cbq_activate_class(cl);
         return 0;

net/sched/sch_drr.c (+2 -6)
 {
     struct drr_sched *q = qdisc_priv(sch);
     struct drr_class *cl;
-    unsigned int len;
     int err;
 
     cl = drr_classify(skb, sch, &err);
···
         return err;
     }
 
-    len = qdisc_pkt_len(skb);
     err = qdisc_enqueue(skb, cl->qdisc);
     if (unlikely(err != NET_XMIT_SUCCESS)) {
         if (net_xmit_drop_count(err)) {
···
         cl->deficit = cl->quantum;
     }
 
-    cl->bstats.packets++;
-    cl->bstats.bytes += len;
-    sch->bstats.packets++;
-    sch->bstats.bytes += len;
+    bstats_update(&cl->bstats, skb);
+    qdisc_bstats_update(sch, skb);
 
     sch->q.qlen++;
     return err;

net/sched/sch_dsmark.c (+1 -2)
         return err;
     }
 
-    sch->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
+    qdisc_bstats_update(sch, skb);
     sch->q.qlen++;
 
     return NET_XMIT_SUCCESS;

net/sched/sch_hfsc.c (+2 -4)
     if (cl->qdisc->q.qlen == 1)
         set_active(cl, qdisc_pkt_len(skb));
 
-    cl->bstats.packets++;
-    cl->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
-    sch->bstats.bytes += qdisc_pkt_len(skb);
+    bstats_update(&cl->bstats, skb);
+    qdisc_bstats_update(sch, skb);
     sch->q.qlen++;
 
     return NET_XMIT_SUCCESS;

net/sched/sch_htb.c (+6 -11)
         }
         return ret;
     } else {
-        cl->bstats.packets +=
-            skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-        cl->bstats.bytes += qdisc_pkt_len(skb);
+        bstats_update(&cl->bstats, skb);
         htb_activate(q, cl);
     }
 
     sch->q.qlen++;
-    sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-    sch->bstats.bytes += qdisc_pkt_len(skb);
+    qdisc_bstats_update(sch, skb);
     return NET_XMIT_SUCCESS;
 }
···
                 htb_add_to_wait_tree(q, cl, diff);
         }
 
-        /* update byte stats except for leaves which are already updated */
-        if (cl->level) {
-            cl->bstats.bytes += bytes;
-            cl->bstats.packets += skb_is_gso(skb)?
-                skb_shinfo(skb)->gso_segs:1;
-        }
+        /* update basic stats except for leaves which are already updated */
+        if (cl->level)
+            bstats_update(&cl->bstats, skb);
+
         cl = cl->parent;
     }

net/sched/sch_ingress.c (+1 -2)
 
     result = tc_classify(skb, p->filter_list, &res);
 
-    sch->bstats.packets++;
-    sch->bstats.bytes += qdisc_pkt_len(skb);
+    qdisc_bstats_update(sch, skb);
     switch (result) {
     case TC_ACT_SHOT:
         result = TC_ACT_SHOT;

net/sched/sch_multiq.c (+1 -2)
 
     ret = qdisc_enqueue(skb, qdisc);
     if (ret == NET_XMIT_SUCCESS) {
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen++;
         return NET_XMIT_SUCCESS;
     }

net/sched/sch_netem.c (+2 -4)
 
     if (likely(ret == NET_XMIT_SUCCESS)) {
         sch->q.qlen++;
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
     } else if (net_xmit_drop_count(ret)) {
         sch->qstats.drops++;
     }
···
         __skb_queue_after(list, skb, nskb);
 
         sch->qstats.backlog += qdisc_pkt_len(nskb);
-        sch->bstats.bytes += qdisc_pkt_len(nskb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, nskb);
 
         return NET_XMIT_SUCCESS;

net/sched/sch_prio.c (+1 -2)
 
     ret = qdisc_enqueue(skb, qdisc);
     if (ret == NET_XMIT_SUCCESS) {
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen++;
         return NET_XMIT_SUCCESS;
     }

net/sched/sch_red.c (+1 -2)
 
     ret = qdisc_enqueue(skb, child);
     if (likely(ret == NET_XMIT_SUCCESS)) {
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
         sch->q.qlen++;
     } else if (net_xmit_drop_count(ret)) {
         q->stats.pdrop++;

net/sched/sch_sfq.c (+1 -2)
         slot->allot = q->scaled_quantum;
     }
     if (++sch->q.qlen <= q->limit) {
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
         return NET_XMIT_SUCCESS;
     }
 

net/sched/sch_tbf.c (+1 -2)
     }
 
     sch->q.qlen++;
-    sch->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
+    qdisc_bstats_update(sch, skb);
     return NET_XMIT_SUCCESS;
 }
 

net/sched/sch_teql.c (+1 -2)
 
     if (q->q.qlen < dev->tx_queue_len) {
         __skb_queue_tail(&q->q, skb);
-        sch->bstats.bytes += qdisc_pkt_len(skb);
-        sch->bstats.packets++;
+        qdisc_bstats_update(sch, skb);
         return NET_XMIT_SUCCESS;
     }
 