Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET_SCHED]: sch_htb: use generic estimator

Use the generic estimator instead of reimplementing (parts of) it.
For compatibility always create a default estimator for new classes.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Patrick McHardy and committed by
David S. Miller
ee39e10c 4bdf3991

+24 -61
+24 -61
net/sched/sch_htb.c
··· 69 69 */ 70 70 71 71 #define HTB_HSIZE 16 /* classid hash size */ 72 - #define HTB_EWMAC 2 /* rate average over HTB_EWMAC*HTB_HSIZE sec */ 73 - #define HTB_RATECM 1 /* whether to use rate computer */ 74 72 #define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */ 75 73 #define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */ 76 74 ··· 92 94 struct gnet_stats_rate_est rate_est; 93 95 struct tc_htb_xstats xstats; /* our special stats */ 94 96 int refcnt; /* usage count of this class */ 95 - 96 - #ifdef HTB_RATECM 97 - /* rate measurement counters */ 98 - unsigned long rate_bytes, sum_bytes; 99 - unsigned long rate_packets, sum_packets; 100 - #endif 101 97 102 98 /* topology */ 103 99 int level; /* our level (see above) */ ··· 186 194 int rate2quantum; /* quant = rate / rate2quantum */ 187 195 psched_time_t now; /* cached dequeue time */ 188 196 struct qdisc_watchdog watchdog; 189 - #ifdef HTB_RATECM 190 - struct timer_list rttim; /* rate computer timer */ 191 - int recmp_bucket; /* which hash bucket to recompute next */ 192 - #endif 193 197 194 198 /* non shaped skbs; let them go directly thru */ 195 199 struct sk_buff_head direct_queue; ··· 665 677 return NET_XMIT_SUCCESS; 666 678 } 667 679 668 - #ifdef HTB_RATECM 669 - #define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0 670 - static void htb_rate_timer(unsigned long arg) 671 - { 672 - struct Qdisc *sch = (struct Qdisc *)arg; 673 - struct htb_sched *q = qdisc_priv(sch); 674 - struct hlist_node *p; 675 - struct htb_class *cl; 676 - 677 - 678 - /* lock queue so that we can muck with it */ 679 - spin_lock_bh(&sch->dev->queue_lock); 680 - 681 - q->rttim.expires = jiffies + HZ; 682 - add_timer(&q->rttim); 683 - 684 - /* scan and recompute one bucket at time */ 685 - if (++q->recmp_bucket >= HTB_HSIZE) 686 - q->recmp_bucket = 0; 687 - 688 - hlist_for_each_entry(cl,p, q->hash + q->recmp_bucket, hlist) { 689 - RT_GEN(cl->sum_bytes, cl->rate_bytes); 690 - RT_GEN(cl->sum_packets, cl->rate_packets); 691 - } 692 - spin_unlock_bh(&sch->dev->queue_lock); 693 - } 694 - #endif 695 - 696 680 /** 697 681 * htb_charge_class - charges amount "bytes" to leaf and ancestors 698 682 * ··· 710 750 if (cl->cmode != HTB_CAN_SEND) 711 751 htb_add_to_wait_tree(q, cl, diff); 712 752 } 713 - #ifdef HTB_RATECM 714 - /* update rate counters */ 715 - cl->sum_bytes += bytes; 716 - cl->sum_packets++; 717 - #endif 718 753 719 754 /* update byte stats except for leaves which are already updated */ 720 755 if (cl->level) { ··· 1050 1095 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */ 1051 1096 q->direct_qlen = 2; 1052 1097 1053 - #ifdef HTB_RATECM 1054 - init_timer(&q->rttim); 1055 - q->rttim.function = htb_rate_timer; 1056 - q->rttim.data = (unsigned long)sch; 1057 - q->rttim.expires = jiffies + HZ; 1058 - add_timer(&q->rttim); 1059 - #endif 1060 1098 if ((q->rate2quantum = gopt->rate2quantum) < 1) 1061 1099 q->rate2quantum = 1; 1062 1100 q->defcls = gopt->defcls; ··· 1122 1174 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) 1123 1175 { 1124 1176 struct htb_class *cl = (struct htb_class *)arg; 1125 - 1126 - #ifdef HTB_RATECM 1127 - cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE); 1128 - cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE); 1129 - #endif 1130 1177 1131 1178 if (!cl->level && cl->un.leaf.q) 1132 1179 cl->qstats.qlen = cl->un.leaf.q->q.qlen; ··· 1220 1277 BUG_TRAP(cl->un.leaf.q); 1221 1278 qdisc_destroy(cl->un.leaf.q); 1222 1279 } 1280 + gen_kill_estimator(&cl->bstats, &cl->rate_est); 1223 1281 qdisc_put_rtab(cl->rate); 1224 1282 qdisc_put_rtab(cl->ceil); 1225 1283 ··· 1249 1305 struct htb_sched *q = qdisc_priv(sch); 1250 1306 1251 1307 qdisc_watchdog_cancel(&q->watchdog); 1252 - #ifdef HTB_RATECM 1253 - del_timer_sync(&q->rttim); 1254 - #endif 1255 1308 /* This line used to be after htb_destroy_class call below 1256 1309 and surprisingly it worked in 2.4. But it must precede it 1257 1310 because filter need its target class alive to be able to call ··· 1344 1403 if (!cl) { /* new class */ 1345 1404 struct Qdisc *new_q; 1346 1405 int prio; 1406 + struct { 1407 + struct rtattr rta; 1408 + struct gnet_estimator opt; 1409 + } est = { 1410 + .rta = { 1411 + .rta_len = RTA_LENGTH(sizeof(est.opt)), 1412 + .rta_type = TCA_RATE, 1413 + }, 1414 + .opt = { 1415 + /* 4s interval, 16s averaging constant */ 1416 + .interval = 2, 1417 + .ewma_log = 2, 1418 + }, 1419 + }; 1347 1420 1348 1421 /* check for valid classid */ 1349 1422 if (!classid || TC_H_MAJ(classid ^ sch->handle) ··· 1373 1418 if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL) 1374 1419 goto failure; 1375 1420 1421 + gen_new_estimator(&cl->bstats, &cl->rate_est, 1422 + &sch->dev->queue_lock, 1423 + tca[TCA_RATE-1] ? : &est.rta); 1376 1424 cl->refcnt = 1; 1377 1425 INIT_LIST_HEAD(&cl->sibling); 1378 1426 INIT_HLIST_NODE(&cl->hlist); ··· 1427 1469 hlist_add_head(&cl->hlist, q->hash + htb_hash(classid)); 1428 1470 list_add_tail(&cl->sibling, 1429 1471 parent ? &parent->children : &q->root); 1430 - } else 1472 + } else { 1473 + if (tca[TCA_RATE-1]) 1474 + gen_replace_estimator(&cl->bstats, &cl->rate_est, 1475 + &sch->dev->queue_lock, 1476 + tca[TCA_RATE-1]); 1431 1477 sch_tree_lock(sch); 1478 + } 1432 1479 1433 1480 /* it used to be a nasty bug here, we have to check that node 1434 1481 is really leaf before changing cl->un.leaf ! */