Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
net/sched/sch_prio.c at v2.6.23-rc1 (542 lines, 12 kB)
/*
 * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * Fixes:       19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 *              Init --  EINVAL when opt undefined
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


struct prio_sched_data
{
	int bands;
	int curband; /* for round-robin */
	struct tcf_proto *filter_list;
	u8 prio2band[TC_PRIO_MAX+1];
	struct Qdisc *queues[TCQ_PRIO_BANDS];
	int mq;
};


static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;

	*qerr = NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
#ifdef CONFIG_NET_CLS_ACT
		switch (tc_classify(skb, q->filter_list, &res)) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}

		if (!q->filter_list) {
#else
		if (!q->filter_list || tc_classify(skb, q->filter_list, &res)) {
#endif
			if (TC_H_MAJ(band))
				band = 0;
			band = q->prio2band[band&TC_PRIO_MAX];
			goto out;
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->bands)
		band = q->prio2band[0];
out:
	if (q->mq)
		skb_set_queue_mapping(skb, band);
	return q->queues[band];
}

static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
#endif

	if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	sch->qstats.drops++;
	return ret;
}


static int
prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
#endif

	if ((ret = qdisc->ops->requeue(skb, qdisc)) == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		sch->qstats.requeues++;
		return 0;
	}
	sch->qstats.drops++;
	return NET_XMIT_DROP;
}


static struct sk_buff *
prio_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;
	struct Qdisc *qdisc;

	for (prio = 0; prio < q->bands; prio++) {
		/* Check if the target subqueue is available before
		 * pulling an skb.  This way we avoid excessive requeues
		 * for slower queues.
		 */
		if (!netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
			qdisc = q->queues[prio];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;

}

static struct sk_buff *rr_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	int bandcount;

	/* Only take one pass through the queues.  If nothing is available,
	 * return nothing.
	 */
	for (bandcount = 0; bandcount < q->bands; bandcount++) {
		/* Check if the target subqueue is available before
		 * pulling an skb.  This way we avoid excessive requeues
		 * for slower queues.  If the queue is stopped, try the
		 * next queue.
		 */
		if (!netif_subqueue_stopped(sch->dev,
					    (q->mq ? q->curband : 0))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				sch->q.qlen--;
				q->curband++;
				if (q->curband >= q->bands)
					q->curband = 0;
				return skb;
			}
		}
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;
	}
	return NULL;
}

static unsigned int prio_drop(struct Qdisc* sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;
	unsigned int len;
	struct Qdisc *qdisc;

	for (prio = q->bands-1; prio >= 0; prio--) {
		qdisc = q->queues[prio];
		if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}


static void
prio_reset(struct Qdisc* sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	for (prio=0; prio<q->bands; prio++)
		qdisc_reset(q->queues[prio]);
	sch->q.qlen = 0;
}

static void
prio_destroy(struct Qdisc* sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(q->filter_list);
	for (prio=0; prio<q->bands; prio++)
		qdisc_destroy(q->queues[prio]);
}

static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt *qopt;
	struct rtattr *tb[TCA_PRIO_MAX];
	int i;

	if (rtattr_parse_nested_compat(tb, TCA_PRIO_MAX, opt, qopt,
				       sizeof(*qopt)))
		return -EINVAL;
	q->bands = qopt->bands;
	/* If we're multiqueue, make sure the number of incoming bands
	 * matches the number of queues on the device we're associating with.
	 * If the number of bands requested is zero, then set q->bands to
	 * dev->egress_subqueue_count.
	 */
	q->mq = RTA_GET_FLAG(tb[TCA_PRIO_MQ - 1]);
	if (q->mq) {
		if (sch->handle != TC_H_ROOT)
			return -EINVAL;
		if (netif_is_multiqueue(sch->dev)) {
			if (q->bands == 0)
				q->bands = sch->dev->egress_subqueue_count;
			else if (q->bands != sch->dev->egress_subqueue_count)
				return -EINVAL;
		} else
			return -EOPNOTSUPP;
	}

	if (q->bands > TCQ_PRIO_BANDS || q->bands < 2)
		return -EINVAL;

	for (i=0; i<=TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= q->bands)
			return -EINVAL;
	}

	sch_tree_lock(sch);
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
		struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
		if (child != &noop_qdisc) {
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}
	sch_tree_unlock(sch);

	for (i=0; i<q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child;
			child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle, i + 1));
			if (child) {
				sch_tree_lock(sch);
				child = xchg(&q->queues[i], child);

				if (child != &noop_qdisc) {
					qdisc_tree_decrease_qlen(child,
								 child->q.qlen);
					qdisc_destroy(child);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}

static int prio_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int i;

	for (i=0; i<TCQ_PRIO_BANDS; i++)
		q->queues[i] = &noop_qdisc;

	if (opt == NULL) {
		return -EINVAL;
	} else {
		int err;

		if ((err = prio_tune(sch, opt)) != 0)
			return err;
	}
	return 0;
}

static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *nest;
	struct tc_prio_qopt opt;

	opt.bands = q->bands;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);

	nest = RTA_NEST_COMPAT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	if (q->mq)
		RTA_PUT_FLAG(skb, TCA_PRIO_MQ);
	RTA_NEST_COMPAT_END(skb, nest);

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (band >= q->bands)
		return -EINVAL;

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->queues[band];
	q->queues[band] = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *
prio_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (band >= q->bands)
		return NULL;

	return q->queues[band];
}

static unsigned long prio_get(struct Qdisc *sch, u32 classid)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_get(sch, classid);
}


static void prio_put(struct Qdisc *q, unsigned long cl)
{
	return;
}

static int prio_change(struct Qdisc *sch, u32 handle, u32 parent,
		       struct rtattr **tca, unsigned long *arg)
{
	unsigned long cl = *arg;
	struct prio_sched_data *q = qdisc_priv(sch);

	if (cl - 1 > q->bands)
		return -ENOENT;
	return 0;
}

static int prio_delete(struct Qdisc *sch, unsigned long cl)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	if (cl - 1 > q->bands)
		return -ENOENT;
	return 0;
}


static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
			   struct tcmsg *tcm)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	if (cl - 1 > q->bands)
		return -ENOENT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	if (q->queues[cl-1])
		tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}

static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
		return -1;

	return 0;
}

static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (arg->stop)
		return;

	for (prio = 0; prio < q->bands; prio++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, prio+1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static struct Qdisc_class_ops prio_class_ops = {
	.graft		=	prio_graft,
	.leaf		=	prio_leaf,
	.get		=	prio_get,
	.put		=	prio_put,
	.change		=	prio_change,
	.delete		=	prio_delete,
	.walk		=	prio_walk,
	.tcf_chain	=	prio_find_tcf,
	.bind_tcf	=	prio_bind,
	.unbind_tcf	=	prio_put,
	.dump		=	prio_dump_class,
	.dump_stats	=	prio_dump_class_stats,
};

static struct Qdisc_ops prio_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"prio",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	prio_dequeue,
	.requeue	=	prio_requeue,
	.drop		=	prio_drop,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};

static struct Qdisc_ops rr_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"rr",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	rr_dequeue,
	.requeue	=	prio_requeue,
	.drop		=	prio_drop,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};

static int __init prio_module_init(void)
{
	int err;

	err = register_qdisc(&prio_qdisc_ops);
	if (err < 0)
		return err;
	err = register_qdisc(&rr_qdisc_ops);
	if (err < 0)
		unregister_qdisc(&prio_qdisc_ops);
	return err;
}

static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
	unregister_qdisc(&rr_qdisc_ops);
}

module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");
MODULE_ALIAS("sch_rr");
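
Note on band selection: when the skb's priority field does not carry this qdisc's handle and no classifier attaches or matches, prio_classify() picks the band with a plain table lookup, band = q->prio2band[skb->priority & TC_PRIO_MAX]; the TC_H_MIN(band) - 1 adjustment applies only to classifier results. The user-space sketch below replays that lookup. The priomap values in it are the conventional tc(8) defaults, assumed here for illustration only; they are not defined anywhere in this file.

/*
 * Standalone sketch (not part of the kernel build) of the fallback
 * band lookup in prio_classify():
 *
 *         band = q->prio2band[skb->priority & TC_PRIO_MAX];
 *
 * The priomap below is the conventional tc(8) default priomap,
 * assumed for illustration rather than taken from sch_prio.c.
 */
#include <stdio.h>

#define TC_PRIO_MAX 15

static const unsigned char prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

int main(void)
{
	unsigned int priority;

	/* Print the band each skb->priority value would select. */
	for (priority = 0; priority <= TC_PRIO_MAX; priority++)
		printf("priority %2u -> band %u\n",
		       priority, prio2band[priority & TC_PRIO_MAX]);
	return 0;
}

Since prio_dequeue() scans bands in ascending order, traffic mapped to band 0 (interactive classes in the default map) is always served before bands 1 and 2; only rr_dequeue() rotates through the bands round-robin via q->curband.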