at v2.6.23 1271 lines 30 kB view raw
1/* 2 * net/sched/sch_api.c Packet scheduler API. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License 6 * as published by the Free Software Foundation; either version 7 * 2 of the License, or (at your option) any later version. 8 * 9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 10 * 11 * Fixes: 12 * 13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired. 14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support 15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support 16 */ 17 18#include <linux/module.h> 19#include <linux/types.h> 20#include <linux/kernel.h> 21#include <linux/string.h> 22#include <linux/errno.h> 23#include <linux/skbuff.h> 24#include <linux/init.h> 25#include <linux/proc_fs.h> 26#include <linux/seq_file.h> 27#include <linux/kmod.h> 28#include <linux/list.h> 29#include <linux/hrtimer.h> 30 31#include <net/netlink.h> 32#include <net/pkt_sched.h> 33 34static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid, 35 struct Qdisc *old, struct Qdisc *new); 36static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, 37 struct Qdisc *q, unsigned long cl, int event); 38 39/* 40 41 Short review. 42 ------------- 43 44 This file consists of two interrelated parts: 45 46 1. queueing disciplines manager frontend. 47 2. traffic classes manager frontend. 48 49 Generally, queueing discipline ("qdisc") is a black box, 50 which is able to enqueue packets and to dequeue them (when 51 device is ready to send something) in order and at times 52 determined by algorithm hidden in it. 53 54 qdisc's are divided to two categories: 55 - "queues", which have no internal structure visible from outside. 56 - "schedulers", which split all the packets to "traffic classes", 57 using "packet classifiers" (look at cls_api.c) 58 59 In turn, classes may have child qdiscs (as rule, queues) 60 attached to them etc. etc. etc. 61 62 The goal of the routines in this file is to translate 63 information supplied by user in the form of handles 64 to more intelligible for kernel form, to make some sanity 65 checks and part of work, which is common to all qdiscs 66 and to provide rtnetlink notifications. 67 68 All real intelligent work is done inside qdisc modules. 69 70 71 72 Every discipline has two major routines: enqueue and dequeue. 73 74 ---dequeue 75 76 dequeue usually returns a skb to send. It is allowed to return NULL, 77 but it does not mean that queue is empty, it just means that 78 discipline does not want to send anything this time. 79 Queue is really empty if q->q.qlen == 0. 80 For complicated disciplines with multiple queues q->q is not 81 real packet queue, but however q->q.qlen must be valid. 82 83 ---enqueue 84 85 enqueue returns 0, if packet was enqueued successfully. 86 If packet (this one or another one) was dropped, it returns 87 not zero error code. 88 NET_XMIT_DROP - this packet dropped 89 Expected action: do not backoff, but wait until queue will clear. 90 NET_XMIT_CN - probably this packet enqueued, but another one dropped. 91 Expected action: backoff or ignore 92 NET_XMIT_POLICED - dropped by police. 93 Expected action: backoff or error to real-time apps. 94 95 Auxiliary routines: 96 97 ---requeue 98 99 requeues once dequeued packet. It is used for non-standard or 100 just buggy devices, which can defer output even if dev->tbusy=0. 101 102 ---reset 103 104 returns qdisc to initial state: purge all buffers, clear all 105 timers, counters (except for statistics) etc. 106 107 ---init 108 109 initializes newly created qdisc. 110 111 ---destroy 112 113 destroys resources allocated by init and during lifetime of qdisc. 114 115 ---change 116 117 changes qdisc parameters. 118 */ 119 120/* Protects list of registered TC modules. It is pure SMP lock. */ 121static DEFINE_RWLOCK(qdisc_mod_lock); 122 123 124/************************************************ 125 * Queueing disciplines manipulation. * 126 ************************************************/ 127 128 129/* The list of all installed queueing disciplines. */ 130 131static struct Qdisc_ops *qdisc_base; 132 133/* Register/uregister queueing discipline */ 134 135int register_qdisc(struct Qdisc_ops *qops) 136{ 137 struct Qdisc_ops *q, **qp; 138 int rc = -EEXIST; 139 140 write_lock(&qdisc_mod_lock); 141 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) 142 if (!strcmp(qops->id, q->id)) 143 goto out; 144 145 if (qops->enqueue == NULL) 146 qops->enqueue = noop_qdisc_ops.enqueue; 147 if (qops->requeue == NULL) 148 qops->requeue = noop_qdisc_ops.requeue; 149 if (qops->dequeue == NULL) 150 qops->dequeue = noop_qdisc_ops.dequeue; 151 152 qops->next = NULL; 153 *qp = qops; 154 rc = 0; 155out: 156 write_unlock(&qdisc_mod_lock); 157 return rc; 158} 159 160int unregister_qdisc(struct Qdisc_ops *qops) 161{ 162 struct Qdisc_ops *q, **qp; 163 int err = -ENOENT; 164 165 write_lock(&qdisc_mod_lock); 166 for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next) 167 if (q == qops) 168 break; 169 if (q) { 170 *qp = q->next; 171 q->next = NULL; 172 err = 0; 173 } 174 write_unlock(&qdisc_mod_lock); 175 return err; 176} 177 178/* We know handle. Find qdisc among all qdisc's attached to device 179 (root qdisc, all its children, children of children etc.) 180 */ 181 182struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) 183{ 184 struct Qdisc *q; 185 186 list_for_each_entry(q, &dev->qdisc_list, list) { 187 if (q->handle == handle) 188 return q; 189 } 190 return NULL; 191} 192 193static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) 194{ 195 unsigned long cl; 196 struct Qdisc *leaf; 197 struct Qdisc_class_ops *cops = p->ops->cl_ops; 198 199 if (cops == NULL) 200 return NULL; 201 cl = cops->get(p, classid); 202 203 if (cl == 0) 204 return NULL; 205 leaf = cops->leaf(p, cl); 206 cops->put(p, cl); 207 return leaf; 208} 209 210/* Find queueing discipline by name */ 211 212static struct Qdisc_ops *qdisc_lookup_ops(struct rtattr *kind) 213{ 214 struct Qdisc_ops *q = NULL; 215 216 if (kind) { 217 read_lock(&qdisc_mod_lock); 218 for (q = qdisc_base; q; q = q->next) { 219 if (rtattr_strcmp(kind, q->id) == 0) { 220 if (!try_module_get(q->owner)) 221 q = NULL; 222 break; 223 } 224 } 225 read_unlock(&qdisc_mod_lock); 226 } 227 return q; 228} 229 230static struct qdisc_rate_table *qdisc_rtab_list; 231 232struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct rtattr *tab) 233{ 234 struct qdisc_rate_table *rtab; 235 236 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) { 237 if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) { 238 rtab->refcnt++; 239 return rtab; 240 } 241 } 242 243 if (tab == NULL || r->rate == 0 || r->cell_log == 0 || RTA_PAYLOAD(tab) != 1024) 244 return NULL; 245 246 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL); 247 if (rtab) { 248 rtab->rate = *r; 249 rtab->refcnt = 1; 250 memcpy(rtab->data, RTA_DATA(tab), 1024); 251 rtab->next = qdisc_rtab_list; 252 qdisc_rtab_list = rtab; 253 } 254 return rtab; 255} 256 257void qdisc_put_rtab(struct qdisc_rate_table *tab) 258{ 259 struct qdisc_rate_table *rtab, **rtabp; 260 261 if (!tab || --tab->refcnt) 262 return; 263 264 for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) { 265 if (rtab == tab) { 266 *rtabp = rtab->next; 267 kfree(rtab); 268 return; 269 } 270 } 271} 272 273static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) 274{ 275 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, 276 timer); 277 struct net_device *dev = wd->qdisc->dev; 278 279 wd->qdisc->flags &= ~TCQ_F_THROTTLED; 280 smp_wmb(); 281 netif_schedule(dev); 282 283 return HRTIMER_NORESTART; 284} 285 286void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) 287{ 288 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 289 wd->timer.function = qdisc_watchdog; 290 wd->qdisc = qdisc; 291} 292EXPORT_SYMBOL(qdisc_watchdog_init); 293 294void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) 295{ 296 ktime_t time; 297 298 wd->qdisc->flags |= TCQ_F_THROTTLED; 299 time = ktime_set(0, 0); 300 time = ktime_add_ns(time, PSCHED_US2NS(expires)); 301 hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS); 302} 303EXPORT_SYMBOL(qdisc_watchdog_schedule); 304 305void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) 306{ 307 hrtimer_cancel(&wd->timer); 308 wd->qdisc->flags &= ~TCQ_F_THROTTLED; 309} 310EXPORT_SYMBOL(qdisc_watchdog_cancel); 311 312/* Allocate an unique handle from space managed by kernel */ 313 314static u32 qdisc_alloc_handle(struct net_device *dev) 315{ 316 int i = 0x10000; 317 static u32 autohandle = TC_H_MAKE(0x80000000U, 0); 318 319 do { 320 autohandle += TC_H_MAKE(0x10000U, 0); 321 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0)) 322 autohandle = TC_H_MAKE(0x80000000U, 0); 323 } while (qdisc_lookup(dev, autohandle) && --i > 0); 324 325 return i>0 ? autohandle : 0; 326} 327 328/* Attach toplevel qdisc to device dev */ 329 330static struct Qdisc * 331dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc) 332{ 333 struct Qdisc *oqdisc; 334 335 if (dev->flags & IFF_UP) 336 dev_deactivate(dev); 337 338 qdisc_lock_tree(dev); 339 if (qdisc && qdisc->flags&TCQ_F_INGRESS) { 340 oqdisc = dev->qdisc_ingress; 341 /* Prune old scheduler */ 342 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) { 343 /* delete */ 344 qdisc_reset(oqdisc); 345 dev->qdisc_ingress = NULL; 346 } else { /* new */ 347 dev->qdisc_ingress = qdisc; 348 } 349 350 } else { 351 352 oqdisc = dev->qdisc_sleeping; 353 354 /* Prune old scheduler */ 355 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) 356 qdisc_reset(oqdisc); 357 358 /* ... and graft new one */ 359 if (qdisc == NULL) 360 qdisc = &noop_qdisc; 361 dev->qdisc_sleeping = qdisc; 362 dev->qdisc = &noop_qdisc; 363 } 364 365 qdisc_unlock_tree(dev); 366 367 if (dev->flags & IFF_UP) 368 dev_activate(dev); 369 370 return oqdisc; 371} 372 373void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) 374{ 375 struct Qdisc_class_ops *cops; 376 unsigned long cl; 377 u32 parentid; 378 379 if (n == 0) 380 return; 381 while ((parentid = sch->parent)) { 382 sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid)); 383 if (sch == NULL) { 384 WARN_ON(parentid != TC_H_ROOT); 385 return; 386 } 387 cops = sch->ops->cl_ops; 388 if (cops->qlen_notify) { 389 cl = cops->get(sch, parentid); 390 cops->qlen_notify(sch, cl); 391 cops->put(sch, cl); 392 } 393 sch->q.qlen -= n; 394 } 395} 396EXPORT_SYMBOL(qdisc_tree_decrease_qlen); 397 398/* Graft qdisc "new" to class "classid" of qdisc "parent" or 399 to device "dev". 400 401 Old qdisc is not destroyed but returned in *old. 402 */ 403 404static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, 405 u32 classid, 406 struct Qdisc *new, struct Qdisc **old) 407{ 408 int err = 0; 409 struct Qdisc *q = *old; 410 411 412 if (parent == NULL) { 413 if (q && q->flags&TCQ_F_INGRESS) { 414 *old = dev_graft_qdisc(dev, q); 415 } else { 416 *old = dev_graft_qdisc(dev, new); 417 } 418 } else { 419 struct Qdisc_class_ops *cops = parent->ops->cl_ops; 420 421 err = -EINVAL; 422 423 if (cops) { 424 unsigned long cl = cops->get(parent, classid); 425 if (cl) { 426 err = cops->graft(parent, cl, new, old); 427 cops->put(parent, cl); 428 } 429 } 430 } 431 return err; 432} 433 434/* 435 Allocate and initialize new qdisc. 436 437 Parameters are passed via opt. 438 */ 439 440static struct Qdisc * 441qdisc_create(struct net_device *dev, u32 parent, u32 handle, 442 struct rtattr **tca, int *errp) 443{ 444 int err; 445 struct rtattr *kind = tca[TCA_KIND-1]; 446 struct Qdisc *sch; 447 struct Qdisc_ops *ops; 448 449 ops = qdisc_lookup_ops(kind); 450#ifdef CONFIG_KMOD 451 if (ops == NULL && kind != NULL) { 452 char name[IFNAMSIZ]; 453 if (rtattr_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) { 454 /* We dropped the RTNL semaphore in order to 455 * perform the module load. So, even if we 456 * succeeded in loading the module we have to 457 * tell the caller to replay the request. We 458 * indicate this using -EAGAIN. 459 * We replay the request because the device may 460 * go away in the mean time. 461 */ 462 rtnl_unlock(); 463 request_module("sch_%s", name); 464 rtnl_lock(); 465 ops = qdisc_lookup_ops(kind); 466 if (ops != NULL) { 467 /* We will try again qdisc_lookup_ops, 468 * so don't keep a reference. 469 */ 470 module_put(ops->owner); 471 err = -EAGAIN; 472 goto err_out; 473 } 474 } 475 } 476#endif 477 478 err = -ENOENT; 479 if (ops == NULL) 480 goto err_out; 481 482 sch = qdisc_alloc(dev, ops); 483 if (IS_ERR(sch)) { 484 err = PTR_ERR(sch); 485 goto err_out2; 486 } 487 488 sch->parent = parent; 489 490 if (handle == TC_H_INGRESS) { 491 sch->flags |= TCQ_F_INGRESS; 492 sch->stats_lock = &dev->ingress_lock; 493 handle = TC_H_MAKE(TC_H_INGRESS, 0); 494 } else { 495 sch->stats_lock = &dev->queue_lock; 496 if (handle == 0) { 497 handle = qdisc_alloc_handle(dev); 498 err = -ENOMEM; 499 if (handle == 0) 500 goto err_out3; 501 } 502 } 503 504 sch->handle = handle; 505 506 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) { 507 if (tca[TCA_RATE-1]) { 508 err = gen_new_estimator(&sch->bstats, &sch->rate_est, 509 sch->stats_lock, 510 tca[TCA_RATE-1]); 511 if (err) { 512 /* 513 * Any broken qdiscs that would require 514 * a ops->reset() here? The qdisc was never 515 * in action so it shouldn't be necessary. 516 */ 517 if (ops->destroy) 518 ops->destroy(sch); 519 goto err_out3; 520 } 521 } 522 qdisc_lock_tree(dev); 523 list_add_tail(&sch->list, &dev->qdisc_list); 524 qdisc_unlock_tree(dev); 525 526 return sch; 527 } 528err_out3: 529 dev_put(dev); 530 kfree((char *) sch - sch->padded); 531err_out2: 532 module_put(ops->owner); 533err_out: 534 *errp = err; 535 return NULL; 536} 537 538static int qdisc_change(struct Qdisc *sch, struct rtattr **tca) 539{ 540 if (tca[TCA_OPTIONS-1]) { 541 int err; 542 543 if (sch->ops->change == NULL) 544 return -EINVAL; 545 err = sch->ops->change(sch, tca[TCA_OPTIONS-1]); 546 if (err) 547 return err; 548 } 549 if (tca[TCA_RATE-1]) 550 gen_replace_estimator(&sch->bstats, &sch->rate_est, 551 sch->stats_lock, tca[TCA_RATE-1]); 552 return 0; 553} 554 555struct check_loop_arg 556{ 557 struct qdisc_walker w; 558 struct Qdisc *p; 559 int depth; 560}; 561 562static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w); 563 564static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth) 565{ 566 struct check_loop_arg arg; 567 568 if (q->ops->cl_ops == NULL) 569 return 0; 570 571 arg.w.stop = arg.w.skip = arg.w.count = 0; 572 arg.w.fn = check_loop_fn; 573 arg.depth = depth; 574 arg.p = p; 575 q->ops->cl_ops->walk(q, &arg.w); 576 return arg.w.stop ? -ELOOP : 0; 577} 578 579static int 580check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w) 581{ 582 struct Qdisc *leaf; 583 struct Qdisc_class_ops *cops = q->ops->cl_ops; 584 struct check_loop_arg *arg = (struct check_loop_arg *)w; 585 586 leaf = cops->leaf(q, cl); 587 if (leaf) { 588 if (leaf == arg->p || arg->depth > 7) 589 return -ELOOP; 590 return check_loop(leaf, arg->p, arg->depth + 1); 591 } 592 return 0; 593} 594 595/* 596 * Delete/get qdisc. 597 */ 598 599static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 600{ 601 struct tcmsg *tcm = NLMSG_DATA(n); 602 struct rtattr **tca = arg; 603 struct net_device *dev; 604 u32 clid = tcm->tcm_parent; 605 struct Qdisc *q = NULL; 606 struct Qdisc *p = NULL; 607 int err; 608 609 if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL) 610 return -ENODEV; 611 612 if (clid) { 613 if (clid != TC_H_ROOT) { 614 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) { 615 if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL) 616 return -ENOENT; 617 q = qdisc_leaf(p, clid); 618 } else { /* ingress */ 619 q = dev->qdisc_ingress; 620 } 621 } else { 622 q = dev->qdisc_sleeping; 623 } 624 if (!q) 625 return -ENOENT; 626 627 if (tcm->tcm_handle && q->handle != tcm->tcm_handle) 628 return -EINVAL; 629 } else { 630 if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL) 631 return -ENOENT; 632 } 633 634 if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)) 635 return -EINVAL; 636 637 if (n->nlmsg_type == RTM_DELQDISC) { 638 if (!clid) 639 return -EINVAL; 640 if (q->handle == 0) 641 return -ENOENT; 642 if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0) 643 return err; 644 if (q) { 645 qdisc_notify(skb, n, clid, q, NULL); 646 qdisc_lock_tree(dev); 647 qdisc_destroy(q); 648 qdisc_unlock_tree(dev); 649 } 650 } else { 651 qdisc_notify(skb, n, clid, NULL, q); 652 } 653 return 0; 654} 655 656/* 657 Create/change qdisc. 658 */ 659 660static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 661{ 662 struct tcmsg *tcm; 663 struct rtattr **tca; 664 struct net_device *dev; 665 u32 clid; 666 struct Qdisc *q, *p; 667 int err; 668 669replay: 670 /* Reinit, just in case something touches this. */ 671 tcm = NLMSG_DATA(n); 672 tca = arg; 673 clid = tcm->tcm_parent; 674 q = p = NULL; 675 676 if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL) 677 return -ENODEV; 678 679 if (clid) { 680 if (clid != TC_H_ROOT) { 681 if (clid != TC_H_INGRESS) { 682 if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL) 683 return -ENOENT; 684 q = qdisc_leaf(p, clid); 685 } else { /*ingress */ 686 q = dev->qdisc_ingress; 687 } 688 } else { 689 q = dev->qdisc_sleeping; 690 } 691 692 /* It may be default qdisc, ignore it */ 693 if (q && q->handle == 0) 694 q = NULL; 695 696 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) { 697 if (tcm->tcm_handle) { 698 if (q && !(n->nlmsg_flags&NLM_F_REPLACE)) 699 return -EEXIST; 700 if (TC_H_MIN(tcm->tcm_handle)) 701 return -EINVAL; 702 if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL) 703 goto create_n_graft; 704 if (n->nlmsg_flags&NLM_F_EXCL) 705 return -EEXIST; 706 if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)) 707 return -EINVAL; 708 if (q == p || 709 (p && check_loop(q, p, 0))) 710 return -ELOOP; 711 atomic_inc(&q->refcnt); 712 goto graft; 713 } else { 714 if (q == NULL) 715 goto create_n_graft; 716 717 /* This magic test requires explanation. 718 * 719 * We know, that some child q is already 720 * attached to this parent and have choice: 721 * either to change it or to create/graft new one. 722 * 723 * 1. We are allowed to create/graft only 724 * if CREATE and REPLACE flags are set. 725 * 726 * 2. If EXCL is set, requestor wanted to say, 727 * that qdisc tcm_handle is not expected 728 * to exist, so that we choose create/graft too. 729 * 730 * 3. The last case is when no flags are set. 731 * Alas, it is sort of hole in API, we 732 * cannot decide what to do unambiguously. 733 * For now we select create/graft, if 734 * user gave KIND, which does not match existing. 735 */ 736 if ((n->nlmsg_flags&NLM_F_CREATE) && 737 (n->nlmsg_flags&NLM_F_REPLACE) && 738 ((n->nlmsg_flags&NLM_F_EXCL) || 739 (tca[TCA_KIND-1] && 740 rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)))) 741 goto create_n_graft; 742 } 743 } 744 } else { 745 if (!tcm->tcm_handle) 746 return -EINVAL; 747 q = qdisc_lookup(dev, tcm->tcm_handle); 748 } 749 750 /* Change qdisc parameters */ 751 if (q == NULL) 752 return -ENOENT; 753 if (n->nlmsg_flags&NLM_F_EXCL) 754 return -EEXIST; 755 if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)) 756 return -EINVAL; 757 err = qdisc_change(q, tca); 758 if (err == 0) 759 qdisc_notify(skb, n, clid, NULL, q); 760 return err; 761 762create_n_graft: 763 if (!(n->nlmsg_flags&NLM_F_CREATE)) 764 return -ENOENT; 765 if (clid == TC_H_INGRESS) 766 q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_parent, 767 tca, &err); 768 else 769 q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_handle, 770 tca, &err); 771 if (q == NULL) { 772 if (err == -EAGAIN) 773 goto replay; 774 return err; 775 } 776 777graft: 778 if (1) { 779 struct Qdisc *old_q = NULL; 780 err = qdisc_graft(dev, p, clid, q, &old_q); 781 if (err) { 782 if (q) { 783 qdisc_lock_tree(dev); 784 qdisc_destroy(q); 785 qdisc_unlock_tree(dev); 786 } 787 return err; 788 } 789 qdisc_notify(skb, n, clid, old_q, q); 790 if (old_q) { 791 qdisc_lock_tree(dev); 792 qdisc_destroy(old_q); 793 qdisc_unlock_tree(dev); 794 } 795 } 796 return 0; 797} 798 799static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, 800 u32 pid, u32 seq, u16 flags, int event) 801{ 802 struct tcmsg *tcm; 803 struct nlmsghdr *nlh; 804 unsigned char *b = skb_tail_pointer(skb); 805 struct gnet_dump d; 806 807 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); 808 tcm = NLMSG_DATA(nlh); 809 tcm->tcm_family = AF_UNSPEC; 810 tcm->tcm__pad1 = 0; 811 tcm->tcm__pad2 = 0; 812 tcm->tcm_ifindex = q->dev->ifindex; 813 tcm->tcm_parent = clid; 814 tcm->tcm_handle = q->handle; 815 tcm->tcm_info = atomic_read(&q->refcnt); 816 RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id); 817 if (q->ops->dump && q->ops->dump(q, skb) < 0) 818 goto rtattr_failure; 819 q->qstats.qlen = q->q.qlen; 820 821 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 822 TCA_XSTATS, q->stats_lock, &d) < 0) 823 goto rtattr_failure; 824 825 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) 826 goto rtattr_failure; 827 828 if (gnet_stats_copy_basic(&d, &q->bstats) < 0 || 829 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 || 830 gnet_stats_copy_queue(&d, &q->qstats) < 0) 831 goto rtattr_failure; 832 833 if (gnet_stats_finish_copy(&d) < 0) 834 goto rtattr_failure; 835 836 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 837 return skb->len; 838 839nlmsg_failure: 840rtattr_failure: 841 nlmsg_trim(skb, b); 842 return -1; 843} 844 845static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, 846 u32 clid, struct Qdisc *old, struct Qdisc *new) 847{ 848 struct sk_buff *skb; 849 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; 850 851 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 852 if (!skb) 853 return -ENOBUFS; 854 855 if (old && old->handle) { 856 if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) 857 goto err_out; 858 } 859 if (new) { 860 if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) 861 goto err_out; 862 } 863 864 if (skb->len) 865 return rtnetlink_send(skb, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); 866 867err_out: 868 kfree_skb(skb); 869 return -EINVAL; 870} 871 872static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) 873{ 874 int idx, q_idx; 875 int s_idx, s_q_idx; 876 struct net_device *dev; 877 struct Qdisc *q; 878 879 s_idx = cb->args[0]; 880 s_q_idx = q_idx = cb->args[1]; 881 read_lock(&dev_base_lock); 882 idx = 0; 883 for_each_netdev(dev) { 884 if (idx < s_idx) 885 goto cont; 886 if (idx > s_idx) 887 s_q_idx = 0; 888 q_idx = 0; 889 list_for_each_entry(q, &dev->qdisc_list, list) { 890 if (q_idx < s_q_idx) { 891 q_idx++; 892 continue; 893 } 894 if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, 895 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) 896 goto done; 897 q_idx++; 898 } 899cont: 900 idx++; 901 } 902 903done: 904 read_unlock(&dev_base_lock); 905 906 cb->args[0] = idx; 907 cb->args[1] = q_idx; 908 909 return skb->len; 910} 911 912 913 914/************************************************ 915 * Traffic classes manipulation. * 916 ************************************************/ 917 918 919 920static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 921{ 922 struct tcmsg *tcm = NLMSG_DATA(n); 923 struct rtattr **tca = arg; 924 struct net_device *dev; 925 struct Qdisc *q = NULL; 926 struct Qdisc_class_ops *cops; 927 unsigned long cl = 0; 928 unsigned long new_cl; 929 u32 pid = tcm->tcm_parent; 930 u32 clid = tcm->tcm_handle; 931 u32 qid = TC_H_MAJ(clid); 932 int err; 933 934 if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL) 935 return -ENODEV; 936 937 /* 938 parent == TC_H_UNSPEC - unspecified parent. 939 parent == TC_H_ROOT - class is root, which has no parent. 940 parent == X:0 - parent is root class. 941 parent == X:Y - parent is a node in hierarchy. 942 parent == 0:Y - parent is X:Y, where X:0 is qdisc. 943 944 handle == 0:0 - generate handle from kernel pool. 945 handle == 0:Y - class is X:Y, where X:0 is qdisc. 946 handle == X:Y - clear. 947 handle == X:0 - root class. 948 */ 949 950 /* Step 1. Determine qdisc handle X:0 */ 951 952 if (pid != TC_H_ROOT) { 953 u32 qid1 = TC_H_MAJ(pid); 954 955 if (qid && qid1) { 956 /* If both majors are known, they must be identical. */ 957 if (qid != qid1) 958 return -EINVAL; 959 } else if (qid1) { 960 qid = qid1; 961 } else if (qid == 0) 962 qid = dev->qdisc_sleeping->handle; 963 964 /* Now qid is genuine qdisc handle consistent 965 both with parent and child. 966 967 TC_H_MAJ(pid) still may be unspecified, complete it now. 968 */ 969 if (pid) 970 pid = TC_H_MAKE(qid, pid); 971 } else { 972 if (qid == 0) 973 qid = dev->qdisc_sleeping->handle; 974 } 975 976 /* OK. Locate qdisc */ 977 if ((q = qdisc_lookup(dev, qid)) == NULL) 978 return -ENOENT; 979 980 /* An check that it supports classes */ 981 cops = q->ops->cl_ops; 982 if (cops == NULL) 983 return -EINVAL; 984 985 /* Now try to get class */ 986 if (clid == 0) { 987 if (pid == TC_H_ROOT) 988 clid = qid; 989 } else 990 clid = TC_H_MAKE(qid, clid); 991 992 if (clid) 993 cl = cops->get(q, clid); 994 995 if (cl == 0) { 996 err = -ENOENT; 997 if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE)) 998 goto out; 999 } else { 1000 switch (n->nlmsg_type) { 1001 case RTM_NEWTCLASS: 1002 err = -EEXIST; 1003 if (n->nlmsg_flags&NLM_F_EXCL) 1004 goto out; 1005 break; 1006 case RTM_DELTCLASS: 1007 err = cops->delete(q, cl); 1008 if (err == 0) 1009 tclass_notify(skb, n, q, cl, RTM_DELTCLASS); 1010 goto out; 1011 case RTM_GETTCLASS: 1012 err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS); 1013 goto out; 1014 default: 1015 err = -EINVAL; 1016 goto out; 1017 } 1018 } 1019 1020 new_cl = cl; 1021 err = cops->change(q, clid, pid, tca, &new_cl); 1022 if (err == 0) 1023 tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS); 1024 1025out: 1026 if (cl) 1027 cops->put(q, cl); 1028 1029 return err; 1030} 1031 1032 1033static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, 1034 unsigned long cl, 1035 u32 pid, u32 seq, u16 flags, int event) 1036{ 1037 struct tcmsg *tcm; 1038 struct nlmsghdr *nlh; 1039 unsigned char *b = skb_tail_pointer(skb); 1040 struct gnet_dump d; 1041 struct Qdisc_class_ops *cl_ops = q->ops->cl_ops; 1042 1043 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); 1044 tcm = NLMSG_DATA(nlh); 1045 tcm->tcm_family = AF_UNSPEC; 1046 tcm->tcm_ifindex = q->dev->ifindex; 1047 tcm->tcm_parent = q->handle; 1048 tcm->tcm_handle = q->handle; 1049 tcm->tcm_info = 0; 1050 RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id); 1051 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0) 1052 goto rtattr_failure; 1053 1054 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 1055 TCA_XSTATS, q->stats_lock, &d) < 0) 1056 goto rtattr_failure; 1057 1058 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) 1059 goto rtattr_failure; 1060 1061 if (gnet_stats_finish_copy(&d) < 0) 1062 goto rtattr_failure; 1063 1064 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1065 return skb->len; 1066 1067nlmsg_failure: 1068rtattr_failure: 1069 nlmsg_trim(skb, b); 1070 return -1; 1071} 1072 1073static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, 1074 struct Qdisc *q, unsigned long cl, int event) 1075{ 1076 struct sk_buff *skb; 1077 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; 1078 1079 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1080 if (!skb) 1081 return -ENOBUFS; 1082 1083 if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) { 1084 kfree_skb(skb); 1085 return -EINVAL; 1086 } 1087 1088 return rtnetlink_send(skb, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); 1089} 1090 1091struct qdisc_dump_args 1092{ 1093 struct qdisc_walker w; 1094 struct sk_buff *skb; 1095 struct netlink_callback *cb; 1096}; 1097 1098static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg) 1099{ 1100 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg; 1101 1102 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid, 1103 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS); 1104} 1105 1106static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) 1107{ 1108 int t; 1109 int s_t; 1110 struct net_device *dev; 1111 struct Qdisc *q; 1112 struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh); 1113 struct qdisc_dump_args arg; 1114 1115 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) 1116 return 0; 1117 if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL) 1118 return 0; 1119 1120 s_t = cb->args[0]; 1121 t = 0; 1122 1123 list_for_each_entry(q, &dev->qdisc_list, list) { 1124 if (t < s_t || !q->ops->cl_ops || 1125 (tcm->tcm_parent && 1126 TC_H_MAJ(tcm->tcm_parent) != q->handle)) { 1127 t++; 1128 continue; 1129 } 1130 if (t > s_t) 1131 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0])); 1132 arg.w.fn = qdisc_class_dump; 1133 arg.skb = skb; 1134 arg.cb = cb; 1135 arg.w.stop = 0; 1136 arg.w.skip = cb->args[1]; 1137 arg.w.count = 0; 1138 q->ops->cl_ops->walk(q, &arg.w); 1139 cb->args[1] = arg.w.count; 1140 if (arg.w.stop) 1141 break; 1142 t++; 1143 } 1144 1145 cb->args[0] = t; 1146 1147 dev_put(dev); 1148 return skb->len; 1149} 1150 1151/* Main classifier routine: scans classifier chain attached 1152 to this qdisc, (optionally) tests for protocol and asks 1153 specific classifiers. 1154 */ 1155int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp, 1156 struct tcf_result *res) 1157{ 1158 __be16 protocol = skb->protocol; 1159 int err = 0; 1160 1161 for (; tp; tp = tp->next) { 1162 if ((tp->protocol == protocol || 1163 tp->protocol == htons(ETH_P_ALL)) && 1164 (err = tp->classify(skb, tp, res)) >= 0) { 1165#ifdef CONFIG_NET_CLS_ACT 1166 if (err != TC_ACT_RECLASSIFY && skb->tc_verd) 1167 skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0); 1168#endif 1169 return err; 1170 } 1171 } 1172 return -1; 1173} 1174EXPORT_SYMBOL(tc_classify_compat); 1175 1176int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, 1177 struct tcf_result *res) 1178{ 1179 int err = 0; 1180 __be16 protocol; 1181#ifdef CONFIG_NET_CLS_ACT 1182 struct tcf_proto *otp = tp; 1183reclassify: 1184#endif 1185 protocol = skb->protocol; 1186 1187 err = tc_classify_compat(skb, tp, res); 1188#ifdef CONFIG_NET_CLS_ACT 1189 if (err == TC_ACT_RECLASSIFY) { 1190 u32 verd = G_TC_VERD(skb->tc_verd); 1191 tp = otp; 1192 1193 if (verd++ >= MAX_REC_LOOP) { 1194 printk("rule prio %u protocol %02x reclassify loop, " 1195 "packet dropped\n", 1196 tp->prio&0xffff, ntohs(tp->protocol)); 1197 return TC_ACT_SHOT; 1198 } 1199 skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); 1200 goto reclassify; 1201 } 1202#endif 1203 return err; 1204} 1205EXPORT_SYMBOL(tc_classify); 1206 1207void tcf_destroy(struct tcf_proto *tp) 1208{ 1209 tp->ops->destroy(tp); 1210 module_put(tp->ops->owner); 1211 kfree(tp); 1212} 1213 1214void tcf_destroy_chain(struct tcf_proto *fl) 1215{ 1216 struct tcf_proto *tp; 1217 1218 while ((tp = fl) != NULL) { 1219 fl = tp->next; 1220 tcf_destroy(tp); 1221 } 1222} 1223EXPORT_SYMBOL(tcf_destroy_chain); 1224 1225#ifdef CONFIG_PROC_FS 1226static int psched_show(struct seq_file *seq, void *v) 1227{ 1228 seq_printf(seq, "%08x %08x %08x %08x\n", 1229 (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1), 1230 1000000, 1231 (u32)NSEC_PER_SEC/(u32)ktime_to_ns(KTIME_MONOTONIC_RES)); 1232 1233 return 0; 1234} 1235 1236static int psched_open(struct inode *inode, struct file *file) 1237{ 1238 return single_open(file, psched_show, PDE(inode)->data); 1239} 1240 1241static const struct file_operations psched_fops = { 1242 .owner = THIS_MODULE, 1243 .open = psched_open, 1244 .read = seq_read, 1245 .llseek = seq_lseek, 1246 .release = single_release, 1247}; 1248#endif 1249 1250static int __init pktsched_init(void) 1251{ 1252 register_qdisc(&pfifo_qdisc_ops); 1253 register_qdisc(&bfifo_qdisc_ops); 1254 proc_net_fops_create("psched", 0, &psched_fops); 1255 1256 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); 1257 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); 1258 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc); 1259 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL); 1260 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL); 1261 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass); 1262 1263 return 0; 1264} 1265 1266subsys_initcall(pktsched_init); 1267 1268EXPORT_SYMBOL(qdisc_get_rtab); 1269EXPORT_SYMBOL(qdisc_put_rtab); 1270EXPORT_SYMBOL(register_qdisc); 1271EXPORT_SYMBOL(unregister_qdisc);