Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

[NET_SCHED]: Convert packet schedulers from rtnetlink to new netlink API

Convert packet schedulers to use the netlink API. Unfortunately a gradual
conversion is not possible without breaking compilation in the middle or
adding lots of casts, so this patch converts them all in one step. The
patch has been mostly generated automatically with some minor edits to
at least allow separate conversion of classifiers and actions.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Patrick McHardy and committed by David S. Miller
1e90474c 01480e1c

+474 -436
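
The conversion is mechanical throughout: attribute tables gain one slot (TCA_xxx_MAX + 1) and are indexed directly by attribute type instead of type - 1, rtattr_parse_nested()/RTA_DATA()/RTA_PAYLOAD() become nla_parse_nested()/nla_data()/nla_len(), RTA_PUT() and its rtattr_failure label become NLA_PUT() and nla_put_failure, and nested TLVs are built with nla_nest_start()/nla_nest_end() instead of RTA_NEST()/RTA_NEST_END(). A minimal sketch of the resulting shape for a hypothetical qdisc follows; foo_init(), foo_dump(), TCA_FOO_MAX, TCA_FOO_PARMS and struct tc_foo_qopt are made-up names for illustration, not attributes touched by this patch.

#include <linux/string.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* Old rtnetlink style (before this patch), shown for contrast:
 *
 *	struct rtattr *tb[TCA_FOO_MAX];
 *	if (opt == NULL || rtattr_parse_nested(tb, TCA_FOO_MAX, opt))
 *		return -EINVAL;
 *	if (tb[TCA_FOO_PARMS-1] == NULL ||
 *	    RTA_PAYLOAD(tb[TCA_FOO_PARMS-1]) < sizeof(*qopt))
 *		return -EINVAL;
 *	qopt = RTA_DATA(tb[TCA_FOO_PARMS-1]);
 */

/* New netlink style: the table is sized TCA_FOO_MAX + 1 and indexed
 * directly by attribute type.
 */
static int foo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct nlattr *tb[TCA_FOO_MAX + 1];
	struct tc_foo_qopt *qopt;

	if (opt == NULL || nla_parse_nested(tb, TCA_FOO_MAX, opt, NULL))
		return -EINVAL;

	if (tb[TCA_FOO_PARMS] == NULL ||
	    nla_len(tb[TCA_FOO_PARMS]) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(tb[TCA_FOO_PARMS]);
	/* ... apply *qopt to qdisc_priv(sch) ... */
	return 0;
}

/* Dump side: NLA_PUT() jumps to nla_put_failure when the skb runs out
 * of tailroom, replacing RTA_PUT() and its rtattr_failure label.
 */
static int foo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tc_foo_qopt qopt;

	memset(&qopt, 0, sizeof(qopt));	/* fill from qdisc_priv(sch) */
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
	return skb->len;

nla_put_failure:
	return -1;
}
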
+3 -3
include/net/gen_stats.h
··· 10 10 { 11 11 spinlock_t * lock; 12 12 struct sk_buff * skb; 13 - struct rtattr * tail; 13 + struct nlattr * tail; 14 14 15 15 /* Backward compatability */ 16 16 int compat_tc_stats; ··· 39 39 40 40 extern int gen_new_estimator(struct gnet_stats_basic *bstats, 41 41 struct gnet_stats_rate_est *rate_est, 42 - spinlock_t *stats_lock, struct rtattr *opt); 42 + spinlock_t *stats_lock, struct nlattr *opt); 43 43 extern void gen_kill_estimator(struct gnet_stats_basic *bstats, 44 44 struct gnet_stats_rate_est *rate_est); 45 45 extern int gen_replace_estimator(struct gnet_stats_basic *bstats, 46 46 struct gnet_stats_rate_est *rate_est, 47 - spinlock_t *stats_lock, struct rtattr *opt); 47 + spinlock_t *stats_lock, struct nlattr *opt); 48 48 49 49 #endif
+1 -1
include/net/pkt_sched.h
··· 77 77 extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); 78 78 extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); 79 79 extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, 80 - struct rtattr *tab); 80 + struct nlattr *tab); 81 81 extern void qdisc_put_rtab(struct qdisc_rate_table *tab); 82 82 83 83 extern void __qdisc_run(struct net_device *dev);
+3 -3
include/net/sch_generic.h
··· 66 66 unsigned long (*get)(struct Qdisc *, u32 classid); 67 67 void (*put)(struct Qdisc *, unsigned long); 68 68 int (*change)(struct Qdisc *, u32, u32, 69 - struct rtattr **, unsigned long *); 69 + struct nlattr **, unsigned long *); 70 70 int (*delete)(struct Qdisc *, unsigned long); 71 71 void (*walk)(struct Qdisc *, struct qdisc_walker * arg); 72 72 ··· 95 95 int (*requeue)(struct sk_buff *, struct Qdisc *); 96 96 unsigned int (*drop)(struct Qdisc *); 97 97 98 - int (*init)(struct Qdisc *, struct rtattr *arg); 98 + int (*init)(struct Qdisc *, struct nlattr *arg); 99 99 void (*reset)(struct Qdisc *); 100 100 void (*destroy)(struct Qdisc *); 101 - int (*change)(struct Qdisc *, struct rtattr *arg); 101 + int (*change)(struct Qdisc *, struct nlattr *arg); 102 102 103 103 int (*dump)(struct Qdisc *, struct sk_buff *); 104 104 int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
+4 -4
net/core/gen_estimator.c
··· 159 159 int gen_new_estimator(struct gnet_stats_basic *bstats, 160 160 struct gnet_stats_rate_est *rate_est, 161 161 spinlock_t *stats_lock, 162 - struct rtattr *opt) 162 + struct nlattr *opt) 163 163 { 164 164 struct gen_estimator *est; 165 - struct gnet_estimator *parm = RTA_DATA(opt); 165 + struct gnet_estimator *parm = nla_data(opt); 166 166 int idx; 167 167 168 - if (RTA_PAYLOAD(opt) < sizeof(*parm)) 168 + if (nla_len(opt) < sizeof(*parm)) 169 169 return -EINVAL; 170 170 171 171 if (parm->interval < -2 || parm->interval > 3) ··· 254 254 */ 255 255 int gen_replace_estimator(struct gnet_stats_basic *bstats, 256 256 struct gnet_stats_rate_est *rate_est, 257 - spinlock_t *stats_lock, struct rtattr *opt) 257 + spinlock_t *stats_lock, struct nlattr *opt) 258 258 { 259 259 gen_kill_estimator(bstats, rate_est); 260 260 return gen_new_estimator(bstats, rate_est, stats_lock, opt);
+5 -4
net/core/gen_stats.c
··· 20 20 #include <linux/socket.h> 21 21 #include <linux/rtnetlink.h> 22 22 #include <linux/gen_stats.h> 23 + #include <net/netlink.h> 23 24 #include <net/gen_stats.h> 24 25 25 26 26 27 static inline int 27 28 gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) 28 29 { 29 - RTA_PUT(d->skb, type, size, buf); 30 + NLA_PUT(d->skb, type, size, buf); 30 31 return 0; 31 32 32 - rtattr_failure: 33 + nla_put_failure: 33 34 spin_unlock_bh(d->lock); 34 35 return -1; 35 36 } ··· 63 62 spin_lock_bh(lock); 64 63 d->lock = lock; 65 64 if (type) 66 - d->tail = (struct rtattr *)skb_tail_pointer(skb); 65 + d->tail = (struct nlattr *)skb_tail_pointer(skb); 67 66 d->skb = skb; 68 67 d->compat_tc_stats = tc_stats_type; 69 68 d->compat_xstats = xstats_type; ··· 214 213 gnet_stats_finish_copy(struct gnet_dump *d) 215 214 { 216 215 if (d->tail) 217 - d->tail->rta_len = skb_tail_pointer(d->skb) - (u8 *)d->tail; 216 + d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail; 218 217 219 218 if (d->compat_tc_stats) 220 219 if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
+7 -7
net/mac80211/wme.c
··· 297 297 298 298 299 299 /* called whenever parameters are updated on existing qdisc */ 300 - static int wme_qdiscop_tune(struct Qdisc *qd, struct rtattr *opt) 300 + static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt) 301 301 { 302 302 /* struct ieee80211_sched_data *q = qdisc_priv(qd); 303 303 */ 304 304 /* check our options block is the right size */ 305 305 /* copy any options to our local structure */ 306 306 /* Ignore options block for now - always use static mapping 307 - struct tc_ieee80211_qopt *qopt = RTA_DATA(opt); 307 + struct tc_ieee80211_qopt *qopt = nla_data(opt); 308 308 309 - if (opt->rta_len < RTA_LENGTH(sizeof(*qopt))) 309 + if (opt->nla_len < nla_attr_size(sizeof(*qopt))) 310 310 return -EINVAL; 311 311 memcpy(q->tag2queue, qopt->tag2queue, sizeof(qopt->tag2queue)); 312 312 */ ··· 315 315 316 316 317 317 /* called during initial creation of qdisc on device */ 318 - static int wme_qdiscop_init(struct Qdisc *qd, struct rtattr *opt) 318 + static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt) 319 319 { 320 320 struct ieee80211_sched_data *q = qdisc_priv(qd); 321 321 struct net_device *dev = qd->dev; ··· 370 370 struct tc_ieee80211_qopt opt; 371 371 372 372 memcpy(&opt.tag2queue, q->tag2queue, TC_80211_MAX_TAG + 1); 373 - RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 373 + NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 374 374 */ return skb->len; 375 375 /* 376 - rtattr_failure: 376 + nla_put_failure: 377 377 skb_trim(skb, p - skb->data);*/ 378 378 return -1; 379 379 } ··· 444 444 445 445 446 446 static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent, 447 - struct rtattr **tca, unsigned long *arg) 447 + struct nlattr **tca, unsigned long *arg) 448 448 { 449 449 unsigned long cl = *arg; 450 450 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+4 -3
net/netfilter/xt_RATEEST.c
··· 12 12 #include <linux/rtnetlink.h> 13 13 #include <linux/random.h> 14 14 #include <net/gen_stats.h> 15 + #include <net/netlink.h> 15 16 16 17 #include <linux/netfilter/x_tables.h> 17 18 #include <linux/netfilter/xt_RATEEST.h> ··· 99 98 struct xt_rateest_target_info *info = (void *)targinfo; 100 99 struct xt_rateest *est; 101 100 struct { 102 - struct rtattr opt; 101 + struct nlattr opt; 103 102 struct gnet_estimator est; 104 103 } cfg; 105 104 ··· 129 128 est->params.interval = info->interval; 130 129 est->params.ewma_log = info->ewma_log; 131 130 132 - cfg.opt.rta_len = RTA_LENGTH(sizeof(cfg.est)); 133 - cfg.opt.rta_type = TCA_STATS_RATE_EST; 131 + cfg.opt.nla_len = nla_attr_size(sizeof(cfg.est)); 132 + cfg.opt.nla_type = TCA_STATS_RATE_EST; 134 133 cfg.est.interval = info->interval; 135 134 cfg.est.ewma_log = info->ewma_log; 136 135
+1 -1
net/sched/act_api.c
··· 227 227 p->tcfc_tm.lastuse = jiffies; 228 228 if (est) 229 229 gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est, 230 - &p->tcfc_lock, est); 230 + &p->tcfc_lock, (struct nlattr *)est); 231 231 a->priv = (void *) p; 232 232 return p; 233 233 }
+3 -3
net/sched/act_police.c
··· 174 174 override: 175 175 if (parm->rate.rate) { 176 176 err = -ENOMEM; 177 - R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]); 177 + R_tab = qdisc_get_rtab(&parm->rate, (struct nlattr *)tb[TCA_POLICE_RATE-1]); 178 178 if (R_tab == NULL) 179 179 goto failure; 180 180 if (parm->peakrate.rate) { 181 181 P_tab = qdisc_get_rtab(&parm->peakrate, 182 - tb[TCA_POLICE_PEAKRATE-1]); 182 + (struct nlattr *)tb[TCA_POLICE_PEAKRATE-1]); 183 183 if (P_tab == NULL) { 184 184 qdisc_put_rtab(R_tab); 185 185 goto failure; ··· 216 216 if (est) 217 217 gen_replace_estimator(&police->tcf_bstats, 218 218 &police->tcf_rate_est, 219 - &police->tcf_lock, est); 219 + &police->tcf_lock, (struct nlattr *)est); 220 220 221 221 spin_unlock_bh(&police->tcf_lock); 222 222 if (ret != ACT_P_CREATED)
+49 -38
net/sched/sch_api.c
··· 213 213 214 214 /* Find queueing discipline by name */ 215 215 216 - static struct Qdisc_ops *qdisc_lookup_ops(struct rtattr *kind) 216 + static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind) 217 217 { 218 218 struct Qdisc_ops *q = NULL; 219 219 220 220 if (kind) { 221 221 read_lock(&qdisc_mod_lock); 222 222 for (q = qdisc_base; q; q = q->next) { 223 - if (rtattr_strcmp(kind, q->id) == 0) { 223 + if (nla_strcmp(kind, q->id) == 0) { 224 224 if (!try_module_get(q->owner)) 225 225 q = NULL; 226 226 break; ··· 233 233 234 234 static struct qdisc_rate_table *qdisc_rtab_list; 235 235 236 - struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct rtattr *tab) 236 + struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) 237 237 { 238 238 struct qdisc_rate_table *rtab; 239 239 ··· 244 244 } 245 245 } 246 246 247 - if (tab == NULL || r->rate == 0 || r->cell_log == 0 || RTA_PAYLOAD(tab) != 1024) 247 + if (tab == NULL || r->rate == 0 || r->cell_log == 0 || nla_len(tab) != 1024) 248 248 return NULL; 249 249 250 250 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL); 251 251 if (rtab) { 252 252 rtab->rate = *r; 253 253 rtab->refcnt = 1; 254 - memcpy(rtab->data, RTA_DATA(tab), 1024); 254 + memcpy(rtab->data, nla_data(tab), 1024); 255 255 rtab->next = qdisc_rtab_list; 256 256 qdisc_rtab_list = rtab; 257 257 } ··· 445 445 446 446 static struct Qdisc * 447 447 qdisc_create(struct net_device *dev, u32 parent, u32 handle, 448 - struct rtattr **tca, int *errp) 448 + struct nlattr **tca, int *errp) 449 449 { 450 450 int err; 451 - struct rtattr *kind = tca[TCA_KIND-1]; 451 + struct nlattr *kind = tca[TCA_KIND]; 452 452 struct Qdisc *sch; 453 453 struct Qdisc_ops *ops; 454 454 ··· 456 456 #ifdef CONFIG_KMOD 457 457 if (ops == NULL && kind != NULL) { 458 458 char name[IFNAMSIZ]; 459 - if (rtattr_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) { 459 + if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) { 460 460 /* We dropped the RTNL semaphore in order to 461 461 * perform the module load. 
So, even if we 462 462 * succeeded in loading the module we have to ··· 509 509 510 510 sch->handle = handle; 511 511 512 - if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) { 513 - if (tca[TCA_RATE-1]) { 512 + if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { 513 + if (tca[TCA_RATE]) { 514 514 err = gen_new_estimator(&sch->bstats, &sch->rate_est, 515 515 sch->stats_lock, 516 - tca[TCA_RATE-1]); 516 + tca[TCA_RATE]); 517 517 if (err) { 518 518 /* 519 519 * Any broken qdiscs that would require ··· 541 541 return NULL; 542 542 } 543 543 544 - static int qdisc_change(struct Qdisc *sch, struct rtattr **tca) 544 + static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) 545 545 { 546 - if (tca[TCA_OPTIONS-1]) { 546 + if (tca[TCA_OPTIONS]) { 547 547 int err; 548 548 549 549 if (sch->ops->change == NULL) 550 550 return -EINVAL; 551 - err = sch->ops->change(sch, tca[TCA_OPTIONS-1]); 551 + err = sch->ops->change(sch, tca[TCA_OPTIONS]); 552 552 if (err) 553 553 return err; 554 554 } 555 - if (tca[TCA_RATE-1]) 555 + if (tca[TCA_RATE]) 556 556 gen_replace_estimator(&sch->bstats, &sch->rate_est, 557 - sch->stats_lock, tca[TCA_RATE-1]); 557 + sch->stats_lock, tca[TCA_RATE]); 558 558 return 0; 559 559 } 560 560 ··· 606 606 { 607 607 struct net *net = skb->sk->sk_net; 608 608 struct tcmsg *tcm = NLMSG_DATA(n); 609 - struct rtattr **tca = arg; 609 + struct nlattr *tca[TCA_MAX + 1]; 610 610 struct net_device *dev; 611 611 u32 clid = tcm->tcm_parent; 612 612 struct Qdisc *q = NULL; ··· 618 618 619 619 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 620 620 return -ENODEV; 621 + 622 + err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 623 + if (err < 0) 624 + return err; 621 625 622 626 if (clid) { 623 627 if (clid != TC_H_ROOT) { ··· 645 641 return -ENOENT; 646 642 } 647 643 648 - if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)) 644 + if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) 649 645 return -EINVAL; 650 646 651 647 if (n->nlmsg_type == RTM_DELQDISC) { ··· 675 671 { 676 672 struct net *net = skb->sk->sk_net; 677 673 struct tcmsg *tcm; 678 - struct rtattr **tca; 674 + struct nlattr *tca[TCA_MAX + 1]; 679 675 struct net_device *dev; 680 676 u32 clid; 681 677 struct Qdisc *q, *p; ··· 687 683 replay: 688 684 /* Reinit, just in case something touches this. 
*/ 689 685 tcm = NLMSG_DATA(n); 690 - tca = arg; 691 686 clid = tcm->tcm_parent; 692 687 q = p = NULL; 693 688 694 689 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 695 690 return -ENODEV; 691 + 692 + err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 693 + if (err < 0) 694 + return err; 696 695 697 696 if (clid) { 698 697 if (clid != TC_H_ROOT) { ··· 724 717 goto create_n_graft; 725 718 if (n->nlmsg_flags&NLM_F_EXCL) 726 719 return -EEXIST; 727 - if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)) 720 + if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) 728 721 return -EINVAL; 729 722 if (q == p || 730 723 (p && check_loop(q, p, 0))) ··· 757 750 if ((n->nlmsg_flags&NLM_F_CREATE) && 758 751 (n->nlmsg_flags&NLM_F_REPLACE) && 759 752 ((n->nlmsg_flags&NLM_F_EXCL) || 760 - (tca[TCA_KIND-1] && 761 - rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)))) 753 + (tca[TCA_KIND] && 754 + nla_strcmp(tca[TCA_KIND], q->ops->id)))) 762 755 goto create_n_graft; 763 756 } 764 757 } ··· 773 766 return -ENOENT; 774 767 if (n->nlmsg_flags&NLM_F_EXCL) 775 768 return -EEXIST; 776 - if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)) 769 + if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) 777 770 return -EINVAL; 778 771 err = qdisc_change(q, tca); 779 772 if (err == 0) ··· 834 827 tcm->tcm_parent = clid; 835 828 tcm->tcm_handle = q->handle; 836 829 tcm->tcm_info = atomic_read(&q->refcnt); 837 - RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id); 830 + NLA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id); 838 831 if (q->ops->dump && q->ops->dump(q, skb) < 0) 839 - goto rtattr_failure; 832 + goto nla_put_failure; 840 833 q->qstats.qlen = q->q.qlen; 841 834 842 835 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 843 836 TCA_XSTATS, q->stats_lock, &d) < 0) 844 - goto rtattr_failure; 837 + goto nla_put_failure; 845 838 846 839 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) 847 - goto rtattr_failure; 840 + goto nla_put_failure; 848 841 849 842 if (gnet_stats_copy_basic(&d, &q->bstats) < 0 || 850 843 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 || 851 844 gnet_stats_copy_queue(&d, &q->qstats) < 0) 852 - goto rtattr_failure; 845 + goto nla_put_failure; 853 846 854 847 if (gnet_stats_finish_copy(&d) < 0) 855 - goto rtattr_failure; 848 + goto nla_put_failure; 856 849 857 850 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 858 851 return skb->len; 859 852 860 853 nlmsg_failure: 861 - rtattr_failure: 854 + nla_put_failure: 862 855 nlmsg_trim(skb, b); 863 856 return -1; 864 857 } ··· 946 939 { 947 940 struct net *net = skb->sk->sk_net; 948 941 struct tcmsg *tcm = NLMSG_DATA(n); 949 - struct rtattr **tca = arg; 942 + struct nlattr *tca[TCA_MAX + 1]; 950 943 struct net_device *dev; 951 944 struct Qdisc *q = NULL; 952 945 const struct Qdisc_class_ops *cops; ··· 962 955 963 956 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 964 957 return -ENODEV; 958 + 959 + err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 960 + if (err < 0) 961 + return err; 965 962 966 963 /* 967 964 parent == TC_H_UNSPEC - unspecified parent. 
··· 1080 1069 tcm->tcm_parent = q->handle; 1081 1070 tcm->tcm_handle = q->handle; 1082 1071 tcm->tcm_info = 0; 1083 - RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id); 1072 + NLA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id); 1084 1073 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0) 1085 - goto rtattr_failure; 1074 + goto nla_put_failure; 1086 1075 1087 1076 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 1088 1077 TCA_XSTATS, q->stats_lock, &d) < 0) 1089 - goto rtattr_failure; 1078 + goto nla_put_failure; 1090 1079 1091 1080 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) 1092 - goto rtattr_failure; 1081 + goto nla_put_failure; 1093 1082 1094 1083 if (gnet_stats_finish_copy(&d) < 0) 1095 - goto rtattr_failure; 1084 + goto nla_put_failure; 1096 1085 1097 1086 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1098 1087 return skb->len; 1099 1088 1100 1089 nlmsg_failure: 1101 - rtattr_failure: 1090 + nla_put_failure: 1102 1091 nlmsg_trim(skb, b); 1103 1092 return -1; 1104 1093 }
+24 -24
net/sched/sch_atm.c
··· 196 196 }; /* Ethertype IP (0800) */ 197 197 198 198 static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, 199 - struct rtattr **tca, unsigned long *arg) 199 + struct nlattr **tca, unsigned long *arg) 200 200 { 201 201 struct atm_qdisc_data *p = qdisc_priv(sch); 202 202 struct atm_flow_data *flow = (struct atm_flow_data *)*arg; 203 203 struct atm_flow_data *excess = NULL; 204 - struct rtattr *opt = tca[TCA_OPTIONS - 1]; 205 - struct rtattr *tb[TCA_ATM_MAX]; 204 + struct nlattr *opt = tca[TCA_OPTIONS]; 205 + struct nlattr *tb[TCA_ATM_MAX + 1]; 206 206 struct socket *sock; 207 207 int fd, error, hdr_len; 208 208 void *hdr; ··· 223 223 */ 224 224 if (flow) 225 225 return -EBUSY; 226 - if (opt == NULL || rtattr_parse_nested(tb, TCA_ATM_MAX, opt)) 226 + if (opt == NULL || nla_parse_nested(tb, TCA_ATM_MAX, opt, NULL)) 227 227 return -EINVAL; 228 - if (!tb[TCA_ATM_FD - 1] || RTA_PAYLOAD(tb[TCA_ATM_FD - 1]) < sizeof(fd)) 228 + if (!tb[TCA_ATM_FD] || nla_len(tb[TCA_ATM_FD]) < sizeof(fd)) 229 229 return -EINVAL; 230 - fd = *(int *)RTA_DATA(tb[TCA_ATM_FD - 1]); 230 + fd = *(int *)nla_data(tb[TCA_ATM_FD]); 231 231 pr_debug("atm_tc_change: fd %d\n", fd); 232 - if (tb[TCA_ATM_HDR - 1]) { 233 - hdr_len = RTA_PAYLOAD(tb[TCA_ATM_HDR - 1]); 234 - hdr = RTA_DATA(tb[TCA_ATM_HDR - 1]); 232 + if (tb[TCA_ATM_HDR]) { 233 + hdr_len = nla_len(tb[TCA_ATM_HDR]); 234 + hdr = nla_data(tb[TCA_ATM_HDR]); 235 235 } else { 236 236 hdr_len = RFC1483LLC_LEN; 237 237 hdr = NULL; /* default LLC/SNAP for IP */ 238 238 } 239 - if (!tb[TCA_ATM_EXCESS - 1]) 239 + if (!tb[TCA_ATM_EXCESS]) 240 240 excess = NULL; 241 241 else { 242 - if (RTA_PAYLOAD(tb[TCA_ATM_EXCESS - 1]) != sizeof(u32)) 242 + if (nla_len(tb[TCA_ATM_EXCESS]) != sizeof(u32)) 243 243 return -EINVAL; 244 244 excess = (struct atm_flow_data *) 245 - atm_tc_get(sch, *(u32 *)RTA_DATA(tb[TCA_ATM_EXCESS - 1])); 245 + atm_tc_get(sch, *(u32 *)nla_data(tb[TCA_ATM_EXCESS])); 246 246 if (!excess) 247 247 return -ENOENT; 248 248 } 249 249 pr_debug("atm_tc_change: type %d, payload %lu, hdr_len %d\n", 250 - opt->rta_type, RTA_PAYLOAD(opt), hdr_len); 250 + opt->nla_type, nla_len(opt), hdr_len); 251 251 sock = sockfd_lookup(fd, &error); 252 252 if (!sock) 253 253 return error; /* f_count++ */ ··· 541 541 return 0; 542 542 } 543 543 544 - static int atm_tc_init(struct Qdisc *sch, struct rtattr *opt) 544 + static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) 545 545 { 546 546 struct atm_qdisc_data *p = qdisc_priv(sch); 547 547 ··· 602 602 struct atm_qdisc_data *p = qdisc_priv(sch); 603 603 struct atm_flow_data *flow = (struct atm_flow_data *)cl; 604 604 unsigned char *b = skb_tail_pointer(skb); 605 - struct rtattr *rta; 605 + struct nlattr *nla; 606 606 607 607 pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n", 608 608 sch, p, flow, skb, tcm); ··· 610 610 return -EINVAL; 611 611 tcm->tcm_handle = flow->classid; 612 612 tcm->tcm_info = flow->q->handle; 613 - rta = (struct rtattr *)b; 614 - RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 615 - RTA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr); 613 + nla = (struct nlattr *)b; 614 + NLA_PUT(skb, TCA_OPTIONS, 0, NULL); 615 + NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr); 616 616 if (flow->vcc) { 617 617 struct sockaddr_atmpvc pvc; 618 618 int state; ··· 621 621 pvc.sap_addr.itf = flow->vcc->dev ? 
flow->vcc->dev->number : -1; 622 622 pvc.sap_addr.vpi = flow->vcc->vpi; 623 623 pvc.sap_addr.vci = flow->vcc->vci; 624 - RTA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc); 624 + NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc); 625 625 state = ATM_VF2VS(flow->vcc->flags); 626 - RTA_PUT(skb, TCA_ATM_STATE, sizeof(state), &state); 626 + NLA_PUT(skb, TCA_ATM_STATE, sizeof(state), &state); 627 627 } 628 628 if (flow->excess) 629 - RTA_PUT(skb, TCA_ATM_EXCESS, sizeof(u32), &flow->classid); 629 + NLA_PUT(skb, TCA_ATM_EXCESS, sizeof(u32), &flow->classid); 630 630 else { 631 631 static u32 zero; 632 632 633 - RTA_PUT(skb, TCA_ATM_EXCESS, sizeof(zero), &zero); 633 + NLA_PUT(skb, TCA_ATM_EXCESS, sizeof(zero), &zero); 634 634 } 635 - rta->rta_len = skb_tail_pointer(skb) - b; 635 + nla->nla_len = skb_tail_pointer(skb) - b; 636 636 return skb->len; 637 637 638 - rtattr_failure: 638 + nla_put_failure: 639 639 nlmsg_trim(skb, b); 640 640 return -1; 641 641 }
+78 -78
net/sched/sch_cbq.c
··· 1377 1377 return 0; 1378 1378 } 1379 1379 1380 - static int cbq_init(struct Qdisc *sch, struct rtattr *opt) 1380 + static int cbq_init(struct Qdisc *sch, struct nlattr *opt) 1381 1381 { 1382 1382 struct cbq_sched_data *q = qdisc_priv(sch); 1383 - struct rtattr *tb[TCA_CBQ_MAX]; 1383 + struct nlattr *tb[TCA_CBQ_MAX + 1]; 1384 1384 struct tc_ratespec *r; 1385 1385 1386 - if (rtattr_parse_nested(tb, TCA_CBQ_MAX, opt) < 0 || 1387 - tb[TCA_CBQ_RTAB-1] == NULL || tb[TCA_CBQ_RATE-1] == NULL || 1388 - RTA_PAYLOAD(tb[TCA_CBQ_RATE-1]) < sizeof(struct tc_ratespec)) 1386 + if (nla_parse_nested(tb, TCA_CBQ_MAX, opt, NULL) < 0 || 1387 + tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL || 1388 + nla_len(tb[TCA_CBQ_RATE]) < sizeof(struct tc_ratespec)) 1389 1389 return -EINVAL; 1390 1390 1391 - if (tb[TCA_CBQ_LSSOPT-1] && 1392 - RTA_PAYLOAD(tb[TCA_CBQ_LSSOPT-1]) < sizeof(struct tc_cbq_lssopt)) 1391 + if (tb[TCA_CBQ_LSSOPT] && 1392 + nla_len(tb[TCA_CBQ_LSSOPT]) < sizeof(struct tc_cbq_lssopt)) 1393 1393 return -EINVAL; 1394 1394 1395 - r = RTA_DATA(tb[TCA_CBQ_RATE-1]); 1395 + r = nla_data(tb[TCA_CBQ_RATE]); 1396 1396 1397 - if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB-1])) == NULL) 1397 + if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL) 1398 1398 return -EINVAL; 1399 1399 1400 1400 q->link.refcnt = 1; ··· 1427 1427 1428 1428 cbq_link_class(&q->link); 1429 1429 1430 - if (tb[TCA_CBQ_LSSOPT-1]) 1431 - cbq_set_lss(&q->link, RTA_DATA(tb[TCA_CBQ_LSSOPT-1])); 1430 + if (tb[TCA_CBQ_LSSOPT]) 1431 + cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT])); 1432 1432 1433 1433 cbq_addprio(q, &q->link); 1434 1434 return 0; ··· 1438 1438 { 1439 1439 unsigned char *b = skb_tail_pointer(skb); 1440 1440 1441 - RTA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate); 1441 + NLA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate); 1442 1442 return skb->len; 1443 1443 1444 - rtattr_failure: 1444 + nla_put_failure: 1445 1445 nlmsg_trim(skb, b); 1446 1446 return -1; 1447 1447 } ··· 1463 1463 opt.minidle = (u32)(-cl->minidle); 1464 1464 opt.offtime = cl->offtime; 1465 1465 opt.change = ~0; 1466 - RTA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt); 1466 + NLA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt); 1467 1467 return skb->len; 1468 1468 1469 - rtattr_failure: 1469 + nla_put_failure: 1470 1470 nlmsg_trim(skb, b); 1471 1471 return -1; 1472 1472 } ··· 1481 1481 opt.priority = cl->priority+1; 1482 1482 opt.cpriority = cl->cpriority+1; 1483 1483 opt.weight = cl->weight; 1484 - RTA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt); 1484 + NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt); 1485 1485 return skb->len; 1486 1486 1487 - rtattr_failure: 1487 + nla_put_failure: 1488 1488 nlmsg_trim(skb, b); 1489 1489 return -1; 1490 1490 } ··· 1498 1498 opt.priority2 = cl->priority2+1; 1499 1499 opt.pad = 0; 1500 1500 opt.penalty = cl->penalty; 1501 - RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); 1501 + NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); 1502 1502 return skb->len; 1503 1503 1504 - rtattr_failure: 1504 + nla_put_failure: 1505 1505 nlmsg_trim(skb, b); 1506 1506 return -1; 1507 1507 } ··· 1515 1515 opt.split = cl->split ? 
cl->split->classid : 0; 1516 1516 opt.defmap = cl->defmap; 1517 1517 opt.defchange = ~0; 1518 - RTA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt); 1518 + NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt); 1519 1519 } 1520 1520 return skb->len; 1521 1521 1522 - rtattr_failure: 1522 + nla_put_failure: 1523 1523 nlmsg_trim(skb, b); 1524 1524 return -1; 1525 1525 } ··· 1534 1534 opt.police = cl->police; 1535 1535 opt.__res1 = 0; 1536 1536 opt.__res2 = 0; 1537 - RTA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt); 1537 + NLA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt); 1538 1538 } 1539 1539 return skb->len; 1540 1540 1541 - rtattr_failure: 1541 + nla_put_failure: 1542 1542 nlmsg_trim(skb, b); 1543 1543 return -1; 1544 1544 } ··· 1562 1562 { 1563 1563 struct cbq_sched_data *q = qdisc_priv(sch); 1564 1564 unsigned char *b = skb_tail_pointer(skb); 1565 - struct rtattr *rta; 1565 + struct nlattr *nla; 1566 1566 1567 - rta = (struct rtattr*)b; 1568 - RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 1567 + nla = (struct nlattr*)b; 1568 + NLA_PUT(skb, TCA_OPTIONS, 0, NULL); 1569 1569 if (cbq_dump_attr(skb, &q->link) < 0) 1570 - goto rtattr_failure; 1571 - rta->rta_len = skb_tail_pointer(skb) - b; 1570 + goto nla_put_failure; 1571 + nla->nla_len = skb_tail_pointer(skb) - b; 1572 1572 return skb->len; 1573 1573 1574 - rtattr_failure: 1574 + nla_put_failure: 1575 1575 nlmsg_trim(skb, b); 1576 1576 return -1; 1577 1577 } ··· 1591 1591 { 1592 1592 struct cbq_class *cl = (struct cbq_class*)arg; 1593 1593 unsigned char *b = skb_tail_pointer(skb); 1594 - struct rtattr *rta; 1594 + struct nlattr *nla; 1595 1595 1596 1596 if (cl->tparent) 1597 1597 tcm->tcm_parent = cl->tparent->classid; ··· 1600 1600 tcm->tcm_handle = cl->classid; 1601 1601 tcm->tcm_info = cl->q->handle; 1602 1602 1603 - rta = (struct rtattr*)b; 1604 - RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 1603 + nla = (struct nlattr*)b; 1604 + NLA_PUT(skb, TCA_OPTIONS, 0, NULL); 1605 1605 if (cbq_dump_attr(skb, cl) < 0) 1606 - goto rtattr_failure; 1607 - rta->rta_len = skb_tail_pointer(skb) - b; 1606 + goto nla_put_failure; 1607 + nla->nla_len = skb_tail_pointer(skb) - b; 1608 1608 return skb->len; 1609 1609 1610 - rtattr_failure: 1610 + nla_put_failure: 1611 1611 nlmsg_trim(skb, b); 1612 1612 return -1; 1613 1613 } ··· 1753 1753 } 1754 1754 1755 1755 static int 1756 - cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **tca, 1756 + cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, 1757 1757 unsigned long *arg) 1758 1758 { 1759 1759 int err; 1760 1760 struct cbq_sched_data *q = qdisc_priv(sch); 1761 1761 struct cbq_class *cl = (struct cbq_class*)*arg; 1762 - struct rtattr *opt = tca[TCA_OPTIONS-1]; 1763 - struct rtattr *tb[TCA_CBQ_MAX]; 1762 + struct nlattr *opt = tca[TCA_OPTIONS]; 1763 + struct nlattr *tb[TCA_CBQ_MAX + 1]; 1764 1764 struct cbq_class *parent; 1765 1765 struct qdisc_rate_table *rtab = NULL; 1766 1766 1767 - if (opt==NULL || rtattr_parse_nested(tb, TCA_CBQ_MAX, opt)) 1767 + if (opt==NULL || nla_parse_nested(tb, TCA_CBQ_MAX, opt, NULL)) 1768 1768 return -EINVAL; 1769 1769 1770 - if (tb[TCA_CBQ_OVL_STRATEGY-1] && 1771 - RTA_PAYLOAD(tb[TCA_CBQ_OVL_STRATEGY-1]) < sizeof(struct tc_cbq_ovl)) 1770 + if (tb[TCA_CBQ_OVL_STRATEGY] && 1771 + nla_len(tb[TCA_CBQ_OVL_STRATEGY]) < sizeof(struct tc_cbq_ovl)) 1772 1772 return -EINVAL; 1773 1773 1774 - if (tb[TCA_CBQ_FOPT-1] && 1775 - RTA_PAYLOAD(tb[TCA_CBQ_FOPT-1]) < sizeof(struct tc_cbq_fopt)) 1774 + if (tb[TCA_CBQ_FOPT] && 1775 + nla_len(tb[TCA_CBQ_FOPT]) < sizeof(struct 
tc_cbq_fopt)) 1776 1776 return -EINVAL; 1777 1777 1778 - if (tb[TCA_CBQ_RATE-1] && 1779 - RTA_PAYLOAD(tb[TCA_CBQ_RATE-1]) < sizeof(struct tc_ratespec)) 1778 + if (tb[TCA_CBQ_RATE] && 1779 + nla_len(tb[TCA_CBQ_RATE]) < sizeof(struct tc_ratespec)) 1780 1780 return -EINVAL; 1781 1781 1782 - if (tb[TCA_CBQ_LSSOPT-1] && 1783 - RTA_PAYLOAD(tb[TCA_CBQ_LSSOPT-1]) < sizeof(struct tc_cbq_lssopt)) 1782 + if (tb[TCA_CBQ_LSSOPT] && 1783 + nla_len(tb[TCA_CBQ_LSSOPT]) < sizeof(struct tc_cbq_lssopt)) 1784 1784 return -EINVAL; 1785 1785 1786 - if (tb[TCA_CBQ_WRROPT-1] && 1787 - RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt)) 1786 + if (tb[TCA_CBQ_WRROPT] && 1787 + nla_len(tb[TCA_CBQ_WRROPT]) < sizeof(struct tc_cbq_wrropt)) 1788 1788 return -EINVAL; 1789 1789 1790 1790 #ifdef CONFIG_NET_CLS_ACT 1791 - if (tb[TCA_CBQ_POLICE-1] && 1792 - RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police)) 1791 + if (tb[TCA_CBQ_POLICE] && 1792 + nla_len(tb[TCA_CBQ_POLICE]) < sizeof(struct tc_cbq_police)) 1793 1793 return -EINVAL; 1794 1794 #endif 1795 1795 ··· 1802 1802 return -EINVAL; 1803 1803 } 1804 1804 1805 - if (tb[TCA_CBQ_RATE-1]) { 1806 - rtab = qdisc_get_rtab(RTA_DATA(tb[TCA_CBQ_RATE-1]), tb[TCA_CBQ_RTAB-1]); 1805 + if (tb[TCA_CBQ_RATE]) { 1806 + rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]); 1807 1807 if (rtab == NULL) 1808 1808 return -EINVAL; 1809 1809 } ··· 1819 1819 qdisc_put_rtab(rtab); 1820 1820 } 1821 1821 1822 - if (tb[TCA_CBQ_LSSOPT-1]) 1823 - cbq_set_lss(cl, RTA_DATA(tb[TCA_CBQ_LSSOPT-1])); 1822 + if (tb[TCA_CBQ_LSSOPT]) 1823 + cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); 1824 1824 1825 - if (tb[TCA_CBQ_WRROPT-1]) { 1825 + if (tb[TCA_CBQ_WRROPT]) { 1826 1826 cbq_rmprio(q, cl); 1827 - cbq_set_wrr(cl, RTA_DATA(tb[TCA_CBQ_WRROPT-1])); 1827 + cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); 1828 1828 } 1829 1829 1830 - if (tb[TCA_CBQ_OVL_STRATEGY-1]) 1831 - cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1])); 1830 + if (tb[TCA_CBQ_OVL_STRATEGY]) 1831 + cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY])); 1832 1832 1833 1833 #ifdef CONFIG_NET_CLS_ACT 1834 - if (tb[TCA_CBQ_POLICE-1]) 1835 - cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1])); 1834 + if (tb[TCA_CBQ_POLICE]) 1835 + cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE])); 1836 1836 #endif 1837 1837 1838 - if (tb[TCA_CBQ_FOPT-1]) 1839 - cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1])); 1838 + if (tb[TCA_CBQ_FOPT]) 1839 + cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); 1840 1840 1841 1841 if (cl->q->q.qlen) 1842 1842 cbq_activate_class(cl); 1843 1843 1844 1844 sch_tree_unlock(sch); 1845 1845 1846 - if (tca[TCA_RATE-1]) 1846 + if (tca[TCA_RATE]) 1847 1847 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1848 1848 &sch->dev->queue_lock, 1849 - tca[TCA_RATE-1]); 1849 + tca[TCA_RATE]); 1850 1850 return 0; 1851 1851 } 1852 1852 1853 1853 if (parentid == TC_H_ROOT) 1854 1854 return -EINVAL; 1855 1855 1856 - if (tb[TCA_CBQ_WRROPT-1] == NULL || tb[TCA_CBQ_RATE-1] == NULL || 1857 - tb[TCA_CBQ_LSSOPT-1] == NULL) 1856 + if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL || 1857 + tb[TCA_CBQ_LSSOPT] == NULL) 1858 1858 return -EINVAL; 1859 1859 1860 - rtab = qdisc_get_rtab(RTA_DATA(tb[TCA_CBQ_RATE-1]), tb[TCA_CBQ_RTAB-1]); 1860 + rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]); 1861 1861 if (rtab == NULL) 1862 1862 return -EINVAL; 1863 1863 ··· 1912 1912 cl->share = cl->tparent; 1913 1913 cbq_adjust_levels(parent); 1914 1914 cl->minidle = -0x7FFFFFFF; 1915 - cbq_set_lss(cl, 
RTA_DATA(tb[TCA_CBQ_LSSOPT-1])); 1916 - cbq_set_wrr(cl, RTA_DATA(tb[TCA_CBQ_WRROPT-1])); 1915 + cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); 1916 + cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); 1917 1917 if (cl->ewma_log==0) 1918 1918 cl->ewma_log = q->link.ewma_log; 1919 1919 if (cl->maxidle==0) ··· 1921 1921 if (cl->avpkt==0) 1922 1922 cl->avpkt = q->link.avpkt; 1923 1923 cl->overlimit = cbq_ovl_classic; 1924 - if (tb[TCA_CBQ_OVL_STRATEGY-1]) 1925 - cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1])); 1924 + if (tb[TCA_CBQ_OVL_STRATEGY]) 1925 + cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY])); 1926 1926 #ifdef CONFIG_NET_CLS_ACT 1927 - if (tb[TCA_CBQ_POLICE-1]) 1928 - cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1])); 1927 + if (tb[TCA_CBQ_POLICE]) 1928 + cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE])); 1929 1929 #endif 1930 - if (tb[TCA_CBQ_FOPT-1]) 1931 - cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1])); 1930 + if (tb[TCA_CBQ_FOPT]) 1931 + cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); 1932 1932 sch_tree_unlock(sch); 1933 1933 1934 - if (tca[TCA_RATE-1]) 1934 + if (tca[TCA_RATE]) 1935 1935 gen_new_estimator(&cl->bstats, &cl->rate_est, 1936 - &sch->dev->queue_lock, tca[TCA_RATE-1]); 1936 + &sch->dev->queue_lock, tca[TCA_RATE]); 1937 1937 1938 1938 *arg = (unsigned long)cl; 1939 1939 return 0;
+49 -36
net/sched/sch_dsmark.c
··· 100 100 } 101 101 102 102 static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent, 103 - struct rtattr **tca, unsigned long *arg) 103 + struct nlattr **tca, unsigned long *arg) 104 104 { 105 105 struct dsmark_qdisc_data *p = qdisc_priv(sch); 106 - struct rtattr *opt = tca[TCA_OPTIONS-1]; 107 - struct rtattr *tb[TCA_DSMARK_MAX]; 106 + struct nlattr *opt = tca[TCA_OPTIONS]; 107 + struct nlattr *tb[TCA_DSMARK_MAX + 1]; 108 108 int err = -EINVAL; 109 109 u8 mask = 0; 110 110 ··· 113 113 114 114 if (!dsmark_valid_index(p, *arg)) { 115 115 err = -ENOENT; 116 - goto rtattr_failure; 116 + goto errout; 117 117 } 118 118 119 - if (!opt || rtattr_parse_nested(tb, TCA_DSMARK_MAX, opt)) 120 - goto rtattr_failure; 119 + if (!opt || nla_parse_nested(tb, TCA_DSMARK_MAX, opt, NULL)) 120 + goto errout; 121 121 122 - if (tb[TCA_DSMARK_MASK-1]) 123 - mask = RTA_GET_U8(tb[TCA_DSMARK_MASK-1]); 122 + if (tb[TCA_DSMARK_MASK]) { 123 + if (nla_len(tb[TCA_DSMARK_MASK]) < sizeof(u8)) 124 + goto errout; 125 + mask = nla_get_u8(tb[TCA_DSMARK_MASK]); 126 + } 127 + if (tb[TCA_DSMARK_VALUE]) { 128 + if (nla_len(tb[TCA_DSMARK_VALUE]) < sizeof(u8)) 129 + goto errout; 130 + p->value[*arg-1] = nla_get_u8(tb[TCA_DSMARK_VALUE]); 131 + } 124 132 125 - if (tb[TCA_DSMARK_VALUE-1]) 126 - p->value[*arg-1] = RTA_GET_U8(tb[TCA_DSMARK_VALUE-1]); 127 - 128 - if (tb[TCA_DSMARK_MASK-1]) 133 + if (tb[TCA_DSMARK_MASK]) 129 134 p->mask[*arg-1] = mask; 130 135 131 136 err = 0; 132 137 133 - rtattr_failure: 138 + errout: 134 139 return err; 135 140 } 136 141 ··· 340 335 return len; 341 336 } 342 337 343 - static int dsmark_init(struct Qdisc *sch, struct rtattr *opt) 338 + static int dsmark_init(struct Qdisc *sch, struct nlattr *opt) 344 339 { 345 340 struct dsmark_qdisc_data *p = qdisc_priv(sch); 346 - struct rtattr *tb[TCA_DSMARK_MAX]; 341 + struct nlattr *tb[TCA_DSMARK_MAX + 1]; 347 342 int err = -EINVAL; 348 343 u32 default_index = NO_DEFAULT_INDEX; 349 344 u16 indices; ··· 351 346 352 347 pr_debug("dsmark_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt); 353 348 354 - if (!opt || rtattr_parse_nested(tb, TCA_DSMARK_MAX, opt) < 0) 349 + if (!opt || nla_parse_nested(tb, TCA_DSMARK_MAX, opt, NULL) < 0) 355 350 goto errout; 356 351 357 - indices = RTA_GET_U16(tb[TCA_DSMARK_INDICES-1]); 352 + if (nla_len(tb[TCA_DSMARK_INDICES]) < sizeof(u16)) 353 + goto errout; 354 + indices = nla_get_u16(tb[TCA_DSMARK_INDICES]); 358 355 359 356 if (hweight32(indices) != 1) 360 357 goto errout; 361 358 362 - if (tb[TCA_DSMARK_DEFAULT_INDEX-1]) 363 - default_index = RTA_GET_U16(tb[TCA_DSMARK_DEFAULT_INDEX-1]); 359 + if (tb[TCA_DSMARK_DEFAULT_INDEX]) { 360 + if (nla_len(tb[TCA_DSMARK_DEFAULT_INDEX]) < sizeof(u16)) 361 + goto errout; 362 + default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]); 363 + } 364 364 365 365 mask = kmalloc(indices * 2, GFP_KERNEL); 366 366 if (mask == NULL) { ··· 381 371 382 372 p->indices = indices; 383 373 p->default_index = default_index; 384 - p->set_tc_index = RTA_GET_FLAG(tb[TCA_DSMARK_SET_TC_INDEX-1]); 374 + p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]); 385 375 386 376 p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, sch->handle); 387 377 if (p->q == NULL) ··· 391 381 392 382 err = 0; 393 383 errout: 394 - rtattr_failure: 395 384 return err; 396 385 } 397 386 ··· 418 409 struct sk_buff *skb, struct tcmsg *tcm) 419 410 { 420 411 struct dsmark_qdisc_data *p = qdisc_priv(sch); 421 - struct rtattr *opts = NULL; 412 + struct nlattr *opts = NULL; 422 413 423 414 pr_debug("dsmark_dump_class(sch 
%p,[qdisc %p],class %ld\n", sch, p, cl); 424 415 ··· 428 419 tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1); 429 420 tcm->tcm_info = p->q->handle; 430 421 431 - opts = RTA_NEST(skb, TCA_OPTIONS); 432 - RTA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]); 433 - RTA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]); 422 + opts = nla_nest_start(skb, TCA_OPTIONS); 423 + if (opts == NULL) 424 + goto nla_put_failure; 425 + NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]); 426 + NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]); 434 427 435 - return RTA_NEST_END(skb, opts); 428 + return nla_nest_end(skb, opts); 436 429 437 - rtattr_failure: 438 - return RTA_NEST_CANCEL(skb, opts); 430 + nla_put_failure: 431 + return nla_nest_cancel(skb, opts); 439 432 } 440 433 441 434 static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb) 442 435 { 443 436 struct dsmark_qdisc_data *p = qdisc_priv(sch); 444 - struct rtattr *opts = NULL; 437 + struct nlattr *opts = NULL; 445 438 446 - opts = RTA_NEST(skb, TCA_OPTIONS); 447 - RTA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices); 439 + opts = nla_nest_start(skb, TCA_OPTIONS); 440 + if (opts == NULL) 441 + goto nla_put_failure; 442 + NLA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices); 448 443 449 444 if (p->default_index != NO_DEFAULT_INDEX) 450 - RTA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index); 445 + NLA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index); 451 446 452 447 if (p->set_tc_index) 453 - RTA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX); 448 + NLA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX); 454 449 455 - return RTA_NEST_END(skb, opts); 450 + return nla_nest_end(skb, opts); 456 451 457 - rtattr_failure: 458 - return RTA_NEST_CANCEL(skb, opts); 452 + nla_put_failure: 453 + return nla_nest_cancel(skb, opts); 459 454 } 460 455 461 456 static const struct Qdisc_class_ops dsmark_class_ops = {
+5 -5
net/sched/sch_fifo.c
··· 43 43 return qdisc_reshape_fail(skb, sch); 44 44 } 45 45 46 - static int fifo_init(struct Qdisc *sch, struct rtattr *opt) 46 + static int fifo_init(struct Qdisc *sch, struct nlattr *opt) 47 47 { 48 48 struct fifo_sched_data *q = qdisc_priv(sch); 49 49 ··· 55 55 56 56 q->limit = limit; 57 57 } else { 58 - struct tc_fifo_qopt *ctl = RTA_DATA(opt); 58 + struct tc_fifo_qopt *ctl = nla_data(opt); 59 59 60 - if (RTA_PAYLOAD(opt) < sizeof(*ctl)) 60 + if (nla_len(opt) < sizeof(*ctl)) 61 61 return -EINVAL; 62 62 63 63 q->limit = ctl->limit; ··· 71 71 struct fifo_sched_data *q = qdisc_priv(sch); 72 72 struct tc_fifo_qopt opt = { .limit = q->limit }; 73 73 74 - RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 74 + NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 75 75 return skb->len; 76 76 77 - rtattr_failure: 77 + nla_put_failure: 78 78 return -1; 79 79 } 80 80
+3 -3
net/sched/sch_generic.c
··· 397 397 struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; 398 398 399 399 memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1); 400 - RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 400 + NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 401 401 return skb->len; 402 402 403 - rtattr_failure: 403 + nla_put_failure: 404 404 return -1; 405 405 } 406 406 407 - static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt) 407 + static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt) 408 408 { 409 409 int prio; 410 410 struct sk_buff_head *list = qdisc_priv(qdisc);
+32 -27
net/sched/sch_gred.c
··· 350 350 kfree(q); 351 351 } 352 352 353 - static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps) 353 + static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps) 354 354 { 355 355 struct gred_sched *table = qdisc_priv(sch); 356 356 struct tc_gred_sopt *sopt; 357 357 int i; 358 358 359 - if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt)) 359 + if (dps == NULL || nla_len(dps) < sizeof(*sopt)) 360 360 return -EINVAL; 361 361 362 - sopt = RTA_DATA(dps); 362 + sopt = nla_data(dps); 363 363 364 364 if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs) 365 365 return -EINVAL; ··· 425 425 return 0; 426 426 } 427 427 428 - static int gred_change(struct Qdisc *sch, struct rtattr *opt) 428 + static int gred_change(struct Qdisc *sch, struct nlattr *opt) 429 429 { 430 430 struct gred_sched *table = qdisc_priv(sch); 431 431 struct tc_gred_qopt *ctl; 432 - struct rtattr *tb[TCA_GRED_MAX]; 432 + struct nlattr *tb[TCA_GRED_MAX + 1]; 433 433 int err = -EINVAL, prio = GRED_DEF_PRIO; 434 434 u8 *stab; 435 435 436 - if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt)) 436 + if (opt == NULL || nla_parse_nested(tb, TCA_GRED_MAX, opt, NULL)) 437 437 return -EINVAL; 438 438 439 - if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL) 439 + if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) 440 440 return gred_change_table_def(sch, opt); 441 441 442 - if (tb[TCA_GRED_PARMS-1] == NULL || 443 - RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) || 444 - tb[TCA_GRED_STAB-1] == NULL || 445 - RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256) 442 + if (tb[TCA_GRED_PARMS] == NULL || 443 + nla_len(tb[TCA_GRED_PARMS]) < sizeof(*ctl) || 444 + tb[TCA_GRED_STAB] == NULL || 445 + nla_len(tb[TCA_GRED_STAB]) < 256) 446 446 return -EINVAL; 447 447 448 - ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]); 449 - stab = RTA_DATA(tb[TCA_GRED_STAB-1]); 448 + ctl = nla_data(tb[TCA_GRED_PARMS]); 449 + stab = nla_data(tb[TCA_GRED_STAB]); 450 450 451 451 if (ctl->DP >= table->DPs) 452 452 goto errout; ··· 486 486 return err; 487 487 } 488 488 489 - static int gred_init(struct Qdisc *sch, struct rtattr *opt) 489 + static int gred_init(struct Qdisc *sch, struct nlattr *opt) 490 490 { 491 - struct rtattr *tb[TCA_GRED_MAX]; 491 + struct nlattr *tb[TCA_GRED_MAX + 1]; 492 492 493 - if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt)) 493 + if (opt == NULL || nla_parse_nested(tb, TCA_GRED_MAX, opt, NULL)) 494 494 return -EINVAL; 495 495 496 - if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1]) 496 + if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) 497 497 return -EINVAL; 498 498 499 - return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]); 499 + return gred_change_table_def(sch, tb[TCA_GRED_DPS]); 500 500 } 501 501 502 502 static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) 503 503 { 504 504 struct gred_sched *table = qdisc_priv(sch); 505 - struct rtattr *parms, *opts = NULL; 505 + struct nlattr *parms, *opts = NULL; 506 506 int i; 507 507 struct tc_gred_sopt sopt = { 508 508 .DPs = table->DPs, ··· 511 511 .flags = table->red_flags, 512 512 }; 513 513 514 - opts = RTA_NEST(skb, TCA_OPTIONS); 515 - RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt); 516 - parms = RTA_NEST(skb, TCA_GRED_PARMS); 514 + opts = nla_nest_start(skb, TCA_OPTIONS); 515 + if (opts == NULL) 516 + goto nla_put_failure; 517 + NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt); 518 + parms = nla_nest_start(skb, TCA_GRED_PARMS); 519 + if (parms == NULL) 520 + goto nla_put_failure; 517 521 518 522 for (i 
= 0; i < MAX_DPs; i++) { 519 523 struct gred_sched_data *q = table->tab[i]; ··· 559 555 opt.qave = red_calc_qavg(&q->parms, q->parms.qavg); 560 556 561 557 append_opt: 562 - RTA_APPEND(skb, sizeof(opt), &opt); 558 + if (nla_append(skb, sizeof(opt), &opt) < 0) 559 + goto nla_put_failure; 563 560 } 564 561 565 - RTA_NEST_END(skb, parms); 562 + nla_nest_end(skb, parms); 566 563 567 - return RTA_NEST_END(skb, opts); 564 + return nla_nest_end(skb, opts); 568 565 569 - rtattr_failure: 570 - return RTA_NEST_CANCEL(skb, opts); 566 + nla_put_failure: 567 + return nla_nest_cancel(skb, opts); 571 568 } 572 569 573 570 static void gred_destroy(struct Qdisc *sch)
+36 -36
net/sched/sch_hfsc.c
··· 988 988 989 989 static int 990 990 hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, 991 - struct rtattr **tca, unsigned long *arg) 991 + struct nlattr **tca, unsigned long *arg) 992 992 { 993 993 struct hfsc_sched *q = qdisc_priv(sch); 994 994 struct hfsc_class *cl = (struct hfsc_class *)*arg; 995 995 struct hfsc_class *parent = NULL; 996 - struct rtattr *opt = tca[TCA_OPTIONS-1]; 997 - struct rtattr *tb[TCA_HFSC_MAX]; 996 + struct nlattr *opt = tca[TCA_OPTIONS]; 997 + struct nlattr *tb[TCA_HFSC_MAX + 1]; 998 998 struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL; 999 999 u64 cur_time; 1000 1000 1001 - if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt)) 1001 + if (opt == NULL || nla_parse_nested(tb, TCA_HFSC_MAX, opt, NULL)) 1002 1002 return -EINVAL; 1003 1003 1004 - if (tb[TCA_HFSC_RSC-1]) { 1005 - if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc)) 1004 + if (tb[TCA_HFSC_RSC]) { 1005 + if (nla_len(tb[TCA_HFSC_RSC]) < sizeof(*rsc)) 1006 1006 return -EINVAL; 1007 - rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]); 1007 + rsc = nla_data(tb[TCA_HFSC_RSC]); 1008 1008 if (rsc->m1 == 0 && rsc->m2 == 0) 1009 1009 rsc = NULL; 1010 1010 } 1011 1011 1012 - if (tb[TCA_HFSC_FSC-1]) { 1013 - if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc)) 1012 + if (tb[TCA_HFSC_FSC]) { 1013 + if (nla_len(tb[TCA_HFSC_FSC]) < sizeof(*fsc)) 1014 1014 return -EINVAL; 1015 - fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]); 1015 + fsc = nla_data(tb[TCA_HFSC_FSC]); 1016 1016 if (fsc->m1 == 0 && fsc->m2 == 0) 1017 1017 fsc = NULL; 1018 1018 } 1019 1019 1020 - if (tb[TCA_HFSC_USC-1]) { 1021 - if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc)) 1020 + if (tb[TCA_HFSC_USC]) { 1021 + if (nla_len(tb[TCA_HFSC_USC]) < sizeof(*usc)) 1022 1022 return -EINVAL; 1023 - usc = RTA_DATA(tb[TCA_HFSC_USC-1]); 1023 + usc = nla_data(tb[TCA_HFSC_USC]); 1024 1024 if (usc->m1 == 0 && usc->m2 == 0) 1025 1025 usc = NULL; 1026 1026 } ··· 1050 1050 } 1051 1051 sch_tree_unlock(sch); 1052 1052 1053 - if (tca[TCA_RATE-1]) 1053 + if (tca[TCA_RATE]) 1054 1054 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1055 1055 &sch->dev->queue_lock, 1056 - tca[TCA_RATE-1]); 1056 + tca[TCA_RATE]); 1057 1057 return 0; 1058 1058 } 1059 1059 ··· 1106 1106 cl->cl_pcvtoff = parent->cl_cvtoff; 1107 1107 sch_tree_unlock(sch); 1108 1108 1109 - if (tca[TCA_RATE-1]) 1109 + if (tca[TCA_RATE]) 1110 1110 gen_new_estimator(&cl->bstats, &cl->rate_est, 1111 - &sch->dev->queue_lock, tca[TCA_RATE-1]); 1111 + &sch->dev->queue_lock, tca[TCA_RATE]); 1112 1112 *arg = (unsigned long)cl; 1113 1113 return 0; 1114 1114 } ··· 1304 1304 tsc.m1 = sm2m(sc->sm1); 1305 1305 tsc.d = dx2d(sc->dx); 1306 1306 tsc.m2 = sm2m(sc->sm2); 1307 - RTA_PUT(skb, attr, sizeof(tsc), &tsc); 1307 + NLA_PUT(skb, attr, sizeof(tsc), &tsc); 1308 1308 1309 1309 return skb->len; 1310 1310 1311 - rtattr_failure: 1311 + nla_put_failure: 1312 1312 return -1; 1313 1313 } 1314 1314 ··· 1317 1317 { 1318 1318 if ((cl->cl_flags & HFSC_RSC) && 1319 1319 (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0)) 1320 - goto rtattr_failure; 1320 + goto nla_put_failure; 1321 1321 1322 1322 if ((cl->cl_flags & HFSC_FSC) && 1323 1323 (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0)) 1324 - goto rtattr_failure; 1324 + goto nla_put_failure; 1325 1325 1326 1326 if ((cl->cl_flags & HFSC_USC) && 1327 1327 (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0)) 1328 - goto rtattr_failure; 1328 + goto nla_put_failure; 1329 1329 1330 1330 return skb->len; 1331 1331 1332 - rtattr_failure: 1332 + nla_put_failure: 1333 1333 return -1; 
1334 1334 } 1335 1335 ··· 1339 1339 { 1340 1340 struct hfsc_class *cl = (struct hfsc_class *)arg; 1341 1341 unsigned char *b = skb_tail_pointer(skb); 1342 - struct rtattr *rta = (struct rtattr *)b; 1342 + struct nlattr *nla = (struct nlattr *)b; 1343 1343 1344 1344 tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT; 1345 1345 tcm->tcm_handle = cl->classid; 1346 1346 if (cl->level == 0) 1347 1347 tcm->tcm_info = cl->qdisc->handle; 1348 1348 1349 - RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 1349 + NLA_PUT(skb, TCA_OPTIONS, 0, NULL); 1350 1350 if (hfsc_dump_curves(skb, cl) < 0) 1351 - goto rtattr_failure; 1352 - rta->rta_len = skb_tail_pointer(skb) - b; 1351 + goto nla_put_failure; 1352 + nla->nla_len = skb_tail_pointer(skb) - b; 1353 1353 return skb->len; 1354 1354 1355 - rtattr_failure: 1355 + nla_put_failure: 1356 1356 nlmsg_trim(skb, b); 1357 1357 return -1; 1358 1358 } ··· 1423 1423 } 1424 1424 1425 1425 static int 1426 - hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt) 1426 + hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt) 1427 1427 { 1428 1428 struct hfsc_sched *q = qdisc_priv(sch); 1429 1429 struct tc_hfsc_qopt *qopt; 1430 1430 unsigned int i; 1431 1431 1432 - if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt)) 1432 + if (opt == NULL || nla_len(opt) < sizeof(*qopt)) 1433 1433 return -EINVAL; 1434 - qopt = RTA_DATA(opt); 1434 + qopt = nla_data(opt); 1435 1435 1436 1436 q->defcls = qopt->defcls; 1437 1437 for (i = 0; i < HFSC_HSIZE; i++) ··· 1459 1459 } 1460 1460 1461 1461 static int 1462 - hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt) 1462 + hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt) 1463 1463 { 1464 1464 struct hfsc_sched *q = qdisc_priv(sch); 1465 1465 struct tc_hfsc_qopt *qopt; 1466 1466 1467 - if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt)) 1467 + if (opt == NULL || nla_len(opt) < sizeof(*qopt)) 1468 1468 return -EINVAL; 1469 - qopt = RTA_DATA(opt); 1469 + qopt = nla_data(opt); 1470 1470 1471 1471 sch_tree_lock(sch); 1472 1472 q->defcls = qopt->defcls; ··· 1550 1550 struct tc_hfsc_qopt qopt; 1551 1551 1552 1552 qopt.defcls = q->defcls; 1553 - RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); 1553 + NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); 1554 1554 return skb->len; 1555 1555 1556 - rtattr_failure: 1556 + nla_put_failure: 1557 1557 nlmsg_trim(skb, b); 1558 1558 return -1; 1559 1559 }
+34 -34
net/sched/sch_htb.c
··· 992 992 INIT_LIST_HEAD(q->drops + i); 993 993 } 994 994 995 - static int htb_init(struct Qdisc *sch, struct rtattr *opt) 995 + static int htb_init(struct Qdisc *sch, struct nlattr *opt) 996 996 { 997 997 struct htb_sched *q = qdisc_priv(sch); 998 - struct rtattr *tb[TCA_HTB_INIT]; 998 + struct nlattr *tb[TCA_HTB_INIT + 1]; 999 999 struct tc_htb_glob *gopt; 1000 1000 int i; 1001 - if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) || 1002 - tb[TCA_HTB_INIT - 1] == NULL || 1003 - RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) { 1001 + if (!opt || nla_parse_nested(tb, TCA_HTB_INIT, opt, NULL) || 1002 + tb[TCA_HTB_INIT] == NULL || 1003 + nla_len(tb[TCA_HTB_INIT]) < sizeof(*gopt)) { 1004 1004 printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n"); 1005 1005 return -EINVAL; 1006 1006 } 1007 - gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]); 1007 + gopt = nla_data(tb[TCA_HTB_INIT]); 1008 1008 if (gopt->version != HTB_VER >> 16) { 1009 1009 printk(KERN_ERR 1010 1010 "HTB: need tc/htb version %d (minor is %d), you have %d\n", ··· 1036 1036 { 1037 1037 struct htb_sched *q = qdisc_priv(sch); 1038 1038 unsigned char *b = skb_tail_pointer(skb); 1039 - struct rtattr *rta; 1039 + struct nlattr *nla; 1040 1040 struct tc_htb_glob gopt; 1041 1041 spin_lock_bh(&sch->dev->queue_lock); 1042 1042 gopt.direct_pkts = q->direct_pkts; ··· 1045 1045 gopt.rate2quantum = q->rate2quantum; 1046 1046 gopt.defcls = q->defcls; 1047 1047 gopt.debug = 0; 1048 - rta = (struct rtattr *)b; 1049 - RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 1050 - RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); 1051 - rta->rta_len = skb_tail_pointer(skb) - b; 1048 + nla = (struct nlattr *)b; 1049 + NLA_PUT(skb, TCA_OPTIONS, 0, NULL); 1050 + NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); 1051 + nla->nla_len = skb_tail_pointer(skb) - b; 1052 1052 spin_unlock_bh(&sch->dev->queue_lock); 1053 1053 return skb->len; 1054 - rtattr_failure: 1054 + nla_put_failure: 1055 1055 spin_unlock_bh(&sch->dev->queue_lock); 1056 1056 nlmsg_trim(skb, skb_tail_pointer(skb)); 1057 1057 return -1; ··· 1062 1062 { 1063 1063 struct htb_class *cl = (struct htb_class *)arg; 1064 1064 unsigned char *b = skb_tail_pointer(skb); 1065 - struct rtattr *rta; 1065 + struct nlattr *nla; 1066 1066 struct tc_htb_opt opt; 1067 1067 1068 1068 spin_lock_bh(&sch->dev->queue_lock); ··· 1071 1071 if (!cl->level && cl->un.leaf.q) 1072 1072 tcm->tcm_info = cl->un.leaf.q->handle; 1073 1073 1074 - rta = (struct rtattr *)b; 1075 - RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 1074 + nla = (struct nlattr *)b; 1075 + NLA_PUT(skb, TCA_OPTIONS, 0, NULL); 1076 1076 1077 1077 memset(&opt, 0, sizeof(opt)); 1078 1078 ··· 1083 1083 opt.quantum = cl->un.leaf.quantum; 1084 1084 opt.prio = cl->un.leaf.prio; 1085 1085 opt.level = cl->level; 1086 - RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); 1087 - rta->rta_len = skb_tail_pointer(skb) - b; 1086 + NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); 1087 + nla->nla_len = skb_tail_pointer(skb) - b; 1088 1088 spin_unlock_bh(&sch->dev->queue_lock); 1089 1089 return skb->len; 1090 - rtattr_failure: 1090 + nla_put_failure: 1091 1091 spin_unlock_bh(&sch->dev->queue_lock); 1092 1092 nlmsg_trim(skb, b); 1093 1093 return -1; ··· 1290 1290 } 1291 1291 1292 1292 static int htb_change_class(struct Qdisc *sch, u32 classid, 1293 - u32 parentid, struct rtattr **tca, 1293 + u32 parentid, struct nlattr **tca, 1294 1294 unsigned long *arg) 1295 1295 { 1296 1296 int err = -EINVAL; 1297 1297 struct htb_sched *q = qdisc_priv(sch); 1298 1298 struct htb_class *cl = (struct htb_class 
*)*arg, *parent; 1299 - struct rtattr *opt = tca[TCA_OPTIONS - 1]; 1299 + struct nlattr *opt = tca[TCA_OPTIONS]; 1300 1300 struct qdisc_rate_table *rtab = NULL, *ctab = NULL; 1301 - struct rtattr *tb[TCA_HTB_RTAB]; 1301 + struct nlattr *tb[TCA_HTB_RTAB + 1]; 1302 1302 struct tc_htb_opt *hopt; 1303 1303 1304 1304 /* extract all subattrs from opt attr */ 1305 - if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) || 1306 - tb[TCA_HTB_PARMS - 1] == NULL || 1307 - RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt)) 1305 + if (!opt || nla_parse_nested(tb, TCA_HTB_RTAB, opt, NULL) || 1306 + tb[TCA_HTB_PARMS] == NULL || 1307 + nla_len(tb[TCA_HTB_PARMS]) < sizeof(*hopt)) 1308 1308 goto failure; 1309 1309 1310 1310 parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch); 1311 1311 1312 - hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]); 1312 + hopt = nla_data(tb[TCA_HTB_PARMS]); 1313 1313 1314 - rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]); 1315 - ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]); 1314 + rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]); 1315 + ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]); 1316 1316 if (!rtab || !ctab) 1317 1317 goto failure; 1318 1318 ··· 1320 1320 struct Qdisc *new_q; 1321 1321 int prio; 1322 1322 struct { 1323 - struct rtattr rta; 1323 + struct nlattr nla; 1324 1324 struct gnet_estimator opt; 1325 1325 } est = { 1326 - .rta = { 1327 - .rta_len = RTA_LENGTH(sizeof(est.opt)), 1328 - .rta_type = TCA_RATE, 1326 + .nla = { 1327 + .nla_len = nla_attr_size(sizeof(est.opt)), 1328 + .nla_type = TCA_RATE, 1329 1329 }, 1330 1330 .opt = { 1331 1331 /* 4s interval, 16s averaging constant */ ··· 1350 1350 1351 1351 gen_new_estimator(&cl->bstats, &cl->rate_est, 1352 1352 &sch->dev->queue_lock, 1353 - tca[TCA_RATE-1] ? : &est.rta); 1353 + tca[TCA_RATE] ? : &est.nla); 1354 1354 cl->refcnt = 1; 1355 1355 INIT_LIST_HEAD(&cl->sibling); 1356 1356 INIT_HLIST_NODE(&cl->hlist); ··· 1403 1403 list_add_tail(&cl->sibling, 1404 1404 parent ? &parent->children : &q->root); 1405 1405 } else { 1406 - if (tca[TCA_RATE-1]) 1406 + if (tca[TCA_RATE]) 1407 1407 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1408 1408 &sch->dev->queue_lock, 1409 - tca[TCA_RATE-1]); 1409 + tca[TCA_RATE]); 1410 1410 sch_tree_lock(sch); 1411 1411 } 1412 1412
+7 -7
net/sched/sch_ingress.c
··· 57 57 } 58 58 59 59 static int ingress_change(struct Qdisc *sch, u32 classid, u32 parent, 60 - struct rtattr **tca, unsigned long *arg) 60 + struct nlattr **tca, unsigned long *arg) 61 61 { 62 62 return 0; 63 63 } ··· 156 156 }; 157 157 #endif 158 158 159 - static int ingress_init(struct Qdisc *sch, struct rtattr *opt) 159 + static int ingress_init(struct Qdisc *sch, struct nlattr *opt) 160 160 { 161 161 #if !defined(CONFIG_NET_CLS_ACT) && defined(CONFIG_NETFILTER) 162 162 printk("Ingress scheduler: Classifier actions prefered over netfilter\n"); ··· 184 184 static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb) 185 185 { 186 186 unsigned char *b = skb_tail_pointer(skb); 187 - struct rtattr *rta; 187 + struct nlattr *nla; 188 188 189 - rta = (struct rtattr *)b; 190 - RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 191 - rta->rta_len = skb_tail_pointer(skb) - b; 189 + nla = (struct nlattr *)b; 190 + NLA_PUT(skb, TCA_OPTIONS, 0, NULL); 191 + nla->nla_len = skb_tail_pointer(skb) - b; 192 192 return skb->len; 193 193 194 - rtattr_failure: 194 + nla_put_failure: 195 195 nlmsg_trim(skb, b); 196 196 return -1; 197 197 }
+50 -50
net/sched/sch_netem.c
··· 313 313 /* Pass size change message down to embedded FIFO */ 314 314 static int set_fifo_limit(struct Qdisc *q, int limit) 315 315 { 316 - struct rtattr *rta; 316 + struct nlattr *nla; 317 317 int ret = -ENOMEM; 318 318 319 319 /* Hack to avoid sending change message to non-FIFO */ 320 320 if (strncmp(q->ops->id + 1, "fifo", 4) != 0) 321 321 return 0; 322 322 323 - rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); 324 - if (rta) { 325 - rta->rta_type = RTM_NEWQDISC; 326 - rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt)); 327 - ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit; 323 + nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); 324 + if (nla) { 325 + nla->nla_type = RTM_NEWQDISC; 326 + nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt)); 327 + ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit; 328 328 329 - ret = q->ops->change(q, rta); 330 - kfree(rta); 329 + ret = q->ops->change(q, nla); 330 + kfree(nla); 331 331 } 332 332 return ret; 333 333 } ··· 336 336 * Distribution data is a variable size payload containing 337 337 * signed 16 bit values. 338 338 */ 339 - static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr) 339 + static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) 340 340 { 341 341 struct netem_sched_data *q = qdisc_priv(sch); 342 - unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16); 343 - const __s16 *data = RTA_DATA(attr); 342 + unsigned long n = nla_len(attr)/sizeof(__s16); 343 + const __s16 *data = nla_data(attr); 344 344 struct disttable *d; 345 345 int i; 346 346 ··· 363 363 return 0; 364 364 } 365 365 366 - static int get_correlation(struct Qdisc *sch, const struct rtattr *attr) 366 + static int get_correlation(struct Qdisc *sch, const struct nlattr *attr) 367 367 { 368 368 struct netem_sched_data *q = qdisc_priv(sch); 369 - const struct tc_netem_corr *c = RTA_DATA(attr); 369 + const struct tc_netem_corr *c = nla_data(attr); 370 370 371 - if (RTA_PAYLOAD(attr) != sizeof(*c)) 371 + if (nla_len(attr) != sizeof(*c)) 372 372 return -EINVAL; 373 373 374 374 init_crandom(&q->delay_cor, c->delay_corr); ··· 377 377 return 0; 378 378 } 379 379 380 - static int get_reorder(struct Qdisc *sch, const struct rtattr *attr) 380 + static int get_reorder(struct Qdisc *sch, const struct nlattr *attr) 381 381 { 382 382 struct netem_sched_data *q = qdisc_priv(sch); 383 - const struct tc_netem_reorder *r = RTA_DATA(attr); 383 + const struct tc_netem_reorder *r = nla_data(attr); 384 384 385 - if (RTA_PAYLOAD(attr) != sizeof(*r)) 385 + if (nla_len(attr) != sizeof(*r)) 386 386 return -EINVAL; 387 387 388 388 q->reorder = r->probability; ··· 390 390 return 0; 391 391 } 392 392 393 - static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr) 393 + static int get_corrupt(struct Qdisc *sch, const struct nlattr *attr) 394 394 { 395 395 struct netem_sched_data *q = qdisc_priv(sch); 396 - const struct tc_netem_corrupt *r = RTA_DATA(attr); 396 + const struct tc_netem_corrupt *r = nla_data(attr); 397 397 398 - if (RTA_PAYLOAD(attr) != sizeof(*r)) 398 + if (nla_len(attr) != sizeof(*r)) 399 399 return -EINVAL; 400 400 401 401 q->corrupt = r->probability; ··· 404 404 } 405 405 406 406 /* Parse netlink message to set options */ 407 - static int netem_change(struct Qdisc *sch, struct rtattr *opt) 407 + static int netem_change(struct Qdisc *sch, struct nlattr *opt) 408 408 { 409 409 struct netem_sched_data *q = qdisc_priv(sch); 410 410 struct tc_netem_qopt *qopt; 411 411 int ret; 412 412 413 - if (opt 
== NULL || RTA_PAYLOAD(opt) < sizeof(*qopt)) 413 + if (opt == NULL || nla_len(opt) < sizeof(*qopt)) 414 414 return -EINVAL; 415 415 416 - qopt = RTA_DATA(opt); 416 + qopt = nla_data(opt); 417 417 ret = set_fifo_limit(q->qdisc, qopt->limit); 418 418 if (ret) { 419 419 pr_debug("netem: can't set fifo limit\n"); ··· 437 437 /* Handle nested options after initial queue options. 438 438 * Should have put all options in nested format but too late now. 439 439 */ 440 - if (RTA_PAYLOAD(opt) > sizeof(*qopt)) { 441 - struct rtattr *tb[TCA_NETEM_MAX]; 442 - if (rtattr_parse(tb, TCA_NETEM_MAX, 443 - RTA_DATA(opt) + sizeof(*qopt), 444 - RTA_PAYLOAD(opt) - sizeof(*qopt))) 440 + if (nla_len(opt) > sizeof(*qopt)) { 441 + struct nlattr *tb[TCA_NETEM_MAX + 1]; 442 + if (nla_parse(tb, TCA_NETEM_MAX, 443 + nla_data(opt) + sizeof(*qopt), 444 + nla_len(opt) - sizeof(*qopt), NULL)) 445 445 return -EINVAL; 446 446 447 - if (tb[TCA_NETEM_CORR-1]) { 448 - ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]); 447 + if (tb[TCA_NETEM_CORR]) { 448 + ret = get_correlation(sch, tb[TCA_NETEM_CORR]); 449 449 if (ret) 450 450 return ret; 451 451 } 452 452 453 - if (tb[TCA_NETEM_DELAY_DIST-1]) { 454 - ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]); 453 + if (tb[TCA_NETEM_DELAY_DIST]) { 454 + ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); 455 455 if (ret) 456 456 return ret; 457 457 } 458 458 459 - if (tb[TCA_NETEM_REORDER-1]) { 460 - ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]); 459 + if (tb[TCA_NETEM_REORDER]) { 460 + ret = get_reorder(sch, tb[TCA_NETEM_REORDER]); 461 461 if (ret) 462 462 return ret; 463 463 } 464 464 465 - if (tb[TCA_NETEM_CORRUPT-1]) { 466 - ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]); 465 + if (tb[TCA_NETEM_CORRUPT]) { 466 + ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT]); 467 467 if (ret) 468 468 return ret; 469 469 } ··· 515 515 return qdisc_reshape_fail(nskb, sch); 516 516 } 517 517 518 - static int tfifo_init(struct Qdisc *sch, struct rtattr *opt) 518 + static int tfifo_init(struct Qdisc *sch, struct nlattr *opt) 519 519 { 520 520 struct fifo_sched_data *q = qdisc_priv(sch); 521 521 522 522 if (opt) { 523 - struct tc_fifo_qopt *ctl = RTA_DATA(opt); 524 - if (RTA_PAYLOAD(opt) < sizeof(*ctl)) 523 + struct tc_fifo_qopt *ctl = nla_data(opt); 524 + if (nla_len(opt) < sizeof(*ctl)) 525 525 return -EINVAL; 526 526 527 527 q->limit = ctl->limit; ··· 537 537 struct fifo_sched_data *q = qdisc_priv(sch); 538 538 struct tc_fifo_qopt opt = { .limit = q->limit }; 539 539 540 - RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 540 + NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 541 541 return skb->len; 542 542 543 - rtattr_failure: 543 + nla_put_failure: 544 544 return -1; 545 545 } 546 546 ··· 557 557 .dump = tfifo_dump, 558 558 }; 559 559 560 - static int netem_init(struct Qdisc *sch, struct rtattr *opt) 560 + static int netem_init(struct Qdisc *sch, struct nlattr *opt) 561 561 { 562 562 struct netem_sched_data *q = qdisc_priv(sch); 563 563 int ret; ··· 595 595 { 596 596 const struct netem_sched_data *q = qdisc_priv(sch); 597 597 unsigned char *b = skb_tail_pointer(skb); 598 - struct rtattr *rta = (struct rtattr *) b; 598 + struct nlattr *nla = (struct nlattr *) b; 599 599 struct tc_netem_qopt qopt; 600 600 struct tc_netem_corr cor; 601 601 struct tc_netem_reorder reorder; ··· 607 607 qopt.loss = q->loss; 608 608 qopt.gap = q->gap; 609 609 qopt.duplicate = q->duplicate; 610 - RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); 610 + NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); 611 611 612 612 
cor.delay_corr = q->delay_cor.rho; 613 613 cor.loss_corr = q->loss_cor.rho; 614 614 cor.dup_corr = q->dup_cor.rho; 615 - RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor); 615 + NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor); 616 616 617 617 reorder.probability = q->reorder; 618 618 reorder.correlation = q->reorder_cor.rho; 619 - RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder); 619 + NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder); 620 620 621 621 corrupt.probability = q->corrupt; 622 622 corrupt.correlation = q->corrupt_cor.rho; 623 - RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); 623 + NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); 624 624 625 - rta->rta_len = skb_tail_pointer(skb) - b; 625 + nla->nla_len = skb_tail_pointer(skb) - b; 626 626 627 627 return skb->len; 628 628 629 - rtattr_failure: 629 + nla_put_failure: 630 630 nlmsg_trim(skb, b); 631 631 return -1; 632 632 } ··· 678 678 } 679 679 680 680 static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid, 681 - struct rtattr **tca, unsigned long *arg) 681 + struct nlattr **tca, unsigned long *arg) 682 682 { 683 683 return -ENOSYS; 684 684 }
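netem keeps its historical option layout: a fixed struct tc_netem_qopt comes first in TCA_OPTIONS and any further attributes are packed after it (the "Should have put all options in nested format but too late now" comment). The conversion therefore runs plain nla_parse() over the tail of the payload instead of nla_parse_nested(). A sketch of that shape under invented names (TCA_BAR_*, struct tc_bar_qopt, struct tc_bar_corr):

    #include <net/netlink.h>
    #include <net/pkt_sched.h>

    /* Placeholders, for illustration only. */
    enum { TCA_BAR_UNSPEC, TCA_BAR_CORR, __TCA_BAR_MAX };
    #define TCA_BAR_MAX (__TCA_BAR_MAX - 1)
    struct tc_bar_qopt { u32 limit; };
    struct tc_bar_corr { u32 delay_corr; };

    static int bar_change(struct Qdisc *sch, struct nlattr *opt)
    {
            struct nlattr *tb[TCA_BAR_MAX + 1];
            struct tc_bar_qopt *qopt;

            if (opt == NULL || nla_len(opt) < sizeof(*qopt))
                    return -EINVAL;
            qopt = nla_data(opt);
            /* ... apply the fixed part (qopt) ... */

            if (nla_len(opt) > sizeof(*qopt)) {
                    /* Optional attributes start right after the struct. */
                    if (nla_parse(tb, TCA_BAR_MAX,
                                  nla_data(opt) + sizeof(*qopt),
                                  nla_len(opt) - sizeof(*qopt), NULL))
                            return -EINVAL;

                    if (tb[TCA_BAR_CORR] &&
                        nla_len(tb[TCA_BAR_CORR]) < sizeof(struct tc_bar_corr))
                            return -EINVAL;
                    /* ... apply tb[TCA_BAR_CORR] ... */
            }
            return 0;
    }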
+17 -13
net/sched/sch_prio.c
··· 224 224 qdisc_destroy(q->queues[prio]); 225 225 } 226 226 227 - static int prio_tune(struct Qdisc *sch, struct rtattr *opt) 227 + static int prio_tune(struct Qdisc *sch, struct nlattr *opt) 228 228 { 229 229 struct prio_sched_data *q = qdisc_priv(sch); 230 230 struct tc_prio_qopt *qopt; 231 - struct rtattr *tb[TCA_PRIO_MAX]; 231 + struct nlattr *tb[TCA_PRIO_MAX + 1]; 232 232 int i; 233 233 234 - if (rtattr_parse_nested_compat(tb, TCA_PRIO_MAX, opt, qopt, 235 - sizeof(*qopt))) 234 + if (nla_parse_nested_compat(tb, TCA_PRIO_MAX, opt, NULL, qopt, 235 + sizeof(*qopt))) 236 236 return -EINVAL; 237 237 q->bands = qopt->bands; 238 238 /* If we're multiqueue, make sure the number of incoming bands ··· 242 242 * only one that is enabled for multiqueue, since it's the only one 243 243 * that interacts with the underlying device. 244 244 */ 245 - q->mq = RTA_GET_FLAG(tb[TCA_PRIO_MQ - 1]); 245 + q->mq = nla_get_flag(tb[TCA_PRIO_MQ]); 246 246 if (q->mq) { 247 247 if (sch->parent != TC_H_ROOT) 248 248 return -EINVAL; ··· 296 296 return 0; 297 297 } 298 298 299 - static int prio_init(struct Qdisc *sch, struct rtattr *opt) 299 + static int prio_init(struct Qdisc *sch, struct nlattr *opt) 300 300 { 301 301 struct prio_sched_data *q = qdisc_priv(sch); 302 302 int i; ··· 319 319 { 320 320 struct prio_sched_data *q = qdisc_priv(sch); 321 321 unsigned char *b = skb_tail_pointer(skb); 322 - struct rtattr *nest; 322 + struct nlattr *nest; 323 323 struct tc_prio_qopt opt; 324 324 325 325 opt.bands = q->bands; 326 326 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); 327 327 328 - nest = RTA_NEST_COMPAT(skb, TCA_OPTIONS, sizeof(opt), &opt); 329 - if (q->mq) 330 - RTA_PUT_FLAG(skb, TCA_PRIO_MQ); 331 - RTA_NEST_COMPAT_END(skb, nest); 328 + nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt); 329 + if (nest == NULL) 330 + goto nla_put_failure; 331 + if (q->mq) { 332 + if (nla_put_flag(skb, TCA_PRIO_MQ) < 0) 333 + goto nla_put_failure; 334 + } 335 + nla_nest_compat_end(skb, nest); 332 336 333 337 return skb->len; 334 338 335 - rtattr_failure: 339 + nla_put_failure: 336 340 nlmsg_trim(skb, b); 337 341 return -1; 338 342 } ··· 396 392 return; 397 393 } 398 394 399 - static int prio_change(struct Qdisc *sch, u32 handle, u32 parent, struct rtattr **tca, unsigned long *arg) 395 + static int prio_change(struct Qdisc *sch, u32 handle, u32 parent, struct nlattr **tca, unsigned long *arg) 400 396 { 401 397 unsigned long cl = *arg; 402 398 struct prio_sched_data *q = qdisc_priv(sch);
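prio is the one scheduler in this patch that uses the "compat" nesting helpers: the legacy struct tc_prio_qopt is emitted first, so old tc binaries keep working, and real attributes (here only the multiqueue flag) follow it inside the same TCA_OPTIONS attribute. On the parse side nla_parse_nested_compat() splits the two, and the flag is read back with nla_get_flag(); on the dump side nla_put_flag() now returns an error that has to be checked explicitly. A dump sketch along the lines of prio_dump(); struct tc_baz_qopt, TCA_BAZ_MQ and baz_dump() are stand-ins:

    #include <net/netlink.h>
    #include <net/pkt_sched.h>

    /* Placeholders, for illustration only. */
    enum { TCA_BAZ_UNSPEC, TCA_BAZ_MQ, __TCA_BAZ_MAX };
    struct tc_baz_qopt { u32 bands; };

    static int baz_dump(struct Qdisc *sch, struct sk_buff *skb)
    {
            unsigned char *b = skb_tail_pointer(skb);
            struct nlattr *nest;
            struct tc_baz_qopt opt;

            memset(&opt, 0, sizeof(opt));
            /* ... fill opt from qdisc_priv(sch) ... */

            /* Emit the legacy struct, then open a nest for the attributes
             * that follow it inside TCA_OPTIONS. */
            nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt);
            if (nest == NULL)
                    goto nla_put_failure;

            /* Flag attribute: presence alone carries the information. */
            if (nla_put_flag(skb, TCA_BAZ_MQ) < 0)
                    goto nla_put_failure;

            nla_nest_compat_end(skb, nest);
            return skb->len;

    nla_put_failure:
            nlmsg_trim(skb, b);
            return -1;
    }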
+27 -25
net/sched/sch_red.c
··· 177 177 static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit) 178 178 { 179 179 struct Qdisc *q; 180 - struct rtattr *rta; 180 + struct nlattr *nla; 181 181 int ret; 182 182 183 183 q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops, 184 184 TC_H_MAKE(sch->handle, 1)); 185 185 if (q) { 186 - rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), 186 + nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), 187 187 GFP_KERNEL); 188 - if (rta) { 189 - rta->rta_type = RTM_NEWQDISC; 190 - rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt)); 191 - ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit; 188 + if (nla) { 189 + nla->nla_type = RTM_NEWQDISC; 190 + nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt)); 191 + ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit; 192 192 193 - ret = q->ops->change(q, rta); 194 - kfree(rta); 193 + ret = q->ops->change(q, nla); 194 + kfree(nla); 195 195 196 196 if (ret == 0) 197 197 return q; ··· 201 201 return NULL; 202 202 } 203 203 204 - static int red_change(struct Qdisc *sch, struct rtattr *opt) 204 + static int red_change(struct Qdisc *sch, struct nlattr *opt) 205 205 { 206 206 struct red_sched_data *q = qdisc_priv(sch); 207 - struct rtattr *tb[TCA_RED_MAX]; 207 + struct nlattr *tb[TCA_RED_MAX + 1]; 208 208 struct tc_red_qopt *ctl; 209 209 struct Qdisc *child = NULL; 210 210 211 - if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt)) 211 + if (opt == NULL || nla_parse_nested(tb, TCA_RED_MAX, opt, NULL)) 212 212 return -EINVAL; 213 213 214 - if (tb[TCA_RED_PARMS-1] == NULL || 215 - RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) || 216 - tb[TCA_RED_STAB-1] == NULL || 217 - RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE) 214 + if (tb[TCA_RED_PARMS] == NULL || 215 + nla_len(tb[TCA_RED_PARMS]) < sizeof(*ctl) || 216 + tb[TCA_RED_STAB] == NULL || 217 + nla_len(tb[TCA_RED_STAB]) < RED_STAB_SIZE) 218 218 return -EINVAL; 219 219 220 - ctl = RTA_DATA(tb[TCA_RED_PARMS-1]); 220 + ctl = nla_data(tb[TCA_RED_PARMS]); 221 221 222 222 if (ctl->limit > 0) { 223 223 child = red_create_dflt(sch, ctl->limit); ··· 235 235 236 236 red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, 237 237 ctl->Plog, ctl->Scell_log, 238 - RTA_DATA(tb[TCA_RED_STAB-1])); 238 + nla_data(tb[TCA_RED_STAB])); 239 239 240 240 if (skb_queue_empty(&sch->q)) 241 241 red_end_of_idle_period(&q->parms); ··· 244 244 return 0; 245 245 } 246 246 247 - static int red_init(struct Qdisc* sch, struct rtattr *opt) 247 + static int red_init(struct Qdisc* sch, struct nlattr *opt) 248 248 { 249 249 struct red_sched_data *q = qdisc_priv(sch); 250 250 ··· 255 255 static int red_dump(struct Qdisc *sch, struct sk_buff *skb) 256 256 { 257 257 struct red_sched_data *q = qdisc_priv(sch); 258 - struct rtattr *opts = NULL; 258 + struct nlattr *opts = NULL; 259 259 struct tc_red_qopt opt = { 260 260 .limit = q->limit, 261 261 .flags = q->flags, ··· 266 266 .Scell_log = q->parms.Scell_log, 267 267 }; 268 268 269 - opts = RTA_NEST(skb, TCA_OPTIONS); 270 - RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt); 271 - return RTA_NEST_END(skb, opts); 269 + opts = nla_nest_start(skb, TCA_OPTIONS); 270 + if (opts == NULL) 271 + goto nla_put_failure; 272 + NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt); 273 + return nla_nest_end(skb, opts); 272 274 273 - rtattr_failure: 274 - return RTA_NEST_CANCEL(skb, opts); 275 + nla_put_failure: 276 + return nla_nest_cancel(skb, opts); 275 277 } 276 278 277 279 static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d) ··· 334 332 } 335 333 336 
334 static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid, 337 - struct rtattr **tca, unsigned long *arg) 335 + struct nlattr **tca, unsigned long *arg) 338 336 { 339 337 return -ENOSYS; 340 338 }
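red_dump() shows the cleanest dump form after the conversion: nla_nest_start() opens TCA_OPTIONS, NLA_PUT() (which jumps to nla_put_failure on overflow) adds attributes inside it, and nla_nest_end()/nla_nest_cancel() close the nest or roll the message back, so the open-coded "nla = (struct nlattr *)b; ... nla->nla_len = skb_tail_pointer(skb) - b;" bookkeeping still visible in the htb and tbf dumps is not needed. A sketch with invented TCA_QUX_* names:

    #include <net/netlink.h>
    #include <net/pkt_sched.h>

    /* Placeholders, for illustration only. */
    enum { TCA_QUX_UNSPEC, TCA_QUX_PARMS, __TCA_QUX_MAX };
    struct tc_qux_qopt { u32 limit; };

    static int qux_dump(struct Qdisc *sch, struct sk_buff *skb)
    {
            struct nlattr *opts;
            struct tc_qux_qopt opt;

            memset(&opt, 0, sizeof(opt));
            /* ... fill opt from qdisc_priv(sch) ... */

            opts = nla_nest_start(skb, TCA_OPTIONS);    /* open TCA_OPTIONS */
            if (opts == NULL)
                    goto nla_put_failure;
            NLA_PUT(skb, TCA_QUX_PARMS, sizeof(opt), &opt);
            return nla_nest_end(skb, opts);             /* fixes up nla_len */

    nla_put_failure:
            return nla_nest_cancel(skb, opts);          /* trim the message */
    }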
+6 -6
net/sched/sch_sfq.c
··· 397 397 mod_timer(&q->perturb_timer, jiffies + q->perturb_period); 398 398 } 399 399 400 - static int sfq_change(struct Qdisc *sch, struct rtattr *opt) 400 + static int sfq_change(struct Qdisc *sch, struct nlattr *opt) 401 401 { 402 402 struct sfq_sched_data *q = qdisc_priv(sch); 403 - struct tc_sfq_qopt *ctl = RTA_DATA(opt); 403 + struct tc_sfq_qopt *ctl = nla_data(opt); 404 404 unsigned int qlen; 405 405 406 - if (opt->rta_len < RTA_LENGTH(sizeof(*ctl))) 406 + if (opt->nla_len < nla_attr_size(sizeof(*ctl))) 407 407 return -EINVAL; 408 408 409 409 sch_tree_lock(sch); ··· 426 426 return 0; 427 427 } 428 428 429 - static int sfq_init(struct Qdisc *sch, struct rtattr *opt) 429 + static int sfq_init(struct Qdisc *sch, struct nlattr *opt) 430 430 { 431 431 struct sfq_sched_data *q = qdisc_priv(sch); 432 432 int i; ··· 481 481 opt.divisor = SFQ_HASH_DIVISOR; 482 482 opt.flows = q->limit; 483 483 484 - RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 484 + NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 485 485 486 486 return skb->len; 487 487 488 - rtattr_failure: 488 + nla_put_failure: 489 489 nlmsg_trim(skb, b); 490 490 return -1; 491 491 }
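sfq_change() is the one conversion here that checks opt->nla_len directly against nla_attr_size() (header plus payload) instead of comparing nla_len() (payload only) with the struct size, as the other schedulers do; the two conditions are equivalent. For reference, a sketch under an assumed struct tc_quux_qopt option:

    #include <net/netlink.h>

    /* Placeholder option struct, for illustration only. */
    struct tc_quux_qopt { u32 limit; };

    /* Both forms reject an option attribute too short to hold the payload:
     * nla_len() excludes the attribute header, nla_attr_size() adds it back. */
    static int quux_check_opt(const struct nlattr *opt)
    {
            if (nla_len(opt) < (int)sizeof(struct tc_quux_qopt))
                    return -EINVAL;                 /* payload-only form */
            if (opt->nla_len < nla_attr_size(sizeof(struct tc_quux_qopt)))
                    return -EINVAL;                 /* header-inclusive form */
            return 0;
    }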
+25 -24
net/sched/sch_tbf.c
··· 245 245 static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit) 246 246 { 247 247 struct Qdisc *q; 248 - struct rtattr *rta; 248 + struct nlattr *nla; 249 249 int ret; 250 250 251 251 q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops, 252 252 TC_H_MAKE(sch->handle, 1)); 253 253 if (q) { 254 - rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); 255 - if (rta) { 256 - rta->rta_type = RTM_NEWQDISC; 257 - rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt)); 258 - ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit; 254 + nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), 255 + GFP_KERNEL); 256 + if (nla) { 257 + nla->nla_type = RTM_NEWQDISC; 258 + nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt)); 259 + ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit; 259 260 260 - ret = q->ops->change(q, rta); 261 - kfree(rta); 261 + ret = q->ops->change(q, nla); 262 + kfree(nla); 262 263 263 264 if (ret == 0) 264 265 return q; ··· 270 269 return NULL; 271 270 } 272 271 273 - static int tbf_change(struct Qdisc* sch, struct rtattr *opt) 272 + static int tbf_change(struct Qdisc* sch, struct nlattr *opt) 274 273 { 275 274 int err = -EINVAL; 276 275 struct tbf_sched_data *q = qdisc_priv(sch); 277 - struct rtattr *tb[TCA_TBF_PTAB]; 276 + struct nlattr *tb[TCA_TBF_PTAB + 1]; 278 277 struct tc_tbf_qopt *qopt; 279 278 struct qdisc_rate_table *rtab = NULL; 280 279 struct qdisc_rate_table *ptab = NULL; 281 280 struct Qdisc *child = NULL; 282 281 int max_size,n; 283 282 284 - if (rtattr_parse_nested(tb, TCA_TBF_PTAB, opt) || 285 - tb[TCA_TBF_PARMS-1] == NULL || 286 - RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt)) 283 + if (nla_parse_nested(tb, TCA_TBF_PTAB, opt, NULL) || 284 + tb[TCA_TBF_PARMS] == NULL || 285 + nla_len(tb[TCA_TBF_PARMS]) < sizeof(*qopt)) 287 286 goto done; 288 287 289 - qopt = RTA_DATA(tb[TCA_TBF_PARMS-1]); 290 - rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]); 288 + qopt = nla_data(tb[TCA_TBF_PARMS]); 289 + rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]); 291 290 if (rtab == NULL) 292 291 goto done; 293 292 294 293 if (qopt->peakrate.rate) { 295 294 if (qopt->peakrate.rate > qopt->rate.rate) 296 - ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB-1]); 295 + ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]); 297 296 if (ptab == NULL) 298 297 goto done; 299 298 } ··· 340 339 return err; 341 340 } 342 341 343 - static int tbf_init(struct Qdisc* sch, struct rtattr *opt) 342 + static int tbf_init(struct Qdisc* sch, struct nlattr *opt) 344 343 { 345 344 struct tbf_sched_data *q = qdisc_priv(sch); 346 345 ··· 372 371 { 373 372 struct tbf_sched_data *q = qdisc_priv(sch); 374 373 unsigned char *b = skb_tail_pointer(skb); 375 - struct rtattr *rta; 374 + struct nlattr *nla; 376 375 struct tc_tbf_qopt opt; 377 376 378 - rta = (struct rtattr*)b; 379 - RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 377 + nla = (struct nlattr*)b; 378 + NLA_PUT(skb, TCA_OPTIONS, 0, NULL); 380 379 381 380 opt.limit = q->limit; 382 381 opt.rate = q->R_tab->rate; ··· 386 385 memset(&opt.peakrate, 0, sizeof(opt.peakrate)); 387 386 opt.mtu = q->mtu; 388 387 opt.buffer = q->buffer; 389 - RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt); 390 - rta->rta_len = skb_tail_pointer(skb) - b; 388 + NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt); 389 + nla->nla_len = skb_tail_pointer(skb) - b; 391 390 392 391 return skb->len; 393 392 394 - rtattr_failure: 393 + nla_put_failure: 395 394 nlmsg_trim(skb, b); 396 395 return -1; 397 396 } ··· 443 442 } 444 443 445 444 static int 
tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid, 446 - struct rtattr **tca, unsigned long *arg) 445 + struct nlattr **tca, unsigned long *arg) 447 446 { 448 447 return -ENOSYS; 449 448 }
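Several schedulers (netem's set_fifo_limit(), red_create_dflt(), tbf_create_dflt_qdisc() above) build an attribute by hand rather than putting it into an skb, in order to feed a synthetic limit to an embedded fifo qdisc's ->change() hook; the conversion swaps RTA_LENGTH()/RTA_DATA() for nla_attr_size()/nla_data() and fills nla_type/nla_len instead of rta_type/rta_len. htb_change_class() does the same on the stack for its default rate estimator. A sketch of the heap variant, essentially restating the pattern in isolation (child_set_limit() is an invented name):

    #include <linux/slab.h>
    #include <net/netlink.h>
    #include <net/pkt_sched.h>

    /* Build a throwaway nlattr carrying a struct tc_fifo_qopt and hand it
     * to the child qdisc's ->change() hook. */
    static int child_set_limit(struct Qdisc *q, u32 limit)
    {
            struct nlattr *nla;
            int ret = -ENOMEM;

            nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
            if (nla) {
                    nla->nla_type = RTM_NEWQDISC;
                    nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
                    ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

                    ret = q->ops->change(q, nla);
                    kfree(nla);
            }
            return ret;
    }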
+1 -1
net/sched/sch_teql.c
··· 168 168 } 169 169 } 170 170 171 - static int teql_qdisc_init(struct Qdisc *sch, struct rtattr *opt) 171 + static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) 172 172 { 173 173 struct net_device *dev = sch->dev; 174 174 struct teql_master *m = (struct teql_master*)sch->ops;