Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched: align nlattr properly when needed

Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Nicolas Dichtel and committed by David S. Miller.
9854518e b676338f

+72 -37
+4 -2
Documentation/networking/gen_stats.txt
··· 33 33 { 34 34 struct gnet_dump dump; 35 35 36 - if (gnet_stats_start_copy(skb, TCA_STATS2, &mystruct->lock, &dump) < 0) 36 + if (gnet_stats_start_copy(skb, TCA_STATS2, &mystruct->lock, &dump, 37 + TCA_PAD) < 0) 37 38 goto rtattr_failure; 38 39 39 40 if (gnet_stats_copy_basic(&dump, &mystruct->bstats) < 0 || ··· 57 56 my_dumping_routine(struct sk_buff *skb, ...) 58 57 { 59 58 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 60 - TCA_XSTATS, &mystruct->lock, &dump) < 0) 59 + TCA_XSTATS, &mystruct->lock, &dump, 60 + TCA_PAD) < 0) 61 61 goto rtattr_failure; 62 62 ... 63 63 }
+4 -2
include/net/gen_stats.h
··· 19 19 /* Backward compatibility */ 20 20 int compat_tc_stats; 21 21 int compat_xstats; 22 + int padattr; 22 23 void * xstats; 23 24 int xstats_len; 24 25 struct tc_stats tc_stats; 25 26 }; 26 27 27 28 int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, 28 - struct gnet_dump *d); 29 + struct gnet_dump *d, int padattr); 29 30 30 31 int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, 31 32 int tc_stats_type, int xstats_type, 32 - spinlock_t *lock, struct gnet_dump *d); 33 + spinlock_t *lock, struct gnet_dump *d, 34 + int padattr); 33 35 34 36 int gnet_stats_copy_basic(struct gnet_dump *d, 35 37 struct gnet_stats_basic_cpu __percpu *cpu,
+1
include/uapi/linux/gen_stats.h
··· 10 10 TCA_STATS_QUEUE, 11 11 TCA_STATS_APP, 12 12 TCA_STATS_RATE_EST64, 13 + TCA_STATS_PAD, 13 14 __TCA_STATS_MAX, 14 15 }; 15 16 #define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
+2
include/uapi/linux/pkt_cls.h
··· 66 66 TCA_ACT_OPTIONS, 67 67 TCA_ACT_INDEX, 68 68 TCA_ACT_STATS, 69 + TCA_ACT_PAD, 69 70 __TCA_ACT_MAX 70 71 }; 71 72 ··· 174 173 TCA_U32_PCNT, 175 174 TCA_U32_MARK, 176 175 TCA_U32_FLAGS, 176 + TCA_U32_PAD, 177 177 __TCA_U32_MAX 178 178 }; 179 179
+1
include/uapi/linux/rtnetlink.h
··· 542 542 TCA_FCNT, 543 543 TCA_STATS2, 544 544 TCA_STAB, 545 + TCA_PAD, 545 546 __TCA_MAX 546 547 }; 547 548
+1
include/uapi/linux/tc_act/tc_bpf.h
··· 26 26 TCA_ACT_BPF_OPS, 27 27 TCA_ACT_BPF_FD, 28 28 TCA_ACT_BPF_NAME, 29 + TCA_ACT_BPF_PAD, 29 30 __TCA_ACT_BPF_MAX, 30 31 }; 31 32 #define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
+1
include/uapi/linux/tc_act/tc_connmark.h
··· 15 15 TCA_CONNMARK_UNSPEC, 16 16 TCA_CONNMARK_PARMS, 17 17 TCA_CONNMARK_TM, 18 + TCA_CONNMARK_PAD, 18 19 __TCA_CONNMARK_MAX 19 20 }; 20 21 #define TCA_CONNMARK_MAX (__TCA_CONNMARK_MAX - 1)
+1
include/uapi/linux/tc_act/tc_csum.h
··· 10 10 TCA_CSUM_UNSPEC, 11 11 TCA_CSUM_PARMS, 12 12 TCA_CSUM_TM, 13 + TCA_CSUM_PAD, 13 14 __TCA_CSUM_MAX 14 15 }; 15 16 #define TCA_CSUM_MAX (__TCA_CSUM_MAX - 1)
+1
include/uapi/linux/tc_act/tc_defact.h
··· 12 12 TCA_DEF_TM, 13 13 TCA_DEF_PARMS, 14 14 TCA_DEF_DATA, 15 + TCA_DEF_PAD, 15 16 __TCA_DEF_MAX 16 17 }; 17 18 #define TCA_DEF_MAX (__TCA_DEF_MAX - 1)
+1
include/uapi/linux/tc_act/tc_gact.h
··· 25 25 TCA_GACT_TM, 26 26 TCA_GACT_PARMS, 27 27 TCA_GACT_PROB, 28 + TCA_GACT_PAD, 28 29 __TCA_GACT_MAX 29 30 }; 30 31 #define TCA_GACT_MAX (__TCA_GACT_MAX - 1)
+1
include/uapi/linux/tc_act/tc_ife.h
··· 23 23 TCA_IFE_SMAC, 24 24 TCA_IFE_TYPE, 25 25 TCA_IFE_METALST, 26 + TCA_IFE_PAD, 26 27 __TCA_IFE_MAX 27 28 }; 28 29 #define TCA_IFE_MAX (__TCA_IFE_MAX - 1)
+1
include/uapi/linux/tc_act/tc_ipt.h
··· 14 14 TCA_IPT_CNT, 15 15 TCA_IPT_TM, 16 16 TCA_IPT_TARG, 17 + TCA_IPT_PAD, 17 18 __TCA_IPT_MAX 18 19 }; 19 20 #define TCA_IPT_MAX (__TCA_IPT_MAX - 1)
+1
include/uapi/linux/tc_act/tc_mirred.h
··· 20 20 TCA_MIRRED_UNSPEC, 21 21 TCA_MIRRED_TM, 22 22 TCA_MIRRED_PARMS, 23 + TCA_MIRRED_PAD, 23 24 __TCA_MIRRED_MAX 24 25 }; 25 26 #define TCA_MIRRED_MAX (__TCA_MIRRED_MAX - 1)
+1
include/uapi/linux/tc_act/tc_nat.h
··· 10 10 TCA_NAT_UNSPEC, 11 11 TCA_NAT_PARMS, 12 12 TCA_NAT_TM, 13 + TCA_NAT_PAD, 13 14 __TCA_NAT_MAX 14 15 }; 15 16 #define TCA_NAT_MAX (__TCA_NAT_MAX - 1)
+1
include/uapi/linux/tc_act/tc_pedit.h
··· 10 10 TCA_PEDIT_UNSPEC, 11 11 TCA_PEDIT_TM, 12 12 TCA_PEDIT_PARMS, 13 + TCA_PEDIT_PAD, 13 14 __TCA_PEDIT_MAX 14 15 }; 15 16 #define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1)
+1
include/uapi/linux/tc_act/tc_skbedit.h
··· 39 39 TCA_SKBEDIT_PRIORITY, 40 40 TCA_SKBEDIT_QUEUE_MAPPING, 41 41 TCA_SKBEDIT_MARK, 42 + TCA_SKBEDIT_PAD, 42 43 __TCA_SKBEDIT_MAX 43 44 }; 44 45 #define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
+1
include/uapi/linux/tc_act/tc_vlan.h
··· 28 28 TCA_VLAN_PARMS, 29 29 TCA_VLAN_PUSH_VLAN_ID, 30 30 TCA_VLAN_PUSH_VLAN_PROTOCOL, 31 + TCA_VLAN_PAD, 31 32 __TCA_VLAN_MAX, 32 33 }; 33 34 #define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
+21 -14
net/core/gen_stats.c
··· 25 25 26 26 27 27 static inline int 28 - gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) 28 + gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr) 29 29 { 30 - if (nla_put(d->skb, type, size, buf)) 30 + if (nla_put_64bit(d->skb, type, size, buf, padattr)) 31 31 goto nla_put_failure; 32 32 return 0; 33 33 ··· 59 59 */ 60 60 int 61 61 gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type, 62 - int xstats_type, spinlock_t *lock, struct gnet_dump *d) 62 + int xstats_type, spinlock_t *lock, 63 + struct gnet_dump *d, int padattr) 63 64 __acquires(lock) 64 65 { 65 66 memset(d, 0, sizeof(*d)); ··· 72 71 d->skb = skb; 73 72 d->compat_tc_stats = tc_stats_type; 74 73 d->compat_xstats = xstats_type; 74 + d->padattr = padattr; 75 75 76 76 if (d->tail) 77 - return gnet_stats_copy(d, type, NULL, 0); 77 + return gnet_stats_copy(d, type, NULL, 0, padattr); 78 78 79 79 return 0; 80 80 } 81 81 EXPORT_SYMBOL(gnet_stats_start_copy_compat); 82 82 83 83 /** 84 - * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode 84 + * gnet_stats_start_copy - start dumping procedure in compatibility mode 85 85 * @skb: socket buffer to put statistics TLVs into 86 86 * @type: TLV type for top level statistic TLV 87 87 * @lock: statistics lock ··· 96 94 */ 97 95 int 98 96 gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, 99 - struct gnet_dump *d) 97 + struct gnet_dump *d, int padattr) 100 98 { 101 - return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d); 99 + return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr); 102 100 } 103 101 EXPORT_SYMBOL(gnet_stats_start_copy); 104 102 ··· 171 169 memset(&sb, 0, sizeof(sb)); 172 170 sb.bytes = bstats.bytes; 173 171 sb.packets = bstats.packets; 174 - return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb)); 172 + return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb), 173 + TCA_STATS_PAD); 175 174 } 176 175 return 
0; 177 176 } ··· 211 208 } 212 209 213 210 if (d->tail) { 214 - res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est)); 211 + res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est), 212 + TCA_STATS_PAD); 215 213 if (res < 0 || est.bps == r->bps) 216 214 return res; 217 215 /* emit 64bit stats only if needed */ 218 - return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r)); 216 + return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r), 217 + TCA_STATS_PAD); 219 218 } 220 219 221 220 return 0; ··· 291 286 292 287 if (d->tail) 293 288 return gnet_stats_copy(d, TCA_STATS_QUEUE, 294 - &qstats, sizeof(qstats)); 289 + &qstats, sizeof(qstats), 290 + TCA_STATS_PAD); 295 291 296 292 return 0; 297 293 } ··· 322 316 } 323 317 324 318 if (d->tail) 325 - return gnet_stats_copy(d, TCA_STATS_APP, st, len); 319 + return gnet_stats_copy(d, TCA_STATS_APP, st, len, 320 + TCA_STATS_PAD); 326 321 327 322 return 0; 328 323 ··· 354 347 355 348 if (d->compat_tc_stats) 356 349 if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats, 357 - sizeof(d->tc_stats)) < 0) 350 + sizeof(d->tc_stats), d->padattr) < 0) 358 351 return -1; 359 352 360 353 if (d->compat_xstats && d->xstats) { 361 354 if (gnet_stats_copy(d, d->compat_xstats, d->xstats, 362 - d->xstats_len) < 0) 355 + d->xstats_len, d->padattr) < 0) 363 356 return -1; 364 357 } 365 358
+5 -2
net/sched/act_api.c
··· 657 657 if (compat_mode) { 658 658 if (a->type == TCA_OLD_COMPAT) 659 659 err = gnet_stats_start_copy_compat(skb, 0, 660 - TCA_STATS, TCA_XSTATS, &p->tcfc_lock, &d); 660 + TCA_STATS, 661 + TCA_XSTATS, 662 + &p->tcfc_lock, &d, 663 + TCA_PAD); 661 664 else 662 665 return 0; 663 666 } else 664 667 err = gnet_stats_start_copy(skb, TCA_ACT_STATS, 665 - &p->tcfc_lock, &d); 668 + &p->tcfc_lock, &d, TCA_ACT_PAD); 666 669 667 670 if (err < 0) 668 671 goto errout;
+2 -1
net/sched/act_bpf.c
··· 156 156 tm.lastuse = jiffies_to_clock_t(jiffies - prog->tcf_tm.lastuse); 157 157 tm.expires = jiffies_to_clock_t(prog->tcf_tm.expires); 158 158 159 - if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm)) 159 + if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm, 160 + TCA_ACT_BPF_PAD)) 160 161 goto nla_put_failure; 161 162 162 163 return skb->len;
+2 -1
net/sched/act_connmark.c
··· 163 163 t.install = jiffies_to_clock_t(jiffies - ci->tcf_tm.install); 164 164 t.lastuse = jiffies_to_clock_t(jiffies - ci->tcf_tm.lastuse); 165 165 t.expires = jiffies_to_clock_t(ci->tcf_tm.expires); 166 - if (nla_put(skb, TCA_CONNMARK_TM, sizeof(t), &t)) 166 + if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t, 167 + TCA_CONNMARK_PAD)) 167 168 goto nla_put_failure; 168 169 169 170 return skb->len;
+1 -1
net/sched/act_csum.c
··· 549 549 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 550 550 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 551 551 t.expires = jiffies_to_clock_t(p->tcf_tm.expires); 552 - if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t)) 552 + if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD)) 553 553 goto nla_put_failure; 554 554 555 555 return skb->len;
+1 -1
net/sched/act_gact.c
··· 177 177 t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install); 178 178 t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse); 179 179 t.expires = jiffies_to_clock_t(gact->tcf_tm.expires); 180 - if (nla_put(skb, TCA_GACT_TM, sizeof(t), &t)) 180 + if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD)) 181 181 goto nla_put_failure; 182 182 return skb->len; 183 183
+1 -1
net/sched/act_ife.c
··· 550 550 t.install = jiffies_to_clock_t(jiffies - ife->tcf_tm.install); 551 551 t.lastuse = jiffies_to_clock_t(jiffies - ife->tcf_tm.lastuse); 552 552 t.expires = jiffies_to_clock_t(ife->tcf_tm.expires); 553 - if (nla_put(skb, TCA_IFE_TM, sizeof(t), &t)) 553 + if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD)) 554 554 goto nla_put_failure; 555 555 556 556 if (!is_zero_ether_addr(ife->eth_dst)) {
+1 -1
net/sched/act_ipt.c
··· 275 275 tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install); 276 276 tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse); 277 277 tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires); 278 - if (nla_put(skb, TCA_IPT_TM, sizeof (tm), &tm)) 278 + if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD)) 279 279 goto nla_put_failure; 280 280 kfree(t); 281 281 return skb->len;
+1 -1
net/sched/act_mirred.c
··· 214 214 t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); 215 215 t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); 216 216 t.expires = jiffies_to_clock_t(m->tcf_tm.expires); 217 - if (nla_put(skb, TCA_MIRRED_TM, sizeof(t), &t)) 217 + if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD)) 218 218 goto nla_put_failure; 219 219 return skb->len; 220 220
+1 -1
net/sched/act_nat.c
··· 267 267 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 268 268 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 269 269 t.expires = jiffies_to_clock_t(p->tcf_tm.expires); 270 - if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t)) 270 + if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD)) 271 271 goto nla_put_failure; 272 272 273 273 return skb->len;
+1 -1
net/sched/act_pedit.c
··· 203 203 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 204 204 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 205 205 t.expires = jiffies_to_clock_t(p->tcf_tm.expires); 206 - if (nla_put(skb, TCA_PEDIT_TM, sizeof(t), &t)) 206 + if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD)) 207 207 goto nla_put_failure; 208 208 kfree(opt); 209 209 return skb->len;
+1 -1
net/sched/act_simple.c
··· 155 155 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); 156 156 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); 157 157 t.expires = jiffies_to_clock_t(d->tcf_tm.expires); 158 - if (nla_put(skb, TCA_DEF_TM, sizeof(t), &t)) 158 + if (nla_put_64bit(skb, TCA_DEF_TM, sizeof(t), &t, TCA_DEF_PAD)) 159 159 goto nla_put_failure; 160 160 return skb->len; 161 161
+1 -1
net/sched/act_skbedit.c
··· 167 167 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); 168 168 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); 169 169 t.expires = jiffies_to_clock_t(d->tcf_tm.expires); 170 - if (nla_put(skb, TCA_SKBEDIT_TM, sizeof(t), &t)) 170 + if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD)) 171 171 goto nla_put_failure; 172 172 return skb->len; 173 173
+1 -1
net/sched/act_vlan.c
··· 175 175 t.install = jiffies_to_clock_t(jiffies - v->tcf_tm.install); 176 176 t.lastuse = jiffies_to_clock_t(jiffies - v->tcf_tm.lastuse); 177 177 t.expires = jiffies_to_clock_t(v->tcf_tm.expires); 178 - if (nla_put(skb, TCA_VLAN_TM, sizeof(t), &t)) 178 + if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD)) 179 179 goto nla_put_failure; 180 180 return skb->len; 181 181
+4 -3
net/sched/cls_u32.c
··· 1140 1140 gpf->kcnts[i] += pf->kcnts[i]; 1141 1141 } 1142 1142 1143 - if (nla_put(skb, TCA_U32_PCNT, 1144 - sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), 1145 - gpf)) { 1143 + if (nla_put_64bit(skb, TCA_U32_PCNT, 1144 + sizeof(struct tc_u32_pcnt) + 1145 + n->sel.nkeys * sizeof(u64), 1146 + gpf, TCA_U32_PAD)) { 1146 1147 kfree(gpf); 1147 1148 goto nla_put_failure; 1148 1149 }
+4 -2
net/sched/sch_api.c
··· 1365 1365 goto nla_put_failure; 1366 1366 1367 1367 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, 1368 - qdisc_root_sleeping_lock(q), &d) < 0) 1368 + qdisc_root_sleeping_lock(q), &d, 1369 + TCA_PAD) < 0) 1369 1370 goto nla_put_failure; 1370 1371 1371 1372 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) ··· 1680 1679 goto nla_put_failure; 1681 1680 1682 1681 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, 1683 - qdisc_root_sleeping_lock(q), &d) < 0) 1682 + qdisc_root_sleeping_lock(q), &d, 1683 + TCA_PAD) < 0) 1684 1684 goto nla_put_failure; 1685 1685 1686 1686 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)