Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net_sched: fix ops->bind_class() implementations

The current implementations of ops->bind_class() are merely
searching for classid and updating class in the struct tcf_result,
without invoking either of cl_ops->bind_tcf() or
cl_ops->unbind_tcf(). This breaks their design, as qdiscs
like cbq use them to count filters too. This is why syzbot triggered
the warning in cbq_destroy_class().

In order to fix this, we have to call cl_ops->bind_tcf() and
cl_ops->unbind_tcf() like the filter binding path. This patch does
so by refactoring out two helper functions __tcf_bind_filter()
and __tcf_unbind_filter(), which are lockless and accept a Qdisc
pointer, then teaching each implementation to call them correctly.

Note that we merely pass the Qdisc pointer as an opaque pointer to
each filter; they only need to pass it down to the helper
functions without understanding it at all.

Fixes: 07d79fc7d94e ("net_sched: add reverse binding for tc class")
Reported-and-tested-by: syzbot+0a0596220218fcb603a8@syzkaller.appspotmail.com
Reported-and-tested-by: syzbot+63bdb6006961d8c917c6@syzkaller.appspotmail.com
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Cc: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Cong Wang and committed by
David S. Miller
2e24cd75 16b25d1a

+97 -44
+19 -14
include/net/pkt_cls.h
··· 141 141 return xchg(clp, cl); 142 142 } 143 143 144 - static inline unsigned long 145 - cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl) 144 + static inline void 145 + __tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base) 146 146 { 147 - unsigned long old_cl; 147 + unsigned long cl; 148 148 149 - sch_tree_lock(q); 150 - old_cl = __cls_set_class(clp, cl); 151 - sch_tree_unlock(q); 152 - return old_cl; 149 + cl = q->ops->cl_ops->bind_tcf(q, base, r->classid); 150 + cl = __cls_set_class(&r->class, cl); 151 + if (cl) 152 + q->ops->cl_ops->unbind_tcf(q, cl); 153 153 } 154 154 155 155 static inline void 156 156 tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base) 157 157 { 158 158 struct Qdisc *q = tp->chain->block->q; 159 - unsigned long cl; 160 159 161 160 /* Check q as it is not set for shared blocks. In that case, 162 161 * setting class is not supported. 163 162 */ 164 163 if (!q) 165 164 return; 166 - cl = q->ops->cl_ops->bind_tcf(q, base, r->classid); 167 - cl = cls_set_class(q, &r->class, cl); 168 - if (cl) 165 + sch_tree_lock(q); 166 + __tcf_bind_filter(q, r, base); 167 + sch_tree_unlock(q); 168 + } 169 + 170 + static inline void 171 + __tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r) 172 + { 173 + unsigned long cl; 174 + 175 + if ((cl = __cls_set_class(&r->class, 0)) != 0) 169 176 q->ops->cl_ops->unbind_tcf(q, cl); 170 177 } 171 178 ··· 180 173 tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r) 181 174 { 182 175 struct Qdisc *q = tp->chain->block->q; 183 - unsigned long cl; 184 176 185 177 if (!q) 186 178 return; 187 - if ((cl = __cls_set_class(&r->class, 0)) != 0) 188 - q->ops->cl_ops->unbind_tcf(q, cl); 179 + __tcf_unbind_filter(q, r); 189 180 } 190 181 191 182 struct tcf_exts {
+2 -1
include/net/sch_generic.h
··· 318 318 void *type_data); 319 319 void (*hw_del)(struct tcf_proto *tp, 320 320 void *type_data); 321 - void (*bind_class)(void *, u32, unsigned long); 321 + void (*bind_class)(void *, u32, unsigned long, 322 + void *, unsigned long); 322 323 void * (*tmplt_create)(struct net *net, 323 324 struct tcf_chain *chain, 324 325 struct nlattr **tca,
+8 -3
net/sched/cls_basic.c
··· 263 263 } 264 264 } 265 265 266 - static void basic_bind_class(void *fh, u32 classid, unsigned long cl) 266 + static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 267 + unsigned long base) 267 268 { 268 269 struct basic_filter *f = fh; 269 270 270 - if (f && f->res.classid == classid) 271 - f->res.class = cl; 271 + if (f && f->res.classid == classid) { 272 + if (cl) 273 + __tcf_bind_filter(q, &f->res, base); 274 + else 275 + __tcf_unbind_filter(q, &f->res); 276 + } 272 277 } 273 278 274 279 static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh,
+8 -3
net/sched/cls_bpf.c
··· 631 631 return -1; 632 632 } 633 633 634 - static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl) 634 + static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl, 635 + void *q, unsigned long base) 635 636 { 636 637 struct cls_bpf_prog *prog = fh; 637 638 638 - if (prog && prog->res.classid == classid) 639 - prog->res.class = cl; 639 + if (prog && prog->res.classid == classid) { 640 + if (cl) 641 + __tcf_bind_filter(q, &prog->res, base); 642 + else 643 + __tcf_unbind_filter(q, &prog->res); 644 + } 640 645 } 641 646 642 647 static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+8 -3
net/sched/cls_flower.c
··· 2765 2765 return -EMSGSIZE; 2766 2766 } 2767 2767 2768 - static void fl_bind_class(void *fh, u32 classid, unsigned long cl) 2768 + static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 2769 + unsigned long base) 2769 2770 { 2770 2771 struct cls_fl_filter *f = fh; 2771 2772 2772 - if (f && f->res.classid == classid) 2773 - f->res.class = cl; 2773 + if (f && f->res.classid == classid) { 2774 + if (cl) 2775 + __tcf_bind_filter(q, &f->res, base); 2776 + else 2777 + __tcf_unbind_filter(q, &f->res); 2778 + } 2774 2779 } 2775 2780 2776 2781 static bool fl_delete_empty(struct tcf_proto *tp)
+8 -3
net/sched/cls_fw.c
··· 419 419 return -1; 420 420 } 421 421 422 - static void fw_bind_class(void *fh, u32 classid, unsigned long cl) 422 + static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 423 + unsigned long base) 423 424 { 424 425 struct fw_filter *f = fh; 425 426 426 - if (f && f->res.classid == classid) 427 - f->res.class = cl; 427 + if (f && f->res.classid == classid) { 428 + if (cl) 429 + __tcf_bind_filter(q, &f->res, base); 430 + else 431 + __tcf_unbind_filter(q, &f->res); 432 + } 428 433 } 429 434 430 435 static struct tcf_proto_ops cls_fw_ops __read_mostly = {
+8 -3
net/sched/cls_matchall.c
··· 393 393 return -1; 394 394 } 395 395 396 - static void mall_bind_class(void *fh, u32 classid, unsigned long cl) 396 + static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 397 + unsigned long base) 397 398 { 398 399 struct cls_mall_head *head = fh; 399 400 400 - if (head && head->res.classid == classid) 401 - head->res.class = cl; 401 + if (head && head->res.classid == classid) { 402 + if (cl) 403 + __tcf_bind_filter(q, &head->res, base); 404 + else 405 + __tcf_unbind_filter(q, &head->res); 406 + } 402 407 } 403 408 404 409 static struct tcf_proto_ops cls_mall_ops __read_mostly = {
+8 -3
net/sched/cls_route.c
··· 641 641 return -1; 642 642 } 643 643 644 - static void route4_bind_class(void *fh, u32 classid, unsigned long cl) 644 + static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 645 + unsigned long base) 645 646 { 646 647 struct route4_filter *f = fh; 647 648 648 - if (f && f->res.classid == classid) 649 - f->res.class = cl; 649 + if (f && f->res.classid == classid) { 650 + if (cl) 651 + __tcf_bind_filter(q, &f->res, base); 652 + else 653 + __tcf_unbind_filter(q, &f->res); 654 + } 650 655 } 651 656 652 657 static struct tcf_proto_ops cls_route4_ops __read_mostly = {
+8 -3
net/sched/cls_rsvp.h
··· 738 738 return -1; 739 739 } 740 740 741 - static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl) 741 + static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 742 + unsigned long base) 742 743 { 743 744 struct rsvp_filter *f = fh; 744 745 745 - if (f && f->res.classid == classid) 746 - f->res.class = cl; 746 + if (f && f->res.classid == classid) { 747 + if (cl) 748 + __tcf_bind_filter(q, &f->res, base); 749 + else 750 + __tcf_unbind_filter(q, &f->res); 751 + } 747 752 } 748 753 749 754 static struct tcf_proto_ops RSVP_OPS __read_mostly = {
+8 -3
net/sched/cls_tcindex.c
··· 654 654 return -1; 655 655 } 656 656 657 - static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl) 657 + static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl, 658 + void *q, unsigned long base) 658 659 { 659 660 struct tcindex_filter_result *r = fh; 660 661 661 - if (r && r->res.classid == classid) 662 - r->res.class = cl; 662 + if (r && r->res.classid == classid) { 663 + if (cl) 664 + __tcf_bind_filter(q, &r->res, base); 665 + else 666 + __tcf_unbind_filter(q, &r->res); 667 + } 663 668 } 664 669 665 670 static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
+8 -3
net/sched/cls_u32.c
··· 1255 1255 return 0; 1256 1256 } 1257 1257 1258 - static void u32_bind_class(void *fh, u32 classid, unsigned long cl) 1258 + static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 1259 + unsigned long base) 1259 1260 { 1260 1261 struct tc_u_knode *n = fh; 1261 1262 1262 - if (n && n->res.classid == classid) 1263 - n->res.class = cl; 1263 + if (n && n->res.classid == classid) { 1264 + if (cl) 1265 + __tcf_bind_filter(q, &n->res, base); 1266 + else 1267 + __tcf_unbind_filter(q, &n->res); 1268 + } 1264 1269 } 1265 1270 1266 1271 static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
+4 -2
net/sched/sch_api.c
··· 1891 1891 1892 1892 struct tcf_bind_args { 1893 1893 struct tcf_walker w; 1894 - u32 classid; 1894 + unsigned long base; 1895 1895 unsigned long cl; 1896 + u32 classid; 1896 1897 }; 1897 1898 1898 1899 static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg) ··· 1904 1903 struct Qdisc *q = tcf_block_q(tp->chain->block); 1905 1904 1906 1905 sch_tree_lock(q); 1907 - tp->ops->bind_class(n, a->classid, a->cl); 1906 + tp->ops->bind_class(n, a->classid, a->cl, q, a->base); 1908 1907 sch_tree_unlock(q); 1909 1908 } 1910 1909 return 0; ··· 1937 1936 1938 1937 arg.w.fn = tcf_node_bind; 1939 1938 arg.classid = clid; 1939 + arg.base = cl; 1940 1940 arg.cl = new_cl; 1941 1941 tp->ops->walk(tp, &arg.w, true); 1942 1942 }