Repository: Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

net: sched: fix tc_should_offload for specific clsact classes

When offloading classifiers such as u32 or flower to hardware, and the
qdisc is clsact (TC_H_CLSACT), then we need to differentiate its classes,
since not all of them handle ingress, therefore we must leave those in
software path. Add a .tcf_cl_offload() callback, so we can generically
handle them, tested on ixgbe.

Fixes: 10cbc6843446 ("net/sched: cls_flower: Hardware offloaded filters statistics support")
Fixes: 5b33f48842fa ("net/flower: Introduce hardware offload support")
Fixes: a1b7c5fd7fe9 ("net: sched: add cls_u32 offload hooks for netdevs")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by: Daniel Borkmann <daniel@iogearbox.net>
Committed by: David S. Miller <davem@davemloft.net>
Commit: 92c075db (parent: a03e6fe5)

Total diffstat: 27 insertions(+), 10 deletions(-)
+7 -3
include/net/pkt_cls.h
@@ -392,16 +392,20 @@
 };
 };
 
-static inline bool tc_should_offload(struct net_device *dev, u32 flags)
+static inline bool tc_should_offload(const struct net_device *dev,
+                                     const struct tcf_proto *tp, u32 flags)
 {
+        const struct Qdisc *sch = tp->q;
+        const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
+
         if (!(dev->features & NETIF_F_HW_TC))
                 return false;
-
         if (flags & TCA_CLS_FLAGS_SKIP_HW)
                 return false;
-
         if (!dev->netdev_ops->ndo_setup_tc)
                 return false;
+        if (cops && cops->tcf_cl_offload)
+                return cops->tcf_cl_offload(tp->classid);
 
         return true;
 }
+1
include/net/sch_generic.h
@@ -168,6 +168,7 @@
 
         /* Filter manipulation */
         struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
+        bool                    (*tcf_cl_offload)(u32 classid);
         unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                             u32 classid);
         void                    (*unbind_tcf)(struct Qdisc *, unsigned long);
+3 -3
net/sched/cls_flower.c
@@ -171,7 +171,7 @@
         struct tc_cls_flower_offload offload = {0};
         struct tc_to_netdev tc;
 
-        if (!tc_should_offload(dev, 0))
+        if (!tc_should_offload(dev, tp, 0))
                 return;
 
         offload.command = TC_CLSFLOWER_DESTROY;
@@ -194,7 +194,7 @@
         struct tc_cls_flower_offload offload = {0};
         struct tc_to_netdev tc;
 
-        if (!tc_should_offload(dev, flags))
+        if (!tc_should_offload(dev, tp, flags))
                 return;
 
         offload.command = TC_CLSFLOWER_REPLACE;
@@ -216,7 +216,7 @@
         struct tc_cls_flower_offload offload = {0};
         struct tc_to_netdev tc;
 
-        if (!tc_should_offload(dev, 0))
+        if (!tc_should_offload(dev, tp, 0))
                 return;
 
         offload.command = TC_CLSFLOWER_STATS;
+4 -4
net/sched/cls_u32.c
@@ -440,7 +440,7 @@
         offload.type = TC_SETUP_CLSU32;
         offload.cls_u32 = &u32_offload;
 
-        if (tc_should_offload(dev, 0)) {
+        if (tc_should_offload(dev, tp, 0)) {
                 offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
                 offload.cls_u32->knode.handle = handle;
                 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
@@ -457,7 +457,7 @@
         struct tc_to_netdev offload;
         int err;
 
-        if (!tc_should_offload(dev, flags))
+        if (!tc_should_offload(dev, tp, flags))
                 return tc_skip_sw(flags) ? -EINVAL : 0;
 
         offload.type = TC_SETUP_CLSU32;
@@ -485,7 +485,7 @@
         offload.type = TC_SETUP_CLSU32;
         offload.cls_u32 = &u32_offload;
 
-        if (tc_should_offload(dev, 0)) {
+        if (tc_should_offload(dev, tp, 0)) {
                 offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
                 offload.cls_u32->hnode.divisor = h->divisor;
                 offload.cls_u32->hnode.handle = h->handle;
@@ -508,7 +508,7 @@
         offload.type = TC_SETUP_CLSU32;
         offload.cls_u32 = &u32_offload;
 
-        if (tc_should_offload(dev, flags)) {
+        if (tc_should_offload(dev, tp, flags)) {
                 offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
                 offload.cls_u32->knode.handle = n->handle;
                 offload.cls_u32->knode.fshift = n->fshift;
+12
net/sched/sch_ingress.c
@@ -27,6 +27,11 @@
         return TC_H_MIN(classid) + 1;
 }
 
+static bool ingress_cl_offload(u32 classid)
+{
+        return true;
+}
+
 static unsigned long ingress_bind_filter(struct Qdisc *sch,
                                          unsigned long parent, u32 classid)
 {
@@ -86,6 +91,7 @@
         .put            =       ingress_put,
         .walk           =       ingress_walk,
         .tcf_chain      =       ingress_find_tcf,
+        .tcf_cl_offload =       ingress_cl_offload,
         .bind_tcf       =       ingress_bind_filter,
         .unbind_tcf     =       ingress_put,
 };
@@ -108,6 +114,11 @@
         default:
                 return 0;
         }
+}
+
+static bool clsact_cl_offload(u32 classid)
+{
+        return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
 }
 
 static unsigned long clsact_bind_filter(struct Qdisc *sch,
@@ -158,6 +169,7 @@
         .put            =       ingress_put,
         .walk           =       ingress_walk,
         .tcf_chain      =       clsact_find_tcf,
+        .tcf_cl_offload =       clsact_cl_offload,
         .bind_tcf       =       clsact_bind_filter,
         .unbind_tcf     =       ingress_put,
 };