Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/sched: Add match-all classifier hw offloading.

Following the work that has been done on offloading classifiers like u32
and flower, match-all classifier hw offloading is now possible, provided
the interface supports tc offloading.

To control the offloading, two tc flags have been introduced: skip_sw and
skip_hw. Typical usage:

tc filter add dev eth25 parent ffff: \
matchall skip_sw \
action mirred egress mirror \
dev eth27

Signed-off-by: Yotam Gigi <yotamg@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Yotam Gigi and committed by
David S. Miller
b87f7936 bf3994d2

+87 -3
+2
include/linux/netdevice.h
··· 787 787 TC_SETUP_MQPRIO, 788 788 TC_SETUP_CLSU32, 789 789 TC_SETUP_CLSFLOWER, 790 + TC_SETUP_MATCHALL, 790 791 }; 791 792 792 793 struct tc_cls_u32_offload; ··· 798 797 u8 tc; 799 798 struct tc_cls_u32_offload *cls_u32; 800 799 struct tc_cls_flower_offload *cls_flower; 800 + struct tc_cls_matchall_offload *cls_mall; 801 801 }; 802 802 }; 803 803
+11
include/net/pkt_cls.h
··· 442 442 struct tcf_exts *exts; 443 443 }; 444 444 445 + enum tc_matchall_command { 446 + TC_CLSMATCHALL_REPLACE, 447 + TC_CLSMATCHALL_DESTROY, 448 + }; 449 + 450 + struct tc_cls_matchall_offload { 451 + enum tc_matchall_command command; 452 + struct tcf_exts *exts; 453 + unsigned long cookie; 454 + }; 455 + 445 456 #endif
+1
include/uapi/linux/pkt_cls.h
··· 439 439 TCA_MATCHALL_UNSPEC, 440 440 TCA_MATCHALL_CLASSID, 441 441 TCA_MATCHALL_ACT, 442 + TCA_MATCHALL_FLAGS, 442 443 __TCA_MATCHALL_MAX, 443 444 }; 444 445
+73 -3
net/sched/cls_matchall.c
··· 21 21 struct tcf_result res; 22 22 u32 handle; 23 23 struct rcu_head rcu; 24 + u32 flags; 24 25 }; 25 26 26 27 struct cls_mall_head { ··· 34 33 { 35 34 struct cls_mall_head *head = rcu_dereference_bh(tp->root); 36 35 struct cls_mall_filter *f = head->filter; 36 + 37 + if (tc_skip_sw(f->flags)) 38 + return -1; 37 39 38 40 return tcf_exts_exec(skb, &f->exts, res); 39 41 } ··· 59 55 struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu); 60 56 61 57 tcf_exts_destroy(&f->exts); 58 + 62 59 kfree(f); 60 + } 61 + 62 + static int mall_replace_hw_filter(struct tcf_proto *tp, 63 + struct cls_mall_filter *f, 64 + unsigned long cookie) 65 + { 66 + struct net_device *dev = tp->q->dev_queue->dev; 67 + struct tc_to_netdev offload; 68 + struct tc_cls_matchall_offload mall_offload = {0}; 69 + 70 + offload.type = TC_SETUP_MATCHALL; 71 + offload.cls_mall = &mall_offload; 72 + offload.cls_mall->command = TC_CLSMATCHALL_REPLACE; 73 + offload.cls_mall->exts = &f->exts; 74 + offload.cls_mall->cookie = cookie; 75 + 76 + return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, 77 + &offload); 78 + } 79 + 80 + static void mall_destroy_hw_filter(struct tcf_proto *tp, 81 + struct cls_mall_filter *f, 82 + unsigned long cookie) 83 + { 84 + struct net_device *dev = tp->q->dev_queue->dev; 85 + struct tc_to_netdev offload; 86 + struct tc_cls_matchall_offload mall_offload = {0}; 87 + 88 + offload.type = TC_SETUP_MATCHALL; 89 + offload.cls_mall = &mall_offload; 90 + offload.cls_mall->command = TC_CLSMATCHALL_DESTROY; 91 + offload.cls_mall->exts = NULL; 92 + offload.cls_mall->cookie = cookie; 93 + 94 + dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, 95 + &offload); 63 96 } 64 97 65 98 static bool mall_destroy(struct tcf_proto *tp, bool force) 66 99 { 67 100 struct cls_mall_head *head = rtnl_dereference(tp->root); 101 + struct net_device *dev = tp->q->dev_queue->dev; 102 + struct cls_mall_filter *f = head->filter; 68 103 69 - if (!force && 
head->filter) 104 + if (!force && f) 70 105 return false; 71 106 72 - if (head->filter) 73 - call_rcu(&head->filter->rcu, mall_destroy_filter); 107 + if (f) { 108 + if (tc_should_offload(dev, tp, f->flags)) 109 + mall_destroy_hw_filter(tp, f, (unsigned long) f); 110 + 111 + call_rcu(&f->rcu, mall_destroy_filter); 112 + } 74 113 RCU_INIT_POINTER(tp->root, NULL); 75 114 kfree_rcu(head, rcu); 76 115 return true; ··· 164 117 { 165 118 struct cls_mall_head *head = rtnl_dereference(tp->root); 166 119 struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg; 120 + struct net_device *dev = tp->q->dev_queue->dev; 167 121 struct cls_mall_filter *f; 168 122 struct nlattr *tb[TCA_MATCHALL_MAX + 1]; 123 + u32 flags = 0; 169 124 int err; 170 125 171 126 if (!tca[TCA_OPTIONS]) ··· 184 135 if (err < 0) 185 136 return err; 186 137 138 + if (tb[TCA_MATCHALL_FLAGS]) { 139 + flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]); 140 + if (!tc_flags_valid(flags)) 141 + return -EINVAL; 142 + } 143 + 187 144 f = kzalloc(sizeof(*f), GFP_KERNEL); 188 145 if (!f) 189 146 return -ENOBUFS; ··· 199 144 if (!handle) 200 145 handle = 1; 201 146 f->handle = handle; 147 + f->flags = flags; 202 148 203 149 err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); 204 150 if (err) 205 151 goto errout; 152 + 153 + if (tc_should_offload(dev, tp, flags)) { 154 + err = mall_replace_hw_filter(tp, f, (unsigned long) f); 155 + if (err) { 156 + if (tc_skip_sw(flags)) 157 + goto errout; 158 + else 159 + err = 0; 160 + } 161 + } 206 162 207 163 *arg = (unsigned long) f; 208 164 rcu_assign_pointer(head->filter, f); ··· 229 163 { 230 164 struct cls_mall_head *head = rtnl_dereference(tp->root); 231 165 struct cls_mall_filter *f = (struct cls_mall_filter *) arg; 166 + struct net_device *dev = tp->q->dev_queue->dev; 167 + 168 + if (tc_should_offload(dev, tp, f->flags)) 169 + mall_destroy_hw_filter(tp, f, (unsigned long) f); 232 170 233 171 RCU_INIT_POINTER(head->filter, NULL); 234 172 tcf_unbind_filter(tp, 
&f->res);