Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.16-rc2 234 lines 5.4 kB view raw
/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

/* Per-tcf_proto state, published through tp->root.  A change replaces the
 * whole head (there is at most one filter per proto), so readers in the
 * classify fast path only need an RCU dereference, never a lock.
 *
 * work and rcu share storage: the head is first queued via call_rcu()
 * (using @rcu) and the RCU callback then re-queues it onto a workqueue
 * (reusing the same bytes as @work) so the final teardown can take the
 * RTNL mutex, which is not allowed in RCU callback context.
 */
struct cls_cgroup_head {
	u32			handle;		/* filter handle from userspace */
	struct tcf_exts		exts;		/* attached actions / policer */
	struct tcf_ematch_tree	ematches;	/* optional ematch tree */
	struct tcf_proto	*tp;
	union {
		struct work_struct	work;
		struct rcu_head		rcu;
	};
};

/* Fast path: classify skb by the task classid recorded in its socket.
 * Runs under rcu_read_lock_bh() (softirq), hence rcu_dereference_bh().
 * Returns the action verdict, or -1 when no classid is set or the
 * ematch tree rejects the packet.
 */
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
	u32 classid = task_get_classid(skb);

	if (!classid)
		return -1;
	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;

	return tcf_exts_exec(skb, &head->exts, res);
}

/* There is no per-element handle lookup: the single head is managed
 * through ->change()/->destroy(), so ->get() always reports "not found".
 */
static void *cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return NULL;
}

/* Nothing to allocate up front; tp->root stays NULL until first change. */
static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

/* Final teardown; callers must hold RTNL (or be certain no concurrent
 * RTNL-side user exists) since tcf_exts_destroy() requires it.
 */
static void __cls_cgroup_destroy(struct cls_cgroup_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_em_tree_destroy(&head->ematches);
	tcf_exts_put_net(&head->exts);
	kfree(head);
}

/* Workqueue stage of deferred destruction: safe to sleep here, so take
 * RTNL and run the real teardown.
 */
static void cls_cgroup_destroy_work(struct work_struct *work)
{
	struct cls_cgroup_head *head = container_of(work,
						    struct cls_cgroup_head,
						    work);
	rtnl_lock();
	__cls_cgroup_destroy(head);
	rtnl_unlock();
}

/* RCU stage of deferred destruction: a grace period has elapsed, so no
 * classify path can still see this head; hand it to the workqueue
 * (reusing the union storage) for the sleepable part.
 */
static void cls_cgroup_destroy_rcu(struct rcu_head *root)
{
	struct cls_cgroup_head *head = container_of(root,
						    struct cls_cgroup_head,
						    rcu);

	INIT_WORK(&head->work, cls_cgroup_destroy_work);
	tcf_queue_work(&head->work);
}

/* Create or replace the single cgroup filter on @tp.  A new head is
 * fully built and validated before being published with
 * rcu_assign_pointer(); the old head (if any) is then freed via the
 * RCU + workqueue chain above so in-flight readers stay safe.
 */
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
			     struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     void **arg, bool ovr,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
	struct cls_cgroup_head *new;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (!head && !handle)
		return -EINVAL;

	/* Only one filter exists; a non-matching handle cannot refer to it. */
	if (head && handle != head->handle)
		return -ENOENT;

	new = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
	if (err < 0)
		goto errout;
	new->handle = handle;
	new->tp = tp;
	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy, NULL);
	if (err < 0)
		goto errout;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, ovr,
				extack);
	if (err < 0)
		goto errout;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &new->ematches);
	if (err < 0)
		goto errout;

	rcu_assign_pointer(tp->root, new);
	if (head) {
		tcf_exts_get_net(&head->exts);
		call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
	}
	return 0;
errout:
	tcf_exts_destroy(&new->exts);
	kfree(new);
	return err;
}

static void cls_cgroup_destroy(struct tcf_proto *tp,
			       struct netlink_ext_ack *extack)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);

	/* Head can still be NULL due to cls_cgroup_init(). */
	if (head) {
		/* If the netns is still alive, defer teardown through RCU;
		 * otherwise (tcf_exts_get_net() failed) destroy immediately.
		 */
		if (tcf_exts_get_net(&head->exts))
			call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
		else
			__cls_cgroup_destroy(head);
	}
}

/* Individual deletion is not supported; the filter goes away only when
 * the whole proto is destroyed.
 */
static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last,
			     struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

/* Walk the (at most one) filter, honouring the walker's skip/count. */
static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

/* Dump the filter's handle, extensions and ematches to a netlink skb.
 * Returns skb->len on success, -1 on overflow (attributes cancelled).
 */
static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, void *fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		= "cgroup",
	.init		= cls_cgroup_init,
	.change		= cls_cgroup_change,
	.classify	= cls_cgroup_classify,
	.destroy	= cls_cgroup_destroy,
	.get		= cls_cgroup_get,
	.delete		= cls_cgroup_delete,
	.walk		= cls_cgroup_walk,
	.dump		= cls_cgroup_dump,
	.owner		= THIS_MODULE,
};

static int __init init_cgroup_cls(void)
{
	return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");