cls_cgroup: Store classid in struct sock

Up until now cls_cgroup has relied on fetching the classid out of
the currently executing thread. This runs into trouble when packet
processing is delayed, in which case it may execute in another
thread's context.

Furthermore, even when a packet is not delayed we may fail to
classify it if soft IRQs have been disabled, because this scenario
is indistinguishable from one where a packet unrelated to the
current thread is processed by a real soft IRQ.

In fact, the current semantics is inherently broken, as a single
skb may be constructed out of the writes of two different tasks.
A different manifestation of this problem is when the TCP stack
transmits in response to an incoming ACK. This is currently
unclassified.

As we already have a concept of packet ownership for accounting
purposes in the skb->sk pointer, this is a natural place to store
the classid in a persistent manner.

This patch adds the cls_cgroup classid in struct sock, filling up
an existing hole on 64-bit :)

The value is set at socket creation time. So all sockets created
via socket(2) automatically gain the ID of the thread creating them.
Whenever another process touches the socket by either reading or
writing to it, we will change the socket classid to that of the
process if it has a valid (non-zero) classid.

For sockets created on inbound connections through accept(2), we
inherit the classid of the original listening socket through
sk_clone, possibly preceding the actual accept(2) call.

In order to minimise risks, I have not made this the authoritative
classid. For now it is only used as a backup when we execute
with soft IRQs disabled. Once we're completely happy with its
semantics we can use it as the sole classid.

Footnote: I have rearranged the error path on cls_cgroup module
creation. If we didn't do this, then there would be a window where
someone could create a tc rule using cls_cgroup before the cgroup
subsystem has been registered.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by Herbert Xu and committed by David S. Miller f8451725 eda6e6f8

+133 -17
+63
include/net/cls_cgroup.h
··· 1 + /* 2 + * cls_cgroup.h Control Group Classifier 3 + * 4 + * Authors: Thomas Graf <tgraf@suug.ch> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the Free 8 + * Software Foundation; either version 2 of the License, or (at your option) 9 + * any later version. 10 + * 11 + */ 12 + 13 + #ifndef _NET_CLS_CGROUP_H 14 + #define _NET_CLS_CGROUP_H 15 + 16 + #include <linux/cgroup.h> 17 + #include <linux/hardirq.h> 18 + #include <linux/rcupdate.h> 19 + 20 + #ifdef CONFIG_CGROUPS 21 + struct cgroup_cls_state 22 + { 23 + struct cgroup_subsys_state css; 24 + u32 classid; 25 + }; 26 + 27 + #ifdef CONFIG_NET_CLS_CGROUP 28 + static inline u32 task_cls_classid(struct task_struct *p) 29 + { 30 + if (in_interrupt()) 31 + return 0; 32 + 33 + return container_of(task_subsys_state(p, net_cls_subsys_id), 34 + struct cgroup_cls_state, css).classid; 35 + } 36 + #else 37 + extern int net_cls_subsys_id; 38 + 39 + static inline u32 task_cls_classid(struct task_struct *p) 40 + { 41 + int id; 42 + u32 classid; 43 + 44 + if (in_interrupt()) 45 + return 0; 46 + 47 + rcu_read_lock(); 48 + id = rcu_dereference(net_cls_subsys_id); 49 + if (id >= 0) 50 + classid = container_of(task_subsys_state(p, id), 51 + struct cgroup_cls_state, css)->classid; 52 + rcu_read_unlock(); 53 + 54 + return classid; 55 + } 56 + #endif 57 + #else 58 + static inline u32 task_cls_classid(struct task_struct *p) 59 + { 60 + return 0; 61 + } 62 + #endif 63 + #endif /* _NET_CLS_CGROUP_H */
+9 -1
include/net/sock.h
··· 312 312 void *sk_security; 313 313 #endif 314 314 __u32 sk_mark; 315 - /* XXX 4 bytes hole on 64 bit */ 315 + u32 sk_classid; 316 316 void (*sk_state_change)(struct sock *sk); 317 317 void (*sk_data_ready)(struct sock *sk, int bytes); 318 318 void (*sk_write_space)(struct sock *sk); ··· 1073 1073 gfp_t priority); 1074 1074 extern void sock_kfree_s(struct sock *sk, void *mem, int size); 1075 1075 extern void sk_send_sigurg(struct sock *sk); 1076 + 1077 + #ifdef CONFIG_CGROUPS 1078 + extern void sock_update_classid(struct sock *sk); 1079 + #else 1080 + static inline void sock_update_classid(struct sock *sk) 1081 + { 1082 + } 1083 + #endif 1076 1084 1077 1085 /* 1078 1086 * Functions to fill in entries in struct proto_ops when a protocol
+18
net/core/sock.c
··· 123 123 #include <linux/net_tstamp.h> 124 124 #include <net/xfrm.h> 125 125 #include <linux/ipsec.h> 126 + #include <net/cls_cgroup.h> 126 127 127 128 #include <linux/filter.h> 128 129 ··· 217 216 /* Maximal space eaten by iovec or ancilliary data plus some space */ 218 217 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); 219 218 EXPORT_SYMBOL(sysctl_optmem_max); 219 + 220 + #if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP) 221 + int net_cls_subsys_id = -1; 222 + EXPORT_SYMBOL_GPL(net_cls_subsys_id); 223 + #endif 220 224 221 225 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) 222 226 { ··· 1056 1050 module_put(owner); 1057 1051 } 1058 1052 1053 + #ifdef CONFIG_CGROUPS 1054 + void sock_update_classid(struct sock *sk) 1055 + { 1056 + u32 classid = task_cls_classid(current); 1057 + 1058 + if (classid && classid != sk->sk_classid) 1059 + sk->sk_classid = classid; 1060 + } 1061 + #endif 1062 + 1059 1063 /** 1060 1064 * sk_alloc - All socket objects are allocated here 1061 1065 * @net: the applicable net namespace ··· 1089 1073 sock_lock_init(sk); 1090 1074 sock_net_set(sk, get_net(net)); 1091 1075 atomic_set(&sk->sk_wmem_alloc, 1); 1076 + 1077 + sock_update_classid(sk); 1092 1078 } 1093 1079 1094 1080 return sk;
+34 -16
net/sched/cls_cgroup.c
··· 16 16 #include <linux/errno.h> 17 17 #include <linux/skbuff.h> 18 18 #include <linux/cgroup.h> 19 + #include <linux/rcupdate.h> 19 20 #include <net/rtnetlink.h> 20 21 #include <net/pkt_cls.h> 21 - 22 - struct cgroup_cls_state 23 - { 24 - struct cgroup_subsys_state css; 25 - u32 classid; 26 - }; 22 + #include <net/sock.h> 23 + #include <net/cls_cgroup.h> 27 24 28 25 static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, 29 26 struct cgroup *cgrp); ··· 109 112 struct cls_cgroup_head *head = tp->root; 110 113 u32 classid; 111 114 115 + rcu_read_lock(); 116 + classid = task_cls_state(current)->classid; 117 + rcu_read_unlock(); 118 + 112 119 /* 113 120 * Due to the nature of the classifier it is required to ignore all 114 121 * packets originating from softirq context as accessing `current' ··· 123 122 * calls by looking at the number of nested bh disable calls because 124 123 * softirqs always disables bh. 125 124 */ 126 - if (softirq_count() != SOFTIRQ_OFFSET) 127 - return -1; 128 - 129 - rcu_read_lock(); 130 - classid = task_cls_state(current)->classid; 131 - rcu_read_unlock(); 125 + if (softirq_count() != SOFTIRQ_OFFSET) { 126 + /* If there is an sk_classid we'll use that. */ 127 + if (!skb->sk) 128 + return -1; 129 + classid = skb->sk->sk_classid; 130 + } 132 131 133 132 if (!classid) 134 133 return -1; ··· 290 289 291 290 static int __init init_cgroup_cls(void) 292 291 { 293 - int ret = register_tcf_proto_ops(&cls_cgroup_ops); 294 - if (ret) 295 - return ret; 292 + int ret; 293 + 296 294 ret = cgroup_load_subsys(&net_cls_subsys); 297 295 if (ret) 298 - unregister_tcf_proto_ops(&cls_cgroup_ops); 296 + goto out; 297 + 298 + #ifndef CONFIG_NET_CLS_CGROUP 299 + /* We can't use rcu_assign_pointer because this is an int. 
*/ 300 + smp_wmb(); 301 + net_cls_subsys_id = net_cls_subsys.subsys_id; 302 + #endif 303 + 304 + ret = register_tcf_proto_ops(&cls_cgroup_ops); 305 + if (ret) 306 + cgroup_unload_subsys(&net_cls_subsys); 307 + 308 + out: 299 309 return ret; 300 310 } 301 311 302 312 static void __exit exit_cgroup_cls(void) 303 313 { 304 314 unregister_tcf_proto_ops(&cls_cgroup_ops); 315 + 316 + #ifndef CONFIG_NET_CLS_CGROUP 317 + net_cls_subsys_id = -1; 318 + synchronize_rcu(); 319 + #endif 320 + 305 321 cgroup_unload_subsys(&net_cls_subsys); 306 322 } 307 323
+9
net/socket.c
··· 94 94 95 95 #include <net/compat.h> 96 96 #include <net/wext.h> 97 + #include <net/cls_cgroup.h> 97 98 98 99 #include <net/sock.h> 99 100 #include <linux/netfilter.h> ··· 559 558 struct sock_iocb *si = kiocb_to_siocb(iocb); 560 559 int err; 561 560 561 + sock_update_classid(sock->sk); 562 + 562 563 si->sock = sock; 563 564 si->scm = NULL; 564 565 si->msg = msg; ··· 687 684 { 688 685 struct sock_iocb *si = kiocb_to_siocb(iocb); 689 686 687 + sock_update_classid(sock->sk); 688 + 690 689 si->sock = sock; 691 690 si->scm = NULL; 692 691 si->msg = msg; ··· 781 776 782 777 if (unlikely(!sock->ops->splice_read)) 783 778 return -EINVAL; 779 + 780 + sock_update_classid(sock->sk); 784 781 785 782 return sock->ops->splice_read(sock, ppos, pipe, len, flags); 786 783 } ··· 3076 3069 int kernel_sendpage(struct socket *sock, struct page *page, int offset, 3077 3070 size_t size, int flags) 3078 3071 { 3072 + sock_update_classid(sock->sk); 3073 + 3079 3074 if (sock->ops->sendpage) 3080 3075 return sock->ops->sendpage(sock, page, offset, size, flags); 3081 3076