cls_cgroup: Store classid in struct sock

Up until now cls_cgroup has relied on fetching the classid out of
the current executing thread. This runs into trouble when packet
processing is delayed, in which case it may execute in the context
of another thread.

Furthermore, even when a packet is not delayed we may fail to
classify it if soft IRQs have been disabled, because this scenario
is indistinguishable from one where a packet unrelated to the
current thread is processed by a real soft IRQ.

In fact, the current semantics are inherently broken, as a single
skb may be constructed out of the writes of two different tasks.
A different manifestation of this problem is when the TCP stack
transmits in response to an incoming ACK. This is currently
unclassified.

As we already have a concept of packet ownership for accounting
purposes in the skb->sk pointer, this is a natural place to store
the classid in a persistent manner.

This patch adds the cls_cgroup classid in struct sock, filling up
an existing hole on 64-bit :)

The value is set at socket creation time. So all sockets created
via socket(2) automatically gain the ID of the thread creating them.
Whenever another process touches the socket by either reading or
writing to it, we will change the socket classid to that of the
process if it has a valid (non-zero) classid.

For sockets created on inbound connections through accept(2), we
inherit the classid of the original listening socket through
sk_clone, possibly preceding the actual accept(2) call.

In order to minimise risks, I have not made this the authoritative
classid. For now it is only used as a backup when we execute
with soft IRQs disabled. Once we're completely happy with its
semantics we can use it as the sole classid.

Footnote: I have rearranged the error path on cls_cgroup module
creation. If we didn't do this, then there would be a window where
someone could create a tc rule using cls_cgroup before the cgroup
subsystem has been registered.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by Herbert Xu and committed by David S. Miller f8451725 eda6e6f8

+133 -17
+63
include/net/cls_cgroup.h
···
··· 1 + /* 2 + * cls_cgroup.h Control Group Classifier 3 + * 4 + * Authors: Thomas Graf <tgraf@suug.ch> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the Free 8 + * Software Foundation; either version 2 of the License, or (at your option) 9 + * any later version. 10 + * 11 + */ 12 + 13 + #ifndef _NET_CLS_CGROUP_H 14 + #define _NET_CLS_CGROUP_H 15 + 16 + #include <linux/cgroup.h> 17 + #include <linux/hardirq.h> 18 + #include <linux/rcupdate.h> 19 + 20 + #ifdef CONFIG_CGROUPS 21 + struct cgroup_cls_state 22 + { 23 + struct cgroup_subsys_state css; 24 + u32 classid; 25 + }; 26 + 27 + #ifdef CONFIG_NET_CLS_CGROUP 28 + static inline u32 task_cls_classid(struct task_struct *p) 29 + { 30 + if (in_interrupt()) 31 + return 0; 32 + 33 + return container_of(task_subsys_state(p, net_cls_subsys_id), 34 + struct cgroup_cls_state, css).classid; 35 + } 36 + #else 37 + extern int net_cls_subsys_id; 38 + 39 + static inline u32 task_cls_classid(struct task_struct *p) 40 + { 41 + int id; 42 + u32 classid; 43 + 44 + if (in_interrupt()) 45 + return 0; 46 + 47 + rcu_read_lock(); 48 + id = rcu_dereference(net_cls_subsys_id); 49 + if (id >= 0) 50 + classid = container_of(task_subsys_state(p, id), 51 + struct cgroup_cls_state, css)->classid; 52 + rcu_read_unlock(); 53 + 54 + return classid; 55 + } 56 + #endif 57 + #else 58 + static inline u32 task_cls_classid(struct task_struct *p) 59 + { 60 + return 0; 61 + } 62 + #endif 63 + #endif /* _NET_CLS_CGROUP_H */
+9 -1
include/net/sock.h
··· 312 void *sk_security; 313 #endif 314 __u32 sk_mark; 315 - /* XXX 4 bytes hole on 64 bit */ 316 void (*sk_state_change)(struct sock *sk); 317 void (*sk_data_ready)(struct sock *sk, int bytes); 318 void (*sk_write_space)(struct sock *sk); ··· 1073 gfp_t priority); 1074 extern void sock_kfree_s(struct sock *sk, void *mem, int size); 1075 extern void sk_send_sigurg(struct sock *sk); 1076 1077 /* 1078 * Functions to fill in entries in struct proto_ops when a protocol
··· 312 void *sk_security; 313 #endif 314 __u32 sk_mark; 315 + u32 sk_classid; 316 void (*sk_state_change)(struct sock *sk); 317 void (*sk_data_ready)(struct sock *sk, int bytes); 318 void (*sk_write_space)(struct sock *sk); ··· 1073 gfp_t priority); 1074 extern void sock_kfree_s(struct sock *sk, void *mem, int size); 1075 extern void sk_send_sigurg(struct sock *sk); 1076 + 1077 + #ifdef CONFIG_CGROUPS 1078 + extern void sock_update_classid(struct sock *sk); 1079 + #else 1080 + static inline void sock_update_classid(struct sock *sk) 1081 + { 1082 + } 1083 + #endif 1084 1085 /* 1086 * Functions to fill in entries in struct proto_ops when a protocol
+18
net/core/sock.c
··· 123 #include <linux/net_tstamp.h> 124 #include <net/xfrm.h> 125 #include <linux/ipsec.h> 126 127 #include <linux/filter.h> 128 ··· 217 /* Maximal space eaten by iovec or ancilliary data plus some space */ 218 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); 219 EXPORT_SYMBOL(sysctl_optmem_max); 220 221 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) 222 { ··· 1056 module_put(owner); 1057 } 1058 1059 /** 1060 * sk_alloc - All socket objects are allocated here 1061 * @net: the applicable net namespace ··· 1089 sock_lock_init(sk); 1090 sock_net_set(sk, get_net(net)); 1091 atomic_set(&sk->sk_wmem_alloc, 1); 1092 } 1093 1094 return sk;
··· 123 #include <linux/net_tstamp.h> 124 #include <net/xfrm.h> 125 #include <linux/ipsec.h> 126 + #include <net/cls_cgroup.h> 127 128 #include <linux/filter.h> 129 ··· 216 /* Maximal space eaten by iovec or ancilliary data plus some space */ 217 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); 218 EXPORT_SYMBOL(sysctl_optmem_max); 219 + 220 + #if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP) 221 + int net_cls_subsys_id = -1; 222 + EXPORT_SYMBOL_GPL(net_cls_subsys_id); 223 + #endif 224 225 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) 226 { ··· 1050 module_put(owner); 1051 } 1052 1053 + #ifdef CONFIG_CGROUPS 1054 + void sock_update_classid(struct sock *sk) 1055 + { 1056 + u32 classid = task_cls_classid(current); 1057 + 1058 + if (classid && classid != sk->sk_classid) 1059 + sk->sk_classid = classid; 1060 + } 1061 + #endif 1062 + 1063 /** 1064 * sk_alloc - All socket objects are allocated here 1065 * @net: the applicable net namespace ··· 1073 sock_lock_init(sk); 1074 sock_net_set(sk, get_net(net)); 1075 atomic_set(&sk->sk_wmem_alloc, 1); 1076 + 1077 + sock_update_classid(sk); 1078 } 1079 1080 return sk;
+34 -16
net/sched/cls_cgroup.c
··· 16 #include <linux/errno.h> 17 #include <linux/skbuff.h> 18 #include <linux/cgroup.h> 19 #include <net/rtnetlink.h> 20 #include <net/pkt_cls.h> 21 - 22 - struct cgroup_cls_state 23 - { 24 - struct cgroup_subsys_state css; 25 - u32 classid; 26 - }; 27 28 static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, 29 struct cgroup *cgrp); ··· 109 struct cls_cgroup_head *head = tp->root; 110 u32 classid; 111 112 /* 113 * Due to the nature of the classifier it is required to ignore all 114 * packets originating from softirq context as accessing `current' ··· 123 * calls by looking at the number of nested bh disable calls because 124 * softirqs always disables bh. 125 */ 126 - if (softirq_count() != SOFTIRQ_OFFSET) 127 - return -1; 128 - 129 - rcu_read_lock(); 130 - classid = task_cls_state(current)->classid; 131 - rcu_read_unlock(); 132 133 if (!classid) 134 return -1; ··· 290 291 static int __init init_cgroup_cls(void) 292 { 293 - int ret = register_tcf_proto_ops(&cls_cgroup_ops); 294 - if (ret) 295 - return ret; 296 ret = cgroup_load_subsys(&net_cls_subsys); 297 if (ret) 298 - unregister_tcf_proto_ops(&cls_cgroup_ops); 299 return ret; 300 } 301 302 static void __exit exit_cgroup_cls(void) 303 { 304 unregister_tcf_proto_ops(&cls_cgroup_ops); 305 cgroup_unload_subsys(&net_cls_subsys); 306 } 307
··· 16 #include <linux/errno.h> 17 #include <linux/skbuff.h> 18 #include <linux/cgroup.h> 19 + #include <linux/rcupdate.h> 20 #include <net/rtnetlink.h> 21 #include <net/pkt_cls.h> 22 + #include <net/sock.h> 23 + #include <net/cls_cgroup.h> 24 25 static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, 26 struct cgroup *cgrp); ··· 112 struct cls_cgroup_head *head = tp->root; 113 u32 classid; 114 115 + rcu_read_lock(); 116 + classid = task_cls_state(current)->classid; 117 + rcu_read_unlock(); 118 + 119 /* 120 * Due to the nature of the classifier it is required to ignore all 121 * packets originating from softirq context as accessing `current' ··· 122 * calls by looking at the number of nested bh disable calls because 123 * softirqs always disables bh. 124 */ 125 + if (softirq_count() != SOFTIRQ_OFFSET) { 126 + /* If there is an sk_classid we'll use that. */ 127 + if (!skb->sk) 128 + return -1; 129 + classid = skb->sk->sk_classid; 130 + } 131 132 if (!classid) 133 return -1; ··· 289 290 static int __init init_cgroup_cls(void) 291 { 292 + int ret; 293 + 294 ret = cgroup_load_subsys(&net_cls_subsys); 295 if (ret) 296 + goto out; 297 + 298 + #ifndef CONFIG_NET_CLS_CGROUP 299 + /* We can't use rcu_assign_pointer because this is an int. */ 300 + smp_wmb(); 301 + net_cls_subsys_id = net_cls_subsys.subsys_id; 302 + #endif 303 + 304 + ret = register_tcf_proto_ops(&cls_cgroup_ops); 305 + if (ret) 306 + cgroup_unload_subsys(&net_cls_subsys); 307 + 308 + out: 309 return ret; 310 } 311 312 static void __exit exit_cgroup_cls(void) 313 { 314 unregister_tcf_proto_ops(&cls_cgroup_ops); 315 + 316 + #ifndef CONFIG_NET_CLS_CGROUP 317 + net_cls_subsys_id = -1; 318 + synchronize_rcu(); 319 + #endif 320 + 321 cgroup_unload_subsys(&net_cls_subsys); 322 } 323
+9
net/socket.c
··· 94 95 #include <net/compat.h> 96 #include <net/wext.h> 97 98 #include <net/sock.h> 99 #include <linux/netfilter.h> ··· 559 struct sock_iocb *si = kiocb_to_siocb(iocb); 560 int err; 561 562 si->sock = sock; 563 si->scm = NULL; 564 si->msg = msg; ··· 687 { 688 struct sock_iocb *si = kiocb_to_siocb(iocb); 689 690 si->sock = sock; 691 si->scm = NULL; 692 si->msg = msg; ··· 781 782 if (unlikely(!sock->ops->splice_read)) 783 return -EINVAL; 784 785 return sock->ops->splice_read(sock, ppos, pipe, len, flags); 786 } ··· 3076 int kernel_sendpage(struct socket *sock, struct page *page, int offset, 3077 size_t size, int flags) 3078 { 3079 if (sock->ops->sendpage) 3080 return sock->ops->sendpage(sock, page, offset, size, flags); 3081
··· 94 95 #include <net/compat.h> 96 #include <net/wext.h> 97 + #include <net/cls_cgroup.h> 98 99 #include <net/sock.h> 100 #include <linux/netfilter.h> ··· 558 struct sock_iocb *si = kiocb_to_siocb(iocb); 559 int err; 560 561 + sock_update_classid(sock->sk); 562 + 563 si->sock = sock; 564 si->scm = NULL; 565 si->msg = msg; ··· 684 { 685 struct sock_iocb *si = kiocb_to_siocb(iocb); 686 687 + sock_update_classid(sock->sk); 688 + 689 si->sock = sock; 690 si->scm = NULL; 691 si->msg = msg; ··· 776 777 if (unlikely(!sock->ops->splice_read)) 778 return -EINVAL; 779 + 780 + sock_update_classid(sock->sk); 781 782 return sock->ops->splice_read(sock, ppos, pipe, len, flags); 783 } ··· 3069 int kernel_sendpage(struct socket *sock, struct page *page, int offset, 3070 size_t size, int flags) 3071 { 3072 + sock_update_classid(sock->sk); 3073 + 3074 if (sock->ops->sendpage) 3075 return sock->ops->sendpage(sock, page, offset, size, flags); 3076