Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _BPF_CGROUP_H
3#define _BPF_CGROUP_H
4
5#include <linux/bpf.h>
6#include <linux/errno.h>
7#include <linux/jump_label.h>
8#include <linux/percpu.h>
9#include <linux/percpu-refcount.h>
10#include <linux/rbtree.h>
11#include <uapi/linux/bpf.h>
12
13struct sock;
14struct sockaddr;
15struct cgroup;
16struct sk_buff;
17struct bpf_map;
18struct bpf_prog;
19struct bpf_sock_ops_kern;
20struct bpf_cgroup_storage;
21struct ctl_table;
22struct ctl_table_header;
23
24#ifdef CONFIG_CGROUP_BPF
25
/* Static key: lets every cgroup-BPF hook compile to a no-op branch when
 * no programs are attached anywhere in the system.
 */
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

/* Per-cpu pointers to the storage of the currently running program,
 * published by bpf_cgroup_storage_set() just before program invocation.
 */
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

/* Iterate @stype over every cgroup storage type (shared and per-cpu). */
#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;
36
/* Variable-sized data buffer for shared cgroup storage; reclaimed via RCU
 * (struct rcu_head) so readers can access it locklessly.
 */
struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];		/* flexible array, sized by the owning map */
};
41
/* One cgroup storage instance. The union holds the actual data buffer,
 * selected by the owning map's storage type (see cgroup_storage_type()).
 */
struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;	/* BPF_CGROUP_STORAGE_SHARED */
		void __percpu *percpu_buf;	/* BPF_CGROUP_STORAGE_PERCPU */
	};
	struct bpf_cgroup_storage_map *map;	/* map this storage belongs to */
	struct bpf_cgroup_storage_key key;	/* lookup key within the map */
	struct list_head list;		/* linked via bpf_cgroup_storage_link() */
	struct rb_node node;		/* node in the storage map's rbtree */
	struct rcu_head rcu;		/* deferred free */
};
53
/* bpf_link flavor that pins a program to a cgroup at one attach point. */
struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;		/* cgroup the program is attached to */
	enum bpf_attach_type type;	/* e.g. BPF_CGROUP_INET_EGRESS */
};
59
/* Element of cgroup_bpf::progs[]: one attachment, either made directly
 * with a prog or through a bpf_link (attach/detach take one or the other;
 * presumably exactly one of @prog/@link is meaningful per entry — see
 * __cgroup_bpf_attach()/__cgroup_bpf_detach() signatures).
 */
struct bpf_prog_list {
	struct list_head node;		/* linkage into cgroup_bpf::progs[type] */
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	/* per-type storage used while this program runs */
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
66
struct bpf_prog_array;

/* Per-cgroup BPF state, embedded in struct cgroup. */
struct cgroup_bpf {
	/* array of effective progs in this cgroup (own progs plus those
	 * inherited from ancestors), one RCU-protected array per hook
	 */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array *inactive;

	/* reference counter used to detach bpf programs after cgroup removal */
	struct percpu_ref refcnt;

	/* cgroup_bpf is released using a work queue */
	struct work_struct release_work;
};
90
/* Initialize a new cgroup's BPF state (inheriting ancestors' effective
 * progs) and tear it down when the cgroup goes offline.
 */
int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

/* Low-level attach/detach/query; per the wrapper comment below these
 * expect cgroup_mutex to already be held by the caller.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp,
		      struct bpf_prog *prog, struct bpf_prog *replace_prog,
		      struct bpf_cgroup_link *link, enum bpf_attach_type type,
		      u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
113
/* Per-hook runners: execute the effective programs of the socket's cgroup
 * for the given attach @type.  NOTE(review): the exact return contract
 * (0 = allow, negative errno = reject) is not visible in this header —
 * confirm against kernel/bpf/cgroup.c.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

/* @t_ctx carries optional hook-specific context (e.g. for UDP sendmsg). */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

/* Device cgroup check: @access is the requested mknod/read/write mask. */
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

/* Sysctl hook: may replace *buf and adjust *pcount/*ppos on write. */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void **buf, size_t *pcount, loff_t *ppos,
				   enum bpf_attach_type type);

/* Sockopt hooks: setsockopt may rewrite the option into *kernel_optval;
 * getsockopt receives the kernel's @retval and may override the result.
 */
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);
145
146static inline enum bpf_cgroup_storage_type cgroup_storage_type(
147 struct bpf_map *map)
148{
149 if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
150 return BPF_CGROUP_STORAGE_PERCPU;
151
152 return BPF_CGROUP_STORAGE_SHARED;
153}
154
155static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
156 *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
157{
158 enum bpf_cgroup_storage_type stype;
159
160 for_each_cgroup_storage_type(stype)
161 this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
162}
163
/* Lifetime management of cgroup storage: allocation/free, linking a
 * storage instance to a (cgroup, attach type) pair, and binding a
 * storage map to a loaded program (aux).
 */
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);

/* Syscall helpers for per-cpu cgroup storage map lookup/update. */
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);
177
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */

/* Ingress: run BPF_CGROUP_INET_INGRESS progs for the receiving socket. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

/* Egress: only run when @sk is the skb's own socket (sk == skb->sk,
 * presumably skipping forwarded traffic — confirm against callers), and
 * only for full sockets; request sockets are mapped to the listener via
 * sk_to_full_sk().
 */
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})
200
/* Run the socket-level progs of @type for @sk; 0 when cgroup BPF is off. */
#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

/* Socket-create and per-family post-bind hooks. */
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
218
/* Run sockaddr progs of @type for @sk/@uaddr without extra context. */
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  NULL);	       \
	__ret;								       \
})

/* Same, but takes the socket lock around the run and passes @t_ctx
 * (hook-specific context) through to the programs.
 */
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)	{					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

/* Per-hook wrappers: bind/connect/sendmsg/recvmsg for IPv4 and IPv6. */
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

/* True when a connect hook might run and the protocol supports the
 * pre_connect callback.
 */
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
272
/* Run attached BPF_CGROUP_SOCK_OPS programs for (sock_ops)->sk.  Only
 * full sockets are eligible, so request/timewait sockets are first
 * mapped to the corresponding full socket via sk_to_full_sk().
 *
 * Fix: declare __sk as struct sock * instead of typeof(sk).  This macro
 * has no "sk" argument, so typeof(sk) silently required a variable
 * literally named "sk" to be in scope at every call site and picked up
 * whatever type it happened to have.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
285
/* Device cgroup check for mknod/open on a char or block device. */
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	      \
							  BPF_CGROUP_DEVICE); \
									      \
	__ret;								      \
})


/* Sysctl hook: programs may inspect/replace the value buffer on write. */
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)       \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,    \
						       buf, count, pos,       \
						       BPF_CGROUP_SYSCTL);    \
	__ret;								      \
})
307
/* setsockopt hook: progs may rewrite level/optname/optlen and supply a
 * kernel-side replacement value via @kernel_optval.
 */
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen,	       \
							   kernel_optval);     \
	__ret;								       \
})

/* Read the user-supplied optlen to size the getsockopt buffer.
 * NOTE(review): get_user()'s return value is ignored; on a faulting
 * @optlen pointer __ret is left as 0 rather than reporting -EFAULT —
 * confirm callers handle a 0 max_optlen correctly.
 */
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		get_user(__ret, optlen);				       \
	__ret;								       \
})

/* getsockopt hook: runs after the kernel handler; progs see the kernel's
 * @retval and may override the returned value/length.
 */
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_getsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen, max_optlen, \
							   retval);	       \
	__ret;								       \
})
339
/* Entry points for the bpf(2) BPF_PROG_ATTACH/DETACH/QUERY and
 * BPF_LINK_CREATE commands targeting cgroups.
 */
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
347#else
348
/* CONFIG_CGROUP_BPF=n: stubs so callers compile without any cgroup-BPF
 * machinery.  Attach-family operations report -EINVAL; storage helpers
 * are no-ops returning success/NULL.
 */
struct bpf_prog;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value, u64 flags) {
	return 0;
}
397
/* With cgroup BPF compiled out, every hook wrapper collapses to a
 * constant "no program ran" result: 0 for the run/check macros, the
 * kernel's own @retval for getsockopt.
 */
#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

/* Empty loop body: iterates zero times when cgroup BPF is compiled out. */
#define for_each_cgroup_storage_type(stype) for (; false; )
426
427#endif /* CONFIG_CGROUP_BPF */
428
429#endif /* _BPF_CGROUP_H */