/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
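
/*
 * A minimal sketch of how the key is maintained (the bookkeeping lives in
 * kernel/bpf/cgroup.c, not in this header):
 *
 *	static_branch_inc(&cgroup_bpf_enabled_key);	on attach
 *	static_branch_dec(&cgroup_bpf_enabled_key);	on detach
 *
 * Until the key is enabled, every BPF_CGROUP_RUN_* macro below compiles
 * down to a patched-out no-op branch.
 */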

DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[0];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* Programs attached to this cgroup, together with their attach
	 * flags. When flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list
	 * holds either zero or one element; with BPF_F_ALLOW_MULTI it can
	 * hold up to BPF_CGROUP_MAX_PROGS elements.
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for the effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};
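
/*
 * A hedged illustration of the flag semantics (the cgroup paths are
 * hypothetical): with flags == 0 on /sys/fs/cgroup/a, no descendant such
 * as /sys/fs/cgroup/a/b may attach a program of the same type at all;
 * with BPF_F_ALLOW_OVERRIDE, a descendant may attach its own program and
 * it replaces the inherited one; with BPF_F_ALLOW_MULTI, programs
 * accumulate, and the effective array runs every program attached along
 * the path from the root down to the cgroup in question.
 */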

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for the __cgroup_bpf_*() functions above, protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
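
/*
 * A hedged userspace sketch (fd values hypothetical) of how the attach
 * path above is driven through the bpf(2) syscall; BPF_PROG_ATTACH lands
 * in cgroup_bpf_attach() with cgroup_mutex held:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;	fd of a cgroup v2 directory
 *	attr.attach_bpf_fd = prog_fd;	a loaded BPF_PROG_TYPE_CGROUP_SKB prog
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */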

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type);
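
/*
 * Note on new_buf: on a sysctl write, a BPF_CGROUP_SYSCTL program may
 * replace the value being written. The replacement comes back through
 * *new_buf as a kernel allocation that the caller is expected to use in
 * place of the user buffer and then kfree().
 */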

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
					  *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}
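
/*
 * A minimal sketch of the intended calling sequence, modeled on the
 * BPF_PROG_RUN_ARRAY machinery in <linux/bpf.h> (the item variable is an
 * assumption for illustration): the per-cpu storage slots are published
 * just before the program runs, so the cgroup storage map helpers can
 * find them:
 *
 *	bpf_cgroup_storage_set(item->cgroup_storage);
 *	ret = BPF_PROG_RUN(item->prog, ctx);
 */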

struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
						    BPF_CGROUP_INET_INGRESS); \
 \
	__ret; \
})
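
/*
 * Hedged usage sketch: the real ingress call site sits in the socket
 * filter path (sk_filter_trim_cap() in net/core/filter.c), where a
 * non-zero return is treated as a drop:
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		return err;
 */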

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk); \
		if (sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
							    BPF_CGROUP_INET_EGRESS); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) { \
		__ret = __cgroup_bpf_run_filter_sk(sk, type); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
							  NULL); \
	__ret; \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) { \
		lock_sock(sk); \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
							  t_ctx); \
		release_sock(sk); \
	} \
	__ret; \
})
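
/*
 * The _LOCK variant runs the program under lock_sock(), so callers must
 * not already hold the socket lock; sock_addr programs for connect() and
 * sendmsg() may rewrite the destination address, and that must not race
 * with other socket state changes. A hypothetical caller sketch:
 *
 *	err = BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
 *	if (err)
 *		return err;
 */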

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled && (sock_ops)->sk) { \
		typeof((sock_ops)->sk) __sk = sk_to_full_sk((sock_ops)->sk); \
		if (__sk && sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
								 sock_ops, \
								 BPF_CGROUP_SOCK_OPS); \
	} \
	__ret; \
})
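
/*
 * sk_to_full_sk() maps a request socket to its listener; sk_fullsock()
 * then rejects anything (a time-wait socket, for instance) that still is
 * not a full socket, because sock_ops programs touch fields that only
 * exist on full sockets.
 */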

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access, \
							  BPF_CGROUP_DEVICE); \
 \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
						       buf, count, pos, nbuf, \
						       BPF_CGROUP_SYSCTL); \
	__ret; \
})
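
/*
 * Hedged usage sketch, modeled on the sysctl proc handler in
 * fs/proc/proc_sysctl.c (variable names abbreviated): the hook runs
 * before the table's proc_handler and can deny the access or, via nbuf,
 * substitute the value being written:
 *
 *	error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, &count,
 *					   &ppos, &new_buf);
 *	if (error)
 *		goto out;
 */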

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value,
						   u64 flags) {
	return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */