/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
};
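
/* As a rough illustration of how a map type wires into these ops, a
 * minimal (hypothetical) map implementation could be sketched like this;
 * example_map_alloc() and friends are made-up names, and a real map type
 * additionally registers itself via BPF_MAP_TYPE() in bpf_types.h:
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_alloc_check	= example_map_alloc_check,
 *		.map_alloc		= example_map_alloc,
 *		.map_free		= example_map_free,
 *		.map_get_next_key	= example_map_get_next_key,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *	};
 */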

struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	char name[BPF_OBJ_NAME_LEN];
	bool unpriv_array;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
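
/* For instance (a hedged sketch, not taken verbatim from any map
 * implementation): if a map value is laid out as
 * { u32 a; struct bpf_spin_lock lock; u32 b; }, copy_map_value() copies
 * 'a', skips sizeof(struct bpf_spin_lock) bytes at map->spin_lock_off,
 * then copies 'b', so the destination's lock word is never clobbered by
 * the source's:
 *
 *	check_and_init_map_lock(map, dst);	// zero-init dst's lock once
 *	copy_map_value(map, dst, src);		// copy everything but the lock
 */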

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return map->btf && map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL,	/* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	int *btf_id; /* BTF ids of arguments */
};
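
/* As a concrete illustration, kernel/bpf/helpers.c defines helper
 * prototypes in roughly this shape (a sketch; exact field values may
 * differ between trees):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */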

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	PTR_TO_BTF_ID,		 /* reg points to kernel struct */
};
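
/* To make the range rule above concrete: the classic bounds check a
 * (hypothetical) XDP program performs is exactly what turns an unchecked
 * PTR_TO_PACKET into one with a range proven safe against
 * PTR_TO_PACKET_END:
 *
 *	void *data = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *	struct ethhdr *eth = data;
 *
 *	if (data + sizeof(*eth) > data_end)
 *		return XDP_DROP;
 *	// eth may now be dereferenced for sizeof(*eth) bytes
 */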

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		u32 btf_id;
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
};
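
/* A hedged sketch of the shape an is_valid_access() callback takes; the
 * bounds and the "example_ctx" type below are illustrative, not copied
 * from any one program type:
 *
 *	static bool example_is_valid_access(int off, int size,
 *					    enum bpf_access_type type,
 *					    const struct bpf_prog *prog,
 *					    struct bpf_insn_access_aux *info)
 *	{
 *		if (off < 0 || off + size > sizeof(struct example_ctx))
 *			return false;
 *		if (type == BPF_WRITE)
 *			return false;	// read-only context in this sketch
 *		return true;
 *	}
 */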

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
				struct bpf_prog **fentry_progs, int fentry_cnt,
				struct bpf_prog **fexit_progs, int fexit_cnt,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(void);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MAX
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
	} func;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	void *image;
	u64 selector;
};
#ifdef CONFIG_BPF_JIT
struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	return NULL;
}
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#endif

struct bpf_func_info_aux {
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *ip;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool ip_stable;
	u8 adj_off;
	u16 reason;
};

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	struct bpf_prog *linked_prog;
	bool verifier_zext; /* Zero extensions have been inserted by the verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	enum bpf_tramp_prog_type trampoline_prog_type;
	struct bpf_trampoline *trampoline;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	u32 size_poke_tab;
	struct latch_tree_node ksym_tnode;
	struct list_head ksym_lnode;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map, to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	enum bpf_prog_type type;
	bool jited;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
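
/* Spelled out (a hedged sketch of a typical call site, not verbatim
 * kernel code): a map created with BPF_F_RDONLY_PROG maps to
 * BPF_MAP_CAN_READ only, so a verifier-side check for a helper that
 * writes the map could look like:
 *
 *	if (!(bpf_map_flags_to_cap(map) & BPF_MAP_CAN_WRITE))
 *		return -EACCES;
 *
 * while bpf_map_flags_access_ok() rejects the contradictory case of both
 * BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG being set at map creation.
 */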

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const char *kernel_type_name(u32 btf_type_id);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[0];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			_ret &= func(_prog, ctx);	\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		preempt_enable();			\
		_ret;					\
	})

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order
 * bit indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr().
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 ret;				\
		u32 _ret = 1;				\
		u32 _cn = 0;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			ret = func(_prog, ctx);		\
			_ret &= (ret & 1);		\
			_cn |= (ret & 2);		\
			_item++;			\
		}					\
		rcu_read_unlock();			\
		preempt_enable();			\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)
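
/* As a usage sketch (hypothetical caller; real call sites live in
 * kernel/bpf/cgroup.c and similar): all attached programs run under RCU
 * with preemption disabled, and the result is the AND of their return
 * codes, so any one program returning 0 vetoes the rest:
 *
 *	u32 ret;
 *
 *	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, BPF_PROG_RUN);
 *	if (ret != 1)
 *		return -EPERM;
 */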

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);

extern int sysctl_unprivileged_bpf_disabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
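
/* For example (a hedged sketch of the per-cpu copy pattern used by
 * syscall-side lookup paths; 'pptr', 'value' and 'size' are illustrative
 * names): each CPU's slot of a per-cpu value is copied in long-sized
 * chunks so 8-byte counters are read whole:
 *
 *	for_each_possible_cpu(cpu) {
 *		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 *		off += size;
 *	}
 */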

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(struct bpf_map *map);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
int btf_resolve_helper_id(struct bpf_verifier_log *log,
			  const struct bpf_func_proto *fn, int);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog);

#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
							    u32 key)
{
	return NULL;
}

static inline void __dev_map_flush(struct bpf_map *map)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_flush(struct bpf_map *map)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

#endif /* _LINUX_BPF_H */