Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 */
8#ifndef _UAPI__LINUX_BPF_H__
9#define _UAPI__LINUX_BPF_H__
10
11#include <linux/types.h>
12#include <linux/bpf_common.h>
13
14/* Extended instruction set based on top of classic BPF */
15
16/* instruction classes */
17#define BPF_JMP32 0x06 /* jmp mode in word width */
18#define BPF_ALU64 0x07 /* alu mode in double word width */
19
20/* ld/ldx fields */
21#define BPF_DW 0x18 /* double word (64-bit) */
22#define BPF_MEMSX 0x80 /* load with sign extension */
23#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
24#define BPF_XADD 0xc0 /* exclusive add - legacy name */
25
26/* alu/jmp fields */
27#define BPF_MOV 0xb0 /* mov reg to reg */
28#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
29
30/* change endianness of a register */
31#define BPF_END 0xd0 /* flags for endianness conversion: */
32#define BPF_TO_LE 0x00 /* convert to little-endian */
33#define BPF_TO_BE 0x08 /* convert to big-endian */
34#define BPF_FROM_LE BPF_TO_LE
35#define BPF_FROM_BE BPF_TO_BE
36
37/* jmp encodings */
38#define BPF_JNE 0x50 /* jump != */
39#define BPF_JLT 0xa0 /* LT is unsigned, '<' */
40#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */
41#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
42#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
43#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
44#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
45#define BPF_JCOND 0xe0 /* conditional pseudo jumps: may_goto, goto_or_nop */
46#define BPF_CALL 0x80 /* function call */
47#define BPF_EXIT 0x90 /* function return */
48
49/* atomic op type fields (stored in immediate) */
50#define BPF_FETCH 0x01 /* not an opcode on its own, used to build others */
51#define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */
52#define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */
53
54enum bpf_cond_pseudo_jmp {
55 BPF_MAY_GOTO = 0,
56};
57
58/* Register numbers */
59enum {
60 BPF_REG_0 = 0,
61 BPF_REG_1,
62 BPF_REG_2,
63 BPF_REG_3,
64 BPF_REG_4,
65 BPF_REG_5,
66 BPF_REG_6,
67 BPF_REG_7,
68 BPF_REG_8,
69 BPF_REG_9,
70 BPF_REG_10,
71 __MAX_BPF_REG,
72};
73
74/* BPF has 10 general purpose 64-bit registers and stack frame. */
75#define MAX_BPF_REG __MAX_BPF_REG
76
77struct bpf_insn {
78 __u8 code; /* opcode */
79 __u8 dst_reg:4; /* dest register */
80 __u8 src_reg:4; /* source register */
81 __s16 off; /* signed offset */
82 __s32 imm; /* signed immediate constant */
83};
84
85/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
86 * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
87 * the trailing flexible array member) instead.
88 */
89struct bpf_lpm_trie_key {
90 __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
91 __u8 data[0]; /* Arbitrary size */
92};
93
94/* Header for bpf_lpm_trie_key structs */
95struct bpf_lpm_trie_key_hdr {
96 __u32 prefixlen;
97};
98
99/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
100struct bpf_lpm_trie_key_u8 {
101 union {
102 struct bpf_lpm_trie_key_hdr hdr;
103 __u32 prefixlen;
104 };
105 __u8 data[]; /* Arbitrary size */
106};
107
108struct bpf_cgroup_storage_key {
109 __u64 cgroup_inode_id; /* cgroup inode id */
110 __u32 attach_type; /* program attach type (enum bpf_attach_type) */
111};
112
113enum bpf_cgroup_iter_order {
114 BPF_CGROUP_ITER_ORDER_UNSPEC = 0,
115 BPF_CGROUP_ITER_SELF_ONLY, /* process only a single object. */
116 BPF_CGROUP_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */
117 BPF_CGROUP_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */
118 BPF_CGROUP_ITER_ANCESTORS_UP, /* walk ancestors upward. */
119};
120
121union bpf_iter_link_info {
122 struct {
123 __u32 map_fd;
124 } map;
125 struct {
126 enum bpf_cgroup_iter_order order;
127
128 /* At most one of cgroup_fd and cgroup_id can be non-zero. If
129 * both are zero, the walk starts from the default cgroup v2
130 * root. For walking v1 hierarchy, one should always explicitly
131 * specify cgroup_fd.
132 */
133 __u32 cgroup_fd;
134 __u64 cgroup_id;
135 } cgroup;
136 /* Parameters of task iterators. */
137 struct {
138 __u32 tid;
139 __u32 pid;
140 __u32 pid_fd;
141 } task;
142};
143
144/* BPF syscall commands, see bpf(2) man-page for more details. */
145/**
146 * DOC: eBPF Syscall Preamble
147 *
148 * The operation to be performed by the **bpf**\ () system call is determined
149 * by the *cmd* argument. Each operation takes an accompanying argument,
150 * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see
151 * below). The size argument is the size of the union pointed to by *attr*.
152 */
153/**
154 * DOC: eBPF Syscall Commands
155 *
156 * BPF_MAP_CREATE
157 * Description
158 * Create a map and return a file descriptor that refers to the
159 * map. The close-on-exec file descriptor flag (see **fcntl**\ (2))
160 * is automatically enabled for the new file descriptor.
161 *
162 * Applying **close**\ (2) to the file descriptor returned by
163 * **BPF_MAP_CREATE** will delete the map (but see NOTES).
164 *
165 * Return
166 * A new file descriptor (a nonnegative integer), or -1 if an
167 * error occurred (in which case, *errno* is set appropriately).
168 *
169 * BPF_MAP_LOOKUP_ELEM
170 * Description
171 * Look up an element with a given *key* in the map referred to
172 * by the file descriptor *map_fd*.
173 *
174 * The *flags* argument may be specified as one of the
175 * following:
176 *
177 * **BPF_F_LOCK**
178 * Look up the value of a spin-locked map without
179 * returning the lock. This must be specified if the
180 * elements contain a spinlock.
181 *
182 * Return
183 * Returns zero on success. On error, -1 is returned and *errno*
184 * is set appropriately.
185 *
186 * BPF_MAP_UPDATE_ELEM
187 * Description
188 * Create or update an element (key/value pair) in a specified map.
189 *
190 * The *flags* argument should be specified as one of the
191 * following:
192 *
193 * **BPF_ANY**
194 * Create a new element or update an existing element.
195 * **BPF_NOEXIST**
196 * Create a new element only if it did not exist.
197 * **BPF_EXIST**
198 * Update an existing element.
199 * **BPF_F_LOCK**
200 * Update a spin_lock-ed map element.
201 *
202 * Return
203 * Returns zero on success. On error, -1 is returned and *errno*
204 * is set appropriately.
205 *
206 * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**,
207 * **E2BIG**, **EEXIST**, or **ENOENT**.
208 *
209 * **E2BIG**
210 * The number of elements in the map reached the
211 * *max_entries* limit specified at map creation time.
212 * **EEXIST**
213 * If *flags* specifies **BPF_NOEXIST** and the element
214 * with *key* already exists in the map.
215 * **ENOENT**
216 * If *flags* specifies **BPF_EXIST** and the element with
217 * *key* does not exist in the map.
218 *
219 * BPF_MAP_DELETE_ELEM
220 * Description
221 * Look up and delete an element by key in a specified map.
222 *
223 * Return
224 * Returns zero on success. On error, -1 is returned and *errno*
225 * is set appropriately.
226 *
227 * BPF_MAP_GET_NEXT_KEY
228 * Description
229 * Look up an element by key in a specified map and return the key
230 * of the next element. Can be used to iterate over all elements
231 * in the map.
232 *
233 * Return
234 * Returns zero on success. On error, -1 is returned and *errno*
235 * is set appropriately.
236 *
237 * The following cases can be used to iterate over all elements of
238 * the map:
239 *
240 * * If *key* is not found, the operation returns zero and sets
241 * the *next_key* pointer to the key of the first element.
242 * * If *key* is found, the operation returns zero and sets the
243 * *next_key* pointer to the key of the next element.
244 * * If *key* is the last element, returns -1 and *errno* is set
245 * to **ENOENT**.
246 *
247 * May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or
248 * **EINVAL** on error.
249 *
250 * BPF_PROG_LOAD
251 * Description
252 * Verify and load an eBPF program, returning a new file
253 * descriptor associated with the program.
254 *
255 * Applying **close**\ (2) to the file descriptor returned by
256 * **BPF_PROG_LOAD** will unload the eBPF program (but see NOTES).
257 *
258 * The close-on-exec file descriptor flag (see **fcntl**\ (2)) is
259 * automatically enabled for the new file descriptor.
260 *
261 * Return
262 * A new file descriptor (a nonnegative integer), or -1 if an
263 * error occurred (in which case, *errno* is set appropriately).
264 *
265 * BPF_OBJ_PIN
266 * Description
267 * Pin an eBPF program or map referred by the specified *bpf_fd*
268 * to the provided *pathname* on the filesystem.
269 *
270 * The *pathname* argument must not contain a dot (".").
271 *
272 * On success, *pathname* retains a reference to the eBPF object,
273 * preventing deallocation of the object when the original
274 * *bpf_fd* is closed. This allow the eBPF object to live beyond
275 * **close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent
276 * process.
277 *
278 * Applying **unlink**\ (2) or similar calls to the *pathname*
279 * unpins the object from the filesystem, removing the reference.
280 * If no other file descriptors or filesystem nodes refer to the
281 * same object, it will be deallocated (see NOTES).
282 *
283 * The filesystem type for the parent directory of *pathname* must
284 * be **BPF_FS_MAGIC**.
285 *
286 * Return
287 * Returns zero on success. On error, -1 is returned and *errno*
288 * is set appropriately.
289 *
290 * BPF_OBJ_GET
291 * Description
292 * Open a file descriptor for the eBPF object pinned to the
293 * specified *pathname*.
294 *
295 * Return
296 * A new file descriptor (a nonnegative integer), or -1 if an
297 * error occurred (in which case, *errno* is set appropriately).
298 *
299 * BPF_PROG_ATTACH
300 * Description
301 * Attach an eBPF program to a *target_fd* at the specified
302 * *attach_type* hook.
303 *
304 * The *attach_type* specifies the eBPF attachment point to
305 * attach the program to, and must be one of *bpf_attach_type*
306 * (see below).
307 *
308 * The *attach_bpf_fd* must be a valid file descriptor for a
309 * loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap
310 * or sock_ops type corresponding to the specified *attach_type*.
311 *
312 * The *target_fd* must be a valid file descriptor for a kernel
313 * object which depends on the attach type of *attach_bpf_fd*:
314 *
315 * **BPF_PROG_TYPE_CGROUP_DEVICE**,
316 * **BPF_PROG_TYPE_CGROUP_SKB**,
317 * **BPF_PROG_TYPE_CGROUP_SOCK**,
318 * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
319 * **BPF_PROG_TYPE_CGROUP_SOCKOPT**,
320 * **BPF_PROG_TYPE_CGROUP_SYSCTL**,
321 * **BPF_PROG_TYPE_SOCK_OPS**
322 *
323 * Control Group v2 hierarchy with the eBPF controller
324 * enabled. Requires the kernel to be compiled with
325 * **CONFIG_CGROUP_BPF**.
326 *
327 * **BPF_PROG_TYPE_FLOW_DISSECTOR**
328 *
329 * Network namespace (eg /proc/self/ns/net).
330 *
331 * **BPF_PROG_TYPE_LIRC_MODE2**
332 *
333 * LIRC device path (eg /dev/lircN). Requires the kernel
334 * to be compiled with **CONFIG_BPF_LIRC_MODE2**.
335 *
336 * **BPF_PROG_TYPE_SK_SKB**,
337 * **BPF_PROG_TYPE_SK_MSG**
338 *
339 * eBPF map of socket type (eg **BPF_MAP_TYPE_SOCKHASH**).
340 *
341 * Return
342 * Returns zero on success. On error, -1 is returned and *errno*
343 * is set appropriately.
344 *
345 * BPF_PROG_DETACH
346 * Description
347 * Detach the eBPF program associated with the *target_fd* at the
348 * hook specified by *attach_type*. The program must have been
349 * previously attached using **BPF_PROG_ATTACH**.
350 *
351 * Return
352 * Returns zero on success. On error, -1 is returned and *errno*
353 * is set appropriately.
354 *
355 * BPF_PROG_TEST_RUN
356 * Description
357 * Run the eBPF program associated with the *prog_fd* a *repeat*
358 * number of times against a provided program context *ctx_in* and
359 * data *data_in*, and return the modified program context
360 * *ctx_out*, *data_out* (for example, packet data), result of the
361 * execution *retval*, and *duration* of the test run.
362 *
363 * The sizes of the buffers provided as input and output
364 * parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must
365 * be provided in the corresponding variables *ctx_size_in*,
366 * *ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any
367 * of these parameters are not provided (ie set to NULL), the
368 * corresponding size field must be zero.
369 *
370 * Some program types have particular requirements:
371 *
372 * **BPF_PROG_TYPE_SK_LOOKUP**
373 * *data_in* and *data_out* must be NULL.
374 *
375 * **BPF_PROG_TYPE_RAW_TRACEPOINT**,
376 * **BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE**
377 *
378 * *ctx_out*, *data_in* and *data_out* must be NULL.
379 * *repeat* must be zero.
380 *
381 * BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN.
382 *
383 * Return
384 * Returns zero on success. On error, -1 is returned and *errno*
385 * is set appropriately.
386 *
387 * **ENOSPC**
388 * Either *data_size_out* or *ctx_size_out* is too small.
389 * **ENOTSUPP**
390 * This command is not supported by the program type of
391 * the program referred to by *prog_fd*.
392 *
393 * BPF_PROG_GET_NEXT_ID
394 * Description
395 * Fetch the next eBPF program currently loaded into the kernel.
396 *
397 * Looks for the eBPF program with an id greater than *start_id*
398 * and updates *next_id* on success. If no other eBPF programs
399 * remain with ids higher than *start_id*, returns -1 and sets
400 * *errno* to **ENOENT**.
401 *
402 * Return
403 * Returns zero on success. On error, or when no id remains, -1
404 * is returned and *errno* is set appropriately.
405 *
406 * BPF_MAP_GET_NEXT_ID
407 * Description
408 * Fetch the next eBPF map currently loaded into the kernel.
409 *
410 * Looks for the eBPF map with an id greater than *start_id*
411 * and updates *next_id* on success. If no other eBPF maps
412 * remain with ids higher than *start_id*, returns -1 and sets
413 * *errno* to **ENOENT**.
414 *
415 * Return
416 * Returns zero on success. On error, or when no id remains, -1
417 * is returned and *errno* is set appropriately.
418 *
419 * BPF_PROG_GET_FD_BY_ID
420 * Description
421 * Open a file descriptor for the eBPF program corresponding to
422 * *prog_id*.
423 *
424 * Return
425 * A new file descriptor (a nonnegative integer), or -1 if an
426 * error occurred (in which case, *errno* is set appropriately).
427 *
428 * BPF_MAP_GET_FD_BY_ID
429 * Description
430 * Open a file descriptor for the eBPF map corresponding to
431 * *map_id*.
432 *
433 * Return
434 * A new file descriptor (a nonnegative integer), or -1 if an
435 * error occurred (in which case, *errno* is set appropriately).
436 *
437 * BPF_OBJ_GET_INFO_BY_FD
438 * Description
439 * Obtain information about the eBPF object corresponding to
440 * *bpf_fd*.
441 *
442 * Populates up to *info_len* bytes of *info*, which will be in
443 * one of the following formats depending on the eBPF object type
444 * of *bpf_fd*:
445 *
446 * * **struct bpf_prog_info**
447 * * **struct bpf_map_info**
448 * * **struct bpf_btf_info**
449 * * **struct bpf_link_info**
450 *
451 * Return
452 * Returns zero on success. On error, -1 is returned and *errno*
453 * is set appropriately.
454 *
455 * BPF_PROG_QUERY
456 * Description
457 * Obtain information about eBPF programs associated with the
458 * specified *attach_type* hook.
459 *
460 * The *target_fd* must be a valid file descriptor for a kernel
461 * object which depends on the attach type of *attach_bpf_fd*:
462 *
463 * **BPF_PROG_TYPE_CGROUP_DEVICE**,
464 * **BPF_PROG_TYPE_CGROUP_SKB**,
465 * **BPF_PROG_TYPE_CGROUP_SOCK**,
466 * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
467 * **BPF_PROG_TYPE_CGROUP_SOCKOPT**,
468 * **BPF_PROG_TYPE_CGROUP_SYSCTL**,
469 * **BPF_PROG_TYPE_SOCK_OPS**
470 *
471 * Control Group v2 hierarchy with the eBPF controller
472 * enabled. Requires the kernel to be compiled with
473 * **CONFIG_CGROUP_BPF**.
474 *
475 * **BPF_PROG_TYPE_FLOW_DISSECTOR**
476 *
477 * Network namespace (eg /proc/self/ns/net).
478 *
479 * **BPF_PROG_TYPE_LIRC_MODE2**
480 *
481 * LIRC device path (eg /dev/lircN). Requires the kernel
482 * to be compiled with **CONFIG_BPF_LIRC_MODE2**.
483 *
484 * **BPF_PROG_QUERY** always fetches the number of programs
485 * attached and the *attach_flags* which were used to attach those
486 * programs. Additionally, if *prog_ids* is nonzero and the number
487 * of attached programs is less than *prog_cnt*, populates
488 * *prog_ids* with the eBPF program ids of the programs attached
489 * at *target_fd*.
490 *
491 * The following flags may alter the result:
492 *
493 * **BPF_F_QUERY_EFFECTIVE**
494 * Only return information regarding programs which are
495 * currently effective at the specified *target_fd*.
496 *
497 * Return
498 * Returns zero on success. On error, -1 is returned and *errno*
499 * is set appropriately.
500 *
501 * BPF_RAW_TRACEPOINT_OPEN
502 * Description
503 * Attach an eBPF program to a tracepoint *name* to access kernel
504 * internal arguments of the tracepoint in their raw form.
505 *
506 * The *prog_fd* must be a valid file descriptor associated with
507 * a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**.
508 *
509 * No ABI guarantees are made about the content of tracepoint
510 * arguments exposed to the corresponding eBPF program.
511 *
512 * Applying **close**\ (2) to the file descriptor returned by
513 * **BPF_RAW_TRACEPOINT_OPEN** will delete the map (but see NOTES).
514 *
515 * Return
516 * A new file descriptor (a nonnegative integer), or -1 if an
517 * error occurred (in which case, *errno* is set appropriately).
518 *
519 * BPF_BTF_LOAD
520 * Description
521 * Verify and load BPF Type Format (BTF) metadata into the kernel,
522 * returning a new file descriptor associated with the metadata.
523 * BTF is described in more detail at
524 * https://www.kernel.org/doc/html/latest/bpf/btf.html.
525 *
526 * The *btf* parameter must point to valid memory providing
527 * *btf_size* bytes of BTF binary metadata.
528 *
529 * The returned file descriptor can be passed to other **bpf**\ ()
530 * subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to
531 * associate the BTF with those objects.
532 *
533 * Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional
534 * parameters to specify a *btf_log_buf*, *btf_log_size* and
535 * *btf_log_level* which allow the kernel to return freeform log
536 * output regarding the BTF verification process.
537 *
538 * Return
539 * A new file descriptor (a nonnegative integer), or -1 if an
540 * error occurred (in which case, *errno* is set appropriately).
541 *
542 * BPF_BTF_GET_FD_BY_ID
543 * Description
544 * Open a file descriptor for the BPF Type Format (BTF)
545 * corresponding to *btf_id*.
546 *
547 * Return
548 * A new file descriptor (a nonnegative integer), or -1 if an
549 * error occurred (in which case, *errno* is set appropriately).
550 *
551 * BPF_TASK_FD_QUERY
552 * Description
553 * Obtain information about eBPF programs associated with the
554 * target process identified by *pid* and *fd*.
555 *
556 * If the *pid* and *fd* are associated with a tracepoint, kprobe
557 * or uprobe perf event, then the *prog_id* and *fd_type* will
558 * be populated with the eBPF program id and file descriptor type
559 * of type **bpf_task_fd_type**. If associated with a kprobe or
560 * uprobe, the *probe_offset* and *probe_addr* will also be
561 * populated. Optionally, if *buf* is provided, then up to
562 * *buf_len* bytes of *buf* will be populated with the name of
563 * the tracepoint, kprobe or uprobe.
564 *
565 * The resulting *prog_id* may be introspected in deeper detail
566 * using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**.
567 *
568 * Return
569 * Returns zero on success. On error, -1 is returned and *errno*
570 * is set appropriately.
571 *
572 * BPF_MAP_LOOKUP_AND_DELETE_ELEM
573 * Description
574 * Look up an element with the given *key* in the map referred to
575 * by the file descriptor *fd*, and if found, delete the element.
576 *
577 * For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map
578 * types, the *flags* argument needs to be set to 0, but for other
579 * map types, it may be specified as:
580 *
581 * **BPF_F_LOCK**
582 * Look up and delete the value of a spin-locked map
583 * without returning the lock. This must be specified if
584 * the elements contain a spinlock.
585 *
586 * The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types
587 * implement this command as a "pop" operation, deleting the top
588 * element rather than one corresponding to *key*.
589 * The *key* and *key_len* parameters should be zeroed when
590 * issuing this operation for these map types.
591 *
592 * This command is only valid for the following map types:
593 * * **BPF_MAP_TYPE_QUEUE**
594 * * **BPF_MAP_TYPE_STACK**
595 * * **BPF_MAP_TYPE_HASH**
596 * * **BPF_MAP_TYPE_PERCPU_HASH**
597 * * **BPF_MAP_TYPE_LRU_HASH**
598 * * **BPF_MAP_TYPE_LRU_PERCPU_HASH**
599 *
600 * Return
601 * Returns zero on success. On error, -1 is returned and *errno*
602 * is set appropriately.
603 *
604 * BPF_MAP_FREEZE
605 * Description
606 * Freeze the permissions of the specified map.
607 *
608 * Write permissions may be frozen by passing zero *flags*.
609 * Upon success, no future syscall invocations may alter the
610 * map state of *map_fd*. Write operations from eBPF programs
611 * are still possible for a frozen map.
612 *
613 * Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**.
614 *
615 * Return
616 * Returns zero on success. On error, -1 is returned and *errno*
617 * is set appropriately.
618 *
619 * BPF_BTF_GET_NEXT_ID
620 * Description
621 * Fetch the next BPF Type Format (BTF) object currently loaded
622 * into the kernel.
623 *
624 * Looks for the BTF object with an id greater than *start_id*
625 * and updates *next_id* on success. If no other BTF objects
626 * remain with ids higher than *start_id*, returns -1 and sets
627 * *errno* to **ENOENT**.
628 *
629 * Return
630 * Returns zero on success. On error, or when no id remains, -1
631 * is returned and *errno* is set appropriately.
632 *
633 * BPF_MAP_LOOKUP_BATCH
634 * Description
635 * Iterate and fetch multiple elements in a map.
636 *
637 * Two opaque values are used to manage batch operations,
638 * *in_batch* and *out_batch*. Initially, *in_batch* must be set
639 * to NULL to begin the batched operation. After each subsequent
640 * **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
641 * *out_batch* as the *in_batch* for the next operation to
642 * continue iteration from the current point. Both *in_batch* and
643 * *out_batch* must point to memory large enough to hold a key,
644 * except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
645 * LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters
646 * must be at least 4 bytes wide regardless of key size.
647 *
648 * The *keys* and *values* are output parameters which must point
649 * to memory large enough to hold *count* items based on the key
650 * and value size of the map *map_fd*. The *keys* buffer must be
651 * of *key_size* * *count*. The *values* buffer must be of
652 * *value_size* * *count*.
653 *
654 * The *elem_flags* argument may be specified as one of the
655 * following:
656 *
657 * **BPF_F_LOCK**
658 * Look up the value of a spin-locked map without
659 * returning the lock. This must be specified if the
660 * elements contain a spinlock.
661 *
662 * On success, *count* elements from the map are copied into the
663 * user buffer, with the keys copied into *keys* and the values
664 * copied into the corresponding indices in *values*.
665 *
666 * If an error is returned and *errno* is not **EFAULT**, *count*
667 * is set to the number of successfully processed elements.
668 *
669 * Return
670 * Returns zero on success. On error, -1 is returned and *errno*
671 * is set appropriately.
672 *
673 * May set *errno* to **ENOSPC** to indicate that *keys* or
674 * *values* is too small to dump an entire bucket during
675 * iteration of a hash-based map type.
676 *
677 * BPF_MAP_LOOKUP_AND_DELETE_BATCH
678 * Description
679 * Iterate and delete all elements in a map.
680 *
681 * This operation has the same behavior as
682 * **BPF_MAP_LOOKUP_BATCH** with two exceptions:
683 *
684 * * Every element that is successfully returned is also deleted
685 * from the map. This is at least *count* elements. Note that
686 * *count* is both an input and an output parameter.
687 * * Upon returning with *errno* set to **EFAULT**, up to
688 * *count* elements may be deleted without returning the keys
689 * and values of the deleted elements.
690 *
691 * Return
692 * Returns zero on success. On error, -1 is returned and *errno*
693 * is set appropriately.
694 *
695 * BPF_MAP_UPDATE_BATCH
696 * Description
697 * Update multiple elements in a map by *key*.
698 *
699 * The *keys* and *values* are input parameters which must point
700 * to memory large enough to hold *count* items based on the key
701 * and value size of the map *map_fd*. The *keys* buffer must be
702 * of *key_size* * *count*. The *values* buffer must be of
703 * *value_size* * *count*.
704 *
705 * Each element specified in *keys* is sequentially updated to the
706 * value in the corresponding index in *values*. The *in_batch*
707 * and *out_batch* parameters are ignored and should be zeroed.
708 *
709 * The *elem_flags* argument should be specified as one of the
710 * following:
711 *
712 * **BPF_ANY**
713 * Create new elements or update a existing elements.
714 * **BPF_NOEXIST**
715 * Create new elements only if they do not exist.
716 * **BPF_EXIST**
717 * Update existing elements.
718 * **BPF_F_LOCK**
719 * Update spin_lock-ed map elements. This must be
720 * specified if the map value contains a spinlock.
721 *
722 * On success, *count* elements from the map are updated.
723 *
724 * If an error is returned and *errno* is not **EFAULT**, *count*
725 * is set to the number of successfully processed elements.
726 *
727 * Return
728 * Returns zero on success. On error, -1 is returned and *errno*
729 * is set appropriately.
730 *
731 * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or
732 * **E2BIG**. **E2BIG** indicates that the number of elements in
733 * the map reached the *max_entries* limit specified at map
734 * creation time.
735 *
736 * May set *errno* to one of the following error codes under
737 * specific circumstances:
738 *
739 * **EEXIST**
740 * If *flags* specifies **BPF_NOEXIST** and the element
741 * with *key* already exists in the map.
742 * **ENOENT**
743 * If *flags* specifies **BPF_EXIST** and the element with
744 * *key* does not exist in the map.
745 *
746 * BPF_MAP_DELETE_BATCH
747 * Description
748 * Delete multiple elements in a map by *key*.
749 *
750 * The *keys* parameter is an input parameter which must point
751 * to memory large enough to hold *count* items based on the key
752 * size of the map *map_fd*, that is, *key_size* * *count*.
753 *
754 * Each element specified in *keys* is sequentially deleted. The
755 * *in_batch*, *out_batch*, and *values* parameters are ignored
756 * and should be zeroed.
757 *
758 * The *elem_flags* argument may be specified as one of the
759 * following:
760 *
761 * **BPF_F_LOCK**
762 * Look up the value of a spin-locked map without
763 * returning the lock. This must be specified if the
764 * elements contain a spinlock.
765 *
766 * On success, *count* elements from the map are updated.
767 *
768 * If an error is returned and *errno* is not **EFAULT**, *count*
769 * is set to the number of successfully processed elements. If
770 * *errno* is **EFAULT**, up to *count* elements may be been
771 * deleted.
772 *
773 * Return
774 * Returns zero on success. On error, -1 is returned and *errno*
775 * is set appropriately.
776 *
777 * BPF_LINK_CREATE
778 * Description
779 * Attach an eBPF program to a *target_fd* at the specified
780 * *attach_type* hook and return a file descriptor handle for
781 * managing the link.
782 *
783 * Return
784 * A new file descriptor (a nonnegative integer), or -1 if an
785 * error occurred (in which case, *errno* is set appropriately).
786 *
787 * BPF_LINK_UPDATE
788 * Description
789 * Update the eBPF program in the specified *link_fd* to
790 * *new_prog_fd*.
791 *
792 * Return
793 * Returns zero on success. On error, -1 is returned and *errno*
794 * is set appropriately.
795 *
796 * BPF_LINK_GET_FD_BY_ID
797 * Description
798 * Open a file descriptor for the eBPF Link corresponding to
799 * *link_id*.
800 *
801 * Return
802 * A new file descriptor (a nonnegative integer), or -1 if an
803 * error occurred (in which case, *errno* is set appropriately).
804 *
805 * BPF_LINK_GET_NEXT_ID
806 * Description
807 * Fetch the next eBPF link currently loaded into the kernel.
808 *
809 * Looks for the eBPF link with an id greater than *start_id*
810 * and updates *next_id* on success. If no other eBPF links
811 * remain with ids higher than *start_id*, returns -1 and sets
812 * *errno* to **ENOENT**.
813 *
814 * Return
815 * Returns zero on success. On error, or when no id remains, -1
816 * is returned and *errno* is set appropriately.
817 *
818 * BPF_ENABLE_STATS
819 * Description
820 * Enable eBPF runtime statistics gathering.
821 *
822 * Runtime statistics gathering for the eBPF runtime is disabled
823 * by default to minimize the corresponding performance overhead.
824 * This command enables statistics globally.
825 *
826 * Multiple programs may independently enable statistics.
827 * After gathering the desired statistics, eBPF runtime statistics
828 * may be disabled again by calling **close**\ (2) for the file
829 * descriptor returned by this function. Statistics will only be
830 * disabled system-wide when all outstanding file descriptors
831 * returned by prior calls for this subcommand are closed.
832 *
833 * Return
834 * A new file descriptor (a nonnegative integer), or -1 if an
835 * error occurred (in which case, *errno* is set appropriately).
836 *
837 * BPF_ITER_CREATE
838 * Description
839 * Create an iterator on top of the specified *link_fd* (as
840 * previously created using **BPF_LINK_CREATE**) and return a
841 * file descriptor that can be used to trigger the iteration.
842 *
843 * If the resulting file descriptor is pinned to the filesystem
844 * using **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls
845 * for that path will trigger the iterator to read kernel state
846 * using the eBPF program attached to *link_fd*.
847 *
848 * Return
849 * A new file descriptor (a nonnegative integer), or -1 if an
850 * error occurred (in which case, *errno* is set appropriately).
851 *
852 * BPF_LINK_DETACH
853 * Description
854 * Forcefully detach the specified *link_fd* from its
855 * corresponding attachment point.
856 *
857 * Return
858 * Returns zero on success. On error, -1 is returned and *errno*
859 * is set appropriately.
860 *
861 * BPF_PROG_BIND_MAP
862 * Description
863 * Bind a map to the lifetime of an eBPF program.
864 *
865 * The map identified by *map_fd* is bound to the program
866 * identified by *prog_fd* and only released when *prog_fd* is
867 * released. This may be used in cases where metadata should be
868 * associated with a program which otherwise does not contain any
869 * references to the map (for example, embedded in the eBPF
870 * program instructions).
871 *
872 * Return
873 * Returns zero on success. On error, -1 is returned and *errno*
874 * is set appropriately.
875 *
876 * BPF_TOKEN_CREATE
877 * Description
878 * Create BPF token with embedded information about what
879 * BPF-related functionality it allows:
880 * - a set of allowed bpf() syscall commands;
881 * - a set of allowed BPF map types to be created with
882 * BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
883 * - a set of allowed BPF program types and BPF program attach
884 * types to be loaded with BPF_PROG_LOAD command, if
885 * BPF_PROG_LOAD itself is allowed.
886 *
887 * BPF token is created (derived) from an instance of BPF FS,
888 * assuming it has necessary delegation mount options specified.
889 * This BPF token can be passed as an extra parameter to various
890 * bpf() syscall commands to grant BPF subsystem functionality to
891 * unprivileged processes.
892 *
893 * When created, BPF token is "associated" with the owning
894 * user namespace of BPF FS instance (super block) that it was
895 * derived from, and subsequent BPF operations performed with
896 * BPF token would be performing capabilities checks (i.e.,
897 * CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within
898 * that user namespace. Without BPF token, such capabilities
899 * have to be granted in init user namespace, making bpf()
900 * syscall incompatible with user namespace, for the most part.
901 *
902 * Return
903 * A new file descriptor (a nonnegative integer), or -1 if an
904 * error occurred (in which case, *errno* is set appropriately).
905 *
906 * NOTES
907 * eBPF objects (maps and programs) can be shared between processes.
908 *
909 * * After **fork**\ (2), the child inherits file descriptors
910 * referring to the same eBPF objects.
911 * * File descriptors referring to eBPF objects can be transferred over
912 * **unix**\ (7) domain sockets.
913 * * File descriptors referring to eBPF objects can be duplicated in the
914 * usual way, using **dup**\ (2) and similar calls.
915 * * File descriptors referring to eBPF objects can be pinned to the
916 * filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2).
917 *
918 * An eBPF object is deallocated only after all file descriptors referring
919 * to the object have been closed and no references remain pinned to the
920 * filesystem or attached (for example, bound to a program or device).
921 */
922enum bpf_cmd {
923 BPF_MAP_CREATE,
924 BPF_MAP_LOOKUP_ELEM,
925 BPF_MAP_UPDATE_ELEM,
926 BPF_MAP_DELETE_ELEM,
927 BPF_MAP_GET_NEXT_KEY,
928 BPF_PROG_LOAD,
929 BPF_OBJ_PIN,
930 BPF_OBJ_GET,
931 BPF_PROG_ATTACH,
932 BPF_PROG_DETACH,
933 BPF_PROG_TEST_RUN,
934 BPF_PROG_RUN = BPF_PROG_TEST_RUN,
935 BPF_PROG_GET_NEXT_ID,
936 BPF_MAP_GET_NEXT_ID,
937 BPF_PROG_GET_FD_BY_ID,
938 BPF_MAP_GET_FD_BY_ID,
939 BPF_OBJ_GET_INFO_BY_FD,
940 BPF_PROG_QUERY,
941 BPF_RAW_TRACEPOINT_OPEN,
942 BPF_BTF_LOAD,
943 BPF_BTF_GET_FD_BY_ID,
944 BPF_TASK_FD_QUERY,
945 BPF_MAP_LOOKUP_AND_DELETE_ELEM,
946 BPF_MAP_FREEZE,
947 BPF_BTF_GET_NEXT_ID,
948 BPF_MAP_LOOKUP_BATCH,
949 BPF_MAP_LOOKUP_AND_DELETE_BATCH,
950 BPF_MAP_UPDATE_BATCH,
951 BPF_MAP_DELETE_BATCH,
952 BPF_LINK_CREATE,
953 BPF_LINK_UPDATE,
954 BPF_LINK_GET_FD_BY_ID,
955 BPF_LINK_GET_NEXT_ID,
956 BPF_ENABLE_STATS,
957 BPF_ITER_CREATE,
958 BPF_LINK_DETACH,
959 BPF_PROG_BIND_MAP,
960 BPF_TOKEN_CREATE,
961 __MAX_BPF_CMD,
962};
963
964enum bpf_map_type {
965 BPF_MAP_TYPE_UNSPEC,
966 BPF_MAP_TYPE_HASH,
967 BPF_MAP_TYPE_ARRAY,
968 BPF_MAP_TYPE_PROG_ARRAY,
969 BPF_MAP_TYPE_PERF_EVENT_ARRAY,
970 BPF_MAP_TYPE_PERCPU_HASH,
971 BPF_MAP_TYPE_PERCPU_ARRAY,
972 BPF_MAP_TYPE_STACK_TRACE,
973 BPF_MAP_TYPE_CGROUP_ARRAY,
974 BPF_MAP_TYPE_LRU_HASH,
975 BPF_MAP_TYPE_LRU_PERCPU_HASH,
976 BPF_MAP_TYPE_LPM_TRIE,
977 BPF_MAP_TYPE_ARRAY_OF_MAPS,
978 BPF_MAP_TYPE_HASH_OF_MAPS,
979 BPF_MAP_TYPE_DEVMAP,
980 BPF_MAP_TYPE_SOCKMAP,
981 BPF_MAP_TYPE_CPUMAP,
982 BPF_MAP_TYPE_XSKMAP,
983 BPF_MAP_TYPE_SOCKHASH,
984 BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
985 /* BPF_MAP_TYPE_CGROUP_STORAGE is available to bpf programs attaching
986 * to a cgroup. The newer BPF_MAP_TYPE_CGRP_STORAGE is available to
987 * both cgroup-attached and other progs and supports all functionality
988 * provided by BPF_MAP_TYPE_CGROUP_STORAGE. So mark
989 * BPF_MAP_TYPE_CGROUP_STORAGE deprecated.
990 */
991 BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
992 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
993 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
994 /* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs
995 * attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE +
996 * local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
997 * functionality and more. So mark * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
998 * deprecated.
999 */
1000 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
1001 BPF_MAP_TYPE_QUEUE,
1002 BPF_MAP_TYPE_STACK,
1003 BPF_MAP_TYPE_SK_STORAGE,
1004 BPF_MAP_TYPE_DEVMAP_HASH,
1005 BPF_MAP_TYPE_STRUCT_OPS,
1006 BPF_MAP_TYPE_RINGBUF,
1007 BPF_MAP_TYPE_INODE_STORAGE,
1008 BPF_MAP_TYPE_TASK_STORAGE,
1009 BPF_MAP_TYPE_BLOOM_FILTER,
1010 BPF_MAP_TYPE_USER_RINGBUF,
1011 BPF_MAP_TYPE_CGRP_STORAGE,
1012 BPF_MAP_TYPE_ARENA,
1013 __MAX_BPF_MAP_TYPE
1014};
1015
1016/* Note that tracing related programs such as
1017 * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
1018 * are not subject to a stable API since kernel internal data
1019 * structures can change from release to release and may
1020 * therefore break existing tracing BPF programs. Tracing BPF
1021 * programs correspond to /a/ specific kernel which is to be
1022 * analyzed, and not /a/ specific kernel /and/ all future ones.
1023 */
1024enum bpf_prog_type {
1025 BPF_PROG_TYPE_UNSPEC,
1026 BPF_PROG_TYPE_SOCKET_FILTER,
1027 BPF_PROG_TYPE_KPROBE,
1028 BPF_PROG_TYPE_SCHED_CLS,
1029 BPF_PROG_TYPE_SCHED_ACT,
1030 BPF_PROG_TYPE_TRACEPOINT,
1031 BPF_PROG_TYPE_XDP,
1032 BPF_PROG_TYPE_PERF_EVENT,
1033 BPF_PROG_TYPE_CGROUP_SKB,
1034 BPF_PROG_TYPE_CGROUP_SOCK,
1035 BPF_PROG_TYPE_LWT_IN,
1036 BPF_PROG_TYPE_LWT_OUT,
1037 BPF_PROG_TYPE_LWT_XMIT,
1038 BPF_PROG_TYPE_SOCK_OPS,
1039 BPF_PROG_TYPE_SK_SKB,
1040 BPF_PROG_TYPE_CGROUP_DEVICE,
1041 BPF_PROG_TYPE_SK_MSG,
1042 BPF_PROG_TYPE_RAW_TRACEPOINT,
1043 BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
1044 BPF_PROG_TYPE_LWT_SEG6LOCAL,
1045 BPF_PROG_TYPE_LIRC_MODE2,
1046 BPF_PROG_TYPE_SK_REUSEPORT,
1047 BPF_PROG_TYPE_FLOW_DISSECTOR,
1048 BPF_PROG_TYPE_CGROUP_SYSCTL,
1049 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
1050 BPF_PROG_TYPE_CGROUP_SOCKOPT,
1051 BPF_PROG_TYPE_TRACING,
1052 BPF_PROG_TYPE_STRUCT_OPS,
1053 BPF_PROG_TYPE_EXT,
1054 BPF_PROG_TYPE_LSM,
1055 BPF_PROG_TYPE_SK_LOOKUP,
1056 BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
1057 BPF_PROG_TYPE_NETFILTER,
1058 __MAX_BPF_PROG_TYPE
1059};
1060
1061enum bpf_attach_type {
1062 BPF_CGROUP_INET_INGRESS,
1063 BPF_CGROUP_INET_EGRESS,
1064 BPF_CGROUP_INET_SOCK_CREATE,
1065 BPF_CGROUP_SOCK_OPS,
1066 BPF_SK_SKB_STREAM_PARSER,
1067 BPF_SK_SKB_STREAM_VERDICT,
1068 BPF_CGROUP_DEVICE,
1069 BPF_SK_MSG_VERDICT,
1070 BPF_CGROUP_INET4_BIND,
1071 BPF_CGROUP_INET6_BIND,
1072 BPF_CGROUP_INET4_CONNECT,
1073 BPF_CGROUP_INET6_CONNECT,
1074 BPF_CGROUP_INET4_POST_BIND,
1075 BPF_CGROUP_INET6_POST_BIND,
1076 BPF_CGROUP_UDP4_SENDMSG,
1077 BPF_CGROUP_UDP6_SENDMSG,
1078 BPF_LIRC_MODE2,
1079 BPF_FLOW_DISSECTOR,
1080 BPF_CGROUP_SYSCTL,
1081 BPF_CGROUP_UDP4_RECVMSG,
1082 BPF_CGROUP_UDP6_RECVMSG,
1083 BPF_CGROUP_GETSOCKOPT,
1084 BPF_CGROUP_SETSOCKOPT,
1085 BPF_TRACE_RAW_TP,
1086 BPF_TRACE_FENTRY,
1087 BPF_TRACE_FEXIT,
1088 BPF_MODIFY_RETURN,
1089 BPF_LSM_MAC,
1090 BPF_TRACE_ITER,
1091 BPF_CGROUP_INET4_GETPEERNAME,
1092 BPF_CGROUP_INET6_GETPEERNAME,
1093 BPF_CGROUP_INET4_GETSOCKNAME,
1094 BPF_CGROUP_INET6_GETSOCKNAME,
1095 BPF_XDP_DEVMAP,
1096 BPF_CGROUP_INET_SOCK_RELEASE,
1097 BPF_XDP_CPUMAP,
1098 BPF_SK_LOOKUP,
1099 BPF_XDP,
1100 BPF_SK_SKB_VERDICT,
1101 BPF_SK_REUSEPORT_SELECT,
1102 BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
1103 BPF_PERF_EVENT,
1104 BPF_TRACE_KPROBE_MULTI,
1105 BPF_LSM_CGROUP,
1106 BPF_STRUCT_OPS,
1107 BPF_NETFILTER,
1108 BPF_TCX_INGRESS,
1109 BPF_TCX_EGRESS,
1110 BPF_TRACE_UPROBE_MULTI,
1111 BPF_CGROUP_UNIX_CONNECT,
1112 BPF_CGROUP_UNIX_SENDMSG,
1113 BPF_CGROUP_UNIX_RECVMSG,
1114 BPF_CGROUP_UNIX_GETPEERNAME,
1115 BPF_CGROUP_UNIX_GETSOCKNAME,
1116 BPF_NETKIT_PRIMARY,
1117 BPF_NETKIT_PEER,
1118 BPF_TRACE_KPROBE_SESSION,
1119 __MAX_BPF_ATTACH_TYPE
1120};
1121
1122#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
1123
1124enum bpf_link_type {
1125 BPF_LINK_TYPE_UNSPEC = 0,
1126 BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
1127 BPF_LINK_TYPE_TRACING = 2,
1128 BPF_LINK_TYPE_CGROUP = 3,
1129 BPF_LINK_TYPE_ITER = 4,
1130 BPF_LINK_TYPE_NETNS = 5,
1131 BPF_LINK_TYPE_XDP = 6,
1132 BPF_LINK_TYPE_PERF_EVENT = 7,
1133 BPF_LINK_TYPE_KPROBE_MULTI = 8,
1134 BPF_LINK_TYPE_STRUCT_OPS = 9,
1135 BPF_LINK_TYPE_NETFILTER = 10,
1136 BPF_LINK_TYPE_TCX = 11,
1137 BPF_LINK_TYPE_UPROBE_MULTI = 12,
1138 BPF_LINK_TYPE_NETKIT = 13,
1139 BPF_LINK_TYPE_SOCKMAP = 14,
1140 __MAX_BPF_LINK_TYPE,
1141};
1142
1143#define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE
1144
1145enum bpf_perf_event_type {
1146 BPF_PERF_EVENT_UNSPEC = 0,
1147 BPF_PERF_EVENT_UPROBE = 1,
1148 BPF_PERF_EVENT_URETPROBE = 2,
1149 BPF_PERF_EVENT_KPROBE = 3,
1150 BPF_PERF_EVENT_KRETPROBE = 4,
1151 BPF_PERF_EVENT_TRACEPOINT = 5,
1152 BPF_PERF_EVENT_EVENT = 6,
1153};
1154
1155/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
1156 *
1157 * NONE(default): No further bpf programs allowed in the subtree.
1158 *
1159 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
1160 * the program in this cgroup yields to sub-cgroup program.
1161 *
1162 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
1163 * that cgroup program gets run in addition to the program in this cgroup.
1164 *
1165 * Only one program is allowed to be attached to a cgroup with
1166 * NONE or BPF_F_ALLOW_OVERRIDE flag.
1167 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
1168 * release old program and attach the new one. Attach flags has to match.
1169 *
1170 * Multiple programs are allowed to be attached to a cgroup with
1171 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
1172 * (those that were attached first, run first)
1173 * The programs of sub-cgroup are executed first, then programs of
1174 * this cgroup and then programs of parent cgroup.
1175 * When children program makes decision (like picking TCP CA or sock bind)
1176 * parent program has a chance to override it.
1177 *
1178 * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
1179 * programs for a cgroup. Though it's possible to replace an old program at
1180 * any position by also specifying BPF_F_REPLACE flag and position itself in
1181 * replace_bpf_fd attribute. Old program at this position will be released.
1182 *
1183 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
1184 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
1185 * Ex1:
1186 * cgrp1 (MULTI progs A, B) ->
1187 * cgrp2 (OVERRIDE prog C) ->
1188 * cgrp3 (MULTI prog D) ->
1189 * cgrp4 (OVERRIDE prog E) ->
1190 * cgrp5 (NONE prog F)
1191 * the event in cgrp5 triggers execution of F,D,A,B in that order.
1192 * if prog F is detached, the execution is E,D,A,B
1193 * if prog F and D are detached, the execution is E,A,B
1194 * if prog F, E and D are detached, the execution is C,A,B
1195 *
1196 * All eligible programs are executed regardless of return code from
1197 * earlier programs.
1198 */
1199#define BPF_F_ALLOW_OVERRIDE (1U << 0)
1200#define BPF_F_ALLOW_MULTI (1U << 1)
1201/* Generic attachment flags. */
1202#define BPF_F_REPLACE (1U << 2)
1203#define BPF_F_BEFORE (1U << 3)
1204#define BPF_F_AFTER (1U << 4)
1205#define BPF_F_ID (1U << 5)
1206#define BPF_F_LINK BPF_F_LINK /* 1 << 13 */
1207
1208/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
1209 * verifier will perform strict alignment checking as if the kernel
1210 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
1211 * and NET_IP_ALIGN defined to 2.
1212 */
1213#define BPF_F_STRICT_ALIGNMENT (1U << 0)
1214
1215/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
1216 * verifier will allow any alignment whatsoever. On platforms
1217 * with strict alignment requirements for loads ands stores (such
1218 * as sparc and mips) the verifier validates that all loads and
1219 * stores provably follow this requirement. This flag turns that
1220 * checking and enforcement off.
1221 *
1222 * It is mostly used for testing when we want to validate the
1223 * context and memory access aspects of the verifier, but because
1224 * of an unaligned access the alignment check would trigger before
1225 * the one we are interested in.
1226 */
1227#define BPF_F_ANY_ALIGNMENT (1U << 1)
1228
1229/* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose.
1230 * Verifier does sub-register def/use analysis and identifies instructions whose
1231 * def only matters for low 32-bit, high 32-bit is never referenced later
1232 * through implicit zero extension. Therefore verifier notifies JIT back-ends
1233 * that it is safe to ignore clearing high 32-bit for these instructions. This
1234 * saves some back-ends a lot of code-gen. However such optimization is not
1235 * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends
1236 * hence hasn't used verifier's analysis result. But, we really want to have a
1237 * way to be able to verify the correctness of the described optimization on
1238 * x86_64 on which testsuites are frequently exercised.
1239 *
1240 * So, this flag is introduced. Once it is set, verifier will randomize high
1241 * 32-bit for those instructions who has been identified as safe to ignore them.
1242 * Then, if verifier is not doing correct analysis, such randomization will
1243 * regress tests to expose bugs.
1244 */
1245#define BPF_F_TEST_RND_HI32 (1U << 2)
1246
1247/* The verifier internal test flag. Behavior is undefined */
1248#define BPF_F_TEST_STATE_FREQ (1U << 3)
1249
1250/* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will
1251 * restrict map and helper usage for such programs. Sleepable BPF programs can
1252 * only be attached to hooks where kernel execution context allows sleeping.
1253 * Such programs are allowed to use helpers that may sleep like
1254 * bpf_copy_from_user().
1255 */
1256#define BPF_F_SLEEPABLE (1U << 4)
1257
1258/* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program
1259 * fully support xdp frags.
1260 */
1261#define BPF_F_XDP_HAS_FRAGS (1U << 5)
1262
1263/* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded
1264 * program becomes device-bound but can access XDP metadata.
1265 */
1266#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
1267
1268/* The verifier internal test flag. Behavior is undefined */
1269#define BPF_F_TEST_REG_INVARIANTS (1U << 7)
1270
1271/* link_create.kprobe_multi.flags used in LINK_CREATE command for
1272 * BPF_TRACE_KPROBE_MULTI attach type to create return probe.
1273 */
1274enum {
1275 BPF_F_KPROBE_MULTI_RETURN = (1U << 0)
1276};
1277
1278/* link_create.uprobe_multi.flags used in LINK_CREATE command for
1279 * BPF_TRACE_UPROBE_MULTI attach type to create return probe.
1280 */
1281enum {
1282 BPF_F_UPROBE_MULTI_RETURN = (1U << 0)
1283};
1284
1285/* link_create.netfilter.flags used in LINK_CREATE command for
1286 * BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation.
1287 */
1288#define BPF_F_NETFILTER_IP_DEFRAG (1U << 0)
1289
1290/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
1291 * the following extensions:
1292 *
1293 * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX]
1294 * insn[0].imm: map fd or fd_idx
1295 * insn[1].imm: 0
1296 * insn[0].off: 0
1297 * insn[1].off: 0
1298 * ldimm64 rewrite: address of map
1299 * verifier type: CONST_PTR_TO_MAP
1300 */
1301#define BPF_PSEUDO_MAP_FD 1
1302#define BPF_PSEUDO_MAP_IDX 5
1303
1304/* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE
1305 * insn[0].imm: map fd or fd_idx
1306 * insn[1].imm: offset into value
1307 * insn[0].off: 0
1308 * insn[1].off: 0
1309 * ldimm64 rewrite: address of map[0]+offset
1310 * verifier type: PTR_TO_MAP_VALUE
1311 */
1312#define BPF_PSEUDO_MAP_VALUE 2
1313#define BPF_PSEUDO_MAP_IDX_VALUE 6
1314
1315/* insn[0].src_reg: BPF_PSEUDO_BTF_ID
1316 * insn[0].imm: kernel btd id of VAR
1317 * insn[1].imm: 0
1318 * insn[0].off: 0
1319 * insn[1].off: 0
1320 * ldimm64 rewrite: address of the kernel variable
1321 * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var
1322 * is struct/union.
1323 */
1324#define BPF_PSEUDO_BTF_ID 3
1325/* insn[0].src_reg: BPF_PSEUDO_FUNC
1326 * insn[0].imm: insn offset to the func
1327 * insn[1].imm: 0
1328 * insn[0].off: 0
1329 * insn[1].off: 0
1330 * ldimm64 rewrite: address of the function
1331 * verifier type: PTR_TO_FUNC.
1332 */
1333#define BPF_PSEUDO_FUNC 4
1334
1335/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
1336 * offset to another bpf function
1337 */
1338#define BPF_PSEUDO_CALL 1
1339/* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL,
1340 * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel
1341 */
1342#define BPF_PSEUDO_KFUNC_CALL 2
1343
1344enum bpf_addr_space_cast {
1345 BPF_ADDR_SPACE_CAST = 1,
1346};
1347
1348/* flags for BPF_MAP_UPDATE_ELEM command */
1349enum {
1350 BPF_ANY = 0, /* create new element or update existing */
1351 BPF_NOEXIST = 1, /* create new element if it didn't exist */
1352 BPF_EXIST = 2, /* update existing element */
1353 BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
1354};
1355
1356/* flags for BPF_MAP_CREATE command */
1357enum {
1358 BPF_F_NO_PREALLOC = (1U << 0),
1359/* Instead of having one common LRU list in the
1360 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
1361 * which can scale and perform better.
1362 * Note, the LRU nodes (including free nodes) cannot be moved
1363 * across different LRU lists.
1364 */
1365 BPF_F_NO_COMMON_LRU = (1U << 1),
1366/* Specify numa node during map creation */
1367 BPF_F_NUMA_NODE = (1U << 2),
1368
1369/* Flags for accessing BPF object from syscall side. */
1370 BPF_F_RDONLY = (1U << 3),
1371 BPF_F_WRONLY = (1U << 4),
1372
1373/* Flag for stack_map, store build_id+offset instead of pointer */
1374 BPF_F_STACK_BUILD_ID = (1U << 5),
1375
1376/* Zero-initialize hash function seed. This should only be used for testing. */
1377 BPF_F_ZERO_SEED = (1U << 6),
1378
1379/* Flags for accessing BPF object from program side. */
1380 BPF_F_RDONLY_PROG = (1U << 7),
1381 BPF_F_WRONLY_PROG = (1U << 8),
1382
1383/* Clone map from listener for newly accepted socket */
1384 BPF_F_CLONE = (1U << 9),
1385
1386/* Enable memory-mapping BPF map */
1387 BPF_F_MMAPABLE = (1U << 10),
1388
1389/* Share perf_event among processes */
1390 BPF_F_PRESERVE_ELEMS = (1U << 11),
1391
1392/* Create a map that is suitable to be an inner map with dynamic max entries */
1393 BPF_F_INNER_MAP = (1U << 12),
1394
1395/* Create a map that will be registered/unregesitered by the backed bpf_link */
1396 BPF_F_LINK = (1U << 13),
1397
1398/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
1399 BPF_F_PATH_FD = (1U << 14),
1400
1401/* Flag for value_type_btf_obj_fd, the fd is available */
1402 BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
1403
1404/* BPF token FD is passed in a corresponding command's token_fd field */
1405 BPF_F_TOKEN_FD = (1U << 16),
1406
1407/* When user space page faults in bpf_arena send SIGSEGV instead of inserting new page */
1408 BPF_F_SEGV_ON_FAULT = (1U << 17),
1409
1410/* Do not translate kernel bpf_arena pointers to user pointers */
1411 BPF_F_NO_USER_CONV = (1U << 18),
1412};
1413
1414/* Flags for BPF_PROG_QUERY. */
1415
1416/* Query effective (directly attached + inherited from ancestor cgroups)
1417 * programs that will be executed for events within a cgroup.
1418 * attach_flags with this flag are always returned 0.
1419 */
1420#define BPF_F_QUERY_EFFECTIVE (1U << 0)
1421
1422/* Flags for BPF_PROG_TEST_RUN */
1423
1424/* If set, run the test on the cpu specified by bpf_attr.test.cpu */
1425#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
1426/* If set, XDP frames will be transmitted after processing */
1427#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
1428
1429/* type for BPF_ENABLE_STATS */
1430enum bpf_stats_type {
1431 /* enabled run_time_ns and run_cnt */
1432 BPF_STATS_RUN_TIME = 0,
1433};
1434
1435enum bpf_stack_build_id_status {
1436 /* user space need an empty entry to identify end of a trace */
1437 BPF_STACK_BUILD_ID_EMPTY = 0,
1438 /* with valid build_id and offset */
1439 BPF_STACK_BUILD_ID_VALID = 1,
1440 /* couldn't get build_id, fallback to ip */
1441 BPF_STACK_BUILD_ID_IP = 2,
1442};
1443
1444#define BPF_BUILD_ID_SIZE 20
1445struct bpf_stack_build_id {
1446 __s32 status;
1447 unsigned char build_id[BPF_BUILD_ID_SIZE];
1448 union {
1449 __u64 offset;
1450 __u64 ip;
1451 };
1452};
1453
1454#define BPF_OBJ_NAME_LEN 16U
1455
1456union bpf_attr {
1457 struct { /* anonymous struct used by BPF_MAP_CREATE command */
1458 __u32 map_type; /* one of enum bpf_map_type */
1459 __u32 key_size; /* size of key in bytes */
1460 __u32 value_size; /* size of value in bytes */
1461 __u32 max_entries; /* max number of entries in a map */
1462 __u32 map_flags; /* BPF_MAP_CREATE related
1463 * flags defined above.
1464 */
1465 __u32 inner_map_fd; /* fd pointing to the inner map */
1466 __u32 numa_node; /* numa node (effective only if
1467 * BPF_F_NUMA_NODE is set).
1468 */
1469 char map_name[BPF_OBJ_NAME_LEN];
1470 __u32 map_ifindex; /* ifindex of netdev to create on */
1471 __u32 btf_fd; /* fd pointing to a BTF type data */
1472 __u32 btf_key_type_id; /* BTF type_id of the key */
1473 __u32 btf_value_type_id; /* BTF type_id of the value */
1474 __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
1475 * struct stored as the
1476 * map value
1477 */
1478 /* Any per-map-type extra fields
1479 *
1480 * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
1481 * number of hash functions (if 0, the bloom filter will default
1482 * to using 5 hash functions).
1483 *
1484 * BPF_MAP_TYPE_ARENA - contains the address where user space
1485 * is going to mmap() the arena. It has to be page aligned.
1486 */
1487 __u64 map_extra;
1488
1489 __s32 value_type_btf_obj_fd; /* fd pointing to a BTF
1490 * type data for
1491 * btf_vmlinux_value_type_id.
1492 */
1493 /* BPF token FD to use with BPF_MAP_CREATE operation.
1494 * If provided, map_flags should have BPF_F_TOKEN_FD flag set.
1495 */
1496 __s32 map_token_fd;
1497 };
1498
1499 struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
1500 __u32 map_fd;
1501 __aligned_u64 key;
1502 union {
1503 __aligned_u64 value;
1504 __aligned_u64 next_key;
1505 };
1506 __u64 flags;
1507 };
1508
1509 struct { /* struct used by BPF_MAP_*_BATCH commands */
1510 __aligned_u64 in_batch; /* start batch,
1511 * NULL to start from beginning
1512 */
1513 __aligned_u64 out_batch; /* output: next start batch */
1514 __aligned_u64 keys;
1515 __aligned_u64 values;
1516 __u32 count; /* input/output:
1517 * input: # of key/value
1518 * elements
1519 * output: # of filled elements
1520 */
1521 __u32 map_fd;
1522 __u64 elem_flags;
1523 __u64 flags;
1524 } batch;
1525
1526 struct { /* anonymous struct used by BPF_PROG_LOAD command */
1527 __u32 prog_type; /* one of enum bpf_prog_type */
1528 __u32 insn_cnt;
1529 __aligned_u64 insns;
1530 __aligned_u64 license;
1531 __u32 log_level; /* verbosity level of verifier */
1532 __u32 log_size; /* size of user buffer */
1533 __aligned_u64 log_buf; /* user supplied buffer */
1534 __u32 kern_version; /* not used */
1535 __u32 prog_flags;
1536 char prog_name[BPF_OBJ_NAME_LEN];
1537 __u32 prog_ifindex; /* ifindex of netdev to prep for */
1538 /* For some prog types expected attach type must be known at
1539 * load time to verify attach type specific parts of prog
1540 * (context accesses, allowed helpers, etc).
1541 */
1542 __u32 expected_attach_type;
1543 __u32 prog_btf_fd; /* fd pointing to BTF type data */
1544 __u32 func_info_rec_size; /* userspace bpf_func_info size */
1545 __aligned_u64 func_info; /* func info */
1546 __u32 func_info_cnt; /* number of bpf_func_info records */
1547 __u32 line_info_rec_size; /* userspace bpf_line_info size */
1548 __aligned_u64 line_info; /* line info */
1549 __u32 line_info_cnt; /* number of bpf_line_info records */
1550 __u32 attach_btf_id; /* in-kernel BTF type id to attach to */
1551 union {
1552 /* valid prog_fd to attach to bpf prog */
1553 __u32 attach_prog_fd;
1554 /* or valid module BTF object fd or 0 to attach to vmlinux */
1555 __u32 attach_btf_obj_fd;
1556 };
1557 __u32 core_relo_cnt; /* number of bpf_core_relo */
1558 __aligned_u64 fd_array; /* array of FDs */
1559 __aligned_u64 core_relos;
1560 __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
1561 /* output: actual total log contents size (including termintaing zero).
1562 * It could be both larger than original log_size (if log was
1563 * truncated), or smaller (if log buffer wasn't filled completely).
1564 */
1565 __u32 log_true_size;
1566 /* BPF token FD to use with BPF_PROG_LOAD operation.
1567 * If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
1568 */
1569 __s32 prog_token_fd;
1570 };
1571
1572 struct { /* anonymous struct used by BPF_OBJ_* commands */
1573 __aligned_u64 pathname;
1574 __u32 bpf_fd;
1575 __u32 file_flags;
1576 /* Same as dirfd in openat() syscall; see openat(2)
1577 * manpage for details of path FD and pathname semantics;
1578 * path_fd should accompanied by BPF_F_PATH_FD flag set in
1579 * file_flags field, otherwise it should be set to zero;
1580 * if BPF_F_PATH_FD flag is not set, AT_FDCWD is assumed.
1581 */
1582 __s32 path_fd;
1583 };
1584
1585 struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
1586 union {
1587 __u32 target_fd; /* target object to attach to or ... */
1588 __u32 target_ifindex; /* target ifindex */
1589 };
1590 __u32 attach_bpf_fd;
1591 __u32 attach_type;
1592 __u32 attach_flags;
1593 __u32 replace_bpf_fd;
1594 union {
1595 __u32 relative_fd;
1596 __u32 relative_id;
1597 };
1598 __u64 expected_revision;
1599 };
1600
1601 struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
1602 __u32 prog_fd;
1603 __u32 retval;
1604 __u32 data_size_in; /* input: len of data_in */
1605 __u32 data_size_out; /* input/output: len of data_out
1606 * returns ENOSPC if data_out
1607 * is too small.
1608 */
1609 __aligned_u64 data_in;
1610 __aligned_u64 data_out;
1611 __u32 repeat;
1612 __u32 duration;
1613 __u32 ctx_size_in; /* input: len of ctx_in */
1614 __u32 ctx_size_out; /* input/output: len of ctx_out
1615 * returns ENOSPC if ctx_out
1616 * is too small.
1617 */
1618 __aligned_u64 ctx_in;
1619 __aligned_u64 ctx_out;
1620 __u32 flags;
1621 __u32 cpu;
1622 __u32 batch_size;
1623 } test;
1624
1625 struct { /* anonymous struct used by BPF_*_GET_*_ID */
1626 union {
1627 __u32 start_id;
1628 __u32 prog_id;
1629 __u32 map_id;
1630 __u32 btf_id;
1631 __u32 link_id;
1632 };
1633 __u32 next_id;
1634 __u32 open_flags;
1635 };
1636
1637 struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
1638 __u32 bpf_fd;
1639 __u32 info_len;
1640 __aligned_u64 info;
1641 } info;
1642
1643 struct { /* anonymous struct used by BPF_PROG_QUERY command */
1644 union {
1645 __u32 target_fd; /* target object to query or ... */
1646 __u32 target_ifindex; /* target ifindex */
1647 };
1648 __u32 attach_type;
1649 __u32 query_flags;
1650 __u32 attach_flags;
1651 __aligned_u64 prog_ids;
1652 union {
1653 __u32 prog_cnt;
1654 __u32 count;
1655 };
1656 __u32 :32;
1657 /* output: per-program attach_flags.
1658 * not allowed to be set during effective query.
1659 */
1660 __aligned_u64 prog_attach_flags;
1661 __aligned_u64 link_ids;
1662 __aligned_u64 link_attach_flags;
1663 __u64 revision;
1664 } query;
1665
1666 struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
1667 __u64 name;
1668 __u32 prog_fd;
1669 __u32 :32;
1670 __aligned_u64 cookie;
1671 } raw_tracepoint;
1672
1673 struct { /* anonymous struct for BPF_BTF_LOAD */
1674 __aligned_u64 btf;
1675 __aligned_u64 btf_log_buf;
1676 __u32 btf_size;
1677 __u32 btf_log_size;
1678 __u32 btf_log_level;
1679 /* output: actual total log contents size (including termintaing zero).
1680 * It could be both larger than original log_size (if log was
1681 * truncated), or smaller (if log buffer wasn't filled completely).
1682 */
1683 __u32 btf_log_true_size;
1684 __u32 btf_flags;
1685 /* BPF token FD to use with BPF_BTF_LOAD operation.
1686 * If provided, btf_flags should have BPF_F_TOKEN_FD flag set.
1687 */
1688 __s32 btf_token_fd;
1689 };
1690
1691 struct {
1692 __u32 pid; /* input: pid */
1693 __u32 fd; /* input: fd */
1694 __u32 flags; /* input: flags */
1695 __u32 buf_len; /* input/output: buf len */
1696 __aligned_u64 buf; /* input/output:
1697 * tp_name for tracepoint
1698 * symbol for kprobe
1699 * filename for uprobe
1700 */
1701 __u32 prog_id; /* output: prod_id */
1702 __u32 fd_type; /* output: BPF_FD_TYPE_* */
1703 __u64 probe_offset; /* output: probe_offset */
1704 __u64 probe_addr; /* output: probe_addr */
1705 } task_fd_query;
1706
1707 struct { /* struct used by BPF_LINK_CREATE command */
1708 union {
1709 __u32 prog_fd; /* eBPF program to attach */
1710 __u32 map_fd; /* struct_ops to attach */
1711 };
1712 union {
1713 __u32 target_fd; /* target object to attach to or ... */
1714 __u32 target_ifindex; /* target ifindex */
1715 };
1716 __u32 attach_type; /* attach type */
1717 __u32 flags; /* extra flags */
1718 union {
1719 __u32 target_btf_id; /* btf_id of target to attach to */
1720 struct {
1721 __aligned_u64 iter_info; /* extra bpf_iter_link_info */
1722 __u32 iter_info_len; /* iter_info length */
1723 };
1724 struct {
1725 /* black box user-provided value passed through
1726 * to BPF program at the execution time and
1727 * accessible through bpf_get_attach_cookie() BPF helper
1728 */
1729 __u64 bpf_cookie;
1730 } perf_event;
1731 struct {
1732 __u32 flags;
1733 __u32 cnt;
1734 __aligned_u64 syms;
1735 __aligned_u64 addrs;
1736 __aligned_u64 cookies;
1737 } kprobe_multi;
1738 struct {
1739 /* this is overlaid with the target_btf_id above. */
1740 __u32 target_btf_id;
1741 /* black box user-provided value passed through
1742 * to BPF program at the execution time and
1743 * accessible through bpf_get_attach_cookie() BPF helper
1744 */
1745 __u64 cookie;
1746 } tracing;
1747 struct {
1748 __u32 pf;
1749 __u32 hooknum;
1750 __s32 priority;
1751 __u32 flags;
1752 } netfilter;
1753 struct {
1754 union {
1755 __u32 relative_fd;
1756 __u32 relative_id;
1757 };
1758 __u64 expected_revision;
1759 } tcx;
1760 struct {
1761 __aligned_u64 path;
1762 __aligned_u64 offsets;
1763 __aligned_u64 ref_ctr_offsets;
1764 __aligned_u64 cookies;
1765 __u32 cnt;
1766 __u32 flags;
1767 __u32 pid;
1768 } uprobe_multi;
1769 struct {
1770 union {
1771 __u32 relative_fd;
1772 __u32 relative_id;
1773 };
1774 __u64 expected_revision;
1775 } netkit;
1776 };
1777 } link_create;
1778
1779 struct { /* struct used by BPF_LINK_UPDATE command */
1780 __u32 link_fd; /* link fd */
1781 union {
1782 /* new program fd to update link with */
1783 __u32 new_prog_fd;
1784 /* new struct_ops map fd to update link with */
1785 __u32 new_map_fd;
1786 };
1787 __u32 flags; /* extra flags */
1788 union {
1789 /* expected link's program fd; is specified only if
1790 * BPF_F_REPLACE flag is set in flags.
1791 */
1792 __u32 old_prog_fd;
1793 /* expected link's map fd; is specified only
1794 * if BPF_F_REPLACE flag is set.
1795 */
1796 __u32 old_map_fd;
1797 };
1798 } link_update;
1799
1800 struct {
1801 __u32 link_fd;
1802 } link_detach;
1803
1804 struct { /* struct used by BPF_ENABLE_STATS command */
1805 __u32 type;
1806 } enable_stats;
1807
1808 struct { /* struct used by BPF_ITER_CREATE command */
1809 __u32 link_fd;
1810 __u32 flags;
1811 } iter_create;
1812
1813 struct { /* struct used by BPF_PROG_BIND_MAP command */
1814 __u32 prog_fd;
1815 __u32 map_fd;
1816 __u32 flags; /* extra flags */
1817 } prog_bind_map;
1818
1819 struct { /* struct used by BPF_TOKEN_CREATE command */
1820 __u32 flags;
1821 __u32 bpffs_fd;
1822 } token_create;
1823
1824} __attribute__((aligned(8)));
1825
1826/* The description below is an attempt at providing documentation to eBPF
1827 * developers about the multiple available eBPF helper functions. It can be
1828 * parsed and used to produce a manual page. The workflow is the following,
1829 * and requires the rst2man utility:
1830 *
1831 * $ ./scripts/bpf_doc.py \
1832 * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
1833 * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
1834 * $ man /tmp/bpf-helpers.7
1835 *
1836 * Note that in order to produce this external documentation, some RST
1837 * formatting is used in the descriptions to get "bold" and "italics" in
1838 * manual pages. Also note that the few trailing white spaces are
1839 * intentional, removing them would break paragraphs for rst2man.
1840 *
1841 * Start of BPF helper function descriptions:
1842 *
1843 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
1844 * Description
1845 * Perform a lookup in *map* for an entry associated to *key*.
1846 * Return
1847 * Map value associated to *key*, or **NULL** if no entry was
1848 * found.
1849 *
1850 * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
1851 * Description
1852 * Add or update the value of the entry associated to *key* in
1853 * *map* with *value*. *flags* is one of:
1854 *
1855 * **BPF_NOEXIST**
1856 * The entry for *key* must not exist in the map.
1857 * **BPF_EXIST**
1858 * The entry for *key* must already exist in the map.
1859 * **BPF_ANY**
1860 * No condition on the existence of the entry for *key*.
1861 *
1862 * Flag value **BPF_NOEXIST** cannot be used for maps of types
1863 * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
1864 * elements always exist), the helper would return an error.
1865 * Return
1866 * 0 on success, or a negative error in case of failure.
1867 *
1868 * long bpf_map_delete_elem(struct bpf_map *map, const void *key)
1869 * Description
1870 * Delete entry with *key* from *map*.
1871 * Return
1872 * 0 on success, or a negative error in case of failure.
1873 *
1874 * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
1875 * Description
1876 * For tracing programs, safely attempt to read *size* bytes from
1877 * kernel space address *unsafe_ptr* and store the data in *dst*.
1878 *
1879 * Generally, use **bpf_probe_read_user**\ () or
1880 * **bpf_probe_read_kernel**\ () instead.
1881 * Return
1882 * 0 on success, or a negative error in case of failure.
1883 *
1884 * u64 bpf_ktime_get_ns(void)
1885 * Description
1886 * Return the time elapsed since system boot, in nanoseconds.
1887 * Does not include time the system was suspended.
1888 * See: **clock_gettime**\ (**CLOCK_MONOTONIC**)
1889 * Return
1890 * Current *ktime*.
1891 *
1892 * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
1893 * Description
1894 * This helper is a "printk()-like" facility for debugging. It
1895 * prints a message defined by format *fmt* (of size *fmt_size*)
1896 * to file *\/sys/kernel/tracing/trace* from TraceFS, if
1897 * available. It can take up to three additional **u64**
1898 * arguments (as an eBPF helpers, the total number of arguments is
1899 * limited to five).
1900 *
1901 * Each time the helper is called, it appends a line to the trace.
1902 * Lines are discarded while *\/sys/kernel/tracing/trace* is
1903 * open, use *\/sys/kernel/tracing/trace_pipe* to avoid this.
1904 * The format of the trace is customizable, and the exact output
1905 * one will get depends on the options set in
1906 * *\/sys/kernel/tracing/trace_options* (see also the
1907 * *README* file under the same directory). However, it usually
1908 * defaults to something like:
1909 *
1910 * ::
1911 *
1912 * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg>
1913 *
1914 * In the above:
1915 *
1916 * * ``telnet`` is the name of the current task.
1917 * * ``470`` is the PID of the current task.
1918 * * ``001`` is the CPU number on which the task is
1919 * running.
1920 * * In ``.N..``, each character refers to a set of
1921 * options (whether irqs are enabled, scheduling
1922 * options, whether hard/softirqs are running, level of
1923 * preempt_disabled respectively). **N** means that
1924 * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
1925 * are set.
1926 * * ``419421.045894`` is a timestamp.
1927 * * ``0x00000001`` is a fake value used by BPF for the
1928 * instruction pointer register.
1929 * * ``<formatted msg>`` is the message formatted with
1930 * *fmt*.
1931 *
1932 * The conversion specifiers supported by *fmt* are similar, but
1933 * more limited than for printk(). They are **%d**, **%i**,
1934 * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
1935 * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
1936 * of field, padding with zeroes, etc.) is available, and the
1937 * helper will return **-EINVAL** (but print nothing) if it
1938 * encounters an unknown specifier.
1939 *
1940 * Also, note that **bpf_trace_printk**\ () is slow, and should
1941 * only be used for debugging purposes. For this reason, a notice
1942 * block (spanning several lines) is printed to kernel logs and
1943 * states that the helper should not be used "for production use"
1944 * the first time this helper is used (or more precisely, when
1945 * **trace_printk**\ () buffers are allocated). For passing values
1946 * to user space, perf events should be preferred.
1947 * Return
1948 * The number of bytes written to the buffer, or a negative error
1949 * in case of failure.
1950 *
1951 * u32 bpf_get_prandom_u32(void)
1952 * Description
1953 * Get a pseudo-random number.
1954 *
1955 * From a security point of view, this helper uses its own
1956 * pseudo-random internal state, and cannot be used to infer the
1957 * seed of other random functions in the kernel. However, it is
1958 * essential to note that the generator used by the helper is not
1959 * cryptographically secure.
1960 * Return
1961 * A random 32-bit unsigned value.
1962 *
1963 * u32 bpf_get_smp_processor_id(void)
1964 * Description
1965 * Get the SMP (symmetric multiprocessing) processor id. Note that
1966 * all programs run with migration disabled, which means that the
1967 * SMP processor id is stable during all the execution of the
1968 * program.
1969 * Return
1970 * The SMP id of the processor running the program.
1971 *
1972 * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
1973 * Description
1974 * Store *len* bytes from address *from* into the packet
1975 * associated to *skb*, at *offset*. *flags* are a combination of
1976 * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the
1977 * checksum for the packet after storing the bytes) and
1978 * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
1979 * **->swhash** and *skb*\ **->l4hash** to 0).
1980 *
1981 * A call to this helper is susceptible to change the underlying
1982 * packet buffer. Therefore, at load time, all checks on pointers
1983 * previously done by the verifier are invalidated and must be
1984 * performed again, if the helper is used in combination with
1985 * direct packet access.
1986 * Return
1987 * 0 on success, or a negative error in case of failure.
1988 *
1989 * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
1990 * Description
1991 * Recompute the layer 3 (e.g. IP) checksum for the packet
1992 * associated to *skb*. Computation is incremental, so the helper
1993 * must know the former value of the header field that was
1994 * modified (*from*), the new value of this field (*to*), and the
1995 * number of bytes (2 or 4) for this field, stored in *size*.
1996 * Alternatively, it is possible to store the difference between
1997 * the previous and the new values of the header field in *to*, by
1998 * setting *from* and *size* to 0. For both methods, *offset*
1999 * indicates the location of the IP checksum within the packet.
2000 *
2001 * This helper works in combination with **bpf_csum_diff**\ (),
2002 * which does not update the checksum in-place, but offers more
2003 * flexibility and can handle sizes larger than 2 or 4 for the
2004 * checksum to update.
2005 *
2006 * A call to this helper is susceptible to change the underlying
2007 * packet buffer. Therefore, at load time, all checks on pointers
2008 * previously done by the verifier are invalidated and must be
2009 * performed again, if the helper is used in combination with
2010 * direct packet access.
2011 * Return
2012 * 0 on success, or a negative error in case of failure.
2013 *
2014 * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
2015 * Description
2016 * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
2017 * packet associated to *skb*. Computation is incremental, so the
2018 * helper must know the former value of the header field that was
2019 * modified (*from*), the new value of this field (*to*), and the
2020 * number of bytes (2 or 4) for this field, stored on the lowest
2021 * four bits of *flags*. Alternatively, it is possible to store
2022 * the difference between the previous and the new values of the
2023 * header field in *to*, by setting *from* and the four lowest
2024 * bits of *flags* to 0. For both methods, *offset* indicates the
2025 * location of the IP checksum within the packet. In addition to
2026 * the size of the field, *flags* can be added (bitwise OR) actual
2027 * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
2028 * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
2029 * for updates resulting in a null checksum the value is set to
2030 * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
2031 * the checksum is to be computed against a pseudo-header.
2032 *
2033 * This helper works in combination with **bpf_csum_diff**\ (),
2034 * which does not update the checksum in-place, but offers more
2035 * flexibility and can handle sizes larger than 2 or 4 for the
2036 * checksum to update.
2037 *
2038 * A call to this helper is susceptible to change the underlying
2039 * packet buffer. Therefore, at load time, all checks on pointers
2040 * previously done by the verifier are invalidated and must be
2041 * performed again, if the helper is used in combination with
2042 * direct packet access.
2043 * Return
2044 * 0 on success, or a negative error in case of failure.
2045 *
2046 * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
2047 * Description
2048 * This special helper is used to trigger a "tail call", or in
2049 * other words, to jump into another eBPF program. The same stack
2050 * frame is used (but values on stack and in registers for the
2051 * caller are not accessible to the callee). This mechanism allows
2052 * for program chaining, either for raising the maximum number of
2053 * available eBPF instructions, or to execute given programs in
2054 * conditional blocks. For security reasons, there is an upper
2055 * limit to the number of successive tail calls that can be
2056 * performed.
2057 *
2058 * Upon call of this helper, the program attempts to jump into a
2059 * program referenced at index *index* in *prog_array_map*, a
2060 * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
2061 * *ctx*, a pointer to the context.
2062 *
2063 * If the call succeeds, the kernel immediately runs the first
2064 * instruction of the new program. This is not a function call,
2065 * and it never returns to the previous program. If the call
2066 * fails, then the helper has no effect, and the caller continues
2067 * to run its subsequent instructions. A call can fail if the
2068 * destination program for the jump does not exist (i.e. *index*
2069 * is superior to the number of entries in *prog_array_map*), or
2070 * if the maximum number of tail calls has been reached for this
2071 * chain of programs. This limit is defined in the kernel by the
2072 * macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
2073 * which is currently set to 33.
2074 * Return
2075 * 0 on success, or a negative error in case of failure.
2076 *
2077 * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
2078 * Description
2079 * Clone and redirect the packet associated to *skb* to another
2080 * net device of index *ifindex*. Both ingress and egress
2081 * interfaces can be used for redirection. The **BPF_F_INGRESS**
2082 * value in *flags* is used to make the distinction (ingress path
2083 * is selected if the flag is present, egress path otherwise).
2084 * This is the only flag supported for now.
2085 *
2086 * In comparison with **bpf_redirect**\ () helper,
2087 * **bpf_clone_redirect**\ () has the associated cost of
2088 * duplicating the packet buffer, but this can be executed out of
2089 * the eBPF program. Conversely, **bpf_redirect**\ () is more
2090 * efficient, but it is handled through an action code where the
2091 * redirection happens only after the eBPF program has returned.
2092 *
2093 * A call to this helper is susceptible to change the underlying
2094 * packet buffer. Therefore, at load time, all checks on pointers
2095 * previously done by the verifier are invalidated and must be
2096 * performed again, if the helper is used in combination with
2097 * direct packet access.
2098 * Return
2099 * 0 on success, or a negative error in case of failure. Positive
2100 * error indicates a potential drop or congestion in the target
2101 * device. The particular positive error codes are not defined.
2102 *
2103 * u64 bpf_get_current_pid_tgid(void)
2104 * Description
2105 * Get the current pid and tgid.
2106 * Return
2107 * A 64-bit integer containing the current tgid and pid, and
2108 * created as such:
2109 * *current_task*\ **->tgid << 32 \|**
2110 * *current_task*\ **->pid**.
2111 *
2112 * u64 bpf_get_current_uid_gid(void)
2113 * Description
2114 * Get the current uid and gid.
2115 * Return
2116 * A 64-bit integer containing the current GID and UID, and
2117 * created as such: *current_gid* **<< 32 \|** *current_uid*.
2118 *
2119 * long bpf_get_current_comm(void *buf, u32 size_of_buf)
2120 * Description
2121 * Copy the **comm** attribute of the current task into *buf* of
2122 * *size_of_buf*. The **comm** attribute contains the name of
2123 * the executable (excluding the path) for the current task. The
2124 * *size_of_buf* must be strictly positive. On success, the
2125 * helper makes sure that the *buf* is NUL-terminated. On failure,
2126 * it is filled with zeroes.
2127 * Return
2128 * 0 on success, or a negative error in case of failure.
2129 *
2130 * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
2131 * Description
2132 * Retrieve the classid for the current task, i.e. for the net_cls
2133 * cgroup to which *skb* belongs.
2134 *
2135 * This helper can be used on TC egress path, but not on ingress.
2136 *
2137 * The net_cls cgroup provides an interface to tag network packets
2138 * based on a user-provided identifier for all traffic coming from
2139 * the tasks belonging to the related cgroup. See also the related
2140 * kernel documentation, available from the Linux sources in file
2141 * *Documentation/admin-guide/cgroup-v1/net_cls.rst*.
2142 *
2143 * The Linux kernel has two versions for cgroups: there are
2144 * cgroups v1 and cgroups v2. Both are available to users, who can
2145 * use a mixture of them, but note that the net_cls cgroup is for
2146 * cgroup v1 only. This makes it incompatible with BPF programs
2147 * run on cgroups, which is a cgroup-v2-only feature (a socket can
2148 * only hold data for one version of cgroups at a time).
2149 *
2150 * This helper is only available is the kernel was compiled with
2151 * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
2152 * "**y**" or to "**m**".
2153 * Return
2154 * The classid, or 0 for the default unconfigured classid.
2155 *
2156 * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
2157 * Description
2158 * Push a *vlan_tci* (VLAN tag control information) of protocol
2159 * *vlan_proto* to the packet associated to *skb*, then update
2160 * the checksum. Note that if *vlan_proto* is different from
2161 * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
2162 * be **ETH_P_8021Q**.
2163 *
2164 * A call to this helper is susceptible to change the underlying
2165 * packet buffer. Therefore, at load time, all checks on pointers
2166 * previously done by the verifier are invalidated and must be
2167 * performed again, if the helper is used in combination with
2168 * direct packet access.
2169 * Return
2170 * 0 on success, or a negative error in case of failure.
2171 *
2172 * long bpf_skb_vlan_pop(struct sk_buff *skb)
2173 * Description
2174 * Pop a VLAN header from the packet associated to *skb*.
2175 *
2176 * A call to this helper is susceptible to change the underlying
2177 * packet buffer. Therefore, at load time, all checks on pointers
2178 * previously done by the verifier are invalidated and must be
2179 * performed again, if the helper is used in combination with
2180 * direct packet access.
2181 * Return
2182 * 0 on success, or a negative error in case of failure.
2183 *
2184 * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
2185 * Description
2186 * Get tunnel metadata. This helper takes a pointer *key* to an
2187 * empty **struct bpf_tunnel_key** of **size**, that will be
2188 * filled with tunnel metadata for the packet associated to *skb*.
2189 * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
2190 * indicates that the tunnel is based on IPv6 protocol instead of
2191 * IPv4.
2192 *
2193 * The **struct bpf_tunnel_key** is an object that generalizes the
2194 * principal parameters used by various tunneling protocols into a
2195 * single struct. This way, it can be used to easily make a
2196 * decision based on the contents of the encapsulation header,
2197 * "summarized" in this struct. In particular, it holds the IP
2198 * address of the remote end (IPv4 or IPv6, depending on the case)
2199 * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
2200 * this struct exposes the *key*\ **->tunnel_id**, which is
2201 * generally mapped to a VNI (Virtual Network Identifier), making
2202 * it programmable together with the **bpf_skb_set_tunnel_key**\
2203 * () helper.
2204 *
2205 * Let's imagine that the following code is part of a program
2206 * attached to the TC ingress interface, on one end of a GRE
2207 * tunnel, and is supposed to filter out all messages coming from
2208 * remote ends with IPv4 address other than 10.0.0.1:
2209 *
2210 * ::
2211 *
2212 * int ret;
2213 * struct bpf_tunnel_key key = {};
2214 *
2215 * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
2216 * if (ret < 0)
2217 * return TC_ACT_SHOT; // drop packet
2218 *
2219 * if (key.remote_ipv4 != 0x0a000001)
2220 * return TC_ACT_SHOT; // drop packet
2221 *
2222 * return TC_ACT_OK; // accept packet
2223 *
2224 * This interface can also be used with all encapsulation devices
2225 * that can operate in "collect metadata" mode: instead of having
2226 * one network device per specific configuration, the "collect
2227 * metadata" mode only requires a single device where the
2228 * configuration can be extracted from this helper.
2229 *
2230 * This can be used together with various tunnels such as VXLan,
2231 * Geneve, GRE or IP in IP (IPIP).
2232 * Return
2233 * 0 on success, or a negative error in case of failure.
2234 *
2235 * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
2236 * Description
2237 * Populate tunnel metadata for packet associated to *skb.* The
2238 * tunnel metadata is set to the contents of *key*, of *size*. The
2239 * *flags* can be set to a combination of the following values:
2240 *
2241 * **BPF_F_TUNINFO_IPV6**
2242 * Indicate that the tunnel is based on IPv6 protocol
2243 * instead of IPv4.
2244 * **BPF_F_ZERO_CSUM_TX**
2245 * For IPv4 packets, add a flag to tunnel metadata
2246 * indicating that checksum computation should be skipped
2247 * and checksum set to zeroes.
2248 * **BPF_F_DONT_FRAGMENT**
2249 * Add a flag to tunnel metadata indicating that the
2250 * packet should not be fragmented.
2251 * **BPF_F_SEQ_NUMBER**
2252 * Add a flag to tunnel metadata indicating that a
2253 * sequence number should be added to tunnel header before
2254 * sending the packet. This flag was added for GRE
2255 * encapsulation, but might be used with other protocols
2256 * as well in the future.
2257 * **BPF_F_NO_TUNNEL_KEY**
2258 * Add a flag to tunnel metadata indicating that no tunnel
2259 * key should be set in the resulting tunnel header.
2260 *
2261 * Here is a typical usage on the transmit path:
2262 *
2263 * ::
2264 *
2265 * struct bpf_tunnel_key key;
2266 * populate key ...
2267 * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
2268 * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
2269 *
2270 * See also the description of the **bpf_skb_get_tunnel_key**\ ()
2271 * helper for additional information.
2272 * Return
2273 * 0 on success, or a negative error in case of failure.
2274 *
2275 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
2276 * Description
2277 * Read the value of a perf event counter. This helper relies on a
2278 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
2279 * the perf event counter is selected when *map* is updated with
2280 * perf event file descriptors. The *map* is an array whose size
2281 * is the number of available CPUs, and each cell contains a value
2282 * relative to one CPU. The value to retrieve is indicated by
2283 * *flags*, that contains the index of the CPU to look up, masked
2284 * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
2285 * **BPF_F_CURRENT_CPU** to indicate that the value for the
2286 * current CPU should be retrieved.
2287 *
2288 * Note that before Linux 4.13, only hardware perf event can be
2289 * retrieved.
2290 *
2291 * Also, be aware that the newer helper
2292 * **bpf_perf_event_read_value**\ () is recommended over
2293 * **bpf_perf_event_read**\ () in general. The latter has some ABI
2294 * quirks where error and counter value are used as a return code
2295 * (which is wrong to do since ranges may overlap). This issue is
2296 * fixed with **bpf_perf_event_read_value**\ (), which at the same
2297 * time provides more features over the **bpf_perf_event_read**\
2298 * () interface. Please refer to the description of
2299 * **bpf_perf_event_read_value**\ () for details.
2300 * Return
2301 * The value of the perf event counter read from the map, or a
2302 * negative error code in case of failure.
2303 *
2304 * long bpf_redirect(u32 ifindex, u64 flags)
2305 * Description
2306 * Redirect the packet to another net device of index *ifindex*.
2307 * This helper is somewhat similar to **bpf_clone_redirect**\
2308 * (), except that the packet is not cloned, which provides
2309 * increased performance.
2310 *
2311 * Except for XDP, both ingress and egress interfaces can be used
2312 * for redirection. The **BPF_F_INGRESS** value in *flags* is used
2313 * to make the distinction (ingress path is selected if the flag
2314 * is present, egress path otherwise). Currently, XDP only
2315 * supports redirection to the egress interface, and accepts no
2316 * flag at all.
2317 *
2318 * The same effect can also be attained with the more generic
2319 * **bpf_redirect_map**\ (), which uses a BPF map to store the
2320 * redirect target instead of providing it directly to the helper.
2321 * Return
2322 * For XDP, the helper returns **XDP_REDIRECT** on success or
2323 * **XDP_ABORTED** on error. For other program types, the values
2324 * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
2325 * error.
2326 *
2327 * u32 bpf_get_route_realm(struct sk_buff *skb)
2328 * Description
2329 * Retrieve the realm or the route, that is to say the
2330 * **tclassid** field of the destination for the *skb*. The
2331 * identifier retrieved is a user-provided tag, similar to the
2332 * one used with the net_cls cgroup (see description for
2333 * **bpf_get_cgroup_classid**\ () helper), but here this tag is
2334 * held by a route (a destination entry), not by a task.
2335 *
2336 * Retrieving this identifier works with the clsact TC egress hook
2337 * (see also **tc-bpf(8)**), or alternatively on conventional
2338 * classful egress qdiscs, but not on TC ingress path. In case of
2339 * clsact TC egress hook, this has the advantage that, internally,
2340 * the destination entry has not been dropped yet in the transmit
2341 * path. Therefore, the destination entry does not need to be
2342 * artificially held via **netif_keep_dst**\ () for a classful
2343 * qdisc until the *skb* is freed.
2344 *
2345 * This helper is available only if the kernel was compiled with
2346 * **CONFIG_IP_ROUTE_CLASSID** configuration option.
2347 * Return
2348 * The realm of the route for the packet associated to *skb*, or 0
2349 * if none was found.
2350 *
2351 * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
2352 * Description
2353 * Write raw *data* blob into a special BPF perf event held by
2354 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
2355 * event must have the following attributes: **PERF_SAMPLE_RAW**
2356 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
2357 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
2358 *
2359 * The *flags* are used to indicate the index in *map* for which
2360 * the value must be put, masked with **BPF_F_INDEX_MASK**.
2361 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
2362 * to indicate that the index of the current CPU core should be
2363 * used.
2364 *
2365 * The value to write, of *size*, is passed through eBPF stack and
2366 * pointed by *data*.
2367 *
2368 * The context of the program *ctx* needs also be passed to the
2369 * helper.
2370 *
2371 * On user space, a program willing to read the values needs to
2372 * call **perf_event_open**\ () on the perf event (either for
2373 * one or for all CPUs) and to store the file descriptor into the
2374 * *map*. This must be done before the eBPF program can send data
2375 * into it. An example is available in file
2376 * *samples/bpf/trace_output_user.c* in the Linux kernel source
2377 * tree (the eBPF program counterpart is in
2378 * *samples/bpf/trace_output_kern.c*).
2379 *
2380 * **bpf_perf_event_output**\ () achieves better performance
2381 * than **bpf_trace_printk**\ () for sharing data with user
2382 * space, and is much better suitable for streaming data from eBPF
2383 * programs.
2384 *
2385 * Note that this helper is not restricted to tracing use cases
2386 * and can be used with programs attached to TC or XDP as well,
2387 * where it allows for passing data to user space listeners. Data
2388 * can be:
2389 *
2390 * * Only custom structs,
2391 * * Only the packet payload, or
2392 * * A combination of both.
2393 * Return
2394 * 0 on success, or a negative error in case of failure.
2395 *
2396 * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
2397 * Description
2398 * This helper was provided as an easy way to load data from a
2399 * packet. It can be used to load *len* bytes from *offset* from
2400 * the packet associated to *skb*, into the buffer pointed by
2401 * *to*.
2402 *
2403 * Since Linux 4.7, usage of this helper has mostly been replaced
2404 * by "direct packet access", enabling packet data to be
2405 * manipulated with *skb*\ **->data** and *skb*\ **->data_end**
2406 * pointing respectively to the first byte of packet data and to
2407 * the byte after the last byte of packet data. However, it
2408 * remains useful if one wishes to read large quantities of data
2409 * at once from a packet into the eBPF stack.
2410 * Return
2411 * 0 on success, or a negative error in case of failure.
2412 *
2413 * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
2414 * Description
2415 * Walk a user or a kernel stack and return its id. To achieve
2416 * this, the helper needs *ctx*, which is a pointer to the context
2417 * on which the tracing program is executed, and a pointer to a
2418 * *map* of type **BPF_MAP_TYPE_STACK_TRACE**.
2419 *
2420 * The last argument, *flags*, holds the number of stack frames to
2421 * skip (from 0 to 255), masked with
2422 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
2423 * a combination of the following flags:
2424 *
2425 * **BPF_F_USER_STACK**
2426 * Collect a user space stack instead of a kernel stack.
2427 * **BPF_F_FAST_STACK_CMP**
2428 * Compare stacks by hash only.
2429 * **BPF_F_REUSE_STACKID**
2430 * If two different stacks hash into the same *stackid*,
2431 * discard the old one.
2432 *
2433 * The stack id retrieved is a 32 bit long integer handle which
2434 * can be further combined with other data (including other stack
2435 * ids) and used as a key into maps. This can be useful for
2436 * generating a variety of graphs (such as flame graphs or off-cpu
2437 * graphs).
2438 *
2439 * For walking a stack, this helper is an improvement over
2440 * **bpf_probe_read**\ (), which can be used with unrolled loops
2441 * but is not efficient and consumes a lot of eBPF instructions.
2442 * Instead, **bpf_get_stackid**\ () can collect up to
2443 * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
2444 * this limit can be controlled with the **sysctl** program, and
2445 * that it should be manually increased in order to profile long
2446 * user stacks (such as stacks for Java programs). To do so, use:
2447 *
2448 * ::
2449 *
2450 * # sysctl kernel.perf_event_max_stack=<new value>
2451 * Return
2452 * The positive or null stack id on success, or a negative error
2453 * in case of failure.
2454 *
2455 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
2456 * Description
2457 * Compute a checksum difference, from the raw buffer pointed by
2458 * *from*, of length *from_size* (that must be a multiple of 4),
2459 * towards the raw buffer pointed by *to*, of size *to_size*
2460 * (same remark). An optional *seed* can be added to the value
2461 * (this can be cascaded, the seed may come from a previous call
2462 * to the helper).
2463 *
2464 * This is flexible enough to be used in several ways:
2465 *
2466 * * With *from_size* == 0, *to_size* > 0 and *seed* set to
2467 * checksum, it can be used when pushing new data.
2468 * * With *from_size* > 0, *to_size* == 0 and *seed* set to
2469 * checksum, it can be used when removing data from a packet.
2470 * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
2471 * can be used to compute a diff. Note that *from_size* and
2472 * *to_size* do not need to be equal.
2473 *
2474 * This helper can be used in combination with
2475 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
2476 * which one can feed in the difference computed with
2477 * **bpf_csum_diff**\ ().
2478 * Return
2479 * The checksum result, or a negative error code in case of
2480 * failure.
2481 *
2482 * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
2483 * Description
2484 * Retrieve tunnel options metadata for the packet associated to
2485 * *skb*, and store the raw tunnel option data to the buffer *opt*
2486 * of *size*.
2487 *
2488 * This helper can be used with encapsulation devices that can
2489 * operate in "collect metadata" mode (please refer to the related
2490 * note in the description of **bpf_skb_get_tunnel_key**\ () for
2491 * more details). A particular example where this can be used is
2492 * in combination with the Geneve encapsulation protocol, where it
2493 * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper)
2494 * and retrieving arbitrary TLVs (Type-Length-Value headers) from
2495 * the eBPF program. This allows for full customization of these
2496 * headers.
2497 * Return
2498 * The size of the option data retrieved.
2499 *
2500 * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
2501 * Description
2502 * Set tunnel options metadata for the packet associated to *skb*
2503 * to the option data contained in the raw buffer *opt* of *size*.
2504 *
2505 * See also the description of the **bpf_skb_get_tunnel_opt**\ ()
2506 * helper for additional information.
2507 * Return
2508 * 0 on success, or a negative error in case of failure.
2509 *
2510 * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
2511 * Description
2512 * Change the protocol of the *skb* to *proto*. Currently
2513 * supported are transition from IPv4 to IPv6, and from IPv6 to
2514 * IPv4. The helper takes care of the groundwork for the
2515 * transition, including resizing the socket buffer. The eBPF
2516 * program is expected to fill the new headers, if any, via
2517 * **skb_store_bytes**\ () and to recompute the checksums with
2518 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
2519 * (). The main case for this helper is to perform NAT64
2520 * operations out of an eBPF program.
2521 *
2522 * Internally, the GSO type is marked as dodgy so that headers are
2523 * checked and segments are recalculated by the GSO/GRO engine.
2524 * The size for GSO target is adapted as well.
2525 *
2526 * All values for *flags* are reserved for future usage, and must
2527 * be left at zero.
2528 *
2529 * A call to this helper is susceptible to change the underlying
2530 * packet buffer. Therefore, at load time, all checks on pointers
2531 * previously done by the verifier are invalidated and must be
2532 * performed again, if the helper is used in combination with
2533 * direct packet access.
2534 * Return
2535 * 0 on success, or a negative error in case of failure.
2536 *
2537 * long bpf_skb_change_type(struct sk_buff *skb, u32 type)
2538 * Description
2539 * Change the packet type for the packet associated to *skb*. This
2540 * comes down to setting *skb*\ **->pkt_type** to *type*, except
2541 * the eBPF program does not have a write access to *skb*\
2542 * **->pkt_type** beside this helper. Using a helper here allows
2543 * for graceful handling of errors.
2544 *
2545 * The major use case is to change incoming *skb*s to
2546 * **PACKET_HOST** in a programmatic way instead of having to
2547 * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
2548 * example.
2549 *
2550 * Note that *type* only allows certain values. At this time, they
2551 * are:
2552 *
2553 * **PACKET_HOST**
2554 * Packet is for us.
2555 * **PACKET_BROADCAST**
2556 * Send packet to all.
2557 * **PACKET_MULTICAST**
2558 * Send packet to group.
2559 * **PACKET_OTHERHOST**
2560 * Send packet to someone else.
2561 * Return
2562 * 0 on success, or a negative error in case of failure.
2563 *
2564 * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
2565 * Description
2566 * Check whether *skb* is a descendant of the cgroup2 held by
2567 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
2568 * Return
2569 * The return value depends on the result of the test, and can be:
2570 *
2571 * * 0, if the *skb* failed the cgroup2 descendant test.
2572 * * 1, if the *skb* succeeded the cgroup2 descendant test.
2573 * * A negative error code, if an error occurred.
2574 *
2575 * u32 bpf_get_hash_recalc(struct sk_buff *skb)
2576 * Description
2577 * Retrieve the hash of the packet, *skb*\ **->hash**. If it is
2578 * not set, in particular if the hash was cleared due to mangling,
2579 * recompute this hash. Later accesses to the hash can be done
2580 * directly with *skb*\ **->hash**.
2581 *
2582 * Calling **bpf_set_hash_invalid**\ (), changing a packet
2583 * prototype with **bpf_skb_change_proto**\ (), or calling
2584 * **bpf_skb_store_bytes**\ () with the
2585 * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear
2586 * the hash and to trigger a new computation for the next call to
2587 * **bpf_get_hash_recalc**\ ().
2588 * Return
2589 * The 32-bit hash.
2590 *
2591 * u64 bpf_get_current_task(void)
2592 * Description
2593 * Get the current task.
2594 * Return
2595 * A pointer to the current task struct.
2596 *
2597 * long bpf_probe_write_user(void *dst, const void *src, u32 len)
2598 * Description
2599 * Attempt in a safe way to write *len* bytes from the buffer
2600 * *src* to *dst* in memory. It only works for threads that are in
2601 * user context, and *dst* must be a valid user space address.
2602 *
2603 * This helper should not be used to implement any kind of
2604 * security mechanism because of TOC-TOU attacks, but rather to
2605 * debug, divert, and manipulate execution of semi-cooperative
2606 * processes.
2607 *
2608 * Keep in mind that this feature is meant for experiments, and it
2609 * has a risk of crashing the system and running programs.
2610 * Therefore, when an eBPF program using this helper is attached,
2611 * a warning including PID and process name is printed to kernel
2612 * logs.
2613 * Return
2614 * 0 on success, or a negative error in case of failure.
2615 *
2616 * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
2617 * Description
2618 * Check whether the probe is being run is the context of a given
2619 * subset of the cgroup2 hierarchy. The cgroup2 to test is held by
2620 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
2621 * Return
2622 * The return value depends on the result of the test, and can be:
2623 *
2624 * * 1, if current task belongs to the cgroup2.
2625 * * 0, if current task does not belong to the cgroup2.
2626 * * A negative error code, if an error occurred.
2627 *
2628 * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
2629 * Description
2630 * Resize (trim or grow) the packet associated to *skb* to the
2631 * new *len*. The *flags* are reserved for future usage, and must
2632 * be left at zero.
2633 *
2634 * The basic idea is that the helper performs the needed work to
2635 * change the size of the packet, then the eBPF program rewrites
2636 * the rest via helpers like **bpf_skb_store_bytes**\ (),
2637 * **bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ ()
2638 * and others. This helper is a slow path utility intended for
2639 * replies with control messages. And because it is targeted for
2640 * slow path, the helper itself can afford to be slow: it
2641 * implicitly linearizes, unclones and drops offloads from the
2642 * *skb*.
2643 *
2644 * A call to this helper is susceptible to change the underlying
2645 * packet buffer. Therefore, at load time, all checks on pointers
2646 * previously done by the verifier are invalidated and must be
2647 * performed again, if the helper is used in combination with
2648 * direct packet access.
2649 * Return
2650 * 0 on success, or a negative error in case of failure.
2651 *
2652 * long bpf_skb_pull_data(struct sk_buff *skb, u32 len)
2653 * Description
2654 * Pull in non-linear data in case the *skb* is non-linear and not
2655 * all of *len* are part of the linear section. Make *len* bytes
2656 * from *skb* readable and writable. If a zero value is passed for
2657 * *len*, then all bytes in the linear part of *skb* will be made
2658 * readable and writable.
2659 *
2660 * This helper is only needed for reading and writing with direct
2661 * packet access.
2662 *
2663 * For direct packet access, testing that offsets to access
2664 * are within packet boundaries (test on *skb*\ **->data_end**) is
2665 * susceptible to fail if offsets are invalid, or if the requested
2666 * data is in non-linear parts of the *skb*. On failure the
2667 * program can just bail out, or in the case of a non-linear
2668 * buffer, use a helper to make the data available. The
2669 * **bpf_skb_load_bytes**\ () helper is a first solution to access
2670 * the data. Another one consists in using **bpf_skb_pull_data**
2671 * to pull in once the non-linear parts, then retesting and
2672 * eventually access the data.
2673 *
2674 * At the same time, this also makes sure the *skb* is uncloned,
2675 * which is a necessary condition for direct write. As this needs
2676 * to be an invariant for the write part only, the verifier
2677 * detects writes and adds a prologue that is calling
2678 * **bpf_skb_pull_data()** to effectively unclone the *skb* from
2679 * the very beginning in case it is indeed cloned.
2680 *
2681 * A call to this helper is susceptible to change the underlying
2682 * packet buffer. Therefore, at load time, all checks on pointers
2683 * previously done by the verifier are invalidated and must be
2684 * performed again, if the helper is used in combination with
2685 * direct packet access.
2686 * Return
2687 * 0 on success, or a negative error in case of failure.
2688 *
2689 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
2690 * Description
2691 * Add the checksum *csum* into *skb*\ **->csum** in case the
2692 * driver has supplied a checksum for the entire packet into that
2693 * field. Return an error otherwise. This helper is intended to be
2694 * used in combination with **bpf_csum_diff**\ (), in particular
2695 * when the checksum needs to be updated after data has been
2696 * written into the packet through direct packet access.
2697 * Return
2698 * The checksum on success, or a negative error code in case of
2699 * failure.
2700 *
2701 * void bpf_set_hash_invalid(struct sk_buff *skb)
2702 * Description
2703 * Invalidate the current *skb*\ **->hash**. It can be used after
2704 * mangling on headers through direct packet access, in order to
2705 * indicate that the hash is outdated and to trigger a
2706 * recalculation the next time the kernel tries to access this
2707 * hash or when the **bpf_get_hash_recalc**\ () helper is called.
2708 * Return
2709 * void.
2710 *
2711 * long bpf_get_numa_node_id(void)
2712 * Description
2713 * Return the id of the current NUMA node. The primary use case
2714 * for this helper is the selection of sockets for the local NUMA
2715 * node, when the program is attached to sockets using the
2716 * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
2717 * but the helper is also available to other eBPF program types,
2718 * similarly to **bpf_get_smp_processor_id**\ ().
2719 * Return
2720 * The id of current NUMA node.
2721 *
2722 * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
2723 * Description
2724 * Grows headroom of packet associated to *skb* and adjusts the
2725 * offset of the MAC header accordingly, adding *len* bytes of
2726 * space. It automatically extends and reallocates memory as
2727 * required.
2728 *
2729 * This helper can be used on a layer 3 *skb* to push a MAC header
2730 * for redirection into a layer 2 device.
2731 *
2732 * All values for *flags* are reserved for future usage, and must
2733 * be left at zero.
2734 *
2735 * A call to this helper is susceptible to change the underlying
2736 * packet buffer. Therefore, at load time, all checks on pointers
2737 * previously done by the verifier are invalidated and must be
2738 * performed again, if the helper is used in combination with
2739 * direct packet access.
2740 * Return
2741 * 0 on success, or a negative error in case of failure.
2742 *
2743 * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
2744 * Description
2745 * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
2746 * it is possible to use a negative value for *delta*. This helper
2747 * can be used to prepare the packet for pushing or popping
2748 * headers.
2749 *
2750 * A call to this helper is susceptible to change the underlying
2751 * packet buffer. Therefore, at load time, all checks on pointers
2752 * previously done by the verifier are invalidated and must be
2753 * performed again, if the helper is used in combination with
2754 * direct packet access.
2755 * Return
2756 * 0 on success, or a negative error in case of failure.
2757 *
2758 * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
2759 * Description
2760 * Copy a NUL terminated string from an unsafe kernel address
2761 * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
2762 * more details.
2763 *
2764 * Generally, use **bpf_probe_read_user_str**\ () or
2765 * **bpf_probe_read_kernel_str**\ () instead.
2766 * Return
2767 * On success, the strictly positive length of the string,
2768 * including the trailing NUL character. On error, a negative
2769 * value.
2770 *
2771 * u64 bpf_get_socket_cookie(struct sk_buff *skb)
2772 * Description
2773 * If the **struct sk_buff** pointed by *skb* has a known socket,
2774 * retrieve the cookie (generated by the kernel) of this socket.
2775 * If no cookie has been set yet, generate a new cookie. Once
2776 * generated, the socket cookie remains stable for the life of the
2777 * socket. This helper can be useful for monitoring per socket
2778 * networking traffic statistics as it provides a global socket
2779 * identifier that can be assumed unique.
2780 * Return
2781 * A 8-byte long unique number on success, or 0 if the socket
2782 * field is missing inside *skb*.
2783 *
2784 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
2785 * Description
2786 * Equivalent to bpf_get_socket_cookie() helper that accepts
2787 * *skb*, but gets socket from **struct bpf_sock_addr** context.
2788 * Return
2789 * A 8-byte long unique number.
2790 *
2791 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
2792 * Description
2793 * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
2794 * *skb*, but gets socket from **struct bpf_sock_ops** context.
2795 * Return
2796 * A 8-byte long unique number.
2797 *
2798 * u64 bpf_get_socket_cookie(struct sock *sk)
2799 * Description
2800 * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
2801 * *sk*, but gets socket from a BTF **struct sock**. This helper
2802 * also works for sleepable programs.
2803 * Return
2804 * A 8-byte long unique number or 0 if *sk* is NULL.
2805 *
2806 * u32 bpf_get_socket_uid(struct sk_buff *skb)
2807 * Description
2808 * Get the owner UID of the socked associated to *skb*.
2809 * Return
2810 * The owner UID of the socket associated to *skb*. If the socket
2811 * is **NULL**, or if it is not a full socket (i.e. if it is a
2812 * time-wait or a request socket instead), **overflowuid** value
2813 * is returned (note that **overflowuid** might also be the actual
2814 * UID value for the socket).
2815 *
2816 * long bpf_set_hash(struct sk_buff *skb, u32 hash)
2817 * Description
2818 * Set the full hash for *skb* (set the field *skb*\ **->hash**)
2819 * to value *hash*.
2820 * Return
2821 * 0
2822 *
2823 * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
2824 * Description
2825 * Emulate a call to **setsockopt()** on the socket associated to
2826 * *bpf_socket*, which must be a full socket. The *level* at
2827 * which the option resides and the name *optname* of the option
2828 * must be specified, see **setsockopt(2)** for more information.
2829 * The option value of length *optlen* is pointed by *optval*.
2830 *
2831 * *bpf_socket* should be one of the following:
2832 *
2833 * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
2834 * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
2835 * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
2836 *
2837 * This helper actually implements a subset of **setsockopt()**.
2838 * It supports the following *level*\ s:
2839 *
2840 * * **SOL_SOCKET**, which supports the following *optname*\ s:
2841 * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
2842 * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
2843 * **SO_BINDTODEVICE**, **SO_KEEPALIVE**, **SO_REUSEADDR**,
2844 * **SO_REUSEPORT**, **SO_BINDTOIFINDEX**, **SO_TXREHASH**.
2845 * * **IPPROTO_TCP**, which supports the following *optname*\ s:
2846 * **TCP_CONGESTION**, **TCP_BPF_IW**,
2847 * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
2848 * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
2849 * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
2850 * **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
2851 * **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
2852 * **TCP_BPF_RTO_MIN**.
2853 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
2854 * * **IPPROTO_IPV6**, which supports the following *optname*\ s:
2855 * **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
2856 * Return
2857 * 0 on success, or a negative error in case of failure.
2858 *
2859 * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
2860 * Description
2861 * Grow or shrink the room for data in the packet associated to
2862 * *skb* by *len_diff*, and according to the selected *mode*.
2863 *
2864 * By default, the helper will reset any offloaded checksum
2865 * indicator of the skb to CHECKSUM_NONE. This can be avoided
2866 * by the following flag:
2867 *
2868 * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
2869 * checksum data of the skb to CHECKSUM_NONE.
2870 *
2871 * There are two supported modes at this time:
2872 *
2873 * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
2874 * (room space is added or removed between the layer 2 and
2875 * layer 3 headers).
2876 *
2877 * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
2878 * (room space is added or removed between the layer 3 and
2879 * layer 4 headers).
2880 *
2881 * The following flags are supported at this time:
2882 *
2883 * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
2884 * Adjusting mss in this way is not allowed for datagrams.
2885 *
2886 * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
2887 * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
2888 * Any new space is reserved to hold a tunnel header.
2889 * Configure skb offsets and other fields accordingly.
2890 *
2891 * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
2892 * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
2893 * Use with ENCAP_L3 flags to further specify the tunnel type.
2894 *
2895 * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
2896 * Use with ENCAP_L3/L4 flags to further specify the tunnel
2897 * type; *len* is the length of the inner MAC header.
2898 *
2899 * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**:
2900 * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
2901 * L2 type as Ethernet.
2902 *
2903 * * **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**,
2904 * **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**:
2905 * Indicate the new IP header version after decapsulating the outer
2906 * IP header. Used when the inner and outer IP versions are different.
2907 *
2908 * A call to this helper is susceptible to change the underlying
2909 * packet buffer. Therefore, at load time, all checks on pointers
2910 * previously done by the verifier are invalidated and must be
2911 * performed again, if the helper is used in combination with
2912 * direct packet access.
2913 * Return
2914 * 0 on success, or a negative error in case of failure.
2915 *
2916 * long bpf_redirect_map(struct bpf_map *map, u64 key, u64 flags)
2917 * Description
2918 * Redirect the packet to the endpoint referenced by *map* at
2919 * index *key*. Depending on its type, this *map* can contain
2920 * references to net devices (for forwarding packets through other
2921 * ports), or to CPUs (for redirecting XDP frames to another CPU;
2922 * but this is only implemented for native XDP (with driver
2923 * support) as of this writing).
2924 *
2925 * The lower two bits of *flags* are used as the return code if
2926 * the map lookup fails. This is so that the return value can be
2927 * one of the XDP program return codes up to **XDP_TX**, as chosen
2928 * by the caller. The higher bits of *flags* can be set to
2929 * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below.
2930 *
2931 * With BPF_F_BROADCAST the packet will be broadcasted to all the
2932 * interfaces in the map, with BPF_F_EXCLUDE_INGRESS the ingress
2933 * interface will be excluded when do broadcasting.
2934 *
2935 * See also **bpf_redirect**\ (), which only supports redirecting
2936 * to an ifindex, but doesn't require a map to do so.
2937 * Return
2938 * **XDP_REDIRECT** on success, or the value of the two lower bits
2939 * of the *flags* argument on error.
2940 *
2941 * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
2942 * Description
2943 * Redirect the packet to the socket referenced by *map* (of type
2944 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
2945 * egress interfaces can be used for redirection. The
2946 * **BPF_F_INGRESS** value in *flags* is used to make the
2947 * distinction (ingress path is selected if the flag is present,
2948 * egress path otherwise). This is the only flag supported for now.
2949 * Return
2950 * **SK_PASS** on success, or **SK_DROP** on error.
2951 *
2952 * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
2953 * Description
2954 * Add an entry to, or update a *map* referencing sockets. The
2955 * *skops* is used as a new value for the entry associated to
2956 * *key*. *flags* is one of:
2957 *
2958 * **BPF_NOEXIST**
2959 * The entry for *key* must not exist in the map.
2960 * **BPF_EXIST**
2961 * The entry for *key* must already exist in the map.
2962 * **BPF_ANY**
2963 * No condition on the existence of the entry for *key*.
2964 *
2965 * If the *map* has eBPF programs (parser and verdict), those will
2966 * be inherited by the socket being added. If the socket is
2967 * already attached to eBPF programs, this results in an error.
2968 * Return
2969 * 0 on success, or a negative error in case of failure.
2970 *
2971 * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
2972 * Description
2973 * Adjust the address pointed by *xdp_md*\ **->data_meta** by
2974 * *delta* (which can be positive or negative). Note that this
2975 * operation modifies the address stored in *xdp_md*\ **->data**,
2976 * so the latter must be loaded only after the helper has been
2977 * called.
2978 *
2979 * The use of *xdp_md*\ **->data_meta** is optional and programs
2980 * are not required to use it. The rationale is that when the
2981 * packet is processed with XDP (e.g. as DoS filter), it is
2982 * possible to push further meta data along with it before passing
2983 * to the stack, and to give the guarantee that an ingress eBPF
2984 * program attached as a TC classifier on the same device can pick
2985 * this up for further post-processing. Since TC works with socket
2986 * buffers, it remains possible to set from XDP the **mark** or
2987 * **priority** pointers, or other pointers for the socket buffer.
2988 * Having this scratch space generic and programmable allows for
2989 * more flexibility as the user is free to store whatever meta
2990 * data they need.
2991 *
2992 * A call to this helper is susceptible to change the underlying
2993 * packet buffer. Therefore, at load time, all checks on pointers
2994 * previously done by the verifier are invalidated and must be
2995 * performed again, if the helper is used in combination with
2996 * direct packet access.
2997 * Return
2998 * 0 on success, or a negative error in case of failure.
2999 *
3000 * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
3001 * Description
3002 * Read the value of a perf event counter, and store it into *buf*
3003 * of size *buf_size*. This helper relies on a *map* of type
3004 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
3005 * counter is selected when *map* is updated with perf event file
3006 * descriptors. The *map* is an array whose size is the number of
3007 * available CPUs, and each cell contains a value relative to one
3008 * CPU. The value to retrieve is indicated by *flags*, that
3009 * contains the index of the CPU to look up, masked with
3010 * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
3011 * **BPF_F_CURRENT_CPU** to indicate that the value for the
3012 * current CPU should be retrieved.
3013 *
3014 * This helper behaves in a way close to
3015 * **bpf_perf_event_read**\ () helper, save that instead of
3016 * just returning the value observed, it fills the *buf*
3017 * structure. This allows for additional data to be retrieved: in
3018 * particular, the enabled and running times (in *buf*\
3019 * **->enabled** and *buf*\ **->running**, respectively) are
3020 * copied. In general, **bpf_perf_event_read_value**\ () is
3021 * recommended over **bpf_perf_event_read**\ (), which has some
3022 * ABI issues and provides fewer functionalities.
3023 *
3024 * These values are interesting, because hardware PMU (Performance
3025 * Monitoring Unit) counters are limited resources. When there are
3026 * more PMU based perf events opened than available counters,
3027 * kernel will multiplex these events so each event gets certain
3028 * percentage (but not all) of the PMU time. In case that
3029 * multiplexing happens, the number of samples or counter value
3030 * will not reflect the case compared to when no multiplexing
3031 * occurs. This makes comparison between different runs difficult.
3032 * Typically, the counter value should be normalized before
3033 * comparing to other experiments. The usual normalization is done
3034 * as follows.
3035 *
3036 * ::
3037 *
3038 * normalized_counter = counter * t_enabled / t_running
3039 *
3040 * Where t_enabled is the time enabled for event and t_running is
3041 * the time running for event since last normalization. The
3042 * enabled and running times are accumulated since the perf event
3043 * open. To achieve scaling factor between two invocations of an
3044 * eBPF program, users can use CPU id as the key (which is
3045 * typical for perf array usage model) to remember the previous
3046 * value and do the calculation inside the eBPF program.
3047 * Return
3048 * 0 on success, or a negative error in case of failure.
3049 *
3050 * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
3051 * Description
3052 * For an eBPF program attached to a perf event, retrieve the
3053 * value of the event counter associated to *ctx* and store it in
3054 * the structure pointed by *buf* and of size *buf_size*. Enabled
3055 * and running times are also stored in the structure (see
3056 * description of helper **bpf_perf_event_read_value**\ () for
3057 * more details).
3058 * Return
3059 * 0 on success, or a negative error in case of failure.
3060 *
3061 * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
3062 * Description
3063 * Emulate a call to **getsockopt()** on the socket associated to
3064 * *bpf_socket*, which must be a full socket. The *level* at
3065 * which the option resides and the name *optname* of the option
3066 * must be specified, see **getsockopt(2)** for more information.
3067 * The retrieved value is stored in the structure pointed by
3068 * *opval* and of length *optlen*.
3069 *
3070 * *bpf_socket* should be one of the following:
3071 *
3072 * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
3073 * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
3074 * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
3075 *
3076 * This helper actually implements a subset of **getsockopt()**.
3077 * It supports the same set of *optname*\ s that is supported by
3078 * the **bpf_setsockopt**\ () helper. The exceptions are
3079 * **TCP_BPF_*** is **bpf_setsockopt**\ () only and
3080 * **TCP_SAVED_SYN** is **bpf_getsockopt**\ () only.
3081 * Return
3082 * 0 on success, or a negative error in case of failure.
3083 *
3084 * long bpf_override_return(struct pt_regs *regs, u64 rc)
3085 * Description
3086 * Used for error injection, this helper uses kprobes to override
3087 * the return value of the probed function, and to set it to *rc*.
3088 * The first argument is the context *regs* on which the kprobe
3089 * works.
3090 *
3091 * This helper works by setting the PC (program counter)
3092 * to an override function which is run in place of the original
3093 * probed function. This means the probed function is not run at
3094 * all. The replacement function just returns with the required
3095 * value.
3096 *
3097 * This helper has security implications, and thus is subject to
3098 * restrictions. It is only available if the kernel was compiled
3099 * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
3100 * option, and in this case it only works on functions tagged with
3101 * **ALLOW_ERROR_INJECTION** in the kernel code.
3102 *
3103 * Also, the helper is only available for the architectures having
3104 * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
3105 * x86 architecture is the only one to support this feature.
3106 * Return
3107 * 0
3108 *
3109 * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
3110 * Description
3111 * Attempt to set the value of the **bpf_sock_ops_cb_flags** field
3112 * for the full TCP socket associated to *bpf_sock_ops* to
3113 * *argval*.
3114 *
3115 * The primary use of this field is to determine if there should
3116 * be calls to eBPF programs of type
3117 * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
3118 * code. A program of the same type can change its value, per
3119 * connection and as necessary, when the connection is
3120 * established. This field is directly accessible for reading, but
3121 * this helper must be used for updates in order to return an
3122 * error if an eBPF program tries to set a callback that is not
3123 * supported in the current kernel.
3124 *
3125 * *argval* is a flag array which can combine these flags:
3126 *
3127 * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
3128 * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
3129 * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
3130 * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
3131 *
3132 * Therefore, this function can be used to clear a callback flag by
3133 * setting the appropriate bit to zero. e.g. to disable the RTO
3134 * callback:
3135 *
3136 * **bpf_sock_ops_cb_flags_set(bpf_sock,**
3137 * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
3138 *
3139 * Here are some examples of where one could call such eBPF
3140 * program:
3141 *
3142 * * When RTO fires.
3143 * * When a packet is retransmitted.
3144 * * When the connection terminates.
3145 * * When a packet is sent.
3146 * * When a packet is received.
3147 * Return
3148 * Code **-EINVAL** if the socket is not a full TCP socket;
3149 * otherwise, a positive number containing the bits that could not
3150 * be set is returned (which comes down to 0 if all bits were set
3151 * as required).
3152 *
3153 * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
3154 * Description
3155 * This helper is used in programs implementing policies at the
3156 * socket level. If the message *msg* is allowed to pass (i.e. if
3157 * the verdict eBPF program returns **SK_PASS**), redirect it to
3158 * the socket referenced by *map* (of type
3159 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
3160 * egress interfaces can be used for redirection. The
3161 * **BPF_F_INGRESS** value in *flags* is used to make the
3162 * distinction (ingress path is selected if the flag is present,
3163 * egress path otherwise). This is the only flag supported for now.
3164 * Return
3165 * **SK_PASS** on success, or **SK_DROP** on error.
3166 *
3167 * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
3168 * Description
3169 * For socket policies, apply the verdict of the eBPF program to
3170 * the next *bytes* (number of bytes) of message *msg*.
3171 *
3172 * For example, this helper can be used in the following cases:
3173 *
3174 * * A single **sendmsg**\ () or **sendfile**\ () system call
3175 * contains multiple logical messages that the eBPF program is
3176 * supposed to read and for which it should apply a verdict.
3177 * * An eBPF program only cares to read the first *bytes* of a
3178 * *msg*. If the message has a large payload, then setting up
3179 * and calling the eBPF program repeatedly for all bytes, even
3180 * though the verdict is already known, would create unnecessary
3181 * overhead.
3182 *
3183 * When called from within an eBPF program, the helper sets a
3184 * counter internal to the BPF infrastructure, that is used to
3185 * apply the last verdict to the next *bytes*. If *bytes* is
3186 * smaller than the current data being processed from a
3187 * **sendmsg**\ () or **sendfile**\ () system call, the first
3188 * *bytes* will be sent and the eBPF program will be re-run with
3189 * the pointer for start of data pointing to byte number *bytes*
3190 * **+ 1**. If *bytes* is larger than the current data being
3191 * processed, then the eBPF verdict will be applied to multiple
3192 * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are
3193 * consumed.
3194 *
3195 * Note that if a socket closes with the internal counter holding
3196 * a non-zero value, this is not a problem because data is not
3197 * being buffered for *bytes* and is sent as it is received.
3198 * Return
3199 * 0
3200 *
3201 * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
3202 * Description
3203 * For socket policies, prevent the execution of the verdict eBPF
3204 * program for message *msg* until *bytes* (byte number) have been
3205 * accumulated.
3206 *
3207 * This can be used when one needs a specific number of bytes
3208 * before a verdict can be assigned, even if the data spans
3209 * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
3210 * case would be a user calling **sendmsg**\ () repeatedly with
3211 * 1-byte long message segments. Obviously, this is bad for
3212 * performance, but it is still valid. If the eBPF program needs
3213 * *bytes* bytes to validate a header, this helper can be used to
3214 * prevent the eBPF program to be called again until *bytes* have
3215 * been accumulated.
3216 * Return
3217 * 0
3218 *
3219 * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
3220 * Description
3221 * For socket policies, pull in non-linear data from user space
3222 * for *msg* and set pointers *msg*\ **->data** and *msg*\
3223 * **->data_end** to *start* and *end* bytes offsets into *msg*,
3224 * respectively.
3225 *
3226 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
3227 * *msg* it can only parse data that the (**data**, **data_end**)
3228 * pointers have already consumed. For **sendmsg**\ () hooks this
3229 * is likely the first scatterlist element. But for calls relying
3230 * on the **sendpage** handler (e.g. **sendfile**\ ()) this will
3231 * be the range (**0**, **0**) because the data is shared with
3232 * user space and by default the objective is to avoid allowing
3233 * user space to modify data while (or after) eBPF verdict is
3234 * being decided. This helper can be used to pull in data and to
3235 * set the start and end pointer to given values. Data will be
3236 * copied if necessary (i.e. if data was not linear and if start
3237 * and end pointers do not point to the same chunk).
3238 *
3239 * A call to this helper is susceptible to change the underlying
3240 * packet buffer. Therefore, at load time, all checks on pointers
3241 * previously done by the verifier are invalidated and must be
3242 * performed again, if the helper is used in combination with
3243 * direct packet access.
3244 *
3245 * All values for *flags* are reserved for future usage, and must
3246 * be left at zero.
3247 * Return
3248 * 0 on success, or a negative error in case of failure.
3249 *
3250 * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
3251 * Description
3252 * Bind the socket associated to *ctx* to the address pointed by
3253 * *addr*, of length *addr_len*. This allows for making outgoing
3254 * connection from the desired IP address, which can be useful for
3255 * example when all processes inside a cgroup should use one
3256 * single IP address on a host that has multiple IP configured.
3257 *
3258 * This helper works for IPv4 and IPv6, TCP and UDP sockets. The
3259 * domain (*addr*\ **->sa_family**) must be **AF_INET** (or
3260 * **AF_INET6**). It's advised to pass zero port (**sin_port**
3261 * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like
3262 * behavior and lets the kernel efficiently pick up an unused
3263 * port as long as 4-tuple is unique. Passing non-zero port might
3264 * lead to degraded performance.
3265 * Return
3266 * 0 on success, or a negative error in case of failure.
3267 *
3268 * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
3269 * Description
3270 * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
3271 * possible to both shrink and grow the packet tail.
3272 * Shrink done via *delta* being a negative integer.
3273 *
3274 * A call to this helper is susceptible to change the underlying
3275 * packet buffer. Therefore, at load time, all checks on pointers
3276 * previously done by the verifier are invalidated and must be
3277 * performed again, if the helper is used in combination with
3278 * direct packet access.
3279 * Return
3280 * 0 on success, or a negative error in case of failure.
3281 *
3282 * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
3283 * Description
3284 * Retrieve the XFRM state (IP transform framework, see also
3285 * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
3286 *
3287 * The retrieved value is stored in the **struct bpf_xfrm_state**
3288 * pointed by *xfrm_state* and of length *size*.
3289 *
3290 * All values for *flags* are reserved for future usage, and must
3291 * be left at zero.
3292 *
3293 * This helper is available only if the kernel was compiled with
3294 * **CONFIG_XFRM** configuration option.
3295 * Return
3296 * 0 on success, or a negative error in case of failure.
3297 *
3298 * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
3299 * Description
3300 * Return a user or a kernel stack in bpf program provided buffer.
3301 * To achieve this, the helper needs *ctx*, which is a pointer
3302 * to the context on which the tracing program is executed.
3303 * To store the stacktrace, the bpf program provides *buf* with
3304 * a nonnegative *size*.
3305 *
3306 * The last argument, *flags*, holds the number of stack frames to
3307 * skip (from 0 to 255), masked with
3308 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
3309 * the following flags:
3310 *
3311 * **BPF_F_USER_STACK**
3312 * Collect a user space stack instead of a kernel stack.
3313 * **BPF_F_USER_BUILD_ID**
3314 * Collect (build_id, file_offset) instead of ips for user
3315 * stack, only valid if **BPF_F_USER_STACK** is also
3316 * specified.
3317 *
3318 * *file_offset* is an offset relative to the beginning
3319 * of the executable or shared object file backing the vma
3320 * which the *ip* falls in. It is *not* an offset relative
3321 * to that object's base address. Accordingly, it must be
3322 * adjusted by adding (sh_addr - sh_offset), where
3323 * sh_{addr,offset} correspond to the executable section
3324 * containing *file_offset* in the object, for comparisons
3325 * to symbols' st_value to be valid.
3326 *
3327 * **bpf_get_stack**\ () can collect up to
3328 * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
3329 * to sufficient large buffer size. Note that
3330 * this limit can be controlled with the **sysctl** program, and
3331 * that it should be manually increased in order to profile long
3332 * user stacks (such as stacks for Java programs). To do so, use:
3333 *
3334 * ::
3335 *
3336 * # sysctl kernel.perf_event_max_stack=<new value>
3337 * Return
3338 * The non-negative copied *buf* length equal to or less than
3339 * *size* on success, or a negative error in case of failure.
3340 *
3341 * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
3342 * Description
3343 * This helper is similar to **bpf_skb_load_bytes**\ () in that
3344 * it provides an easy way to load *len* bytes from *offset*
3345 * from the packet associated to *skb*, into the buffer pointed
3346 * by *to*. The difference to **bpf_skb_load_bytes**\ () is that
3347 * a fifth argument *start_header* exists in order to select a
3348 * base offset to start from. *start_header* can be one of:
3349 *
3350 * **BPF_HDR_START_MAC**
3351 * Base offset to load data from is *skb*'s mac header.
3352 * **BPF_HDR_START_NET**
3353 * Base offset to load data from is *skb*'s network header.
3354 *
3355 * In general, "direct packet access" is the preferred method to
3356 * access packet data, however, this helper is in particular useful
3357 * in socket filters where *skb*\ **->data** does not always point
3358 * to the start of the mac header and where "direct packet access"
3359 * is not available.
3360 * Return
3361 * 0 on success, or a negative error in case of failure.
3362 *
3363 * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
3364 * Description
3365 * Do FIB lookup in kernel tables using parameters in *params*.
3366 * If lookup is successful and result shows packet is to be
3367 * forwarded, the neighbor tables are searched for the nexthop.
3368 * If successful (ie., FIB lookup shows forwarding and nexthop
3369 * is resolved), the nexthop address is returned in ipv4_dst
3370 * or ipv6_dst based on family, smac is set to mac address of
3371 * egress device, dmac is set to nexthop mac address, rt_metric
3372 * is set to metric from route (IPv4/IPv6 only), and ifindex
3373 * is set to the device index of the nexthop from the FIB lookup.
3374 *
3375 * *plen* argument is the size of the passed in struct.
3376 * *flags* argument can be a combination of one or more of the
3377 * following values:
3378 *
3379 * **BPF_FIB_LOOKUP_DIRECT**
3380 * Do a direct table lookup vs full lookup using FIB
3381 * rules.
3382 * **BPF_FIB_LOOKUP_TBID**
3383 * Used with BPF_FIB_LOOKUP_DIRECT.
3384 * Use the routing table ID present in *params*->tbid
3385 * for the fib lookup.
3386 * **BPF_FIB_LOOKUP_OUTPUT**
3387 * Perform lookup from an egress perspective (default is
3388 * ingress).
3389 * **BPF_FIB_LOOKUP_SKIP_NEIGH**
3390 * Skip the neighbour table lookup. *params*->dmac
3391 * and *params*->smac will not be set as output. A common
3392 * use case is to call **bpf_redirect_neigh**\ () after
3393 * doing **bpf_fib_lookup**\ ().
3394 * **BPF_FIB_LOOKUP_SRC**
3395 * Derive and set source IP addr in *params*->ipv{4,6}_src
3396 * for the nexthop. If the src addr cannot be derived,
3397 * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
3398 * case, *params*->dmac and *params*->smac are not set either.
3399 * **BPF_FIB_LOOKUP_MARK**
3400 * Use the mark present in *params*->mark for the fib lookup.
3401 * This option should not be used with BPF_FIB_LOOKUP_DIRECT,
3402 * as it only has meaning for full lookups.
3403 *
3404 * *ctx* is either **struct xdp_md** for XDP programs or
3405 * **struct sk_buff** tc cls_act programs.
3406 * Return
3407 * * < 0 if any input argument is invalid
3408 * * 0 on success (packet is forwarded, nexthop neighbor exists)
3409 * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
3410 * packet is not forwarded or needs assist from full stack
3411 *
3412 * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU
3413 * was exceeded and output params->mtu_result contains the MTU.
3414 *
3415 * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
3416 * Description
3417 * Add an entry to, or update a sockhash *map* referencing sockets.
3418 * The *skops* is used as a new value for the entry associated to
3419 * *key*. *flags* is one of:
3420 *
3421 * **BPF_NOEXIST**
3422 * The entry for *key* must not exist in the map.
3423 * **BPF_EXIST**
3424 * The entry for *key* must already exist in the map.
3425 * **BPF_ANY**
3426 * No condition on the existence of the entry for *key*.
3427 *
3428 * If the *map* has eBPF programs (parser and verdict), those will
3429 * be inherited by the socket being added. If the socket is
3430 * already attached to eBPF programs, this results in an error.
3431 * Return
3432 * 0 on success, or a negative error in case of failure.
3433 *
3434 * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
3435 * Description
3436 * This helper is used in programs implementing policies at the
3437 * socket level. If the message *msg* is allowed to pass (i.e. if
3438 * the verdict eBPF program returns **SK_PASS**), redirect it to
3439 * the socket referenced by *map* (of type
3440 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
3441 * egress interfaces can be used for redirection. The
3442 * **BPF_F_INGRESS** value in *flags* is used to make the
3443 * distinction (ingress path is selected if the flag is present,
3444 * egress path otherwise). This is the only flag supported for now.
3445 * Return
3446 * **SK_PASS** on success, or **SK_DROP** on error.
3447 *
3448 * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
3449 * Description
3450 * This helper is used in programs implementing policies at the
3451 * skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
3452 * if the verdict eBPF program returns **SK_PASS**), redirect it
3453 * to the socket referenced by *map* (of type
3454 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
3455 * egress interfaces can be used for redirection. The
3456 * **BPF_F_INGRESS** value in *flags* is used to make the
3457 * distinction (ingress path is selected if the flag is present,
3458 * egress otherwise). This is the only flag supported for now.
3459 * Return
3460 * **SK_PASS** on success, or **SK_DROP** on error.
3461 *
3462 * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
3463 * Description
3464 * Encapsulate the packet associated to *skb* within a Layer 3
3465 * protocol header. This header is provided in the buffer at
3466 * address *hdr*, with *len* its size in bytes. *type* indicates
3467 * the protocol of the header and can be one of:
3468 *
3469 * **BPF_LWT_ENCAP_SEG6**
3470 * IPv6 encapsulation with Segment Routing Header
3471 * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
3472 * the IPv6 header is computed by the kernel.
3473 * **BPF_LWT_ENCAP_SEG6_INLINE**
3474 * Only works if *skb* contains an IPv6 packet. Insert a
3475 * Segment Routing Header (**struct ipv6_sr_hdr**) inside
3476 * the IPv6 header.
3477 * **BPF_LWT_ENCAP_IP**
3478 * IP encapsulation (GRE/GUE/IPIP/etc). The outer header
3479 * must be IPv4 or IPv6, followed by zero or more
3480 * additional headers, up to **LWT_BPF_MAX_HEADROOM**
3481 * total bytes in all prepended headers. Please note that
3482 * if **skb_is_gso**\ (*skb*) is true, no more than two
3483 * headers can be prepended, and the inner header, if
3484 * present, should be either GRE or UDP/GUE.
3485 *
3486 * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
3487 * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
3488 * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
3489 * **BPF_PROG_TYPE_LWT_XMIT**.
3490 *
3491 * A call to this helper is susceptible to change the underlying
3492 * packet buffer. Therefore, at load time, all checks on pointers
3493 * previously done by the verifier are invalidated and must be
3494 * performed again, if the helper is used in combination with
3495 * direct packet access.
3496 * Return
3497 * 0 on success, or a negative error in case of failure.
3498 *
3499 * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
3500 * Description
3501 * Store *len* bytes from address *from* into the packet
3502 * associated to *skb*, at *offset*. Only the flags, tag and TLVs
3503 * inside the outermost IPv6 Segment Routing Header can be
3504 * modified through this helper.
3505 *
3506 * A call to this helper is susceptible to change the underlying
3507 * packet buffer. Therefore, at load time, all checks on pointers
3508 * previously done by the verifier are invalidated and must be
3509 * performed again, if the helper is used in combination with
3510 * direct packet access.
3511 * Return
3512 * 0 on success, or a negative error in case of failure.
3513 *
3514 * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
3515 * Description
3516 * Adjust the size allocated to TLVs in the outermost IPv6
3517 * Segment Routing Header contained in the packet associated to
3518 * *skb*, at position *offset* by *delta* bytes. Only offsets
3519 * after the segments are accepted. *delta* can be as well
3520 * positive (growing) as negative (shrinking).
3521 *
3522 * A call to this helper is susceptible to change the underlying
3523 * packet buffer. Therefore, at load time, all checks on pointers
3524 * previously done by the verifier are invalidated and must be
3525 * performed again, if the helper is used in combination with
3526 * direct packet access.
3527 * Return
3528 * 0 on success, or a negative error in case of failure.
3529 *
3530 * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
3531 * Description
3532 * Apply an IPv6 Segment Routing action of type *action* to the
3533 * packet associated to *skb*. Each action takes a parameter
3534 * contained at address *param*, and of length *param_len* bytes.
3535 * *action* can be one of:
3536 *
3537 * **SEG6_LOCAL_ACTION_END_X**
3538 * End.X action: Endpoint with Layer-3 cross-connect.
3539 * Type of *param*: **struct in6_addr**.
3540 * **SEG6_LOCAL_ACTION_END_T**
3541 * End.T action: Endpoint with specific IPv6 table lookup.
3542 * Type of *param*: **int**.
3543 * **SEG6_LOCAL_ACTION_END_B6**
3544 * End.B6 action: Endpoint bound to an SRv6 policy.
3545 * Type of *param*: **struct ipv6_sr_hdr**.
3546 * **SEG6_LOCAL_ACTION_END_B6_ENCAP**
3547 * End.B6.Encap action: Endpoint bound to an SRv6
3548 * encapsulation policy.
3549 * Type of *param*: **struct ipv6_sr_hdr**.
3550 *
3551 * A call to this helper is susceptible to change the underlying
3552 * packet buffer. Therefore, at load time, all checks on pointers
3553 * previously done by the verifier are invalidated and must be
3554 * performed again, if the helper is used in combination with
3555 * direct packet access.
3556 * Return
3557 * 0 on success, or a negative error in case of failure.
3558 *
3559 * long bpf_rc_repeat(void *ctx)
3560 * Description
3561 * This helper is used in programs implementing IR decoding, to
3562 * report a successfully decoded repeat key message. This delays
3563 * the generation of a key up event for previously generated
3564 * key down event.
3565 *
3566 * Some IR protocols like NEC have a special IR message for
3567 * repeating last button, for when a button is held down.
3568 *
3569 * The *ctx* should point to the lirc sample as passed into
3570 * the program.
3571 *
3572 * This helper is only available is the kernel was compiled with
3573 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
3574 * "**y**".
3575 * Return
3576 * 0
3577 *
3578 * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
3579 * Description
3580 * This helper is used in programs implementing IR decoding, to
3581 * report a successfully decoded key press with *scancode*,
3582 * *toggle* value in the given *protocol*. The scancode will be
3583 * translated to a keycode using the rc keymap, and reported as
3584 * an input key down event. After a period a key up event is
3585 * generated. This period can be extended by calling either
3586 * **bpf_rc_keydown**\ () again with the same values, or calling
3587 * **bpf_rc_repeat**\ ().
3588 *
3589 * Some protocols include a toggle bit, in case the button was
3590 * released and pressed again between consecutive scancodes.
3591 *
3592 * The *ctx* should point to the lirc sample as passed into
3593 * the program.
3594 *
3595 * The *protocol* is the decoded protocol number (see
3596 * **enum rc_proto** for some predefined values).
3597 *
3598 * This helper is only available is the kernel was compiled with
3599 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
3600 * "**y**".
3601 * Return
3602 * 0
3603 *
3604 * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
3605 * Description
3606 * Return the cgroup v2 id of the socket associated with the *skb*.
3607 * This is roughly similar to the **bpf_get_cgroup_classid**\ ()
3608 * helper for cgroup v1 by providing a tag resp. identifier that
3609 * can be matched on or used for map lookups e.g. to implement
3610 * policy. The cgroup v2 id of a given path in the hierarchy is
3611 * exposed in user space through the f_handle API in order to get
3612 * to the same 64-bit id.
3613 *
3614 * This helper can be used on TC egress path, but not on ingress,
3615 * and is available only if the kernel was compiled with the
3616 * **CONFIG_SOCK_CGROUP_DATA** configuration option.
3617 * Return
3618 * The id is returned or 0 in case the id could not be retrieved.
3619 *
3620 * u64 bpf_get_current_cgroup_id(void)
3621 * Description
3622 * Get the current cgroup id based on the cgroup within which
3623 * the current task is running.
3624 * Return
3625 * A 64-bit integer containing the current cgroup id based
3626 * on the cgroup within which the current task is running.
3627 *
3628 * void *bpf_get_local_storage(void *map, u64 flags)
3629 * Description
3630 * Get the pointer to the local storage area.
3631 * The type and the size of the local storage is defined
3632 * by the *map* argument.
3633 * The *flags* meaning is specific for each map type,
3634 * and has to be 0 for cgroup local storage.
3635 *
3636 * Depending on the BPF program type, a local storage area
3637 * can be shared between multiple instances of the BPF program,
3638 * running simultaneously.
3639 *
3640 * A user should care about the synchronization by himself.
3641 * For example, by using the **BPF_ATOMIC** instructions to alter
3642 * the shared data.
3643 * Return
3644 * A pointer to the local storage area.
3645 *
3646 * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
3647 * Description
3648 * Select a **SO_REUSEPORT** socket from a
3649 * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
3650 * It checks the selected socket is matching the incoming
3651 * request in the socket buffer.
3652 * Return
3653 * 0 on success, or a negative error in case of failure.
3654 *
3655 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
3656 * Description
3657 * Return id of cgroup v2 that is ancestor of cgroup associated
3658 * with the *skb* at the *ancestor_level*. The root cgroup is at
3659 * *ancestor_level* zero and each step down the hierarchy
3660 * increments the level. If *ancestor_level* == level of cgroup
3661 * associated with *skb*, then return value will be same as that
3662 * of **bpf_skb_cgroup_id**\ ().
3663 *
3664 * The helper is useful to implement policies based on cgroups
3665 * that are upper in hierarchy than immediate cgroup associated
3666 * with *skb*.
3667 *
3668 * The format of returned id and helper limitations are same as in
3669 * **bpf_skb_cgroup_id**\ ().
3670 * Return
3671 * The id is returned or 0 in case the id could not be retrieved.
3672 *
3673 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
3674 * Description
3675 * Look for TCP socket matching *tuple*, optionally in a child
3676 * network namespace *netns*. The return value must be checked,
3677 * and if non-**NULL**, released via **bpf_sk_release**\ ().
3678 *
3679 * The *ctx* should point to the context of the program, such as
3680 * the skb or socket (depending on the hook in use). This is used
3681 * to determine the base network namespace for the lookup.
3682 *
3683 * *tuple_size* must be one of:
3684 *
3685 * **sizeof**\ (*tuple*\ **->ipv4**)
3686 * Look for an IPv4 socket.
3687 * **sizeof**\ (*tuple*\ **->ipv6**)
3688 * Look for an IPv6 socket.
3689 *
3690 * If the *netns* is a negative signed 32-bit integer, then the
3691 * socket lookup table in the netns associated with the *ctx*
3692 * will be used. For the TC hooks, this is the netns of the device
3693 * in the skb. For socket hooks, this is the netns of the socket.
3694 * If *netns* is any other signed 32-bit value greater than or
3695 * equal to zero then it specifies the ID of the netns relative to
3696 * the netns associated with the *ctx*. *netns* values beyond the
3697 * range of 32-bit integers are reserved for future use.
3698 *
3699 * All values for *flags* are reserved for future usage, and must
3700 * be left at zero.
3701 *
3702 * This helper is available only if the kernel was compiled with
3703 * **CONFIG_NET** configuration option.
3704 * Return
3705 * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
3706 * For sockets with reuseport option, the **struct bpf_sock**
3707 * result is from *reuse*\ **->socks**\ [] using the hash of the
3708 * tuple.
3709 *
3710 * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
3711 * Description
3712 * Look for UDP socket matching *tuple*, optionally in a child
3713 * network namespace *netns*. The return value must be checked,
3714 * and if non-**NULL**, released via **bpf_sk_release**\ ().
3715 *
3716 * The *ctx* should point to the context of the program, such as
3717 * the skb or socket (depending on the hook in use). This is used
3718 * to determine the base network namespace for the lookup.
3719 *
3720 * *tuple_size* must be one of:
3721 *
3722 * **sizeof**\ (*tuple*\ **->ipv4**)
3723 * Look for an IPv4 socket.
3724 * **sizeof**\ (*tuple*\ **->ipv6**)
3725 * Look for an IPv6 socket.
3726 *
3727 * If the *netns* is a negative signed 32-bit integer, then the
3728 * socket lookup table in the netns associated with the *ctx*
3729 * will be used. For the TC hooks, this is the netns of the device
3730 * in the skb. For socket hooks, this is the netns of the socket.
3731 * If *netns* is any other signed 32-bit value greater than or
3732 * equal to zero then it specifies the ID of the netns relative to
3733 * the netns associated with the *ctx*. *netns* values beyond the
3734 * range of 32-bit integers are reserved for future use.
3735 *
3736 * All values for *flags* are reserved for future usage, and must
3737 * be left at zero.
3738 *
3739 * This helper is available only if the kernel was compiled with
3740 * **CONFIG_NET** configuration option.
3741 * Return
3742 * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
3743 * For sockets with reuseport option, the **struct bpf_sock**
3744 * result is from *reuse*\ **->socks**\ [] using the hash of the
3745 * tuple.
3746 *
3747 * long bpf_sk_release(void *sock)
3748 * Description
3749 * Release the reference held by *sock*. *sock* must be a
3750 * non-**NULL** pointer that was returned from
3751 * **bpf_sk_lookup_xxx**\ ().
3752 * Return
3753 * 0 on success, or a negative error in case of failure.
3754 *
3755 * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
3756 * Description
3757 * Push an element *value* in *map*. *flags* is one of:
3758 *
3759 * **BPF_EXIST**
3760 * If the queue/stack is full, the oldest element is
3761 * removed to make room for this.
3762 * Return
3763 * 0 on success, or a negative error in case of failure.
3764 *
3765 * long bpf_map_pop_elem(struct bpf_map *map, void *value)
3766 * Description
3767 * Pop an element from *map*.
3768 * Return
3769 * 0 on success, or a negative error in case of failure.
3770 *
3771 * long bpf_map_peek_elem(struct bpf_map *map, void *value)
3772 * Description
3773 * Get an element from *map* without removing it.
3774 * Return
3775 * 0 on success, or a negative error in case of failure.
3776 *
3777 * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
3778 * Description
3779 * For socket policies, insert *len* bytes into *msg* at offset
3780 * *start*.
3781 *
3782 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
3783 * *msg* it may want to insert metadata or options into the *msg*.
3784 * This can later be read and used by any of the lower layer BPF
3785 * hooks.
3786 *
3787 * This helper may fail if under memory pressure (a malloc
3788 * fails) in these cases BPF programs will get an appropriate
3789 * error and BPF programs will need to handle them.
3790 * Return
3791 * 0 on success, or a negative error in case of failure.
3792 *
3793 * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
3794 * Description
3795 * Will remove *len* bytes from a *msg* starting at byte *start*.
3796 * This may result in **ENOMEM** errors under certain situations if
3797 * an allocation and copy are required due to a full ring buffer.
3798 * However, the helper will try to avoid doing the allocation
3799 * if possible. Other errors can occur if input parameters are
3800 * invalid either due to *start* byte not being valid part of *msg*
3801 * payload and/or *pop* value being to large.
3802 * Return
3803 * 0 on success, or a negative error in case of failure.
3804 *
3805 * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
3806 * Description
3807 * This helper is used in programs implementing IR decoding, to
3808 * report a successfully decoded pointer movement.
3809 *
3810 * The *ctx* should point to the lirc sample as passed into
3811 * the program.
3812 *
3813 * This helper is only available is the kernel was compiled with
3814 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
3815 * "**y**".
3816 * Return
3817 * 0
3818 *
3819 * long bpf_spin_lock(struct bpf_spin_lock *lock)
3820 * Description
3821 * Acquire a spinlock represented by the pointer *lock*, which is
3822 * stored as part of a value of a map. Taking the lock allows to
3823 * safely update the rest of the fields in that value. The
3824 * spinlock can (and must) later be released with a call to
3825 * **bpf_spin_unlock**\ (\ *lock*\ ).
3826 *
3827 * Spinlocks in BPF programs come with a number of restrictions
3828 * and constraints:
3829 *
3830 * * **bpf_spin_lock** objects are only allowed inside maps of
3831 * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
3832 * list could be extended in the future).
3833 * * BTF description of the map is mandatory.
3834 * * The BPF program can take ONE lock at a time, since taking two
3835 * or more could cause dead locks.
3836 * * Only one **struct bpf_spin_lock** is allowed per map element.
3837 * * When the lock is taken, calls (either BPF to BPF or helpers)
3838 * are not allowed.
3839 * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
3840 * allowed inside a spinlock-ed region.
3841 * * The BPF program MUST call **bpf_spin_unlock**\ () to release
3842 * the lock, on all execution paths, before it returns.
3843 * * The BPF program can access **struct bpf_spin_lock** only via
3844 * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
3845 * helpers. Loading or storing data into the **struct
3846 * bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
3847 * * To use the **bpf_spin_lock**\ () helper, the BTF description
3848 * of the map value must be a struct and have **struct
3849 * bpf_spin_lock** *anyname*\ **;** field at the top level.
3850 * Nested lock inside another struct is not allowed.
3851 * * The **struct bpf_spin_lock** *lock* field in a map value must
3852 * be aligned on a multiple of 4 bytes in that value.
3853 * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
3854 * the **bpf_spin_lock** field to user space.
3855 * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
3856 * a BPF program, do not update the **bpf_spin_lock** field.
3857 * * **bpf_spin_lock** cannot be on the stack or inside a
3858 * networking packet (it can only be inside of a map values).
3859 * * **bpf_spin_lock** is available to root only.
3860 * * Tracing programs and socket filter programs cannot use
3861 * **bpf_spin_lock**\ () due to insufficient preemption checks
3862 * (but this may change in the future).
3863 * * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
3864 * Return
3865 * 0
3866 *
3867 * long bpf_spin_unlock(struct bpf_spin_lock *lock)
3868 * Description
3869 * Release the *lock* previously locked by a call to
3870 * **bpf_spin_lock**\ (\ *lock*\ ).
3871 * Return
3872 * 0
3873 *
3874 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
3875 * Description
3876 * This helper gets a **struct bpf_sock** pointer such
3877 * that all the fields in this **bpf_sock** can be accessed.
3878 * Return
3879 * A **struct bpf_sock** pointer on success, or **NULL** in
3880 * case of failure.
3881 *
3882 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
3883 * Description
3884 * This helper gets a **struct bpf_tcp_sock** pointer from a
3885 * **struct bpf_sock** pointer.
3886 * Return
3887 * A **struct bpf_tcp_sock** pointer on success, or **NULL** in
3888 * case of failure.
3889 *
3890 * long bpf_skb_ecn_set_ce(struct sk_buff *skb)
3891 * Description
3892 * Set ECN (Explicit Congestion Notification) field of IP header
3893 * to **CE** (Congestion Encountered) if current value is **ECT**
3894 * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
3895 * and IPv4.
3896 * Return
3897 * 1 if the **CE** flag is set (either by the current helper call
3898 * or because it was already present), 0 if it is not set.
3899 *
3900 * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
3901 * Description
3902 * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
3903 * **bpf_sk_release**\ () is unnecessary and not allowed.
3904 * Return
3905 * A **struct bpf_sock** pointer on success, or **NULL** in
3906 * case of failure.
3907 *
3908 * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
3909 * Description
3910 * Look for TCP socket matching *tuple*, optionally in a child
3911 * network namespace *netns*. The return value must be checked,
3912 * and if non-**NULL**, released via **bpf_sk_release**\ ().
3913 *
3914 * This function is identical to **bpf_sk_lookup_tcp**\ (), except
3915 * that it also returns timewait or request sockets. Use
3916 * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
3917 * full structure.
3918 *
3919 * This helper is available only if the kernel was compiled with
3920 * **CONFIG_NET** configuration option.
3921 * Return
3922 * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
3923 * For sockets with reuseport option, the **struct bpf_sock**
3924 * result is from *reuse*\ **->socks**\ [] using the hash of the
3925 * tuple.
3926 *
3927 * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
3928 * Description
3929 * Check whether *iph* and *th* contain a valid SYN cookie ACK for
3930 * the listening socket in *sk*.
3931 *
3932 * *iph* points to the start of the IPv4 or IPv6 header, while
3933 * *iph_len* contains **sizeof**\ (**struct iphdr**) or
3934 * **sizeof**\ (**struct ipv6hdr**).
3935 *
3936 * *th* points to the start of the TCP header, while *th_len*
3937 * contains the length of the TCP header (at least
3938 * **sizeof**\ (**struct tcphdr**)).
3939 * Return
3940 * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
3941 * error otherwise.
3942 *
3943 * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
3944 * Description
3945 * Get name of sysctl in /proc/sys/ and copy it into provided by
3946 * program buffer *buf* of size *buf_len*.
3947 *
3948 * The buffer is always NUL terminated, unless it's zero-sized.
3949 *
3950 * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is
3951 * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name
3952 * only (e.g. "tcp_mem").
3953 * Return
3954 * Number of character copied (not including the trailing NUL).
3955 *
3956 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
3957 * truncated name in this case).
3958 *
3959 * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
3960 * Description
3961 * Get current value of sysctl as it is presented in /proc/sys
3962 * (incl. newline, etc), and copy it as a string into provided
3963 * by program buffer *buf* of size *buf_len*.
3964 *
3965 * The whole value is copied, no matter what file position user
3966 * space issued e.g. sys_read at.
3967 *
3968 * The buffer is always NUL terminated, unless it's zero-sized.
3969 * Return
3970 * Number of character copied (not including the trailing NUL).
3971 *
3972 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
3973 * truncated name in this case).
3974 *
3975 * **-EINVAL** if current value was unavailable, e.g. because
3976 * sysctl is uninitialized and read returns -EIO for it.
3977 *
3978 * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
3979 * Description
3980 * Get new value being written by user space to sysctl (before
3981 * the actual write happens) and copy it as a string into
3982 * provided by program buffer *buf* of size *buf_len*.
3983 *
3984 * User space may write new value at file position > 0.
3985 *
3986 * The buffer is always NUL terminated, unless it's zero-sized.
3987 * Return
3988 * Number of character copied (not including the trailing NUL).
3989 *
3990 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
3991 * truncated name in this case).
3992 *
3993 * **-EINVAL** if sysctl is being read.
3994 *
3995 * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
3996 * Description
3997 * Override new value being written by user space to sysctl with
3998 * value provided by program in buffer *buf* of size *buf_len*.
3999 *
4000 * *buf* should contain a string in same form as provided by user
4001 * space on sysctl write.
4002 *
4003 * User space may write new value at file position > 0. To override
4004 * the whole sysctl value file position should be set to zero.
4005 * Return
4006 * 0 on success.
4007 *
4008 * **-E2BIG** if the *buf_len* is too big.
4009 *
4010 * **-EINVAL** if sysctl is being read.
4011 *
4012 * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
4013 * Description
4014 * Convert the initial part of the string from buffer *buf* of
4015 * size *buf_len* to a long integer according to the given base
4016 * and save the result in *res*.
4017 *
4018 * The string may begin with an arbitrary amount of white space
4019 * (as determined by **isspace**\ (3)) followed by a single
4020 * optional '**-**' sign.
4021 *
4022 * Five least significant bits of *flags* encode base, other bits
4023 * are currently unused.
4024 *
4025 * Base must be either 8, 10, 16 or 0 to detect it automatically
4026 * similar to user space **strtol**\ (3).
4027 * Return
4028 * Number of characters consumed on success. Must be positive but
4029 * no more than *buf_len*.
4030 *
4031 * **-EINVAL** if no valid digits were found or unsupported base
4032 * was provided.
4033 *
4034 * **-ERANGE** if resulting value was out of range.
4035 *
4036 * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
4037 * Description
4038 * Convert the initial part of the string from buffer *buf* of
4039 * size *buf_len* to an unsigned long integer according to the
4040 * given base and save the result in *res*.
4041 *
4042 * The string may begin with an arbitrary amount of white space
4043 * (as determined by **isspace**\ (3)).
4044 *
4045 * Five least significant bits of *flags* encode base, other bits
4046 * are currently unused.
4047 *
4048 * Base must be either 8, 10, 16 or 0 to detect it automatically
4049 * similar to user space **strtoul**\ (3).
4050 * Return
4051 * Number of characters consumed on success. Must be positive but
4052 * no more than *buf_len*.
4053 *
4054 * **-EINVAL** if no valid digits were found or unsupported base
4055 * was provided.
4056 *
4057 * **-ERANGE** if resulting value was out of range.
4058 *
4059 * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags)
4060 * Description
4061 * Get a bpf-local-storage from a *sk*.
4062 *
4063 * Logically, it could be thought of getting the value from
4064 * a *map* with *sk* as the **key**. From this
4065 * perspective, the usage is not much different from
4066 * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this
4067 * helper enforces the key must be a full socket and the map must
4068 * be a **BPF_MAP_TYPE_SK_STORAGE** also.
4069 *
4070 * Underneath, the value is stored locally at *sk* instead of
4071 * the *map*. The *map* is used as the bpf-local-storage
4072 * "type". The bpf-local-storage "type" (i.e. the *map*) is
4073 * searched against all bpf-local-storages residing at *sk*.
4074 *
4075 * *sk* is a kernel **struct sock** pointer for LSM program.
4076 * *sk* is a **struct bpf_sock** pointer for other program types.
4077 *
4078 * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
4079 * used such that a new bpf-local-storage will be
4080 * created if one does not exist. *value* can be used
4081 * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
4082 * the initial value of a bpf-local-storage. If *value* is
4083 * **NULL**, the new bpf-local-storage will be zero initialized.
4084 * Return
4085 * A bpf-local-storage pointer is returned on success.
4086 *
4087 * **NULL** if not found or there was an error in adding
4088 * a new bpf-local-storage.
4089 *
4090 * long bpf_sk_storage_delete(struct bpf_map *map, void *sk)
4091 * Description
4092 * Delete a bpf-local-storage from a *sk*.
4093 * Return
4094 * 0 on success.
4095 *
4096 * **-ENOENT** if the bpf-local-storage cannot be found.
4097 * **-EINVAL** if sk is not a fullsock (e.g. a request_sock).
4098 *
4099 * long bpf_send_signal(u32 sig)
4100 * Description
4101 * Send signal *sig* to the process of the current task.
4102 * The signal may be delivered to any of this process's threads.
4103 * Return
4104 * 0 on success or successfully queued.
4105 *
4106 * **-EBUSY** if work queue under nmi is full.
4107 *
4108 * **-EINVAL** if *sig* is invalid.
4109 *
4110 * **-EPERM** if no permission to send the *sig*.
4111 *
4112 * **-EAGAIN** if bpf program can try again.
4113 *
4114 * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
4115 * Description
4116 * Try to issue a SYN cookie for the packet with corresponding
4117 * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
4118 *
4119 * *iph* points to the start of the IPv4 or IPv6 header, while
4120 * *iph_len* contains **sizeof**\ (**struct iphdr**) or
4121 * **sizeof**\ (**struct ipv6hdr**).
4122 *
4123 * *th* points to the start of the TCP header, while *th_len*
4124 * contains the length of the TCP header with options (at least
4125 * **sizeof**\ (**struct tcphdr**)).
4126 * Return
4127 * On success, lower 32 bits hold the generated SYN cookie in
4128 * followed by 16 bits which hold the MSS value for that cookie,
4129 * and the top 16 bits are unused.
4130 *
4131 * On failure, the returned value is one of the following:
4132 *
4133 * **-EINVAL** SYN cookie cannot be issued due to error
4134 *
4135 * **-ENOENT** SYN cookie should not be issued (no SYN flood)
4136 *
4137 * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies
4138 *
4139 * **-EPROTONOSUPPORT** IP packet version is not 4 or 6
4140 *
4141 * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
4142 * Description
4143 * Write raw *data* blob into a special BPF perf event held by
4144 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
4145 * event must have the following attributes: **PERF_SAMPLE_RAW**
4146 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
4147 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
4148 *
4149 * The *flags* are used to indicate the index in *map* for which
4150 * the value must be put, masked with **BPF_F_INDEX_MASK**.
4151 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
4152 * to indicate that the index of the current CPU core should be
4153 * used.
4154 *
4155 * The value to write, of *size*, is passed through eBPF stack and
4156 * pointed by *data*.
4157 *
4158 * *ctx* is a pointer to in-kernel struct sk_buff.
4159 *
4160 * This helper is similar to **bpf_perf_event_output**\ () but
4161 * restricted to raw_tracepoint bpf programs.
4162 * Return
4163 * 0 on success, or a negative error in case of failure.
4164 *
4165 * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
4166 * Description
4167 * Safely attempt to read *size* bytes from user space address
4168 * *unsafe_ptr* and store the data in *dst*.
4169 * Return
4170 * 0 on success, or a negative error in case of failure.
4171 *
4172 * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
4173 * Description
4174 * Safely attempt to read *size* bytes from kernel space address
4175 * *unsafe_ptr* and store the data in *dst*.
4176 * Return
4177 * 0 on success, or a negative error in case of failure.
4178 *
4179 * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
4180 * Description
4181 * Copy a NUL terminated string from an unsafe user address
4182 * *unsafe_ptr* to *dst*. The *size* should include the
4183 * terminating NUL byte. In case the string length is smaller than
4184 * *size*, the target is not padded with further NUL bytes. If the
4185 * string length is larger than *size*, just *size*-1 bytes are
4186 * copied and the last byte is set to NUL.
4187 *
4188 * On success, returns the number of bytes that were written,
4189 * including the terminal NUL. This makes this helper useful in
4190 * tracing programs for reading strings, and more importantly to
4191 * get its length at runtime. See the following snippet:
4192 *
4193 * ::
4194 *
4195 * SEC("kprobe/sys_open")
4196 * void bpf_sys_open(struct pt_regs *ctx)
4197 * {
4198 * char buf[PATHLEN]; // PATHLEN is defined to 256
4199 * int res = bpf_probe_read_user_str(buf, sizeof(buf),
4200 * ctx->di);
4201 *
4202 * // Consume buf, for example push it to
4203 * // userspace via bpf_perf_event_output(); we
4204 * // can use res (the string length) as event
4205 * // size, after checking its boundaries.
4206 * }
4207 *
4208 * In comparison, using **bpf_probe_read_user**\ () helper here
4209 * instead to read the string would require to estimate the length
4210 * at compile time, and would often result in copying more memory
4211 * than necessary.
4212 *
4213 * Another useful use case is when parsing individual process
4214 * arguments or individual environment variables navigating
4215 * *current*\ **->mm->arg_start** and *current*\
4216 * **->mm->env_start**: using this helper and the return value,
4217 * one can quickly iterate at the right offset of the memory area.
4218 * Return
4219 * On success, the strictly positive length of the output string,
4220 * including the trailing NUL character. On error, a negative
4221 * value.
4222 *
4223 * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
4224 * Description
4225 * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
4226 * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
4227 * Return
4228 * On success, the strictly positive length of the string, including
4229 * the trailing NUL character. On error, a negative value.
4230 *
4231 * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
4232 * Description
4233 * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
4234 * *rcv_nxt* is the ack_seq to be sent out.
4235 * Return
4236 * 0 on success, or a negative error in case of failure.
4237 *
4238 * long bpf_send_signal_thread(u32 sig)
4239 * Description
4240 * Send signal *sig* to the thread corresponding to the current task.
4241 * Return
4242 * 0 on success or successfully queued.
4243 *
4244 * **-EBUSY** if work queue under nmi is full.
4245 *
4246 * **-EINVAL** if *sig* is invalid.
4247 *
4248 * **-EPERM** if no permission to send the *sig*.
4249 *
4250 * **-EAGAIN** if bpf program can try again.
4251 *
4252 * u64 bpf_jiffies64(void)
4253 * Description
4254 * Obtain the 64bit jiffies
4255 * Return
4256 * The 64 bit jiffies
4257 *
4258 * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
4259 * Description
4260 * For an eBPF program attached to a perf event, retrieve the
4261 * branch records (**struct perf_branch_entry**) associated to *ctx*
4262 * and store it in the buffer pointed by *buf* up to size
4263 * *size* bytes.
4264 * Return
4265 * On success, number of bytes written to *buf*. On error, a
4266 * negative value.
4267 *
4268 * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
4269 * instead return the number of bytes required to store all the
4270 * branch entries. If this flag is set, *buf* may be NULL.
4271 *
4272 * **-EINVAL** if arguments invalid or **size** not a multiple
4273 * of **sizeof**\ (**struct perf_branch_entry**\ ).
4274 *
4275 * **-ENOENT** if architecture does not support branch records.
4276 *
4277 * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
4278 * Description
4279 * Returns 0 on success, values for *pid* and *tgid* as seen from the current
4280 * *namespace* will be returned in *nsdata*.
4281 * Return
4282 * 0 on success, or one of the following in case of failure:
4283 *
4284 * **-EINVAL** if dev and inum supplied don't match dev_t and inode number
4285 * with nsfs of current task, or if dev conversion to dev_t lost high bits.
4286 *
4287 * **-ENOENT** if pidns does not exists for the current task.
4288 *
4289 * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
4290 * Description
4291 * Write raw *data* blob into a special BPF perf event held by
4292 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
4293 * event must have the following attributes: **PERF_SAMPLE_RAW**
4294 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
4295 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
4296 *
4297 * The *flags* are used to indicate the index in *map* for which
4298 * the value must be put, masked with **BPF_F_INDEX_MASK**.
4299 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
4300 * to indicate that the index of the current CPU core should be
4301 * used.
4302 *
4303 * The value to write, of *size*, is passed through eBPF stack and
4304 * pointed by *data*.
4305 *
4306 * *ctx* is a pointer to in-kernel struct xdp_buff.
4307 *
4308 * This helper is similar to **bpf_perf_eventoutput**\ () but
4309 * restricted to raw_tracepoint bpf programs.
4310 * Return
4311 * 0 on success, or a negative error in case of failure.
4312 *
4313 * u64 bpf_get_netns_cookie(void *ctx)
4314 * Description
4315 * Retrieve the cookie (generated by the kernel) of the network
4316 * namespace the input *ctx* is associated with. The network
4317 * namespace cookie remains stable for its lifetime and provides
4318 * a global identifier that can be assumed unique. If *ctx* is
4319 * NULL, then the helper returns the cookie for the initial
4320 * network namespace. The cookie itself is very similar to that
4321 * of **bpf_get_socket_cookie**\ () helper, but for network
4322 * namespaces instead of sockets.
4323 * Return
4324 * A 8-byte long opaque number.
4325 *
4326 * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
4327 * Description
4328 * Return id of cgroup v2 that is ancestor of the cgroup associated
4329 * with the current task at the *ancestor_level*. The root cgroup
4330 * is at *ancestor_level* zero and each step down the hierarchy
4331 * increments the level. If *ancestor_level* == level of cgroup
4332 * associated with the current task, then return value will be the
4333 * same as that of **bpf_get_current_cgroup_id**\ ().
4334 *
4335 * The helper is useful to implement policies based on cgroups
4336 * that are upper in hierarchy than immediate cgroup associated
4337 * with the current task.
4338 *
4339 * The format of returned id and helper limitations are same as in
4340 * **bpf_get_current_cgroup_id**\ ().
4341 * Return
4342 * The id is returned or 0 in case the id could not be retrieved.
4343 *
4344 * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
4345 * Description
4346 * Helper is overloaded depending on BPF program type. This
4347 * description applies to **BPF_PROG_TYPE_SCHED_CLS** and
4348 * **BPF_PROG_TYPE_SCHED_ACT** programs.
4349 *
4350 * Assign the *sk* to the *skb*. When combined with appropriate
4351 * routing configuration to receive the packet towards the socket,
4352 * will cause *skb* to be delivered to the specified socket.
4353 * Subsequent redirection of *skb* via **bpf_redirect**\ (),
4354 * **bpf_clone_redirect**\ () or other methods outside of BPF may
4355 * interfere with successful delivery to the socket.
4356 *
4357 * This operation is only valid from TC ingress path.
4358 *
4359 * The *flags* argument must be zero.
4360 * Return
4361 * 0 on success, or a negative error in case of failure:
4362 *
4363 * **-EINVAL** if specified *flags* are not supported.
4364 *
4365 * **-ENOENT** if the socket is unavailable for assignment.
4366 *
4367 * **-ENETUNREACH** if the socket is unreachable (wrong netns).
4368 *
4369 * **-EOPNOTSUPP** if the operation is not supported, for example
4370 * a call from outside of TC ingress.
4371 *
4372 * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
4373 * Description
4374 * Helper is overloaded depending on BPF program type. This
4375 * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs.
4376 *
4377 * Select the *sk* as a result of a socket lookup.
4378 *
4379 * For the operation to succeed passed socket must be compatible
4380 * with the packet description provided by the *ctx* object.
4381 *
4382 * L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must
4383 * be an exact match. While IP family (**AF_INET** or
4384 * **AF_INET6**) must be compatible, that is IPv6 sockets
4385 * that are not v6-only can be selected for IPv4 packets.
4386 *
4387 * Only TCP listeners and UDP unconnected sockets can be
4388 * selected. *sk* can also be NULL to reset any previous
4389 * selection.
4390 *
4391 * *flags* argument can combination of following values:
4392 *
4393 * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous
4394 * socket selection, potentially done by a BPF program
4395 * that ran before us.
4396 *
4397 * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip
4398 * load-balancing within reuseport group for the socket
4399 * being selected.
4400 *
4401 * On success *ctx->sk* will point to the selected socket.
4402 *
4403 * Return
4404 * 0 on success, or a negative errno in case of failure.
4405 *
4406 * * **-EAFNOSUPPORT** if socket family (*sk->family*) is
4407 * not compatible with packet family (*ctx->family*).
4408 *
4409 * * **-EEXIST** if socket has been already selected,
4410 * potentially by another program, and
4411 * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified.
4412 *
4413 * * **-EINVAL** if unsupported flags were specified.
4414 *
4415 * * **-EPROTOTYPE** if socket L4 protocol
4416 * (*sk->protocol*) doesn't match packet protocol
4417 * (*ctx->protocol*).
4418 *
4419 * * **-ESOCKTNOSUPPORT** if socket is not in allowed
4420 * state (TCP listening or UDP unconnected).
4421 *
4422 * u64 bpf_ktime_get_boot_ns(void)
4423 * Description
4424 * Return the time elapsed since system boot, in nanoseconds.
4425 * Does include the time the system was suspended.
4426 * See: **clock_gettime**\ (**CLOCK_BOOTTIME**)
4427 * Return
4428 * Current *ktime*.
4429 *
4430 * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
4431 * Description
4432 * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
4433 * out the format string.
4434 * The *m* represents the seq_file. The *fmt* and *fmt_size* are for
4435 * the format string itself. The *data* and *data_len* are format string
4436 * arguments. The *data* are a **u64** array and corresponding format string
4437 * values are stored in the array. For strings and pointers where pointees
4438 * are accessed, only the pointer values are stored in the *data* array.
4439 * The *data_len* is the size of *data* in bytes - must be a multiple of 8.
4440 *
4441 * Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory.
4442 * Reading kernel memory may fail due to either invalid address or
4443 * valid address but requiring a major memory fault. If reading kernel memory
4444 * fails, the string for **%s** will be an empty string, and the ip
4445 * address for **%p{i,I}{4,6}** will be 0. Not returning error to
4446 * bpf program is consistent with what **bpf_trace_printk**\ () does for now.
4447 * Return
4448 * 0 on success, or a negative error in case of failure:
4449 *
4450 * **-EBUSY** if per-CPU memory copy buffer is busy, can try again
4451 * by returning 1 from bpf program.
4452 *
4453 * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported.
4454 *
4455 * **-E2BIG** if *fmt* contains too many format specifiers.
4456 *
4457 * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
4458 *
4459 * long bpf_seq_write(struct seq_file *m, const void *data, u32 len)
4460 * Description
4461 * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data.
4462 * The *m* represents the seq_file. The *data* and *len* represent the
4463 * data to write in bytes.
4464 * Return
4465 * 0 on success, or a negative error in case of failure:
4466 *
4467 * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
4468 *
4469 * u64 bpf_sk_cgroup_id(void *sk)
4470 * Description
4471 * Return the cgroup v2 id of the socket *sk*.
4472 *
4473 * *sk* must be a non-**NULL** pointer to a socket, e.g. one
4474 * returned from **bpf_sk_lookup_xxx**\ (),
4475 * **bpf_sk_fullsock**\ (), etc. The format of returned id is
4476 * same as in **bpf_skb_cgroup_id**\ ().
4477 *
4478 * This helper is available only if the kernel was compiled with
4479 * the **CONFIG_SOCK_CGROUP_DATA** configuration option.
4480 * Return
4481 * The id is returned or 0 in case the id could not be retrieved.
4482 *
4483 * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level)
4484 * Description
4485 * Return id of cgroup v2 that is ancestor of cgroup associated
4486 * with the *sk* at the *ancestor_level*. The root cgroup is at
4487 * *ancestor_level* zero and each step down the hierarchy
4488 * increments the level. If *ancestor_level* == level of cgroup
4489 * associated with *sk*, then return value will be same as that
4490 * of **bpf_sk_cgroup_id**\ ().
4491 *
4492 * The helper is useful to implement policies based on cgroups
4493 * that are upper in hierarchy than immediate cgroup associated
4494 * with *sk*.
4495 *
4496 * The format of returned id and helper limitations are same as in
4497 * **bpf_sk_cgroup_id**\ ().
4498 * Return
4499 * The id is returned or 0 in case the id could not be retrieved.
4500 *
4501 * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
4502 * Description
4503 * Copy *size* bytes from *data* into a ring buffer *ringbuf*.
4504 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
4505 * of new data availability is sent.
4506 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
4507 * of new data availability is sent unconditionally.
4508 * If **0** is specified in *flags*, an adaptive notification
4509 * of new data availability is sent.
4510 *
4511 * An adaptive notification is a notification sent whenever the user-space
4512 * process has caught up and consumed all available payloads. In case the user-space
4513 * process is still processing a previous payload, then no notification is needed
4514 * as it will process the newly added payload automatically.
4515 * Return
4516 * 0 on success, or a negative error in case of failure.
4517 *
4518 * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
4519 * Description
4520 * Reserve *size* bytes of payload in a ring buffer *ringbuf*.
4521 * *flags* must be 0.
4522 * Return
4523 * Valid pointer with *size* bytes of memory available; NULL,
4524 * otherwise.
4525 *
4526 * void bpf_ringbuf_submit(void *data, u64 flags)
4527 * Description
4528 * Submit reserved ring buffer sample, pointed to by *data*.
4529 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
4530 * of new data availability is sent.
4531 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
4532 * of new data availability is sent unconditionally.
4533 * If **0** is specified in *flags*, an adaptive notification
4534 * of new data availability is sent.
4535 *
4536 * See 'bpf_ringbuf_output()' for the definition of adaptive notification.
4537 * Return
4538 * Nothing. Always succeeds.
4539 *
4540 * void bpf_ringbuf_discard(void *data, u64 flags)
4541 * Description
4542 * Discard reserved ring buffer sample, pointed to by *data*.
4543 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
4544 * of new data availability is sent.
4545 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
4546 * of new data availability is sent unconditionally.
4547 * If **0** is specified in *flags*, an adaptive notification
4548 * of new data availability is sent.
4549 *
4550 * See 'bpf_ringbuf_output()' for the definition of adaptive notification.
4551 * Return
4552 * Nothing. Always succeeds.
4553 *
4554 * u64 bpf_ringbuf_query(void *ringbuf, u64 flags)
4555 * Description
4556 * Query various characteristics of provided ring buffer. What
4557 * exactly is queries is determined by *flags*:
4558 *
4559 * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
4560 * * **BPF_RB_RING_SIZE**: The size of ring buffer.
4561 * * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
4562 * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
4563 *
4564 * Data returned is just a momentary snapshot of actual values
4565 * and could be inaccurate, so this facility should be used to
4566 * power heuristics and for reporting, not to make 100% correct
4567 * calculation.
4568 * Return
4569 * Requested value, or 0, if *flags* are not recognized.
4570 *
4571 * long bpf_csum_level(struct sk_buff *skb, u64 level)
4572 * Description
4573 * Change the skbs checksum level by one layer up or down, or
4574 * reset it entirely to none in order to have the stack perform
4575 * checksum validation. The level is applicable to the following
4576 * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of
4577 * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
4578 * through **bpf_skb_adjust_room**\ () helper with passing in
4579 * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call
4580 * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since
4581 * the UDP header is removed. Similarly, an encap of the latter
4582 * into the former could be accompanied by a helper call to
4583 * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
4584 * skb is still intended to be processed in higher layers of the
4585 * stack instead of just egressing at tc.
4586 *
4587 * There are three supported level settings at this time:
4588 *
4589 * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
4590 * with CHECKSUM_UNNECESSARY.
4591 * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
4592 * with CHECKSUM_UNNECESSARY.
4593 * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
4594 * sets CHECKSUM_NONE to force checksum validation by the stack.
4595 * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
4596 * skb->csum_level.
4597 * Return
4598 * 0 on success, or a negative error in case of failure. In the
4599 * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
4600 * is returned or the error code -EACCES in case the skb is not
4601 * subject to CHECKSUM_UNNECESSARY.
4602 *
4603 * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk)
4604 * Description
4605 * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
4606 * Return
4607 * *sk* if casting is valid, or **NULL** otherwise.
4608 *
4609 * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
4610 * Description
4611 * Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
4612 * Return
4613 * *sk* if casting is valid, or **NULL** otherwise.
4614 *
4615 * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
4616 * Description
4617 * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
4618 * Return
4619 * *sk* if casting is valid, or **NULL** otherwise.
4620 *
4621 * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
4622 * Description
4623 * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
4624 * Return
4625 * *sk* if casting is valid, or **NULL** otherwise.
4626 *
4627 * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
4628 * Description
4629 * Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
4630 * Return
4631 * *sk* if casting is valid, or **NULL** otherwise.
4632 *
4633 * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
4634 * Description
4635 * Return a user or a kernel stack in bpf program provided buffer.
4636 * Note: the user stack will only be populated if the *task* is
4637 * the current task; all other tasks will return -EOPNOTSUPP.
4638 * To achieve this, the helper needs *task*, which is a valid
4639 * pointer to **struct task_struct**. To store the stacktrace, the
4640 * bpf program provides *buf* with a nonnegative *size*.
4641 *
4642 * The last argument, *flags*, holds the number of stack frames to
4643 * skip (from 0 to 255), masked with
4644 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
4645 * the following flags:
4646 *
4647 * **BPF_F_USER_STACK**
4648 * Collect a user space stack instead of a kernel stack.
4649 * The *task* must be the current task.
4650 * **BPF_F_USER_BUILD_ID**
4651 * Collect buildid+offset instead of ips for user stack,
4652 * only valid if **BPF_F_USER_STACK** is also specified.
4653 *
4654 * **bpf_get_task_stack**\ () can collect up to
4655 * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
4656 * to sufficient large buffer size. Note that
4657 * this limit can be controlled with the **sysctl** program, and
4658 * that it should be manually increased in order to profile long
4659 * user stacks (such as stacks for Java programs). To do so, use:
4660 *
4661 * ::
4662 *
4663 * # sysctl kernel.perf_event_max_stack=<new value>
4664 * Return
4665 * The non-negative copied *buf* length equal to or less than
4666 * *size* on success, or a negative error in case of failure.
4667 *
4668 * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
4669 * Description
4670 * Load header option. Support reading a particular TCP header
4671 * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
4672 *
4673 * If *flags* is 0, it will search the option from the
4674 * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
4675 * has details on what skb_data contains under different
4676 * *skops*\ **->op**.
4677 *
4678 * The first byte of the *searchby_res* specifies the
4679 * kind that it wants to search.
4680 *
4681 * If the searching kind is an experimental kind
4682 * (i.e. 253 or 254 according to RFC6994). It also
4683 * needs to specify the "magic" which is either
4684 * 2 bytes or 4 bytes. It then also needs to
4685 * specify the size of the magic by using
4686 * the 2nd byte which is "kind-length" of a TCP
4687 * header option and the "kind-length" also
4688 * includes the first 2 bytes "kind" and "kind-length"
4689 * itself as a normal TCP header option also does.
4690 *
4691 * For example, to search experimental kind 254 with
4692 * 2 byte magic 0xeB9F, the searchby_res should be
4693 * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
4694 *
4695 * To search for the standard window scale option (3),
4696 * the *searchby_res* should be [ 3, 0, 0, .... 0 ].
4697 * Note, kind-length must be 0 for regular option.
4698 *
4699 * Searching for No-Op (0) and End-of-Option-List (1) are
4700 * not supported.
4701 *
4702 * *len* must be at least 2 bytes which is the minimal size
4703 * of a header option.
4704 *
4705 * Supported flags:
4706 *
4707 * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
4708 * saved_syn packet or the just-received syn packet.
4709 *
4710 * Return
4711 * > 0 when found, the header option is copied to *searchby_res*.
4712 * The return value is the total length copied. On failure, a
4713 * negative error code is returned:
4714 *
4715 * **-EINVAL** if a parameter is invalid.
4716 *
4717 * **-ENOMSG** if the option is not found.
4718 *
4719 * **-ENOENT** if no syn packet is available when
4720 * **BPF_LOAD_HDR_OPT_TCP_SYN** is used.
4721 *
4722 * **-ENOSPC** if there is not enough space. Only *len* number of
4723 * bytes are copied.
4724 *
4725 * **-EFAULT** on failure to parse the header options in the
4726 * packet.
4727 *
4728 * **-EPERM** if the helper cannot be used under the current
4729 * *skops*\ **->op**.
4730 *
4731 * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
4732 * Description
4733 * Store header option. The data will be copied
4734 * from buffer *from* with length *len* to the TCP header.
4735 *
4736 * The buffer *from* should have the whole option that
4737 * includes the kind, kind-length, and the actual
4738 * option data. The *len* must be at least kind-length
4739 * long. The kind-length does not have to be 4 byte
4740 * aligned. The kernel will take care of the padding
4741 * and setting the 4 bytes aligned value to th->doff.
4742 *
4743 * This helper will check for duplicated option
4744 * by searching the same option in the outgoing skb.
4745 *
4746 * This helper can only be called during
4747 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
4748 *
4749 * Return
4750 * 0 on success, or negative error in case of failure:
4751 *
4752 * **-EINVAL** If param is invalid.
4753 *
4754 * **-ENOSPC** if there is not enough space in the header.
4755 * Nothing has been written
4756 *
4757 * **-EEXIST** if the option already exists.
4758 *
4759 * **-EFAULT** on failure to parse the existing header options.
4760 *
4761 * **-EPERM** if the helper cannot be used under the current
4762 * *skops*\ **->op**.
4763 *
4764 * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
4765 * Description
4766 * Reserve *len* bytes for the bpf header option. The
4767 * space will be used by **bpf_store_hdr_opt**\ () later in
4768 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
4769 *
4770 * If **bpf_reserve_hdr_opt**\ () is called multiple times,
4771 * the total number of bytes will be reserved.
4772 *
4773 * This helper can only be called during
4774 * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
4775 *
4776 * Return
4777 * 0 on success, or negative error in case of failure:
4778 *
4779 * **-EINVAL** if a parameter is invalid.
4780 *
4781 * **-ENOSPC** if there is not enough space in the header.
4782 *
4783 * **-EPERM** if the helper cannot be used under the current
4784 * *skops*\ **->op**.
4785 *
4786 * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
4787 * Description
4788 * Get a bpf_local_storage from an *inode*.
4789 *
4790 * Logically, it could be thought of as getting the value from
4791 * a *map* with *inode* as the **key**. From this
4792 * perspective, the usage is not much different from
4793 * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this
4794 * helper enforces the key must be an inode and the map must also
4795 * be a **BPF_MAP_TYPE_INODE_STORAGE**.
4796 *
4797 * Underneath, the value is stored locally at *inode* instead of
4798 * the *map*. The *map* is used as the bpf-local-storage
4799 * "type". The bpf-local-storage "type" (i.e. the *map*) is
4800 * searched against all bpf_local_storage residing at *inode*.
4801 *
4802 * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
4803 * used such that a new bpf_local_storage will be
4804 * created if one does not exist. *value* can be used
4805 * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
4806 * the initial value of a bpf_local_storage. If *value* is
4807 * **NULL**, the new bpf_local_storage will be zero initialized.
4808 * Return
4809 * A bpf_local_storage pointer is returned on success.
4810 *
4811 * **NULL** if not found or there was an error in adding
4812 * a new bpf_local_storage.
4813 *
4814 * int bpf_inode_storage_delete(struct bpf_map *map, void *inode)
4815 * Description
4816 * Delete a bpf_local_storage from an *inode*.
4817 * Return
4818 * 0 on success.
4819 *
4820 * **-ENOENT** if the bpf_local_storage cannot be found.
4821 *
4822 * long bpf_d_path(struct path *path, char *buf, u32 sz)
4823 * Description
4824 * Return full path for given **struct path** object, which
4825 * needs to be the kernel BTF *path* object. The path is
4826 * returned in the provided buffer *buf* of size *sz* and
4827 * is zero terminated.
4828 *
4829 * Return
4830 * On success, the strictly positive length of the string,
4831 * including the trailing NUL character. On error, a negative
4832 * value.
4833 *
4834 * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
4835 * Description
4836 * Read *size* bytes from user space address *user_ptr* and store
4837 * the data in *dst*. This is a wrapper of **copy_from_user**\ ().
4838 * Return
4839 * 0 on success, or a negative error in case of failure.
4840 *
4841 * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags)
4842 * Description
4843 * Use BTF to store a string representation of *ptr*->ptr in *str*,
4844 * using *ptr*->type_id. This value should specify the type
4845 * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1)
4846 * can be used to look up vmlinux BTF type ids. Traversing the
4847 * data structure using BTF, the type information and values are
4848 * stored in the first *str_size* - 1 bytes of *str*. Safe copy of
4849 * the pointer data is carried out to avoid kernel crashes during
4850 * operation. Smaller types can use string space on the stack;
4851 * larger programs can use map data to store the string
4852 * representation.
4853 *
4854 * The string can be subsequently shared with userspace via
4855 * bpf_perf_event_output() or ring buffer interfaces.
4856 * bpf_trace_printk() is to be avoided as it places too small
4857 * a limit on string size to be useful.
4858 *
4859 * *flags* is a combination of
4860 *
4861 * **BTF_F_COMPACT**
4862 * no formatting around type information
4863 * **BTF_F_NONAME**
4864 * no struct/union member names/types
4865 * **BTF_F_PTR_RAW**
4866 * show raw (unobfuscated) pointer values;
4867 * equivalent to printk specifier %px.
4868 * **BTF_F_ZERO**
4869 * show zero-valued struct/union members; they
4870 * are not displayed by default
4871 *
4872 * Return
4873 * The number of bytes that were written (or would have been
4874 * written if output had to be truncated due to string size),
4875 * or a negative error in cases of failure.
4876 *
4877 * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags)
4878 * Description
4879 * Use BTF to write to seq_write a string representation of
4880 * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf().
4881 * *flags* are identical to those used for bpf_snprintf_btf.
4882 * Return
4883 * 0 on success or a negative error in case of failure.
4884 *
4885 * u64 bpf_skb_cgroup_classid(struct sk_buff *skb)
4886 * Description
4887 * See **bpf_get_cgroup_classid**\ () for the main description.
4888 * This helper differs from **bpf_get_cgroup_classid**\ () in that
4889 * the cgroup v1 net_cls class is retrieved only from the *skb*'s
4890 * associated socket instead of the current process.
4891 * Return
4892 * The id is returned or 0 in case the id could not be retrieved.
4893 *
4894 * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags)
4895 * Description
4896 * Redirect the packet to another net device of index *ifindex*
4897 * and fill in L2 addresses from neighboring subsystem. This helper
4898 * is somewhat similar to **bpf_redirect**\ (), except that it
4899 * populates L2 addresses as well, meaning, internally, the helper
4900 * relies on the neighbor lookup for the L2 address of the nexthop.
4901 *
4902 * The helper will perform a FIB lookup based on the skb's
4903 * networking header to get the address of the next hop, unless
4904 * this is supplied by the caller in the *params* argument. The
4905 * *plen* argument indicates the len of *params* and should be set
4906 * to 0 if *params* is NULL.
4907 *
4908 * The *flags* argument is reserved and must be 0. The helper is
4909 * currently only supported for tc BPF program types, and enabled
4910 * for IPv4 and IPv6 protocols.
4911 * Return
4912 * The helper returns **TC_ACT_REDIRECT** on success or
4913 * **TC_ACT_SHOT** on error.
4914 *
4915 * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu)
4916 * Description
4917 * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
4918 * pointer to the percpu kernel variable on *cpu*. A ksym is an
4919 * extern variable decorated with '__ksym'. For ksym, there is a
4920 * global var (either static or global) defined of the same name
4921 * in the kernel. The ksym is percpu if the global var is percpu.
4922 * The returned pointer points to the global percpu var on *cpu*.
4923 *
4924 * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the
4925 * kernel, except that bpf_per_cpu_ptr() may return NULL. This
4926 * happens if *cpu* is larger than nr_cpu_ids. The caller of
4927 * bpf_per_cpu_ptr() must check the returned value.
4928 * Return
4929 * A pointer pointing to the kernel percpu variable on *cpu*, or
4930 * NULL, if *cpu* is invalid.
4931 *
4932 * void *bpf_this_cpu_ptr(const void *percpu_ptr)
4933 * Description
4934 * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
4935 * pointer to the percpu kernel variable on this cpu. See the
4936 * description of 'ksym' in **bpf_per_cpu_ptr**\ ().
4937 *
4938 * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in
4939 * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would
4940 * never return NULL.
4941 * Return
4942 * A pointer pointing to the kernel percpu variable on this cpu.
4943 *
4944 * long bpf_redirect_peer(u32 ifindex, u64 flags)
4945 * Description
4946 * Redirect the packet to another net device of index *ifindex*.
4947 * This helper is somewhat similar to **bpf_redirect**\ (), except
4948 * that the redirection happens to the *ifindex*' peer device and
4949 * the netns switch takes place from ingress to ingress without
4950 * going through the CPU's backlog queue.
4951 *
4952 * The *flags* argument is reserved and must be 0. The helper is
4953 * currently only supported for tc BPF program types at the
4954 * ingress hook and for veth and netkit target device types. The
4955 * peer device must reside in a different network namespace.
4956 * Return
4957 * The helper returns **TC_ACT_REDIRECT** on success or
4958 * **TC_ACT_SHOT** on error.
4959 *
4960 * void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags)
4961 * Description
4962 * Get a bpf_local_storage from the *task*.
4963 *
4964 * Logically, it could be thought of as getting the value from
4965 * a *map* with *task* as the **key**. From this
4966 * perspective, the usage is not much different from
4967 * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
4968 * helper enforces the key must be a task_struct and the map must also
4969 * be a **BPF_MAP_TYPE_TASK_STORAGE**.
4970 *
4971 * Underneath, the value is stored locally at *task* instead of
4972 * the *map*. The *map* is used as the bpf-local-storage
4973 * "type". The bpf-local-storage "type" (i.e. the *map*) is
4974 * searched against all bpf_local_storage residing at *task*.
4975 *
4976 * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
4977 * used such that a new bpf_local_storage will be
4978 * created if one does not exist. *value* can be used
4979 * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
4980 * the initial value of a bpf_local_storage. If *value* is
4981 * **NULL**, the new bpf_local_storage will be zero initialized.
4982 * Return
4983 * A bpf_local_storage pointer is returned on success.
4984 *
4985 * **NULL** if not found or there was an error in adding
4986 * a new bpf_local_storage.
4987 *
4988 * long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task)
4989 * Description
4990 * Delete a bpf_local_storage from a *task*.
4991 * Return
4992 * 0 on success.
4993 *
4994 * **-ENOENT** if the bpf_local_storage cannot be found.
4995 *
4996 * struct task_struct *bpf_get_current_task_btf(void)
4997 * Description
4998 * Return a BTF pointer to the "current" task.
4999 * This pointer can also be used in helpers that accept an
5000 * *ARG_PTR_TO_BTF_ID* of type *task_struct*.
5001 * Return
5002 * Pointer to the current task.
5003 *
5004 * long bpf_bprm_opts_set(struct linux_binprm *bprm, u64 flags)
5005 * Description
5006 * Set or clear certain options on *bprm*:
5007 *
5008 * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit
5009 * which sets the **AT_SECURE** auxv for glibc. The bit
5010 * is cleared if the flag is not specified.
5011 * Return
5012 * **-EINVAL** if invalid *flags* are passed, zero otherwise.
5013 *
5014 * u64 bpf_ktime_get_coarse_ns(void)
5015 * Description
5016 * Return a coarse-grained version of the time elapsed since
5017 * system boot, in nanoseconds. Does not include time the system
5018 * was suspended.
5019 *
5020 * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**)
5021 * Return
5022 * Current *ktime*.
5023 *
5024 * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size)
5025 * Description
5026 * Returns the stored IMA hash of the *inode* (if it's available).
5027 * If the hash is larger than *size*, then only *size*
5028 * bytes will be copied to *dst*
5029 * Return
5030 * The **hash_algo** is returned on success,
5031 * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
5032 * invalid arguments are passed.
5033 *
5034 * struct socket *bpf_sock_from_file(struct file *file)
5035 * Description
5036 * If the given file represents a socket, returns the associated
5037 * socket.
5038 * Return
5039 * A pointer to a struct socket on success or NULL if the file is
5040 * not a socket.
5041 *
5042 * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
5043 * Description
5044 * Check packet size against exceeding MTU of net device (based
5045 * on *ifindex*). This helper will likely be used in combination
5046 * with helpers that adjust/change the packet size.
5047 *
5048 * The argument *len_diff* can be used for querying with a planned
5049 * size change. This allows to check MTU prior to changing packet
5050 * ctx. Providing a *len_diff* adjustment that is larger than the
5051 * actual packet size (resulting in negative packet size) will in
5052 * principle not exceed the MTU, which is why it is not considered
5053 * a failure. Other BPF helpers are needed for performing the
5054 * planned size change; therefore the responsibility for catching
5055 * a negative packet size belongs in those helpers.
5056 *
5057 * Specifying *ifindex* zero means the MTU check is performed
5058 * against the current net device. This is practical if this isn't
5059 * used prior to redirect.
5060 *
5061 * On input *mtu_len* must be a valid pointer, else verifier will
5062 * reject BPF program. If the value *mtu_len* is initialized to
5063 * zero then the ctx packet size is use. When value *mtu_len* is
5064 * provided as input this specify the L3 length that the MTU check
5065 * is done against. Remember XDP and TC length operate at L2, but
5066 * this value is L3 as this correlate to MTU and IP-header tot_len
5067 * values which are L3 (similar behavior as bpf_fib_lookup).
5068 *
5069 * The Linux kernel route table can configure MTUs on a more
5070 * specific per route level, which is not provided by this helper.
5071 * For route level MTU checks use the **bpf_fib_lookup**\ ()
5072 * helper.
5073 *
5074 * *ctx* is either **struct xdp_md** for XDP programs or
5075 * **struct sk_buff** for tc cls_act programs.
5076 *
5077 * The *flags* argument can be a combination of one or more of the
5078 * following values:
5079 *
5080 * **BPF_MTU_CHK_SEGS**
5081 * This flag will only works for *ctx* **struct sk_buff**.
5082 * If packet context contains extra packet segment buffers
5083 * (often knows as GSO skb), then MTU check is harder to
5084 * check at this point, because in transmit path it is
5085 * possible for the skb packet to get re-segmented
5086 * (depending on net device features). This could still be
5087 * a MTU violation, so this flag enables performing MTU
5088 * check against segments, with a different violation
5089 * return code to tell it apart. Check cannot use len_diff.
5090 *
5091 * On return *mtu_len* pointer contains the MTU value of the net
5092 * device. Remember the net device configured MTU is the L3 size,
5093 * which is returned here and XDP and TC length operate at L2.
5094 * Helper take this into account for you, but remember when using
5095 * MTU value in your BPF-code.
5096 *
5097 * Return
5098 * * 0 on success, and populate MTU value in *mtu_len* pointer.
5099 *
5100 * * < 0 if any input argument is invalid (*mtu_len* not updated)
5101 *
5102 * MTU violations return positive values, but also populate MTU
5103 * value in *mtu_len* pointer, as this can be needed for
5104 * implementing PMTU handing:
5105 *
5106 * * **BPF_MTU_CHK_RET_FRAG_NEEDED**
5107 * * **BPF_MTU_CHK_RET_SEGS_TOOBIG**
5108 *
5109 * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags)
5110 * Description
5111 * For each element in **map**, call **callback_fn** function with
5112 * **map**, **callback_ctx** and other map-specific parameters.
5113 * The **callback_fn** should be a static function and
5114 * the **callback_ctx** should be a pointer to the stack.
5115 * The **flags** is used to control certain aspects of the helper.
5116 * Currently, the **flags** must be 0.
5117 *
5118 * The following are a list of supported map types and their
5119 * respective expected callback signatures:
5120 *
5121 * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
5122 * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
5123 * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
5124 *
5125 * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
5126 *
5127 * For per_cpu maps, the map_value is the value on the cpu where the
5128 * bpf_prog is running.
5129 *
5130 * If **callback_fn** return 0, the helper will continue to the next
5131 * element. If return value is 1, the helper will skip the rest of
5132 * elements and return. Other return values are not used now.
5133 *
5134 * Return
5135 * The number of traversed map elements for success, **-EINVAL** for
5136 * invalid **flags**.
5137 *
5138 * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len)
5139 * Description
5140 * Outputs a string into the **str** buffer of size **str_size**
5141 * based on a format string stored in a read-only map pointed by
5142 * **fmt**.
5143 *
5144 * Each format specifier in **fmt** corresponds to one u64 element
5145 * in the **data** array. For strings and pointers where pointees
5146 * are accessed, only the pointer values are stored in the *data*
5147 * array. The *data_len* is the size of *data* in bytes - must be
5148 * a multiple of 8.
5149 *
5150 * Formats **%s** and **%p{i,I}{4,6}** require to read kernel
5151 * memory. Reading kernel memory may fail due to either invalid
5152 * address or valid address but requiring a major memory fault. If
5153 * reading kernel memory fails, the string for **%s** will be an
5154 * empty string, and the ip address for **%p{i,I}{4,6}** will be 0.
5155 * Not returning error to bpf program is consistent with what
5156 * **bpf_trace_printk**\ () does for now.
5157 *
5158 * Return
5159 * The strictly positive length of the formatted string, including
5160 * the trailing zero character. If the return value is greater than
5161 * **str_size**, **str** contains a truncated string, guaranteed to
5162 * be zero-terminated except when **str_size** is 0.
5163 *
5164 * Or **-EBUSY** if the per-CPU memory copy buffer is busy.
5165 *
5166 * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
5167 * Description
5168 * Execute bpf syscall with given arguments.
5169 * Return
5170 * A syscall result.
5171 *
5172 * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
5173 * Description
5174 * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
5175 * Return
5176 * Returns btf_id and btf_obj_fd in lower and upper 32 bits.
5177 *
5178 * long bpf_sys_close(u32 fd)
5179 * Description
5180 * Execute close syscall for given FD.
5181 * Return
5182 * A syscall result.
5183 *
5184 * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags)
5185 * Description
5186 * Initialize the timer.
5187 * First 4 bits of *flags* specify clockid.
5188 * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed.
5189 * All other bits of *flags* are reserved.
5190 * The verifier will reject the program if *timer* is not from
5191 * the same *map*.
5192 * Return
5193 * 0 on success.
5194 * **-EBUSY** if *timer* is already initialized.
5195 * **-EINVAL** if invalid *flags* are passed.
5196 * **-EPERM** if *timer* is in a map that doesn't have any user references.
5197 * The user space should either hold a file descriptor to a map with timers
5198 * or pin such map in bpffs. When map is unpinned or file descriptor is
5199 * closed all timers in the map will be cancelled and freed.
5200 *
5201 * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn)
5202 * Description
5203 * Configure the timer to call *callback_fn* static function.
5204 * Return
5205 * 0 on success.
5206 * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
5207 * **-EPERM** if *timer* is in a map that doesn't have any user references.
5208 * The user space should either hold a file descriptor to a map with timers
5209 * or pin such map in bpffs. When map is unpinned or file descriptor is
5210 * closed all timers in the map will be cancelled and freed.
5211 *
5212 * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags)
5213 * Description
5214 * Set timer expiration N nanoseconds from the current time. The
5215 * configured callback will be invoked in soft irq context on some cpu
5216 * and will not repeat unless another bpf_timer_start() is made.
5217 * In such case the next invocation can migrate to a different cpu.
5218 * Since struct bpf_timer is a field inside map element the map
5219 * owns the timer. The bpf_timer_set_callback() will increment refcnt
5220 * of BPF program to make sure that callback_fn code stays valid.
5221 * When user space reference to a map reaches zero all timers
5222 * in a map are cancelled and corresponding program's refcnts are
5223 * decremented. This is done to make sure that Ctrl-C of a user
5224 * process doesn't leave any timers running. If map is pinned in
5225 * bpffs the callback_fn can re-arm itself indefinitely.
5226 * bpf_map_update/delete_elem() helpers and user space sys_bpf commands
5227 * cancel and free the timer in the given map element.
5228 * The map can contain timers that invoke callback_fn-s from different
5229 * programs. The same callback_fn can serve different timers from
5230 * different maps if key/value layout matches across maps.
5231 * Every bpf_timer_set_callback() can have different callback_fn.
5232 *
5233 * *flags* can be one of:
5234 *
5235 * **BPF_F_TIMER_ABS**
5236 * Start the timer in absolute expire value instead of the
5237 * default relative one.
5238 * **BPF_F_TIMER_CPU_PIN**
5239 * Timer will be pinned to the CPU of the caller.
5240 *
5241 * Return
5242 * 0 on success.
5243 * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
5244 * or invalid *flags* are passed.
5245 *
5246 * long bpf_timer_cancel(struct bpf_timer *timer)
5247 * Description
5248 * Cancel the timer and wait for callback_fn to finish if it was running.
5249 * Return
5250 * 0 if the timer was not active.
5251 * 1 if the timer was active.
5252 * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
5253 * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
5254 * own timer which would have led to a deadlock otherwise.
5255 *
5256 * u64 bpf_get_func_ip(void *ctx)
5257 * Description
5258 * Get address of the traced function (for tracing and kprobe programs).
5259 *
5260 * When called for kprobe program attached as uprobe it returns
5261 * probe address for both entry and return uprobe.
5262 *
5263 * Return
5264 * Address of the traced function for kprobe.
5265 * 0 for kprobes placed within the function (not at the entry).
5266 * Address of the probe for uprobe and return uprobe.
5267 *
5268 * u64 bpf_get_attach_cookie(void *ctx)
5269 * Description
5270 * Get bpf_cookie value provided (optionally) during the program
5271 * attachment. It might be different for each individual
5272 * attachment, even if BPF program itself is the same.
5273 * Expects BPF program context *ctx* as a first argument.
5274 *
5275 * Supported for the following program types:
5276 * - kprobe/uprobe;
5277 * - tracepoint;
5278 * - perf_event.
5279 * Return
5280 * Value specified by user at BPF link creation/attachment time
5281 * or 0, if it was not specified.
5282 *
5283 * long bpf_task_pt_regs(struct task_struct *task)
5284 * Description
5285 * Get the struct pt_regs associated with **task**.
5286 * Return
5287 * A pointer to struct pt_regs.
5288 *
5289 * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
5290 * Description
5291 * Get branch trace from hardware engines like Intel LBR. The
5292 * hardware engine is stopped shortly after the helper is
5293 * called. Therefore, the user need to filter branch entries
5294 * based on the actual use case. To capture branch trace
5295 * before the trigger point of the BPF program, the helper
5296 * should be called at the beginning of the BPF program.
5297 *
5298 * The data is stored as struct perf_branch_entry into output
5299 * buffer *entries*. *size* is the size of *entries* in bytes.
5300 * *flags* is reserved for now and must be zero.
5301 *
5302 * Return
5303 * On success, number of bytes written to *buf*. On error, a
5304 * negative value.
5305 *
5306 * **-EINVAL** if *flags* is not zero.
5307 *
5308 * **-ENOENT** if architecture does not support branch records.
5309 *
5310 * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
5311 * Description
5312 * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
5313 * to format and can handle more format args as a result.
5314 *
5315 * Arguments are to be used as in **bpf_seq_printf**\ () helper.
5316 * Return
5317 * The number of bytes written to the buffer, or a negative error
5318 * in case of failure.
5319 *
5320 * struct unix_sock *bpf_skc_to_unix_sock(void *sk)
5321 * Description
5322 * Dynamically cast a *sk* pointer to a *unix_sock* pointer.
5323 * Return
5324 * *sk* if casting is valid, or **NULL** otherwise.
5325 *
5326 * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
5327 * Description
5328 * Get the address of a kernel symbol, returned in *res*. *res* is
5329 * set to 0 if the symbol is not found.
5330 * Return
5331 * On success, zero. On error, a negative value.
5332 *
5333 * **-EINVAL** if *flags* is not zero.
5334 *
5335 * **-EINVAL** if string *name* is not the same size as *name_sz*.
5336 *
5337 * **-ENOENT** if symbol is not found.
5338 *
5339 * **-EPERM** if caller does not have permission to obtain kernel address.
5340 *
5341 * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
5342 * Description
5343 * Find vma of *task* that contains *addr*, call *callback_fn*
5344 * function with *task*, *vma*, and *callback_ctx*.
5345 * The *callback_fn* should be a static function and
5346 * the *callback_ctx* should be a pointer to the stack.
5347 * The *flags* is used to control certain aspects of the helper.
5348 * Currently, the *flags* must be 0.
5349 *
5350 * The expected callback signature is
5351 *
5352 * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
5353 *
5354 * Return
5355 * 0 on success.
5356 * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
5357 * **-EBUSY** if failed to try lock mmap_lock.
5358 * **-EINVAL** for invalid **flags**.
5359 *
5360 * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags)
5361 * Description
5362 * For **nr_loops**, call **callback_fn** function
5363 * with **callback_ctx** as the context parameter.
5364 * The **callback_fn** should be a static function and
5365 * the **callback_ctx** should be a pointer to the stack.
5366 * The **flags** is used to control certain aspects of the helper.
5367 * Currently, the **flags** must be 0. Currently, nr_loops is
5368 * limited to 1 << 23 (~8 million) loops.
5369 *
5370 * long (\*callback_fn)(u32 index, void \*ctx);
5371 *
5372 * where **index** is the current index in the loop. The index
5373 * is zero-indexed.
5374 *
5375 * If **callback_fn** returns 0, the helper will continue to the next
5376 * loop. If return value is 1, the helper will skip the rest of
5377 * the loops and return. Other return values are not used now,
5378 * and will be rejected by the verifier.
5379 *
5380 * Return
5381 * The number of loops performed, **-EINVAL** for invalid **flags**,
5382 * **-E2BIG** if **nr_loops** exceeds the maximum number of loops.
5383 *
5384 * long bpf_strncmp(const char *s1, u32 s1_sz, const char *s2)
5385 * Description
5386 * Do strncmp() between **s1** and **s2**. **s1** doesn't need
5387 * to be null-terminated and **s1_sz** is the maximum storage
5388 * size of **s1**. **s2** must be a read-only string.
5389 * Return
5390 * An integer less than, equal to, or greater than zero
5391 * if the first **s1_sz** bytes of **s1** is found to be
5392 * less than, to match, or be greater than **s2**.
5393 *
5394 * long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
5395 * Description
5396 * Get **n**-th argument register (zero based) of the traced function (for tracing programs)
5397 * returned in **value**.
5398 *
5399 * Return
5400 * 0 on success.
5401 * **-EINVAL** if n >= argument register count of traced function.
5402 *
5403 * long bpf_get_func_ret(void *ctx, u64 *value)
5404 * Description
5405 * Get return value of the traced function (for tracing programs)
5406 * in **value**.
5407 *
5408 * Return
5409 * 0 on success.
5410 * **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
5411 *
5412 * long bpf_get_func_arg_cnt(void *ctx)
5413 * Description
5414 * Get number of registers of the traced function (for tracing programs) where
5415 * function arguments are stored in these registers.
5416 *
5417 * Return
5418 * The number of argument registers of the traced function.
5419 *
5420 * int bpf_get_retval(void)
5421 * Description
5422 * Get the BPF program's return value that will be returned to the upper layers.
5423 *
5424 * This helper is currently supported by cgroup programs and only by the hooks
5425 * where BPF program's return value is returned to the userspace via errno.
5426 * Return
5427 * The BPF program's return value.
5428 *
5429 * int bpf_set_retval(int retval)
5430 * Description
5431 * Set the BPF program's return value that will be returned to the upper layers.
5432 *
5433 * This helper is currently supported by cgroup programs and only by the hooks
5434 * where BPF program's return value is returned to the userspace via errno.
5435 *
5436 * Note that there is the following corner case where the program exports an error
5437 * via bpf_set_retval but signals success via 'return 1':
5438 *
5439 * bpf_set_retval(-EPERM);
5440 * return 1;
5441 *
5442 * In this case, the BPF program's return value will use helper's -EPERM. This
5443 * still holds true for cgroup/bind{4,6} which supports extra 'return 3' success case.
5444 *
5445 * Return
5446 * 0 on success, or a negative error in case of failure.
5447 *
5448 * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md)
5449 * Description
5450 * Get the total size of a given xdp buff (linear and paged area)
5451 * Return
5452 * The total size of a given xdp buffer.
5453 *
5454 * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
5455 * Description
5456 * This helper is provided as an easy way to load data from a
5457 * xdp buffer. It can be used to load *len* bytes from *offset* from
5458 * the frame associated to *xdp_md*, into the buffer pointed by
5459 * *buf*.
5460 * Return
5461 * 0 on success, or a negative error in case of failure.
5462 *
5463 * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
5464 * Description
5465 * Store *len* bytes from buffer *buf* into the frame
5466 * associated to *xdp_md*, at *offset*.
5467 * Return
5468 * 0 on success, or a negative error in case of failure.
5469 *
5470 * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags)
5471 * Description
5472 * Read *size* bytes from user space address *user_ptr* in *tsk*'s
5473 * address space, and stores the data in *dst*. *flags* is not
5474 * used yet and is provided for future extensibility. This helper
5475 * can only be used by sleepable programs.
5476 * Return
5477 * 0 on success, or a negative error in case of failure. On error
5478 * *dst* buffer is zeroed out.
5479 *
5480 * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type)
5481 * Description
5482 * Change the __sk_buff->tstamp_type to *tstamp_type*
5483 * and set *tstamp* to the __sk_buff->tstamp together.
5484 *
5485 * If there is no need to change the __sk_buff->tstamp_type,
5486 * the tstamp value can be directly written to __sk_buff->tstamp
5487 * instead.
5488 *
5489 * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that
5490 * will be kept during bpf_redirect_*(). A non zero
5491 * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO
5492 * *tstamp_type*.
5493 *
5494 * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used
5495 * with a zero *tstamp*.
5496 *
5497 * Only IPv4 and IPv6 skb->protocol are supported.
5498 *
5499 * This function is most useful when it needs to set a
5500 * mono delivery time to __sk_buff->tstamp and then
5501 * bpf_redirect_*() to the egress of an iface. For example,
5502 * changing the (rcv) timestamp in __sk_buff->tstamp at
5503 * ingress to a mono delivery time and then bpf_redirect_*()
5504 * to sch_fq@phy-dev.
5505 * Return
5506 * 0 on success.
5507 * **-EINVAL** for invalid input
5508 * **-EOPNOTSUPP** for unsupported protocol
5509 *
5510 * long bpf_ima_file_hash(struct file *file, void *dst, u32 size)
5511 * Description
5512 * Returns a calculated IMA hash of the *file*.
5513 * If the hash is larger than *size*, then only *size*
5514 * bytes will be copied to *dst*
5515 * Return
5516 * The **hash_algo** is returned on success,
5517 * **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
5518 * invalid arguments are passed.
5519 *
5520 * void *bpf_kptr_xchg(void *map_value, void *ptr)
5521 * Description
5522 * Exchange kptr at pointer *map_value* with *ptr*, and return the
5523 * old value. *ptr* can be NULL, otherwise it must be a referenced
5524 * pointer which will be released when this helper is called.
5525 * Return
5526 * The old value of kptr (which can be NULL). The returned pointer
5527 * if not NULL, is a reference which must be released using its
5528 * corresponding release function, or moved into a BPF map before
5529 * program exit.
5530 *
5531 * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
5532 * Description
5533 * Perform a lookup in *percpu map* for an entry associated to
5534 * *key* on *cpu*.
5535 * Return
5536 * Map value associated to *key* on *cpu*, or **NULL** if no entry
5537 * was found or *cpu* is invalid.
5538 *
5539 * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk)
5540 * Description
5541 * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
5542 * Return
5543 * *sk* if casting is valid, or **NULL** otherwise.
5544 *
5545 * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr)
5546 * Description
5547 * Get a dynptr to local memory *data*.
5548 *
5549 * *data* must be a ptr to a map value.
5550 * The maximum *size* supported is DYNPTR_MAX_SIZE.
5551 * *flags* is currently unused.
5552 * Return
5553 * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
5554 * -EINVAL if flags is not 0.
5555 *
5556 * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
5557 * Description
5558 * Reserve *size* bytes of payload in a ring buffer *ringbuf*
5559 * through the dynptr interface. *flags* must be 0.
5560 *
5561 * Please note that a corresponding bpf_ringbuf_submit_dynptr or
5562 * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
5563 * reservation fails. This is enforced by the verifier.
5564 * Return
5565 * 0 on success, or a negative error in case of failure.
5566 *
5567 * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
5568 * Description
5569 * Submit reserved ring buffer sample, pointed to by *data*,
5570 * through the dynptr interface. This is a no-op if the dynptr is
5571 * invalid/null.
5572 *
5573 * For more information on *flags*, please see
5574 * 'bpf_ringbuf_submit'.
5575 * Return
5576 * Nothing. Always succeeds.
5577 *
5578 * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
5579 * Description
5580 * Discard reserved ring buffer sample through the dynptr
5581 * interface. This is a no-op if the dynptr is invalid/null.
5582 *
5583 * For more information on *flags*, please see
5584 * 'bpf_ringbuf_discard'.
5585 * Return
5586 * Nothing. Always succeeds.
5587 *
5588 * long bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr *src, u32 offset, u64 flags)
5589 * Description
5590 * Read *len* bytes from *src* into *dst*, starting from *offset*
5591 * into *src*.
5592 * *flags* is currently unused.
5593 * Return
5594 * 0 on success, -E2BIG if *offset* + *len* exceeds the length
5595 * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
5596 * *flags* is not 0.
5597 *
5598 * long bpf_dynptr_write(const struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
5599 * Description
5600 * Write *len* bytes from *src* into *dst*, starting from *offset*
5601 * into *dst*.
5602 *
5603 * *flags* must be 0 except for skb-type dynptrs.
5604 *
5605 * For skb-type dynptrs:
5606 * * All data slices of the dynptr are automatically
5607 * invalidated after **bpf_dynptr_write**\ (). This is
5608 * because writing may pull the skb and change the
5609 * underlying packet buffer.
5610 *
5611 * * For *flags*, please see the flags accepted by
5612 * **bpf_skb_store_bytes**\ ().
5613 * Return
5614 * 0 on success, -E2BIG if *offset* + *len* exceeds the length
5615 * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
5616 * is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs,
5617 * other errors correspond to errors returned by **bpf_skb_store_bytes**\ ().
5618 *
5619 * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len)
5620 * Description
5621 * Get a pointer to the underlying dynptr data.
5622 *
5623 * *len* must be a statically known value. The returned data slice
5624 * is invalidated whenever the dynptr is invalidated.
5625 *
5626 * skb and xdp type dynptrs may not use bpf_dynptr_data. They should
5627 * instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr.
5628 * Return
5629 * Pointer to the underlying dynptr data, NULL if the dynptr is
5630 * read-only, if the dynptr is invalid, or if the offset and length
5631 * is out of bounds.
5632 *
5633 * s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len)
5634 * Description
5635 * Try to issue a SYN cookie for the packet with corresponding
5636 * IPv4/TCP headers, *iph* and *th*, without depending on a
5637 * listening socket.
5638 *
5639 * *iph* points to the IPv4 header.
5640 *
5641 * *th* points to the start of the TCP header, while *th_len*
5642 * contains the length of the TCP header (at least
5643 * **sizeof**\ (**struct tcphdr**)).
5644 * Return
5645 * On success, lower 32 bits hold the generated SYN cookie in
5646 * followed by 16 bits which hold the MSS value for that cookie,
5647 * and the top 16 bits are unused.
5648 *
5649 * On failure, the returned value is one of the following:
5650 *
5651 * **-EINVAL** if *th_len* is invalid.
5652 *
5653 * s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len)
5654 * Description
5655 * Try to issue a SYN cookie for the packet with corresponding
5656 * IPv6/TCP headers, *iph* and *th*, without depending on a
5657 * listening socket.
5658 *
5659 * *iph* points to the IPv6 header.
5660 *
5661 * *th* points to the start of the TCP header, while *th_len*
5662 * contains the length of the TCP header (at least
5663 * **sizeof**\ (**struct tcphdr**)).
5664 * Return
5665 * On success, lower 32 bits hold the generated SYN cookie in
5666 * followed by 16 bits which hold the MSS value for that cookie,
5667 * and the top 16 bits are unused.
5668 *
5669 * On failure, the returned value is one of the following:
5670 *
5671 * **-EINVAL** if *th_len* is invalid.
5672 *
5673 * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
5674 *
5675 * long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th)
5676 * Description
5677 * Check whether *iph* and *th* contain a valid SYN cookie ACK
5678 * without depending on a listening socket.
5679 *
5680 * *iph* points to the IPv4 header.
5681 *
5682 * *th* points to the TCP header.
5683 * Return
5684 * 0 if *iph* and *th* are a valid SYN cookie ACK.
5685 *
5686 * On failure, the returned value is one of the following:
5687 *
5688 * **-EACCES** if the SYN cookie is not valid.
5689 *
5690 * long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th)
5691 * Description
5692 * Check whether *iph* and *th* contain a valid SYN cookie ACK
5693 * without depending on a listening socket.
5694 *
5695 * *iph* points to the IPv6 header.
5696 *
5697 * *th* points to the TCP header.
5698 * Return
5699 * 0 if *iph* and *th* are a valid SYN cookie ACK.
5700 *
5701 * On failure, the returned value is one of the following:
5702 *
5703 * **-EACCES** if the SYN cookie is not valid.
5704 *
5705 * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
5706 *
5707 * u64 bpf_ktime_get_tai_ns(void)
5708 * Description
5709 * A nonsettable system-wide clock derived from wall-clock time but
5710 * ignoring leap seconds. This clock does not experience
5711 * discontinuities and backwards jumps caused by NTP inserting leap
5712 * seconds as CLOCK_REALTIME does.
5713 *
5714 * See: **clock_gettime**\ (**CLOCK_TAI**)
5715 * Return
5716 * Current *ktime*.
5717 *
5718 * long bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void *ctx, u64 flags)
5719 * Description
5720 * Drain samples from the specified user ring buffer, and invoke
5721 * the provided callback for each such sample:
5722 *
5723 * long (\*callback_fn)(const struct bpf_dynptr \*dynptr, void \*ctx);
5724 *
5725 * If **callback_fn** returns 0, the helper will continue to try
5726 * and drain the next sample, up to a maximum of
5727 * BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1,
5728 * the helper will skip the rest of the samples and return. Other
5729 * return values are not used now, and will be rejected by the
5730 * verifier.
5731 * Return
5732 * The number of drained samples if no error was encountered while
5733 * draining samples, or 0 if no samples were present in the ring
5734 * buffer. If a user-space producer was epoll-waiting on this map,
5735 * and at least one sample was drained, they will receive an event
5736 * notification notifying them of available space in the ring
5737 * buffer. If the BPF_RB_NO_WAKEUP flag is passed to this
5738 * function, no wakeup notification will be sent. If the
5739 * BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will
5740 * be sent even if no sample was drained.
5741 *
5742 * On failure, the returned value is one of the following:
5743 *
5744 * **-EBUSY** if the ring buffer is contended, and another calling
5745 * context was concurrently draining the ring buffer.
5746 *
5747 * **-EINVAL** if user-space is not properly tracking the ring
5748 * buffer due to the producer position not being aligned to 8
5749 * bytes, a sample not being aligned to 8 bytes, or the producer
5750 * position not matching the advertised length of a sample.
5751 *
5752 * **-E2BIG** if user-space has tried to publish a sample which is
5753 * larger than the size of the ring buffer, or which cannot fit
5754 * within a struct bpf_dynptr.
5755 *
5756 * void *bpf_cgrp_storage_get(struct bpf_map *map, struct cgroup *cgroup, void *value, u64 flags)
5757 * Description
5758 * Get a bpf_local_storage from the *cgroup*.
5759 *
5760 * Logically, it could be thought of as getting the value from
5761 * a *map* with *cgroup* as the **key**. From this
5762 * perspective, the usage is not much different from
5763 * **bpf_map_lookup_elem**\ (*map*, **&**\ *cgroup*) except this
5764 * helper enforces the key must be a cgroup struct and the map must also
5765 * be a **BPF_MAP_TYPE_CGRP_STORAGE**.
5766 *
5767 * In reality, the local-storage value is embedded directly inside of the
5768 * *cgroup* object itself, rather than being located in the
5769 * **BPF_MAP_TYPE_CGRP_STORAGE** map. When the local-storage value is
5770 * queried for some *map* on a *cgroup* object, the kernel will perform an
5771 * O(n) iteration over all of the live local-storage values for that
5772 * *cgroup* object until the local-storage value for the *map* is found.
5773 *
5774 * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
5775 * used such that a new bpf_local_storage will be
5776 * created if one does not exist. *value* can be used
5777 * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
5778 * the initial value of a bpf_local_storage. If *value* is
5779 * **NULL**, the new bpf_local_storage will be zero initialized.
5780 * Return
5781 * A bpf_local_storage pointer is returned on success.
5782 *
5783 * **NULL** if not found or there was an error in adding
5784 * a new bpf_local_storage.
5785 *
5786 * long bpf_cgrp_storage_delete(struct bpf_map *map, struct cgroup *cgroup)
5787 * Description
5788 * Delete a bpf_local_storage from a *cgroup*.
5789 * Return
5790 * 0 on success.
5791 *
5792 * **-ENOENT** if the bpf_local_storage cannot be found.
5793 */
5794#define ___BPF_FUNC_MAPPER(FN, ctx...) \
5795 FN(unspec, 0, ##ctx) \
5796 FN(map_lookup_elem, 1, ##ctx) \
5797 FN(map_update_elem, 2, ##ctx) \
5798 FN(map_delete_elem, 3, ##ctx) \
5799 FN(probe_read, 4, ##ctx) \
5800 FN(ktime_get_ns, 5, ##ctx) \
5801 FN(trace_printk, 6, ##ctx) \
5802 FN(get_prandom_u32, 7, ##ctx) \
5803 FN(get_smp_processor_id, 8, ##ctx) \
5804 FN(skb_store_bytes, 9, ##ctx) \
5805 FN(l3_csum_replace, 10, ##ctx) \
5806 FN(l4_csum_replace, 11, ##ctx) \
5807 FN(tail_call, 12, ##ctx) \
5808 FN(clone_redirect, 13, ##ctx) \
5809 FN(get_current_pid_tgid, 14, ##ctx) \
5810 FN(get_current_uid_gid, 15, ##ctx) \
5811 FN(get_current_comm, 16, ##ctx) \
5812 FN(get_cgroup_classid, 17, ##ctx) \
5813 FN(skb_vlan_push, 18, ##ctx) \
5814 FN(skb_vlan_pop, 19, ##ctx) \
5815 FN(skb_get_tunnel_key, 20, ##ctx) \
5816 FN(skb_set_tunnel_key, 21, ##ctx) \
5817 FN(perf_event_read, 22, ##ctx) \
5818 FN(redirect, 23, ##ctx) \
5819 FN(get_route_realm, 24, ##ctx) \
5820 FN(perf_event_output, 25, ##ctx) \
5821 FN(skb_load_bytes, 26, ##ctx) \
5822 FN(get_stackid, 27, ##ctx) \
5823 FN(csum_diff, 28, ##ctx) \
5824 FN(skb_get_tunnel_opt, 29, ##ctx) \
5825 FN(skb_set_tunnel_opt, 30, ##ctx) \
5826 FN(skb_change_proto, 31, ##ctx) \
5827 FN(skb_change_type, 32, ##ctx) \
5828 FN(skb_under_cgroup, 33, ##ctx) \
5829 FN(get_hash_recalc, 34, ##ctx) \
5830 FN(get_current_task, 35, ##ctx) \
5831 FN(probe_write_user, 36, ##ctx) \
5832 FN(current_task_under_cgroup, 37, ##ctx) \
5833 FN(skb_change_tail, 38, ##ctx) \
5834 FN(skb_pull_data, 39, ##ctx) \
5835 FN(csum_update, 40, ##ctx) \
5836 FN(set_hash_invalid, 41, ##ctx) \
5837 FN(get_numa_node_id, 42, ##ctx) \
5838 FN(skb_change_head, 43, ##ctx) \
5839 FN(xdp_adjust_head, 44, ##ctx) \
5840 FN(probe_read_str, 45, ##ctx) \
5841 FN(get_socket_cookie, 46, ##ctx) \
5842 FN(get_socket_uid, 47, ##ctx) \
5843 FN(set_hash, 48, ##ctx) \
5844 FN(setsockopt, 49, ##ctx) \
5845 FN(skb_adjust_room, 50, ##ctx) \
5846 FN(redirect_map, 51, ##ctx) \
5847 FN(sk_redirect_map, 52, ##ctx) \
5848 FN(sock_map_update, 53, ##ctx) \
5849 FN(xdp_adjust_meta, 54, ##ctx) \
5850 FN(perf_event_read_value, 55, ##ctx) \
5851 FN(perf_prog_read_value, 56, ##ctx) \
5852 FN(getsockopt, 57, ##ctx) \
5853 FN(override_return, 58, ##ctx) \
5854 FN(sock_ops_cb_flags_set, 59, ##ctx) \
5855 FN(msg_redirect_map, 60, ##ctx) \
5856 FN(msg_apply_bytes, 61, ##ctx) \
5857 FN(msg_cork_bytes, 62, ##ctx) \
5858 FN(msg_pull_data, 63, ##ctx) \
5859 FN(bind, 64, ##ctx) \
5860 FN(xdp_adjust_tail, 65, ##ctx) \
5861 FN(skb_get_xfrm_state, 66, ##ctx) \
5862 FN(get_stack, 67, ##ctx) \
5863 FN(skb_load_bytes_relative, 68, ##ctx) \
5864 FN(fib_lookup, 69, ##ctx) \
5865 FN(sock_hash_update, 70, ##ctx) \
5866 FN(msg_redirect_hash, 71, ##ctx) \
5867 FN(sk_redirect_hash, 72, ##ctx) \
5868 FN(lwt_push_encap, 73, ##ctx) \
5869 FN(lwt_seg6_store_bytes, 74, ##ctx) \
5870 FN(lwt_seg6_adjust_srh, 75, ##ctx) \
5871 FN(lwt_seg6_action, 76, ##ctx) \
5872 FN(rc_repeat, 77, ##ctx) \
5873 FN(rc_keydown, 78, ##ctx) \
5874 FN(skb_cgroup_id, 79, ##ctx) \
5875 FN(get_current_cgroup_id, 80, ##ctx) \
5876 FN(get_local_storage, 81, ##ctx) \
5877 FN(sk_select_reuseport, 82, ##ctx) \
5878 FN(skb_ancestor_cgroup_id, 83, ##ctx) \
5879 FN(sk_lookup_tcp, 84, ##ctx) \
5880 FN(sk_lookup_udp, 85, ##ctx) \
5881 FN(sk_release, 86, ##ctx) \
5882 FN(map_push_elem, 87, ##ctx) \
5883 FN(map_pop_elem, 88, ##ctx) \
5884 FN(map_peek_elem, 89, ##ctx) \
5885 FN(msg_push_data, 90, ##ctx) \
5886 FN(msg_pop_data, 91, ##ctx) \
5887 FN(rc_pointer_rel, 92, ##ctx) \
5888 FN(spin_lock, 93, ##ctx) \
5889 FN(spin_unlock, 94, ##ctx) \
5890 FN(sk_fullsock, 95, ##ctx) \
5891 FN(tcp_sock, 96, ##ctx) \
5892 FN(skb_ecn_set_ce, 97, ##ctx) \
5893 FN(get_listener_sock, 98, ##ctx) \
5894 FN(skc_lookup_tcp, 99, ##ctx) \
5895 FN(tcp_check_syncookie, 100, ##ctx) \
5896 FN(sysctl_get_name, 101, ##ctx) \
5897 FN(sysctl_get_current_value, 102, ##ctx) \
5898 FN(sysctl_get_new_value, 103, ##ctx) \
5899 FN(sysctl_set_new_value, 104, ##ctx) \
5900 FN(strtol, 105, ##ctx) \
5901 FN(strtoul, 106, ##ctx) \
5902 FN(sk_storage_get, 107, ##ctx) \
5903 FN(sk_storage_delete, 108, ##ctx) \
5904 FN(send_signal, 109, ##ctx) \
5905 FN(tcp_gen_syncookie, 110, ##ctx) \
5906 FN(skb_output, 111, ##ctx) \
5907 FN(probe_read_user, 112, ##ctx) \
5908 FN(probe_read_kernel, 113, ##ctx) \
5909 FN(probe_read_user_str, 114, ##ctx) \
5910 FN(probe_read_kernel_str, 115, ##ctx) \
5911 FN(tcp_send_ack, 116, ##ctx) \
5912 FN(send_signal_thread, 117, ##ctx) \
5913 FN(jiffies64, 118, ##ctx) \
5914 FN(read_branch_records, 119, ##ctx) \
5915 FN(get_ns_current_pid_tgid, 120, ##ctx) \
5916 FN(xdp_output, 121, ##ctx) \
5917 FN(get_netns_cookie, 122, ##ctx) \
5918 FN(get_current_ancestor_cgroup_id, 123, ##ctx) \
5919 FN(sk_assign, 124, ##ctx) \
5920 FN(ktime_get_boot_ns, 125, ##ctx) \
5921 FN(seq_printf, 126, ##ctx) \
5922 FN(seq_write, 127, ##ctx) \
5923 FN(sk_cgroup_id, 128, ##ctx) \
5924 FN(sk_ancestor_cgroup_id, 129, ##ctx) \
5925 FN(ringbuf_output, 130, ##ctx) \
5926 FN(ringbuf_reserve, 131, ##ctx) \
5927 FN(ringbuf_submit, 132, ##ctx) \
5928 FN(ringbuf_discard, 133, ##ctx) \
5929 FN(ringbuf_query, 134, ##ctx) \
5930 FN(csum_level, 135, ##ctx) \
5931 FN(skc_to_tcp6_sock, 136, ##ctx) \
5932 FN(skc_to_tcp_sock, 137, ##ctx) \
5933 FN(skc_to_tcp_timewait_sock, 138, ##ctx) \
5934 FN(skc_to_tcp_request_sock, 139, ##ctx) \
5935 FN(skc_to_udp6_sock, 140, ##ctx) \
5936 FN(get_task_stack, 141, ##ctx) \
5937 FN(load_hdr_opt, 142, ##ctx) \
5938 FN(store_hdr_opt, 143, ##ctx) \
5939 FN(reserve_hdr_opt, 144, ##ctx) \
5940 FN(inode_storage_get, 145, ##ctx) \
5941 FN(inode_storage_delete, 146, ##ctx) \
5942 FN(d_path, 147, ##ctx) \
5943 FN(copy_from_user, 148, ##ctx) \
5944 FN(snprintf_btf, 149, ##ctx) \
5945 FN(seq_printf_btf, 150, ##ctx) \
5946 FN(skb_cgroup_classid, 151, ##ctx) \
5947 FN(redirect_neigh, 152, ##ctx) \
5948 FN(per_cpu_ptr, 153, ##ctx) \
5949 FN(this_cpu_ptr, 154, ##ctx) \
5950 FN(redirect_peer, 155, ##ctx) \
5951 FN(task_storage_get, 156, ##ctx) \
5952 FN(task_storage_delete, 157, ##ctx) \
5953 FN(get_current_task_btf, 158, ##ctx) \
5954 FN(bprm_opts_set, 159, ##ctx) \
5955 FN(ktime_get_coarse_ns, 160, ##ctx) \
5956 FN(ima_inode_hash, 161, ##ctx) \
5957 FN(sock_from_file, 162, ##ctx) \
5958 FN(check_mtu, 163, ##ctx) \
5959 FN(for_each_map_elem, 164, ##ctx) \
5960 FN(snprintf, 165, ##ctx) \
5961 FN(sys_bpf, 166, ##ctx) \
5962 FN(btf_find_by_name_kind, 167, ##ctx) \
5963 FN(sys_close, 168, ##ctx) \
5964 FN(timer_init, 169, ##ctx) \
5965 FN(timer_set_callback, 170, ##ctx) \
5966 FN(timer_start, 171, ##ctx) \
5967 FN(timer_cancel, 172, ##ctx) \
5968 FN(get_func_ip, 173, ##ctx) \
5969 FN(get_attach_cookie, 174, ##ctx) \
5970 FN(task_pt_regs, 175, ##ctx) \
5971 FN(get_branch_snapshot, 176, ##ctx) \
5972 FN(trace_vprintk, 177, ##ctx) \
5973 FN(skc_to_unix_sock, 178, ##ctx) \
5974 FN(kallsyms_lookup_name, 179, ##ctx) \
5975 FN(find_vma, 180, ##ctx) \
5976 FN(loop, 181, ##ctx) \
5977 FN(strncmp, 182, ##ctx) \
5978 FN(get_func_arg, 183, ##ctx) \
5979 FN(get_func_ret, 184, ##ctx) \
5980 FN(get_func_arg_cnt, 185, ##ctx) \
5981 FN(get_retval, 186, ##ctx) \
5982 FN(set_retval, 187, ##ctx) \
5983 FN(xdp_get_buff_len, 188, ##ctx) \
5984 FN(xdp_load_bytes, 189, ##ctx) \
5985 FN(xdp_store_bytes, 190, ##ctx) \
5986 FN(copy_from_user_task, 191, ##ctx) \
5987 FN(skb_set_tstamp, 192, ##ctx) \
5988 FN(ima_file_hash, 193, ##ctx) \
5989 FN(kptr_xchg, 194, ##ctx) \
5990 FN(map_lookup_percpu_elem, 195, ##ctx) \
5991 FN(skc_to_mptcp_sock, 196, ##ctx) \
5992 FN(dynptr_from_mem, 197, ##ctx) \
5993 FN(ringbuf_reserve_dynptr, 198, ##ctx) \
5994 FN(ringbuf_submit_dynptr, 199, ##ctx) \
5995 FN(ringbuf_discard_dynptr, 200, ##ctx) \
5996 FN(dynptr_read, 201, ##ctx) \
5997 FN(dynptr_write, 202, ##ctx) \
5998 FN(dynptr_data, 203, ##ctx) \
5999 FN(tcp_raw_gen_syncookie_ipv4, 204, ##ctx) \
6000 FN(tcp_raw_gen_syncookie_ipv6, 205, ##ctx) \
6001 FN(tcp_raw_check_syncookie_ipv4, 206, ##ctx) \
6002 FN(tcp_raw_check_syncookie_ipv6, 207, ##ctx) \
6003 FN(ktime_get_tai_ns, 208, ##ctx) \
6004 FN(user_ringbuf_drain, 209, ##ctx) \
6005 FN(cgrp_storage_get, 210, ##ctx) \
6006 FN(cgrp_storage_delete, 211, ##ctx) \
6007 /* */
6008
6009/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't
6010 * know or care about integer value that is now passed as second argument
6011 */
6012#define __BPF_FUNC_MAPPER_APPLY(name, value, FN) FN(name),
6013#define __BPF_FUNC_MAPPER(FN) ___BPF_FUNC_MAPPER(__BPF_FUNC_MAPPER_APPLY, FN)
6014
6015/* integer value in 'imm' field of BPF_CALL instruction selects which helper
6016 * function eBPF program intends to call
6017 */
6018#define __BPF_ENUM_FN(x, y) BPF_FUNC_ ## x = y,
6019enum bpf_func_id {
6020 ___BPF_FUNC_MAPPER(__BPF_ENUM_FN)
6021 __BPF_FUNC_MAX_ID,
6022};
6023#undef __BPF_ENUM_FN
6024
6025/* All flags used by eBPF helper functions, placed here. */
6026
6027/* BPF_FUNC_skb_store_bytes flags. */
6028enum {
6029 BPF_F_RECOMPUTE_CSUM = (1ULL << 0),
6030 BPF_F_INVALIDATE_HASH = (1ULL << 1),
6031};
6032
6033/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
6034 * First 4 bits are for passing the header field size.
6035 */
6036enum {
6037 BPF_F_HDR_FIELD_MASK = 0xfULL,
6038};
6039
6040/* BPF_FUNC_l4_csum_replace flags. */
6041enum {
6042 BPF_F_PSEUDO_HDR = (1ULL << 4),
6043 BPF_F_MARK_MANGLED_0 = (1ULL << 5),
6044 BPF_F_MARK_ENFORCE = (1ULL << 6),
6045};
6046
6047/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
6048enum {
6049 BPF_F_INGRESS = (1ULL << 0),
6050};
6051
6052/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
6053enum {
6054 BPF_F_TUNINFO_IPV6 = (1ULL << 0),
6055};
6056
6057/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
6058enum {
6059 BPF_F_SKIP_FIELD_MASK = 0xffULL,
6060 BPF_F_USER_STACK = (1ULL << 8),
6061/* flags used by BPF_FUNC_get_stackid only. */
6062 BPF_F_FAST_STACK_CMP = (1ULL << 9),
6063 BPF_F_REUSE_STACKID = (1ULL << 10),
6064/* flags used by BPF_FUNC_get_stack only. */
6065 BPF_F_USER_BUILD_ID = (1ULL << 11),
6066};
6067
6068/* BPF_FUNC_skb_set_tunnel_key flags. */
6069enum {
6070 BPF_F_ZERO_CSUM_TX = (1ULL << 1),
6071 BPF_F_DONT_FRAGMENT = (1ULL << 2),
6072 BPF_F_SEQ_NUMBER = (1ULL << 3),
6073 BPF_F_NO_TUNNEL_KEY = (1ULL << 4),
6074};
6075
6076/* BPF_FUNC_skb_get_tunnel_key flags. */
6077enum {
6078 BPF_F_TUNINFO_FLAGS = (1ULL << 4),
6079};
6080
6081/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
6082 * BPF_FUNC_perf_event_read_value flags.
6083 */
6084enum {
6085 BPF_F_INDEX_MASK = 0xffffffffULL,
6086 BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK,
6087/* BPF_FUNC_perf_event_output for sk_buff input context. */
6088 BPF_F_CTXLEN_MASK = (0xfffffULL << 32),
6089};
6090
6091/* Current network namespace */
6092enum {
6093 BPF_F_CURRENT_NETNS = (-1L),
6094};
6095
6096/* BPF_FUNC_csum_level level values. */
6097enum {
6098 BPF_CSUM_LEVEL_QUERY,
6099 BPF_CSUM_LEVEL_INC,
6100 BPF_CSUM_LEVEL_DEC,
6101 BPF_CSUM_LEVEL_RESET,
6102};
6103
6104/* BPF_FUNC_skb_adjust_room flags. */
6105enum {
6106 BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0),
6107 BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1),
6108 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2),
6109 BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3),
6110 BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
6111 BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5),
6112 BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6),
6113 BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = (1ULL << 7),
6114 BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = (1ULL << 8),
6115};
6116
6117enum {
6118 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff,
6119 BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56,
6120};
6121
6122#define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \
6123 BPF_ADJ_ROOM_ENCAP_L2_MASK) \
6124 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
6125
6126/* BPF_FUNC_sysctl_get_name flags. */
6127enum {
6128 BPF_F_SYSCTL_BASE_NAME = (1ULL << 0),
6129};
6130
6131/* BPF_FUNC_<kernel_obj>_storage_get flags */
6132enum {
6133 BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0),
6134 /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility
6135 * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead.
6136 */
6137 BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE,
6138};
6139
6140/* BPF_FUNC_read_branch_records flags. */
6141enum {
6142 BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0),
6143};
6144
6145/* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and
6146 * BPF_FUNC_bpf_ringbuf_output flags.
6147 */
6148enum {
6149 BPF_RB_NO_WAKEUP = (1ULL << 0),
6150 BPF_RB_FORCE_WAKEUP = (1ULL << 1),
6151};
6152
6153/* BPF_FUNC_bpf_ringbuf_query flags */
6154enum {
6155 BPF_RB_AVAIL_DATA = 0,
6156 BPF_RB_RING_SIZE = 1,
6157 BPF_RB_CONS_POS = 2,
6158 BPF_RB_PROD_POS = 3,
6159};
6160
6161/* BPF ring buffer constants */
6162enum {
6163 BPF_RINGBUF_BUSY_BIT = (1U << 31),
6164 BPF_RINGBUF_DISCARD_BIT = (1U << 30),
6165 BPF_RINGBUF_HDR_SZ = 8,
6166};
6167
6168/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */
6169enum {
6170 BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0),
6171 BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1),
6172};
6173
6174/* Mode for BPF_FUNC_skb_adjust_room helper. */
6175enum bpf_adj_room_mode {
6176 BPF_ADJ_ROOM_NET,
6177 BPF_ADJ_ROOM_MAC,
6178};
6179
6180/* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
6181enum bpf_hdr_start_off {
6182 BPF_HDR_START_MAC,
6183 BPF_HDR_START_NET,
6184};
6185
6186/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
6187enum bpf_lwt_encap_mode {
6188 BPF_LWT_ENCAP_SEG6,
6189 BPF_LWT_ENCAP_SEG6_INLINE,
6190 BPF_LWT_ENCAP_IP,
6191};
6192
6193/* Flags for bpf_bprm_opts_set helper */
6194enum {
6195 BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
6196};
6197
6198/* Flags for bpf_redirect_map helper */
6199enum {
6200 BPF_F_BROADCAST = (1ULL << 3),
6201 BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
6202};
6203
6204#define __bpf_md_ptr(type, name) \
6205union { \
6206 type name; \
6207 __u64 :64; \
6208} __attribute__((aligned(8)))
6209
6210enum {
6211 BPF_SKB_TSTAMP_UNSPEC,
6212 BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */
6213 /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle,
6214 * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC
6215 * and try to deduce it by ingress, egress or skb->sk->sk_clockid.
6216 */
6217};
6218
6219/* user accessible mirror of in-kernel sk_buff.
6220 * new fields can only be added to the end of this structure
6221 */
6222struct __sk_buff {
6223 __u32 len;
6224 __u32 pkt_type;
6225 __u32 mark;
6226 __u32 queue_mapping;
6227 __u32 protocol;
6228 __u32 vlan_present;
6229 __u32 vlan_tci;
6230 __u32 vlan_proto;
6231 __u32 priority;
6232 __u32 ingress_ifindex;
6233 __u32 ifindex;
6234 __u32 tc_index;
6235 __u32 cb[5];
6236 __u32 hash;
6237 __u32 tc_classid;
6238 __u32 data;
6239 __u32 data_end;
6240 __u32 napi_id;
6241
6242 /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
6243 __u32 family;
6244 __u32 remote_ip4; /* Stored in network byte order */
6245 __u32 local_ip4; /* Stored in network byte order */
6246 __u32 remote_ip6[4]; /* Stored in network byte order */
6247 __u32 local_ip6[4]; /* Stored in network byte order */
6248 __u32 remote_port; /* Stored in network byte order */
6249 __u32 local_port; /* stored in host byte order */
6250 /* ... here. */
6251
6252 __u32 data_meta;
6253 __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
6254 __u64 tstamp;
6255 __u32 wire_len;
6256 __u32 gso_segs;
6257 __bpf_md_ptr(struct bpf_sock *, sk);
6258 __u32 gso_size;
6259 __u8 tstamp_type;
6260 __u32 :24; /* Padding, future use. */
6261 __u64 hwtstamp;
6262};
6263
6264struct bpf_tunnel_key {
6265 __u32 tunnel_id;
6266 union {
6267 __u32 remote_ipv4;
6268 __u32 remote_ipv6[4];
6269 };
6270 __u8 tunnel_tos;
6271 __u8 tunnel_ttl;
6272 union {
6273 __u16 tunnel_ext; /* compat */
6274 __be16 tunnel_flags;
6275 };
6276 __u32 tunnel_label;
6277 union {
6278 __u32 local_ipv4;
6279 __u32 local_ipv6[4];
6280 };
6281};
6282
6283/* user accessible mirror of in-kernel xfrm_state.
6284 * new fields can only be added to the end of this structure
6285 */
6286struct bpf_xfrm_state {
6287 __u32 reqid;
6288 __u32 spi; /* Stored in network byte order */
6289 __u16 family;
6290 __u16 ext; /* Padding, future use. */
6291 union {
6292 __u32 remote_ipv4; /* Stored in network byte order */
6293 __u32 remote_ipv6[4]; /* Stored in network byte order */
6294 };
6295};
6296
6297/* Generic BPF return codes which all BPF program types may support.
6298 * The values are binary compatible with their TC_ACT_* counter-part to
6299 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
6300 * programs.
6301 *
6302 * XDP is handled seprately, see XDP_*.
6303 */
6304enum bpf_ret_code {
6305 BPF_OK = 0,
6306 /* 1 reserved */
6307 BPF_DROP = 2,
6308 /* 3-6 reserved */
6309 BPF_REDIRECT = 7,
6310 /* >127 are reserved for prog type specific return codes.
6311 *
6312 * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
6313 * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
6314 * changed and should be routed based on its new L3 header.
6315 * (This is an L3 redirect, as opposed to L2 redirect
6316 * represented by BPF_REDIRECT above).
6317 */
6318 BPF_LWT_REROUTE = 128,
6319 /* BPF_FLOW_DISSECTOR_CONTINUE: used by BPF_PROG_TYPE_FLOW_DISSECTOR
6320 * to indicate that no custom dissection was performed, and
6321 * fallback to standard dissector is requested.
6322 */
6323 BPF_FLOW_DISSECTOR_CONTINUE = 129,
6324};
6325
6326struct bpf_sock {
6327 __u32 bound_dev_if;
6328 __u32 family;
6329 __u32 type;
6330 __u32 protocol;
6331 __u32 mark;
6332 __u32 priority;
6333 /* IP address also allows 1 and 2 bytes access */
6334 __u32 src_ip4;
6335 __u32 src_ip6[4];
6336 __u32 src_port; /* host byte order */
6337 __be16 dst_port; /* network byte order */
6338 __u16 :16; /* zero padding */
6339 __u32 dst_ip4;
6340 __u32 dst_ip6[4];
6341 __u32 state;
6342 __s32 rx_queue_mapping;
6343};
6344
6345struct bpf_tcp_sock {
6346 __u32 snd_cwnd; /* Sending congestion window */
6347 __u32 srtt_us; /* smoothed round trip time << 3 in usecs */
6348 __u32 rtt_min;
6349 __u32 snd_ssthresh; /* Slow start size threshold */
6350 __u32 rcv_nxt; /* What we want to receive next */
6351 __u32 snd_nxt; /* Next sequence we send */
6352 __u32 snd_una; /* First byte we want an ack for */
6353 __u32 mss_cache; /* Cached effective mss, not including SACKS */
6354 __u32 ecn_flags; /* ECN status bits. */
6355 __u32 rate_delivered; /* saved rate sample: packets delivered */
6356 __u32 rate_interval_us; /* saved rate sample: time elapsed */
6357 __u32 packets_out; /* Packets which are "in flight" */
6358 __u32 retrans_out; /* Retransmitted packets out */
6359 __u32 total_retrans; /* Total retransmits for entire connection */
6360 __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
6361 * total number of segments in.
6362 */
6363 __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
6364 * total number of data segments in.
6365 */
6366 __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
6367 * The total number of segments sent.
6368 */
6369 __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
6370 * total number of data segments sent.
6371 */
6372 __u32 lost_out; /* Lost packets */
6373 __u32 sacked_out; /* SACK'd packets */
6374 __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
6375 * sum(delta(rcv_nxt)), or how many bytes
6376 * were acked.
6377 */
6378 __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
6379 * sum(delta(snd_una)), or how many bytes
6380 * were acked.
6381 */
6382 __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
6383 * total number of DSACK blocks received
6384 */
6385 __u32 delivered; /* Total data packets delivered incl. rexmits */
6386 __u32 delivered_ce; /* Like the above but only ECE marked packets */
6387 __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */
6388};
6389
6390struct bpf_sock_tuple {
6391 union {
6392 struct {
6393 __be32 saddr;
6394 __be32 daddr;
6395 __be16 sport;
6396 __be16 dport;
6397 } ipv4;
6398 struct {
6399 __be32 saddr[4];
6400 __be32 daddr[4];
6401 __be16 sport;
6402 __be16 dport;
6403 } ipv6;
6404 };
6405};
6406
6407/* (Simplified) user return codes for tcx prog type.
6408 * A valid tcx program must return one of these defined values. All other
6409 * return codes are reserved for future use. Must remain compatible with
6410 * their TC_ACT_* counter-parts. For compatibility in behavior, unknown
6411 * return codes are mapped to TCX_NEXT.
6412 */
6413enum tcx_action_base {
6414 TCX_NEXT = -1,
6415 TCX_PASS = 0,
6416 TCX_DROP = 2,
6417 TCX_REDIRECT = 7,
6418};
6419
6420struct bpf_xdp_sock {
6421 __u32 queue_id;
6422};
6423
6424#define XDP_PACKET_HEADROOM 256
6425
6426/* User return codes for XDP prog type.
6427 * A valid XDP program must return one of these defined values. All other
6428 * return codes are reserved for future use. Unknown return codes will
6429 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
6430 */
6431enum xdp_action {
6432 XDP_ABORTED = 0,
6433 XDP_DROP,
6434 XDP_PASS,
6435 XDP_TX,
6436 XDP_REDIRECT,
6437};
6438
6439/* user accessible metadata for XDP packet hook
6440 * new fields must be added to the end of this structure
6441 */
6442struct xdp_md {
6443 __u32 data;
6444 __u32 data_end;
6445 __u32 data_meta;
6446 /* Below access go through struct xdp_rxq_info */
6447 __u32 ingress_ifindex; /* rxq->dev->ifindex */
6448 __u32 rx_queue_index; /* rxq->queue_index */
6449
6450 __u32 egress_ifindex; /* txq->dev->ifindex */
6451};
6452
6453/* DEVMAP map-value layout
6454 *
6455 * The struct data-layout of map-value is a configuration interface.
6456 * New members can only be added to the end of this structure.
6457 */
6458struct bpf_devmap_val {
6459 __u32 ifindex; /* device index */
6460 union {
6461 int fd; /* prog fd on map write */
6462 __u32 id; /* prog id on map read */
6463 } bpf_prog;
6464};
6465
6466/* CPUMAP map-value layout
6467 *
6468 * The struct data-layout of map-value is a configuration interface.
6469 * New members can only be added to the end of this structure.
6470 */
6471struct bpf_cpumap_val {
6472 __u32 qsize; /* queue size to remote target CPU */
6473 union {
6474 int fd; /* prog fd on map write */
6475 __u32 id; /* prog id on map read */
6476 } bpf_prog;
6477};
6478
6479enum sk_action {
6480 SK_DROP = 0,
6481 SK_PASS,
6482};
6483
6484/* user accessible metadata for SK_MSG packet hook, new fields must
6485 * be added to the end of this structure
6486 */
6487struct sk_msg_md {
6488 __bpf_md_ptr(void *, data);
6489 __bpf_md_ptr(void *, data_end);
6490
6491 __u32 family;
6492 __u32 remote_ip4; /* Stored in network byte order */
6493 __u32 local_ip4; /* Stored in network byte order */
6494 __u32 remote_ip6[4]; /* Stored in network byte order */
6495 __u32 local_ip6[4]; /* Stored in network byte order */
6496 __u32 remote_port; /* Stored in network byte order */
6497 __u32 local_port; /* stored in host byte order */
6498 __u32 size; /* Total size of sk_msg */
6499
6500 __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */
6501};
6502
6503struct sk_reuseport_md {
6504 /*
6505 * Start of directly accessible data. It begins from
6506 * the tcp/udp header.
6507 */
6508 __bpf_md_ptr(void *, data);
6509 /* End of directly accessible data */
6510 __bpf_md_ptr(void *, data_end);
6511 /*
6512 * Total length of packet (starting from the tcp/udp header).
6513 * Note that the directly accessible bytes (data_end - data)
6514 * could be less than this "len". Those bytes could be
6515 * indirectly read by a helper "bpf_skb_load_bytes()".
6516 */
6517 __u32 len;
6518 /*
6519 * Eth protocol in the mac header (network byte order). e.g.
6520 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
6521 */
6522 __u32 eth_protocol;
6523 __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
6524 __u32 bind_inany; /* Is sock bound to an INANY address? */
6525 __u32 hash; /* A hash of the packet 4 tuples */
6526 /* When reuse->migrating_sk is NULL, it is selecting a sk for the
6527 * new incoming connection request (e.g. selecting a listen sk for
6528 * the received SYN in the TCP case). reuse->sk is one of the sk
6529 * in the reuseport group. The bpf prog can use reuse->sk to learn
6530 * the local listening ip/port without looking into the skb.
6531 *
6532 * When reuse->migrating_sk is not NULL, reuse->sk is closed and
6533 * reuse->migrating_sk is the socket that needs to be migrated
6534 * to another listening socket. migrating_sk could be a fullsock
6535 * sk that is fully established or a reqsk that is in-the-middle
6536 * of 3-way handshake.
6537 */
6538 __bpf_md_ptr(struct bpf_sock *, sk);
6539 __bpf_md_ptr(struct bpf_sock *, migrating_sk);
6540};
6541
6542#define BPF_TAG_SIZE 8
6543
6544struct bpf_prog_info {
6545 __u32 type;
6546 __u32 id;
6547 __u8 tag[BPF_TAG_SIZE];
6548 __u32 jited_prog_len;
6549 __u32 xlated_prog_len;
6550 __aligned_u64 jited_prog_insns;
6551 __aligned_u64 xlated_prog_insns;
6552 __u64 load_time; /* ns since boottime */
6553 __u32 created_by_uid;
6554 __u32 nr_map_ids;
6555 __aligned_u64 map_ids;
6556 char name[BPF_OBJ_NAME_LEN];
6557 __u32 ifindex;
6558 __u32 gpl_compatible:1;
6559 __u32 :31; /* alignment pad */
6560 __u64 netns_dev;
6561 __u64 netns_ino;
6562 __u32 nr_jited_ksyms;
6563 __u32 nr_jited_func_lens;
6564 __aligned_u64 jited_ksyms;
6565 __aligned_u64 jited_func_lens;
6566 __u32 btf_id;
6567 __u32 func_info_rec_size;
6568 __aligned_u64 func_info;
6569 __u32 nr_func_info;
6570 __u32 nr_line_info;
6571 __aligned_u64 line_info;
6572 __aligned_u64 jited_line_info;
6573 __u32 nr_jited_line_info;
6574 __u32 line_info_rec_size;
6575 __u32 jited_line_info_rec_size;
6576 __u32 nr_prog_tags;
6577 __aligned_u64 prog_tags;
6578 __u64 run_time_ns;
6579 __u64 run_cnt;
6580 __u64 recursion_misses;
6581 __u32 verified_insns;
6582 __u32 attach_btf_obj_id;
6583 __u32 attach_btf_id;
6584} __attribute__((aligned(8)));
6585
6586struct bpf_map_info {
6587 __u32 type;
6588 __u32 id;
6589 __u32 key_size;
6590 __u32 value_size;
6591 __u32 max_entries;
6592 __u32 map_flags;
6593 char name[BPF_OBJ_NAME_LEN];
6594 __u32 ifindex;
6595 __u32 btf_vmlinux_value_type_id;
6596 __u64 netns_dev;
6597 __u64 netns_ino;
6598 __u32 btf_id;
6599 __u32 btf_key_type_id;
6600 __u32 btf_value_type_id;
6601 __u32 btf_vmlinux_id;
6602 __u64 map_extra;
6603} __attribute__((aligned(8)));
6604
6605struct bpf_btf_info {
6606 __aligned_u64 btf;
6607 __u32 btf_size;
6608 __u32 id;
6609 __aligned_u64 name;
6610 __u32 name_len;
6611 __u32 kernel_btf;
6612} __attribute__((aligned(8)));
6613
6614struct bpf_link_info {
6615 __u32 type;
6616 __u32 id;
6617 __u32 prog_id;
6618 union {
6619 struct {
6620 __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */
6621 __u32 tp_name_len; /* in/out: tp_name buffer len */
6622 } raw_tracepoint;
6623 struct {
6624 __u32 attach_type;
6625 __u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */
6626 __u32 target_btf_id; /* BTF type id inside the object */
6627 } tracing;
6628 struct {
6629 __u64 cgroup_id;
6630 __u32 attach_type;
6631 } cgroup;
6632 struct {
6633 __aligned_u64 target_name; /* in/out: target_name buffer ptr */
6634 __u32 target_name_len; /* in/out: target_name buffer len */
6635
6636 /* If the iter specific field is 32 bits, it can be put
6637 * in the first or second union. Otherwise it should be
6638 * put in the second union.
6639 */
6640 union {
6641 struct {
6642 __u32 map_id;
6643 } map;
6644 };
6645 union {
6646 struct {
6647 __u64 cgroup_id;
6648 __u32 order;
6649 } cgroup;
6650 struct {
6651 __u32 tid;
6652 __u32 pid;
6653 } task;
6654 };
6655 } iter;
6656 struct {
6657 __u32 netns_ino;
6658 __u32 attach_type;
6659 } netns;
6660 struct {
6661 __u32 ifindex;
6662 } xdp;
6663 struct {
6664 __u32 map_id;
6665 } struct_ops;
6666 struct {
6667 __u32 pf;
6668 __u32 hooknum;
6669 __s32 priority;
6670 __u32 flags;
6671 } netfilter;
6672 struct {
6673 __aligned_u64 addrs;
6674 __u32 count; /* in/out: kprobe_multi function count */
6675 __u32 flags;
6676 __u64 missed;
6677 __aligned_u64 cookies;
6678 } kprobe_multi;
6679 struct {
6680 __aligned_u64 path;
6681 __aligned_u64 offsets;
6682 __aligned_u64 ref_ctr_offsets;
6683 __aligned_u64 cookies;
6684 __u32 path_size; /* in/out: real path size on success, including zero byte */
6685 __u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
6686 __u32 flags;
6687 __u32 pid;
6688 } uprobe_multi;
6689 struct {
6690 __u32 type; /* enum bpf_perf_event_type */
6691 __u32 :32;
6692 union {
6693 struct {
6694 __aligned_u64 file_name; /* in/out */
6695 __u32 name_len;
6696 __u32 offset; /* offset from file_name */
6697 __u64 cookie;
6698 } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
6699 struct {
6700 __aligned_u64 func_name; /* in/out */
6701 __u32 name_len;
6702 __u32 offset; /* offset from func_name */
6703 __u64 addr;
6704 __u64 missed;
6705 __u64 cookie;
6706 } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
6707 struct {
6708 __aligned_u64 tp_name; /* in/out */
6709 __u32 name_len;
6710 __u32 :32;
6711 __u64 cookie;
6712 } tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
6713 struct {
6714 __u64 config;
6715 __u32 type;
6716 __u32 :32;
6717 __u64 cookie;
6718 } event; /* BPF_PERF_EVENT_EVENT */
6719 };
6720 } perf_event;
6721 struct {
6722 __u32 ifindex;
6723 __u32 attach_type;
6724 } tcx;
6725 struct {
6726 __u32 ifindex;
6727 __u32 attach_type;
6728 } netkit;
6729 struct {
6730 __u32 map_id;
6731 __u32 attach_type;
6732 } sockmap;
6733 };
6734} __attribute__((aligned(8)));
6735
6736/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
6737 * by user and intended to be used by socket (e.g. to bind to, depends on
6738 * attach type).
6739 */
6740struct bpf_sock_addr {
6741 __u32 user_family; /* Allows 4-byte read, but no write. */
6742 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
6743 * Stored in network byte order.
6744 */
6745 __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
6746 * Stored in network byte order.
6747 */
6748 __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write.
6749 * Stored in network byte order
6750 */
6751 __u32 family; /* Allows 4-byte read, but no write */
6752 __u32 type; /* Allows 4-byte read, but no write */
6753 __u32 protocol; /* Allows 4-byte read, but no write */
6754 __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write.
6755 * Stored in network byte order.
6756 */
6757 __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
6758 * Stored in network byte order.
6759 */
6760 __bpf_md_ptr(struct bpf_sock *, sk);
6761};
6762
6763/* User bpf_sock_ops struct to access socket values and specify request ops
6764 * and their replies.
6765 * Some of this fields are in network (bigendian) byte order and may need
6766 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
6767 * New fields can only be added at the end of this structure
6768 */
6769struct bpf_sock_ops {
6770 __u32 op;
6771 union {
6772 __u32 args[4]; /* Optionally passed to bpf program */
6773 __u32 reply; /* Returned by bpf program */
6774 __u32 replylong[4]; /* Optionally returned by bpf prog */
6775 };
6776 __u32 family;
6777 __u32 remote_ip4; /* Stored in network byte order */
6778 __u32 local_ip4; /* Stored in network byte order */
6779 __u32 remote_ip6[4]; /* Stored in network byte order */
6780 __u32 local_ip6[4]; /* Stored in network byte order */
6781 __u32 remote_port; /* Stored in network byte order */
6782 __u32 local_port; /* stored in host byte order */
6783 __u32 is_fullsock; /* Some TCP fields are only valid if
6784 * there is a full socket. If not, the
6785 * fields read as zero.
6786 */
6787 __u32 snd_cwnd;
6788 __u32 srtt_us; /* Averaged RTT << 3 in usecs */
6789 __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
6790 __u32 state;
6791 __u32 rtt_min;
6792 __u32 snd_ssthresh;
6793 __u32 rcv_nxt;
6794 __u32 snd_nxt;
6795 __u32 snd_una;
6796 __u32 mss_cache;
6797 __u32 ecn_flags;
6798 __u32 rate_delivered;
6799 __u32 rate_interval_us;
6800 __u32 packets_out;
6801 __u32 retrans_out;
6802 __u32 total_retrans;
6803 __u32 segs_in;
6804 __u32 data_segs_in;
6805 __u32 segs_out;
6806 __u32 data_segs_out;
6807 __u32 lost_out;
6808 __u32 sacked_out;
6809 __u32 sk_txhash;
6810 __u64 bytes_received;
6811 __u64 bytes_acked;
6812 __bpf_md_ptr(struct bpf_sock *, sk);
6813 /* [skb_data, skb_data_end) covers the whole TCP header.
6814 *
6815 * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received
6816 * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the
6817 * header has not been written.
6818 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have
6819 * been written so far.
6820 * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes
6821 * the 3WHS.
6822 * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes
6823 * the 3WHS.
6824 *
6825 * bpf_load_hdr_opt() can also be used to read a particular option.
6826 */
6827 __bpf_md_ptr(void *, skb_data);
6828 __bpf_md_ptr(void *, skb_data_end);
6829 __u32 skb_len; /* The total length of a packet.
6830 * It includes the header, options,
6831 * and payload.
6832 */
6833 __u32 skb_tcp_flags; /* tcp_flags of the header. It provides
6834 * an easy way to check for tcp_flags
6835 * without parsing skb_data.
6836 *
6837 * In particular, the skb_tcp_flags
6838 * will still be available in
6839 * BPF_SOCK_OPS_HDR_OPT_LEN even though
6840 * the outgoing header has not
6841 * been written yet.
6842 */
6843 __u64 skb_hwtstamp;
6844};
6845
6846/* Definitions for bpf_sock_ops_cb_flags */
6847enum {
6848 BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0),
6849 BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1),
6850 BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2),
6851 BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3),
6852 /* Call bpf for all received TCP headers. The bpf prog will be
6853 * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB
6854 *
6855 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
6856 * for the header option related helpers that will be useful
6857 * to the bpf programs.
6858 *
6859 * It could be used at the client/active side (i.e. connect() side)
6860 * when the server told it that the server was in syncookie
6861 * mode and required the active side to resend the bpf-written
6862 * options. The active side can keep writing the bpf-options until
6863 * it received a valid packet from the server side to confirm
6864 * the earlier packet (and options) has been received. The later
6865 * example patch is using it like this at the active side when the
6866 * server is in syncookie mode.
6867 *
6868 * The bpf prog will usually turn this off in the common cases.
6869 */
6870 BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4),
6871 /* Call bpf when kernel has received a header option that
6872 * the kernel cannot handle. The bpf prog will be called under
6873 * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB.
6874 *
6875 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
6876 * for the header option related helpers that will be useful
6877 * to the bpf programs.
6878 */
6879 BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5),
6880 /* Call bpf when the kernel is writing header options for the
6881 * outgoing packet. The bpf prog will first be called
6882 * to reserve space in a skb under
6883 * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then
6884 * the bpf prog will be called to write the header option(s)
6885 * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
6886 *
6887 * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB
6888 * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option
6889 * related helpers that will be useful to the bpf programs.
6890 *
6891 * The kernel gets its chance to reserve space and write
6892 * options first before the BPF program does.
6893 */
6894 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6),
6895/* Mask of all currently supported cb flags */
6896 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F,
6897};
6898
6899/* List of known BPF sock_ops operators.
6900 * New entries can only be added at the end
6901 */
6902enum {
6903 BPF_SOCK_OPS_VOID,
6904 BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or
6905 * -1 if default value should be used
6906 */
6907 BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized
6908 * window (in packets) or -1 if default
6909 * value should be used
6910 */
6911 BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an
6912 * active connection is initialized
6913 */
6914 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an
6915 * active connection is
6916 * established
6917 */
6918 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a
6919 * passive connection is
6920 * established
6921 */
6922 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control
6923 * needs ECN
6924 */
6925 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is
6926 * based on the path and may be
6927 * dependent on the congestion control
6928 * algorithm. In general it indicates
6929 * a congestion threshold. RTTs above
6930 * this indicate congestion
6931 */
6932 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered.
6933 * Arg1: value of icsk_retransmits
6934 * Arg2: value of icsk_rto
6935 * Arg3: whether RTO has expired
6936 */
6937 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
6938 * Arg1: sequence number of 1st byte
6939 * Arg2: # segments
6940 * Arg3: return value of
6941 * tcp_transmit_skb (0 => success)
6942 */
6943 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state.
6944 * Arg1: old_state
6945 * Arg2: new_state
6946 */
6947 BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
6948 * socket transition to LISTEN state.
6949 */
6950 BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
6951 * Arg1: measured RTT input (mrtt)
6952 * Arg2: updated srtt
6953 */
6954 BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
6955 * It will be called to handle
6956 * the packets received at
6957 * an already established
6958 * connection.
6959 *
6960 * sock_ops->skb_data:
6961 * Referring to the received skb.
6962 * It covers the TCP header only.
6963 *
6964 * bpf_load_hdr_opt() can also
6965 * be used to search for a
6966 * particular option.
6967 */
6968 BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the
6969 * header option later in
6970 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
6971 * Arg1: bool want_cookie. (in
6972 * writing SYNACK only)
6973 *
6974 * sock_ops->skb_data:
6975 * Not available because no header has
6976 * been written yet.
6977 *
6978 * sock_ops->skb_tcp_flags:
6979 * The tcp_flags of the
6980 * outgoing skb. (e.g. SYN, ACK, FIN).
6981 *
6982 * bpf_reserve_hdr_opt() should
6983 * be used to reserve space.
6984 */
6985 BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options
6986 * Arg1: bool want_cookie. (in
6987 * writing SYNACK only)
6988 *
6989 * sock_ops->skb_data:
6990 * Referring to the outgoing skb.
6991 * It covers the TCP header
6992 * that has already been written
6993 * by the kernel and the
6994 * earlier bpf-progs.
6995 *
6996 * sock_ops->skb_tcp_flags:
6997 * The tcp_flags of the outgoing
6998 * skb. (e.g. SYN, ACK, FIN).
6999 *
7000 * bpf_store_hdr_opt() should
7001 * be used to write the
7002 * option.
7003 *
7004 * bpf_load_hdr_opt() can also
7005 * be used to search for a
7006 * particular option that
7007 * has already been written
7008 * by the kernel or the
7009 * earlier bpf-progs.
7010 */
7011};
7012
7013/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
7014 * changes between the TCP and BPF versions. Ideally this should never happen.
7015 * If it does, we need to add code to convert them before calling
7016 * the BPF sock_ops function.
7017 */
7018enum {
7019 BPF_TCP_ESTABLISHED = 1,
7020 BPF_TCP_SYN_SENT,
7021 BPF_TCP_SYN_RECV,
7022 BPF_TCP_FIN_WAIT1,
7023 BPF_TCP_FIN_WAIT2,
7024 BPF_TCP_TIME_WAIT,
7025 BPF_TCP_CLOSE,
7026 BPF_TCP_CLOSE_WAIT,
7027 BPF_TCP_LAST_ACK,
7028 BPF_TCP_LISTEN,
7029 BPF_TCP_CLOSING, /* Now a valid state */
7030 BPF_TCP_NEW_SYN_RECV,
7031 BPF_TCP_BOUND_INACTIVE,
7032
7033 BPF_TCP_MAX_STATES /* Leave at the end! */
7034};
7035
7036enum {
7037 TCP_BPF_IW = 1001, /* Set TCP initial congestion window */
7038 TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */
7039 TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */
7040 TCP_BPF_RTO_MIN = 1004, /* Min delay ack in usecs */
7041 /* Copy the SYN pkt to optval
7042 *
7043 * BPF_PROG_TYPE_SOCK_OPS only. It is similar to the
7044 * bpf_getsockopt(TCP_SAVED_SYN) but it does not limit
7045 * to only getting from the saved_syn. It can either get the
7046 * syn packet from:
7047 *
7048 * 1. the just-received SYN packet (only available when writing the
7049 * SYNACK). It will be useful when it is not necessary to
7050 * save the SYN packet for latter use. It is also the only way
7051 * to get the SYN during syncookie mode because the syn
7052 * packet cannot be saved during syncookie.
7053 *
7054 * OR
7055 *
7056 * 2. the earlier saved syn which was done by
7057 * bpf_setsockopt(TCP_SAVE_SYN).
7058 *
7059 * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the
7060 * SYN packet is obtained.
7061 *
7062 * If the bpf-prog does not need the IP[46] header, the
7063 * bpf-prog can avoid parsing the IP header by using
7064 * TCP_BPF_SYN. Otherwise, the bpf-prog can get both
7065 * IP[46] and TCP header by using TCP_BPF_SYN_IP.
7066 *
7067 * >0: Total number of bytes copied
7068 * -ENOSPC: Not enough space in optval. Only optlen number of
7069 * bytes is copied.
7070 * -ENOENT: The SYN skb is not available now and the earlier SYN pkt
7071 * is not saved by setsockopt(TCP_SAVE_SYN).
7072 */
7073 TCP_BPF_SYN = 1005, /* Copy the TCP header */
7074 TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
7075 TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
7076};
7077
7078enum {
7079 BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0),
7080};
7081
7082/* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and
7083 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
7084 */
7085enum {
7086 BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the
7087 * total option spaces
7088 * required for an established
7089 * sk in order to calculate the
7090 * MSS. No skb is actually
7091 * sent.
7092 */
7093 BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode
7094 * when sending a SYN.
7095 */
7096};
7097
7098struct bpf_perf_event_value {
7099 __u64 counter;
7100 __u64 enabled;
7101 __u64 running;
7102};
7103
7104enum {
7105 BPF_DEVCG_ACC_MKNOD = (1ULL << 0),
7106 BPF_DEVCG_ACC_READ = (1ULL << 1),
7107 BPF_DEVCG_ACC_WRITE = (1ULL << 2),
7108};
7109
7110enum {
7111 BPF_DEVCG_DEV_BLOCK = (1ULL << 0),
7112 BPF_DEVCG_DEV_CHAR = (1ULL << 1),
7113};
7114
7115struct bpf_cgroup_dev_ctx {
7116 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
7117 __u32 access_type;
7118 __u32 major;
7119 __u32 minor;
7120};
7121
7122struct bpf_raw_tracepoint_args {
7123 __u64 args[0];
7124};
7125
7126/* DIRECT: Skip the FIB rules and go to FIB table associated with device
7127 * OUTPUT: Do lookup from egress perspective; default is ingress
7128 */
7129enum {
7130 BPF_FIB_LOOKUP_DIRECT = (1U << 0),
7131 BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
7132 BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
7133 BPF_FIB_LOOKUP_TBID = (1U << 3),
7134 BPF_FIB_LOOKUP_SRC = (1U << 4),
7135 BPF_FIB_LOOKUP_MARK = (1U << 5),
7136};
7137
7138enum {
7139 BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
7140 BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */
7141 BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */
7142 BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */
7143 BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */
7144 BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
7145 BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
7146 BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
7147 BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
7148 BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */
7149};
7150
7151struct bpf_fib_lookup {
7152 /* input: network family for lookup (AF_INET, AF_INET6)
7153 * output: network family of egress nexthop
7154 */
7155 __u8 family;
7156
7157 /* set if lookup is to consider L4 data - e.g., FIB rules */
7158 __u8 l4_protocol;
7159 __be16 sport;
7160 __be16 dport;
7161
7162 union { /* used for MTU check */
7163 /* input to lookup */
7164 __u16 tot_len; /* L3 length from network hdr (iph->tot_len) */
7165
7166 /* output: MTU value */
7167 __u16 mtu_result;
7168 } __attribute__((packed, aligned(2)));
7169 /* input: L3 device index for lookup
7170 * output: device index from FIB lookup
7171 */
7172 __u32 ifindex;
7173
7174 union {
7175 /* inputs to lookup */
7176 __u8 tos; /* AF_INET */
7177 __be32 flowinfo; /* AF_INET6, flow_label + priority */
7178
7179 /* output: metric of fib result (IPv4/IPv6 only) */
7180 __u32 rt_metric;
7181 };
7182
7183 /* input: source address to consider for lookup
7184 * output: source address result from lookup
7185 */
7186 union {
7187 __be32 ipv4_src;
7188 __u32 ipv6_src[4]; /* in6_addr; network order */
7189 };
7190
7191 /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in
7192 * network header. output: bpf_fib_lookup sets to gateway address
7193 * if FIB lookup returns gateway route
7194 */
7195 union {
7196 __be32 ipv4_dst;
7197 __u32 ipv6_dst[4]; /* in6_addr; network order */
7198 };
7199
7200 union {
7201 struct {
7202 /* output */
7203 __be16 h_vlan_proto;
7204 __be16 h_vlan_TCI;
7205 };
7206 /* input: when accompanied with the
7207 * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a
7208 * specific routing table to use for the fib lookup.
7209 */
7210 __u32 tbid;
7211 };
7212
7213 union {
7214 /* input */
7215 struct {
7216 __u32 mark; /* policy routing */
7217 /* 2 4-byte holes for input */
7218 };
7219
7220 /* output: source and dest mac */
7221 struct {
7222 __u8 smac[6]; /* ETH_ALEN */
7223 __u8 dmac[6]; /* ETH_ALEN */
7224 };
7225 };
7226};
7227
7228struct bpf_redir_neigh {
7229 /* network family for lookup (AF_INET, AF_INET6) */
7230 __u32 nh_family;
7231 /* network address of nexthop; skips fib lookup to find gateway */
7232 union {
7233 __be32 ipv4_nh;
7234 __u32 ipv6_nh[4]; /* in6_addr; network order */
7235 };
7236};
7237
7238/* bpf_check_mtu flags*/
7239enum bpf_check_mtu_flags {
7240 BPF_MTU_CHK_SEGS = (1U << 0),
7241};
7242
7243enum bpf_check_mtu_ret {
7244 BPF_MTU_CHK_RET_SUCCESS, /* check and lookup successful */
7245 BPF_MTU_CHK_RET_FRAG_NEEDED, /* fragmentation required to fwd */
7246 BPF_MTU_CHK_RET_SEGS_TOOBIG, /* GSO re-segmentation needed to fwd */
7247};
7248
7249enum bpf_task_fd_type {
7250 BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */
7251 BPF_FD_TYPE_TRACEPOINT, /* tp name */
7252 BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */
7253 BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */
7254 BPF_FD_TYPE_UPROBE, /* filename + offset */
7255 BPF_FD_TYPE_URETPROBE, /* filename + offset */
7256};
7257
7258enum {
7259 BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0),
7260 BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1),
7261 BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2),
7262};
7263
7264struct bpf_flow_keys {
7265 __u16 nhoff;
7266 __u16 thoff;
7267 __u16 addr_proto; /* ETH_P_* of valid addrs */
7268 __u8 is_frag;
7269 __u8 is_first_frag;
7270 __u8 is_encap;
7271 __u8 ip_proto;
7272 __be16 n_proto;
7273 __be16 sport;
7274 __be16 dport;
7275 union {
7276 struct {
7277 __be32 ipv4_src;
7278 __be32 ipv4_dst;
7279 };
7280 struct {
7281 __u32 ipv6_src[4]; /* in6_addr; network order */
7282 __u32 ipv6_dst[4]; /* in6_addr; network order */
7283 };
7284 };
7285 __u32 flags;
7286 __be32 flow_label;
7287};
7288
7289struct bpf_func_info {
7290 __u32 insn_off;
7291 __u32 type_id;
7292};
7293
7294#define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10)
7295#define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff)
7296
7297struct bpf_line_info {
7298 __u32 insn_off;
7299 __u32 file_name_off;
7300 __u32 line_off;
7301 __u32 line_col;
7302};
7303
7304struct bpf_spin_lock {
7305 __u32 val;
7306};
7307
7308struct bpf_timer {
7309 __u64 __opaque[2];
7310} __attribute__((aligned(8)));
7311
7312struct bpf_wq {
7313 __u64 __opaque[2];
7314} __attribute__((aligned(8)));
7315
7316struct bpf_dynptr {
7317 __u64 __opaque[2];
7318} __attribute__((aligned(8)));
7319
7320struct bpf_list_head {
7321 __u64 __opaque[2];
7322} __attribute__((aligned(8)));
7323
7324struct bpf_list_node {
7325 __u64 __opaque[3];
7326} __attribute__((aligned(8)));
7327
7328struct bpf_rb_root {
7329 __u64 __opaque[2];
7330} __attribute__((aligned(8)));
7331
7332struct bpf_rb_node {
7333 __u64 __opaque[4];
7334} __attribute__((aligned(8)));
7335
7336struct bpf_refcount {
7337 __u32 __opaque[1];
7338} __attribute__((aligned(4)));
7339
7340struct bpf_sysctl {
7341 __u32 write; /* Sysctl is being read (= 0) or written (= 1).
7342 * Allows 1,2,4-byte read, but no write.
7343 */
7344 __u32 file_pos; /* Sysctl file position to read from, write to.
7345 * Allows 1,2,4-byte read an 4-byte write.
7346 */
7347};
7348
7349struct bpf_sockopt {
7350 __bpf_md_ptr(struct bpf_sock *, sk);
7351 __bpf_md_ptr(void *, optval);
7352 __bpf_md_ptr(void *, optval_end);
7353
7354 __s32 level;
7355 __s32 optname;
7356 __s32 optlen;
7357 __s32 retval;
7358};
7359
7360struct bpf_pidns_info {
7361 __u32 pid;
7362 __u32 tgid;
7363};
7364
7365/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
7366struct bpf_sk_lookup {
7367 union {
7368 __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
7369 __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
7370 };
7371
7372 __u32 family; /* Protocol family (AF_INET, AF_INET6) */
7373 __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
7374 __u32 remote_ip4; /* Network byte order */
7375 __u32 remote_ip6[4]; /* Network byte order */
7376 __be16 remote_port; /* Network byte order */
7377 __u16 :16; /* Zero padding */
7378 __u32 local_ip4; /* Network byte order */
7379 __u32 local_ip6[4]; /* Network byte order */
7380 __u32 local_port; /* Host byte order */
7381 __u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */
7382};
7383
7384/*
7385 * struct btf_ptr is used for typed pointer representation; the
7386 * type id is used to render the pointer data as the appropriate type
7387 * via the bpf_snprintf_btf() helper described above. A flags field -
7388 * potentially to specify additional details about the BTF pointer
7389 * (rather than its mode of display) - is included for future use.
7390 * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately.
7391 */
7392struct btf_ptr {
7393 void *ptr;
7394 __u32 type_id;
7395 __u32 flags; /* BTF ptr flags; unused at present. */
7396};
7397
7398/*
7399 * Flags to control bpf_snprintf_btf() behaviour.
7400 * - BTF_F_COMPACT: no formatting around type information
7401 * - BTF_F_NONAME: no struct/union member names/types
7402 * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values;
7403 * equivalent to %px.
7404 * - BTF_F_ZERO: show zero-valued struct/union members; they
7405 * are not displayed by default
7406 */
7407enum {
7408 BTF_F_COMPACT = (1ULL << 0),
7409 BTF_F_NONAME = (1ULL << 1),
7410 BTF_F_PTR_RAW = (1ULL << 2),
7411 BTF_F_ZERO = (1ULL << 3),
7412};
7413
7414/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
7415 * has to be adjusted by relocations. It is emitted by llvm and passed to
7416 * libbpf and later to the kernel.
7417 */
7418enum bpf_core_relo_kind {
7419 BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */
7420 BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */
7421 BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */
7422 BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */
7423 BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */
7424 BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */
7425 BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */
7426 BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */
7427 BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */
7428 BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */
7429 BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */
7430 BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */
7431 BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */
7432};
7433
7434/*
7435 * "struct bpf_core_relo" is used to pass relocation data form LLVM to libbpf
7436 * and from libbpf to the kernel.
7437 *
7438 * CO-RE relocation captures the following data:
7439 * - insn_off - instruction offset (in bytes) within a BPF program that needs
7440 * its insn->imm field to be relocated with actual field info;
7441 * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
7442 * type or field;
7443 * - access_str_off - offset into corresponding .BTF string section. String
7444 * interpretation depends on specific relocation kind:
7445 * - for field-based relocations, string encodes an accessed field using
7446 * a sequence of field and array indices, separated by colon (:). It's
7447 * conceptually very close to LLVM's getelementptr ([0]) instruction's
7448 * arguments for identifying offset to a field.
7449 * - for type-based relocations, strings is expected to be just "0";
7450 * - for enum value-based relocations, string contains an index of enum
7451 * value within its enum type;
7452 * - kind - one of enum bpf_core_relo_kind;
7453 *
7454 * Example:
7455 * struct sample {
7456 * int a;
7457 * struct {
7458 * int b[10];
7459 * };
7460 * };
7461 *
7462 * struct sample *s = ...;
7463 * int *x = &s->a; // encoded as "0:0" (a is field #0)
7464 * int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
7465 * // b is field #0 inside anon struct, accessing elem #5)
7466 * int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
7467 *
7468 * type_id for all relocs in this example will capture BTF type id of
7469 * `struct sample`.
7470 *
7471 * Such relocation is emitted when using __builtin_preserve_access_index()
7472 * Clang built-in, passing expression that captures field address, e.g.:
7473 *
7474 * bpf_probe_read(&dst, sizeof(dst),
7475 * __builtin_preserve_access_index(&src->a.b.c));
7476 *
7477 * In this case Clang will emit field relocation recording necessary data to
7478 * be able to find offset of embedded `a.b.c` field within `src` struct.
7479 *
7480 * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
7481 */
7482struct bpf_core_relo {
7483 __u32 insn_off;
7484 __u32 type_id;
7485 __u32 access_str_off;
7486 enum bpf_core_relo_kind kind;
7487};
7488
7489/*
7490 * Flags to control bpf_timer_start() behaviour.
7491 * - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
7492 * relative to current time.
7493 * - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller.
7494 */
7495enum {
7496 BPF_F_TIMER_ABS = (1ULL << 0),
7497 BPF_F_TIMER_CPU_PIN = (1ULL << 1),
7498};
7499
7500/* BPF numbers iterator state */
7501struct bpf_iter_num {
7502 /* opaque iterator state; having __u64 here allows to preserve correct
7503 * alignment requirements in vmlinux.h, generated from BTF
7504 */
7505 __u64 __opaque[1];
7506} __attribute__((aligned(8)));
7507
7508#endif /* _UAPI__LINUX_BPF_H__ */