/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */
#ifndef __LIBBPF_LIBBPF_H
#define __LIBBPF_LIBBPF_H

#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/types.h> // for size_t
#include <linux/bpf.h>

#include "libbpf_common.h"
#include "libbpf_legacy.h"

#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief **libbpf_major_version()** provides the major version of libbpf.
 * @return An integer, the major version number
 */
LIBBPF_API __u32 libbpf_major_version(void);

/**
 * @brief **libbpf_minor_version()** provides the minor version of libbpf.
 * @return An integer, the minor version number
 */
LIBBPF_API __u32 libbpf_minor_version(void);

/**
 * @brief **libbpf_version_string()** provides the version of libbpf in a
 * human-readable form, e.g., "v1.7".
 * @return Pointer to a static string containing the version
 *
 * The format is *not* a part of a stable API and may change in the future.
 */
LIBBPF_API const char *libbpf_version_string(void);

enum libbpf_errno {
	__LIBBPF_ERRNO__START = 4000,

	/* Something wrong in libelf */
	LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START,
	LIBBPF_ERRNO__FORMAT,	/* BPF object format invalid */
	LIBBPF_ERRNO__KVERSION,	/* Incorrect or no 'version' section */
	LIBBPF_ERRNO__ENDIAN,	/* Endian mismatch */
	LIBBPF_ERRNO__INTERNAL,	/* Internal error in libbpf */
	LIBBPF_ERRNO__RELOC,	/* Relocation failed */
	LIBBPF_ERRNO__LOAD,	/* Load program failure for unknown reason */
	LIBBPF_ERRNO__VERIFY,	/* Kernel verifier blocks program loading */
	LIBBPF_ERRNO__PROG2BIG,	/* Program too big */
	LIBBPF_ERRNO__KVER,	/* Incorrect kernel version */
	LIBBPF_ERRNO__PROGTYPE,	/* Kernel doesn't support this program type */
	LIBBPF_ERRNO__WRNGPID,	/* Wrong pid in netlink message */
	LIBBPF_ERRNO__INVSEQ,	/* Invalid netlink sequence */
	LIBBPF_ERRNO__NLPARSE,	/* netlink parsing error */
	__LIBBPF_ERRNO__END,
};

/**
 * @brief **libbpf_strerror()** converts the provided error code into a
 * human-readable string.
 * @param err The error code to convert
 * @param buf Pointer to a buffer where the error message will be stored
 * @param size The number of bytes in the buffer
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
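
/* Usage sketch (illustrative only, not part of this header): turning a
 * libbpf error code into a message.
 *
 *	char msg[128];
 *
 *	if (libbpf_strerror(LIBBPF_ERRNO__FORMAT, msg, sizeof(msg)) == 0)
 *		fprintf(stderr, "load failed: %s\n", msg);
 */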

/**
 * @brief **libbpf_bpf_attach_type_str()** converts the provided attach type
 * value into a textual representation.
 * @param t The attach type.
 * @return Pointer to a static string identifying the attach type. NULL is
 * returned for unknown **bpf_attach_type** values.
 */
LIBBPF_API const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t);

/**
 * @brief **libbpf_bpf_link_type_str()** converts the provided link type value
 * into a textual representation.
 * @param t The link type.
 * @return Pointer to a static string identifying the link type. NULL is
 * returned for unknown **bpf_link_type** values.
 */
LIBBPF_API const char *libbpf_bpf_link_type_str(enum bpf_link_type t);

/**
 * @brief **libbpf_bpf_map_type_str()** converts the provided map type value
 * into a textual representation.
 * @param t The map type.
 * @return Pointer to a static string identifying the map type. NULL is
 * returned for unknown **bpf_map_type** values.
 */
LIBBPF_API const char *libbpf_bpf_map_type_str(enum bpf_map_type t);

/**
 * @brief **libbpf_bpf_prog_type_str()** converts the provided program type
 * value into a textual representation.
 * @param t The program type.
 * @return Pointer to a static string identifying the program type. NULL is
 * returned for unknown **bpf_prog_type** values.
 */
LIBBPF_API const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t);

enum libbpf_print_level {
	LIBBPF_WARN,
	LIBBPF_INFO,
	LIBBPF_DEBUG,
};

typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
				 const char *, va_list ap);

/**
 * @brief **libbpf_set_print()** sets user-provided log callback function to
 * be used for libbpf warnings and informational messages. If the user callback
 * is not set, messages are logged to stderr by default. The verbosity of these
 * messages can be controlled by setting the environment variable
 * LIBBPF_LOG_LEVEL to either warn, info, or debug.
 * @param fn The log print function. If NULL, libbpf won't print anything.
 * @return Pointer to old print function.
 *
 * This function is thread-safe.
 */
LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);
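
/* Usage sketch (illustrative only, not part of this header): install a
 * callback that drops debug output and forwards everything else to stderr.
 *
 *	static int my_print(enum libbpf_print_level level, const char *fmt,
 *			    va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, fmt, args);
 *	}
 *
 *	libbpf_set_print(my_print);
 */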

/* Hide internal to user */
struct bpf_object;

struct bpf_object_open_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* object name override, if provided:
	 * - for object open from file, this will override setting object
	 *   name from file path's base name;
	 * - for object open from memory buffer, this will specify an object
	 *   name and will override default "<addr>-<buf-size>" name;
	 */
	const char *object_name;
	/* parse map definitions non-strictly, allowing extra attributes/data */
	bool relaxed_maps;
	/* maps that set the 'pinning' attribute in their definition will have
	 * their pin_path attribute set to a file in this directory, and be
	 * auto-pinned to that path on load; defaults to "/sys/fs/bpf".
	 */
	const char *pin_root_path;

	__u32 :32; /* stub out now removed attach_prog_fd */

	/* Additional kernel config content that augments and overrides
	 * system Kconfig for CONFIG_xxx externs.
	 */
	const char *kconfig;
	/* Path to the custom BTF to be used for BPF CO-RE relocations.
	 * This custom BTF completely replaces the use of vmlinux BTF
	 * for the purpose of CO-RE relocations.
	 * NOTE: any other BPF feature (e.g., fentry/fexit programs,
	 * struct_ops, etc) will need actual kernel BTF at /sys/kernel/btf/vmlinux.
	 */
	const char *btf_custom_path;
	/* Pointer to a buffer for storing kernel logs for applicable BPF
	 * commands. A valid kernel_log_size has to be specified as well; both
	 * are passed through to the bpf() syscall. Keep in mind that the
	 * kernel might fail the operation with -ENOSPC error if the provided
	 * buffer is too small to contain the entire log output.
	 * See the comment below for kernel_log_level for interaction between
	 * log_buf and log_level settings.
	 *
	 * If specified, this log buffer will be passed for:
	 *   - each BPF program load (BPF_PROG_LOAD) attempt, unless overridden
	 *     with bpf_program__set_log_buf() on per-program level, to get
	 *     BPF verifier log output.
	 *   - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get
	 *     BTF sanity checking log.
	 *
	 * Each BPF command (BPF_BTF_LOAD or BPF_PROG_LOAD) will overwrite
	 * previous contents, so if you need more fine-grained control, set
	 * per-program buffer with bpf_program__set_log_buf() to preserve each
	 * individual program's verification log. Keep using kernel_log_buf
	 * for BTF verification log, if necessary.
	 */
	char *kernel_log_buf;
	size_t kernel_log_size;
	/*
	 * Log level can be set independently from log buffer. log_level=0
	 * means that libbpf will attempt loading BTF or program without any
	 * logging requested, but will retry with either its own or custom log
	 * buffer, if provided, and log_level=1 on any error.
	 * And vice versa, setting log_level>0 will request BTF or prog
	 * loading with verbose log from the first attempt (and as such also
	 * for successfully loaded BTF or program), and the actual log buffer
	 * could be either libbpf's own auto-allocated log buffer, if
	 * kernel_log_buf is NULL, or user-provided custom kernel_log_buf.
	 * If user didn't provide custom log buffer, libbpf will emit captured
	 * logs through its print callback.
	 */
	__u32 kernel_log_level;
	/* Path to BPF FS mount point to derive BPF token from.
	 *
	 * Created BPF token will be used for all bpf() syscall operations
	 * that accept BPF token (e.g., map creation, BTF and program loads,
	 * etc) automatically within instantiated BPF object.
	 *
	 * If bpf_token_path is not specified, libbpf will consult the
	 * LIBBPF_BPF_TOKEN_PATH environment variable. If set, it will be
	 * taken as the value of the bpf_token_path option and will force
	 * libbpf to either create a BPF token from the provided custom BPF FS
	 * path, or disable implicit BPF token creation if the envvar value is
	 * an empty string. bpf_token_path overrides LIBBPF_BPF_TOKEN_PATH, if
	 * both are set at the same time.
	 *
	 * Setting bpf_token_path option to empty string disables libbpf's
	 * automatic attempt to create BPF token from default BPF FS mount
	 * point (/sys/fs/bpf), in case this default behavior is undesirable.
	 */
	const char *bpf_token_path;

	size_t :0;
};
#define bpf_object_open_opts__last_field bpf_token_path
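
/* Usage sketch (illustrative only; the .o path and names below are
 * assumptions): open an object with custom options using the LIBBPF_OPTS()
 * helper from libbpf_common.h.
 *
 *	LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.object_name = "my_obj",
 *		.pin_root_path = "/sys/fs/bpf/my_app",
 *	);
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", &opts);
 *	if (!obj)
 *		return -errno; // errno is set on failure
 */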

/**
 * @brief **bpf_object__open()** creates a bpf_object by opening
 * the BPF ELF object file pointed to by the passed path and loading it
 * into memory.
 * @param path BPF object file path.
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *bpf_object__open(const char *path);

/**
 * @brief **bpf_object__open_file()** creates a bpf_object by opening
 * the BPF ELF object file pointed to by the passed path and loading it
 * into memory.
 * @param path BPF object file path
 * @param opts options for how to load the bpf object, this parameter is
 * optional and can be set to NULL
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *
bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts);

/**
 * @brief **bpf_object__open_mem()** creates a bpf_object by reading
 * the BPF object's raw bytes from a memory buffer containing a valid
 * BPF ELF object file.
 * @param obj_buf pointer to the buffer containing ELF file bytes
 * @param obj_buf_sz number of bytes in the buffer
 * @param opts options for how to load the bpf object
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
		     const struct bpf_object_open_opts *opts);

/**
 * @brief **bpf_object__prepare()** prepares BPF object for loading:
 * performs ELF processing, relocations, prepares final state of BPF program
 * instructions (accessible with bpf_program__insns()), creates and
 * (potentially) pins maps. Leaves BPF object in the state ready for program
 * loading.
 * @param obj Pointer to a valid BPF object instance returned by
 * **bpf_object__open*()** API
 * @return 0, on success; negative error code, otherwise, error code is
 * stored in errno
 */
LIBBPF_API int bpf_object__prepare(struct bpf_object *obj);

/**
 * @brief **bpf_object__load()** loads BPF object into kernel.
 * @param obj Pointer to a valid BPF object instance returned by
 * **bpf_object__open*()** APIs
 * @return 0, on success; negative error code, otherwise, error code is
 * stored in errno
 */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);

/**
 * @brief **bpf_object__close()** closes a BPF object and releases all
 * resources.
 * @param obj Pointer to a valid BPF object
 */
LIBBPF_API void bpf_object__close(struct bpf_object *obj);
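
/* Usage sketch (illustrative only; the object path and program name are
 * assumptions): the typical open -> load -> attach -> close lifecycle.
 *
 *	struct bpf_object *obj;
 *	struct bpf_program *prog;
 *	struct bpf_link *link;
 *
 *	obj = bpf_object__open("prog.bpf.o");
 *	if (!obj)
 *		return -errno;
 *	if (bpf_object__load(obj)) {
 *		bpf_object__close(obj);
 *		return -errno;
 *	}
 *	prog = bpf_object__find_program_by_name(obj, "handle_exec");
 *	link = prog ? bpf_program__attach(prog) : NULL;
 *	// ... run workload ...
 *	bpf_link__destroy(link);
 *	bpf_object__close(obj);
 */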

/**
 * @brief **bpf_object__pin_maps()** pins each map contained within
 * the BPF object at the passed directory.
 * @param obj Pointer to a valid BPF object
 * @param path A directory where maps should be pinned.
 * @return 0, on success; negative error code, otherwise
 *
 * If `path` is NULL, `bpf_map__pin` (which is used on each map)
 * will use the pin_path attribute of each map. In this case, maps that
 * don't have a pin_path set will be ignored.
 */
LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path);

/**
 * @brief **bpf_object__unpin_maps()** unpins each map contained within
 * the BPF object found in the passed directory.
 * @param obj Pointer to a valid BPF object
 * @param path A directory where pinned maps should be searched for.
 * @return 0, on success; negative error code, otherwise
 *
 * If `path` is NULL, `bpf_map__unpin` (which is used on each map)
 * will use the pin_path attribute of each map. In this case, maps that
 * don't have a pin_path set will be ignored.
 */
LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj,
				      const char *path);
LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
					const char *path);
LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
					  const char *path);
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
LIBBPF_API int bpf_object__unpin(struct bpf_object *object, const char *path);

LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version);

/**
 * @brief **bpf_object__token_fd** is an accessor for BPF token FD associated
 * with BPF object.
 * @param obj Pointer to a valid BPF object
 * @return BPF token FD or -1, if it wasn't set
 */
LIBBPF_API int bpf_object__token_fd(const struct bpf_object *obj);

struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);

LIBBPF_API struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
				 const char *name);

LIBBPF_API int
libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			 enum bpf_attach_type *expected_attach_type);
LIBBPF_API int libbpf_attach_type_by_name(const char *name,
					  enum bpf_attach_type *attach_type);
LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,
					  enum bpf_attach_type attach_type);

/* Accessors of bpf_program */
struct bpf_program;

LIBBPF_API struct bpf_program *
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog);

#define bpf_object__for_each_program(pos, obj)			\
	for ((pos) = bpf_object__next_program((obj), NULL);	\
	     (pos) != NULL;					\
	     (pos) = bpf_object__next_program((obj), (pos)))

LIBBPF_API struct bpf_program *
bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog);

LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
					 __u32 ifindex);

LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
LIBBPF_API bool bpf_program__autoattach(const struct bpf_program *prog);
LIBBPF_API void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach);
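
/* Usage sketch (illustrative only; the program name is an assumption):
 * disable auto-load for every program in an object except one.
 *
 *	struct bpf_program *pos;
 *
 *	bpf_object__for_each_program(pos, obj) {
 *		if (strcmp(bpf_program__name(pos), "handle_exec") != 0)
 *			bpf_program__set_autoload(pos, false);
 *	}
 */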

struct bpf_insn;

/**
 * @brief **bpf_program__insns()** gives read-only access to BPF program's
 * underlying BPF instructions.
 * @param prog BPF program for which to return instructions
 * @return a pointer to an array of BPF instructions that belong to the
 * specified BPF program
 *
 * Returned pointer is always valid and not NULL. Number of `struct bpf_insn`
 * pointed to can be fetched using **bpf_program__insn_cnt()** API.
 *
 * Keep in mind, libbpf can modify and append/delete BPF program's
 * instructions as it processes BPF object file and prepares everything for
 * uploading into the kernel. So depending on the point in BPF object
 * lifetime, **bpf_program__insns()** can return different sets of
 * instructions. As an example, during BPF object load phase BPF program
 * instructions will be CO-RE-relocated, BPF subprograms instructions will be
 * appended, ldimm64 instructions will have FDs embedded, etc. So instructions
 * returned before **bpf_object__load()** and after it might be quite
 * different.
 */
LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog);

/**
 * @brief **bpf_program__set_insns()** can set BPF program's underlying
 * BPF instructions.
 *
 * WARNING: This is a very advanced libbpf API and users need to know
 * what they are doing. This should be used from prog_prepare_load_fn
 * callback only.
 *
 * @param prog BPF program whose instructions to set
 * @param new_insns a pointer to an array of BPF instructions
 * @param new_insn_cnt number of `struct bpf_insn`'s that form
 * specified BPF program
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__set_insns(struct bpf_program *prog,
				      struct bpf_insn *new_insns, size_t new_insn_cnt);

/**
 * @brief **bpf_program__insn_cnt()** returns number of `struct bpf_insn`'s
 * that form specified BPF program.
 * @param prog BPF program for which to return number of BPF instructions
 *
 * See **bpf_program__insns()** documentation for notes on how libbpf can
 * change instructions and their count during different phases of
 * **bpf_object** lifetime.
 */
LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog);

LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);

/**
 * @brief **bpf_program__pin()** pins the BPF program to a file
 * in the BPF FS specified by a path. This increments the program's
 * reference count, allowing it to stay loaded after the process
 * which loaded it has exited.
 *
 * @param prog BPF program to pin, must already be loaded
 * @param path file path in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path);

/**
 * @brief **bpf_program__unpin()** unpins the BPF program from a file
 * in the BPF FS specified by a path. This decrements the program's in-kernel
 * reference count.
 *
 * The file pinning the BPF program can also be unlinked by a different
 * process in which case this function will return an error.
 *
 * @param prog BPF program to unpin
 * @param path file path to the pin in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path);
LIBBPF_API void bpf_program__unload(struct bpf_program *prog);

struct bpf_link;

LIBBPF_API struct bpf_link *bpf_link__open(const char *path);
LIBBPF_API int bpf_link__fd(const struct bpf_link *link);
LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link);
/**
 * @brief **bpf_link__pin()** pins the BPF link to a file
 * in the BPF FS specified by a path. This increments the link's
 * reference count, allowing it to stay attached after the process
 * which created it has exited.
 *
 * @param link BPF link to pin, must already be attached
 * @param path file path in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);

/**
 * @brief **bpf_link__unpin()** unpins the BPF link from a file
 * in the BPF FS. This decrements the link's in-kernel reference count.
 *
 * The file pinning the BPF link can also be unlinked by a different
 * process in which case this function will return an error.
 *
 * @param link BPF link to unpin
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
LIBBPF_API int bpf_link__update_program(struct bpf_link *link,
					struct bpf_program *prog);
LIBBPF_API void bpf_link__disconnect(struct bpf_link *link);
LIBBPF_API int bpf_link__detach(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);

/**
 * @brief **bpf_program__attach()** is a generic function for attaching
 * a BPF program based on auto-detection of program type, attach type,
 * and extra parameters, where applicable.
 *
 * @param prog BPF program to attach
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 *
 * This is supported for:
 *   - kprobe/kretprobe (depends on SEC() definition)
 *   - uprobe/uretprobe (depends on SEC() definition)
 *   - tracepoint
 *   - raw tracepoint
 *   - tracing programs (typed raw TP/fentry/fexit/fmod_ret)
 */
LIBBPF_API struct bpf_link *
bpf_program__attach(const struct bpf_program *prog);

struct bpf_perf_event_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* don't use BPF link when attaching BPF program */
	bool force_ioctl_attach;
	/* don't automatically enable the event */
	bool dont_enable;
	size_t :0;
};
#define bpf_perf_event_opts__last_field dont_enable

LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);

LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
				    const struct bpf_perf_event_opts *opts);

/**
 * enum probe_attach_mode - the mode to attach kprobe/uprobe
 *
 * Forces libbpf to attach the kprobe/uprobe in the specified mode; -ENOTSUP
 * will be returned if the mode is not supported by the kernel.
 */
enum probe_attach_mode {
	/* attach probe in latest mode supported by the kernel */
	PROBE_ATTACH_MODE_DEFAULT = 0,
	/* attach probe in legacy mode, using debugfs/tracefs */
	PROBE_ATTACH_MODE_LEGACY,
	/* create perf event with perf_event_open() syscall */
	PROBE_ATTACH_MODE_PERF,
	/* attach probe with BPF link */
	PROBE_ATTACH_MODE_LINK,
};

struct bpf_kprobe_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* function's offset to install kprobe to */
	size_t offset;
	/* kprobe is return probe */
	bool retprobe;
	/* kprobe attach mode */
	enum probe_attach_mode attach_mode;
	size_t :0;
};
#define bpf_kprobe_opts__last_field attach_mode

LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
			   const char *func_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
				const char *func_name,
				const struct bpf_kprobe_opts *opts);
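
/* Usage sketch (illustrative only; the kernel function name and cookie are
 * assumptions): attach a kretprobe with a BPF cookie.
 *
 *	LIBBPF_OPTS(bpf_kprobe_opts, opts,
 *		.retprobe = true,
 *		.bpf_cookie = 0x1234,
 *	);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_opts(prog, "do_sys_openat2", &opts);
 *	if (!link)
 *		return -errno; // errno is set on failure
 */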

struct bpf_kprobe_multi_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* array of function symbols to attach */
	const char **syms;
	/* array of function addresses to attach */
	const unsigned long *addrs;
	/* array of user-provided values fetchable through bpf_get_attach_cookie */
	const __u64 *cookies;
	/* number of elements in syms/addrs/cookies arrays */
	size_t cnt;
	/* create return kprobes */
	bool retprobe;
	/* create session kprobes */
	bool session;
	/* enforce unique match */
	bool unique_match;
	size_t :0;
};

#define bpf_kprobe_multi_opts__last_field unique_match

LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
				      const char *pattern,
				      const struct bpf_kprobe_multi_opts *opts);
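
/* Usage sketch (illustrative only; the glob pattern is an assumption):
 * attach one program to all kernel functions matching a pattern.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", NULL);
 */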

struct bpf_uprobe_multi_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* array of function symbols to attach to */
	const char **syms;
	/* array of function offsets to attach to */
	const unsigned long *offsets;
	/* optional, array of associated ref counter offsets */
	const unsigned long *ref_ctr_offsets;
	/* optional, array of associated BPF cookies */
	const __u64 *cookies;
	/* number of elements in syms/offsets/cookies arrays */
	size_t cnt;
	/* create return uprobes */
	bool retprobe;
	/* create session uprobes */
	bool session;
	size_t :0;
};

#define bpf_uprobe_multi_opts__last_field session

/**
 * @brief **bpf_program__attach_uprobe_multi()** attaches a BPF program
 * to multiple uprobes with uprobe_multi link.
 *
 * User can specify two mutually exclusive sets of inputs:
 *
 * 1) use only path/func_pattern/pid arguments
 *
 * 2) use path/pid with allowed combinations of
 *    syms/offsets/ref_ctr_offsets/cookies/cnt
 *
 *    - syms and offsets are mutually exclusive
 *    - ref_ctr_offsets and cookies are optional
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary
 * @param func_pattern Regular expression to specify functions to attach
 * BPF program to
 * @param opts Additional options (see **struct bpf_uprobe_multi_opts**)
 * @return Reference to the newly created BPF link; or NULL is returned on
 * error, error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
				 pid_t pid,
				 const char *binary_path,
				 const char *func_pattern,
				 const struct bpf_uprobe_multi_opts *opts);

struct bpf_ksyscall_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* attach as return probe? */
	bool retprobe;
	size_t :0;
};
#define bpf_ksyscall_opts__last_field retprobe

/**
 * @brief **bpf_program__attach_ksyscall()** attaches a BPF program
 * to kernel syscall handler of a specified syscall. Optionally it's possible
 * to request installing a retprobe that will be triggered at syscall exit.
 * It's also possible to associate a BPF cookie (through options).
 *
 * Libbpf will automatically determine the correct full kernel function name,
 * which depending on system architecture and kernel version/configuration
 * could be of the form __<arch>_sys_<syscall> or __se_sys_<syscall>, and will
 * attach specified program using kprobe/kretprobe mechanism.
 *
 * **bpf_program__attach_ksyscall()** is an API counterpart of declarative
 * **SEC("ksyscall/<syscall>")** annotation of BPF programs.
 *
 * At the moment **SEC("ksyscall")** and **bpf_program__attach_ksyscall()** do
 * not handle all the calling convention quirks for mmap(), clone() and compat
 * syscalls. It also only attaches to "native" syscall interfaces. If the host
 * system supports compat syscalls or defines 32-bit syscalls in a 64-bit
 * kernel, such syscall interfaces won't be attached to by libbpf.
 *
 * These limitations may or may not change in the future. Therefore it is
 * recommended to use SEC("kprobe") for these syscalls or if working with
 * compat and 32-bit interfaces is required.
 *
 * @param prog BPF program to attach
 * @param syscall_name Symbolic name of the syscall (e.g., "bpf")
 * @param opts Additional options (see **struct bpf_ksyscall_opts**)
 * @return Reference to the newly created BPF link; or NULL is returned on
 * error, error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_ksyscall(const struct bpf_program *prog,
			     const char *syscall_name,
			     const struct bpf_ksyscall_opts *opts);
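
/* Usage sketch (illustrative only): attach to the exit of the bpf(2)
 * syscall handler using a retprobe.
 *
 *	LIBBPF_OPTS(bpf_ksyscall_opts, opts, .retprobe = true);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_ksyscall(prog, "bpf", &opts);
 */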

struct bpf_uprobe_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* offset of kernel reference counted USDT semaphore, added in
	 * a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
	 */
	size_t ref_ctr_offset;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* uprobe is return probe, invoked at function return time */
	bool retprobe;
	/* Function name to attach to. Could be an unqualified ("abc") or library-qualified
	 * "abc@LIBXYZ" name. To specify function entry, func_name should be set while
	 * func_offset argument to bpf_program__attach_uprobe_opts() should be 0. To trace an
	 * offset within a function, specify func_name and use func_offset argument to specify
	 * offset within the function. Shared library functions must specify the shared library
	 * binary_path.
	 */
	const char *func_name;
	/* uprobe attach mode */
	enum probe_attach_mode attach_mode;
	size_t :0;
};
#define bpf_uprobe_opts__last_field attach_mode

/**
 * @brief **bpf_program__attach_uprobe()** attaches a BPF program
 * to the userspace function which is found by binary path and
 * offset. You can optionally specify a particular process to attach
 * to. You can also optionally attach the program to the function
 * exit instead of entry.
 *
 * @param prog BPF program to attach
 * @param retprobe Attach to function exit
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains the function symbol
 * @param func_offset Offset within the binary of the function symbol
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe,
			   pid_t pid, const char *binary_path,
			   size_t func_offset);

/**
 * @brief **bpf_program__attach_uprobe_opts()** is just like
 * bpf_program__attach_uprobe() except with an options struct
 * for various configurations.
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains the function symbol
 * @param func_offset Offset within the binary of the function symbol
 * @param opts Options for altering program attachment
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
				const char *binary_path, size_t func_offset,
				const struct bpf_uprobe_opts *opts);
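
/* Usage sketch (illustrative only; the libc path is an assumption): attach
 * to malloc() by symbol name across all processes.
 *
 *	LIBBPF_OPTS(bpf_uprobe_opts, opts, .func_name = "malloc");
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe_opts(prog, -1,
 *					       "/usr/lib/libc.so.6", 0, &opts);
 */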

struct bpf_usdt_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value accessible through bpf_usdt_cookie() */
	__u64 usdt_cookie;
	size_t :0;
};
#define bpf_usdt_opts__last_field usdt_cookie

/**
 * @brief **bpf_program__attach_usdt()** is just like
 * bpf_program__attach_uprobe_opts() except it covers USDT (User-space
 * Statically Defined Tracepoint) attachment, instead of attaching to
 * user-space function entry or exit.
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains provided USDT probe
 * @param usdt_provider USDT provider name
 * @param usdt_name USDT probe name
 * @param opts Options for altering program attachment
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_usdt(const struct bpf_program *prog,
			 pid_t pid, const char *binary_path,
			 const char *usdt_provider, const char *usdt_name,
			 const struct bpf_usdt_opts *opts);
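
/* Usage sketch (illustrative only; the binary path, provider and probe
 * names are assumptions): attach to a USDT probe in all processes.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_usdt(prog, -1, "/usr/bin/myapp",
 *					"myprovider", "myprobe", NULL);
 */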

struct bpf_tracepoint_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
};
#define bpf_tracepoint_opts__last_field bpf_cookie

LIBBPF_API struct bpf_link *
bpf_program__attach_tracepoint(const struct bpf_program *prog,
			       const char *tp_category,
			       const char *tp_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
				    const char *tp_category,
				    const char *tp_name,
				    const struct bpf_tracepoint_opts *opts);
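
/* Usage sketch (illustrative only): attach to the sched:sched_switch
 * tracepoint.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch");
 */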

struct bpf_raw_tracepoint_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	__u64 cookie;
	size_t :0;
};
#define bpf_raw_tracepoint_opts__last_field cookie

LIBBPF_API struct bpf_link *
bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
				   const char *tp_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog,
					const char *tp_name,
					struct bpf_raw_tracepoint_opts *opts);

struct bpf_trace_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 cookie;
};
#define bpf_trace_opts__last_field cookie

LIBBPF_API struct bpf_link *
bpf_program__attach_trace(const struct bpf_program *prog);
LIBBPF_API struct bpf_link *
bpf_program__attach_trace_opts(const struct bpf_program *prog, const struct bpf_trace_opts *opts);

LIBBPF_API struct bpf_link *
bpf_program__attach_lsm(const struct bpf_program *prog);
LIBBPF_API struct bpf_link *
bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex);
LIBBPF_API struct bpf_link *
bpf_program__attach_freplace(const struct bpf_program *prog,
			     int target_fd, const char *attach_func_name);

struct bpf_netfilter_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;

	__u32 pf;
	__u32 hooknum;
	__s32 priority;
	__u32 flags;
};
#define bpf_netfilter_opts__last_field flags

LIBBPF_API struct bpf_link *
bpf_program__attach_netfilter(const struct bpf_program *prog,
			      const struct bpf_netfilter_opts *opts);

struct bpf_tcx_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	__u32 flags;
	__u32 relative_fd;
	__u32 relative_id;
	__u64 expected_revision;
	size_t :0;
};
#define bpf_tcx_opts__last_field expected_revision

LIBBPF_API struct bpf_link *
bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
			const struct bpf_tcx_opts *opts);
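
/* Usage sketch (illustrative only): attach a TCX program to a network
 * interface; the attach direction comes from the program's SEC()
 * definition (e.g., SEC("tcx/ingress")).
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_tcx(prog, ifindex, NULL);
 */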

struct bpf_netkit_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	__u32 flags;
	__u32 relative_fd;
	__u32 relative_id;
	__u64 expected_revision;
	size_t :0;
};
#define bpf_netkit_opts__last_field expected_revision

LIBBPF_API struct bpf_link *
bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
			   const struct bpf_netkit_opts *opts);

struct bpf_cgroup_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	__u32 flags;
	__u32 relative_fd;
	__u32 relative_id;
	__u64 expected_revision;
	size_t :0;
};
#define bpf_cgroup_opts__last_field expected_revision

LIBBPF_API struct bpf_link *
bpf_program__attach_cgroup_opts(const struct bpf_program *prog, int cgroup_fd,
				const struct bpf_cgroup_opts *opts);

struct bpf_map;

LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
LIBBPF_API int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map);

struct bpf_iter_attach_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	union bpf_iter_link_info *link_info;
	__u32 link_info_len;
};
#define bpf_iter_attach_opts__last_field link_info_len

LIBBPF_API struct bpf_link *
bpf_program__attach_iter(const struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts);

LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog);

/**
 * @brief **bpf_program__set_type()** sets the program
 * type of the passed BPF program.
 * @param prog BPF program to set the program type for
 * @param type program type to set the BPF program to have
 * @return error code; or 0 if no error. An error occurs
 * if the object is already loaded.
 *
 * This must be called before the BPF object is loaded,
 * otherwise it has no effect and an error is returned.
 */
LIBBPF_API int bpf_program__set_type(struct bpf_program *prog,
				     enum bpf_prog_type type);

LIBBPF_API enum bpf_attach_type
bpf_program__expected_attach_type(const struct bpf_program *prog);

/**
 * @brief **bpf_program__set_expected_attach_type()** sets the
 * attach type of the passed BPF program. This is used for
 * auto-detection of attachment when programs are loaded.
 * @param prog BPF program to set the attach type for
 * @param type attach type to set the BPF program to have
 * @return error code; or 0 if no error. An error occurs
 * if the object is already loaded.
 *
 * This must be called before the BPF object is loaded,
 * otherwise it has no effect and an error is returned.
 */
LIBBPF_API int
bpf_program__set_expected_attach_type(struct bpf_program *prog,
				      enum bpf_attach_type type);

LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_flags(struct bpf_program *prog, __u32 flags);

/* Per-program log level and log buffer getters/setters.
 * See bpf_object_open_opts comments regarding log_level and log_buf
 * interactions.
 */
LIBBPF_API __u32 bpf_program__log_level(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level);
LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);

LIBBPF_API struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog);
LIBBPF_API __u32 bpf_program__func_info_cnt(const struct bpf_program *prog);

LIBBPF_API struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog);
LIBBPF_API __u32 bpf_program__line_info_cnt(const struct bpf_program *prog);

/**
 * @brief **bpf_program__set_attach_target()** sets BTF-based attach target
 * for supported BPF program types:
 *   - BTF-aware raw tracepoints (tp_btf);
 *   - fentry/fexit/fmod_ret;
 *   - lsm;
 *   - freplace.
 * @param prog BPF program to configure; must not be loaded yet.
 * @param attach_prog_fd FD of target BPF program (for freplace/extension).
 * If > 0 and **attach_func_name** is omitted, BTF ID resolution is deferred.
 * @param attach_func_name Target function name. Used either with
 * attach_prog_fd to find destination BTF type ID in that BPF program, or
 * alone (no attach_prog_fd) to resolve kernel (vmlinux/module) BTF ID.
 * Must be provided if attach_prog_fd is 0.
 * @return error code; or 0 if no error occurred.
 */
LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
			       const char *attach_func_name);
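
/* Usage sketch (illustrative only; the kernel function name is an
 * assumption): retarget an fentry program before the object is loaded.
 *
 *	err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
 */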

/**
 * @brief **bpf_object__find_map_by_name()** returns BPF map of
 * the given name, if it exists within the passed BPF object
 * @param obj BPF object
 * @param name name of the BPF map
 * @return BPF map instance, if such map exists within the BPF object;
 * or NULL otherwise.
 */
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);

LIBBPF_API int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);

LIBBPF_API struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);

#define bpf_object__for_each_map(pos, obj)		\
	for ((pos) = bpf_object__next_map((obj), NULL);	\
	     (pos) != NULL;				\
	     (pos) = bpf_object__next_map((obj), (pos)))
#define bpf_map__for_each bpf_object__for_each_map
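
/* Usage sketch (illustrative only; the size is an assumption): resize every
 * map in an object before it is loaded.
 *
 *	struct bpf_map *map;
 *
 *	bpf_object__for_each_map(map, obj) {
 *		bpf_map__set_max_entries(map, 4096);
 *	}
 */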

LIBBPF_API struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);

/**
 * @brief **bpf_map__set_autocreate()** sets whether libbpf has to auto-create
 * BPF map during BPF object load phase.
 * @param map the BPF map instance
 * @param autocreate whether to create BPF map during BPF object load
 * @return 0 on success; -EBUSY if BPF object was already loaded
 *
 * **bpf_map__set_autocreate()** allows opting out of libbpf auto-creating
 * a BPF map. By default, libbpf will attempt to create every single BPF map
 * defined in BPF object file using BPF_MAP_CREATE command of bpf() syscall
 * and fill in map FD in BPF instructions.
 *
 * This API allows opting out of this process for a specific map instance.
 * This can be useful if the host kernel doesn't support such a BPF map type
 * or the used combination of flags and the user application wants to avoid
 * creating such a map in the first place. The user is still responsible for
 * making sure that their BPF-side code that expects to use such a missing
 * BPF map is recognized by the BPF verifier as dead code, otherwise the BPF
 * verifier will reject such a BPF program.
 */
LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);

/**
 * @brief **bpf_map__set_autoattach()** sets whether libbpf has to auto-attach
 * map during BPF skeleton attach phase.
 * @param map the BPF map instance
 * @param autoattach whether to attach map during BPF skeleton attach phase
 * @return 0 on success; negative error code, otherwise
 */
LIBBPF_API int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach);

/**
 * @brief **bpf_map__autoattach()** returns whether BPF map is configured to
 * auto-attach during BPF skeleton attach phase.
 * @param map the BPF map instance
 * @return true if map is set to auto-attach during skeleton attach phase; false, otherwise
 */
LIBBPF_API bool bpf_map__autoattach(const struct bpf_map *map);

/**
 * @brief **bpf_map__fd()** gets the file descriptor of the passed
 * BPF map
 * @param map the BPF map instance
 * @return the file descriptor; or -EINVAL in case of an error
 */
LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
/* get map name */
LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
/* get/set map type */
LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type);
/* get/set map size (max_entries) */
LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries);
/* get/set map flags */
LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags);
/* get/set map NUMA node */
LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node);
/* get/set map key size */
LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size);
/* get map value size */
LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map);
/**
 * @brief **bpf_map__set_value_size()** sets map value size.
 * @param map the BPF map instance
 * @param size the new value size
 * @return 0, on success; negative error, otherwise
 *
 * There is a special case for maps with associated memory-mapped regions, like
 * the global data section maps (bss, data, rodata). When this function is used
 * on such a map, the mapped region is resized. Afterward, an attempt is made to
 * adjust the corresponding BTF info. This attempt is best-effort and can only
 * succeed if the last variable of the data section map is an array. The array
 * BTF type is replaced by a new BTF array type with a different length.
 * Any previously existing pointers returned from bpf_map__initial_value() or
 * corresponding data section skeleton pointer must be reinitialized.
 */
LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size);
/* get map key/value BTF type IDs */
LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
/* get/set map if_index */
LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
/* get/set map map_extra flags */
LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);

LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
					  const void *data, size_t size);
LIBBPF_API void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize);

/**
 * @brief **bpf_map__is_internal()** tells the caller whether or not the
 * passed map is a special map created by libbpf automatically for things like
 * global variables, __ksym externs, Kconfig values, etc
 * @param map the bpf_map
 * @return true, if the map is an internal map; false, otherwise
 */
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);

/**
 * @brief **bpf_map__set_pin_path()** sets the path attribute that tells where the
 * BPF map should be pinned. This does not actually create the 'pin'.
 * @param map The bpf_map
 * @param path The path
 * @return 0, on success; negative error, otherwise
 */
LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);

/**
 * @brief **bpf_map__pin_path()** gets the path attribute that tells where the
 * BPF map should be pinned.
 * @param map The bpf_map
 * @return The path string, which can be NULL
 */
LIBBPF_API const char *bpf_map__pin_path(const struct bpf_map *map);

/**
 * @brief **bpf_map__is_pinned()** tells the caller whether or not the
 * passed map has been pinned via a 'pin' file.
 * @param map The bpf_map
 * @return true, if the map is pinned; false, otherwise
 */
LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);

/**
 * @brief **bpf_map__pin()** creates a file that serves as a 'pin'
 * for the BPF map. This increments the reference count on the
 * BPF map which will keep the BPF map loaded even after the
 * userspace process which loaded it has exited.
 * @param map The bpf_map to pin
 * @param path A file path for the 'pin'
 * @return 0, on success; negative error, otherwise
 *
 * If `path` is NULL the map's `pin_path` attribute will be used. If this is
 * also NULL, an error will be returned and the map will not be pinned.
 */
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);

/**
 * @brief **bpf_map__unpin()** removes the file that serves as a
 * 'pin' for the BPF map.
 * @param map The bpf_map to unpin
 * @param path A file path for the 'pin'
 * @return 0, on success; negative error, otherwise
 *
 * The `path` parameter can be NULL, in which case the `pin_path`
 * map attribute is unpinned. If both the `path` parameter and
 * `pin_path` map attribute are set, they must be equal.
 */
LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
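
/* Usage sketch (illustrative only; the pin path is an assumption): pin a
 * map so it outlives the current process.
 *
 *	err = bpf_map__pin(map, "/sys/fs/bpf/my_map");
 */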

LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);

/**
 * @brief **bpf_map__lookup_elem()** allows looking up the BPF map value
 * corresponding to the provided key.
 * @param map BPF map to lookup element in
 * @param key pointer to memory containing bytes of the key used for lookup
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param value pointer to memory in which looked up value will be stored
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps value size has to be
 * a product of BPF map value size and number of possible CPUs in the system
 * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
 * per-CPU values value size has to be aligned up to closest 8 bytes for
 * alignment reasons, so expected size is: `round_up(value_size, 8)
 * * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__lookup_elem()** is high-level equivalent of
 * **bpf_map_lookup_elem()** API with added check for key and value size.
 */
LIBBPF_API int bpf_map__lookup_elem(const struct bpf_map *map,
				    const void *key, size_t key_sz,
				    void *value, size_t value_sz, __u64 flags);
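
/* Usage sketch (illustrative only): look up one slot of a per-CPU array
 * map with __u64 values, sizing the buffer as described above
 * (round_up(8, 8) * number of possible CPUs).
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpus, sizeof(*vals));
 *	__u32 key = 0;
 *
 *	err = bpf_map__lookup_elem(map, &key, sizeof(key),
 *				   vals, ncpus * sizeof(*vals), 0);
 */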

/**
 * @brief **bpf_map__update_elem()** allows inserting or updating the value
 * in a BPF map that corresponds to the provided key.
 * @param map BPF map to insert to or update element in
 * @param key pointer to memory containing bytes of the key
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param value pointer to memory containing bytes of the value
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps value size has to be
 * a product of BPF map value size and number of possible CPUs in the system
 * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
 * per-CPU values value size has to be aligned up to closest 8 bytes for
 * alignment reasons, so expected size is: `round_up(value_size, 8)
 * * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__update_elem()** is high-level equivalent of
 * **bpf_map_update_elem()** API with added check for key and value size.
 */
LIBBPF_API int bpf_map__update_elem(const struct bpf_map *map,
				    const void *key, size_t key_sz,
				    const void *value, size_t value_sz, __u64 flags);

/**
 * @brief **bpf_map__delete_elem()** allows deleting the element in a BPF map
 * that corresponds to the provided key.
 * @param map BPF map to delete element from
 * @param key pointer to memory containing bytes of the key
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__delete_elem()** is high-level equivalent of
 * **bpf_map_delete_elem()** API with added check for key size.
 */
LIBBPF_API int bpf_map__delete_elem(const struct bpf_map *map,
				    const void *key, size_t key_sz, __u64 flags);

/**
 * @brief **bpf_map__lookup_and_delete_elem()** allows looking up the BPF map
 * value corresponding to the provided key and atomically deleting it
 * afterwards.
 * @param map BPF map to lookup element in
 * @param key pointer to memory containing bytes of the key used for lookup
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @param value pointer to memory in which looked up value will be stored
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps value size has to be
 * a product of BPF map value size and number of possible CPUs in the system
 * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
 * per-CPU values value size has to be aligned up to closest 8 bytes for
 * alignment reasons, so expected size is: `round_up(value_size, 8)
 * * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
 * @return 0, on success; negative error, otherwise
 *
 * **bpf_map__lookup_and_delete_elem()** is high-level equivalent of
 * **bpf_map_lookup_and_delete_elem()** API with added check for key and value size.
 */
LIBBPF_API int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
					       const void *key, size_t key_sz,
					       void *value, size_t value_sz, __u64 flags);

/**
 * @brief **bpf_map__get_next_key()** allows iterating BPF map keys by
 * fetching the next key that follows the current key.
 * @param map BPF map to fetch next key from
 * @param cur_key pointer to memory containing bytes of current key or NULL to
 * fetch the first key
 * @param next_key pointer to memory to write next key into
 * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
 * @return 0, on success; -ENOENT if **cur_key** is the last key in BPF map;
 * negative error, otherwise
 *
 * **bpf_map__get_next_key()** is high-level equivalent of
 * **bpf_map_get_next_key()** API with added check for key size.
 */
LIBBPF_API int bpf_map__get_next_key(const struct bpf_map *map,
				     const void *cur_key, void *next_key, size_t key_sz);
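
/* Usage sketch (illustrative only): walk all keys of a map with __u32 keys,
 * starting from NULL (the first key) and stopping on -ENOENT.
 *
 *	__u32 cur, next;
 *	int err;
 *
 *	for (err = bpf_map__get_next_key(map, NULL, &next, sizeof(next));
 *	     !err;
 *	     err = bpf_map__get_next_key(map, &cur, &next, sizeof(next))) {
 *		cur = next;
 *		// ... process cur ...
 *	}
 */
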
/**
 * @brief **bpf_map__set_exclusive_program()** sets a map to be exclusive to the
 * specified program. This must be called *before* the map is created.
 *
 * @param map BPF map to make exclusive.
 * @param prog BPF program to be the exclusive user of the map. Must belong
 * to the same bpf_object as the map.
 * @return 0 on success; a negative error code otherwise.
 *
 * This function must be called after the BPF object is opened but before
 * it is loaded. Once the object is loaded, only the specified program
 * will be able to access the map's contents.
 */
LIBBPF_API int bpf_map__set_exclusive_program(struct bpf_map *map, struct bpf_program *prog);

/**
 * @brief **bpf_map__exclusive_program()** returns the exclusive program
 * that is registered with the map (if any).
 * @param map BPF map to which the exclusive program is registered.
 * @return the registered exclusive program.
 */
LIBBPF_API struct bpf_program *bpf_map__exclusive_program(struct bpf_map *map);
1320
1321struct bpf_xdp_set_link_opts {
1322 size_t sz;
1323 int old_fd;
1324 size_t :0;
1325};
1326#define bpf_xdp_set_link_opts__last_field old_fd
1327
1328struct bpf_xdp_attach_opts {
1329 size_t sz;
1330 int old_prog_fd;
1331 size_t :0;
1332};
1333#define bpf_xdp_attach_opts__last_field old_prog_fd
1334
1335struct bpf_xdp_query_opts {
1336 size_t sz;
1337 __u32 prog_id; /* output */
1338 __u32 drv_prog_id; /* output */
1339 __u32 hw_prog_id; /* output */
1340 __u32 skb_prog_id; /* output */
1341 __u8 attach_mode; /* output */
1342 __u64 feature_flags; /* output */
1343 __u32 xdp_zc_max_segs; /* output */
1344 size_t :0;
1345};
1346#define bpf_xdp_query_opts__last_field xdp_zc_max_segs
1347
1348LIBBPF_API int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags,
1349 const struct bpf_xdp_attach_opts *opts);
1350LIBBPF_API int bpf_xdp_detach(int ifindex, __u32 flags,
1351 const struct bpf_xdp_attach_opts *opts);
1352LIBBPF_API int bpf_xdp_query(int ifindex, int flags, struct bpf_xdp_query_opts *opts);
1353LIBBPF_API int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id);
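
/*
 * Example usage (a sketch; "ifindex" and "prog_fd" are assumed to exist, and
 * XDP_FLAGS_DRV_MODE comes from <linux/if_link.h>): attaching an XDP program
 * in driver mode, querying the active program ID, and detaching. NULL opts
 * request default behavior.
 *
 *	__u32 prog_id = 0;
 *	int err;
 *
 *	err = bpf_xdp_attach(ifindex, prog_fd, XDP_FLAGS_DRV_MODE, NULL);
 *	if (!err)
 *		err = bpf_xdp_query_id(ifindex, XDP_FLAGS_DRV_MODE, &prog_id);
 *	if (!err)
 *		err = bpf_xdp_detach(ifindex, XDP_FLAGS_DRV_MODE, NULL);
 */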

/* TC related API */
enum bpf_tc_attach_point {
	BPF_TC_INGRESS = 1 << 0,
	BPF_TC_EGRESS = 1 << 1,
	BPF_TC_CUSTOM = 1 << 2,
	BPF_TC_QDISC = 1 << 3,
};

#define BPF_TC_PARENT(a, b)	\
	((((a) << 16) & 0xFFFF0000U) | ((b) & 0x0000FFFFU))

enum bpf_tc_flags {
	BPF_TC_F_REPLACE = 1 << 0,
};

struct bpf_tc_hook {
	size_t sz;
	int ifindex;
	enum bpf_tc_attach_point attach_point;
	__u32 parent;
	__u32 handle;
	const char *qdisc;
	size_t :0;
};
#define bpf_tc_hook__last_field qdisc

struct bpf_tc_opts {
	size_t sz;
	int prog_fd;
	__u32 flags;
	__u32 prog_id;
	__u32 handle;
	__u32 priority;
	size_t :0;
};
#define bpf_tc_opts__last_field priority

LIBBPF_API int bpf_tc_hook_create(struct bpf_tc_hook *hook);
LIBBPF_API int bpf_tc_hook_destroy(struct bpf_tc_hook *hook);
LIBBPF_API int bpf_tc_attach(const struct bpf_tc_hook *hook,
			     struct bpf_tc_opts *opts);
LIBBPF_API int bpf_tc_detach(const struct bpf_tc_hook *hook,
			     const struct bpf_tc_opts *opts);
LIBBPF_API int bpf_tc_query(const struct bpf_tc_hook *hook,
			    struct bpf_tc_opts *opts);
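
/*
 * Example usage (a sketch; "ifindex" and "prog_fd" are assumed to exist):
 * creating a hook on an interface's ingress and attaching a program to it.
 * bpf_tc_hook_create() may return -EEXIST if the qdisc is already present,
 * which is typically safe to ignore.
 *
 *	LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex,
 *		    .attach_point = BPF_TC_INGRESS);
 *	LIBBPF_OPTS(bpf_tc_opts, opts, .prog_fd = prog_fd);
 *	int err;
 *
 *	err = bpf_tc_hook_create(&hook);
 *	if (!err || err == -EEXIST)
 *		err = bpf_tc_attach(&hook, &opts);
 */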

/* Ring buffer APIs */
struct ring_buffer;
struct ring;
struct user_ring_buffer;

typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);

struct ring_buffer_opts {
	size_t sz; /* size of this struct, for forward/backward compatibility */
};

#define ring_buffer_opts__last_field sz

LIBBPF_API struct ring_buffer *
ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
		 const struct ring_buffer_opts *opts);
LIBBPF_API void ring_buffer__free(struct ring_buffer *rb);
LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
				ring_buffer_sample_fn sample_cb, void *ctx);
LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
LIBBPF_API int ring_buffer__consume_n(struct ring_buffer *rb, size_t n);
LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);
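
/*
 * Example usage (a sketch; "map_fd" is assumed to be the FD of a
 * BPF_MAP_TYPE_RINGBUF map): setting up a ring buffer manager with a sample
 * callback and polling it in a loop.
 *
 *	static int handle_sample(void *ctx, void *data, size_t size)
 *	{
 *		// process one record; non-zero return aborts consumption
 *		return 0;
 *	}
 *
 *	struct ring_buffer *rb = ring_buffer__new(map_fd, handle_sample, NULL, NULL);
 *	int err;
 *
 *	if (!rb)
 *		return -errno;
 *	while ((err = ring_buffer__poll(rb, 100)) >= 0) // 100ms timeout
 *		;
 *	ring_buffer__free(rb);
 */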

/**
 * @brief **ring_buffer__ring()** returns the ringbuffer object inside a given
 * ringbuffer manager representing a single BPF_MAP_TYPE_RINGBUF map instance.
 *
 * @param rb A ringbuffer manager object.
 * @param idx An index into the ringbuffers contained within the ringbuffer
 * manager object. The index is 0-based and corresponds to the order in which
 * **ring_buffer__add()** was called.
 * @return A ringbuffer object on success; NULL and errno set if the index is
 * invalid.
 */
LIBBPF_API struct ring *ring_buffer__ring(struct ring_buffer *rb,
					  unsigned int idx);

/**
 * @brief **ring__consumer_pos()** returns the current consumer position in the
 * given ringbuffer.
 *
 * @param r A ringbuffer object.
 * @return The current consumer position.
 */
LIBBPF_API unsigned long ring__consumer_pos(const struct ring *r);

/**
 * @brief **ring__producer_pos()** returns the current producer position in the
 * given ringbuffer.
 *
 * @param r A ringbuffer object.
 * @return The current producer position.
 */
LIBBPF_API unsigned long ring__producer_pos(const struct ring *r);

/**
 * @brief **ring__avail_data_size()** returns the number of bytes in the
 * ringbuffer not yet consumed. This has no locking associated with it, so it
 * can be inaccurate if operations are ongoing while this is called. However,
 * it should still show the correct trend over the long term.
 *
 * @param r A ringbuffer object.
 * @return The number of bytes not yet consumed.
 */
LIBBPF_API size_t ring__avail_data_size(const struct ring *r);

/**
 * @brief **ring__size()** returns the total size of the ringbuffer's map data
 * area (excluding special producer/consumer pages). Effectively this gives the
 * amount of usable bytes of data inside the ringbuffer.
 *
 * @param r A ringbuffer object.
 * @return The total size of the ringbuffer map data area.
 */
LIBBPF_API size_t ring__size(const struct ring *r);

/**
 * @brief **ring__map_fd()** returns the file descriptor underlying the given
 * ringbuffer.
 *
 * @param r A ringbuffer object.
 * @return The underlying ringbuffer file descriptor.
 */
LIBBPF_API int ring__map_fd(const struct ring *r);

/**
 * @brief **ring__consume()** consumes available ringbuffer data without event
 * polling.
 *
 * @param r A ringbuffer object.
 * @return The number of records consumed (capped at INT_MAX), or a negative
 * number if any of the callbacks returns an error.
 */
LIBBPF_API int ring__consume(struct ring *r);

/**
 * @brief **ring__consume_n()** consumes up to the requested number of items
 * from a ringbuffer without event polling.
 *
 * @param r A ringbuffer object.
 * @param n Maximum number of items to consume.
 * @return The number of items consumed, or a negative number if any of the
 * callbacks returns an error.
 */
LIBBPF_API int ring__consume_n(struct ring *r, size_t n);
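
/*
 * Example usage (a sketch; "rb" is assumed to be a ring buffer manager with
 * at least one registered ring): inspecting and draining an individual ring
 * without going through epoll.
 *
 *	struct ring *r = ring_buffer__ring(rb, 0);
 *
 *	if (!r)
 *		return -errno;
 *	if (ring__avail_data_size(r) > 0) {
 *		int n = ring__consume(r);
 *		// n is the number of records consumed, or a negative error
 *	}
 */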

struct user_ring_buffer_opts {
	size_t sz; /* size of this struct, for forward/backward compatibility */
};

#define user_ring_buffer_opts__last_field sz

/**
 * @brief **user_ring_buffer__new()** creates a new instance of a user ring
 * buffer.
 *
 * @param map_fd A file descriptor to a BPF_MAP_TYPE_USER_RINGBUF map.
 * @param opts Options for how the ring buffer should be created.
 * @return A user ring buffer on success; NULL and errno being set on a
 * failure.
 */
LIBBPF_API struct user_ring_buffer *
user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts);

/**
 * @brief **user_ring_buffer__reserve()** reserves a pointer to a sample in the
 * user ring buffer.
 * @param rb A pointer to a user ring buffer.
 * @param size The size of the sample, in bytes.
 * @return A pointer to an 8-byte aligned reserved region of the user ring
 * buffer; NULL, and errno being set if a sample could not be reserved.
 *
 * This function is *not* thread safe, and callers must synchronize accessing
 * this function if there are multiple producers. If a size is requested that
 * is larger than the size of the entire ring buffer, errno will be set to
 * E2BIG and NULL is returned. If the ring buffer could accommodate the size,
 * but currently does not have enough space, errno is set to ENOSPC and NULL is
 * returned.
 *
 * After initializing the sample, callers must invoke
 * **user_ring_buffer__submit()** to post the sample to the kernel. Otherwise,
 * the sample must be freed with **user_ring_buffer__discard()**.
 */
LIBBPF_API void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size);

/**
 * @brief **user_ring_buffer__reserve_blocking()** reserves a record in the
 * ring buffer, possibly blocking for up to **timeout_ms** until a sample
 * becomes available.
 * @param rb The user ring buffer.
 * @param size The size of the sample, in bytes.
 * @param timeout_ms The amount of time, in milliseconds, for which the caller
 * should block when waiting for a sample. -1 causes the caller to block
 * indefinitely.
 * @return A pointer to an 8-byte aligned reserved region of the user ring
 * buffer; NULL, and errno being set if a sample could not be reserved.
 *
 * This function is *not* thread safe, and callers must synchronize accessing
 * this function if there are multiple producers.
 *
 * If **timeout_ms** is -1, the function will block indefinitely until a sample
 * becomes available. Otherwise, **timeout_ms** must be non-negative, or errno
 * is set to EINVAL, and NULL is returned. If **timeout_ms** is 0, no blocking
 * will occur and the function will return immediately after attempting to
 * reserve a sample.
 *
 * If **size** is larger than the size of the entire ring buffer, errno is set
 * to E2BIG and NULL is returned. If the ring buffer could accommodate
 * **size**, but currently does not have enough space, the caller will block
 * until at most **timeout_ms** has elapsed. If insufficient space is available
 * at that time, errno is set to ENOSPC, and NULL is returned.
 *
 * The kernel guarantees that it will wake up this thread to check if
 * sufficient space is available in the ring buffer at least once per
 * invocation of the **bpf_ringbuf_drain()** helper function, provided that at
 * least one sample is consumed, and the BPF program did not invoke the
 * function with BPF_RB_NO_WAKEUP. A wakeup may occur sooner than that, but the
 * kernel does not guarantee this. If the helper function is invoked with
 * BPF_RB_FORCE_WAKEUP, a wakeup event will be sent even if no sample is
 * consumed.
 *
 * When a sample of size **size** is found within **timeout_ms**, a pointer to
 * the sample is returned. After initializing the sample, callers must invoke
 * **user_ring_buffer__submit()** to post the sample to the ring buffer.
 * Otherwise, the sample must be freed with **user_ring_buffer__discard()**.
 */
LIBBPF_API void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb,
						    __u32 size,
						    int timeout_ms);

/**
 * @brief **user_ring_buffer__submit()** submits a previously reserved sample
 * into the ring buffer.
 * @param rb The user ring buffer.
 * @param sample A reserved sample.
 *
 * It is not necessary to synchronize amongst multiple producers when invoking
 * this function.
 */
LIBBPF_API void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample);

/**
 * @brief **user_ring_buffer__discard()** discards a previously reserved sample.
 * @param rb The user ring buffer.
 * @param sample A reserved sample.
 *
 * It is not necessary to synchronize amongst multiple producers when invoking
 * this function.
 */
LIBBPF_API void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample);

/**
 * @brief **user_ring_buffer__free()** frees a ring buffer that was previously
 * created with **user_ring_buffer__new()**.
 * @param rb The user ring buffer being freed.
 */
LIBBPF_API void user_ring_buffer__free(struct user_ring_buffer *rb);
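
/*
 * Example usage (a sketch; "map_fd" is assumed to be the FD of a
 * BPF_MAP_TYPE_USER_RINGBUF map, and "struct my_sample" is a hypothetical
 * record type shared with the BPF program): reserving, filling, and
 * submitting one sample.
 *
 *	struct user_ring_buffer *urb = user_ring_buffer__new(map_fd, NULL);
 *	struct my_sample *s;
 *
 *	if (!urb)
 *		return -errno;
 *	s = user_ring_buffer__reserve(urb, sizeof(*s));
 *	if (!s) {
 *		user_ring_buffer__free(urb);
 *		return -errno;
 *	}
 *	s->value = 42; // fill in the sample before submitting
 *	user_ring_buffer__submit(urb, s);
 *	user_ring_buffer__free(urb);
 */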

/* Perf buffer APIs */
struct perf_buffer;

typedef void (*perf_buffer_sample_fn)(void *ctx, int cpu,
				      void *data, __u32 size);
typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);

/* common use perf buffer options */
struct perf_buffer_opts {
	size_t sz;
	__u32 sample_period;
	size_t :0;
};
#define perf_buffer_opts__last_field sample_period

/**
 * @brief **perf_buffer__new()** creates a BPF perfbuf manager for a specified
 * BPF_PERF_EVENT_ARRAY map.
 * @param map_fd FD of BPF_PERF_EVENT_ARRAY BPF map that will be used by BPF
 * code to send data over to user-space
 * @param page_cnt number of memory pages allocated for each per-CPU buffer
 * @param sample_cb function called on each received data record
 * @param lost_cb function called when record loss has occurred
 * @param ctx user-provided extra context passed into *sample_cb* and *lost_cb*
 * @param opts optional parameters for the perf buffer, can be NULL
 * @return a new instance of struct perf_buffer on success, NULL on error with
 * *errno* containing an error code
 */
LIBBPF_API struct perf_buffer *
perf_buffer__new(int map_fd, size_t page_cnt,
		 perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
		 const struct perf_buffer_opts *opts);
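
/*
 * Example usage (a sketch; "map_fd" is assumed to be the FD of a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map): creating a perfbuf manager with 8
 * pages per CPU and polling it.
 *
 *	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		// process one record delivered on 'cpu'
 *	}
 *
 *	static void on_lost(void *ctx, int cpu, __u64 cnt)
 *	{
 *		// 'cnt' records were dropped on 'cpu'
 *	}
 *
 *	struct perf_buffer *pb = perf_buffer__new(map_fd, 8, on_sample, on_lost,
 *						  NULL, NULL);
 *	int err;
 *
 *	if (!pb)
 *		return -errno;
 *	while ((err = perf_buffer__poll(pb, 100)) >= 0) // 100ms timeout
 *		;
 *	perf_buffer__free(pb);
 */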

enum bpf_perf_event_ret {
	LIBBPF_PERF_EVENT_DONE = 0,
	LIBBPF_PERF_EVENT_ERROR = -1,
	LIBBPF_PERF_EVENT_CONT = -2,
};

struct perf_event_header;

typedef enum bpf_perf_event_ret
(*perf_buffer_event_fn)(void *ctx, int cpu, struct perf_event_header *event);

/* raw perf buffer options, giving most power and control */
struct perf_buffer_raw_opts {
	size_t sz;
	long :0;
	long :0;
	/* if cpu_cnt == 0, open ring buffers on all possible CPUs (up to the
	 * number of max_entries of the given PERF_EVENT_ARRAY map)
	 */
	int cpu_cnt;
	/* if cpu_cnt > 0, cpus is an array of CPUs to open ring buffers on */
	int *cpus;
	/* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */
	int *map_keys;
};
#define perf_buffer_raw_opts__last_field map_keys

struct perf_event_attr;

LIBBPF_API struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
		     perf_buffer_event_fn event_cb, void *ctx,
		     const struct perf_buffer_raw_opts *opts);

LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx);
LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx);

/**
 * @brief **perf_buffer__buffer()** returns the per-CPU raw mmap()'ed
 * underlying memory region of the ring buffer.
 * This ring buffer can be used to implement a custom events consumer.
 * The ring buffer starts with the *struct perf_event_mmap_page*, which
 * holds the ring buffer management fields; when accessing the header
 * structure it's important to be SMP aware.
 * You can refer to *perf_event_read_simple* for a simple example.
 * @param pb the perf buffer structure
 * @param buf_idx the buffer index to retrieve
 * @param buf (out) gets the base pointer of the mmap()'ed memory
 * @param buf_size (out) gets the size of the mmap()'ed region
 * @return 0 on success, negative error code for failure
 */
LIBBPF_API int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf,
				   size_t *buf_size);
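
/*
 * Example usage (a sketch): obtaining the raw mmap()'ed region of the first
 * per-CPU buffer for a custom consumer. Reading the head/tail fields of
 * *struct perf_event_mmap_page* requires appropriate memory barriers, which
 * are omitted here.
 *
 *	struct perf_event_mmap_page *hdr;
 *	void *base;
 *	size_t sz;
 *	int err;
 *
 *	err = perf_buffer__buffer(pb, 0, &base, &sz);
 *	if (err)
 *		return err;
 *	hdr = base;
 *	// consume records between hdr->data_tail and hdr->data_head ...
 */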

struct bpf_prog_linfo;
struct bpf_prog_info;

LIBBPF_API void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo);
LIBBPF_API struct bpf_prog_linfo *
bpf_prog_linfo__new(const struct bpf_prog_info *info);
LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
				__u64 addr, __u32 func_idx, __u32 nr_skip);
LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
		      __u32 insn_off, __u32 nr_skip);

/*
 * Probe for supported system features
 *
 * Note that running many of these probes in a short amount of time can cause
 * the kernel to reach the maximal size of lockable memory allowed for the
 * user, causing subsequent probes to fail. In this case, the caller may want
 * to adjust that limit with setrlimit().
 */

/**
 * @brief **libbpf_probe_bpf_prog_type()** detects if host kernel supports
 * BPF programs of a given type.
 * @param prog_type BPF program type to detect kernel support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given program type is supported; 0, if given program type is
 * not supported; negative error code if feature detection failed or can't be
 * performed
 *
 * Make sure the process has the required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts);

/**
 * @brief **libbpf_probe_bpf_map_type()** detects if host kernel supports
 * BPF maps of a given type.
 * @param map_type BPF map type to detect kernel support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given map type is supported; 0, if given map type is
 * not supported; negative error code if feature detection failed or can't be
 * performed
 *
 * Make sure the process has the required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts);

/**
 * @brief **libbpf_probe_bpf_helper()** detects if host kernel supports the
 * use of a given BPF helper from specified BPF program type.
 * @param prog_type BPF program type used to check the support of BPF helper
 * @param helper_id BPF helper ID (enum bpf_func_id) to check support for
 * @param opts reserved for future extensibility, should be NULL
 * @return 1, if given combination of program type and helper is supported; 0,
 * if the combination is not supported; negative error code if feature
 * detection for provided input arguments failed or can't be performed
 *
 * Make sure the process has the required set of CAP_* permissions (or runs as
 * root) when performing feature checking.
 */
LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type,
				       enum bpf_func_id helper_id, const void *opts);
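
/*
 * Example usage (a sketch): checking whether the host kernel supports ring
 * buffer maps and the bpf_ringbuf_output() helper from a tracepoint program
 * before relying on them.
 *
 *	bool supported =
 *		libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL) == 1 &&
 *		libbpf_probe_bpf_helper(BPF_PROG_TYPE_TRACEPOINT,
 *					BPF_FUNC_ringbuf_output, NULL) == 1;
 */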

/**
 * @brief **libbpf_num_possible_cpus()** is a helper function to get the
 * number of possible CPUs that the host kernel supports and expects.
 * @return number of possible CPUs; or error code on failure
 *
 * Example usage:
 *
 *     int ncpus = libbpf_num_possible_cpus();
 *     if (ncpus < 0) {
 *          // error handling
 *     }
 *     long values[ncpus];
 *     bpf_map_lookup_elem(per_cpu_map_fd, key, values);
 */
LIBBPF_API int libbpf_num_possible_cpus(void);

struct bpf_map_skeleton {
	const char *name;
	struct bpf_map **map;
	void **mmaped;
	struct bpf_link **link;
};

struct bpf_prog_skeleton {
	const char *name;
	struct bpf_program **prog;
	struct bpf_link **link;
};

struct bpf_object_skeleton {
	size_t sz; /* size of this struct, for forward/backward compatibility */

	const char *name;
	const void *data;
	size_t data_sz;

	struct bpf_object **obj;

	int map_cnt;
	int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */
	struct bpf_map_skeleton *maps;

	int prog_cnt;
	int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */
	struct bpf_prog_skeleton *progs;
};

LIBBPF_API int
bpf_object__open_skeleton(struct bpf_object_skeleton *s,
			  const struct bpf_object_open_opts *opts);
LIBBPF_API int bpf_object__load_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s);
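
/*
 * These skeleton structs are normally populated by code generated with
 * "bpftool gen skeleton". A typical consumer of such generated code (a
 * sketch; "my_prog" is a hypothetical skeleton name) looks like:
 *
 *	struct my_prog *skel = my_prog__open();
 *
 *	if (!skel)
 *		return -errno;
 *	if (my_prog__load(skel) || my_prog__attach(skel)) {
 *		my_prog__destroy(skel);
 *		return -1;
 *	}
 *	// programs are attached; interact with skel->maps, skel->bss, etc.
 *	my_prog__destroy(skel);
 */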

struct bpf_var_skeleton {
	const char *name;
	struct bpf_map **map;
	void **addr;
};

struct bpf_object_subskeleton {
	size_t sz; /* size of this struct, for forward/backward compatibility */

	const struct bpf_object *obj;

	int map_cnt;
	int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */
	struct bpf_map_skeleton *maps;

	int prog_cnt;
	int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */
	struct bpf_prog_skeleton *progs;

	int var_cnt;
	int var_skel_sz; /* sizeof(struct bpf_var_skeleton) */
	struct bpf_var_skeleton *vars;
};

LIBBPF_API int
bpf_object__open_subskeleton(struct bpf_object_subskeleton *s);
LIBBPF_API void
bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s);

struct gen_loader_opts {
	size_t sz; /* size of this struct, for forward/backward compatibility */
	const char *data;
	const char *insns;
	__u32 data_sz;
	__u32 insns_sz;
	bool gen_hash;
};

#define gen_loader_opts__last_field gen_hash
LIBBPF_API int bpf_object__gen_loader(struct bpf_object *obj,
				      struct gen_loader_opts *opts);

enum libbpf_tristate {
	TRI_NO = 0,
	TRI_YES = 1,
	TRI_MODULE = 2,
};

struct bpf_linker_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
};
#define bpf_linker_opts__last_field sz

struct bpf_linker_file_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
};
#define bpf_linker_file_opts__last_field sz

struct bpf_linker;

LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
LIBBPF_API struct bpf_linker *bpf_linker__new_fd(int fd, struct bpf_linker_opts *opts);
LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
				    const char *filename,
				    const struct bpf_linker_file_opts *opts);
LIBBPF_API int bpf_linker__add_fd(struct bpf_linker *linker, int fd,
				  const struct bpf_linker_file_opts *opts);
LIBBPF_API int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz,
				   const struct bpf_linker_file_opts *opts);
LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);
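
/*
 * Example usage (a sketch; the object file names are illustrative): statically
 * linking two BPF object files into one.
 *
 *	struct bpf_linker *linker = bpf_linker__new("combined.bpf.o", NULL);
 *	int err;
 *
 *	if (!linker)
 *		return -errno;
 *	err = bpf_linker__add_file(linker, "a.bpf.o", NULL);
 *	if (!err)
 *		err = bpf_linker__add_file(linker, "b.bpf.o", NULL);
 *	if (!err)
 *		err = bpf_linker__finalize(linker);
 *	bpf_linker__free(linker);
 */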

/*
 * Custom handling of BPF program's SEC() definitions
 */

struct bpf_prog_load_opts; /* defined in bpf.h */

/* Called during bpf_object__open() for each recognized BPF program. Callback
 * can use various bpf_program__set_*() setters to adjust whatever properties
 * are necessary.
 */
typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie);

/* Called right before libbpf performs bpf_prog_load() to load BPF program
 * into the kernel. Callback can adjust opts as necessary.
 */
typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog,
					     struct bpf_prog_load_opts *opts, long cookie);

/* Called during skeleton attach or through bpf_program__attach(). If
 * auto-attach is not supported, callback should return 0 and set link to
 * NULL (it's not considered an error during skeleton attach, but it will be
 * an error for bpf_program__attach() calls). On error, error should be
 * returned directly and link set to NULL. On success, return 0 and set link
 * to a valid struct bpf_link.
 */
typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie,
				       struct bpf_link **link);

struct libbpf_prog_handler_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* User-provided value that is passed to prog_setup_fn,
	 * prog_prepare_load_fn, and prog_attach_fn callbacks. Allows user to
	 * register one set of callbacks for multiple SEC() definitions and
	 * still be able to distinguish them, if necessary. For example,
	 * libbpf itself is using this to pass necessary flags (e.g.,
	 * sleepable flag) to a common internal SEC() handler.
	 */
	long cookie;
	/* BPF program initialization callback (see libbpf_prog_setup_fn_t).
	 * Callback is optional, pass NULL if it's not necessary.
	 */
	libbpf_prog_setup_fn_t prog_setup_fn;
	/* BPF program loading callback (see libbpf_prog_prepare_load_fn_t).
	 * Callback is optional, pass NULL if it's not necessary.
	 */
	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
	/* BPF program attach callback (see libbpf_prog_attach_fn_t).
	 * Callback is optional, pass NULL if it's not necessary.
	 */
	libbpf_prog_attach_fn_t prog_attach_fn;
};
#define libbpf_prog_handler_opts__last_field prog_attach_fn

/**
 * @brief **libbpf_register_prog_handler()** registers a custom BPF program
 * SEC() handler.
 * @param sec section prefix for which custom handler is registered
 * @param prog_type BPF program type associated with specified section
 * @param exp_attach_type Expected BPF attach type associated with specified section
 * @param opts optional cookie, callbacks, and other extra options
 * @return Non-negative handler ID is returned on success. This handler ID has
 * to be passed to *libbpf_unregister_prog_handler()* to unregister such
 * custom handler. Negative error code is returned on error.
 *
 * *sec* defines which SEC() definitions are handled by this custom handler
 * registration. *sec* can have a few different forms:
 * - if *sec* is just a plain string (e.g., "abc"), it will match only
 *   SEC("abc"). If BPF program specifies SEC("abc/whatever") it will result
 *   in an error;
 * - if *sec* is of the form "abc/", proper SEC() form is
 *   SEC("abc/something"), where acceptable "something" should be checked by
 *   the *prog_setup_fn* callback, if there are additional restrictions;
 * - if *sec* is of the form "abc+", it will successfully match both
 *   SEC("abc") and SEC("abc/whatever") forms;
 * - if *sec* is NULL, custom handler is registered for any BPF program that
 *   doesn't match any of the registered (custom or libbpf's own) SEC()
 *   handlers. There can be only one such generic custom handler registered
 *   at any given time.
 *
 * All custom handlers (except the one with *sec* == NULL) are processed
 * before libbpf's own SEC() handlers. It is allowed to "override" libbpf's
 * SEC() handlers by registering custom ones for the same section prefix
 * (i.e., it's possible to have a custom SEC("perf_event/LLC-load-misses")
 * handler).
 *
 * Note that, like most other global libbpf APIs (e.g., libbpf_set_print(),
 * libbpf_set_strict_mode(), etc.), these APIs are not thread-safe. Users need
 * to ensure synchronization if there is a risk of running this API from
 * multiple threads simultaneously.
 */
LIBBPF_API int libbpf_register_prog_handler(const char *sec,
					    enum bpf_prog_type prog_type,
					    enum bpf_attach_type exp_attach_type,
					    const struct libbpf_prog_handler_opts *opts);

/**
 * @brief **libbpf_unregister_prog_handler()** unregisters a previously
 * registered custom BPF program SEC() handler.
 * @param handler_id handler ID returned by *libbpf_register_prog_handler()*
 * after successful registration
 * @return 0 on success, negative error code if handler isn't found
 *
 * Note that, like most other global libbpf APIs (e.g., libbpf_set_print(),
 * libbpf_set_strict_mode(), etc.), these APIs are not thread-safe. Users need
 * to ensure synchronization if there is a risk of running this API from
 * multiple threads simultaneously.
 */
LIBBPF_API int libbpf_unregister_prog_handler(int handler_id);
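
/*
 * Example usage (a sketch; the "myprobe" section prefix and setup callback
 * are illustrative): registering a handler so that SEC("myprobe") and
 * SEC("myprobe/...") programs are treated as kprobes, then unregistering it.
 *
 *	static int my_setup(struct bpf_program *prog, long cookie)
 *	{
 *		// adjust program properties as needed
 *		return 0;
 *	}
 *
 *	LIBBPF_OPTS(libbpf_prog_handler_opts, opts, .prog_setup_fn = my_setup);
 *	int id = libbpf_register_prog_handler("myprobe+", BPF_PROG_TYPE_KPROBE,
 *					      0, &opts);
 *
 *	if (id < 0)
 *		return id;
 *	// ... open/load objects using SEC("myprobe/...") programs ...
 *	libbpf_unregister_prog_handler(id);
 */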

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __LIBBPF_LIBBPF_H */