/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * Internal libbpf helpers.
 *
 * Copyright (c) 2019 Facebook
 */

#ifndef __LIBBPF_LIBBPF_INTERNAL_H
#define __LIBBPF_LIBBPF_INTERNAL_H

#include <stdlib.h>
#include <byteswap.h>
#include <limits.h>
#include <errno.h>
#include <linux/err.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <libelf.h>
#include "relo_core.h"

/* Android's libc doesn't support AT_EACCESS in faccessat() implementation
 * ([0]), and just returns -EINVAL even if file exists and is accessible.
 * See [1] for issues caused by this.
 *
 * So just redefine it to 0 on Android.
 *
 * [0] https://android.googlesource.com/platform/bionic/+/refs/heads/android13-release/libc/bionic/faccessat.cpp#50
 * [1] https://github.com/libbpf/libbpf-bootstrap/issues/250#issuecomment-1911324250
 */
#ifdef __ANDROID__
#undef AT_EACCESS
#define AT_EACCESS 0
#endif

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

/* prevent accidental re-addition of reallocarray() */
#pragma GCC poison reallocarray

#include "libbpf.h"
#include "btf.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef R_BPF_64_64
#define R_BPF_64_64 1
#endif
#ifndef R_BPF_64_ABS64
#define R_BPF_64_ABS64 2
#endif
#ifndef R_BPF_64_ABS32
#define R_BPF_64_ABS32 3
#endif
#ifndef R_BPF_64_32
#define R_BPF_64_32 10
#endif

#ifndef SHT_LLVM_ADDRSIG
#define SHT_LLVM_ADDRSIG 0x6FFF4C03
#endif

/* if libelf is old and doesn't support mmap(), fall back to read() */
#ifndef ELF_C_READ_MMAP
#define ELF_C_READ_MMAP ELF_C_READ
#endif

/* Older libelf all end up in this expression, for both 32 and 64 bit */
#ifndef ELF64_ST_VISIBILITY
#define ELF64_ST_VISIBILITY(o) ((o) & 0x03)
#endif

#define JUMPTABLES_SEC ".jumptables"

#define BTF_INFO_ENC(kind, kind_flag, vlen) \
	((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
	((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
	BTF_INT_ENC(encoding, bits_offset, bits)
#define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset)
#define BTF_PARAM_ENC(name, type) (name), (type)
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
#define BTF_TYPE_FLOAT_ENC(name, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
#define BTF_TYPE_DECL_TAG_ENC(value, type, component_idx) \
	BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx)
#define BTF_TYPE_TYPE_TAG_ENC(value, type) \
	BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TYPE_TAG, 0, 0), type)
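
/* Illustrative sketch (not part of this header): the BTF_*_ENC() macros above
 * expand to comma-separated __u32 words, so raw BTF type data can be spelled
 * out as an array literal. A hypothetical feature probe could describe a
 * single signed 32-bit "int" type (name offset 1 into strs) roughly as:
 *
 *	const char strs[] = "\0int";
 *	__u32 types[] = {
 *		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
 *	};
 *
 * and hand both arrays to libbpf__load_raw_btf(), declared later in this
 * file.
 */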

#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#ifndef min
# define min(x, y) ((x) < (y) ? (x) : (y))
#endif
#ifndef max
# define max(x, y) ((x) < (y) ? (y) : (x))
#endif
#ifndef offsetofend
# define offsetofend(TYPE, FIELD) \
	(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
#endif
#ifndef __alias
#define __alias(symbol) __attribute__((alias(#symbol)))
#endif

/* Check whether a string `str` has prefix `pfx`, regardless if `pfx` is
 * a string literal known at compilation time or char * pointer known only at
 * runtime.
 */
#define str_has_pfx(str, pfx) \
	(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)

/* suffix check */
static inline bool str_has_sfx(const char *str, const char *sfx)
{
	size_t str_len = strlen(str);
	size_t sfx_len = strlen(sfx);

	if (sfx_len > str_len)
		return false;
	return strcmp(str + str_len - sfx_len, sfx) == 0;
}
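
/* Illustrative example (not part of this header): str_has_pfx() works the
 * same way for both forms below; with a string literal the prefix length is
 * folded to a compile-time constant via __builtin_constant_p(), with a
 * runtime char * it falls back to strlen():
 *
 *	if (str_has_pfx(sec_name, "kprobe/"))
 *		handle_kprobe_prog();
 *	if (str_has_sfx(map_name, ".rodata"))
 *		handle_rodata_map();
 *
 * sec_name, map_name and the two handlers are hypothetical.
 */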

/* Symbol versioning is different between static and shared library.
 * Properly versioned symbols are needed for shared library, but
 * only the symbol of the new version is needed for static library.
 * Starting with GNU C 10, use symver attribute instead of .symver assembler
 * directive, which works better with GCC LTO builds.
 */
#if defined(SHARED) && defined(__GNUC__) && __GNUC__ >= 10

#define DEFAULT_VERSION(internal_name, api_name, version) \
	__attribute__((symver(#api_name "@@" #version)))
#define COMPAT_VERSION(internal_name, api_name, version) \
	__attribute__((symver(#api_name "@" #version)))

#elif defined(SHARED)

#define COMPAT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@" #version);
#define DEFAULT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@@" #version);

#else /* !SHARED */

#define COMPAT_VERSION(internal_name, api_name, version)
#define DEFAULT_VERSION(internal_name, api_name, version) \
	extern typeof(internal_name) api_name \
	__attribute__((alias(#internal_name)));

#endif

extern void libbpf_print(enum libbpf_print_level level,
			 const char *format, ...)
	__attribute__((format(printf, 2, 3)));

#define __pr(level, fmt, ...)	\
do {				\
	libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__);	\
} while (0)

#define pr_warn(fmt, ...)	__pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)

/**
 * @brief **libbpf_errstr()** returns string corresponding to numeric errno
 * @param err negative numeric errno
 * @return pointer to string representation of the errno, that is invalidated
 * upon the next call.
 */
const char *libbpf_errstr(int err);

#define errstr(err) libbpf_errstr(err)

#ifndef __has_builtin
#define __has_builtin(x) 0
#endif

struct bpf_link {
	int (*detach)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	char *pin_path;		/* NULL, if not pinned */
	int fd;			/* hook FD, -1 if not applicable */
	bool disconnected;
};

/*
 * Re-implement glibc's reallocarray() for libbpf internal-only use.
 * reallocarray(), unfortunately, is not available in all versions of glibc,
 * so requires extra feature detection and using reallocarray() stub from
 * <tools/libc_compat.h> and COMPAT_NEED_REALLOCARRAY. All this complicates
 * build of libbpf unnecessarily and is just a maintenance burden. Instead,
 * it's trivial to implement libbpf-specific internal version and use it
 * throughout libbpf.
 */
static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
{
	size_t total;

#if __has_builtin(__builtin_mul_overflow)
	if (unlikely(__builtin_mul_overflow(nmemb, size, &total)))
		return NULL;
#else
	if (size == 0 || nmemb > ULONG_MAX / size)
		return NULL;
	total = nmemb * size;
#endif
	return realloc(ptr, total);
}

/* Copy up to sz - 1 bytes from zero-terminated src string and ensure that dst
 * is zero-terminated string no matter what (unless sz == 0, in which case
 * it's a no-op). It's conceptually close to FreeBSD's strlcpy(), but differs
 * in what is returned. Given this is internal helper, it's trivial to extend
 * this, when necessary. Use this instead of strncpy inside libbpf source code.
 */
static inline void libbpf_strlcpy(char *dst, const char *src, size_t sz)
{
	size_t i;

	if (sz == 0)
		return;

	sz--;
	for (i = 0; i < sz && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';
}
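
/* Illustrative example (not part of this header): unlike strncpy(), the
 * result below is guaranteed to be NUL-terminated even when src is longer
 * than the destination buffer (it is silently truncated):
 *
 *	char name[BPF_OBJ_NAME_LEN];
 *
 *	libbpf_strlcpy(name, prog_name, sizeof(name));
 *
 * prog_name is a hypothetical, arbitrarily long source string.
 */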

__u32 get_kernel_version(void);

struct btf;
struct btf_type;

struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);
const char *btf_kind_str(const struct btf_type *t);
const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
const struct btf_header *btf_header(const struct btf *btf);
void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map);

static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
{
	return (enum btf_func_linkage)(int)btf_vlen(t);
}

static inline __u32 btf_type_info(int kind, int vlen, int kflag)
{
	return (kflag << 31) | (kind << 24) | vlen;
}

enum map_def_parts {
	MAP_DEF_MAP_TYPE	= 0x001,
	MAP_DEF_KEY_TYPE	= 0x002,
	MAP_DEF_KEY_SIZE	= 0x004,
	MAP_DEF_VALUE_TYPE	= 0x008,
	MAP_DEF_VALUE_SIZE	= 0x010,
	MAP_DEF_MAX_ENTRIES	= 0x020,
	MAP_DEF_MAP_FLAGS	= 0x040,
	MAP_DEF_NUMA_NODE	= 0x080,
	MAP_DEF_PINNING		= 0x100,
	MAP_DEF_INNER_MAP	= 0x200,
	MAP_DEF_MAP_EXTRA	= 0x400,

	MAP_DEF_ALL		= 0x7ff, /* combination of all above */
};

struct btf_map_def {
	enum map_def_parts parts;
	__u32 map_type;
	__u32 key_type_id;
	__u32 key_size;
	__u32 value_type_id;
	__u32 value_size;
	__u32 max_entries;
	__u32 map_flags;
	__u32 numa_node;
	__u32 pinning;
	__u64 map_extra;
};

int parse_btf_map_def(const char *map_name, struct btf *btf,
		      const struct btf_type *def_t, bool strict,
		      struct btf_map_def *map_def, struct btf_map_def *inner_def);

void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		     size_t cur_cnt, size_t max_cnt, size_t add_cnt);
int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);

static inline bool libbpf_is_mem_zeroed(const char *p, ssize_t len)
{
	while (len > 0) {
		if (*p)
			return false;
		p++;
		len--;
	}
	return true;
}

static inline bool libbpf_validate_opts(const char *opts,
					size_t opts_sz, size_t user_sz,
					const char *type_name)
{
	if (user_sz < sizeof(size_t)) {
		pr_warn("%s size (%zu) is too small\n", type_name, user_sz);
		return false;
	}
	if (!libbpf_is_mem_zeroed(opts + opts_sz, (ssize_t)user_sz - opts_sz)) {
		pr_warn("%s has non-zero extra bytes\n", type_name);
		return false;
	}
	return true;
}

#define OPTS_VALID(opts, type)						\
	(!(opts) || libbpf_validate_opts((const char *)opts,		\
					 offsetofend(struct type,	\
						     type##__last_field), \
					 (opts)->sz, #type))
#define OPTS_HAS(opts, field) \
	((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
#define OPTS_GET(opts, field, fallback_value) \
	(OPTS_HAS(opts, field) ? (opts)->field : fallback_value)
#define OPTS_SET(opts, field, value)	\
	do {				\
		if (OPTS_HAS(opts, field)) \
			(opts)->field = value; \
	} while (0)

#define OPTS_ZEROED(opts, last_nonzero_field)				\
({									\
	ssize_t __off = offsetofend(typeof(*(opts)), last_nonzero_field); \
	!(opts) || libbpf_is_mem_zeroed((const void *)opts + __off,	\
					(opts)->sz - __off);		\
})
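
/* Illustrative sketch (not part of this header) of the opts convention the
 * OPTS_*() macros support: a public API takes a struct whose first field is
 * size_t sz, the caller sets sz = sizeof(*opts), and the callee only touches
 * fields that the caller's (possibly older or newer) struct really contains.
 * some_api/some_opts are hypothetical; <type>__last_field is expected to name
 * the last field, as libbpf's public headers do:
 *
 *	int some_api(const struct some_opts *opts)
 *	{
 *		__u64 flags;
 *
 *		if (!OPTS_VALID(opts, some_opts))
 *			return libbpf_err(-EINVAL);
 *		flags = OPTS_GET(opts, flags, 0);
 *		...
 *	}
 *
 * OPTS_GET() returns the fallback value (0 here) when the caller's struct is
 * too small to contain the requested field.
 */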

enum kern_feature_id {
	/* v4.14: kernel support for program & map names. */
	FEAT_PROG_NAME,
	/* v5.2: kernel support for global data sections. */
	FEAT_GLOBAL_DATA,
	/* BTF support */
	FEAT_BTF,
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	FEAT_BTF_FUNC,
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	FEAT_BTF_DATASEC,
	/* BTF_FUNC_GLOBAL is supported */
	FEAT_BTF_GLOBAL_FUNC,
	/* BPF_F_MMAPABLE is supported for arrays */
	FEAT_ARRAY_MMAP,
	/* kernel support for expected_attach_type in BPF_PROG_LOAD */
	FEAT_EXP_ATTACH_TYPE,
	/* bpf_probe_read_{kernel,user}[_str] helpers */
	FEAT_PROBE_READ_KERN,
	/* BPF_PROG_BIND_MAP is supported */
	FEAT_PROG_BIND_MAP,
	/* Kernel support for module BTFs */
	FEAT_MODULE_BTF,
	/* BTF_KIND_FLOAT support */
	FEAT_BTF_FLOAT,
	/* BPF perf link support */
	FEAT_PERF_LINK,
	/* BTF_KIND_DECL_TAG support */
	FEAT_BTF_DECL_TAG,
	/* BTF_KIND_TYPE_TAG support */
	FEAT_BTF_TYPE_TAG,
	/* memcg-based accounting for BPF maps and progs */
	FEAT_MEMCG_ACCOUNT,
	/* BPF cookie (bpf_get_attach_cookie() BPF helper) support */
	FEAT_BPF_COOKIE,
	/* BTF_KIND_ENUM64 support and BTF_KIND_ENUM kflag support */
	FEAT_BTF_ENUM64,
	/* Kernel uses syscall wrapper (CONFIG_ARCH_HAS_SYSCALL_WRAPPER) */
	FEAT_SYSCALL_WRAPPER,
	/* BPF multi-uprobe link support */
	FEAT_UPROBE_MULTI_LINK,
	/* Kernel supports arg:ctx tag (__arg_ctx) for global subprogs natively */
	FEAT_ARG_CTX_TAG,
	/* Kernel supports '?' at the front of datasec names */
	FEAT_BTF_QMARK_DATASEC,
	__FEAT_CNT,
};

enum kern_feature_result {
	FEAT_UNKNOWN = 0,
	FEAT_SUPPORTED = 1,
	FEAT_MISSING = 2,
};

struct kern_feature_cache {
	enum kern_feature_result res[__FEAT_CNT];
	int token_fd;
};

bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id);
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);

int probe_kern_syscall_wrapper(int token_fd);
int probe_memcg_account(int token_fd);
int bump_rlimit_memlock(void);

int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len,
			 int token_fd);
int btf_load_into_kernel(struct btf *btf,
			 char *log_buf, size_t log_sz, __u32 log_level,
			 int token_fd);
struct btf *btf_load_from_kernel(__u32 id, struct btf *base_btf, int token_fd);

struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
				const char **prefix, int *kind);

struct btf_ext_info {
	/*
	 * info points to the individual info section (e.g. func_info and
	 * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
	 */
	void *info;
	__u32 rec_size;
	__u32 len;
	/* optional (maintained internally by libbpf) mapping between .BTF.ext
	 * section and corresponding ELF section. This is used to join
	 * information like CO-RE relocation records with corresponding BPF
	 * programs defined in ELF sections
	 */
	__u32 *sec_idxs;
	int sec_cnt;
};

#define for_each_btf_ext_sec(seg, sec)					\
	for (sec = (seg)->info;						\
	     (void *)sec < (seg)->info + (seg)->len;			\
	     sec = (void *)sec + sizeof(struct btf_ext_info_sec) +	\
		   (seg)->rec_size * sec->num_info)

#define for_each_btf_ext_rec(seg, sec, i, rec)				\
	for (i = 0, rec = (void *)&(sec)->data;				\
	     i < (sec)->num_info;					\
	     i++, rec = (void *)rec + (seg)->rec_size)

/*
 * The .BTF.ext ELF section layout defined as
 *   struct btf_ext_header
 *   func_info subsection
 *
 * The func_info subsection layout:
 *   record size for struct bpf_func_info in the func_info subsection
 *   struct btf_ext_info_sec for section #1
 *   a list of bpf_func_info records for section #1
 *     where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
 *     but may not be identical
 *   struct btf_ext_info_sec for section #2
 *   a list of bpf_func_info records for section #2
 *   ......
 *
 * Note that the bpf_func_info record size in .BTF.ext may not
 * be the same as the one defined in include/uapi/linux/bpf.h.
 * The loader should ensure that record_size meets minimum
 * requirement and pass the record as is to the kernel. The
 * kernel will handle the func_info properly based on its contents.
 */
struct btf_ext_header {
	__u16 magic;
	__u8 version;
	__u8 flags;
	__u32 hdr_len;

	/* All offsets are in bytes relative to the end of this header */
	__u32 func_info_off;
	__u32 func_info_len;
	__u32 line_info_off;
	__u32 line_info_len;

	/* optional part of .BTF.ext header */
	__u32 core_relo_off;
	__u32 core_relo_len;
};

struct btf_ext {
	union {
		struct btf_ext_header *hdr;
		void *data;
	};
	void *data_swapped;
	bool swapped_endian;
	struct btf_ext_info func_info;
	struct btf_ext_info line_info;
	struct btf_ext_info core_relo_info;
	__u32 data_size;
};

struct btf_ext_info_sec {
	__u32	sec_name_off;
	__u32	num_info;
	/* Followed by num_info * record_size number of bytes */
	__u8	data[];
};
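
/* Illustrative sketch (not part of this header): walking all func_info
 * records of a parsed .BTF.ext with the iteration macros above. Records are
 * rec_size bytes apart, which may be larger than the *_min layouts declared
 * below, so only the leading fields are assumed:
 *
 *	struct btf_ext_info *seg = &btf_ext->func_info;
 *	struct btf_ext_info_sec *sec;
 *	struct bpf_func_info_min *rec;
 *	__u32 i;
 *
 *	for_each_btf_ext_sec(seg, sec) {
 *		for_each_btf_ext_rec(seg, sec, i, rec)
 *			process(rec->insn_off, rec->type_id);
 *	}
 *
 * btf_ext and process() are hypothetical.
 */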

/* The minimum bpf_func_info checked by the loader */
struct bpf_func_info_min {
	__u32	insn_off;
	__u32	type_id;
};

/* The minimum bpf_line_info checked by the loader */
struct bpf_line_info_min {
	__u32	insn_off;
	__u32	file_name_off;
	__u32	line_off;
	__u32	line_col;
};

/* Functions to byte-swap info records */

typedef void (*info_rec_bswap_fn)(void *);

static inline void bpf_func_info_bswap(struct bpf_func_info *i)
{
	i->insn_off = bswap_32(i->insn_off);
	i->type_id = bswap_32(i->type_id);
}

static inline void bpf_line_info_bswap(struct bpf_line_info *i)
{
	i->insn_off = bswap_32(i->insn_off);
	i->file_name_off = bswap_32(i->file_name_off);
	i->line_off = bswap_32(i->line_off);
	i->line_col = bswap_32(i->line_col);
}

static inline void bpf_core_relo_bswap(struct bpf_core_relo *i)
{
	i->insn_off = bswap_32(i->insn_off);
	i->type_id = bswap_32(i->type_id);
	i->access_str_off = bswap_32(i->access_str_off);
	i->kind = bswap_32(i->kind);
}

enum btf_field_iter_kind {
	BTF_FIELD_ITER_IDS,
	BTF_FIELD_ITER_STRS,
};

struct btf_field_desc {
	/* once-per-type offsets */
	int t_off_cnt, t_offs[2];
	/* member struct size, or zero, if no members */
	int m_sz;
	/* repeated per-member offsets */
	int m_off_cnt, m_offs[1];
};

struct btf_field_iter {
	struct btf_field_desc desc;
	void *p;
	int m_idx;
	int off_idx;
	int vlen;
};

int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind);
__u32 *btf_field_iter_next(struct btf_field_iter *it);

typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind);

/* handle direct returned errors */
static inline int libbpf_err(int ret)
{
	if (ret < 0)
		errno = -ret;
	return ret;
}

/* handle errno-based (e.g., syscall or libc) errors according to libbpf's
 * strict mode settings
 */
static inline int libbpf_err_errno(int ret)
{
	/* errno is already assumed to be set on error */
	return ret < 0 ? -errno : ret;
}

/* handle error for pointer-returning APIs, err is assumed to be < 0 always */
static inline void *libbpf_err_ptr(int err)
{
	/* set errno on error, this doesn't break anything */
	errno = -err;
	return NULL;
}

/* handle pointer-returning APIs' error handling */
static inline void *libbpf_ptr(void *ret)
{
	/* set errno on error, this doesn't break anything */
	if (IS_ERR(ret))
		errno = -PTR_ERR(ret);

	return IS_ERR(ret) ? NULL : ret;
}
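
/* Illustrative example (not part of this header) of how the helpers above are
 * meant to be used at public API boundaries: internal code reports errors as
 * negative error codes (or ERR_PTR()-encoded pointers), and the exported
 * wrapper funnels them through libbpf_err()/libbpf_ptr() so that callers get
 * a consistent return value and errno. some_object__load()/do_load() are
 * hypothetical:
 *
 *	int some_object__load(struct some_object *obj)
 *	{
 *		return libbpf_err(do_load(obj));
 *	}
 */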

static inline bool str_is_empty(const char *s)
{
	return !s || !s[0];
}

static inline bool is_ldimm64_insn(struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

static inline void bpf_insn_bswap(struct bpf_insn *insn)
{
	__u8 tmp_reg = insn->dst_reg;

	insn->dst_reg = insn->src_reg;
	insn->src_reg = tmp_reg;
	insn->off = bswap_16(insn->off);
	insn->imm = bswap_32(insn->imm);
}

/* Unconditionally dup FD, ensuring it doesn't use [0, 2] range.
 * Original FD is not closed or altered in any other way.
 * Preserves original FD value, if it's invalid (negative).
 */
static inline int dup_good_fd(int fd)
{
	if (fd < 0)
		return fd;
	return fcntl(fd, F_DUPFD_CLOEXEC, 3);
}

/* if fd is stdin, stdout, or stderr, dup to a fd greater than 2
 * Takes ownership of the fd passed in, and closes it if calling
 * fcntl(fd, F_DUPFD_CLOEXEC, 3).
 */
static inline int ensure_good_fd(int fd)
{
	int old_fd = fd, saved_errno;

	if (fd < 0)
		return fd;
	if (fd < 3) {
		fd = dup_good_fd(fd);
		saved_errno = errno;
		close(old_fd);
		errno = saved_errno;
		if (fd < 0) {
			pr_warn("failed to dup FD %d to FD > 2: %d\n", old_fd, -saved_errno);
			errno = saved_errno;
		}
	}
	return fd;
}

static inline int sys_dup3(int oldfd, int newfd, int flags)
{
	return syscall(__NR_dup3, oldfd, newfd, flags);
}

/* Some versions of Android don't provide memfd_create() in their libc
 * implementation, so avoid complications and just go straight to Linux
 * syscall.
 */
static inline int sys_memfd_create(const char *name, unsigned flags)
{
	return syscall(__NR_memfd_create, name, flags);
}

/* Point *fixed_fd* to the same file that *tmp_fd* points to.
 * Regardless of success, *tmp_fd* is closed.
 * Whatever *fixed_fd* pointed to is closed silently.
 */
static inline int reuse_fd(int fixed_fd, int tmp_fd)
{
	int err;

	err = sys_dup3(tmp_fd, fixed_fd, O_CLOEXEC);
	err = err < 0 ? -errno : 0;
	close(tmp_fd); /* clean up temporary FD */
	return err;
}
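
/* Illustrative sketch (not part of this header): reuse_fd() is convenient
 * when a stable FD number must end up referring to a newly created object,
 * e.g. a placeholder FD reserved earlier. create_new_map_fd() and map->fd
 * are hypothetical; tmp_fd is closed by reuse_fd() regardless of outcome:
 *
 *	int tmp_fd, err;
 *
 *	tmp_fd = create_new_map_fd();
 *	if (tmp_fd < 0)
 *		return tmp_fd;
 *	err = reuse_fd(map->fd, tmp_fd);
 *	if (err)
 *		return err;
 */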

/* The following two functions are exposed to bpftool */
int bpf_core_add_cands(struct bpf_core_cand *local_cand,
		       size_t local_essent_len,
		       const struct btf *targ_btf,
		       const char *targ_btf_name,
		       int targ_start_id,
		       struct bpf_core_cand_list *cands);
void bpf_core_free_cands(struct bpf_core_cand_list *cands);

struct usdt_manager *usdt_manager_new(struct bpf_object *obj);
void usdt_manager_free(struct usdt_manager *man);
struct bpf_link * usdt_manager_attach_usdt(struct usdt_manager *man,
					   const struct bpf_program *prog,
					   pid_t pid, const char *path,
					   const char *usdt_provider, const char *usdt_name,
					   __u64 usdt_cookie);

static inline bool is_pow_of_2(size_t x)
{
	return x && (x & (x - 1)) == 0;
}

static inline __u32 ror32(__u32 v, int bits)
{
	return (v >> bits) | (v << (32 - bits));
}

#define PROG_LOAD_ATTEMPTS 5
int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts);

bool glob_match(const char *str, const char *pat);

long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name);
long elf_find_func_offset_from_file(const char *binary_path, const char *name);

struct elf_fd {
	Elf *elf;
	int fd;
};

int elf_open(const char *binary_path, struct elf_fd *elf_fd);
void elf_close(struct elf_fd *elf_fd);

int elf_resolve_syms_offsets(const char *binary_path, int cnt,
			     const char **syms, unsigned long **poffsets,
			     int st_type);
int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
				unsigned long **poffsets, size_t *pcnt);

int probe_fd(int fd);

#define SHA256_DIGEST_LENGTH	32
#define SHA256_DWORD_SIZE	SHA256_DIGEST_LENGTH / sizeof(__u64)

void libbpf_sha256(const void *data, size_t len, __u8 out[SHA256_DIGEST_LENGTH]);

#endif /* __LIBBPF_LIBBPF_INTERNAL_H */