1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2
3/*
4 * Common eBPF ELF object loading operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
9 * Copyright (C) 2017 Nicira, Inc.
10 * Copyright (C) 2019 Isovalent, Inc.
11 */
12
13#ifndef _GNU_SOURCE
14#define _GNU_SOURCE
15#endif
16#include <stdlib.h>
17#include <stdio.h>
18#include <stdarg.h>
19#include <libgen.h>
20#include <inttypes.h>
21#include <limits.h>
22#include <string.h>
23#include <unistd.h>
24#include <endian.h>
25#include <fcntl.h>
26#include <errno.h>
27#include <ctype.h>
28#include <asm/unistd.h>
29#include <linux/err.h>
30#include <linux/kernel.h>
31#include <linux/bpf.h>
32#include <linux/btf.h>
33#include <linux/filter.h>
34#include <linux/list.h>
35#include <linux/limits.h>
36#include <linux/perf_event.h>
37#include <linux/ring_buffer.h>
38#include <linux/version.h>
39#include <sys/epoll.h>
40#include <sys/ioctl.h>
41#include <sys/mman.h>
42#include <sys/stat.h>
43#include <sys/types.h>
44#include <sys/vfs.h>
45#include <sys/utsname.h>
46#include <sys/resource.h>
47#include <libelf.h>
48#include <gelf.h>
49#include <zlib.h>
50
51#include "libbpf.h"
52#include "bpf.h"
53#include "btf.h"
54#include "str_error.h"
55#include "libbpf_internal.h"
56#include "hashmap.h"
57#include "bpf_gen_internal.h"
58
59#ifndef BPF_FS_MAGIC
60#define BPF_FS_MAGIC 0xcafe4a11
61#endif
62
63#define BPF_INSN_SZ (sizeof(struct bpf_insn))
64
65/* vfprintf() in __base_pr() uses a nonliteral format string. It may break
66 * compilation if the user enables the corresponding warning. Disable it explicitly.
67 */
68#pragma GCC diagnostic ignored "-Wformat-nonliteral"
69
70#define __printf(a, b) __attribute__((format(printf, a, b)))
71
72static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
73static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
74
75static int __base_pr(enum libbpf_print_level level, const char *format,
76 va_list args)
77{
78 if (level == LIBBPF_DEBUG)
79 return 0;
80
81 return vfprintf(stderr, format, args);
82}
83
84static libbpf_print_fn_t __libbpf_pr = __base_pr;
85
86libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
87{
88 libbpf_print_fn_t old_print_fn = __libbpf_pr;
89
90 __libbpf_pr = fn;
91 return old_print_fn;
92}
93
94__printf(2, 3)
95void libbpf_print(enum libbpf_print_level level, const char *format, ...)
96{
97 va_list args;
98
99 if (!__libbpf_pr)
100 return;
101
102 va_start(args, format);
103 __libbpf_pr(level, format, args);
104 va_end(args);
105}
106
107static void pr_perm_msg(int err)
108{
109 struct rlimit limit;
110 char buf[100];
111
112 if (err != -EPERM || geteuid() != 0)
113 return;
114
115 err = getrlimit(RLIMIT_MEMLOCK, &limit);
116 if (err)
117 return;
118
119 if (limit.rlim_cur == RLIM_INFINITY)
120 return;
121
122 if (limit.rlim_cur < 1024)
123 snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
124 else if (limit.rlim_cur < 1024*1024)
125 snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
126 else
127 snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));
128
129 pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
130 buf);
131}
132
133#define STRERR_BUFSIZE 128
134
135/* Copied from tools/perf/util/util.h */
136#ifndef zfree
137# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
138#endif
139
140#ifndef zclose
141# define zclose(fd) ({ \
142 int ___err = 0; \
143 if ((fd) >= 0) \
144 ___err = close((fd)); \
145 fd = -1; \
146 ___err; })
147#endif
148
149static inline __u64 ptr_to_u64(const void *ptr)
150{
151 return (__u64) (unsigned long) ptr;
152}
153
154/* this goes away in libbpf 1.0 */
155enum libbpf_strict_mode libbpf_mode = LIBBPF_STRICT_NONE;
156
157int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
158{
159 /* __LIBBPF_STRICT_LAST is the last power-of-2 value used + 1, so to
160 * get all possible values we compensate last +1, and then (2*x - 1)
161 * to get the bit mask
162 */
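	/* Illustrative arithmetic (hypothetical flag values, not the actual
	 * enum contents): if the highest individual flag were 0x8, then
	 * __LIBBPF_STRICT_LAST would be 0x9, and (0x9 - 1) * 2 - 1 == 0xf,
	 * i.e. exactly the mask covering flag bits 0x1 | 0x2 | 0x4 | 0x8.
	 * Any mode with a bit outside that mask is rejected by the check
	 * below, unless it is LIBBPF_STRICT_ALL.
	 */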
163 if (mode != LIBBPF_STRICT_ALL
164 && (mode & ~((__LIBBPF_STRICT_LAST - 1) * 2 - 1)))
165 return errno = EINVAL, -EINVAL;
166
167 libbpf_mode = mode;
168 return 0;
169}
170
171enum kern_feature_id {
172 /* v4.14: kernel support for program & map names. */
173 FEAT_PROG_NAME,
174 /* v5.2: kernel support for global data sections. */
175 FEAT_GLOBAL_DATA,
176 /* BTF support */
177 FEAT_BTF,
178 /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
179 FEAT_BTF_FUNC,
180 /* BTF_KIND_VAR and BTF_KIND_DATASEC support */
181 FEAT_BTF_DATASEC,
182 /* BTF_FUNC_GLOBAL is supported */
183 FEAT_BTF_GLOBAL_FUNC,
184 /* BPF_F_MMAPABLE is supported for arrays */
185 FEAT_ARRAY_MMAP,
186 /* kernel support for expected_attach_type in BPF_PROG_LOAD */
187 FEAT_EXP_ATTACH_TYPE,
188 /* bpf_probe_read_{kernel,user}[_str] helpers */
189 FEAT_PROBE_READ_KERN,
190 /* BPF_PROG_BIND_MAP is supported */
191 FEAT_PROG_BIND_MAP,
192 /* Kernel support for module BTFs */
193 FEAT_MODULE_BTF,
194 /* BTF_KIND_FLOAT support */
195 FEAT_BTF_FLOAT,
196 __FEAT_CNT,
197};
198
199static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
200
201enum reloc_type {
202 RELO_LD64,
203 RELO_CALL,
204 RELO_DATA,
205 RELO_EXTERN_VAR,
206 RELO_EXTERN_FUNC,
207 RELO_SUBPROG_ADDR,
208};
209
210struct reloc_desc {
211 enum reloc_type type;
212 int insn_idx;
213 int map_idx;
214 int sym_off;
215};
216
217struct bpf_sec_def;
218
219typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
220 struct bpf_program *prog);
221
222struct bpf_sec_def {
223 const char *sec;
224 size_t len;
225 enum bpf_prog_type prog_type;
226 enum bpf_attach_type expected_attach_type;
227 bool is_exp_attach_type_optional;
228 bool is_attachable;
229 bool is_attach_btf;
230 bool is_sleepable;
231 attach_fn_t attach_fn;
232};
233
234/*
235 * bpf_prog should be a better name but it has been used in
236 * linux/filter.h.
237 */
238struct bpf_program {
239 const struct bpf_sec_def *sec_def;
240 char *sec_name;
241 size_t sec_idx;
242 /* this program's instruction offset (in number of instructions)
243 * within its containing ELF section
244 */
245 size_t sec_insn_off;
246 /* number of original instructions in ELF section belonging to this
247 * program, not taking into account subprogram instructions possible
248	 * program, not taking into account subprogram instructions possibly
249 */
250 size_t sec_insn_cnt;
251	 * Offset (in number of instructions) of the start of instructions
252 * belonging to this BPF program within its containing main BPF
253 * program. For the entry-point (main) BPF program, this is always
254	 * zero. For a sub-program, this gets reset before each of the main BPF
255	 * programs is processed and relocated, and is used to determine
256	 * whether the sub-program was already appended to the main program, and
257 * if yes, at which instruction offset.
258 */
259 size_t sub_insn_off;
260
261 char *name;
262 /* sec_name with / replaced by _; makes recursive pinning
263 * in bpf_object__pin_programs easier
264 */
265 char *pin_name;
266
267 /* instructions that belong to BPF program; insns[0] is located at
268 * sec_insn_off instruction within its ELF section in ELF file, so
269 * when mapping ELF file instruction index to the local instruction,
270 * one needs to subtract sec_insn_off; and vice versa.
271 */
272 struct bpf_insn *insns;
273	 /* actual number of instructions in this BPF program's image; for
274 * entry-point BPF programs this includes the size of main program
275 * itself plus all the used sub-programs, appended at the end
276 */
277 size_t insns_cnt;
278
279 struct reloc_desc *reloc_desc;
280 int nr_reloc;
281 int log_level;
282
283 struct {
284 int nr;
285 int *fds;
286 } instances;
287 bpf_program_prep_t preprocessor;
288
289 struct bpf_object *obj;
290 void *priv;
291 bpf_program_clear_priv_t clear_priv;
292
293 bool load;
294 bool mark_btf_static;
295 enum bpf_prog_type type;
296 enum bpf_attach_type expected_attach_type;
297 int prog_ifindex;
298 __u32 attach_btf_obj_fd;
299 __u32 attach_btf_id;
300 __u32 attach_prog_fd;
301 void *func_info;
302 __u32 func_info_rec_size;
303 __u32 func_info_cnt;
304
305 void *line_info;
306 __u32 line_info_rec_size;
307 __u32 line_info_cnt;
308 __u32 prog_flags;
309};
310
311struct bpf_struct_ops {
312 const char *tname;
313 const struct btf_type *type;
314 struct bpf_program **progs;
315 __u32 *kern_func_off;
316 /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
317 void *data;
318 /* e.g. struct bpf_struct_ops_tcp_congestion_ops in
319 * btf_vmlinux's format.
320 * struct bpf_struct_ops_tcp_congestion_ops {
321 * [... some other kernel fields ...]
322 * struct tcp_congestion_ops data;
323 * }
324	 * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
325 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
326 * from "data".
327 */
328 void *kern_vdata;
329 __u32 type_id;
330};
331
332#define DATA_SEC ".data"
333#define BSS_SEC ".bss"
334#define RODATA_SEC ".rodata"
335#define KCONFIG_SEC ".kconfig"
336#define KSYMS_SEC ".ksyms"
337#define STRUCT_OPS_SEC ".struct_ops"
338
339enum libbpf_map_type {
340 LIBBPF_MAP_UNSPEC,
341 LIBBPF_MAP_DATA,
342 LIBBPF_MAP_BSS,
343 LIBBPF_MAP_RODATA,
344 LIBBPF_MAP_KCONFIG,
345};
346
347static const char * const libbpf_type_to_btf_name[] = {
348 [LIBBPF_MAP_DATA] = DATA_SEC,
349 [LIBBPF_MAP_BSS] = BSS_SEC,
350 [LIBBPF_MAP_RODATA] = RODATA_SEC,
351 [LIBBPF_MAP_KCONFIG] = KCONFIG_SEC,
352};
353
354struct bpf_map {
355 char *name;
356 int fd;
357 int sec_idx;
358 size_t sec_offset;
359 int map_ifindex;
360 int inner_map_fd;
361 struct bpf_map_def def;
362 __u32 numa_node;
363 __u32 btf_var_idx;
364 __u32 btf_key_type_id;
365 __u32 btf_value_type_id;
366 __u32 btf_vmlinux_value_type_id;
367 void *priv;
368 bpf_map_clear_priv_t clear_priv;
369 enum libbpf_map_type libbpf_type;
370 void *mmaped;
371 struct bpf_struct_ops *st_ops;
372 struct bpf_map *inner_map;
373 void **init_slots;
374 int init_slots_sz;
375 char *pin_path;
376 bool pinned;
377 bool reused;
378};
379
380enum extern_type {
381 EXT_UNKNOWN,
382 EXT_KCFG,
383 EXT_KSYM,
384};
385
386enum kcfg_type {
387 KCFG_UNKNOWN,
388 KCFG_CHAR,
389 KCFG_BOOL,
390 KCFG_INT,
391 KCFG_TRISTATE,
392 KCFG_CHAR_ARR,
393};
394
395struct extern_desc {
396 enum extern_type type;
397 int sym_idx;
398 int btf_id;
399 int sec_btf_id;
400 const char *name;
401 bool is_set;
402 bool is_weak;
403 union {
404 struct {
405 enum kcfg_type type;
406 int sz;
407 int align;
408 int data_off;
409 bool is_signed;
410 } kcfg;
411 struct {
412 unsigned long long addr;
413
414 /* target btf_id of the corresponding kernel var. */
415 int kernel_btf_obj_fd;
416 int kernel_btf_id;
417
418 /* local btf_id of the ksym extern's type. */
419 __u32 type_id;
420 } ksym;
421 };
422};
423
424static LIST_HEAD(bpf_objects_list);
425
426struct module_btf {
427 struct btf *btf;
428 char *name;
429 __u32 id;
430 int fd;
431};
432
433struct bpf_object {
434 char name[BPF_OBJ_NAME_LEN];
435 char license[64];
436 __u32 kern_version;
437
438 struct bpf_program *programs;
439 size_t nr_programs;
440 struct bpf_map *maps;
441 size_t nr_maps;
442 size_t maps_cap;
443
444 char *kconfig;
445 struct extern_desc *externs;
446 int nr_extern;
447 int kconfig_map_idx;
448 int rodata_map_idx;
449
450 bool loaded;
451 bool has_subcalls;
452
453 struct bpf_gen *gen_loader;
454
455 /*
456	 * Information used while doing ELF-related work. Only valid if fd
457 * is valid.
458 */
459 struct {
460 int fd;
461 const void *obj_buf;
462 size_t obj_buf_sz;
463 Elf *elf;
464 GElf_Ehdr ehdr;
465 Elf_Data *symbols;
466 Elf_Data *data;
467 Elf_Data *rodata;
468 Elf_Data *bss;
469 Elf_Data *st_ops_data;
470 size_t shstrndx; /* section index for section name strings */
471 size_t strtabidx;
472 struct {
473 GElf_Shdr shdr;
474 Elf_Data *data;
475 } *reloc_sects;
476 int nr_reloc_sects;
477 int maps_shndx;
478 int btf_maps_shndx;
479 __u32 btf_maps_sec_btf_id;
480 int text_shndx;
481 int symbols_shndx;
482 int data_shndx;
483 int rodata_shndx;
484 int bss_shndx;
485 int st_ops_shndx;
486 } efile;
487 /*
488	 * All loaded bpf_object structs are linked in a list, which is
489	 * hidden from the caller. bpf_objects__<func> handlers deal with
490 * all objects.
491 */
492 struct list_head list;
493
494 struct btf *btf;
495 struct btf_ext *btf_ext;
496
497 /* Parse and load BTF vmlinux if any of the programs in the object need
498 * it at load time.
499 */
500 struct btf *btf_vmlinux;
501 /* vmlinux BTF override for CO-RE relocations */
502 struct btf *btf_vmlinux_override;
503 /* Lazily initialized kernel module BTFs */
504 struct module_btf *btf_modules;
505 bool btf_modules_loaded;
506 size_t btf_module_cnt;
507 size_t btf_module_cap;
508
509 void *priv;
510 bpf_object_clear_priv_t clear_priv;
511
512 char path[];
513};
514#define obj_elf_valid(o) ((o)->efile.elf)
515
516static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
517static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
518static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
519static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
520static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
521static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
522static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
523
524void bpf_program__unload(struct bpf_program *prog)
525{
526 int i;
527
528 if (!prog)
529 return;
530
531 /*
532 * If the object is opened but the program was never loaded,
533 * it is possible that prog->instances.nr == -1.
534 */
535 if (prog->instances.nr > 0) {
536 for (i = 0; i < prog->instances.nr; i++)
537 zclose(prog->instances.fds[i]);
538 } else if (prog->instances.nr != -1) {
539 pr_warn("Internal error: instances.nr is %d\n",
540 prog->instances.nr);
541 }
542
543 prog->instances.nr = -1;
544 zfree(&prog->instances.fds);
545
546 zfree(&prog->func_info);
547 zfree(&prog->line_info);
548}
549
550static void bpf_program__exit(struct bpf_program *prog)
551{
552 if (!prog)
553 return;
554
555 if (prog->clear_priv)
556 prog->clear_priv(prog, prog->priv);
557
558 prog->priv = NULL;
559 prog->clear_priv = NULL;
560
561 bpf_program__unload(prog);
562 zfree(&prog->name);
563 zfree(&prog->sec_name);
564 zfree(&prog->pin_name);
565 zfree(&prog->insns);
566 zfree(&prog->reloc_desc);
567
568 prog->nr_reloc = 0;
569 prog->insns_cnt = 0;
570 prog->sec_idx = -1;
571}
572
573static char *__bpf_program__pin_name(struct bpf_program *prog)
574{
575 char *name, *p;
576
577 name = p = strdup(prog->sec_name);
578 while ((p = strchr(p, '/')))
579 *p = '_';
580
581 return name;
582}
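
/* Illustrative example of the substitution above: a program in ELF section
 * "cgroup/skb" ends up with the pin name "cgroup_skb" after every '/' is
 * replaced with '_'.
 */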
583
584static bool insn_is_subprog_call(const struct bpf_insn *insn)
585{
586 return BPF_CLASS(insn->code) == BPF_JMP &&
587 BPF_OP(insn->code) == BPF_CALL &&
588 BPF_SRC(insn->code) == BPF_K &&
589 insn->src_reg == BPF_PSEUDO_CALL &&
590 insn->dst_reg == 0 &&
591 insn->off == 0;
592}
593
594static bool is_ldimm64_insn(struct bpf_insn *insn)
595{
596 return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
597}
598
599static bool is_call_insn(const struct bpf_insn *insn)
600{
601 return insn->code == (BPF_JMP | BPF_CALL);
602}
603
604static bool insn_is_pseudo_func(struct bpf_insn *insn)
605{
606 return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
607}
608
609static int
610bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
611 const char *name, size_t sec_idx, const char *sec_name,
612 size_t sec_off, void *insn_data, size_t insn_data_sz)
613{
614 if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
615 pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
616 sec_name, name, sec_off, insn_data_sz);
617 return -EINVAL;
618 }
619
620 memset(prog, 0, sizeof(*prog));
621 prog->obj = obj;
622
623 prog->sec_idx = sec_idx;
624 prog->sec_insn_off = sec_off / BPF_INSN_SZ;
625 prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
626 /* insns_cnt can later be increased by appending used subprograms */
627 prog->insns_cnt = prog->sec_insn_cnt;
628
629 prog->type = BPF_PROG_TYPE_UNSPEC;
630 prog->load = true;
631
632 prog->instances.fds = NULL;
633 prog->instances.nr = -1;
634
635 prog->sec_name = strdup(sec_name);
636 if (!prog->sec_name)
637 goto errout;
638
639 prog->name = strdup(name);
640 if (!prog->name)
641 goto errout;
642
643 prog->pin_name = __bpf_program__pin_name(prog);
644 if (!prog->pin_name)
645 goto errout;
646
647 prog->insns = malloc(insn_data_sz);
648 if (!prog->insns)
649 goto errout;
650 memcpy(prog->insns, insn_data, insn_data_sz);
651
652 return 0;
653errout:
654 pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
655 bpf_program__exit(prog);
656 return -ENOMEM;
657}
658
659static int
660bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
661 const char *sec_name, int sec_idx)
662{
663 Elf_Data *symbols = obj->efile.symbols;
664 struct bpf_program *prog, *progs;
665 void *data = sec_data->d_buf;
666 size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
667 int nr_progs, err, i;
668 const char *name;
669 GElf_Sym sym;
670
671 progs = obj->programs;
672 nr_progs = obj->nr_programs;
673 nr_syms = symbols->d_size / sizeof(GElf_Sym);
674 sec_off = 0;
675
676 for (i = 0; i < nr_syms; i++) {
677 if (!gelf_getsym(symbols, i, &sym))
678 continue;
679 if (sym.st_shndx != sec_idx)
680 continue;
681 if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
682 continue;
683
684 prog_sz = sym.st_size;
685 sec_off = sym.st_value;
686
687 name = elf_sym_str(obj, sym.st_name);
688 if (!name) {
689 pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
690 sec_name, sec_off);
691 return -LIBBPF_ERRNO__FORMAT;
692 }
693
694 if (sec_off + prog_sz > sec_sz) {
695 pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
696 sec_name, sec_off);
697 return -LIBBPF_ERRNO__FORMAT;
698 }
699
700 if (sec_idx != obj->efile.text_shndx && GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
701 pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
702 return -ENOTSUP;
703 }
704
705 pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
706 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
707
708 progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
709 if (!progs) {
710 /*
711 * In this case the original obj->programs
712				 * is still valid, so no special treatment is needed in
713				 * bpf_object__close().
714 */
715 pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
716 sec_name, name);
717 return -ENOMEM;
718 }
719 obj->programs = progs;
720
721 prog = &progs[nr_progs];
722
723 err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
724 sec_off, data + sec_off, prog_sz);
725 if (err)
726 return err;
727
728 /* if function is a global/weak symbol, but has restricted
729 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
730 * as static to enable more permissive BPF verification mode
731 * with more outside context available to BPF verifier
732 */
733 if (GELF_ST_BIND(sym.st_info) != STB_LOCAL
734 && (GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN
735 || GELF_ST_VISIBILITY(sym.st_other) == STV_INTERNAL))
736 prog->mark_btf_static = true;
737
738 nr_progs++;
739 obj->nr_programs = nr_progs;
740 }
741
742 return 0;
743}
744
745static __u32 get_kernel_version(void)
746{
747 __u32 major, minor, patch;
748 struct utsname info;
749
750 uname(&info);
751 if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
752 return 0;
753 return KERNEL_VERSION(major, minor, patch);
754}
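
/* Illustrative examples for the parsing above: a release string like
 * "5.13.0-rc5" yields KERNEL_VERSION(5, 13, 0); a string with fewer than
 * three numeric components (e.g. "5.13-rc5") makes sscanf() return less
 * than 3, so the function falls back to returning 0.
 */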
755
756static const struct btf_member *
757find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
758{
759 struct btf_member *m;
760 int i;
761
762 for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
763 if (btf_member_bit_offset(t, i) == bit_offset)
764 return m;
765 }
766
767 return NULL;
768}
769
770static const struct btf_member *
771find_member_by_name(const struct btf *btf, const struct btf_type *t,
772 const char *name)
773{
774 struct btf_member *m;
775 int i;
776
777 for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
778 if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
779 return m;
780 }
781
782 return NULL;
783}
784
785#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
786static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
787 const char *name, __u32 kind);
788
789static int
790find_struct_ops_kern_types(const struct btf *btf, const char *tname,
791 const struct btf_type **type, __u32 *type_id,
792 const struct btf_type **vtype, __u32 *vtype_id,
793 const struct btf_member **data_member)
794{
795 const struct btf_type *kern_type, *kern_vtype;
796 const struct btf_member *kern_data_member;
797 __s32 kern_vtype_id, kern_type_id;
798 __u32 i;
799
800 kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
801 if (kern_type_id < 0) {
802 pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
803 tname);
804 return kern_type_id;
805 }
806 kern_type = btf__type_by_id(btf, kern_type_id);
807
808 /* Find the corresponding "map_value" type that will be used
809 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
810 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
811 * btf_vmlinux.
812 */
813 kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
814 tname, BTF_KIND_STRUCT);
815 if (kern_vtype_id < 0) {
816 pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
817 STRUCT_OPS_VALUE_PREFIX, tname);
818 return kern_vtype_id;
819 }
820 kern_vtype = btf__type_by_id(btf, kern_vtype_id);
821
822 /* Find "struct tcp_congestion_ops" from
823 * struct bpf_struct_ops_tcp_congestion_ops {
824 * [ ... ]
825 * struct tcp_congestion_ops data;
826 * }
827 */
828 kern_data_member = btf_members(kern_vtype);
829 for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
830 if (kern_data_member->type == kern_type_id)
831 break;
832 }
833 if (i == btf_vlen(kern_vtype)) {
834 pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
835 tname, STRUCT_OPS_VALUE_PREFIX, tname);
836 return -EINVAL;
837 }
838
839 *type = kern_type;
840 *type_id = kern_type_id;
841 *vtype = kern_vtype;
842 *vtype_id = kern_vtype_id;
843 *data_member = kern_data_member;
844
845 return 0;
846}
847
848static bool bpf_map__is_struct_ops(const struct bpf_map *map)
849{
850 return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
851}
852
853/* Init the map's fields that depend on kern_btf */
854static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
855 const struct btf *btf,
856 const struct btf *kern_btf)
857{
858 const struct btf_member *member, *kern_member, *kern_data_member;
859 const struct btf_type *type, *kern_type, *kern_vtype;
860 __u32 i, kern_type_id, kern_vtype_id, kern_data_off;
861 struct bpf_struct_ops *st_ops;
862 void *data, *kern_data;
863 const char *tname;
864 int err;
865
866 st_ops = map->st_ops;
867 type = st_ops->type;
868 tname = st_ops->tname;
869 err = find_struct_ops_kern_types(kern_btf, tname,
870 &kern_type, &kern_type_id,
871 &kern_vtype, &kern_vtype_id,
872 &kern_data_member);
873 if (err)
874 return err;
875
876 pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
877 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
878
879 map->def.value_size = kern_vtype->size;
880 map->btf_vmlinux_value_type_id = kern_vtype_id;
881
882 st_ops->kern_vdata = calloc(1, kern_vtype->size);
883 if (!st_ops->kern_vdata)
884 return -ENOMEM;
885
886 data = st_ops->data;
887 kern_data_off = kern_data_member->offset / 8;
888 kern_data = st_ops->kern_vdata + kern_data_off;
889
890 member = btf_members(type);
891 for (i = 0; i < btf_vlen(type); i++, member++) {
892 const struct btf_type *mtype, *kern_mtype;
893 __u32 mtype_id, kern_mtype_id;
894 void *mdata, *kern_mdata;
895 __s64 msize, kern_msize;
896 __u32 moff, kern_moff;
897 __u32 kern_member_idx;
898 const char *mname;
899
900 mname = btf__name_by_offset(btf, member->name_off);
901 kern_member = find_member_by_name(kern_btf, kern_type, mname);
902 if (!kern_member) {
903 pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
904 map->name, mname);
905 return -ENOTSUP;
906 }
907
908 kern_member_idx = kern_member - btf_members(kern_type);
909 if (btf_member_bitfield_size(type, i) ||
910 btf_member_bitfield_size(kern_type, kern_member_idx)) {
911 pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
912 map->name, mname);
913 return -ENOTSUP;
914 }
915
916 moff = member->offset / 8;
917 kern_moff = kern_member->offset / 8;
918
919 mdata = data + moff;
920 kern_mdata = kern_data + kern_moff;
921
922 mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
923 kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
924 &kern_mtype_id);
925 if (BTF_INFO_KIND(mtype->info) !=
926 BTF_INFO_KIND(kern_mtype->info)) {
927 pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
928 map->name, mname, BTF_INFO_KIND(mtype->info),
929 BTF_INFO_KIND(kern_mtype->info));
930 return -ENOTSUP;
931 }
932
933 if (btf_is_ptr(mtype)) {
934 struct bpf_program *prog;
935
936 prog = st_ops->progs[i];
937 if (!prog)
938 continue;
939
940 kern_mtype = skip_mods_and_typedefs(kern_btf,
941 kern_mtype->type,
942 &kern_mtype_id);
943
944 /* mtype->type must be a func_proto which was
945 * guaranteed in bpf_object__collect_st_ops_relos(),
946 * so only check kern_mtype for func_proto here.
947 */
948 if (!btf_is_func_proto(kern_mtype)) {
949 pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
950 map->name, mname);
951 return -ENOTSUP;
952 }
953
954 prog->attach_btf_id = kern_type_id;
955 prog->expected_attach_type = kern_member_idx;
956
957 st_ops->kern_func_off[i] = kern_data_off + kern_moff;
958
959 pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
960 map->name, mname, prog->name, moff,
961 kern_moff);
962
963 continue;
964 }
965
966 msize = btf__resolve_size(btf, mtype_id);
967 kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
968 if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
969 pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
970 map->name, mname, (ssize_t)msize,
971 (ssize_t)kern_msize);
972 return -ENOTSUP;
973 }
974
975 pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
976 map->name, mname, (unsigned int)msize,
977 moff, kern_moff);
978 memcpy(kern_mdata, mdata, msize);
979 }
980
981 return 0;
982}
983
984static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
985{
986 struct bpf_map *map;
987 size_t i;
988 int err;
989
990 for (i = 0; i < obj->nr_maps; i++) {
991 map = &obj->maps[i];
992
993 if (!bpf_map__is_struct_ops(map))
994 continue;
995
996 err = bpf_map__init_kern_struct_ops(map, obj->btf,
997 obj->btf_vmlinux);
998 if (err)
999 return err;
1000 }
1001
1002 return 0;
1003}
1004
1005static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
1006{
1007 const struct btf_type *type, *datasec;
1008 const struct btf_var_secinfo *vsi;
1009 struct bpf_struct_ops *st_ops;
1010 const char *tname, *var_name;
1011 __s32 type_id, datasec_id;
1012 const struct btf *btf;
1013 struct bpf_map *map;
1014 __u32 i;
1015
1016 if (obj->efile.st_ops_shndx == -1)
1017 return 0;
1018
1019 btf = obj->btf;
1020 datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
1021 BTF_KIND_DATASEC);
1022 if (datasec_id < 0) {
1023 pr_warn("struct_ops init: DATASEC %s not found\n",
1024 STRUCT_OPS_SEC);
1025 return -EINVAL;
1026 }
1027
1028 datasec = btf__type_by_id(btf, datasec_id);
1029 vsi = btf_var_secinfos(datasec);
1030 for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
1031 type = btf__type_by_id(obj->btf, vsi->type);
1032 var_name = btf__name_by_offset(obj->btf, type->name_off);
1033
1034 type_id = btf__resolve_type(obj->btf, vsi->type);
1035 if (type_id < 0) {
1036 pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
1037 vsi->type, STRUCT_OPS_SEC);
1038 return -EINVAL;
1039 }
1040
1041 type = btf__type_by_id(obj->btf, type_id);
1042 tname = btf__name_by_offset(obj->btf, type->name_off);
1043 if (!tname[0]) {
1044 pr_warn("struct_ops init: anonymous type is not supported\n");
1045 return -ENOTSUP;
1046 }
1047 if (!btf_is_struct(type)) {
1048 pr_warn("struct_ops init: %s is not a struct\n", tname);
1049 return -EINVAL;
1050 }
1051
1052 map = bpf_object__add_map(obj);
1053 if (IS_ERR(map))
1054 return PTR_ERR(map);
1055
1056 map->sec_idx = obj->efile.st_ops_shndx;
1057 map->sec_offset = vsi->offset;
1058 map->name = strdup(var_name);
1059 if (!map->name)
1060 return -ENOMEM;
1061
1062 map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
1063 map->def.key_size = sizeof(int);
1064 map->def.value_size = type->size;
1065 map->def.max_entries = 1;
1066
1067 map->st_ops = calloc(1, sizeof(*map->st_ops));
1068 if (!map->st_ops)
1069 return -ENOMEM;
1070 st_ops = map->st_ops;
1071 st_ops->data = malloc(type->size);
1072 st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
1073 st_ops->kern_func_off = malloc(btf_vlen(type) *
1074 sizeof(*st_ops->kern_func_off));
1075 if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
1076 return -ENOMEM;
1077
1078 if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
1079 pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
1080 var_name, STRUCT_OPS_SEC);
1081 return -EINVAL;
1082 }
1083
1084 memcpy(st_ops->data,
1085 obj->efile.st_ops_data->d_buf + vsi->offset,
1086 type->size);
1087 st_ops->tname = tname;
1088 st_ops->type = type;
1089 st_ops->type_id = type_id;
1090
1091 pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
1092 tname, type_id, var_name, vsi->offset);
1093 }
1094
1095 return 0;
1096}
1097
1098static struct bpf_object *bpf_object__new(const char *path,
1099 const void *obj_buf,
1100 size_t obj_buf_sz,
1101 const char *obj_name)
1102{
1103 struct bpf_object *obj;
1104 char *end;
1105
1106 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
1107 if (!obj) {
1108 pr_warn("alloc memory failed for %s\n", path);
1109 return ERR_PTR(-ENOMEM);
1110 }
1111
1112 strcpy(obj->path, path);
1113 if (obj_name) {
1114 strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
1115 obj->name[sizeof(obj->name) - 1] = 0;
1116 } else {
1117		/* Using the GNU version of basename(), which doesn't modify its arg. */
1118 strncpy(obj->name, basename((void *)path),
1119 sizeof(obj->name) - 1);
1120 end = strchr(obj->name, '.');
1121 if (end)
1122 *end = 0;
1123 }
1124
1125 obj->efile.fd = -1;
1126 /*
1127	 * The caller of this function should also call
1128	 * bpf_object__elf_finish() after data collection to return
1129	 * obj_buf to the user. If not, we should duplicate the buffer to
1130	 * avoid the user freeing it before ELF processing is finished.
1131 */
1132 obj->efile.obj_buf = obj_buf;
1133 obj->efile.obj_buf_sz = obj_buf_sz;
1134 obj->efile.maps_shndx = -1;
1135 obj->efile.btf_maps_shndx = -1;
1136 obj->efile.data_shndx = -1;
1137 obj->efile.rodata_shndx = -1;
1138 obj->efile.bss_shndx = -1;
1139 obj->efile.st_ops_shndx = -1;
1140 obj->kconfig_map_idx = -1;
1141 obj->rodata_map_idx = -1;
1142
1143 obj->kern_version = get_kernel_version();
1144 obj->loaded = false;
1145
1146 INIT_LIST_HEAD(&obj->list);
1147 list_add(&obj->list, &bpf_objects_list);
1148 return obj;
1149}
1150
1151static void bpf_object__elf_finish(struct bpf_object *obj)
1152{
1153 if (!obj_elf_valid(obj))
1154 return;
1155
1156 if (obj->efile.elf) {
1157 elf_end(obj->efile.elf);
1158 obj->efile.elf = NULL;
1159 }
1160 obj->efile.symbols = NULL;
1161 obj->efile.data = NULL;
1162 obj->efile.rodata = NULL;
1163 obj->efile.bss = NULL;
1164 obj->efile.st_ops_data = NULL;
1165
1166 zfree(&obj->efile.reloc_sects);
1167 obj->efile.nr_reloc_sects = 0;
1168 zclose(obj->efile.fd);
1169 obj->efile.obj_buf = NULL;
1170 obj->efile.obj_buf_sz = 0;
1171}
1172
1173static int bpf_object__elf_init(struct bpf_object *obj)
1174{
1175 int err = 0;
1176 GElf_Ehdr *ep;
1177
1178 if (obj_elf_valid(obj)) {
1179 pr_warn("elf: init internal error\n");
1180 return -LIBBPF_ERRNO__LIBELF;
1181 }
1182
1183 if (obj->efile.obj_buf_sz > 0) {
1184 /*
1185 * obj_buf should have been validated by
1186 * bpf_object__open_buffer().
1187 */
1188 obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
1189 obj->efile.obj_buf_sz);
1190 } else {
1191 obj->efile.fd = open(obj->path, O_RDONLY);
1192 if (obj->efile.fd < 0) {
1193 char errmsg[STRERR_BUFSIZE], *cp;
1194
1195 err = -errno;
1196 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
1197 pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
1198 return err;
1199 }
1200
1201 obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
1202 }
1203
1204 if (!obj->efile.elf) {
1205 pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
1206 err = -LIBBPF_ERRNO__LIBELF;
1207 goto errout;
1208 }
1209
1210 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
1211 pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
1212 err = -LIBBPF_ERRNO__FORMAT;
1213 goto errout;
1214 }
1215 ep = &obj->efile.ehdr;
1216
1217 if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
1218 pr_warn("elf: failed to get section names section index for %s: %s\n",
1219 obj->path, elf_errmsg(-1));
1220 err = -LIBBPF_ERRNO__FORMAT;
1221 goto errout;
1222 }
1223
1224	/* ELF is corrupted/truncated, avoid calling elf_strptr(). */
1225 if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
1226 pr_warn("elf: failed to get section names strings from %s: %s\n",
1227 obj->path, elf_errmsg(-1));
1228 err = -LIBBPF_ERRNO__FORMAT;
1229 goto errout;
1230 }
1231
1232 /* Old LLVM set e_machine to EM_NONE */
1233 if (ep->e_type != ET_REL ||
1234 (ep->e_machine && ep->e_machine != EM_BPF)) {
1235 pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
1236 err = -LIBBPF_ERRNO__FORMAT;
1237 goto errout;
1238 }
1239
1240 return 0;
1241errout:
1242 bpf_object__elf_finish(obj);
1243 return err;
1244}
1245
1246static int bpf_object__check_endianness(struct bpf_object *obj)
1247{
1248#if __BYTE_ORDER == __LITTLE_ENDIAN
1249 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
1250 return 0;
1251#elif __BYTE_ORDER == __BIG_ENDIAN
1252 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
1253 return 0;
1254#else
1255# error "Unrecognized __BYTE_ORDER__"
1256#endif
1257 pr_warn("elf: endianness mismatch in %s.\n", obj->path);
1258 return -LIBBPF_ERRNO__ENDIAN;
1259}
1260
1261static int
1262bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
1263{
1264 memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
1265 pr_debug("license of %s is %s\n", obj->path, obj->license);
1266 return 0;
1267}
1268
1269static int
1270bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
1271{
1272 __u32 kver;
1273
1274 if (size != sizeof(kver)) {
1275 pr_warn("invalid kver section in %s\n", obj->path);
1276 return -LIBBPF_ERRNO__FORMAT;
1277 }
1278 memcpy(&kver, data, sizeof(kver));
1279 obj->kern_version = kver;
1280 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1281 return 0;
1282}
1283
1284static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
1285{
1286 if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
1287 type == BPF_MAP_TYPE_HASH_OF_MAPS)
1288 return true;
1289 return false;
1290}
1291
1292int bpf_object__section_size(const struct bpf_object *obj, const char *name,
1293 __u32 *size)
1294{
1295 int ret = -ENOENT;
1296
1297 *size = 0;
1298 if (!name) {
1299 return -EINVAL;
1300 } else if (!strcmp(name, DATA_SEC)) {
1301 if (obj->efile.data)
1302 *size = obj->efile.data->d_size;
1303 } else if (!strcmp(name, BSS_SEC)) {
1304 if (obj->efile.bss)
1305 *size = obj->efile.bss->d_size;
1306 } else if (!strcmp(name, RODATA_SEC)) {
1307 if (obj->efile.rodata)
1308 *size = obj->efile.rodata->d_size;
1309 } else if (!strcmp(name, STRUCT_OPS_SEC)) {
1310 if (obj->efile.st_ops_data)
1311 *size = obj->efile.st_ops_data->d_size;
1312 } else {
1313 Elf_Scn *scn = elf_sec_by_name(obj, name);
1314 Elf_Data *data = elf_sec_data(obj, scn);
1315
1316 if (data) {
1317 ret = 0; /* found it */
1318 *size = data->d_size;
1319 }
1320 }
1321
1322 return *size ? 0 : ret;
1323}
1324
1325int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
1326 __u32 *off)
1327{
1328 Elf_Data *symbols = obj->efile.symbols;
1329 const char *sname;
1330 size_t si;
1331
1332 if (!name || !off)
1333 return -EINVAL;
1334
1335 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
1336 GElf_Sym sym;
1337
1338 if (!gelf_getsym(symbols, si, &sym))
1339 continue;
1340 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1341 GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
1342 continue;
1343
1344 sname = elf_sym_str(obj, sym.st_name);
1345 if (!sname) {
1346 pr_warn("failed to get sym name string for var %s\n",
1347 name);
1348 return -EIO;
1349 }
1350 if (strcmp(name, sname) == 0) {
1351 *off = sym.st_value;
1352 return 0;
1353 }
1354 }
1355
1356 return -ENOENT;
1357}
1358
1359static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
1360{
1361 struct bpf_map *new_maps;
1362 size_t new_cap;
1363 int i;
1364
1365 if (obj->nr_maps < obj->maps_cap)
1366 return &obj->maps[obj->nr_maps++];
1367
1368 new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
1369 new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
1370 if (!new_maps) {
1371 pr_warn("alloc maps for object failed\n");
1372 return ERR_PTR(-ENOMEM);
1373 }
1374
1375 obj->maps_cap = new_cap;
1376 obj->maps = new_maps;
1377
1378 /* zero out new maps */
1379 memset(obj->maps + obj->nr_maps, 0,
1380 (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
1381 /*
1382	 * fill all fds with -1 so we won't close an incorrect fd (fd=0 is stdin)
1383	 * on failure (zclose won't close negative fds).
1384 */
1385 for (i = obj->nr_maps; i < obj->maps_cap; i++) {
1386 obj->maps[i].fd = -1;
1387 obj->maps[i].inner_map_fd = -1;
1388 }
1389
1390 return &obj->maps[obj->nr_maps++];
1391}
1392
1393static size_t bpf_map_mmap_sz(const struct bpf_map *map)
1394{
1395 long page_sz = sysconf(_SC_PAGE_SIZE);
1396 size_t map_sz;
1397
1398 map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
1399 map_sz = roundup(map_sz, page_sz);
1400 return map_sz;
1401}
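
/* Worked example for the sizing above (illustrative, assuming a 4 KiB page):
 * value_size = 7 and max_entries = 100 give roundup(7, 8) * 100 = 800 bytes,
 * which roundup(800, 4096) turns into a single 4096-byte mapping.
 */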
1402
1403static char *internal_map_name(struct bpf_object *obj,
1404 enum libbpf_map_type type)
1405{
1406 char map_name[BPF_OBJ_NAME_LEN], *p;
1407 const char *sfx = libbpf_type_to_btf_name[type];
1408 int sfx_len = max((size_t)7, strlen(sfx));
1409 int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
1410 strlen(obj->name));
1411
1412 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
1413 sfx_len, libbpf_type_to_btf_name[type]);
1414
1415 /* sanitise map name to characters allowed by kernel */
1416 for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
1417 if (!isalnum(*p) && *p != '_' && *p != '.')
1418 *p = '_';
1419
1420 return strdup(map_name);
1421}
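
/* Illustrative example of the naming above, assuming BPF_OBJ_NAME_LEN is 16
 * as in the kernel UAPI header: an object named "my_object_name" with type
 * LIBBPF_MAP_RODATA gets sfx_len = 7 and pfx_len = 8, producing the map
 * name "my_objec.rodata".
 */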
1422
1423static int
1424bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1425 int sec_idx, void *data, size_t data_sz)
1426{
1427 struct bpf_map_def *def;
1428 struct bpf_map *map;
1429 int err;
1430
1431 map = bpf_object__add_map(obj);
1432 if (IS_ERR(map))
1433 return PTR_ERR(map);
1434
1435 map->libbpf_type = type;
1436 map->sec_idx = sec_idx;
1437 map->sec_offset = 0;
1438 map->name = internal_map_name(obj, type);
1439 if (!map->name) {
1440 pr_warn("failed to alloc map name\n");
1441 return -ENOMEM;
1442 }
1443
1444 def = &map->def;
1445 def->type = BPF_MAP_TYPE_ARRAY;
1446 def->key_size = sizeof(int);
1447 def->value_size = data_sz;
1448 def->max_entries = 1;
1449 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
1450 ? BPF_F_RDONLY_PROG : 0;
1451 def->map_flags |= BPF_F_MMAPABLE;
1452
1453 pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
1454 map->name, map->sec_idx, map->sec_offset, def->map_flags);
1455
1456 map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
1457 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1458 if (map->mmaped == MAP_FAILED) {
1459 err = -errno;
1460 map->mmaped = NULL;
1461 pr_warn("failed to alloc map '%s' content buffer: %d\n",
1462 map->name, err);
1463 zfree(&map->name);
1464 return err;
1465 }
1466
1467 if (data)
1468 memcpy(map->mmaped, data, data_sz);
1469
1470 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1471 return 0;
1472}
1473
1474static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1475{
1476 int err;
1477
1478 /*
1479 * Populate obj->maps with libbpf internal maps.
1480 */
1481 if (obj->efile.data_shndx >= 0) {
1482 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
1483 obj->efile.data_shndx,
1484 obj->efile.data->d_buf,
1485 obj->efile.data->d_size);
1486 if (err)
1487 return err;
1488 }
1489 if (obj->efile.rodata_shndx >= 0) {
1490 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
1491 obj->efile.rodata_shndx,
1492 obj->efile.rodata->d_buf,
1493 obj->efile.rodata->d_size);
1494 if (err)
1495 return err;
1496
1497 obj->rodata_map_idx = obj->nr_maps - 1;
1498 }
1499 if (obj->efile.bss_shndx >= 0) {
1500 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
1501 obj->efile.bss_shndx,
1502 NULL,
1503 obj->efile.bss->d_size);
1504 if (err)
1505 return err;
1506 }
1507 return 0;
1508}
1509
1510
1511static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1512 const void *name)
1513{
1514 int i;
1515
1516 for (i = 0; i < obj->nr_extern; i++) {
1517 if (strcmp(obj->externs[i].name, name) == 0)
1518 return &obj->externs[i];
1519 }
1520 return NULL;
1521}
1522
1523static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
1524 char value)
1525{
1526 switch (ext->kcfg.type) {
1527 case KCFG_BOOL:
1528 if (value == 'm') {
1529 pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
1530 ext->name, value);
1531 return -EINVAL;
1532 }
1533 *(bool *)ext_val = value == 'y' ? true : false;
1534 break;
1535 case KCFG_TRISTATE:
1536 if (value == 'y')
1537 *(enum libbpf_tristate *)ext_val = TRI_YES;
1538 else if (value == 'm')
1539 *(enum libbpf_tristate *)ext_val = TRI_MODULE;
1540 else /* value == 'n' */
1541 *(enum libbpf_tristate *)ext_val = TRI_NO;
1542 break;
1543 case KCFG_CHAR:
1544 *(char *)ext_val = value;
1545 break;
1546 case KCFG_UNKNOWN:
1547 case KCFG_INT:
1548 case KCFG_CHAR_ARR:
1549 default:
1550 pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
1551 ext->name, value);
1552 return -EINVAL;
1553 }
1554 ext->is_set = true;
1555 return 0;
1556}
1557
1558static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
1559 const char *value)
1560{
1561 size_t len;
1562
1563 if (ext->kcfg.type != KCFG_CHAR_ARR) {
1564 pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
1565 return -EINVAL;
1566 }
1567
1568 len = strlen(value);
1569 if (value[len - 1] != '"') {
1570 pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
1571 ext->name, value);
1572 return -EINVAL;
1573 }
1574
1575 /* strip quotes */
1576 len -= 2;
1577 if (len >= ext->kcfg.sz) {
1578 pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
1579 ext->name, value, len, ext->kcfg.sz - 1);
1580 len = ext->kcfg.sz - 1;
1581 }
1582 memcpy(ext_val, value + 1, len);
1583 ext_val[len] = '\0';
1584 ext->is_set = true;
1585 return 0;
1586}
1587
1588static int parse_u64(const char *value, __u64 *res)
1589{
1590 char *value_end;
1591 int err;
1592
1593 errno = 0;
1594 *res = strtoull(value, &value_end, 0);
1595 if (errno) {
1596 err = -errno;
1597 pr_warn("failed to parse '%s' as integer: %d\n", value, err);
1598 return err;
1599 }
1600 if (*value_end) {
1601 pr_warn("failed to parse '%s' as integer completely\n", value);
1602 return -EINVAL;
1603 }
1604 return 0;
1605}
1606
1607static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
1608{
1609 int bit_sz = ext->kcfg.sz * 8;
1610
1611 if (ext->kcfg.sz == 8)
1612 return true;
1613
1614	/* Validate that the value stored in u64 fits in an integer of
1615	 * `ext->kcfg.sz` bytes without any loss of information. If the target integer
1616 * is signed, we rely on the following limits of integer type of
1617 * Y bits and subsequent transformation:
1618 *
1619 * -2^(Y-1) <= X <= 2^(Y-1) - 1
1620 * 0 <= X + 2^(Y-1) <= 2^Y - 1
1621 * 0 <= X + 2^(Y-1) < 2^Y
1622 *
1623 * For unsigned target integer, check that all the (64 - Y) bits are
1624 * zero.
1625 */
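	/* Worked examples of the checks below (illustrative): for an unsigned
	 * 2-byte target (bit_sz = 16), v >> 16 == 0 exactly when v <= 65535;
	 * for a signed 1-byte target, 127 + 128 = 255 < 256 passes, while
	 * 128 + 128 = 256 does not, so 128 is correctly rejected.
	 */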
1626 if (ext->kcfg.is_signed)
1627 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
1628 else
1629 return (v >> bit_sz) == 0;
1630}
1631
1632static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
1633 __u64 value)
1634{
1635 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
1636 pr_warn("extern (kcfg) %s=%llu should be integer\n",
1637 ext->name, (unsigned long long)value);
1638 return -EINVAL;
1639 }
1640 if (!is_kcfg_value_in_range(ext, value)) {
1641 pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
1642 ext->name, (unsigned long long)value, ext->kcfg.sz);
1643 return -ERANGE;
1644 }
1645 switch (ext->kcfg.sz) {
1646 case 1: *(__u8 *)ext_val = value; break;
1647 case 2: *(__u16 *)ext_val = value; break;
1648 case 4: *(__u32 *)ext_val = value; break;
1649 case 8: *(__u64 *)ext_val = value; break;
1650 default:
1651 return -EINVAL;
1652 }
1653 ext->is_set = true;
1654 return 0;
1655}
1656
1657static int bpf_object__process_kconfig_line(struct bpf_object *obj,
1658 char *buf, void *data)
1659{
1660 struct extern_desc *ext;
1661 char *sep, *value;
1662 int len, err = 0;
1663 void *ext_val;
1664 __u64 num;
1665
1666 if (strncmp(buf, "CONFIG_", 7))
1667 return 0;
1668
1669 sep = strchr(buf, '=');
1670 if (!sep) {
1671 pr_warn("failed to parse '%s': no separator\n", buf);
1672 return -EINVAL;
1673 }
1674
1675 /* Trim ending '\n' */
1676 len = strlen(buf);
1677 if (buf[len - 1] == '\n')
1678 buf[len - 1] = '\0';
1679 /* Split on '=' and ensure that a value is present. */
1680 *sep = '\0';
1681 if (!sep[1]) {
1682 *sep = '=';
1683 pr_warn("failed to parse '%s': no value\n", buf);
1684 return -EINVAL;
1685 }
1686
1687 ext = find_extern_by_name(obj, buf);
1688 if (!ext || ext->is_set)
1689 return 0;
1690
1691 ext_val = data + ext->kcfg.data_off;
1692 value = sep + 1;
1693
1694 switch (*value) {
1695 case 'y': case 'n': case 'm':
1696 err = set_kcfg_value_tri(ext, ext_val, *value);
1697 break;
1698 case '"':
1699 err = set_kcfg_value_str(ext, ext_val, value);
1700 break;
1701 default:
1702 /* assume integer */
1703 err = parse_u64(value, &num);
1704 if (err) {
1705 pr_warn("extern (kcfg) %s=%s should be integer\n",
1706 ext->name, value);
1707 return err;
1708 }
1709 err = set_kcfg_value_num(ext, ext_val, num);
1710 break;
1711 }
1712 if (err)
1713 return err;
1714 pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
1715 return 0;
1716}
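
/* Illustrative inputs for the routing above (assuming matching externs are
 * declared): "CONFIG_BPF_SYSCALL=y" goes to set_kcfg_value_tri(), a line
 * like CONFIG_DEFAULT_HOSTNAME="(none)" goes to set_kcfg_value_str(), and
 * "CONFIG_HZ=250" goes through parse_u64() and set_kcfg_value_num().
 */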
1717
1718static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
1719{
1720 char buf[PATH_MAX];
1721 struct utsname uts;
1722 int len, err = 0;
1723 gzFile file;
1724
1725 uname(&uts);
1726 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
1727 if (len < 0)
1728 return -EINVAL;
1729 else if (len >= PATH_MAX)
1730 return -ENAMETOOLONG;
1731
1732 /* gzopen also accepts uncompressed files. */
1733 file = gzopen(buf, "r");
1734 if (!file)
1735 file = gzopen("/proc/config.gz", "r");
1736
1737 if (!file) {
1738 pr_warn("failed to open system Kconfig\n");
1739 return -ENOENT;
1740 }
1741
1742 while (gzgets(file, buf, sizeof(buf))) {
1743 err = bpf_object__process_kconfig_line(obj, buf, data);
1744 if (err) {
1745 pr_warn("error parsing system Kconfig line '%s': %d\n",
1746 buf, err);
1747 goto out;
1748 }
1749 }
1750
1751out:
1752 gzclose(file);
1753 return err;
1754}
1755
1756static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
1757 const char *config, void *data)
1758{
1759 char buf[PATH_MAX];
1760 int err = 0;
1761 FILE *file;
1762
1763 file = fmemopen((void *)config, strlen(config), "r");
1764 if (!file) {
1765 err = -errno;
1766 pr_warn("failed to open in-memory Kconfig: %d\n", err);
1767 return err;
1768 }
1769
1770 while (fgets(buf, sizeof(buf), file)) {
1771 err = bpf_object__process_kconfig_line(obj, buf, data);
1772 if (err) {
1773 pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
1774 buf, err);
1775 break;
1776 }
1777 }
1778
1779 fclose(file);
1780 return err;
1781}
1782
1783static int bpf_object__init_kconfig_map(struct bpf_object *obj)
1784{
1785 struct extern_desc *last_ext = NULL, *ext;
1786 size_t map_sz;
1787 int i, err;
1788
1789 for (i = 0; i < obj->nr_extern; i++) {
1790 ext = &obj->externs[i];
1791 if (ext->type == EXT_KCFG)
1792 last_ext = ext;
1793 }
1794
1795 if (!last_ext)
1796 return 0;
1797
1798 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
1799 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
1800 obj->efile.symbols_shndx,
1801 NULL, map_sz);
1802 if (err)
1803 return err;
1804
1805 obj->kconfig_map_idx = obj->nr_maps - 1;
1806
1807 return 0;
1808}
1809
1810static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
1811{
1812 Elf_Data *symbols = obj->efile.symbols;
1813 int i, map_def_sz = 0, nr_maps = 0, nr_syms;
1814 Elf_Data *data = NULL;
1815 Elf_Scn *scn;
1816
1817 if (obj->efile.maps_shndx < 0)
1818 return 0;
1819
1820 if (!symbols)
1821 return -EINVAL;
1822
1823 scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
1824 data = elf_sec_data(obj, scn);
1825 if (!scn || !data) {
1826 pr_warn("elf: failed to get legacy map definitions for %s\n",
1827 obj->path);
1828 return -EINVAL;
1829 }
1830
1831 /*
1832 * Count number of maps. Each map has a name.
1833 * Array of maps is not supported: only the first element is
1834 * considered.
1835 *
1836 * TODO: Detect array of map and report error.
1837 */
1838 nr_syms = symbols->d_size / sizeof(GElf_Sym);
1839 for (i = 0; i < nr_syms; i++) {
1840 GElf_Sym sym;
1841
1842 if (!gelf_getsym(symbols, i, &sym))
1843 continue;
1844 if (sym.st_shndx != obj->efile.maps_shndx)
1845 continue;
1846 nr_maps++;
1847 }
1848 /* Assume equally sized map definitions */
1849 pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
1850 nr_maps, data->d_size, obj->path);
1851
1852 if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
1853 pr_warn("elf: unable to determine legacy map definition size in %s\n",
1854 obj->path);
1855 return -EINVAL;
1856 }
1857 map_def_sz = data->d_size / nr_maps;
1858
1859 /* Fill obj->maps using data in "maps" section. */
1860 for (i = 0; i < nr_syms; i++) {
1861 GElf_Sym sym;
1862 const char *map_name;
1863 struct bpf_map_def *def;
1864 struct bpf_map *map;
1865
1866 if (!gelf_getsym(symbols, i, &sym))
1867 continue;
1868 if (sym.st_shndx != obj->efile.maps_shndx)
1869 continue;
1870
1871 map = bpf_object__add_map(obj);
1872 if (IS_ERR(map))
1873 return PTR_ERR(map);
1874
1875 map_name = elf_sym_str(obj, sym.st_name);
1876 if (!map_name) {
1877 pr_warn("failed to get map #%d name sym string for obj %s\n",
1878 i, obj->path);
1879 return -LIBBPF_ERRNO__FORMAT;
1880 }
1881
1882 if (GELF_ST_TYPE(sym.st_info) == STT_SECTION
1883 || GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
1884 pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
1885 return -ENOTSUP;
1886 }
1887
1888 map->libbpf_type = LIBBPF_MAP_UNSPEC;
1889 map->sec_idx = sym.st_shndx;
1890 map->sec_offset = sym.st_value;
1891 pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
1892 map_name, map->sec_idx, map->sec_offset);
1893 if (sym.st_value + map_def_sz > data->d_size) {
1894 pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
1895 obj->path, map_name);
1896 return -EINVAL;
1897 }
1898
1899 map->name = strdup(map_name);
1900 if (!map->name) {
1901 pr_warn("failed to alloc map name\n");
1902 return -ENOMEM;
1903 }
1904 pr_debug("map %d is \"%s\"\n", i, map->name);
1905 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
1906 /*
1907 * If the definition of the map in the object file fits in
1908 * bpf_map_def, copy it. Any extra fields in our version
1909		 * of bpf_map_def will default to zero, since bpf_object__add_map()
1910		 * zero-initializes newly added map slots.
1911 */
1912 if (map_def_sz <= sizeof(struct bpf_map_def)) {
1913 memcpy(&map->def, def, map_def_sz);
1914 } else {
1915 /*
1916 * Here the map structure being read is bigger than what
1917 * we expect, truncate if the excess bits are all zero.
1918 * If they are not zero, reject this map as
1919 * incompatible.
1920 */
1921 char *b;
1922
1923 for (b = ((char *)def) + sizeof(struct bpf_map_def);
1924 b < ((char *)def) + map_def_sz; b++) {
1925 if (*b != 0) {
1926 pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
1927 obj->path, map_name);
1928 if (strict)
1929 return -EINVAL;
1930 }
1931 }
1932 memcpy(&map->def, def, sizeof(struct bpf_map_def));
1933 }
1934 }
1935 return 0;
1936}
1937
1938const struct btf_type *
1939skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
1940{
1941 const struct btf_type *t = btf__type_by_id(btf, id);
1942
1943 if (res_id)
1944 *res_id = id;
1945
1946 while (btf_is_mod(t) || btf_is_typedef(t)) {
1947 if (res_id)
1948 *res_id = t->type;
1949 t = btf__type_by_id(btf, t->type);
1950 }
1951
1952 return t;
1953}
1954
1955static const struct btf_type *
1956resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
1957{
1958 const struct btf_type *t;
1959
1960 t = skip_mods_and_typedefs(btf, id, NULL);
1961 if (!btf_is_ptr(t))
1962 return NULL;
1963
1964 t = skip_mods_and_typedefs(btf, t->type, res_id);
1965
1966 return btf_is_func_proto(t) ? t : NULL;
1967}
1968
1969static const char *__btf_kind_str(__u16 kind)
1970{
1971 switch (kind) {
1972 case BTF_KIND_UNKN: return "void";
1973 case BTF_KIND_INT: return "int";
1974 case BTF_KIND_PTR: return "ptr";
1975 case BTF_KIND_ARRAY: return "array";
1976 case BTF_KIND_STRUCT: return "struct";
1977 case BTF_KIND_UNION: return "union";
1978 case BTF_KIND_ENUM: return "enum";
1979 case BTF_KIND_FWD: return "fwd";
1980 case BTF_KIND_TYPEDEF: return "typedef";
1981 case BTF_KIND_VOLATILE: return "volatile";
1982 case BTF_KIND_CONST: return "const";
1983 case BTF_KIND_RESTRICT: return "restrict";
1984 case BTF_KIND_FUNC: return "func";
1985 case BTF_KIND_FUNC_PROTO: return "func_proto";
1986 case BTF_KIND_VAR: return "var";
1987 case BTF_KIND_DATASEC: return "datasec";
1988 case BTF_KIND_FLOAT: return "float";
1989 default: return "unknown";
1990 }
1991}
1992
1993const char *btf_kind_str(const struct btf_type *t)
1994{
1995 return __btf_kind_str(btf_kind(t));
1996}
1997
1998/*
1999 * Fetch integer attribute of BTF map definition. Such attributes are
2000 * represented using a pointer to an array, in which dimensionality of array
2001 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
2002 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
2003 * type definition, while using only sizeof(void *) space in ELF data section.
2004 */
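/* Note: the __uint()/__type() convenience macros in bpf_helpers.h expand to
 * exactly this encoding, e.g. __uint(max_entries, 64) becomes
 * int (*max_entries)[64].
 */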
2005static bool get_map_field_int(const char *map_name, const struct btf *btf,
2006 const struct btf_member *m, __u32 *res)
2007{
2008 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2009 const char *name = btf__name_by_offset(btf, m->name_off);
2010 const struct btf_array *arr_info;
2011 const struct btf_type *arr_t;
2012
2013 if (!btf_is_ptr(t)) {
2014 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
2015 map_name, name, btf_kind_str(t));
2016 return false;
2017 }
2018
2019 arr_t = btf__type_by_id(btf, t->type);
2020 if (!arr_t) {
2021 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
2022 map_name, name, t->type);
2023 return false;
2024 }
2025 if (!btf_is_array(arr_t)) {
2026 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
2027 map_name, name, btf_kind_str(arr_t));
2028 return false;
2029 }
2030 arr_info = btf_array(arr_t);
2031 *res = arr_info->nelems;
2032 return true;
2033}
2034
2035static int build_map_pin_path(struct bpf_map *map, const char *path)
2036{
2037 char buf[PATH_MAX];
2038 int len;
2039
2040 if (!path)
2041 path = "/sys/fs/bpf";
2042
2043 len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
2044 if (len < 0)
2045 return -EINVAL;
2046 else if (len >= PATH_MAX)
2047 return -ENAMETOOLONG;
2048
2049 return bpf_map__set_pin_path(map, buf);
2050}
2051
2052int parse_btf_map_def(const char *map_name, struct btf *btf,
2053 const struct btf_type *def_t, bool strict,
2054 struct btf_map_def *map_def, struct btf_map_def *inner_def)
2055{
2056 const struct btf_type *t;
2057 const struct btf_member *m;
2058 bool is_inner = inner_def == NULL;
2059 int vlen, i;
2060
2061 vlen = btf_vlen(def_t);
2062 m = btf_members(def_t);
2063 for (i = 0; i < vlen; i++, m++) {
2064 const char *name = btf__name_by_offset(btf, m->name_off);
2065
2066 if (!name) {
2067 pr_warn("map '%s': invalid field #%d.\n", map_name, i);
2068 return -EINVAL;
2069 }
2070 if (strcmp(name, "type") == 0) {
2071 if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2072 return -EINVAL;
2073 map_def->parts |= MAP_DEF_MAP_TYPE;
2074 } else if (strcmp(name, "max_entries") == 0) {
2075 if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2076 return -EINVAL;
2077 map_def->parts |= MAP_DEF_MAX_ENTRIES;
2078 } else if (strcmp(name, "map_flags") == 0) {
2079 if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2080 return -EINVAL;
2081 map_def->parts |= MAP_DEF_MAP_FLAGS;
2082 } else if (strcmp(name, "numa_node") == 0) {
2083 if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2084 return -EINVAL;
2085 map_def->parts |= MAP_DEF_NUMA_NODE;
2086 } else if (strcmp(name, "key_size") == 0) {
2087 __u32 sz;
2088
2089 if (!get_map_field_int(map_name, btf, m, &sz))
2090 return -EINVAL;
2091 if (map_def->key_size && map_def->key_size != sz) {
2092 pr_warn("map '%s': conflicting key size %u != %u.\n",
2093 map_name, map_def->key_size, sz);
2094 return -EINVAL;
2095 }
2096 map_def->key_size = sz;
2097 map_def->parts |= MAP_DEF_KEY_SIZE;
2098 } else if (strcmp(name, "key") == 0) {
2099 __s64 sz;
2100
2101 t = btf__type_by_id(btf, m->type);
2102 if (!t) {
2103 pr_warn("map '%s': key type [%d] not found.\n",
2104 map_name, m->type);
2105 return -EINVAL;
2106 }
2107 if (!btf_is_ptr(t)) {
2108 pr_warn("map '%s': key spec is not PTR: %s.\n",
2109 map_name, btf_kind_str(t));
2110 return -EINVAL;
2111 }
2112 sz = btf__resolve_size(btf, t->type);
2113 if (sz < 0) {
2114 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2115 map_name, t->type, (ssize_t)sz);
2116 return sz;
2117 }
2118 if (map_def->key_size && map_def->key_size != sz) {
2119 pr_warn("map '%s': conflicting key size %u != %zd.\n",
2120 map_name, map_def->key_size, (ssize_t)sz);
2121 return -EINVAL;
2122 }
2123 map_def->key_size = sz;
2124 map_def->key_type_id = t->type;
2125 map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2126 } else if (strcmp(name, "value_size") == 0) {
2127 __u32 sz;
2128
2129 if (!get_map_field_int(map_name, btf, m, &sz))
2130 return -EINVAL;
2131 if (map_def->value_size && map_def->value_size != sz) {
2132 pr_warn("map '%s': conflicting value size %u != %u.\n",
2133 map_name, map_def->value_size, sz);
2134 return -EINVAL;
2135 }
2136 map_def->value_size = sz;
2137 map_def->parts |= MAP_DEF_VALUE_SIZE;
2138 } else if (strcmp(name, "value") == 0) {
2139 __s64 sz;
2140
2141 t = btf__type_by_id(btf, m->type);
2142 if (!t) {
2143 pr_warn("map '%s': value type [%d] not found.\n",
2144 map_name, m->type);
2145 return -EINVAL;
2146 }
2147 if (!btf_is_ptr(t)) {
2148 pr_warn("map '%s': value spec is not PTR: %s.\n",
2149 map_name, btf_kind_str(t));
2150 return -EINVAL;
2151 }
2152 sz = btf__resolve_size(btf, t->type);
2153 if (sz < 0) {
2154 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2155 map_name, t->type, (ssize_t)sz);
2156 return sz;
2157 }
2158 if (map_def->value_size && map_def->value_size != sz) {
2159 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2160 map_name, map_def->value_size, (ssize_t)sz);
2161 return -EINVAL;
2162 }
2163 map_def->value_size = sz;
2164 map_def->value_type_id = t->type;
2165 map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
2166		} else if (strcmp(name, "values") == 0) {
2168 char inner_map_name[128];
2169 int err;
2170
2171 if (is_inner) {
2172 pr_warn("map '%s': multi-level inner maps not supported.\n",
2173 map_name);
2174 return -ENOTSUP;
2175 }
2176 if (i != vlen - 1) {
2177 pr_warn("map '%s': '%s' member should be last.\n",
2178 map_name, name);
2179 return -EINVAL;
2180 }
2181 if (!bpf_map_type__is_map_in_map(map_def->map_type)) {
2182 pr_warn("map '%s': should be map-in-map.\n",
2183 map_name);
2184 return -ENOTSUP;
2185 }
2186 if (map_def->value_size && map_def->value_size != 4) {
2187 pr_warn("map '%s': conflicting value size %u != 4.\n",
2188 map_name, map_def->value_size);
2189 return -EINVAL;
2190 }
2191 map_def->value_size = 4;
2192 t = btf__type_by_id(btf, m->type);
2193 if (!t) {
2194 pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
2195 map_name, m->type);
2196 return -EINVAL;
2197 }
2198 if (!btf_is_array(t) || btf_array(t)->nelems) {
2199 pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
2200 map_name);
2201 return -EINVAL;
2202 }
2203 t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2204 if (!btf_is_ptr(t)) {
2205 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2206 map_name, btf_kind_str(t));
2207 return -EINVAL;
2208 }
2209 t = skip_mods_and_typedefs(btf, t->type, NULL);
2210 if (!btf_is_struct(t)) {
2211 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2212 map_name, btf_kind_str(t));
2213 return -EINVAL;
2214 }
2215
2216 snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2217 err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
2218 if (err)
2219 return err;
2220
2221 map_def->parts |= MAP_DEF_INNER_MAP;
2222 } else if (strcmp(name, "pinning") == 0) {
2223 __u32 val;
2224
2225 if (is_inner) {
2226 pr_warn("map '%s': inner def can't be pinned.\n", map_name);
2227 return -EINVAL;
2228 }
2229 if (!get_map_field_int(map_name, btf, m, &val))
2230 return -EINVAL;
2231 if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
2232 pr_warn("map '%s': invalid pinning value %u.\n",
2233 map_name, val);
2234 return -EINVAL;
2235 }
2236 map_def->pinning = val;
2237 map_def->parts |= MAP_DEF_PINNING;
2238 } else {
2239 if (strict) {
2240 pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
2241 return -ENOTSUP;
2242 }
2243 pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
2244 }
2245 }
2246
2247 if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2248 pr_warn("map '%s': map type isn't specified.\n", map_name);
2249 return -EINVAL;
2250 }
2251
2252 return 0;
2253}
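
/* For illustration, a map-in-map definition exercising the "values" and
 * "pinning" handling above would typically be declared in BPF program source
 * roughly as (names are placeholders):
 *
 *	struct inner_map {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 8);
 *		__uint(pinning, LIBBPF_PIN_BY_NAME);
 *		__type(key, __u32);
 *		__array(values, struct inner_map);
 *	} outer_map SEC(".maps");
 *
 * The zero-sized "values" array carries the inner definition, which is parsed
 * by the recursive parse_btf_map_def() call, and LIBBPF_PIN_BY_NAME triggers
 * build_map_pin_path() during map initialization.
 */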
2254
2255static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2256{
2257 map->def.type = def->map_type;
2258 map->def.key_size = def->key_size;
2259 map->def.value_size = def->value_size;
2260 map->def.max_entries = def->max_entries;
2261 map->def.map_flags = def->map_flags;
2262
2263 map->numa_node = def->numa_node;
2264 map->btf_key_type_id = def->key_type_id;
2265 map->btf_value_type_id = def->value_type_id;
2266
2267 if (def->parts & MAP_DEF_MAP_TYPE)
2268 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2269
2270 if (def->parts & MAP_DEF_KEY_TYPE)
2271 pr_debug("map '%s': found key [%u], sz = %u.\n",
2272 map->name, def->key_type_id, def->key_size);
2273 else if (def->parts & MAP_DEF_KEY_SIZE)
2274 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2275
2276 if (def->parts & MAP_DEF_VALUE_TYPE)
2277 pr_debug("map '%s': found value [%u], sz = %u.\n",
2278 map->name, def->value_type_id, def->value_size);
2279 else if (def->parts & MAP_DEF_VALUE_SIZE)
2280 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2281
2282 if (def->parts & MAP_DEF_MAX_ENTRIES)
2283 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2284 if (def->parts & MAP_DEF_MAP_FLAGS)
2285 pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags);
2286 if (def->parts & MAP_DEF_PINNING)
2287 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2288 if (def->parts & MAP_DEF_NUMA_NODE)
2289 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2290
2291 if (def->parts & MAP_DEF_INNER_MAP)
2292 pr_debug("map '%s': found inner map definition.\n", map->name);
2293}
2294
2295static const char *btf_var_linkage_str(__u32 linkage)
2296{
2297 switch (linkage) {
2298 case BTF_VAR_STATIC: return "static";
2299 case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2300 case BTF_VAR_GLOBAL_EXTERN: return "extern";
2301 default: return "unknown";
2302 }
2303}
2304
2305static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2306 const struct btf_type *sec,
2307 int var_idx, int sec_idx,
2308 const Elf_Data *data, bool strict,
2309 const char *pin_root_path)
2310{
2311 struct btf_map_def map_def = {}, inner_def = {};
2312 const struct btf_type *var, *def;
2313 const struct btf_var_secinfo *vi;
2314 const struct btf_var *var_extra;
2315 const char *map_name;
2316 struct bpf_map *map;
2317 int err;
2318
2319 vi = btf_var_secinfos(sec) + var_idx;
2320 var = btf__type_by_id(obj->btf, vi->type);
2321 var_extra = btf_var(var);
2322 map_name = btf__name_by_offset(obj->btf, var->name_off);
2323
2324 if (map_name == NULL || map_name[0] == '\0') {
2325 pr_warn("map #%d: empty name.\n", var_idx);
2326 return -EINVAL;
2327 }
2328 if ((__u64)vi->offset + vi->size > data->d_size) {
2329 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2330 return -EINVAL;
2331 }
2332 if (!btf_is_var(var)) {
2333 pr_warn("map '%s': unexpected var kind %s.\n",
2334 map_name, btf_kind_str(var));
2335 return -EINVAL;
2336 }
2337 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2338 pr_warn("map '%s': unsupported map linkage %s.\n",
2339 map_name, btf_var_linkage_str(var_extra->linkage));
2340 return -EOPNOTSUPP;
2341 }
2342
2343 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2344 if (!btf_is_struct(def)) {
2345 pr_warn("map '%s': unexpected def kind %s.\n",
2346			map_name, btf_kind_str(def));
2347 return -EINVAL;
2348 }
2349 if (def->size > vi->size) {
2350 pr_warn("map '%s': invalid def size.\n", map_name);
2351 return -EINVAL;
2352 }
2353
2354 map = bpf_object__add_map(obj);
2355 if (IS_ERR(map))
2356 return PTR_ERR(map);
2357 map->name = strdup(map_name);
2358 if (!map->name) {
2359 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2360 return -ENOMEM;
2361 }
2362 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2363 map->def.type = BPF_MAP_TYPE_UNSPEC;
2364 map->sec_idx = sec_idx;
2365 map->sec_offset = vi->offset;
2366 map->btf_var_idx = var_idx;
2367 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2368 map_name, map->sec_idx, map->sec_offset);
2369
2370 err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2371 if (err)
2372 return err;
2373
2374 fill_map_from_def(map, &map_def);
2375
2376 if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2377 err = build_map_pin_path(map, pin_root_path);
2378 if (err) {
2379 pr_warn("map '%s': couldn't build pin path.\n", map->name);
2380 return err;
2381 }
2382 }
2383
2384 if (map_def.parts & MAP_DEF_INNER_MAP) {
2385 map->inner_map = calloc(1, sizeof(*map->inner_map));
2386 if (!map->inner_map)
2387 return -ENOMEM;
2388 map->inner_map->fd = -1;
2389 map->inner_map->sec_idx = sec_idx;
2390 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2391 if (!map->inner_map->name)
2392 return -ENOMEM;
2393 sprintf(map->inner_map->name, "%s.inner", map_name);
2394
2395 fill_map_from_def(map->inner_map, &inner_def);
2396 }
2397
2398 return 0;
2399}
2400
2401static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2402 const char *pin_root_path)
2403{
2404 const struct btf_type *sec = NULL;
2405 int nr_types, i, vlen, err;
2406 const struct btf_type *t;
2407 const char *name;
2408 Elf_Data *data;
2409 Elf_Scn *scn;
2410
2411 if (obj->efile.btf_maps_shndx < 0)
2412 return 0;
2413
2414 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2415 data = elf_sec_data(obj, scn);
2416 if (!scn || !data) {
2417 pr_warn("elf: failed to get %s map definitions for %s\n",
2418 MAPS_ELF_SEC, obj->path);
2419 return -EINVAL;
2420 }
2421
2422 nr_types = btf__get_nr_types(obj->btf);
2423 for (i = 1; i <= nr_types; i++) {
2424 t = btf__type_by_id(obj->btf, i);
2425 if (!btf_is_datasec(t))
2426 continue;
2427 name = btf__name_by_offset(obj->btf, t->name_off);
2428 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2429 sec = t;
2430 obj->efile.btf_maps_sec_btf_id = i;
2431 break;
2432 }
2433 }
2434
2435 if (!sec) {
2436 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2437 return -ENOENT;
2438 }
2439
2440 vlen = btf_vlen(sec);
2441 for (i = 0; i < vlen; i++) {
2442 err = bpf_object__init_user_btf_map(obj, sec, i,
2443 obj->efile.btf_maps_shndx,
2444 data, strict,
2445 pin_root_path);
2446 if (err)
2447 return err;
2448 }
2449
2450 return 0;
2451}
2452
2453static int bpf_object__init_maps(struct bpf_object *obj,
2454 const struct bpf_object_open_opts *opts)
2455{
2456 const char *pin_root_path;
2457 bool strict;
2458 int err;
2459
2460 strict = !OPTS_GET(opts, relaxed_maps, false);
2461 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2462
2463 err = bpf_object__init_user_maps(obj, strict);
2464 err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2465 err = err ?: bpf_object__init_global_data_maps(obj);
2466 err = err ?: bpf_object__init_kconfig_map(obj);
2467 err = err ?: bpf_object__init_struct_ops_maps(obj);
2468
2469 return err;
2470}
2471
2472static bool section_have_execinstr(struct bpf_object *obj, int idx)
2473{
2474 GElf_Shdr sh;
2475
2476 if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
2477 return false;
2478
2479 return sh.sh_flags & SHF_EXECINSTR;
2480}
2481
2482static bool btf_needs_sanitization(struct bpf_object *obj)
2483{
2484 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2485 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2486 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2487 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2488
2489 return !has_func || !has_datasec || !has_func_global || !has_float;
2490}
2491
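/* Rewrite, in place, BTF constructs that the running kernel does not yet
 * understand into older, accepted equivalents (VAR -> INT, DATASEC -> STRUCT,
 * FUNC_PROTO -> ENUM, FUNC -> TYPEDEF, global FUNC -> static FUNC,
 * FLOAT -> same-sized anonymous STRUCT) so the object's BTF can still load.
 */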
2492static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2493{
2494 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2495 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2496 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2497 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2498 struct btf_type *t;
2499 int i, j, vlen;
2500
2501 for (i = 1; i <= btf__get_nr_types(btf); i++) {
2502 t = (struct btf_type *)btf__type_by_id(btf, i);
2503
2504 if (!has_datasec && btf_is_var(t)) {
2505 /* replace VAR with INT */
2506 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
2507 /*
2508 * using size = 1 is the safest choice, 4 will be too
2509 * big and cause kernel BTF validation failure if
2510 * original variable took less than 4 bytes
2511 */
2512 t->size = 1;
2513 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
2514 } else if (!has_datasec && btf_is_datasec(t)) {
2515 /* replace DATASEC with STRUCT */
2516 const struct btf_var_secinfo *v = btf_var_secinfos(t);
2517 struct btf_member *m = btf_members(t);
2518 struct btf_type *vt;
2519 char *name;
2520
2521 name = (char *)btf__name_by_offset(btf, t->name_off);
2522 while (*name) {
2523 if (*name == '.')
2524 *name = '_';
2525 name++;
2526 }
2527
2528 vlen = btf_vlen(t);
2529 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2530 for (j = 0; j < vlen; j++, v++, m++) {
2531 /* order of field assignments is important */
2532 m->offset = v->offset * 8;
2533 m->type = v->type;
2534 /* preserve variable name as member name */
2535 vt = (void *)btf__type_by_id(btf, v->type);
2536 m->name_off = vt->name_off;
2537 }
2538 } else if (!has_func && btf_is_func_proto(t)) {
2539 /* replace FUNC_PROTO with ENUM */
2540 vlen = btf_vlen(t);
2541 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2542 t->size = sizeof(__u32); /* kernel enforced */
2543 } else if (!has_func && btf_is_func(t)) {
2544 /* replace FUNC with TYPEDEF */
2545 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2546 } else if (!has_func_global && btf_is_func(t)) {
2547 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2548 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
2549 } else if (!has_float && btf_is_float(t)) {
2550 /* replace FLOAT with an equally-sized empty STRUCT;
2551 * since C compilers do not accept e.g. "float" as a
2552 * valid struct name, make it anonymous
2553 */
2554 t->name_off = 0;
2555 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
2556 }
2557 }
2558}
2559
2560static bool libbpf_needs_btf(const struct bpf_object *obj)
2561{
2562 return obj->efile.btf_maps_shndx >= 0 ||
2563 obj->efile.st_ops_shndx >= 0 ||
2564 obj->nr_extern > 0;
2565}
2566
2567static bool kernel_needs_btf(const struct bpf_object *obj)
2568{
2569 return obj->efile.st_ops_shndx >= 0;
2570}
2571
2572static int bpf_object__init_btf(struct bpf_object *obj,
2573 Elf_Data *btf_data,
2574 Elf_Data *btf_ext_data)
2575{
2576 int err = -ENOENT;
2577
2578 if (btf_data) {
2579 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2580 err = libbpf_get_error(obj->btf);
2581 if (err) {
2582 obj->btf = NULL;
2583 pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
2584 goto out;
2585 }
2586 /* enforce 8-byte pointers for BPF-targeted BTFs */
2587 btf__set_pointer_size(obj->btf, 8);
2588 }
2589 if (btf_ext_data) {
2590 if (!obj->btf) {
2591 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
2592 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2593 goto out;
2594 }
2595 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
2596 err = libbpf_get_error(obj->btf_ext);
2597 if (err) {
2598 pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n",
2599 BTF_EXT_ELF_SEC, err);
2600 obj->btf_ext = NULL;
2601 goto out;
2602 }
2603 }
2604out:
2605 if (err && libbpf_needs_btf(obj)) {
2606 pr_warn("BTF is required, but is missing or corrupted.\n");
2607 return err;
2608 }
2609 return 0;
2610}
2611
2612static int bpf_object__finalize_btf(struct bpf_object *obj)
2613{
2614 int err;
2615
2616 if (!obj->btf)
2617 return 0;
2618
2619 err = btf__finalize_data(obj, obj->btf);
2620 if (err) {
2621 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2622 return err;
2623 }
2624
2625 return 0;
2626}
2627
2628static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
2629{
2630 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2631 prog->type == BPF_PROG_TYPE_LSM)
2632 return true;
2633
2634 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
2635 * also need vmlinux BTF
2636 */
2637 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2638 return true;
2639
2640 return false;
2641}
2642
2643static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
2644{
2645 struct bpf_program *prog;
2646 int i;
2647
2648 /* CO-RE relocations need kernel BTF */
2649 if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
2650 return true;
2651
2652 /* Support for typed ksyms needs kernel BTF */
2653 for (i = 0; i < obj->nr_extern; i++) {
2654 const struct extern_desc *ext;
2655
2656 ext = &obj->externs[i];
2657 if (ext->type == EXT_KSYM && ext->ksym.type_id)
2658 return true;
2659 }
2660
2661 bpf_object__for_each_program(prog, obj) {
2662 if (!prog->load)
2663 continue;
2664 if (prog_needs_vmlinux_btf(prog))
2665 return true;
2666 }
2667
2668 return false;
2669}
2670
2671static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
2672{
2673 int err;
2674
2675 /* btf_vmlinux could be loaded earlier */
2676 if (obj->btf_vmlinux || obj->gen_loader)
2677 return 0;
2678
2679 if (!force && !obj_needs_vmlinux_btf(obj))
2680 return 0;
2681
2682 obj->btf_vmlinux = libbpf_find_kernel_btf();
2683 err = libbpf_get_error(obj->btf_vmlinux);
2684 if (err) {
2685 pr_warn("Error loading vmlinux BTF: %d\n", err);
2686 obj->btf_vmlinux = NULL;
2687 return err;
2688 }
2689 return 0;
2690}
2691
2692static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2693{
2694 struct btf *kern_btf = obj->btf;
2695 bool btf_mandatory, sanitize;
2696 int i, err = 0;
2697
2698 if (!obj->btf)
2699 return 0;
2700
2701 if (!kernel_supports(obj, FEAT_BTF)) {
2702 if (kernel_needs_btf(obj)) {
2703 err = -EOPNOTSUPP;
2704 goto report;
2705 }
2706 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
2707 return 0;
2708 }
2709
2710 /* Even though some subprogs are global/weak, user might prefer more
2711 * permissive BPF verification process that BPF verifier performs for
2712 * static functions, taking into account more context from the caller
2713 * functions. In such case, they need to mark such subprogs with
2714 * __attribute__((visibility("hidden"))) and libbpf will adjust
2715 * corresponding FUNC BTF type to be marked as static and trigger more
2716 * involved BPF verification process.
2717 */
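	/* For illustration, BPF program source typically marks such a subprog
	 * with the __hidden convenience macro (expected to expand to the
	 * visibility attribute via bpf_helpers.h), roughly:
	 *
	 *	__hidden int example_subprog(struct example_ctx *ctx) { ... }
	 *
	 * example_subprog and example_ctx are placeholder names.
	 */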
2718 for (i = 0; i < obj->nr_programs; i++) {
2719 struct bpf_program *prog = &obj->programs[i];
2720 struct btf_type *t;
2721 const char *name;
2722 int j, n;
2723
2724 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
2725 continue;
2726
2727 n = btf__get_nr_types(obj->btf);
2728 for (j = 1; j <= n; j++) {
2729 t = btf_type_by_id(obj->btf, j);
2730 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
2731 continue;
2732
2733 name = btf__str_by_offset(obj->btf, t->name_off);
2734 if (strcmp(name, prog->name) != 0)
2735 continue;
2736
2737 t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
2738 break;
2739 }
2740 }
2741
2742 sanitize = btf_needs_sanitization(obj);
2743 if (sanitize) {
2744 const void *raw_data;
2745 __u32 sz;
2746
2747 /* clone BTF to sanitize a copy and leave the original intact */
2748 raw_data = btf__get_raw_data(obj->btf, &sz);
2749 kern_btf = btf__new(raw_data, sz);
2750 err = libbpf_get_error(kern_btf);
2751 if (err)
2752 return err;
2753
2754 /* enforce 8-byte pointers for BPF-targeted BTFs */
2755 btf__set_pointer_size(obj->btf, 8);
2756 bpf_object__sanitize_btf(obj, kern_btf);
2757 }
2758
2759 if (obj->gen_loader) {
2760 __u32 raw_size = 0;
2761 const void *raw_data = btf__get_raw_data(kern_btf, &raw_size);
2762
2763 if (!raw_data)
2764 return -ENOMEM;
2765 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
2766 /* Pretend to have valid FD to pass various fd >= 0 checks.
2767 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
2768 */
2769 btf__set_fd(kern_btf, 0);
2770 } else {
2771 err = btf__load(kern_btf);
2772 }
2773 if (sanitize) {
2774 if (!err) {
2775 /* move fd to libbpf's BTF */
2776 btf__set_fd(obj->btf, btf__fd(kern_btf));
2777 btf__set_fd(kern_btf, -1);
2778 }
2779 btf__free(kern_btf);
2780 }
2781report:
2782 if (err) {
2783 btf_mandatory = kernel_needs_btf(obj);
2784 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
2785 btf_mandatory ? "BTF is mandatory, can't proceed."
2786 : "BTF is optional, ignoring.");
2787 if (!btf_mandatory)
2788 err = 0;
2789 }
2790 return err;
2791}
2792
2793static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
2794{
2795 const char *name;
2796
2797 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
2798 if (!name) {
2799 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
2800 off, obj->path, elf_errmsg(-1));
2801 return NULL;
2802 }
2803
2804 return name;
2805}
2806
2807static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
2808{
2809 const char *name;
2810
2811 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
2812 if (!name) {
2813 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
2814 off, obj->path, elf_errmsg(-1));
2815 return NULL;
2816 }
2817
2818 return name;
2819}
2820
2821static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
2822{
2823 Elf_Scn *scn;
2824
2825 scn = elf_getscn(obj->efile.elf, idx);
2826 if (!scn) {
2827 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
2828 idx, obj->path, elf_errmsg(-1));
2829 return NULL;
2830 }
2831 return scn;
2832}
2833
2834static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
2835{
2836 Elf_Scn *scn = NULL;
2837 Elf *elf = obj->efile.elf;
2838 const char *sec_name;
2839
2840 while ((scn = elf_nextscn(elf, scn)) != NULL) {
2841 sec_name = elf_sec_name(obj, scn);
2842 if (!sec_name)
2843 return NULL;
2844
2845 if (strcmp(sec_name, name) != 0)
2846 continue;
2847
2848 return scn;
2849 }
2850 return NULL;
2851}
2852
2853static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
2854{
2855 if (!scn)
2856 return -EINVAL;
2857
2858 if (gelf_getshdr(scn, hdr) != hdr) {
2859 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
2860 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2861 return -EINVAL;
2862 }
2863
2864 return 0;
2865}
2866
2867static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
2868{
2869 const char *name;
2870 GElf_Shdr sh;
2871
2872 if (!scn)
2873 return NULL;
2874
2875 if (elf_sec_hdr(obj, scn, &sh))
2876 return NULL;
2877
2878 name = elf_sec_str(obj, sh.sh_name);
2879 if (!name) {
2880 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
2881 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2882 return NULL;
2883 }
2884
2885 return name;
2886}
2887
2888static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
2889{
2890 Elf_Data *data;
2891
2892 if (!scn)
2893 return NULL;
2894
2895 data = elf_getdata(scn, 0);
2896 if (!data) {
2897 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
2898 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
2899 obj->path, elf_errmsg(-1));
2900 return NULL;
2901 }
2902
2903 return data;
2904}
2905
2906static bool is_sec_name_dwarf(const char *name)
2907{
2908 /* approximation, but the actual list is too long */
2909 return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
2910}
2911
2912static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
2913{
2914 /* no special handling of .strtab */
2915 if (hdr->sh_type == SHT_STRTAB)
2916 return true;
2917
2918 /* ignore .llvm_addrsig section as well */
2919 if (hdr->sh_type == SHT_LLVM_ADDRSIG)
2920 return true;
2921
2922 /* no subprograms will lead to an empty .text section, ignore it */
2923 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
2924 strcmp(name, ".text") == 0)
2925 return true;
2926
2927 /* DWARF sections */
2928 if (is_sec_name_dwarf(name))
2929 return true;
2930
2931 if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
2932 name += sizeof(".rel") - 1;
2933 /* DWARF section relocations */
2934 if (is_sec_name_dwarf(name))
2935 return true;
2936
2937 /* .BTF and .BTF.ext don't need relocations */
2938 if (strcmp(name, BTF_ELF_SEC) == 0 ||
2939 strcmp(name, BTF_EXT_ELF_SEC) == 0)
2940 return true;
2941 }
2942
2943 return false;
2944}
2945
2946static int cmp_progs(const void *_a, const void *_b)
2947{
2948 const struct bpf_program *a = _a;
2949 const struct bpf_program *b = _b;
2950
2951 if (a->sec_idx != b->sec_idx)
2952 return a->sec_idx < b->sec_idx ? -1 : 1;
2953
2954 /* sec_insn_off can't be the same within the section */
2955 return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
2956}
2957
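/* Walk all ELF sections: a first pass locates the symbol table, a second pass
 * records license/version data, legacy and BTF-defined map sections, .BTF and
 * .BTF.ext data, executable (program) sections, data/rodata/bss, struct_ops
 * data and relocation sections for later processing.
 */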
2958static int bpf_object__elf_collect(struct bpf_object *obj)
2959{
2960 Elf *elf = obj->efile.elf;
2961 Elf_Data *btf_ext_data = NULL;
2962 Elf_Data *btf_data = NULL;
2963 int idx = 0, err = 0;
2964 const char *name;
2965 Elf_Data *data;
2966 Elf_Scn *scn;
2967 GElf_Shdr sh;
2968
2969 /* a bunch of ELF parsing functionality depends on processing symbols,
2970 * so do the first pass and find the symbol table
2971 */
2972 scn = NULL;
2973 while ((scn = elf_nextscn(elf, scn)) != NULL) {
2974 if (elf_sec_hdr(obj, scn, &sh))
2975 return -LIBBPF_ERRNO__FORMAT;
2976
2977 if (sh.sh_type == SHT_SYMTAB) {
2978 if (obj->efile.symbols) {
2979 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
2980 return -LIBBPF_ERRNO__FORMAT;
2981 }
2982
2983 data = elf_sec_data(obj, scn);
2984 if (!data)
2985 return -LIBBPF_ERRNO__FORMAT;
2986
2987 obj->efile.symbols = data;
2988 obj->efile.symbols_shndx = elf_ndxscn(scn);
2989 obj->efile.strtabidx = sh.sh_link;
2990 }
2991 }
2992
2993 scn = NULL;
2994 while ((scn = elf_nextscn(elf, scn)) != NULL) {
2995 idx++;
2996
2997 if (elf_sec_hdr(obj, scn, &sh))
2998 return -LIBBPF_ERRNO__FORMAT;
2999
3000 name = elf_sec_str(obj, sh.sh_name);
3001 if (!name)
3002 return -LIBBPF_ERRNO__FORMAT;
3003
3004 if (ignore_elf_section(&sh, name))
3005 continue;
3006
3007 data = elf_sec_data(obj, scn);
3008 if (!data)
3009 return -LIBBPF_ERRNO__FORMAT;
3010
3011 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
3012 idx, name, (unsigned long)data->d_size,
3013 (int)sh.sh_link, (unsigned long)sh.sh_flags,
3014 (int)sh.sh_type);
3015
3016 if (strcmp(name, "license") == 0) {
3017 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3018 if (err)
3019 return err;
3020 } else if (strcmp(name, "version") == 0) {
3021 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3022 if (err)
3023 return err;
3024 } else if (strcmp(name, "maps") == 0) {
3025 obj->efile.maps_shndx = idx;
3026 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3027 obj->efile.btf_maps_shndx = idx;
3028 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
3029 btf_data = data;
3030 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
3031 btf_ext_data = data;
3032 } else if (sh.sh_type == SHT_SYMTAB) {
3033 /* already processed during the first pass above */
3034 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
3035 if (sh.sh_flags & SHF_EXECINSTR) {
3036 if (strcmp(name, ".text") == 0)
3037 obj->efile.text_shndx = idx;
3038 err = bpf_object__add_programs(obj, data, name, idx);
3039 if (err)
3040 return err;
3041 } else if (strcmp(name, DATA_SEC) == 0) {
3042 obj->efile.data = data;
3043 obj->efile.data_shndx = idx;
3044 } else if (strcmp(name, RODATA_SEC) == 0) {
3045 obj->efile.rodata = data;
3046 obj->efile.rodata_shndx = idx;
3047 } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
3048 obj->efile.st_ops_data = data;
3049 obj->efile.st_ops_shndx = idx;
3050 } else {
3051 pr_info("elf: skipping unrecognized data section(%d) %s\n",
3052 idx, name);
3053 }
3054 } else if (sh.sh_type == SHT_REL) {
3055 int nr_sects = obj->efile.nr_reloc_sects;
3056 void *sects = obj->efile.reloc_sects;
3057 int sec = sh.sh_info; /* points to other section */
3058
3059 /* Only do relo for section with exec instructions */
3060 if (!section_have_execinstr(obj, sec) &&
3061 strcmp(name, ".rel" STRUCT_OPS_SEC) &&
3062 strcmp(name, ".rel" MAPS_ELF_SEC)) {
3063 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3064 idx, name, sec,
3065 elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
3066 continue;
3067 }
3068
3069 sects = libbpf_reallocarray(sects, nr_sects + 1,
3070 sizeof(*obj->efile.reloc_sects));
3071 if (!sects)
3072 return -ENOMEM;
3073
3074 obj->efile.reloc_sects = sects;
3075 obj->efile.nr_reloc_sects++;
3076
3077 obj->efile.reloc_sects[nr_sects].shdr = sh;
3078 obj->efile.reloc_sects[nr_sects].data = data;
3079 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
3080 obj->efile.bss = data;
3081 obj->efile.bss_shndx = idx;
3082 } else {
3083 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
3084 (size_t)sh.sh_size);
3085 }
3086 }
3087
3088 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3089 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3090 return -LIBBPF_ERRNO__FORMAT;
3091 }
3092
3093	/* sort BPF programs by section index and in-section instruction offset
3094	 * for faster search */
3095 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3096
3097 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3098}
3099
3100static bool sym_is_extern(const GElf_Sym *sym)
3101{
3102 int bind = GELF_ST_BIND(sym->st_info);
3103 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
3104 return sym->st_shndx == SHN_UNDEF &&
3105 (bind == STB_GLOBAL || bind == STB_WEAK) &&
3106 GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
3107}
3108
3109static bool sym_is_subprog(const GElf_Sym *sym, int text_shndx)
3110{
3111 int bind = GELF_ST_BIND(sym->st_info);
3112 int type = GELF_ST_TYPE(sym->st_info);
3113
3114 /* in .text section */
3115 if (sym->st_shndx != text_shndx)
3116 return false;
3117
3118 /* local function */
3119 if (bind == STB_LOCAL && type == STT_SECTION)
3120 return true;
3121
3122 /* global function */
3123 return bind == STB_GLOBAL && type == STT_FUNC;
3124}
3125
3126static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3127{
3128 const struct btf_type *t;
3129 const char *tname;
3130 int i, n;
3131
3132 if (!btf)
3133 return -ESRCH;
3134
3135 n = btf__get_nr_types(btf);
3136 for (i = 1; i <= n; i++) {
3137 t = btf__type_by_id(btf, i);
3138
3139 if (!btf_is_var(t) && !btf_is_func(t))
3140 continue;
3141
3142 tname = btf__name_by_offset(btf, t->name_off);
3143 if (strcmp(tname, ext_name))
3144 continue;
3145
3146 if (btf_is_var(t) &&
3147 btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3148 return -EINVAL;
3149
3150 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
3151 return -EINVAL;
3152
3153 return i;
3154 }
3155
3156 return -ENOENT;
3157}
3158
3159static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
3160 const struct btf_var_secinfo *vs;
3161 const struct btf_type *t;
3162 int i, j, n;
3163
3164 if (!btf)
3165 return -ESRCH;
3166
3167 n = btf__get_nr_types(btf);
3168 for (i = 1; i <= n; i++) {
3169 t = btf__type_by_id(btf, i);
3170
3171 if (!btf_is_datasec(t))
3172 continue;
3173
3174 vs = btf_var_secinfos(t);
3175 for (j = 0; j < btf_vlen(t); j++, vs++) {
3176 if (vs->type == ext_btf_id)
3177 return i;
3178 }
3179 }
3180
3181 return -ENOENT;
3182}
3183
3184static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3185 bool *is_signed)
3186{
3187 const struct btf_type *t;
3188 const char *name;
3189
3190 t = skip_mods_and_typedefs(btf, id, NULL);
3191 name = btf__name_by_offset(btf, t->name_off);
3192
3193 if (is_signed)
3194 *is_signed = false;
3195 switch (btf_kind(t)) {
3196 case BTF_KIND_INT: {
3197 int enc = btf_int_encoding(t);
3198
3199 if (enc & BTF_INT_BOOL)
3200 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3201 if (is_signed)
3202 *is_signed = enc & BTF_INT_SIGNED;
3203 if (t->size == 1)
3204 return KCFG_CHAR;
3205 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
3206 return KCFG_UNKNOWN;
3207 return KCFG_INT;
3208 }
3209 case BTF_KIND_ENUM:
3210 if (t->size != 4)
3211 return KCFG_UNKNOWN;
3212 if (strcmp(name, "libbpf_tristate"))
3213 return KCFG_UNKNOWN;
3214 return KCFG_TRISTATE;
3215 case BTF_KIND_ARRAY:
3216 if (btf_array(t)->nelems == 0)
3217 return KCFG_UNKNOWN;
3218 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
3219 return KCFG_UNKNOWN;
3220 return KCFG_CHAR_ARR;
3221 default:
3222 return KCFG_UNKNOWN;
3223 }
3224}
3225
3226static int cmp_externs(const void *_a, const void *_b)
3227{
3228 const struct extern_desc *a = _a;
3229 const struct extern_desc *b = _b;
3230
3231 if (a->type != b->type)
3232 return a->type < b->type ? -1 : 1;
3233
3234 if (a->type == EXT_KCFG) {
3235 /* descending order by alignment requirements */
3236 if (a->kcfg.align != b->kcfg.align)
3237 return a->kcfg.align > b->kcfg.align ? -1 : 1;
3238 /* ascending order by size, within same alignment class */
3239 if (a->kcfg.sz != b->kcfg.sz)
3240 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3241 }
3242
3243 /* resolve ties by name */
3244 return strcmp(a->name, b->name);
3245}
3246
3247static int find_int_btf_id(const struct btf *btf)
3248{
3249 const struct btf_type *t;
3250 int i, n;
3251
3252 n = btf__get_nr_types(btf);
3253 for (i = 1; i <= n; i++) {
3254 t = btf__type_by_id(btf, i);
3255
3256 if (btf_is_int(t) && btf_int_bits(t) == 32)
3257 return i;
3258 }
3259
3260 return 0;
3261}
3262
3263static int add_dummy_ksym_var(struct btf *btf)
3264{
3265 int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
3266 const struct btf_var_secinfo *vs;
3267 const struct btf_type *sec;
3268
3269 if (!btf)
3270 return 0;
3271
3272 sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
3273 BTF_KIND_DATASEC);
3274 if (sec_btf_id < 0)
3275 return 0;
3276
3277 sec = btf__type_by_id(btf, sec_btf_id);
3278 vs = btf_var_secinfos(sec);
3279 for (i = 0; i < btf_vlen(sec); i++, vs++) {
3280 const struct btf_type *vt;
3281
3282 vt = btf__type_by_id(btf, vs->type);
3283 if (btf_is_func(vt))
3284 break;
3285 }
3286
3287 /* No func in ksyms sec. No need to add dummy var. */
3288 if (i == btf_vlen(sec))
3289 return 0;
3290
3291 int_btf_id = find_int_btf_id(btf);
3292 dummy_var_btf_id = btf__add_var(btf,
3293 "dummy_ksym",
3294 BTF_VAR_GLOBAL_ALLOCATED,
3295 int_btf_id);
3296 if (dummy_var_btf_id < 0)
3297 pr_warn("cannot create a dummy_ksym var\n");
3298
3299 return dummy_var_btf_id;
3300}
3301
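/* Find extern symbols (undefined NOTYPE GLOBAL/WEAK ELF symbols), look up
 * their BTF declarations, classify them as Kconfig (.kconfig) or ksym
 * (.ksyms) externs, and rewrite the corresponding BTF DATASECs so that the
 * kernel's BTF validation accepts them.
 */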
3302static int bpf_object__collect_externs(struct bpf_object *obj)
3303{
3304 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
3305 const struct btf_type *t;
3306 struct extern_desc *ext;
3307 int i, n, off, dummy_var_btf_id;
3308 const char *ext_name, *sec_name;
3309 Elf_Scn *scn;
3310 GElf_Shdr sh;
3311
3312 if (!obj->efile.symbols)
3313 return 0;
3314
3315 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
3316 if (elf_sec_hdr(obj, scn, &sh))
3317 return -LIBBPF_ERRNO__FORMAT;
3318
3319 dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
3320 if (dummy_var_btf_id < 0)
3321 return dummy_var_btf_id;
3322
3323 n = sh.sh_size / sh.sh_entsize;
3324 pr_debug("looking for externs among %d symbols...\n", n);
3325
3326 for (i = 0; i < n; i++) {
3327 GElf_Sym sym;
3328
3329 if (!gelf_getsym(obj->efile.symbols, i, &sym))
3330 return -LIBBPF_ERRNO__FORMAT;
3331 if (!sym_is_extern(&sym))
3332 continue;
3333 ext_name = elf_sym_str(obj, sym.st_name);
3334 if (!ext_name || !ext_name[0])
3335 continue;
3336
3337 ext = obj->externs;
3338 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
3339 if (!ext)
3340 return -ENOMEM;
3341 obj->externs = ext;
3342 ext = &ext[obj->nr_extern];
3343 memset(ext, 0, sizeof(*ext));
3344 obj->nr_extern++;
3345
3346 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
3347 if (ext->btf_id <= 0) {
3348 pr_warn("failed to find BTF for extern '%s': %d\n",
3349 ext_name, ext->btf_id);
3350 return ext->btf_id;
3351 }
3352 t = btf__type_by_id(obj->btf, ext->btf_id);
3353 ext->name = btf__name_by_offset(obj->btf, t->name_off);
3354 ext->sym_idx = i;
3355 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
3356
3357 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
3358 if (ext->sec_btf_id <= 0) {
3359 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
3360 ext_name, ext->btf_id, ext->sec_btf_id);
3361 return ext->sec_btf_id;
3362 }
3363 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
3364 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
3365
3366 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
3367 if (btf_is_func(t)) {
3368 pr_warn("extern function %s is unsupported under %s section\n",
3369 ext->name, KCONFIG_SEC);
3370 return -ENOTSUP;
3371 }
3372 kcfg_sec = sec;
3373 ext->type = EXT_KCFG;
3374 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
3375 if (ext->kcfg.sz <= 0) {
3376 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
3377 ext_name, ext->kcfg.sz);
3378 return ext->kcfg.sz;
3379 }
3380 ext->kcfg.align = btf__align_of(obj->btf, t->type);
3381 if (ext->kcfg.align <= 0) {
3382 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
3383 ext_name, ext->kcfg.align);
3384 return -EINVAL;
3385 }
3386 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
3387 &ext->kcfg.is_signed);
3388 if (ext->kcfg.type == KCFG_UNKNOWN) {
3389 pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
3390 return -ENOTSUP;
3391 }
3392 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
3393 if (btf_is_func(t) && ext->is_weak) {
3394 pr_warn("extern weak function %s is unsupported\n",
3395 ext->name);
3396 return -ENOTSUP;
3397 }
3398 ksym_sec = sec;
3399 ext->type = EXT_KSYM;
3400 skip_mods_and_typedefs(obj->btf, t->type,
3401 &ext->ksym.type_id);
3402 } else {
3403 pr_warn("unrecognized extern section '%s'\n", sec_name);
3404 return -ENOTSUP;
3405 }
3406 }
3407 pr_debug("collected %d externs total\n", obj->nr_extern);
3408
3409 if (!obj->nr_extern)
3410 return 0;
3411
3412 /* sort externs by type, for kcfg ones also by (align, size, name) */
3413 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
3414
3415 /* for .ksyms section, we need to turn all externs into allocated
3416 * variables in BTF to pass kernel verification; we do this by
3417	 * pretending that each extern is an int-sized variable
3418 */
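	/* For illustration, such externs typically come from BPF program
	 * declarations using the __ksym/__kconfig section attributes from
	 * bpf_helpers.h, roughly (identifiers are placeholders except
	 * LINUX_KERNEL_VERSION):
	 *
	 *	extern unsigned int LINUX_KERNEL_VERSION __kconfig;
	 *	extern const int example_kernel_sym __ksym;
	 */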
3419 if (ksym_sec) {
3420 /* find existing 4-byte integer type in BTF to use for fake
3421 * extern variables in DATASEC
3422 */
3423 int int_btf_id = find_int_btf_id(obj->btf);
3424		/* For an extern function, the dummy_var added earlier
3425		 * will be used to replace vs->type, and its name string
3426		 * will be used to fill in the missing parameter names
3427		 * in the func proto.
3428 */
3429 const struct btf_type *dummy_var;
3430
3431 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
3432 for (i = 0; i < obj->nr_extern; i++) {
3433 ext = &obj->externs[i];
3434 if (ext->type != EXT_KSYM)
3435 continue;
3436 pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
3437 i, ext->sym_idx, ext->name);
3438 }
3439
3440 sec = ksym_sec;
3441 n = btf_vlen(sec);
3442 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
3443 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3444 struct btf_type *vt;
3445
3446 vt = (void *)btf__type_by_id(obj->btf, vs->type);
3447 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
3448 ext = find_extern_by_name(obj, ext_name);
3449 if (!ext) {
3450 pr_warn("failed to find extern definition for BTF %s '%s'\n",
3451 btf_kind_str(vt), ext_name);
3452 return -ESRCH;
3453 }
3454 if (btf_is_func(vt)) {
3455 const struct btf_type *func_proto;
3456 struct btf_param *param;
3457 int j;
3458
3459 func_proto = btf__type_by_id(obj->btf,
3460 vt->type);
3461 param = btf_params(func_proto);
3462 /* Reuse the dummy_var string if the
3463 * func proto does not have param name.
3464 */
3465 for (j = 0; j < btf_vlen(func_proto); j++)
3466 if (param[j].type && !param[j].name_off)
3467 param[j].name_off =
3468 dummy_var->name_off;
3469 vs->type = dummy_var_btf_id;
3470 vt->info &= ~0xffff;
3471 vt->info |= BTF_FUNC_GLOBAL;
3472 } else {
3473 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3474 vt->type = int_btf_id;
3475 }
3476 vs->offset = off;
3477 vs->size = sizeof(int);
3478 }
3479 sec->size = off;
3480 }
3481
3482 if (kcfg_sec) {
3483 sec = kcfg_sec;
3484 /* for kcfg externs calculate their offsets within a .kconfig map */
3485 off = 0;
3486 for (i = 0; i < obj->nr_extern; i++) {
3487 ext = &obj->externs[i];
3488 if (ext->type != EXT_KCFG)
3489 continue;
3490
3491 ext->kcfg.data_off = roundup(off, ext->kcfg.align);
3492 off = ext->kcfg.data_off + ext->kcfg.sz;
3493 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
3494 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
3495 }
3496 sec->size = off;
3497 n = btf_vlen(sec);
3498 for (i = 0; i < n; i++) {
3499 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3500
3501 t = btf__type_by_id(obj->btf, vs->type);
3502 ext_name = btf__name_by_offset(obj->btf, t->name_off);
3503 ext = find_extern_by_name(obj, ext_name);
3504 if (!ext) {
3505 pr_warn("failed to find extern definition for BTF var '%s'\n",
3506 ext_name);
3507 return -ESRCH;
3508 }
3509 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3510 vs->offset = ext->kcfg.data_off;
3511 }
3512 }
3513 return 0;
3514}
3515
3516struct bpf_program *
3517bpf_object__find_program_by_title(const struct bpf_object *obj,
3518 const char *title)
3519{
3520 struct bpf_program *pos;
3521
3522 bpf_object__for_each_program(pos, obj) {
3523 if (pos->sec_name && !strcmp(pos->sec_name, title))
3524 return pos;
3525 }
3526 return errno = ENOENT, NULL;
3527}
3528
3529static bool prog_is_subprog(const struct bpf_object *obj,
3530 const struct bpf_program *prog)
3531{
3532	/* For legacy reasons, libbpf supports entry-point BPF programs
3533 * without SEC() attribute, i.e., those in the .text section. But if
3534 * there are 2 or more such programs in the .text section, they all
3535 * must be subprograms called from entry-point BPF programs in
3536 * designated SEC()'tions, otherwise there is no way to distinguish
3537	 * which of those programs should be loaded vs which are subprograms.
3538 * Similarly, if there is a function/program in .text and at least one
3539 * other BPF program with custom SEC() attribute, then we just assume
3540 * .text programs are subprograms (even if they are not called from
3541 * other programs), because libbpf never explicitly supported mixing
3542 * SEC()-designated BPF programs and .text entry-point BPF programs.
3543 */
3544 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
3545}
3546
3547struct bpf_program *
3548bpf_object__find_program_by_name(const struct bpf_object *obj,
3549 const char *name)
3550{
3551 struct bpf_program *prog;
3552
3553 bpf_object__for_each_program(prog, obj) {
3554 if (prog_is_subprog(obj, prog))
3555 continue;
3556 if (!strcmp(prog->name, name))
3557 return prog;
3558 }
3559 return errno = ENOENT, NULL;
3560}
3561
3562static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3563 int shndx)
3564{
3565 return shndx == obj->efile.data_shndx ||
3566 shndx == obj->efile.bss_shndx ||
3567 shndx == obj->efile.rodata_shndx;
3568}
3569
3570static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3571 int shndx)
3572{
3573 return shndx == obj->efile.maps_shndx ||
3574 shndx == obj->efile.btf_maps_shndx;
3575}
3576
3577static enum libbpf_map_type
3578bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3579{
3580 if (shndx == obj->efile.data_shndx)
3581 return LIBBPF_MAP_DATA;
3582 else if (shndx == obj->efile.bss_shndx)
3583 return LIBBPF_MAP_BSS;
3584 else if (shndx == obj->efile.rodata_shndx)
3585 return LIBBPF_MAP_RODATA;
3586 else if (shndx == obj->efile.symbols_shndx)
3587 return LIBBPF_MAP_KCONFIG;
3588 else
3589 return LIBBPF_MAP_UNSPEC;
3590}
3591
3592static int bpf_program__record_reloc(struct bpf_program *prog,
3593 struct reloc_desc *reloc_desc,
3594 __u32 insn_idx, const char *sym_name,
3595 const GElf_Sym *sym, const GElf_Rel *rel)
3596{
3597 struct bpf_insn *insn = &prog->insns[insn_idx];
3598 size_t map_idx, nr_maps = prog->obj->nr_maps;
3599 struct bpf_object *obj = prog->obj;
3600 __u32 shdr_idx = sym->st_shndx;
3601 enum libbpf_map_type type;
3602 const char *sym_sec_name;
3603 struct bpf_map *map;
3604
3605 if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
3606 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
3607 prog->name, sym_name, insn_idx, insn->code);
3608 return -LIBBPF_ERRNO__RELOC;
3609 }
3610
3611 if (sym_is_extern(sym)) {
3612 int sym_idx = GELF_R_SYM(rel->r_info);
3613 int i, n = obj->nr_extern;
3614 struct extern_desc *ext;
3615
3616 for (i = 0; i < n; i++) {
3617 ext = &obj->externs[i];
3618 if (ext->sym_idx == sym_idx)
3619 break;
3620 }
3621 if (i >= n) {
3622 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
3623 prog->name, sym_name, sym_idx);
3624 return -LIBBPF_ERRNO__RELOC;
3625 }
3626 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
3627 prog->name, i, ext->name, ext->sym_idx, insn_idx);
3628 if (insn->code == (BPF_JMP | BPF_CALL))
3629 reloc_desc->type = RELO_EXTERN_FUNC;
3630 else
3631 reloc_desc->type = RELO_EXTERN_VAR;
3632 reloc_desc->insn_idx = insn_idx;
3633 reloc_desc->sym_off = i; /* sym_off stores extern index */
3634 return 0;
3635 }
3636
3637 /* sub-program call relocation */
3638 if (is_call_insn(insn)) {
3639 if (insn->src_reg != BPF_PSEUDO_CALL) {
3640 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
3641 return -LIBBPF_ERRNO__RELOC;
3642 }
3643 /* text_shndx can be 0, if no default "main" program exists */
3644 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
3645 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3646 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
3647 prog->name, sym_name, sym_sec_name);
3648 return -LIBBPF_ERRNO__RELOC;
3649 }
3650 if (sym->st_value % BPF_INSN_SZ) {
3651 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
3652 prog->name, sym_name, (size_t)sym->st_value);
3653 return -LIBBPF_ERRNO__RELOC;
3654 }
3655 reloc_desc->type = RELO_CALL;
3656 reloc_desc->insn_idx = insn_idx;
3657 reloc_desc->sym_off = sym->st_value;
3658 return 0;
3659 }
3660
3661 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
3662 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
3663 prog->name, sym_name, shdr_idx);
3664 return -LIBBPF_ERRNO__RELOC;
3665 }
3666
3667 /* loading subprog addresses */
3668 if (sym_is_subprog(sym, obj->efile.text_shndx)) {
3669 /* global_func: sym->st_value = offset in the section, insn->imm = 0.
3670 * local_func: sym->st_value = 0, insn->imm = offset in the section.
3671 */
3672 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
3673 pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
3674 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
3675 return -LIBBPF_ERRNO__RELOC;
3676 }
3677
3678 reloc_desc->type = RELO_SUBPROG_ADDR;
3679 reloc_desc->insn_idx = insn_idx;
3680 reloc_desc->sym_off = sym->st_value;
3681 return 0;
3682 }
3683
3684 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
3685 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3686
3687 /* generic map reference relocation */
3688 if (type == LIBBPF_MAP_UNSPEC) {
3689 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
3690 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
3691 prog->name, sym_name, sym_sec_name);
3692 return -LIBBPF_ERRNO__RELOC;
3693 }
3694 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3695 map = &obj->maps[map_idx];
3696 if (map->libbpf_type != type ||
3697 map->sec_idx != sym->st_shndx ||
3698 map->sec_offset != sym->st_value)
3699 continue;
3700 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
3701 prog->name, map_idx, map->name, map->sec_idx,
3702 map->sec_offset, insn_idx);
3703 break;
3704 }
3705 if (map_idx >= nr_maps) {
3706 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
3707 prog->name, sym_sec_name, (size_t)sym->st_value);
3708 return -LIBBPF_ERRNO__RELOC;
3709 }
3710 reloc_desc->type = RELO_LD64;
3711 reloc_desc->insn_idx = insn_idx;
3712 reloc_desc->map_idx = map_idx;
3713 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
3714 return 0;
3715 }
3716
3717 /* global data map relocation */
3718 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
3719 pr_warn("prog '%s': bad data relo against section '%s'\n",
3720 prog->name, sym_sec_name);
3721 return -LIBBPF_ERRNO__RELOC;
3722 }
3723 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3724 map = &obj->maps[map_idx];
3725 if (map->libbpf_type != type)
3726 continue;
3727 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
3728 prog->name, map_idx, map->name, map->sec_idx,
3729 map->sec_offset, insn_idx);
3730 break;
3731 }
3732 if (map_idx >= nr_maps) {
3733 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
3734 prog->name, sym_sec_name);
3735 return -LIBBPF_ERRNO__RELOC;
3736 }
3737
3738 reloc_desc->type = RELO_DATA;
3739 reloc_desc->insn_idx = insn_idx;
3740 reloc_desc->map_idx = map_idx;
3741 reloc_desc->sym_off = sym->st_value;
3742 return 0;
3743}
3744
3745static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
3746{
3747 return insn_idx >= prog->sec_insn_off &&
3748 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
3749}
3750
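/* Binary search over obj->programs (kept sorted by cmp_progs(), i.e. by
 * section index and in-section instruction offset) for the program that
 * contains instruction insn_idx of ELF section sec_idx.
 */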
3751static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
3752 size_t sec_idx, size_t insn_idx)
3753{
3754 int l = 0, r = obj->nr_programs - 1, m;
3755 struct bpf_program *prog;
3756
3757 while (l < r) {
3758 m = l + (r - l + 1) / 2;
3759 prog = &obj->programs[m];
3760
3761 if (prog->sec_idx < sec_idx ||
3762 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
3763 l = m;
3764 else
3765 r = m - 1;
3766 }
3767 /* matching program could be at index l, but it still might be the
3768 * wrong one, so we need to double check conditions for the last time
3769 */
3770 prog = &obj->programs[l];
3771 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
3772 return prog;
3773 return NULL;
3774}
3775
3776static int
3777bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
3778{
3779 Elf_Data *symbols = obj->efile.symbols;
3780 const char *relo_sec_name, *sec_name;
3781 size_t sec_idx = shdr->sh_info;
3782 struct bpf_program *prog;
3783 struct reloc_desc *relos;
3784 int err, i, nrels;
3785 const char *sym_name;
3786 __u32 insn_idx;
3787 Elf_Scn *scn;
3788 Elf_Data *scn_data;
3789 GElf_Sym sym;
3790 GElf_Rel rel;
3791
3792 scn = elf_sec_by_idx(obj, sec_idx);
3793 scn_data = elf_sec_data(obj, scn);
3794
3795 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
3796 sec_name = elf_sec_name(obj, scn);
3797 if (!relo_sec_name || !sec_name)
3798 return -EINVAL;
3799
3800 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
3801 relo_sec_name, sec_idx, sec_name);
3802 nrels = shdr->sh_size / shdr->sh_entsize;
3803
3804 for (i = 0; i < nrels; i++) {
3805 if (!gelf_getrel(data, i, &rel)) {
3806 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
3807 return -LIBBPF_ERRNO__FORMAT;
3808 }
3809 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
3810 pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
3811 relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
3812 return -LIBBPF_ERRNO__FORMAT;
3813 }
3814
3815 if (rel.r_offset % BPF_INSN_SZ || rel.r_offset >= scn_data->d_size) {
3816 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
3817				relo_sec_name, (size_t)rel.r_offset, i);
3818 return -LIBBPF_ERRNO__FORMAT;
3819 }
3820
3821 insn_idx = rel.r_offset / BPF_INSN_SZ;
3822 /* relocations against static functions are recorded as
3823 * relocations against the section that contains a function;
3824 * in such case, symbol will be STT_SECTION and sym.st_name
3825 * will point to empty string (0), so fetch section name
3826 * instead
3827 */
3828 if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
3829 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
3830 else
3831 sym_name = elf_sym_str(obj, sym.st_name);
3832		sym_name = sym_name ?: "<?>";
3833
3834 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
3835 relo_sec_name, i, insn_idx, sym_name);
3836
3837 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
3838 if (!prog) {
3839 pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
3840 relo_sec_name, i, sec_name, insn_idx);
3841 continue;
3842 }
3843
3844 relos = libbpf_reallocarray(prog->reloc_desc,
3845 prog->nr_reloc + 1, sizeof(*relos));
3846 if (!relos)
3847 return -ENOMEM;
3848 prog->reloc_desc = relos;
3849
3850 /* adjust insn_idx to local BPF program frame of reference */
3851 insn_idx -= prog->sec_insn_off;
3852 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
3853 insn_idx, sym_name, &sym, &rel);
3854 if (err)
3855 return err;
3856
3857 prog->nr_reloc++;
3858 }
3859 return 0;
3860}
3861
3862static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
3863{
3864 struct bpf_map_def *def = &map->def;
3865 __u32 key_type_id = 0, value_type_id = 0;
3866 int ret;
3867
3868	/* if it's a BTF-defined map, we don't need to search for type IDs.
3869	 * A struct_ops map does not need btf_key_type_id or
3870	 * btf_value_type_id either.
3871 */
3872 if (map->sec_idx == obj->efile.btf_maps_shndx ||
3873 bpf_map__is_struct_ops(map))
3874 return 0;
3875
3876 if (!bpf_map__is_internal(map)) {
3877 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
3878 def->value_size, &key_type_id,
3879 &value_type_id);
3880 } else {
3881 /*
3882 * LLVM annotates global data differently in BTF, that is,
3883 * only as '.data', '.bss' or '.rodata'.
3884 */
3885 ret = btf__find_by_name(obj->btf,
3886 libbpf_type_to_btf_name[map->libbpf_type]);
3887 }
3888 if (ret < 0)
3889 return ret;
3890
3891 map->btf_key_type_id = key_type_id;
3892 map->btf_value_type_id = bpf_map__is_internal(map) ?
3893 ret : value_type_id;
3894 return 0;
3895}
3896
3897int bpf_map__reuse_fd(struct bpf_map *map, int fd)
3898{
3899 struct bpf_map_info info = {};
3900 __u32 len = sizeof(info);
3901 int new_fd, err;
3902 char *new_name;
3903
3904 err = bpf_obj_get_info_by_fd(fd, &info, &len);
3905 if (err)
3906 return libbpf_err(err);
3907
3908 new_name = strdup(info.name);
3909 if (!new_name)
3910 return libbpf_err(-errno);
3911
3912 new_fd = open("/", O_RDONLY | O_CLOEXEC);
3913 if (new_fd < 0) {
3914 err = -errno;
3915 goto err_free_new_name;
3916 }
3917
3918 new_fd = dup3(fd, new_fd, O_CLOEXEC);
3919 if (new_fd < 0) {
3920 err = -errno;
3921 goto err_close_new_fd;
3922 }
3923
3924 err = zclose(map->fd);
3925 if (err) {
3926 err = -errno;
3927 goto err_close_new_fd;
3928 }
3929 free(map->name);
3930
3931 map->fd = new_fd;
3932 map->name = new_name;
3933 map->def.type = info.type;
3934 map->def.key_size = info.key_size;
3935 map->def.value_size = info.value_size;
3936 map->def.max_entries = info.max_entries;
3937 map->def.map_flags = info.map_flags;
3938 map->btf_key_type_id = info.btf_key_type_id;
3939 map->btf_value_type_id = info.btf_value_type_id;
3940 map->reused = true;
3941
3942 return 0;
3943
3944err_close_new_fd:
3945 close(new_fd);
3946err_free_new_name:
3947 free(new_name);
3948 return libbpf_err(err);
3949}
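
/* Illustrative usage sketch (not part of libbpf itself): before
 * bpf_object__load(), an already pinned map can be picked up and reused so
 * that two processes share the same kernel map. The pin path and map name
 * below are hypothetical.
 *
 *	int pin_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, "my_map");
 *
 *	if (pin_fd >= 0 && m)
 *		bpf_map__reuse_fd(m, pin_fd);
 */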
3950
3951__u32 bpf_map__max_entries(const struct bpf_map *map)
3952{
3953 return map->def.max_entries;
3954}
3955
3956struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
3957{
3958 if (!bpf_map_type__is_map_in_map(map->def.type))
3959 return errno = EINVAL, NULL;
3960
3961 return map->inner_map;
3962}
3963
3964int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
3965{
3966 if (map->fd >= 0)
3967 return libbpf_err(-EBUSY);
3968 map->def.max_entries = max_entries;
3969 return 0;
3970}
3971
3972int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
3973{
3974 if (!map || !max_entries)
3975 return libbpf_err(-EINVAL);
3976
3977 return bpf_map__set_max_entries(map, max_entries);
3978}
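
/* Illustrative sketch: max_entries can only be changed while the map has not
 * been created yet, i.e. between bpf_object__open() and bpf_object__load().
 * 'obj' and the map name here are hypothetical.
 *
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, "events");
 *
 *	if (m)
 *		bpf_map__set_max_entries(m, 4096);
 */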
3979
3980static int
3981bpf_object__probe_loading(struct bpf_object *obj)
3982{
3983 struct bpf_load_program_attr attr;
3984 char *cp, errmsg[STRERR_BUFSIZE];
3985 struct bpf_insn insns[] = {
3986 BPF_MOV64_IMM(BPF_REG_0, 0),
3987 BPF_EXIT_INSN(),
3988 };
3989 int ret;
3990
3991 if (obj->gen_loader)
3992 return 0;
3993
3994 /* make sure basic loading works */
3995
3996 memset(&attr, 0, sizeof(attr));
3997 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3998 attr.insns = insns;
3999 attr.insns_cnt = ARRAY_SIZE(insns);
4000 attr.license = "GPL";
4001
4002 ret = bpf_load_program_xattr(&attr, NULL, 0);
4003 if (ret < 0) {
4004 attr.prog_type = BPF_PROG_TYPE_TRACEPOINT;
4005 ret = bpf_load_program_xattr(&attr, NULL, 0);
4006 }
4007 if (ret < 0) {
4008 ret = errno;
4009 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4010 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
4011 "program. Make sure your kernel supports BPF "
4012 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
4013 "set to big enough value.\n", __func__, cp, ret);
4014 return -ret;
4015 }
4016 close(ret);
4017
4018 return 0;
4019}
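
/* For reference, the two-instruction probe used above ("r0 = 0; exit") is the
 * eBPF equivalent of the trivial C program
 *
 *	int prog(void *ctx) { return 0; }
 *
 * which is about the smallest program the verifier will accept.
 */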
4020
4021static int probe_fd(int fd)
4022{
4023 if (fd >= 0)
4024 close(fd);
4025 return fd >= 0;
4026}
4027
4028static int probe_kern_prog_name(void)
4029{
4030 struct bpf_load_program_attr attr;
4031 struct bpf_insn insns[] = {
4032 BPF_MOV64_IMM(BPF_REG_0, 0),
4033 BPF_EXIT_INSN(),
4034 };
4035 int ret;
4036
4037 /* make sure loading with name works */
4038
4039 memset(&attr, 0, sizeof(attr));
4040 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
4041 attr.insns = insns;
4042 attr.insns_cnt = ARRAY_SIZE(insns);
4043 attr.license = "GPL";
4044 attr.name = "test";
4045 ret = bpf_load_program_xattr(&attr, NULL, 0);
4046 return probe_fd(ret);
4047}
4048
4049static int probe_kern_global_data(void)
4050{
4051 struct bpf_load_program_attr prg_attr;
4052 struct bpf_create_map_attr map_attr;
4053 char *cp, errmsg[STRERR_BUFSIZE];
4054 struct bpf_insn insns[] = {
4055 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
4056 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
4057 BPF_MOV64_IMM(BPF_REG_0, 0),
4058 BPF_EXIT_INSN(),
4059 };
4060 int ret, map;
4061
4062 memset(&map_attr, 0, sizeof(map_attr));
4063 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
4064 map_attr.key_size = sizeof(int);
4065 map_attr.value_size = 32;
4066 map_attr.max_entries = 1;
4067
4068 map = bpf_create_map_xattr(&map_attr);
4069 if (map < 0) {
4070 ret = -errno;
4071 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4072 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4073 __func__, cp, -ret);
4074 return ret;
4075 }
4076
4077 insns[0].imm = map;
4078
4079 memset(&prg_attr, 0, sizeof(prg_attr));
4080 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
4081 prg_attr.insns = insns;
4082 prg_attr.insns_cnt = ARRAY_SIZE(insns);
4083 prg_attr.license = "GPL";
4084
4085 ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
4086 close(map);
4087 return probe_fd(ret);
4088}
4089
4090static int probe_kern_btf(void)
4091{
4092 static const char strs[] = "\0int";
4093 __u32 types[] = {
4094 /* int */
4095 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4096 };
4097
4098 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4099 strs, sizeof(strs)));
4100}
4101
4102static int probe_kern_btf_func(void)
4103{
4104 static const char strs[] = "\0int\0x\0a";
4105 /* void x(int a) {} */
4106 __u32 types[] = {
4107 /* int */
4108 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
4109 /* FUNC_PROTO */ /* [2] */
4110 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
4111 BTF_PARAM_ENC(7, 1),
4112 /* FUNC x */ /* [3] */
4113 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
4114 };
4115
4116 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4117 strs, sizeof(strs)));
4118}
4119
4120static int probe_kern_btf_func_global(void)
4121{
4122 static const char strs[] = "\0int\0x\0a";
4123 /* static void x(int a) {} */
4124 __u32 types[] = {
4125 /* int */
4126 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
4127 /* FUNC_PROTO */ /* [2] */
4128 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
4129 BTF_PARAM_ENC(7, 1),
4130 /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
4131 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
4132 };
4133
4134 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4135 strs, sizeof(strs)));
4136}
4137
4138static int probe_kern_btf_datasec(void)
4139{
4140 static const char strs[] = "\0x\0.data";
4141 /* static int x; */
4142 __u32 types[] = {
4143 /* int */
4144 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
4145 /* VAR x */ /* [2] */
4146 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
4147 BTF_VAR_STATIC,
4148 /* DATASEC val */ /* [3] */
4149 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
4150 BTF_VAR_SECINFO_ENC(2, 0, 4),
4151 };
4152
4153 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4154 strs, sizeof(strs)));
4155}
4156
4157static int probe_kern_btf_float(void)
4158{
4159 static const char strs[] = "\0float";
4160 __u32 types[] = {
4161 /* float */
4162 BTF_TYPE_FLOAT_ENC(1, 4),
4163 };
4164
4165 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4166 strs, sizeof(strs)));
4167}
4168
4169static int probe_kern_array_mmap(void)
4170{
4171 struct bpf_create_map_attr attr = {
4172 .map_type = BPF_MAP_TYPE_ARRAY,
4173 .map_flags = BPF_F_MMAPABLE,
4174 .key_size = sizeof(int),
4175 .value_size = sizeof(int),
4176 .max_entries = 1,
4177 };
4178
4179 return probe_fd(bpf_create_map_xattr(&attr));
4180}
4181
4182static int probe_kern_exp_attach_type(void)
4183{
4184 struct bpf_load_program_attr attr;
4185 struct bpf_insn insns[] = {
4186 BPF_MOV64_IMM(BPF_REG_0, 0),
4187 BPF_EXIT_INSN(),
4188 };
4189
4190 memset(&attr, 0, sizeof(attr));
4191 /* use any valid combination of program type and (optional)
4192 * non-zero expected attach type (i.e., not BPF_CGROUP_INET_INGRESS, which is 0)
4193 * to see if kernel supports expected_attach_type field for
4194 * BPF_PROG_LOAD command
4195 */
4196 attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
4197 attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
4198 attr.insns = insns;
4199 attr.insns_cnt = ARRAY_SIZE(insns);
4200 attr.license = "GPL";
4201
4202 return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
4203}
4204
4205static int probe_kern_probe_read_kernel(void)
4206{
4207 struct bpf_load_program_attr attr;
4208 struct bpf_insn insns[] = {
4209 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
4210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
4211 BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
4212 BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
4213 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
4214 BPF_EXIT_INSN(),
4215 };
4216
4217 memset(&attr, 0, sizeof(attr));
4218 attr.prog_type = BPF_PROG_TYPE_KPROBE;
4219 attr.insns = insns;
4220 attr.insns_cnt = ARRAY_SIZE(insns);
4221 attr.license = "GPL";
4222
4223 return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
4224}
4225
4226static int probe_prog_bind_map(void)
4227{
4228 struct bpf_load_program_attr prg_attr;
4229 struct bpf_create_map_attr map_attr;
4230 char *cp, errmsg[STRERR_BUFSIZE];
4231 struct bpf_insn insns[] = {
4232 BPF_MOV64_IMM(BPF_REG_0, 0),
4233 BPF_EXIT_INSN(),
4234 };
4235 int ret, map, prog;
4236
4237 memset(&map_attr, 0, sizeof(map_attr));
4238 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
4239 map_attr.key_size = sizeof(int);
4240 map_attr.value_size = 32;
4241 map_attr.max_entries = 1;
4242
4243 map = bpf_create_map_xattr(&map_attr);
4244 if (map < 0) {
4245 ret = -errno;
4246 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4247 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4248 __func__, cp, -ret);
4249 return ret;
4250 }
4251
4252 memset(&prg_attr, 0, sizeof(prg_attr));
4253 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
4254 prg_attr.insns = insns;
4255 prg_attr.insns_cnt = ARRAY_SIZE(insns);
4256 prg_attr.license = "GPL";
4257
4258 prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
4259 if (prog < 0) {
4260 close(map);
4261 return 0;
4262 }
4263
4264 ret = bpf_prog_bind_map(prog, map, NULL);
4265
4266 close(map);
4267 close(prog);
4268
4269 return ret >= 0;
4270}
4271
4272static int probe_module_btf(void)
4273{
4274 static const char strs[] = "\0int";
4275 __u32 types[] = {
4276 /* int */
4277 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4278 };
4279 struct bpf_btf_info info;
4280 __u32 len = sizeof(info);
4281 char name[16];
4282 int fd, err;
4283
4284 fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
4285 if (fd < 0)
4286 return 0; /* BTF not supported at all */
4287
4288 memset(&info, 0, sizeof(info));
4289 info.name = ptr_to_u64(name);
4290 info.name_len = sizeof(name);
4291
4292 /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
4293 * kernel's module BTF support coincides with support for
4294 * name/name_len fields in struct bpf_btf_info.
4295 */
4296 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4297 close(fd);
4298 return !err;
4299}
4300
4301enum kern_feature_result {
4302 FEAT_UNKNOWN = 0,
4303 FEAT_SUPPORTED = 1,
4304 FEAT_MISSING = 2,
4305};
4306
4307typedef int (*feature_probe_fn)(void);
4308
4309static struct kern_feature_desc {
4310 const char *desc;
4311 feature_probe_fn probe;
4312 enum kern_feature_result res;
4313} feature_probes[__FEAT_CNT] = {
4314 [FEAT_PROG_NAME] = {
4315 "BPF program name", probe_kern_prog_name,
4316 },
4317 [FEAT_GLOBAL_DATA] = {
4318 "global variables", probe_kern_global_data,
4319 },
4320 [FEAT_BTF] = {
4321 "minimal BTF", probe_kern_btf,
4322 },
4323 [FEAT_BTF_FUNC] = {
4324 "BTF functions", probe_kern_btf_func,
4325 },
4326 [FEAT_BTF_GLOBAL_FUNC] = {
4327 "BTF global function", probe_kern_btf_func_global,
4328 },
4329 [FEAT_BTF_DATASEC] = {
4330 "BTF data section and variable", probe_kern_btf_datasec,
4331 },
4332 [FEAT_ARRAY_MMAP] = {
4333 "ARRAY map mmap()", probe_kern_array_mmap,
4334 },
4335 [FEAT_EXP_ATTACH_TYPE] = {
4336 "BPF_PROG_LOAD expected_attach_type attribute",
4337 probe_kern_exp_attach_type,
4338 },
4339 [FEAT_PROBE_READ_KERN] = {
4340 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
4341 },
4342 [FEAT_PROG_BIND_MAP] = {
4343 "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
4344 },
4345 [FEAT_MODULE_BTF] = {
4346 "module BTF support", probe_module_btf,
4347 },
4348 [FEAT_BTF_FLOAT] = {
4349 "BTF_KIND_FLOAT support", probe_kern_btf_float,
4350 },
4351};
4352
4353static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
4354{
4355 struct kern_feature_desc *feat = &feature_probes[feat_id];
4356 int ret;
4357
4358 if (obj->gen_loader)
4359 /* When generating a loader program, assume the latest kernel
4360 * to avoid doing extra prog_load and map_create syscalls.
4361 */
4362 return true;
4363
4364 if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
4365 ret = feat->probe();
4366 if (ret > 0) {
4367 WRITE_ONCE(feat->res, FEAT_SUPPORTED);
4368 } else if (ret == 0) {
4369 WRITE_ONCE(feat->res, FEAT_MISSING);
4370 } else {
4371 pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
4372 WRITE_ONCE(feat->res, FEAT_MISSING);
4373 }
4374 }
4375
4376 return READ_ONCE(feat->res) == FEAT_SUPPORTED;
4377}
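
/* Probe results are cached per feature, so repeated kernel_supports() calls
 * are cheap. A typical caller pattern, taken from bpf_object__create_map()
 * below, gates an optional attribute on the probe result:
 *
 *	if (kernel_supports(obj, FEAT_PROG_NAME))
 *		create_attr.name = map->name;
 */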
4378
4379static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4380{
4381 struct bpf_map_info map_info = {};
4382 char msg[STRERR_BUFSIZE];
4383 __u32 map_info_len;
4384
4385 map_info_len = sizeof(map_info);
4386
4387 if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
4388 pr_warn("failed to get map info for map FD %d: %s\n",
4389 map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
4390 return false;
4391 }
4392
4393 return (map_info.type == map->def.type &&
4394 map_info.key_size == map->def.key_size &&
4395 map_info.value_size == map->def.value_size &&
4396 map_info.max_entries == map->def.max_entries &&
4397 map_info.map_flags == map->def.map_flags);
4398}
4399
4400static int
4401bpf_object__reuse_map(struct bpf_map *map)
4402{
4403 char *cp, errmsg[STRERR_BUFSIZE];
4404 int err, pin_fd;
4405
4406 pin_fd = bpf_obj_get(map->pin_path);
4407 if (pin_fd < 0) {
4408 err = -errno;
4409 if (err == -ENOENT) {
4410 pr_debug("found no pinned map to reuse at '%s'\n",
4411 map->pin_path);
4412 return 0;
4413 }
4414
4415 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4416 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4417 map->pin_path, cp);
4418 return err;
4419 }
4420
4421 if (!map_is_reuse_compat(map, pin_fd)) {
4422 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4423 map->pin_path);
4424 close(pin_fd);
4425 return -EINVAL;
4426 }
4427
4428 err = bpf_map__reuse_fd(map, pin_fd);
4429 if (err) {
4430 close(pin_fd);
4431 return err;
4432 }
4433 map->pinned = true;
4434 pr_debug("reused pinned map at '%s'\n", map->pin_path);
4435
4436 return 0;
4437}
4438
4439static int
4440bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4441{
4442 enum libbpf_map_type map_type = map->libbpf_type;
4443 char *cp, errmsg[STRERR_BUFSIZE];
4444 int err, zero = 0;
4445
4446 if (obj->gen_loader) {
4447 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
4448 map->mmaped, map->def.value_size);
4449 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
4450 bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
4451 return 0;
4452 }
4453 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4454 if (err) {
4455 err = -errno;
4456 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4457 pr_warn("Error setting initial map(%s) contents: %s\n",
4458 map->name, cp);
4459 return err;
4460 }
4461
4462 /* Freeze .rodata and .kconfig map as read-only from syscall side. */
4463 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
4464 err = bpf_map_freeze(map->fd);
4465 if (err) {
4466 err = -errno;
4467 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4468 pr_warn("Error freezing map(%s) as read-only: %s\n",
4469 map->name, cp);
4470 return err;
4471 }
4472 }
4473 return 0;
4474}
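
/* Freezing .rodata is what makes read-only globals trustworthy: once the map
 * is frozen, user space can no longer modify the values, so the verifier may
 * treat them as known constants (which enables dead code elimination). A
 * BPF-side sketch of such a global (an assumed example, not from this file):
 *
 *	const volatile int debug_enabled = 0;	// placed in .rodata
 */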
4475
4476static void bpf_map__destroy(struct bpf_map *map);
4477
4478static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
4479{
4480 struct bpf_create_map_attr create_attr;
4481 struct bpf_map_def *def = &map->def;
4482
4483 memset(&create_attr, 0, sizeof(create_attr));
4484
4485 if (kernel_supports(obj, FEAT_PROG_NAME))
4486 create_attr.name = map->name;
4487 create_attr.map_ifindex = map->map_ifindex;
4488 create_attr.map_type = def->type;
4489 create_attr.map_flags = def->map_flags;
4490 create_attr.key_size = def->key_size;
4491 create_attr.value_size = def->value_size;
4492 create_attr.numa_node = map->numa_node;
4493
4494 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
4495 int nr_cpus;
4496
4497 nr_cpus = libbpf_num_possible_cpus();
4498 if (nr_cpus < 0) {
4499 pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
4500 map->name, nr_cpus);
4501 return nr_cpus;
4502 }
4503 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
4504 create_attr.max_entries = nr_cpus;
4505 } else {
4506 create_attr.max_entries = def->max_entries;
4507 }
4508
4509 if (bpf_map__is_struct_ops(map))
4510 create_attr.btf_vmlinux_value_type_id =
4511 map->btf_vmlinux_value_type_id;
4512
4513 create_attr.btf_fd = 0;
4514 create_attr.btf_key_type_id = 0;
4515 create_attr.btf_value_type_id = 0;
4516 if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
4517 create_attr.btf_fd = btf__fd(obj->btf);
4518 create_attr.btf_key_type_id = map->btf_key_type_id;
4519 create_attr.btf_value_type_id = map->btf_value_type_id;
4520 }
4521
4522 if (bpf_map_type__is_map_in_map(def->type)) {
4523 if (map->inner_map) {
4524 int err;
4525
4526 err = bpf_object__create_map(obj, map->inner_map, true);
4527 if (err) {
4528 pr_warn("map '%s': failed to create inner map: %d\n",
4529 map->name, err);
4530 return err;
4531 }
4532 map->inner_map_fd = bpf_map__fd(map->inner_map);
4533 }
4534 if (map->inner_map_fd >= 0)
4535 create_attr.inner_map_fd = map->inner_map_fd;
4536 }
4537
4538 if (obj->gen_loader) {
4539 bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? -1 : map - obj->maps);
4540 /* Pretend to have valid FD to pass various fd >= 0 checks.
4541 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
4542 */
4543 map->fd = 0;
4544 } else {
4545 map->fd = bpf_create_map_xattr(&create_attr);
4546 }
4547 if (map->fd < 0 && (create_attr.btf_key_type_id ||
4548 create_attr.btf_value_type_id)) {
4549 char *cp, errmsg[STRERR_BUFSIZE];
4550 int err = -errno;
4551
4552 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4553 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
4554 map->name, cp, err);
4555 create_attr.btf_fd = 0;
4556 create_attr.btf_key_type_id = 0;
4557 create_attr.btf_value_type_id = 0;
4558 map->btf_key_type_id = 0;
4559 map->btf_value_type_id = 0;
4560 map->fd = bpf_create_map_xattr(&create_attr);
4561 }
4562
4563 if (map->fd < 0)
4564 return -errno;
4565
4566 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
4567 if (obj->gen_loader)
4568 map->inner_map->fd = -1;
4569 bpf_map__destroy(map->inner_map);
4570 zfree(&map->inner_map);
4571 }
4572
4573 return 0;
4574}
4575
4576static int init_map_slots(struct bpf_object *obj, struct bpf_map *map)
4577{
4578 const struct bpf_map *targ_map;
4579 unsigned int i;
4580 int fd, err = 0;
4581
4582 for (i = 0; i < map->init_slots_sz; i++) {
4583 if (!map->init_slots[i])
4584 continue;
4585
4586 targ_map = map->init_slots[i];
4587 fd = bpf_map__fd(targ_map);
4588 if (obj->gen_loader) {
4589 pr_warn("// TODO map_update_elem: idx %td key %d value==map_idx %td\n",
4590 map - obj->maps, i, targ_map - obj->maps);
4591 return -ENOTSUP;
4592 } else {
4593 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
4594 }
4595 if (err) {
4596 err = -errno;
4597 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
4598 map->name, i, targ_map->name,
4599 fd, err);
4600 return err;
4601 }
4602 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
4603 map->name, i, targ_map->name, fd);
4604 }
4605
4606 zfree(&map->init_slots);
4607 map->init_slots_sz = 0;
4608
4609 return 0;
4610}
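
/* The init_slots handled above come from BTF-defined map-in-map declarations
 * on the BPF side. A hedged sketch using the usual bpf_helpers.h macros; the
 * map names are hypothetical:
 *
 *	struct inner {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, int);
 *	} inner_a SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 4);
 *		__type(key, int);
 *		__array(values, struct inner);
 *	} outer SEC(".maps") = {
 *		.values = { [0] = &inner_a },
 *	};
 *
 * Each non-NULL .values entry becomes an init_slots[] element that is written
 * into the outer map with bpf_map_update_elem() right after creation.
 */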
4611
4612static int
4613bpf_object__create_maps(struct bpf_object *obj)
4614{
4615 struct bpf_map *map;
4616 char *cp, errmsg[STRERR_BUFSIZE];
4617 unsigned int i, j;
4618 int err;
4619
4620 for (i = 0; i < obj->nr_maps; i++) {
4621 map = &obj->maps[i];
4622
4623 if (map->pin_path) {
4624 err = bpf_object__reuse_map(map);
4625 if (err) {
4626 pr_warn("map '%s': error reusing pinned map\n",
4627 map->name);
4628 goto err_out;
4629 }
4630 }
4631
4632 if (map->fd >= 0) {
4633 pr_debug("map '%s': skipping creation (preset fd=%d)\n",
4634 map->name, map->fd);
4635 } else {
4636 err = bpf_object__create_map(obj, map, false);
4637 if (err)
4638 goto err_out;
4639
4640 pr_debug("map '%s': created successfully, fd=%d\n",
4641 map->name, map->fd);
4642
4643 if (bpf_map__is_internal(map)) {
4644 err = bpf_object__populate_internal_map(obj, map);
4645 if (err < 0) {
4646 zclose(map->fd);
4647 goto err_out;
4648 }
4649 }
4650
4651 if (map->init_slots_sz) {
4652 err = init_map_slots(obj, map);
4653 if (err < 0) {
4654 zclose(map->fd);
4655 goto err_out;
4656 }
4657 }
4658 }
4659
4660 if (map->pin_path && !map->pinned) {
4661 err = bpf_map__pin(map, NULL);
4662 if (err) {
4663 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
4664 map->name, map->pin_path, err);
4665 zclose(map->fd);
4666 goto err_out;
4667 }
4668 }
4669 }
4670
4671 return 0;
4672
4673err_out:
4674 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4675 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
4676 pr_perm_msg(err);
4677 for (j = 0; j < i; j++)
4678 zclose(obj->maps[j].fd);
4679 return err;
4680}
4681
4682#define BPF_CORE_SPEC_MAX_LEN 64
4683
4684/* represents BPF CO-RE field or array element accessor */
4685struct bpf_core_accessor {
4686 __u32 type_id; /* struct/union type or array element type */
4687 __u32 idx; /* field index or array index */
4688 const char *name; /* field name or NULL for array accessor */
4689};
4690
4691struct bpf_core_spec {
4692 const struct btf *btf;
4693 /* high-level spec: named fields and array indices only */
4694 struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
4695 /* original unresolved (no skip_mods_or_typedefs) root type ID */
4696 __u32 root_type_id;
4697 /* CO-RE relocation kind */
4698 enum bpf_core_relo_kind relo_kind;
4699 /* high-level spec length */
4700 int len;
4701 /* raw, low-level spec: 1-to-1 with accessor spec string */
4702 int raw_spec[BPF_CORE_SPEC_MAX_LEN];
4703 /* raw spec length */
4704 int raw_len;
4705 /* field bit offset represented by spec */
4706 __u32 bit_offset;
4707};
4708
4709static bool str_is_empty(const char *s)
4710{
4711 return !s || !s[0];
4712}
4713
4714static bool is_flex_arr(const struct btf *btf,
4715 const struct bpf_core_accessor *acc,
4716 const struct btf_array *arr)
4717{
4718 const struct btf_type *t;
4719
4720 /* it's not a flexible array if it's not inside a struct or has a non-zero size */
4721 if (!acc->name || arr->nelems > 0)
4722 return false;
4723
4724 /* has to be the last member of enclosing struct */
4725 t = btf__type_by_id(btf, acc->type_id);
4726 return acc->idx == btf_vlen(t) - 1;
4727}
4728
4729static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
4730{
4731 switch (kind) {
4732 case BPF_FIELD_BYTE_OFFSET: return "byte_off";
4733 case BPF_FIELD_BYTE_SIZE: return "byte_sz";
4734 case BPF_FIELD_EXISTS: return "field_exists";
4735 case BPF_FIELD_SIGNED: return "signed";
4736 case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
4737 case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
4738 case BPF_TYPE_ID_LOCAL: return "local_type_id";
4739 case BPF_TYPE_ID_TARGET: return "target_type_id";
4740 case BPF_TYPE_EXISTS: return "type_exists";
4741 case BPF_TYPE_SIZE: return "type_size";
4742 case BPF_ENUMVAL_EXISTS: return "enumval_exists";
4743 case BPF_ENUMVAL_VALUE: return "enumval_value";
4744 default: return "unknown";
4745 }
4746}
4747
4748static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
4749{
4750 switch (kind) {
4751 case BPF_FIELD_BYTE_OFFSET:
4752 case BPF_FIELD_BYTE_SIZE:
4753 case BPF_FIELD_EXISTS:
4754 case BPF_FIELD_SIGNED:
4755 case BPF_FIELD_LSHIFT_U64:
4756 case BPF_FIELD_RSHIFT_U64:
4757 return true;
4758 default:
4759 return false;
4760 }
4761}
4762
4763static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
4764{
4765 switch (kind) {
4766 case BPF_TYPE_ID_LOCAL:
4767 case BPF_TYPE_ID_TARGET:
4768 case BPF_TYPE_EXISTS:
4769 case BPF_TYPE_SIZE:
4770 return true;
4771 default:
4772 return false;
4773 }
4774}
4775
4776static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
4777{
4778 switch (kind) {
4779 case BPF_ENUMVAL_EXISTS:
4780 case BPF_ENUMVAL_VALUE:
4781 return true;
4782 default:
4783 return false;
4784 }
4785}
4786
4787/*
4788 * Turn bpf_core_relo into a low- and high-level spec representation,
4789 * validating correctness along the way, as well as calculating resulting
4790 * field bit offset, specified by accessor string. Low-level spec captures
4791 * every single level of nestedness, including traversing anonymous
4792 * struct/union members. High-level one only captures semantically meaningful
4793 * "turning points": named fields and array indicies.
4794 * E.g., for this case:
4795 *
4796 * struct sample {
4797 * int __unimportant;
4798 * struct {
4799 * int __1;
4800 * int __2;
4801 * int a[7];
4802 * };
4803 * };
4804 *
4805 * struct sample *s = ...;
4806 *
4807 * int *x = &s->a[3]; // access string = '0:1:2:3'
4808 *
4809 * Low-level spec has 1:1 mapping with each element of access string (it's
4810 * just a parsed access string representation): [0, 1, 2, 3].
4811 *
4812 * High-level spec will capture only 3 points:
4813 * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
4814 * - field 'a' access (corresponds to '2' in low-level spec);
4815 * - array element #3 access (corresponds to '3' in low-level spec).
4816 *
4817 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
4818 * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
4819 * spec and raw_spec are kept empty.
4820 *
4821 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
4822 * string to specify enumerator's value index that need to be relocated.
4823 */
4824static int bpf_core_parse_spec(const struct btf *btf,
4825 __u32 type_id,
4826 const char *spec_str,
4827 enum bpf_core_relo_kind relo_kind,
4828 struct bpf_core_spec *spec)
4829{
4830 int access_idx, parsed_len, i;
4831 struct bpf_core_accessor *acc;
4832 const struct btf_type *t;
4833 const char *name;
4834 __u32 id;
4835 __s64 sz;
4836
4837 if (str_is_empty(spec_str) || *spec_str == ':')
4838 return -EINVAL;
4839
4840 memset(spec, 0, sizeof(*spec));
4841 spec->btf = btf;
4842 spec->root_type_id = type_id;
4843 spec->relo_kind = relo_kind;
4844
4845 /* type-based relocations don't have a field access string */
4846 if (core_relo_is_type_based(relo_kind)) {
4847 if (strcmp(spec_str, "0"))
4848 return -EINVAL;
4849 return 0;
4850 }
4851
4852 /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
4853 while (*spec_str) {
4854 if (*spec_str == ':')
4855 ++spec_str;
4856 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
4857 return -EINVAL;
4858 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
4859 return -E2BIG;
4860 spec_str += parsed_len;
4861 spec->raw_spec[spec->raw_len++] = access_idx;
4862 }
4863
4864 if (spec->raw_len == 0)
4865 return -EINVAL;
4866
4867 t = skip_mods_and_typedefs(btf, type_id, &id);
4868 if (!t)
4869 return -EINVAL;
4870
4871 access_idx = spec->raw_spec[0];
4872 acc = &spec->spec[0];
4873 acc->type_id = id;
4874 acc->idx = access_idx;
4875 spec->len++;
4876
4877 if (core_relo_is_enumval_based(relo_kind)) {
4878 if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
4879 return -EINVAL;
4880
4881 /* record the enumerator name in the first accessor */
4882 acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
4883 return 0;
4884 }
4885
4886 if (!core_relo_is_field_based(relo_kind))
4887 return -EINVAL;
4888
4889 sz = btf__resolve_size(btf, id);
4890 if (sz < 0)
4891 return sz;
4892 spec->bit_offset = access_idx * sz * 8;
4893
4894 for (i = 1; i < spec->raw_len; i++) {
4895 t = skip_mods_and_typedefs(btf, id, &id);
4896 if (!t)
4897 return -EINVAL;
4898
4899 access_idx = spec->raw_spec[i];
4900 acc = &spec->spec[spec->len];
4901
4902 if (btf_is_composite(t)) {
4903 const struct btf_member *m;
4904 __u32 bit_offset;
4905
4906 if (access_idx >= btf_vlen(t))
4907 return -EINVAL;
4908
4909 bit_offset = btf_member_bit_offset(t, access_idx);
4910 spec->bit_offset += bit_offset;
4911
4912 m = btf_members(t) + access_idx;
4913 if (m->name_off) {
4914 name = btf__name_by_offset(btf, m->name_off);
4915 if (str_is_empty(name))
4916 return -EINVAL;
4917
4918 acc->type_id = id;
4919 acc->idx = access_idx;
4920 acc->name = name;
4921 spec->len++;
4922 }
4923
4924 id = m->type;
4925 } else if (btf_is_array(t)) {
4926 const struct btf_array *a = btf_array(t);
4927 bool flex;
4928
4929 t = skip_mods_and_typedefs(btf, a->type, &id);
4930 if (!t)
4931 return -EINVAL;
4932
4933 flex = is_flex_arr(btf, acc - 1, a);
4934 if (!flex && access_idx >= a->nelems)
4935 return -EINVAL;
4936
4937 spec->spec[spec->len].type_id = id;
4938 spec->spec[spec->len].idx = access_idx;
4939 spec->len++;
4940
4941 sz = btf__resolve_size(btf, id);
4942 if (sz < 0)
4943 return sz;
4944 spec->bit_offset += access_idx * sz * 8;
4945 } else {
4946 pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
4947 type_id, spec_str, i, id, btf_kind_str(t));
4948 return -EINVAL;
4949 }
4950 }
4951
4952 return 0;
4953}
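
/* For reference (a sketch, not code from this file): the access strings
 * parsed above are emitted by clang's __builtin_preserve_access_index()
 * machinery, which the BPF_CORE_READ() family of macros from bpf_core_read.h
 * wraps. For example, something like
 *
 *	pid = BPF_CORE_READ(task, pid);
 *
 * records a BPF_FIELD_BYTE_OFFSET relocation whose spec string ("0:N", with N
 * being the local member index of 'pid') is what bpf_core_parse_spec() turns
 * into the raw_spec/spec arrays above.
 */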
4954
4955static bool bpf_core_is_flavor_sep(const char *s)
4956{
4957 /* check X___Y name pattern, where X and Y are not underscores */
4958 return s[0] != '_' && /* X */
4959 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
4960 s[4] != '_'; /* Y */
4961}
4962
4963/* Given 'some_struct_name___with_flavor' return the length of a name prefix
4964 * before last triple underscore. Struct name part after last triple
4965 * underscore is ignored by BPF CO-RE relocation during relocation matching.
4966 */
4967static size_t bpf_core_essential_name_len(const char *name)
4968{
4969 size_t n = strlen(name);
4970 int i;
4971
4972 for (i = n - 5; i >= 0; i--) {
4973 if (bpf_core_is_flavor_sep(name + i))
4974 return i + 1;
4975 }
4976 return n;
4977}
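
/* Example: for a local type named "task_struct___old" the essential name is
 * "task_struct" (length 11); the "___old" flavor suffix lets BPF code define
 * several local variants of one kernel type while still matching the plain
 * "task_struct" in target BTF during the candidate search below.
 */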
4978
4979struct core_cand
4980{
4981 const struct btf *btf;
4982 const struct btf_type *t;
4983 const char *name;
4984 __u32 id;
4985};
4986
4987/* dynamically sized list of type IDs and its associated struct btf */
4988struct core_cand_list {
4989 struct core_cand *cands;
4990 int len;
4991};
4992
4993static void bpf_core_free_cands(struct core_cand_list *cands)
4994{
4995 free(cands->cands);
4996 free(cands);
4997}
4998
4999static int bpf_core_add_cands(struct core_cand *local_cand,
5000 size_t local_essent_len,
5001 const struct btf *targ_btf,
5002 const char *targ_btf_name,
5003 int targ_start_id,
5004 struct core_cand_list *cands)
5005{
5006 struct core_cand *new_cands, *cand;
5007 const struct btf_type *t;
5008 const char *targ_name;
5009 size_t targ_essent_len;
5010 int n, i;
5011
5012 n = btf__get_nr_types(targ_btf);
5013 for (i = targ_start_id; i <= n; i++) {
5014 t = btf__type_by_id(targ_btf, i);
5015 if (btf_kind(t) != btf_kind(local_cand->t))
5016 continue;
5017
5018 targ_name = btf__name_by_offset(targ_btf, t->name_off);
5019 if (str_is_empty(targ_name))
5020 continue;
5021
5022 targ_essent_len = bpf_core_essential_name_len(targ_name);
5023 if (targ_essent_len != local_essent_len)
5024 continue;
5025
5026 if (strncmp(local_cand->name, targ_name, local_essent_len) != 0)
5027 continue;
5028
5029 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
5030 local_cand->id, btf_kind_str(local_cand->t),
5031 local_cand->name, i, btf_kind_str(t), targ_name,
5032 targ_btf_name);
5033 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5034 sizeof(*cands->cands));
5035 if (!new_cands)
5036 return -ENOMEM;
5037
5038 cand = &new_cands[cands->len];
5039 cand->btf = targ_btf;
5040 cand->t = t;
5041 cand->name = targ_name;
5042 cand->id = i;
5043
5044 cands->cands = new_cands;
5045 cands->len++;
5046 }
5047 return 0;
5048}
5049
5050static int load_module_btfs(struct bpf_object *obj)
5051{
5052 struct bpf_btf_info info;
5053 struct module_btf *mod_btf;
5054 struct btf *btf;
5055 char name[64];
5056 __u32 id = 0, len;
5057 int err, fd;
5058
5059 if (obj->btf_modules_loaded)
5060 return 0;
5061
5062 if (obj->gen_loader)
5063 return 0;
5064
5065 /* don't do this again, even if we find no module BTFs */
5066 obj->btf_modules_loaded = true;
5067
5068 /* kernel too old to support module BTFs */
5069 if (!kernel_supports(obj, FEAT_MODULE_BTF))
5070 return 0;
5071
5072 while (true) {
5073 err = bpf_btf_get_next_id(id, &id);
5074 if (err && errno == ENOENT)
5075 return 0;
5076 if (err) {
5077 err = -errno;
5078 pr_warn("failed to iterate BTF objects: %d\n", err);
5079 return err;
5080 }
5081
5082 fd = bpf_btf_get_fd_by_id(id);
5083 if (fd < 0) {
5084 if (errno == ENOENT)
5085 continue; /* expected race: BTF was unloaded */
5086 err = -errno;
5087 pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
5088 return err;
5089 }
5090
5091 len = sizeof(info);
5092 memset(&info, 0, sizeof(info));
5093 info.name = ptr_to_u64(name);
5094 info.name_len = sizeof(name);
5095
5096 err = bpf_obj_get_info_by_fd(fd, &info, &len);
5097 if (err) {
5098 err = -errno;
5099 pr_warn("failed to get BTF object #%d info: %d\n", id, err);
5100 goto err_out;
5101 }
5102
5103 /* ignore non-module BTFs */
5104 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
5105 close(fd);
5106 continue;
5107 }
5108
5109 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5110 err = libbpf_get_error(btf);
5111 if (err) {
5112 pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
5113 name, id, err);
5114 goto err_out;
5115 }
5116
5117 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5118 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5119 if (err)
5120 goto err_out;
5121
5122 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5123
5124 mod_btf->btf = btf;
5125 mod_btf->id = id;
5126 mod_btf->fd = fd;
5127 mod_btf->name = strdup(name);
5128 if (!mod_btf->name) {
5129 err = -ENOMEM;
5130 goto err_out;
5131 }
5132 continue;
5133
5134err_out:
5135 close(fd);
5136 return err;
5137 }
5138
5139 return 0;
5140}
5141
5142static struct core_cand_list *
5143bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
5144{
5145 struct core_cand local_cand = {};
5146 struct core_cand_list *cands;
5147 const struct btf *main_btf;
5148 size_t local_essent_len;
5149 int err, i;
5150
5151 local_cand.btf = local_btf;
5152 local_cand.t = btf__type_by_id(local_btf, local_type_id);
5153 if (!local_cand.t)
5154 return ERR_PTR(-EINVAL);
5155
5156 local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off);
5157 if (str_is_empty(local_cand.name))
5158 return ERR_PTR(-EINVAL);
5159 local_essent_len = bpf_core_essential_name_len(local_cand.name);
5160
5161 cands = calloc(1, sizeof(*cands));
5162 if (!cands)
5163 return ERR_PTR(-ENOMEM);
5164
5165 /* Attempt to find target candidates in vmlinux BTF first */
5166 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5167 err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
5168 if (err)
5169 goto err_out;
5170
5171 /* if vmlinux BTF has any candidate, don't go looking for module BTFs */
5172 if (cands->len)
5173 return cands;
5174
5175 /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
5176 if (obj->btf_vmlinux_override)
5177 return cands;
5178
5179 /* now look through module BTFs, trying to still find candidates */
5180 err = load_module_btfs(obj);
5181 if (err)
5182 goto err_out;
5183
5184 for (i = 0; i < obj->btf_module_cnt; i++) {
5185 err = bpf_core_add_cands(&local_cand, local_essent_len,
5186 obj->btf_modules[i].btf,
5187 obj->btf_modules[i].name,
5188 btf__get_nr_types(obj->btf_vmlinux) + 1,
5189 cands);
5190 if (err)
5191 goto err_out;
5192 }
5193
5194 return cands;
5195err_out:
5196 bpf_core_free_cands(cands);
5197 return ERR_PTR(err);
5198}
5199
5200/* Check two types for compatibility for the purpose of field access
5201 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
5202 * are relocating semantically compatible entities:
5203 * - any two STRUCTs/UNIONs are compatible and can be mixed;
5204 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
5205 * - any two PTRs are always compatible;
5206 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
5207 * least one of enums should be anonymous;
5208 * - for ENUMs, check sizes, names are ignored;
5209 * - for INT, size and signedness are ignored;
5210 * - any two FLOATs are always compatible;
5211 * - for ARRAY, dimensionality is ignored, element types are checked for
5212 * compatibility recursively;
5213 * - everything else shouldn't be ever a target of relocation.
5214 * These rules are not set in stone and probably will be adjusted as we get
5215 * more experience with using BPF CO-RE relocations.
5216 */
5217static int bpf_core_fields_are_compat(const struct btf *local_btf,
5218 __u32 local_id,
5219 const struct btf *targ_btf,
5220 __u32 targ_id)
5221{
5222 const struct btf_type *local_type, *targ_type;
5223
5224recur:
5225 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
5226 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5227 if (!local_type || !targ_type)
5228 return -EINVAL;
5229
5230 if (btf_is_composite(local_type) && btf_is_composite(targ_type))
5231 return 1;
5232 if (btf_kind(local_type) != btf_kind(targ_type))
5233 return 0;
5234
5235 switch (btf_kind(local_type)) {
5236 case BTF_KIND_PTR:
5237 case BTF_KIND_FLOAT:
5238 return 1;
5239 case BTF_KIND_FWD:
5240 case BTF_KIND_ENUM: {
5241 const char *local_name, *targ_name;
5242 size_t local_len, targ_len;
5243
5244 local_name = btf__name_by_offset(local_btf,
5245 local_type->name_off);
5246 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
5247 local_len = bpf_core_essential_name_len(local_name);
5248 targ_len = bpf_core_essential_name_len(targ_name);
5249 /* one of them is anonymous or both w/ same flavor-less names */
5250 return local_len == 0 || targ_len == 0 ||
5251 (local_len == targ_len &&
5252 strncmp(local_name, targ_name, local_len) == 0);
5253 }
5254 case BTF_KIND_INT:
5255 /* just reject deprecated bitfield-like integers; all other
5256 * integers are by default compatible between each other
5257 */
5258 return btf_int_offset(local_type) == 0 &&
5259 btf_int_offset(targ_type) == 0;
5260 case BTF_KIND_ARRAY:
5261 local_id = btf_array(local_type)->type;
5262 targ_id = btf_array(targ_type)->type;
5263 goto recur;
5264 default:
5265 pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
5266 btf_kind(local_type), local_id, targ_id);
5267 return 0;
5268 }
5269}
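
/* Example of the field-compat rules above: a local 'struct foo *' field
 * matches a target 'void *' field (any two PTRs are compatible), and 'int'
 * matches '__u64' (INT size and signedness are ignored, typedefs skipped),
 * while 'int' vs 'struct foo' is rejected because the kinds differ.
 */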
5270
5271/*
5272 * Given single high-level named field accessor in local type, find
5273 * corresponding high-level accessor for a target type. Along the way,
5274 * maintain low-level spec for target as well. Also keep updating target
5275 * bit offset.
5276 *
5277 * Searching is performed through recursive exhaustive enumeration of all
5278 * fields of a struct/union. If there are any anonymous (embedded)
5279 * structs/unions, they are recursively searched as well. If field with
5280 * desired name is found, check compatibility between local and target types,
5281 * before returning result.
5282 *
5283 * 1 is returned, if field is found.
5284 * 0 is returned if no compatible field is found.
5285 * <0 is returned on error.
5286 */
5287static int bpf_core_match_member(const struct btf *local_btf,
5288 const struct bpf_core_accessor *local_acc,
5289 const struct btf *targ_btf,
5290 __u32 targ_id,
5291 struct bpf_core_spec *spec,
5292 __u32 *next_targ_id)
5293{
5294 const struct btf_type *local_type, *targ_type;
5295 const struct btf_member *local_member, *m;
5296 const char *local_name, *targ_name;
5297 __u32 local_id;
5298 int i, n, found;
5299
5300 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5301 if (!targ_type)
5302 return -EINVAL;
5303 if (!btf_is_composite(targ_type))
5304 return 0;
5305
5306 local_id = local_acc->type_id;
5307 local_type = btf__type_by_id(local_btf, local_id);
5308 local_member = btf_members(local_type) + local_acc->idx;
5309 local_name = btf__name_by_offset(local_btf, local_member->name_off);
5310
5311 n = btf_vlen(targ_type);
5312 m = btf_members(targ_type);
5313 for (i = 0; i < n; i++, m++) {
5314 __u32 bit_offset;
5315
5316 bit_offset = btf_member_bit_offset(targ_type, i);
5317
5318 /* too deep struct/union/array nesting */
5319 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
5320 return -E2BIG;
5321
5322 /* speculate this member will be the good one */
5323 spec->bit_offset += bit_offset;
5324 spec->raw_spec[spec->raw_len++] = i;
5325
5326 targ_name = btf__name_by_offset(targ_btf, m->name_off);
5327 if (str_is_empty(targ_name)) {
5328 /* embedded struct/union, we need to go deeper */
5329 found = bpf_core_match_member(local_btf, local_acc,
5330 targ_btf, m->type,
5331 spec, next_targ_id);
5332 if (found) /* either found or error */
5333 return found;
5334 } else if (strcmp(local_name, targ_name) == 0) {
5335 /* matching named field */
5336 struct bpf_core_accessor *targ_acc;
5337
5338 targ_acc = &spec->spec[spec->len++];
5339 targ_acc->type_id = targ_id;
5340 targ_acc->idx = i;
5341 targ_acc->name = targ_name;
5342
5343 *next_targ_id = m->type;
5344 found = bpf_core_fields_are_compat(local_btf,
5345 local_member->type,
5346 targ_btf, m->type);
5347 if (!found)
5348 spec->len--; /* pop accessor */
5349 return found;
5350 }
5351 /* member turned out not to be what we looked for */
5352 spec->bit_offset -= bit_offset;
5353 spec->raw_len--;
5354 }
5355
5356 return 0;
5357}
5358
5359/* Check local and target types for compatibility. This check is used for
5360 * type-based CO-RE relocations and follows slightly different rules than
5361 * field-based relocations. This function assumes that root types were already
5362 * checked for name match. Beyond that initial root-level name check, names
5363 * are completely ignored. Compatibility rules are as follows:
5364 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5365 * kind should match for local and target types (i.e., STRUCT is not
5366 * compatible with UNION);
5367 * - for ENUMs, the size is ignored;
5368 * - for INT, size and signedness are ignored;
5369 * - for ARRAY, dimensionality is ignored, element types are checked for
5370 * compatibility recursively;
5371 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
5372 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5373 * - FUNC_PROTOs are compatible if they have compatible signature: same
5374 * number of input args and compatible return and argument types.
5375 * These rules are not set in stone and probably will be adjusted as we get
5376 * more experience with using BPF CO-RE relocations.
5377 */
5378static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5379 const struct btf *targ_btf, __u32 targ_id)
5380{
5381 const struct btf_type *local_type, *targ_type;
5382 int depth = 32; /* max recursion depth */
5383
5384 /* caller made sure that names match (ignoring flavor suffix) */
5385 local_type = btf__type_by_id(local_btf, local_id);
5386 targ_type = btf__type_by_id(targ_btf, targ_id);
5387 if (btf_kind(local_type) != btf_kind(targ_type))
5388 return 0;
5389
5390recur:
5391 depth--;
5392 if (depth < 0)
5393 return -EINVAL;
5394
5395 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
5396 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5397 if (!local_type || !targ_type)
5398 return -EINVAL;
5399
5400 if (btf_kind(local_type) != btf_kind(targ_type))
5401 return 0;
5402
5403 switch (btf_kind(local_type)) {
5404 case BTF_KIND_UNKN:
5405 case BTF_KIND_STRUCT:
5406 case BTF_KIND_UNION:
5407 case BTF_KIND_ENUM:
5408 case BTF_KIND_FWD:
5409 return 1;
5410 case BTF_KIND_INT:
5411 /* just reject deprecated bitfield-like integers; all other
5412 * integers are by default compatible between each other
5413 */
5414 return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
5415 case BTF_KIND_PTR:
5416 local_id = local_type->type;
5417 targ_id = targ_type->type;
5418 goto recur;
5419 case BTF_KIND_ARRAY:
5420 local_id = btf_array(local_type)->type;
5421 targ_id = btf_array(targ_type)->type;
5422 goto recur;
5423 case BTF_KIND_FUNC_PROTO: {
5424 struct btf_param *local_p = btf_params(local_type);
5425 struct btf_param *targ_p = btf_params(targ_type);
5426 __u16 local_vlen = btf_vlen(local_type);
5427 __u16 targ_vlen = btf_vlen(targ_type);
5428 int i, err;
5429
5430 if (local_vlen != targ_vlen)
5431 return 0;
5432
5433 for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
5434 skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
5435 skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
5436 err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
5437 if (err <= 0)
5438 return err;
5439 }
5440
5441 /* tail recurse for return type check */
5442 skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
5443 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
5444 goto recur;
5445 }
5446 default:
5447 pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
5448 btf_kind_str(local_type), local_id, targ_id);
5449 return 0;
5450 }
5451}
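
/* Example of the type-compat rules above: local 'struct task_struct___v2'
 * matches target 'struct task_struct' (root names were already matched modulo
 * the flavor suffix), 'enum e { A }' matches 'enum e { A, B, C }' (enum sizes
 * and values are ignored here), but 'struct x' never matches 'union x', since
 * the kind has to be the same.
 */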
5452
5453/*
5454 * Try to match local spec to a target type and, if successful, produce full
5455 * target spec (high-level, low-level + bit offset).
5456 */
5457static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
5458 const struct btf *targ_btf, __u32 targ_id,
5459 struct bpf_core_spec *targ_spec)
5460{
5461 const struct btf_type *targ_type;
5462 const struct bpf_core_accessor *local_acc;
5463 struct bpf_core_accessor *targ_acc;
5464 int i, sz, matched;
5465
5466 memset(targ_spec, 0, sizeof(*targ_spec));
5467 targ_spec->btf = targ_btf;
5468 targ_spec->root_type_id = targ_id;
5469 targ_spec->relo_kind = local_spec->relo_kind;
5470
5471 if (core_relo_is_type_based(local_spec->relo_kind)) {
5472 return bpf_core_types_are_compat(local_spec->btf,
5473 local_spec->root_type_id,
5474 targ_btf, targ_id);
5475 }
5476
5477 local_acc = &local_spec->spec[0];
5478 targ_acc = &targ_spec->spec[0];
5479
5480 if (core_relo_is_enumval_based(local_spec->relo_kind)) {
5481 size_t local_essent_len, targ_essent_len;
5482 const struct btf_enum *e;
5483 const char *targ_name;
5484
5485 /* has to resolve to an enum */
5486 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
5487 if (!btf_is_enum(targ_type))
5488 return 0;
5489
5490 local_essent_len = bpf_core_essential_name_len(local_acc->name);
5491
5492 for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
5493 targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
5494 targ_essent_len = bpf_core_essential_name_len(targ_name);
5495 if (targ_essent_len != local_essent_len)
5496 continue;
5497 if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
5498 targ_acc->type_id = targ_id;
5499 targ_acc->idx = i;
5500 targ_acc->name = targ_name;
5501 targ_spec->len++;
5502 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
5503 targ_spec->raw_len++;
5504 return 1;
5505 }
5506 }
5507 return 0;
5508 }
5509
5510 if (!core_relo_is_field_based(local_spec->relo_kind))
5511 return -EINVAL;
5512
5513 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
5514 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
5515 &targ_id);
5516 if (!targ_type)
5517 return -EINVAL;
5518
5519 if (local_acc->name) {
5520 matched = bpf_core_match_member(local_spec->btf,
5521 local_acc,
5522 targ_btf, targ_id,
5523 targ_spec, &targ_id);
5524 if (matched <= 0)
5525 return matched;
5526 } else {
5527 /* for i=0, targ_id is already treated as array element
5528 * type (because it's the original struct), for others
5529 * we should find array element type first
5530 */
5531 if (i > 0) {
5532 const struct btf_array *a;
5533 bool flex;
5534
5535 if (!btf_is_array(targ_type))
5536 return 0;
5537
5538 a = btf_array(targ_type);
5539 flex = is_flex_arr(targ_btf, targ_acc - 1, a);
5540 if (!flex && local_acc->idx >= a->nelems)
5541 return 0;
5542 if (!skip_mods_and_typedefs(targ_btf, a->type,
5543 &targ_id))
5544 return -EINVAL;
5545 }
5546
5547 /* too deep struct/union/array nesting */
5548 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
5549 return -E2BIG;
5550
5551 targ_acc->type_id = targ_id;
5552 targ_acc->idx = local_acc->idx;
5553 targ_acc->name = NULL;
5554 targ_spec->len++;
5555 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
5556 targ_spec->raw_len++;
5557
5558 sz = btf__resolve_size(targ_btf, targ_id);
5559 if (sz < 0)
5560 return sz;
5561 targ_spec->bit_offset += local_acc->idx * sz * 8;
5562 }
5563 }
5564
5565 return 1;
5566}
5567
5568static int bpf_core_calc_field_relo(const struct bpf_program *prog,
5569 const struct bpf_core_relo *relo,
5570 const struct bpf_core_spec *spec,
5571 __u32 *val, __u32 *field_sz, __u32 *type_id,
5572 bool *validate)
5573{
5574 const struct bpf_core_accessor *acc;
5575 const struct btf_type *t;
5576 __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
5577 const struct btf_member *m;
5578 const struct btf_type *mt;
5579 bool bitfield;
5580 __s64 sz;
5581
5582 *field_sz = 0;
5583
5584 if (relo->kind == BPF_FIELD_EXISTS) {
5585 *val = spec ? 1 : 0;
5586 return 0;
5587 }
5588
5589 if (!spec)
5590 return -EUCLEAN; /* request instruction poisoning */
5591
5592 acc = &spec->spec[spec->len - 1];
5593 t = btf__type_by_id(spec->btf, acc->type_id);
5594
5595 /* a[n] accessor needs special handling */
5596 if (!acc->name) {
5597 if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
5598 *val = spec->bit_offset / 8;
5599 /* remember field size for load/store mem size */
5600 sz = btf__resolve_size(spec->btf, acc->type_id);
5601 if (sz < 0)
5602 return -EINVAL;
5603 *field_sz = sz;
5604 *type_id = acc->type_id;
5605 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
5606 sz = btf__resolve_size(spec->btf, acc->type_id);
5607 if (sz < 0)
5608 return -EINVAL;
5609 *val = sz;
5610 } else {
5611 pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
5612 prog->name, relo->kind, relo->insn_off / 8);
5613 return -EINVAL;
5614 }
5615 if (validate)
5616 *validate = true;
5617 return 0;
5618 }
5619
5620 m = btf_members(t) + acc->idx;
5621 mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
5622 bit_off = spec->bit_offset;
5623 bit_sz = btf_member_bitfield_size(t, acc->idx);
5624
5625 bitfield = bit_sz > 0;
5626 if (bitfield) {
5627 byte_sz = mt->size;
5628 byte_off = bit_off / 8 / byte_sz * byte_sz;
5629 /* figure out smallest int size necessary for bitfield load */
5630 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
5631 if (byte_sz >= 8) {
5632 /* bitfield can't be read with 64-bit read */
5633 pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
5634 prog->name, relo->kind, relo->insn_off / 8);
5635 return -E2BIG;
5636 }
5637 byte_sz *= 2;
5638 byte_off = bit_off / 8 / byte_sz * byte_sz;
5639 }
5640 } else {
5641 sz = btf__resolve_size(spec->btf, field_type_id);
5642 if (sz < 0)
5643 return -EINVAL;
5644 byte_sz = sz;
5645 byte_off = spec->bit_offset / 8;
5646 bit_sz = byte_sz * 8;
5647 }
5648
5649 /* for bitfields, all the relocatable aspects are ambiguous and we
5650 * might disagree with compiler, so turn off validation of expected
5651 * value, except for signedness
5652 */
5653 if (validate)
5654 *validate = !bitfield;
5655
5656 switch (relo->kind) {
5657 case BPF_FIELD_BYTE_OFFSET:
5658 *val = byte_off;
5659 if (!bitfield) {
5660 *field_sz = byte_sz;
5661 *type_id = field_type_id;
5662 }
5663 break;
5664 case BPF_FIELD_BYTE_SIZE:
5665 *val = byte_sz;
5666 break;
5667 case BPF_FIELD_SIGNED:
5668 /* enums will be assumed unsigned */
5669 *val = btf_is_enum(mt) ||
5670 (btf_int_encoding(mt) & BTF_INT_SIGNED);
5671 if (validate)
5672 *validate = true; /* signedness is never ambiguous */
5673 break;
5674 case BPF_FIELD_LSHIFT_U64:
5675#if __BYTE_ORDER == __LITTLE_ENDIAN
5676 *val = 64 - (bit_off + bit_sz - byte_off * 8);
5677#else
5678 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
5679#endif
5680 break;
5681 case BPF_FIELD_RSHIFT_U64:
5682 *val = 64 - bit_sz;
5683 if (validate)
5684 *validate = true; /* right shift is never ambiguous */
5685 break;
5686 case BPF_FIELD_EXISTS:
5687 default:
5688 return -EOPNOTSUPP;
5689 }
5690
5691 return 0;
5692}
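
/* Bitfield read sketch: the LSHIFT_U64/RSHIFT_U64 values computed above are
 * meant to be used roughly as follows (approximately what the
 * BPF_CORE_READ_BITFIELD*() macros in bpf_core_read.h expand to):
 *
 *	__u64 val = <byte_sz-sized load at byte_off>;
 *
 *	val <<= BPF_FIELD_LSHIFT_U64;	// drop the bits above the bitfield
 *	val >>= BPF_FIELD_RSHIFT_U64;	// drop the bits below, extend to 64 bits
 *
 * with an arithmetic (signed) right shift used instead when BPF_FIELD_SIGNED
 * reports that the bitfield is signed.
 */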
5693
5694static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
5695 const struct bpf_core_spec *spec,
5696 __u32 *val)
5697{
5698 __s64 sz;
5699
5700 /* type-based relos return zero when target type is not found */
5701 if (!spec) {
5702 *val = 0;
5703 return 0;
5704 }
5705
5706 switch (relo->kind) {
5707 case BPF_TYPE_ID_TARGET:
5708 *val = spec->root_type_id;
5709 break;
5710 case BPF_TYPE_EXISTS:
5711 *val = 1;
5712 break;
5713 case BPF_TYPE_SIZE:
5714 sz = btf__resolve_size(spec->btf, spec->root_type_id);
5715 if (sz < 0)
5716 return -EINVAL;
5717 *val = sz;
5718 break;
5719 case BPF_TYPE_ID_LOCAL:
5720 /* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */
5721 default:
5722 return -EOPNOTSUPP;
5723 }
5724
5725 return 0;
5726}
5727
5728static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
5729 const struct bpf_core_spec *spec,
5730 __u32 *val)
5731{
5732 const struct btf_type *t;
5733 const struct btf_enum *e;
5734
5735 switch (relo->kind) {
5736 case BPF_ENUMVAL_EXISTS:
5737 *val = spec ? 1 : 0;
5738 break;
5739 case BPF_ENUMVAL_VALUE:
5740 if (!spec)
5741 return -EUCLEAN; /* request instruction poisoning */
5742 t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
5743 e = btf_enum(t) + spec->spec[0].idx;
5744 *val = e->val;
5745 break;
5746 default:
5747 return -EOPNOTSUPP;
5748 }
5749
5750 return 0;
5751}
5752
5753struct bpf_core_relo_res
5754{
5755 /* expected value in the instruction, unless validate == false */
5756 __u32 orig_val;
5757 /* new value that needs to be patched up to */
5758 __u32 new_val;
5759 /* relocation unsuccessful, poison instruction, but don't fail load */
5760 bool poison;
5761 /* some relocations can't be validated against orig_val */
5762 bool validate;
5763 /* for field byte offset relocations of the forms:
5764 * *(T *)(rX + <off>) = rY
5765 * rX = *(T *)(rY + <off>),
5766 * we remember original and resolved field size to adjust direct
5767 * memory loads of pointers and integers; this is necessary for 32-bit
5768 * host kernel architectures, but also allows to automatically
5769 * relocate fields that were resized from, e.g., u32 to u64, etc.
5770 */
5771 bool fail_memsz_adjust;
5772 __u32 orig_sz;
5773 __u32 orig_type_id;
5774 __u32 new_sz;
5775 __u32 new_type_id;
5776};
5777
5778/* Calculate original and target relocation values, given local and target
5779 * specs and relocation kind. These values are calculated for each candidate.
5780 * If there are multiple candidates, resulting values should all be consistent
5781 * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
5782 * If instruction has to be poisoned, *poison will be set to true.
5783 */
5784static int bpf_core_calc_relo(const struct bpf_program *prog,
5785 const struct bpf_core_relo *relo,
5786 int relo_idx,
5787 const struct bpf_core_spec *local_spec,
5788 const struct bpf_core_spec *targ_spec,
5789 struct bpf_core_relo_res *res)
5790{
5791 int err = -EOPNOTSUPP;
5792
5793 res->orig_val = 0;
5794 res->new_val = 0;
5795 res->poison = false;
5796 res->validate = true;
5797 res->fail_memsz_adjust = false;
5798 res->orig_sz = res->new_sz = 0;
5799 res->orig_type_id = res->new_type_id = 0;
5800
5801 if (core_relo_is_field_based(relo->kind)) {
5802 err = bpf_core_calc_field_relo(prog, relo, local_spec,
5803 &res->orig_val, &res->orig_sz,
5804 &res->orig_type_id, &res->validate);
5805 err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec,
5806 &res->new_val, &res->new_sz,
5807 &res->new_type_id, NULL);
5808 if (err)
5809 goto done;
5810 /* Validate if it's safe to adjust load/store memory size.
5811 * Adjustments are performed only if original and new memory
5812 * sizes differ.
5813 */
5814 res->fail_memsz_adjust = false;
5815 if (res->orig_sz != res->new_sz) {
5816 const struct btf_type *orig_t, *new_t;
5817
5818 orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
5819 new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
5820
5821 /* There are two use cases in which it's safe to
5822 * adjust load/store's mem size:
5823 * - reading a 32-bit kernel pointer, while on BPF
5824 *   side pointers are always 64-bit; in this case
5825 * it's safe to "downsize" instruction size due to
5826 * pointer being treated as unsigned integer with
5827 * zero-extended upper 32-bits;
5828 *   - reading unsigned integers, again because
5829 *   zero-extension preserves the value correctly.
5830 *
5831 * In all other cases it's incorrect to attempt to
5832 * load/store field because read value will be
5833 * incorrect, so we poison relocated instruction.
5834 */
5835 if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
5836 goto done;
5837 if (btf_is_int(orig_t) && btf_is_int(new_t) &&
5838 btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
5839 btf_int_encoding(new_t) != BTF_INT_SIGNED)
5840 goto done;
5841
5842 /* mark as invalid mem size adjustment, but this will
5843 * only be checked for LDX/STX/ST insns
5844 */
5845 res->fail_memsz_adjust = true;
5846 }
5847 } else if (core_relo_is_type_based(relo->kind)) {
5848 err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
5849 err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
5850 } else if (core_relo_is_enumval_based(relo->kind)) {
5851 err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
5852 err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
5853 }
5854
5855done:
5856 if (err == -EUCLEAN) {
5857 /* EUCLEAN is used to signal instruction poisoning request */
5858 res->poison = true;
5859 err = 0;
5860 } else if (err == -EOPNOTSUPP) {
5861 /* EOPNOTSUPP means unknown/unsupported relocation */
5862 pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
5863 prog->name, relo_idx, core_relo_kind_str(relo->kind),
5864 relo->kind, relo->insn_off / 8);
5865 }
5866
5867 return err;
5868}
5869
5870/*
5871 * Turn an instruction for which CO-RE relocation failed into an invalid one
5872 * with a distinct signature.
5873 */
5874static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
5875 int insn_idx, struct bpf_insn *insn)
5876{
5877 pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
5878 prog->name, relo_idx, insn_idx);
5879 insn->code = BPF_JMP | BPF_CALL;
5880 insn->dst_reg = 0;
5881 insn->src_reg = 0;
5882 insn->off = 0;
5883 /* if this instruction is reachable (not dead code),
5884 * verifier will complain with the following message:
5885 * invalid func unknown#195896080
5886 */
5887 insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
5888}
5889
5890static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
5891{
5892 switch (BPF_SIZE(insn->code)) {
5893 case BPF_DW: return 8;
5894 case BPF_W: return 4;
5895 case BPF_H: return 2;
5896 case BPF_B: return 1;
5897 default: return -1;
5898 }
5899}
5900
5901static int insn_bytes_to_bpf_size(__u32 sz)
5902{
5903 switch (sz) {
5904 case 8: return BPF_DW;
5905 case 4: return BPF_W;
5906 case 2: return BPF_H;
5907 case 1: return BPF_B;
5908 default: return -1;
5909 }
5910}
5911
5912/*
5913 * Patch relocatable BPF instruction.
5914 *
5915 * Patched value is determined by relocation kind and target specification.
5916 * For existence relocations target spec will be NULL if field/type is not found.
5917 * Expected insn->imm value is determined using relocation kind and local
5918 * spec, and is checked before patching instruction. If actual insn->imm value
5919 * is wrong, bail out with error.
5920 *
5921 * Currently supported classes of BPF instruction are:
5922 * 1. rX = <imm> (assignment with immediate operand);
5923 * 2. rX += <imm> (arithmetic operations with immediate operand);
5924 * 3. rX = <imm64> (load with 64-bit immediate value);
5925 * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
5926 * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
5927 * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
5928 */
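/* A minimal illustrative sketch (the offsets below are hypothetical, not taken
 * from any real kernel BTF): suppose the relocated instruction is a field read
 *
 *     r1 = *(u32 *)(r2 + 8);      // BPF_LDX, insn->off == 8 (local offset)
 *
 * and the target kernel's BTF places that field at byte offset 16. The code
 * below rewrites insn->off from 8 to 16. If the field also changed size, e.g.
 * from u32 to u64, and both are unsigned (or both are pointers), then
 * BPF_SIZE(insn->code) is adjusted from BPF_W to BPF_DW; otherwise the
 * instruction is poisoned instead of being patched incorrectly.
 */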
5929static int bpf_core_patch_insn(struct bpf_program *prog,
5930 const struct bpf_core_relo *relo,
5931 int relo_idx,
5932 const struct bpf_core_relo_res *res)
5933{
5934 __u32 orig_val, new_val;
5935 struct bpf_insn *insn;
5936 int insn_idx;
5937 __u8 class;
5938
5939 if (relo->insn_off % BPF_INSN_SZ)
5940 return -EINVAL;
5941 insn_idx = relo->insn_off / BPF_INSN_SZ;
5942 /* adjust insn_idx from section frame of reference to the local
5943 * program's frame of reference; (sub-)program code is not yet
5944 * relocated, so it's enough to just subtract in-section offset
5945 */
5946 insn_idx = insn_idx - prog->sec_insn_off;
5947 insn = &prog->insns[insn_idx];
5948 class = BPF_CLASS(insn->code);
5949
5950 if (res->poison) {
5951poison:
5952 /* poison second part of ldimm64 to avoid confusing error from
5953 * verifier about "unknown opcode 00"
5954 */
5955 if (is_ldimm64_insn(insn))
5956 bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1);
5957 bpf_core_poison_insn(prog, relo_idx, insn_idx, insn);
5958 return 0;
5959 }
5960
5961 orig_val = res->orig_val;
5962 new_val = res->new_val;
5963
5964 switch (class) {
5965 case BPF_ALU:
5966 case BPF_ALU64:
5967 if (BPF_SRC(insn->code) != BPF_K)
5968 return -EINVAL;
5969 if (res->validate && insn->imm != orig_val) {
5970 pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
5971 prog->name, relo_idx,
5972 insn_idx, insn->imm, orig_val, new_val);
5973 return -EINVAL;
5974 }
5975 orig_val = insn->imm;
5976 insn->imm = new_val;
5977 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
5978 prog->name, relo_idx, insn_idx,
5979 orig_val, new_val);
5980 break;
5981 case BPF_LDX:
5982 case BPF_ST:
5983 case BPF_STX:
5984 if (res->validate && insn->off != orig_val) {
5985 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
5986 prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val);
5987 return -EINVAL;
5988 }
5989 if (new_val > SHRT_MAX) {
5990 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
5991 prog->name, relo_idx, insn_idx, new_val);
5992 return -ERANGE;
5993 }
5994 if (res->fail_memsz_adjust) {
5995 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
5996 "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
5997 prog->name, relo_idx, insn_idx);
5998 goto poison;
5999 }
6000
6001 orig_val = insn->off;
6002 insn->off = new_val;
6003 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
6004 prog->name, relo_idx, insn_idx, orig_val, new_val);
6005
6006 if (res->new_sz != res->orig_sz) {
6007 int insn_bytes_sz, insn_bpf_sz;
6008
6009 insn_bytes_sz = insn_bpf_size_to_bytes(insn);
6010 if (insn_bytes_sz != res->orig_sz) {
6011 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
6012 prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
6013 return -EINVAL;
6014 }
6015
6016 insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
6017 if (insn_bpf_sz < 0) {
6018 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
6019 prog->name, relo_idx, insn_idx, res->new_sz);
6020 return -EINVAL;
6021 }
6022
6023 insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
6024 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
6025 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
6026 }
6027 break;
6028 case BPF_LD: {
6029 __u64 imm;
6030
6031 if (!is_ldimm64_insn(insn) ||
6032 insn[0].src_reg != 0 || insn[0].off != 0 ||
6033 insn_idx + 1 >= prog->insns_cnt ||
6034 insn[1].code != 0 || insn[1].dst_reg != 0 ||
6035 insn[1].src_reg != 0 || insn[1].off != 0) {
6036 pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
6037 prog->name, relo_idx, insn_idx);
6038 return -EINVAL;
6039 }
6040
6041 imm = insn[0].imm + ((__u64)insn[1].imm << 32);
6042 if (res->validate && imm != orig_val) {
6043 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
6044 prog->name, relo_idx,
6045 insn_idx, (unsigned long long)imm,
6046 orig_val, new_val);
6047 return -EINVAL;
6048 }
6049
6050 insn[0].imm = new_val;
6051 insn[1].imm = 0; /* currently only 32-bit values are supported */
6052 pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
6053 prog->name, relo_idx, insn_idx,
6054 (unsigned long long)imm, new_val);
6055 break;
6056 }
6057 default:
6058 pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
6059 prog->name, relo_idx, insn_idx, insn->code,
6060 insn->src_reg, insn->dst_reg, insn->off, insn->imm);
6061 return -EINVAL;
6062 }
6063
6064 return 0;
6065}
6066
6067/* Output spec definition in the format:
6068 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
6069 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
6070 */
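/* Purely illustrative sample of the resulting output (type ID, raw spec and
 * offset values are hypothetical):
 *
 *     [78] struct task_struct.pid (0:24 @ offset 1256)
 */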
6071static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
6072{
6073 const struct btf_type *t;
6074 const struct btf_enum *e;
6075 const char *s;
6076 __u32 type_id;
6077 int i;
6078
6079 type_id = spec->root_type_id;
6080 t = btf__type_by_id(spec->btf, type_id);
6081 s = btf__name_by_offset(spec->btf, t->name_off);
6082
6083 libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
6084
6085 if (core_relo_is_type_based(spec->relo_kind))
6086 return;
6087
6088 if (core_relo_is_enumval_based(spec->relo_kind)) {
6089 t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
6090 e = btf_enum(t) + spec->raw_spec[0];
6091 s = btf__name_by_offset(spec->btf, e->name_off);
6092
6093 libbpf_print(level, "::%s = %u", s, e->val);
6094 return;
6095 }
6096
6097 if (core_relo_is_field_based(spec->relo_kind)) {
6098 for (i = 0; i < spec->len; i++) {
6099 if (spec->spec[i].name)
6100 libbpf_print(level, ".%s", spec->spec[i].name);
6101 else if (i > 0 || spec->spec[i].idx > 0)
6102 libbpf_print(level, "[%u]", spec->spec[i].idx);
6103 }
6104
6105 libbpf_print(level, " (");
6106 for (i = 0; i < spec->raw_len; i++)
6107 libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
6108
6109 if (spec->bit_offset % 8)
6110 libbpf_print(level, " @ offset %u.%u)",
6111 spec->bit_offset / 8, spec->bit_offset % 8);
6112 else
6113 libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
6114 return;
6115 }
6116}
6117
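/* The candidate cache below is keyed directly by the local root BTF type ID:
 * the __u32 ID itself is stored as the hashmap key pointer (see
 * u32_as_hash_key()), so hashing and equality checks reduce to comparing raw
 * pointer values.
 */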
6118static size_t bpf_core_hash_fn(const void *key, void *ctx)
6119{
6120 return (size_t)key;
6121}
6122
6123static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
6124{
6125 return k1 == k2;
6126}
6127
6128static void *u32_as_hash_key(__u32 x)
6129{
6130 return (void *)(uintptr_t)x;
6131}
6132
6133/*
6134 * CO-RE relocate single instruction.
6135 *
6136 * The outline and important points of the algorithm:
6137 * 1. For given local type, find corresponding candidate target types.
6138 * Candidate type is a type with the same "essential" name, ignoring
6139 * everything after last triple underscore (___). E.g., `sample`,
6140 * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
6141 * for each other. Names with triple underscore are referred to as
6142 *    "flavors" and are useful, among other things, to allow
6143 *    specifying/supporting incompatible variations of the same kernel struct, which
6144 * might differ between different kernel versions and/or build
6145 * configurations.
6146 *
6147 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
6148 * converter, when deduplicated BTF of a kernel still contains more than
6149 *    one different type with the same name. In that case, ___2, ___3, etc.
6150 *    are appended starting from the second name conflict. But struct flavors are
6151 *    also useful to be defined "locally", in a BPF program, to extract the same
6152 *    data from incompatible changes between different kernel
6153 * versions/configurations. For instance, to handle field renames between
6154 * kernel versions, one can use two flavors of the struct name with the
6155 * same common name and use conditional relocations to extract that field,
6156 * depending on target kernel version.
6157 * 2. For each candidate type, try to match local specification to this
6158 * candidate target type. Matching involves finding corresponding
6159 * high-level spec accessors, meaning that all named fields should match,
6160 *    and that all array accesses should be within the actual bounds. Also,
6161 * types should be compatible (see bpf_core_fields_are_compat for details).
6162 * 3. It is supported and expected that there might be multiple flavors
6163 * matching the spec. As long as all the specs resolve to the same set of
6164 * offsets across all candidates, there is no error. If there is any
6165 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
6166 *    imperfections of BTF deduplication, which can cause slight duplication of
6167 * the same BTF type, if some directly or indirectly referenced (by
6168 * pointer) type gets resolved to different actual types in different
6169 * object files. If such situation occurs, deduplicated BTF will end up
6170 * with two (or more) structurally identical types, which differ only in
6171 * types they refer to through pointer. This should be OK in most cases and
6172 * is not an error.
6173 * 4. Candidate type search is performed by linearly scanning through all
6174 * types in target BTF. It is anticipated that this is overall more
6175 * efficient memory-wise and not significantly worse (if not better)
6176 * CPU-wise compared to prebuilding a map from all local type names to
6177 * a list of candidate type names. It's also sped up by caching resolved
6178 *    list of matching candidates for each local "root" type ID that has at
6179 * least one bpf_core_relo associated with it. This list is shared
6180 * between multiple relocations for the same type ID and is updated as some
6181 * of the candidates are pruned due to structural incompatibility.
6182 */
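/* A hedged usage sketch (struct and field names are made up for illustration;
 * bpf_core_field_exists() and BPF_CORE_READ() come from bpf_core_read.h): a
 * BPF program can define local "flavors" of a kernel struct and fall back
 * conditionally when a field is missing on the running kernel:
 *
 *     struct sample___with_new_field {
 *         int new_field;
 *     } __attribute__((preserve_access_index));
 *
 *     struct sample___legacy {
 *         int old_field;
 *     } __attribute__((preserve_access_index));
 *
 *     int read_field(void *sample_ptr)
 *     {
 *         struct sample___with_new_field *n = sample_ptr;
 *         struct sample___legacy *o = sample_ptr;
 *
 *         if (bpf_core_field_exists(n->new_field))
 *             return BPF_CORE_READ(n, new_field);
 *         return BPF_CORE_READ(o, old_field);
 *     }
 *
 * Both flavors share the essential name "sample", so both are matched against
 * the same set of candidate kernel types by the logic below.
 */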
6183static int bpf_core_apply_relo(struct bpf_program *prog,
6184 const struct bpf_core_relo *relo,
6185 int relo_idx,
6186 const struct btf *local_btf,
6187 struct hashmap *cand_cache)
6188{
6189 struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
6190 const void *type_key = u32_as_hash_key(relo->type_id);
6191 struct bpf_core_relo_res cand_res, targ_res;
6192 const struct btf_type *local_type;
6193 const char *local_name;
6194 struct core_cand_list *cands = NULL;
6195 __u32 local_id;
6196 const char *spec_str;
6197 int i, j, err;
6198
6199 local_id = relo->type_id;
6200 local_type = btf__type_by_id(local_btf, local_id);
6201 if (!local_type)
6202 return -EINVAL;
6203
6204 local_name = btf__name_by_offset(local_btf, local_type->name_off);
6205 if (!local_name)
6206 return -EINVAL;
6207
6208 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
6209 if (str_is_empty(spec_str))
6210 return -EINVAL;
6211
6212 if (prog->obj->gen_loader) {
6213 pr_warn("// TODO core_relo: prog %td insn[%d] %s %s kind %d\n",
6214 prog - prog->obj->programs, relo->insn_off / 8,
6215 local_name, spec_str, relo->kind);
6216 return -ENOTSUP;
6217 }
6218 err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
6219 if (err) {
6220 pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
6221 prog->name, relo_idx, local_id, btf_kind_str(local_type),
6222 str_is_empty(local_name) ? "<anon>" : local_name,
6223 spec_str, err);
6224 return -EINVAL;
6225 }
6226
6227 pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name,
6228 relo_idx, core_relo_kind_str(relo->kind), relo->kind);
6229 bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
6230 libbpf_print(LIBBPF_DEBUG, "\n");
6231
6232 /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
6233 if (relo->kind == BPF_TYPE_ID_LOCAL) {
6234 targ_res.validate = true;
6235 targ_res.poison = false;
6236 targ_res.orig_val = local_spec.root_type_id;
6237 targ_res.new_val = local_spec.root_type_id;
6238 goto patch_insn;
6239 }
6240
6241 /* libbpf doesn't support candidate search for anonymous types */
6242 if (str_is_empty(local_name)) {
6243 pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
6244 prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
6245 return -EOPNOTSUPP;
6246 }
6247
6248 if (!hashmap__find(cand_cache, type_key, (void **)&cands)) {
6249 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
6250 if (IS_ERR(cands)) {
6251 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
6252 prog->name, relo_idx, local_id, btf_kind_str(local_type),
6253 local_name, PTR_ERR(cands));
6254 return PTR_ERR(cands);
6255 }
6256 err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
6257 if (err) {
6258 bpf_core_free_cands(cands);
6259 return err;
6260 }
6261 }
6262
6263 for (i = 0, j = 0; i < cands->len; i++) {
6264 err = bpf_core_spec_match(&local_spec, cands->cands[i].btf,
6265 cands->cands[i].id, &cand_spec);
6266 if (err < 0) {
6267 pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
6268 prog->name, relo_idx, i);
6269 bpf_core_dump_spec(LIBBPF_WARN, &cand_spec);
6270 libbpf_print(LIBBPF_WARN, ": %d\n", err);
6271 return err;
6272 }
6273
6274 pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name,
6275 relo_idx, err == 0 ? "non-matching" : "matching", i);
6276 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
6277 libbpf_print(LIBBPF_DEBUG, "\n");
6278
6279 if (err == 0)
6280 continue;
6281
6282 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res);
6283 if (err)
6284 return err;
6285
6286 if (j == 0) {
6287 targ_res = cand_res;
6288 targ_spec = cand_spec;
6289 } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
6290 /* if there are many field relo candidates, they
6291 * should all resolve to the same bit offset
6292 */
6293 pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
6294 prog->name, relo_idx, cand_spec.bit_offset,
6295 targ_spec.bit_offset);
6296 return -EINVAL;
6297 } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
6298 /* all candidates should result in the same relocation
6299 * decision and value, otherwise it's dangerous to
6300 * proceed due to ambiguity
6301 */
6302 pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
6303 prog->name, relo_idx,
6304 cand_res.poison ? "failure" : "success", cand_res.new_val,
6305 targ_res.poison ? "failure" : "success", targ_res.new_val);
6306 return -EINVAL;
6307 }
6308
6309 cands->cands[j++] = cands->cands[i];
6310 }
6311
6312 /*
6313 * For a BPF_FIELD_EXISTS relo, or when the BPF program has field
6314 * existence checks or kernel version/config checks, it's expected
6315 * that we might not find any candidates. In this case, if the field
6316 * wasn't found in any candidate, the list of candidates shouldn't
6317 * change at all; we'll just handle the relocation appropriately,
6318 * depending on the relo's kind.
6319 */
6320 if (j > 0)
6321 cands->len = j;
6322
6323 /*
6324 * If no candidates were found, it might be either a programmer error or
6325 * an expected case, depending on whether the instruction with the
6326 * relocation is guarded in some way that makes it unreachable (dead
6327 * code) if relocation can't be resolved. This is handled in
6328 * bpf_core_patch_insn() uniformly by replacing that instruction with
6329 * BPF helper call insn (using invalid helper ID). If that instruction
6330 * is indeed unreachable, then it will be ignored and eliminated by
6331 * verifier. If it was an error, then verifier will complain and point
6332 * to a specific instruction number in its log.
6333 */
6334 if (j == 0) {
6335 pr_debug("prog '%s': relo #%d: no matching targets found\n",
6336 prog->name, relo_idx);
6337
6338 /* calculate single target relo result explicitly */
6339 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res);
6340 if (err)
6341 return err;
6342 }
6343
6344patch_insn:
6345 /* bpf_core_patch_insn() should know how to handle missing targ_spec */
6346 err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
6347 if (err) {
6348 pr_warn("prog '%s': relo #%d: failed to patch insn #%zu: %d\n",
6349 prog->name, relo_idx, relo->insn_off / BPF_INSN_SZ, err);
6350 return -EINVAL;
6351 }
6352
6353 return 0;
6354}
6355
6356static int
6357bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
6358{
6359 const struct btf_ext_info_sec *sec;
6360 const struct bpf_core_relo *rec;
6361 const struct btf_ext_info *seg;
6362 struct hashmap_entry *entry;
6363 struct hashmap *cand_cache = NULL;
6364 struct bpf_program *prog;
6365 const char *sec_name;
6366 int i, err = 0, insn_idx, sec_idx;
6367
6368 if (obj->btf_ext->core_relo_info.len == 0)
6369 return 0;
6370
6371 if (targ_btf_path) {
6372 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
6373 err = libbpf_get_error(obj->btf_vmlinux_override);
6374 if (err) {
6375 pr_warn("failed to parse target BTF: %d\n", err);
6376 return err;
6377 }
6378 }
6379
6380 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
6381 if (IS_ERR(cand_cache)) {
6382 err = PTR_ERR(cand_cache);
6383 goto out;
6384 }
6385
6386 seg = &obj->btf_ext->core_relo_info;
6387 for_each_btf_ext_sec(seg, sec) {
6388 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6389 if (str_is_empty(sec_name)) {
6390 err = -EINVAL;
6391 goto out;
6392 }
6393 /* bpf_object's ELF is gone by now so it's not easy to find
6394 * section index by section name, but we can find *any*
6395 * bpf_program within the desired section name and use its
6396 * prog->sec_idx to do a proper search by section index and
6397 * instruction offset
6398 */
6399 prog = NULL;
6400 for (i = 0; i < obj->nr_programs; i++)
6401 if (strcmp(obj->programs[i].sec_name, sec_name) == 0) {
6402 prog = &obj->programs[i];
6403 break;
6404 }
6405 if (!prog) {
6406 pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
6407 return -ENOENT;
6408 }
6409 sec_idx = prog->sec_idx;
6410
6411 pr_debug("sec '%s': found %d CO-RE relocations\n",
6412 sec_name, sec->num_info);
6413
6414 for_each_btf_ext_rec(seg, sec, i, rec) {
6415 insn_idx = rec->insn_off / BPF_INSN_SZ;
6416 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
6417 if (!prog) {
6418 pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
6419 sec_name, insn_idx, i);
6420 err = -EINVAL;
6421 goto out;
6422 }
6423 /* no need to apply CO-RE relocation if the program is
6424 * not going to be loaded
6425 */
6426 if (!prog->load)
6427 continue;
6428
6429 err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
6430 if (err) {
6431 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
6432 prog->name, i, err);
6433 goto out;
6434 }
6435 }
6436 }
6437
6438out:
6439 /* obj->btf_vmlinux and module BTFs are freed after object load */
6440 btf__free(obj->btf_vmlinux_override);
6441 obj->btf_vmlinux_override = NULL;
6442
6443 if (!IS_ERR_OR_NULL(cand_cache)) {
6444 hashmap__for_each_entry(cand_cache, entry, i) {
6445 bpf_core_free_cands(entry->value);
6446 }
6447 hashmap__free(cand_cache);
6448 }
6449 return err;
6450}
6451
6452/* Relocate data references within program code:
6453 * - map references;
6454 * - global variable references;
6455 * - extern references.
6456 */
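/* Illustrative sketch (map name is hypothetical): a BPF-side access such as
 *
 *     struct { ... } my_map SEC(".maps");
 *     ...
 *     val = bpf_map_lookup_elem(&my_map, &key);
 *
 * compiles into a ld_imm64 instruction loading the address of my_map. The
 * RELO_LD64 case below rewrites that instruction to src_reg =
 * BPF_PSEUDO_MAP_FD with imm = map FD (or BPF_PSEUDO_MAP_IDX with a map index
 * when generating a loader program), which is how the kernel resolves map
 * references at verification time. RELO_DATA similarly uses
 * BPF_PSEUDO_MAP_VALUE plus an offset into the map's value area for global
 * variables.
 */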
6457static int
6458bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
6459{
6460 int i;
6461
6462 for (i = 0; i < prog->nr_reloc; i++) {
6463 struct reloc_desc *relo = &prog->reloc_desc[i];
6464 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6465 struct extern_desc *ext;
6466
6467 switch (relo->type) {
6468 case RELO_LD64:
6469 if (obj->gen_loader) {
6470 insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
6471 insn[0].imm = relo->map_idx;
6472 } else {
6473 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
6474 insn[0].imm = obj->maps[relo->map_idx].fd;
6475 }
6476 break;
6477 case RELO_DATA:
6478 insn[1].imm = insn[0].imm + relo->sym_off;
6479 if (obj->gen_loader) {
6480 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
6481 insn[0].imm = relo->map_idx;
6482 } else {
6483 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6484 insn[0].imm = obj->maps[relo->map_idx].fd;
6485 }
6486 break;
6487 case RELO_EXTERN_VAR:
6488 ext = &obj->externs[relo->sym_off];
6489 if (ext->type == EXT_KCFG) {
6490 if (obj->gen_loader) {
6491 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
6492 insn[0].imm = obj->kconfig_map_idx;
6493 } else {
6494 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6495 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
6496 }
6497 insn[1].imm = ext->kcfg.data_off;
6498 } else /* EXT_KSYM */ {
6499 if (ext->ksym.type_id) { /* typed ksyms */
6500 insn[0].src_reg = BPF_PSEUDO_BTF_ID;
6501 insn[0].imm = ext->ksym.kernel_btf_id;
6502 insn[1].imm = ext->ksym.kernel_btf_obj_fd;
6503 } else { /* typeless ksyms */
6504 insn[0].imm = (__u32)ext->ksym.addr;
6505 insn[1].imm = ext->ksym.addr >> 32;
6506 }
6507 }
6508 break;
6509 case RELO_EXTERN_FUNC:
6510 ext = &obj->externs[relo->sym_off];
6511 insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
6512 insn[0].imm = ext->ksym.kernel_btf_id;
6513 break;
6514 case RELO_SUBPROG_ADDR:
6515 if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
6516 pr_warn("prog '%s': relo #%d: bad insn\n",
6517 prog->name, i);
6518 return -EINVAL;
6519 }
6520 /* handled already */
6521 break;
6522 case RELO_CALL:
6523 /* handled already */
6524 break;
6525 default:
6526 pr_warn("prog '%s': relo #%d: bad relo type %d\n",
6527 prog->name, i, relo->type);
6528 return -EINVAL;
6529 }
6530 }
6531
6532 return 0;
6533}
6534
6535static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6536 const struct bpf_program *prog,
6537 const struct btf_ext_info *ext_info,
6538 void **prog_info, __u32 *prog_rec_cnt,
6539 __u32 *prog_rec_sz)
6540{
6541 void *copy_start = NULL, *copy_end = NULL;
6542 void *rec, *rec_end, *new_prog_info;
6543 const struct btf_ext_info_sec *sec;
6544 size_t old_sz, new_sz;
6545 const char *sec_name;
6546 int i, off_adj;
6547
6548 for_each_btf_ext_sec(ext_info, sec) {
6549 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6550 if (!sec_name)
6551 return -EINVAL;
6552 if (strcmp(sec_name, prog->sec_name) != 0)
6553 continue;
6554
6555 for_each_btf_ext_rec(ext_info, sec, i, rec) {
6556 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6557
6558 if (insn_off < prog->sec_insn_off)
6559 continue;
6560 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6561 break;
6562
6563 if (!copy_start)
6564 copy_start = rec;
6565 copy_end = rec + ext_info->rec_size;
6566 }
6567
6568 if (!copy_start)
6569 return -ENOENT;
6570
6571 /* append func/line info of a given (sub-)program to the main
6572 * program func/line info
6573 */
6574 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
6575 new_sz = old_sz + (copy_end - copy_start);
6576 new_prog_info = realloc(*prog_info, new_sz);
6577 if (!new_prog_info)
6578 return -ENOMEM;
6579 *prog_info = new_prog_info;
6580 *prog_rec_cnt = new_sz / ext_info->rec_size;
6581 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6582
6583 /* Kernel instruction offsets are in units of 8-byte
6584 * instructions, while .BTF.ext instruction offsets generated
6585 * by Clang are in units of bytes. So convert Clang offsets
6586 * into kernel offsets and adjust offset according to program
6587 * relocated position.
6588 */
6589 off_adj = prog->sub_insn_off - prog->sec_insn_off;
6590 rec = new_prog_info + old_sz;
6591 rec_end = new_prog_info + new_sz;
6592 for (; rec < rec_end; rec += ext_info->rec_size) {
6593 __u32 *insn_off = rec;
6594
6595 *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6596 }
6597 *prog_rec_sz = ext_info->rec_size;
6598 return 0;
6599 }
6600
6601 return -ENOENT;
6602}
6603
6604static int
6605reloc_prog_func_and_line_info(const struct bpf_object *obj,
6606 struct bpf_program *main_prog,
6607 const struct bpf_program *prog)
6608{
6609 int err;
6610
6611 /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
6612 * support func/line info
6613 */
6614 if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
6615 return 0;
6616
6617 /* only attempt func info relocation if main program's func_info
6618 * relocation was successful
6619 */
6620 if (main_prog != prog && !main_prog->func_info)
6621 goto line_info;
6622
6623 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6624 &main_prog->func_info,
6625 &main_prog->func_info_cnt,
6626 &main_prog->func_info_rec_size);
6627 if (err) {
6628 if (err != -ENOENT) {
6629 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6630 prog->name, err);
6631 return err;
6632 }
6633 if (main_prog->func_info) {
6634 /*
6635 * Some info has already been found but there was a problem
6636 * with the last btf_ext reloc, so we must error out.
6637 */
6638 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6639 return err;
6640 }
6641 /* There was a problem loading the very first info. Ignore the rest. */
6642 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6643 prog->name);
6644 }
6645
6646line_info:
6647 /* don't relocate line info if main program's relocation failed */
6648 if (main_prog != prog && !main_prog->line_info)
6649 return 0;
6650
6651 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6652 &main_prog->line_info,
6653 &main_prog->line_info_cnt,
6654 &main_prog->line_info_rec_size);
6655 if (err) {
6656 if (err != -ENOENT) {
6657 pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6658 prog->name, err);
6659 return err;
6660 }
6661 if (main_prog->line_info) {
6662 /*
6663 * Some info has already been found but there was a problem
6664 * with the last btf_ext reloc, so we must error out.
6665 */
6666 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6667 return err;
6668 }
6669 /* There was a problem loading the very first info. Ignore the rest. */
6670 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6671 prog->name);
6672 }
6673 return 0;
6674}
6675
6676static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6677{
6678 size_t insn_idx = *(const size_t *)key;
6679 const struct reloc_desc *relo = elem;
6680
6681 if (insn_idx == relo->insn_idx)
6682 return 0;
6683 return insn_idx < relo->insn_idx ? -1 : 1;
6684}
6685
6686static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6687{
6688 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6689 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6690}
6691
6692static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
6693{
6694 int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
6695 struct reloc_desc *relos;
6696 int i;
6697
6698 if (main_prog == subprog)
6699 return 0;
6700 relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
6701 if (!relos)
6702 return -ENOMEM;
6703 memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
6704 sizeof(*relos) * subprog->nr_reloc);
6705
6706 for (i = main_prog->nr_reloc; i < new_cnt; i++)
6707 relos[i].insn_idx += subprog->sub_insn_off;
6708 /* After insn_idx adjustment the 'relos' array is still sorted
6709 * by insn_idx and doesn't break bsearch.
6710 */
6711 main_prog->reloc_desc = relos;
6712 main_prog->nr_reloc = new_cnt;
6713 return 0;
6714}
6715
6716static int
6717bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6718 struct bpf_program *prog)
6719{
6720 size_t sub_insn_idx, insn_idx, new_cnt;
6721 struct bpf_program *subprog;
6722 struct bpf_insn *insns, *insn;
6723 struct reloc_desc *relo;
6724 int err;
6725
6726 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6727 if (err)
6728 return err;
6729
6730 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6731 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6732 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
6733 continue;
6734
6735 relo = find_prog_insn_relo(prog, insn_idx);
6736 if (relo && relo->type == RELO_EXTERN_FUNC)
6737 /* kfunc relocations will be handled later
6738 * in bpf_object__relocate_data()
6739 */
6740 continue;
6741 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
6742 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6743 prog->name, insn_idx, relo->type);
6744 return -LIBBPF_ERRNO__RELOC;
6745 }
6746 if (relo) {
6747 /* sub-program instruction index is a combination of
6748 * an offset of a symbol pointed to by relocation and
6749 * call instruction's imm field; for global functions,
6750 * call always has imm = -1, but for static functions
6751 * relocation is against STT_SECTION and insn->imm
6752 * points to a start of a static function
6753 *
6754 * for subprog addr relocation, the relo->sym_off + insn->imm is
6755 * the byte offset in the corresponding section.
6756 */
6757 if (relo->type == RELO_CALL)
6758 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6759 else
6760 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6761 } else if (insn_is_pseudo_func(insn)) {
6762 /*
6763 * RELO_SUBPROG_ADDR relo is always emitted even if both
6764 * functions are in the same section, so it shouldn't reach here.
6765 */
6766 pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6767 prog->name, insn_idx);
6768 return -LIBBPF_ERRNO__RELOC;
6769 } else {
6770 /* if subprogram call is to a static function within
6771 * the same ELF section, there won't be any relocation
6772 * emitted, but it also means there is no additional
6773 * offset necessary, insns->imm is relative to
6774 * offset necessary; insn->imm is relative to the
6775 */
6776 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6777 }
6778
6779 /* we enforce that sub-programs should be in .text section */
6780 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6781 if (!subprog) {
6782 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6783 prog->name);
6784 return -LIBBPF_ERRNO__RELOC;
6785 }
6786
6787 /* if it's the first call instruction calling into this
6788 * subprogram (meaning this subprog hasn't been processed
6789 * yet) within the context of current main program:
6790 *   - append it at the end of main program's instruction block;
6791 *   - process it recursively, while the current program is put on hold;
6792 *   - if that subprogram calls some other not-yet-processed
6793 *   subprogram, the same thing will happen recursively until
6794 *   there are no more unprocessed subprograms left to append
6795 * and relocate.
6796 */
6797 if (subprog->sub_insn_off == 0) {
6798 subprog->sub_insn_off = main_prog->insns_cnt;
6799
6800 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6801 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6802 if (!insns) {
6803 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6804 return -ENOMEM;
6805 }
6806 main_prog->insns = insns;
6807 main_prog->insns_cnt = new_cnt;
6808
6809 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6810 subprog->insns_cnt * sizeof(*insns));
6811
6812 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6813 main_prog->name, subprog->insns_cnt, subprog->name);
6814
6815 /* The subprog insns are now appended. Append its relos too. */
6816 err = append_subprog_relos(main_prog, subprog);
6817 if (err)
6818 return err;
6819 err = bpf_object__reloc_code(obj, main_prog, subprog);
6820 if (err)
6821 return err;
6822 }
6823
6824 /* main_prog->insns memory could have been re-allocated, so
6825 * calculate pointer again
6826 */
6827 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6828 /* calculate correct instruction position within current main
6829 * prog; each main prog can have a different set of
6830 * subprograms appended (potentially in different order as
6831 * well), so position of any subprog can be different for
6832 * different main programs */
6833 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6834
6835 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6836 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6837 }
6838
6839 return 0;
6840}
6841
6842/*
6843 * Relocate sub-program calls.
6844 *
6845 * Algorithm operates as follows. Each entry-point BPF program (referred to as
6846 * main prog) is processed separately. Each subprog (a non-entry function
6847 * that can be called from either entry progs or other subprogs) gets its
6848 * sub_insn_off reset to zero. This serves as an indicator that this subprogram
6849 * hasn't yet been appended and relocated within the current main prog. Once it's
6850 * relocated, sub_insn_off will point at the position within the current main prog
6851 * where given subprog was appended. This will further be used to relocate all
6852 * the call instructions jumping into this subprog.
6853 *
6854 * We start with main program and process all call instructions. If the call
6855 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6856 * is zero), subprog instructions are appended at the end of main program's
6857 * instruction array. Then main program is "put on hold" while we recursively
6858 * process newly appended subprogram. If that subprogram calls into another
6859 * subprogram that hasn't been appended, new subprogram is appended again to
6860 * the *main* prog's instructions (subprog's instructions are always left
6861 * untouched, as they need to be in unmodified state for subsequent main progs
6862 * and subprog instructions are always sent only as part of a main prog) and
6863 * the process continues recursively. Once all the subprogs called from a main
6864 * prog or any of its subprogs are appended (and relocated), all their
6865 * positions within finalized instructions array are known, so it's easy to
6866 * rewrite call instructions with correct relative offsets, corresponding to
6867 * desired target subprog.
6868 *
6869 * It's important to realize that some subprogs might not be called from a given
6870 * main prog or any of its called/used subprogs. Those will keep their
6871 * subprog->sub_insn_off as zero at all times and won't be appended to current
6872 * main prog and won't be relocated within the context of current main prog.
6873 * They might still be used from other main progs later.
6874 *
6875 * Visually this process can be shown as below. Suppose we have two main
6876 * programs mainA and mainB and BPF object contains three subprogs: subA,
6877 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6878 * subC both call subB:
6879 *
6880 * +--------+ +-------+
6881 * | v v |
6882 * +--+---+ +--+-+-+ +---+--+
6883 * | subA | | subB | | subC |
6884 * +--+---+ +------+ +---+--+
6885 * ^ ^
6886 * | |
6887 * +---+-------+ +------+----+
6888 * | mainA | | mainB |
6889 * +-----------+ +-----------+
6890 *
6891 * We'll start relocating mainA, will find subA, append it and start
6892 * processing sub A recursively:
6893 *
6894 * +-----------+------+
6895 * | mainA | subA |
6896 * +-----------+------+
6897 *
6898 * At this point we notice that subB is used from subA, so we append it and
6899 * relocate (there are no further subcalls from subB):
6900 *
6901 * +-----------+------+------+
6902 * | mainA | subA | subB |
6903 * +-----------+------+------+
6904 *
6905 * At this point, we relocate subA calls, then go one level up and finish with
6906 * relocating mainA calls. mainA is done.
6907 *
6908 * For mainB process is similar but results in different order. We start with
6909 * mainB and skip subA and subB, as mainB never calls them (at least
6910 * directly), but we see subC is needed, so we append and start processing it:
6911 *
6912 * +-----------+------+
6913 * | mainB | subC |
6914 * +-----------+------+
6915 * Now we see subC needs subB, so we go back to it, append and relocate it:
6916 *
6917 * +-----------+------+------+
6918 * | mainB | subC | subB |
6919 * +-----------+------+------+
6920 *
6921 * At this point we unwind recursion, relocate calls in subC, then in mainB.
6922 */
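/* A small worked example (instruction counts are made up) of the call fixup
 * performed at the end of bpf_object__reloc_code(): if mainA has 20
 * instructions (prog->sub_insn_off == 0), subA was appended at
 * sub_insn_off == 20, and the call sits at instruction #5 of mainA, then
 *
 *     insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1
 *               = 20 - (0 + 5) - 1 = 14
 *
 * i.e. the call jumps 14 instructions forward relative to the instruction
 * following the call, landing exactly on subA's first appended instruction.
 */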
6923static int
6924bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6925{
6926 struct bpf_program *subprog;
6927 int i, err;
6928
6929 /* mark all subprogs as not relocated (yet) within the context of
6930 * current main program
6931 */
6932 for (i = 0; i < obj->nr_programs; i++) {
6933 subprog = &obj->programs[i];
6934 if (!prog_is_subprog(obj, subprog))
6935 continue;
6936
6937 subprog->sub_insn_off = 0;
6938 }
6939
6940 err = bpf_object__reloc_code(obj, prog, prog);
6941 if (err)
6942 return err;
6943
6945 return 0;
6946}
6947
6948static void
6949bpf_object__free_relocs(struct bpf_object *obj)
6950{
6951 struct bpf_program *prog;
6952 int i;
6953
6954 /* free up relocation descriptors */
6955 for (i = 0; i < obj->nr_programs; i++) {
6956 prog = &obj->programs[i];
6957 zfree(&prog->reloc_desc);
6958 prog->nr_reloc = 0;
6959 }
6960}
6961
6962static int
6963bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6964{
6965 struct bpf_program *prog;
6966 size_t i, j;
6967 int err;
6968
6969 if (obj->btf_ext) {
6970 err = bpf_object__relocate_core(obj, targ_btf_path);
6971 if (err) {
6972 pr_warn("failed to perform CO-RE relocations: %d\n",
6973 err);
6974 return err;
6975 }
6976 }
6977
6978 /* Before relocating calls, pre-process relocations and mark
6979 * the few ld_imm64 instructions that point to subprogs.
6980 * Otherwise bpf_object__reloc_code() later would have to consider
6981 * all ld_imm64 insns as relocation candidates. That would
6982 * reduce relocation speed, since the number of find_prog_insn_relo()
6983 * calls would increase and most of them would fail to find a relo.
6984 */
6985 for (i = 0; i < obj->nr_programs; i++) {
6986 prog = &obj->programs[i];
6987 for (j = 0; j < prog->nr_reloc; j++) {
6988 struct reloc_desc *relo = &prog->reloc_desc[j];
6989 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6990
6991 /* mark the insn, so it's recognized by insn_is_pseudo_func() */
6992 if (relo->type == RELO_SUBPROG_ADDR)
6993 insn[0].src_reg = BPF_PSEUDO_FUNC;
6994 }
6995 }
6996
6997 /* relocate subprogram calls and append used subprograms to main
6998 * programs; each copy of subprogram code needs to be relocated
6999 * differently for each main program, because its code location might
7000 * have changed.
7001 * Append subprog relos to main programs to allow data relos to be
7002 * processed after text is completely relocated.
7003 */
7004 for (i = 0; i < obj->nr_programs; i++) {
7005 prog = &obj->programs[i];
7006 /* sub-program's sub-calls are relocated within the context of
7007 * its main program only
7008 */
7009 if (prog_is_subprog(obj, prog))
7010 continue;
7011
7012 err = bpf_object__relocate_calls(obj, prog);
7013 if (err) {
7014 pr_warn("prog '%s': failed to relocate calls: %d\n",
7015 prog->name, err);
7016 return err;
7017 }
7018 }
7019 /* Process data relos for main programs */
7020 for (i = 0; i < obj->nr_programs; i++) {
7021 prog = &obj->programs[i];
7022 if (prog_is_subprog(obj, prog))
7023 continue;
7024 err = bpf_object__relocate_data(obj, prog);
7025 if (err) {
7026 pr_warn("prog '%s': failed to relocate data references: %d\n",
7027 prog->name, err);
7028 return err;
7029 }
7030 }
7031 if (!obj->gen_loader)
7032 bpf_object__free_relocs(obj);
7033 return 0;
7034}
7035
7036static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
7037 GElf_Shdr *shdr, Elf_Data *data);
7038
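/* A hedged illustration (map names and sizes are hypothetical) of the kind of
 * BTF-defined map-in-map declaration whose ELF relocations this function
 * resolves; the macros come from bpf_helpers.h:
 *
 *     struct inner_map {
 *         __uint(type, BPF_MAP_TYPE_ARRAY);
 *         __uint(max_entries, 1);
 *         __type(key, int);
 *         __type(value, int);
 *     } inner_map1 SEC(".maps");
 *
 *     struct {
 *         __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *         __uint(max_entries, 2);
 *         __type(key, int);
 *         __array(values, struct inner_map);
 *     } outer_map SEC(".maps") = {
 *         .values = { [0] = &inner_map1 },
 *     };
 *
 * Each initialized slot in .values shows up as a relocation against the inner
 * map's symbol; below it is turned into map->init_slots[slot] = targ_map.
 */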
7039static int bpf_object__collect_map_relos(struct bpf_object *obj,
7040 GElf_Shdr *shdr, Elf_Data *data)
7041{
7042 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
7043 int i, j, nrels, new_sz;
7044 const struct btf_var_secinfo *vi = NULL;
7045 const struct btf_type *sec, *var, *def;
7046 struct bpf_map *map = NULL, *targ_map;
7047 const struct btf_member *member;
7048 const char *name, *mname;
7049 Elf_Data *symbols;
7050 unsigned int moff;
7051 GElf_Sym sym;
7052 GElf_Rel rel;
7053 void *tmp;
7054
7055 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
7056 return -EINVAL;
7057 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
7058 if (!sec)
7059 return -EINVAL;
7060
7061 symbols = obj->efile.symbols;
7062 nrels = shdr->sh_size / shdr->sh_entsize;
7063 for (i = 0; i < nrels; i++) {
7064 if (!gelf_getrel(data, i, &rel)) {
7065 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
7066 return -LIBBPF_ERRNO__FORMAT;
7067 }
7068 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
7069 pr_warn(".maps relo #%d: symbol %zx not found\n",
7070 i, (size_t)GELF_R_SYM(rel.r_info));
7071 return -LIBBPF_ERRNO__FORMAT;
7072 }
7073 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
7074 if (sym.st_shndx != obj->efile.btf_maps_shndx) {
7075 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
7076 i, name);
7077 return -LIBBPF_ERRNO__RELOC;
7078 }
7079
7080 pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
7081 i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
7082 (size_t)rel.r_offset, sym.st_name, name);
7083
7084 for (j = 0; j < obj->nr_maps; j++) {
7085 map = &obj->maps[j];
7086 if (map->sec_idx != obj->efile.btf_maps_shndx)
7087 continue;
7088
7089 vi = btf_var_secinfos(sec) + map->btf_var_idx;
7090 if (vi->offset <= rel.r_offset &&
7091 rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
7092 break;
7093 }
7094 if (j == obj->nr_maps) {
7095 pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
7096 i, name, (size_t)rel.r_offset);
7097 return -EINVAL;
7098 }
7099
7100 if (!bpf_map_type__is_map_in_map(map->def.type))
7101 return -EINVAL;
7102 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
7103 map->def.key_size != sizeof(int)) {
7104 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
7105 i, map->name, sizeof(int));
7106 return -EINVAL;
7107 }
7108
7109 targ_map = bpf_object__find_map_by_name(obj, name);
7110 if (!targ_map)
7111 return -ESRCH;
7112
7113 var = btf__type_by_id(obj->btf, vi->type);
7114 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
7115 if (btf_vlen(def) == 0)
7116 return -EINVAL;
7117 member = btf_members(def) + btf_vlen(def) - 1;
7118 mname = btf__name_by_offset(obj->btf, member->name_off);
7119 if (strcmp(mname, "values"))
7120 return -EINVAL;
7121
7122 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
7123 if (rel.r_offset - vi->offset < moff)
7124 return -EINVAL;
7125
7126 moff = rel.r_offset - vi->offset - moff;
7127 /* here we use BPF pointer size, which is always 64 bit, as we
7128 * are parsing ELF that was built for BPF target
7129 */
7130 if (moff % bpf_ptr_sz)
7131 return -EINVAL;
7132 moff /= bpf_ptr_sz;
7133 if (moff >= map->init_slots_sz) {
7134 new_sz = moff + 1;
7135 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
7136 if (!tmp)
7137 return -ENOMEM;
7138 map->init_slots = tmp;
7139 memset(map->init_slots + map->init_slots_sz, 0,
7140 (new_sz - map->init_slots_sz) * host_ptr_sz);
7141 map->init_slots_sz = new_sz;
7142 }
7143 map->init_slots[moff] = targ_map;
7144
7145 pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
7146 i, map->name, moff, name);
7147 }
7148
7149 return 0;
7150}
7151
7152static int cmp_relocs(const void *_a, const void *_b)
7153{
7154 const struct reloc_desc *a = _a;
7155 const struct reloc_desc *b = _b;
7156
7157 if (a->insn_idx != b->insn_idx)
7158 return a->insn_idx < b->insn_idx ? -1 : 1;
7159
7160 /* no two relocations should have the same insn_idx, but ... */
7161 if (a->type != b->type)
7162 return a->type < b->type ? -1 : 1;
7163
7164 return 0;
7165}
7166
7167static int bpf_object__collect_relos(struct bpf_object *obj)
7168{
7169 int i, err;
7170
7171 for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
7172 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
7173 Elf_Data *data = obj->efile.reloc_sects[i].data;
7174 int idx = shdr->sh_info;
7175
7176 if (shdr->sh_type != SHT_REL) {
7177 pr_warn("internal error at %d\n", __LINE__);
7178 return -LIBBPF_ERRNO__INTERNAL;
7179 }
7180
7181 if (idx == obj->efile.st_ops_shndx)
7182 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
7183 else if (idx == obj->efile.btf_maps_shndx)
7184 err = bpf_object__collect_map_relos(obj, shdr, data);
7185 else
7186 err = bpf_object__collect_prog_relos(obj, shdr, data);
7187 if (err)
7188 return err;
7189 }
7190
7191 for (i = 0; i < obj->nr_programs; i++) {
7192 struct bpf_program *p = &obj->programs[i];
7193
7194 if (!p->nr_reloc)
7195 continue;
7196
7197 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
7198 }
7199 return 0;
7200}
7201
7202static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
7203{
7204 if (BPF_CLASS(insn->code) == BPF_JMP &&
7205 BPF_OP(insn->code) == BPF_CALL &&
7206 BPF_SRC(insn->code) == BPF_K &&
7207 insn->src_reg == 0 &&
7208 insn->dst_reg == 0) {
7209 *func_id = insn->imm;
7210 return true;
7211 }
7212 return false;
7213}
7214
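/* For illustration: a program that calls, e.g.,
 * bpf_probe_read_kernel(&dst, sizeof(dst), src) will, on kernels that predate
 * that helper, have the call's imm rewritten below to BPF_FUNC_probe_read,
 * which accepts the same arguments on such older kernels.
 */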
7215static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
7216{
7217 struct bpf_insn *insn = prog->insns;
7218 enum bpf_func_id func_id;
7219 int i;
7220
7221 if (obj->gen_loader)
7222 return 0;
7223
7224 for (i = 0; i < prog->insns_cnt; i++, insn++) {
7225 if (!insn_is_helper_call(insn, &func_id))
7226 continue;
7227
7228 /* on kernels that don't yet support
7229 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
7230 * to bpf_probe_read() which works well for old kernels
7231 */
7232 switch (func_id) {
7233 case BPF_FUNC_probe_read_kernel:
7234 case BPF_FUNC_probe_read_user:
7235 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7236 insn->imm = BPF_FUNC_probe_read;
7237 break;
7238 case BPF_FUNC_probe_read_kernel_str:
7239 case BPF_FUNC_probe_read_user_str:
7240 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7241 insn->imm = BPF_FUNC_probe_read_str;
7242 break;
7243 default:
7244 break;
7245 }
7246 }
7247 return 0;
7248}
7249
7250static int
7251load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
7252 char *license, __u32 kern_version, int *pfd)
7253{
7254 struct bpf_prog_load_params load_attr = {};
7255 char *cp, errmsg[STRERR_BUFSIZE];
7256 size_t log_buf_size = 0;
7257 char *log_buf = NULL;
7258 int btf_fd, ret;
7259
7260 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
7261 /*
7262 * The program type must be set. Most likely we couldn't find a proper
7263 * section definition at load time, and thus we didn't infer the type.
7264 */
7265 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
7266 prog->name, prog->sec_name);
7267 return -EINVAL;
7268 }
7269
7270 if (!insns || !insns_cnt)
7271 return -EINVAL;
7272
7273 load_attr.prog_type = prog->type;
7274 /* old kernels might not support specifying expected_attach_type */
7275 if (!kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
7276 prog->sec_def->is_exp_attach_type_optional)
7277 load_attr.expected_attach_type = 0;
7278 else
7279 load_attr.expected_attach_type = prog->expected_attach_type;
7280 if (kernel_supports(prog->obj, FEAT_PROG_NAME))
7281 load_attr.name = prog->name;
7282 load_attr.insns = insns;
7283 load_attr.insn_cnt = insns_cnt;
7284 load_attr.license = license;
7285 load_attr.attach_btf_id = prog->attach_btf_id;
7286 if (prog->attach_prog_fd)
7287 load_attr.attach_prog_fd = prog->attach_prog_fd;
7288 else
7289 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
7291 load_attr.kern_version = kern_version;
7292 load_attr.prog_ifindex = prog->prog_ifindex;
7293
7294 /* specify func_info/line_info only if kernel supports them */
7295 btf_fd = bpf_object__btf_fd(prog->obj);
7296 if (btf_fd >= 0 && kernel_supports(prog->obj, FEAT_BTF_FUNC)) {
7297 load_attr.prog_btf_fd = btf_fd;
7298 load_attr.func_info = prog->func_info;
7299 load_attr.func_info_rec_size = prog->func_info_rec_size;
7300 load_attr.func_info_cnt = prog->func_info_cnt;
7301 load_attr.line_info = prog->line_info;
7302 load_attr.line_info_rec_size = prog->line_info_rec_size;
7303 load_attr.line_info_cnt = prog->line_info_cnt;
7304 }
7305 load_attr.log_level = prog->log_level;
7306 load_attr.prog_flags = prog->prog_flags;
7307
7308 if (prog->obj->gen_loader) {
7309 bpf_gen__prog_load(prog->obj->gen_loader, &load_attr,
7310 prog - prog->obj->programs);
7311 *pfd = -1;
7312 return 0;
7313 }
7314retry_load:
7315 if (log_buf_size) {
7316 log_buf = malloc(log_buf_size);
7317 if (!log_buf)
7318 return -ENOMEM;
7319
7320 *log_buf = 0;
7321 }
7322
7323 load_attr.log_buf = log_buf;
7324 load_attr.log_buf_sz = log_buf_size;
7325 ret = libbpf__bpf_prog_load(&load_attr);
7326
7327 if (ret >= 0) {
7328 if (log_buf && load_attr.log_level)
7329 pr_debug("verifier log:\n%s", log_buf);
7330
7331 if (prog->obj->rodata_map_idx >= 0 &&
7332 kernel_supports(prog->obj, FEAT_PROG_BIND_MAP)) {
7333 struct bpf_map *rodata_map =
7334 &prog->obj->maps[prog->obj->rodata_map_idx];
7335
7336 if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
7337 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7338 pr_warn("prog '%s': failed to bind .rodata map: %s\n",
7339 prog->name, cp);
7340 /* Don't fail hard if can't bind rodata. */
7341 }
7342 }
7343
7344 *pfd = ret;
7345 ret = 0;
7346 goto out;
7347 }
7348
7349 if (!log_buf || errno == ENOSPC) {
7350 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
7351 log_buf_size << 1);
7352
7353 free(log_buf);
7354 goto retry_load;
7355 }
7356 ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
7357 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7358 pr_warn("load bpf program failed: %s\n", cp);
7359 pr_perm_msg(ret);
7360
7361 if (log_buf && log_buf[0] != '\0') {
7362 ret = -LIBBPF_ERRNO__VERIFY;
7363 pr_warn("-- BEGIN DUMP LOG --\n");
7364 pr_warn("\n%s\n", log_buf);
7365 pr_warn("-- END DUMP LOG --\n");
7366 } else if (load_attr.insn_cnt >= BPF_MAXINSNS) {
7367 pr_warn("Program too large (%zu insns), at most %d insns\n",
7368 load_attr.insn_cnt, BPF_MAXINSNS);
7369 ret = -LIBBPF_ERRNO__PROG2BIG;
7370 } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
7371 /* Wrong program type? */
7372 int fd;
7373
7374 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
7375 load_attr.expected_attach_type = 0;
7376 load_attr.log_buf = NULL;
7377 load_attr.log_buf_sz = 0;
7378 fd = libbpf__bpf_prog_load(&load_attr);
7379 if (fd >= 0) {
7380 close(fd);
7381 ret = -LIBBPF_ERRNO__PROGTYPE;
7382 goto out;
7383 }
7384 }
7385
7386out:
7387 free(log_buf);
7388 return ret;
7389}
7390
7391static int bpf_program__record_externs(struct bpf_program *prog)
7392{
7393 struct bpf_object *obj = prog->obj;
7394 int i;
7395
7396 for (i = 0; i < prog->nr_reloc; i++) {
7397 struct reloc_desc *relo = &prog->reloc_desc[i];
7398 struct extern_desc *ext = &obj->externs[relo->sym_off];
7399
7400 switch (relo->type) {
7401 case RELO_EXTERN_VAR:
7402 if (ext->type != EXT_KSYM)
7403 continue;
7404 if (!ext->ksym.type_id) {
7405 pr_warn("typeless ksym %s is not supported yet\n",
7406 ext->name);
7407 return -ENOTSUP;
7408 }
7409 bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_VAR,
7410 relo->insn_idx);
7411 break;
7412 case RELO_EXTERN_FUNC:
7413 bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_FUNC,
7414 relo->insn_idx);
7415 break;
7416 default:
7417 continue;
7418 }
7419 }
7420 return 0;
7421}
7422
7423static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
7424
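/* Sketch of typical (simplified) API usage around this entry point; most
 * callers go through bpf_object__load() rather than loading programs
 * individually:
 *
 *     struct bpf_object *obj;
 *     int err;
 *
 *     obj = bpf_object__open_file("prog.bpf.o", NULL);
 *     err = libbpf_get_error(obj);
 *     if (err)
 *         return err;
 *     err = bpf_object__load(obj);   // ends up in bpf_object__load_progs()
 *     ...
 *     bpf_object__close(obj);
 */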
7425int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
7426{
7427 int err = 0, fd, i;
7428
7429 if (prog->obj->loaded) {
7430 pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
7431 return libbpf_err(-EINVAL);
7432 }
7433
7434 if ((prog->type == BPF_PROG_TYPE_TRACING ||
7435 prog->type == BPF_PROG_TYPE_LSM ||
7436 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
7437 int btf_obj_fd = 0, btf_type_id = 0;
7438
7439 err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
7440 if (err)
7441 return libbpf_err(err);
7442
7443 prog->attach_btf_obj_fd = btf_obj_fd;
7444 prog->attach_btf_id = btf_type_id;
7445 }
7446
7447 if (prog->instances.nr < 0 || !prog->instances.fds) {
7448 if (prog->preprocessor) {
7449 pr_warn("Internal error: can't load program '%s'\n",
7450 prog->name);
7451 return libbpf_err(-LIBBPF_ERRNO__INTERNAL);
7452 }
7453
7454 prog->instances.fds = malloc(sizeof(int));
7455 if (!prog->instances.fds) {
7456 pr_warn("Not enough memory for BPF fds\n");
7457 return libbpf_err(-ENOMEM);
7458 }
7459 prog->instances.nr = 1;
7460 prog->instances.fds[0] = -1;
7461 }
7462
7463 if (!prog->preprocessor) {
7464 if (prog->instances.nr != 1) {
7465 pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
7466 prog->name, prog->instances.nr);
7467 }
7468 if (prog->obj->gen_loader)
7469 bpf_program__record_externs(prog);
7470 err = load_program(prog, prog->insns, prog->insns_cnt,
7471 license, kern_ver, &fd);
7472 if (!err)
7473 prog->instances.fds[0] = fd;
7474 goto out;
7475 }
7476
7477 for (i = 0; i < prog->instances.nr; i++) {
7478 struct bpf_prog_prep_result result;
7479 bpf_program_prep_t preprocessor = prog->preprocessor;
7480
7481 memset(&result, 0, sizeof(result));
7482 err = preprocessor(prog, i, prog->insns,
7483 prog->insns_cnt, &result);
7484 if (err) {
7485 pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
7486 i, prog->name);
7487 goto out;
7488 }
7489
7490 if (!result.new_insn_ptr || !result.new_insn_cnt) {
7491 pr_debug("Skip loading the %dth instance of program '%s'\n",
7492 i, prog->name);
7493 prog->instances.fds[i] = -1;
7494 if (result.pfd)
7495 *result.pfd = -1;
7496 continue;
7497 }
7498
7499 err = load_program(prog, result.new_insn_ptr,
7500 result.new_insn_cnt, license, kern_ver, &fd);
7501 if (err) {
7502 pr_warn("Loading the %dth instance of program '%s' failed\n",
7503 i, prog->name);
7504 goto out;
7505 }
7506
7507 if (result.pfd)
7508 *result.pfd = fd;
7509 prog->instances.fds[i] = fd;
7510 }
7511out:
7512 if (err)
7513 pr_warn("failed to load program '%s'\n", prog->name);
7514 zfree(&prog->insns);
7515 prog->insns_cnt = 0;
7516 return libbpf_err(err);
7517}
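
/*
 * Usage sketch (illustrative only, not part of libbpf itself): loading a
 * single program directly instead of going through bpf_object__load(). The
 * "GPL" license string and kernel version 0 are placeholder arguments; most
 * callers should prefer the object-level load path.
 *
 *	char license[] = "GPL";
 *	int err, prog_fd;
 *
 *	err = bpf_program__load(prog, license, 0);
 *	if (!err)
 *		prog_fd = bpf_program__fd(prog);
 */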
7518
7519static int
7520bpf_object__load_progs(struct bpf_object *obj, int log_level)
7521{
7522 struct bpf_program *prog;
7523 size_t i;
7524 int err;
7525
7526 for (i = 0; i < obj->nr_programs; i++) {
7527 prog = &obj->programs[i];
7528 err = bpf_object__sanitize_prog(obj, prog);
7529 if (err)
7530 return err;
7531 }
7532
7533 for (i = 0; i < obj->nr_programs; i++) {
7534 prog = &obj->programs[i];
7535 if (prog_is_subprog(obj, prog))
7536 continue;
7537 if (!prog->load) {
7538 pr_debug("prog '%s': skipped loading\n", prog->name);
7539 continue;
7540 }
7541 prog->log_level |= log_level;
7542 err = bpf_program__load(prog, obj->license, obj->kern_version);
7543 if (err)
7544 return err;
7545 }
7546 if (obj->gen_loader)
7547 bpf_object__free_relocs(obj);
7548 return 0;
7549}
7550
7551static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7552
7553static struct bpf_object *
7554__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7555 const struct bpf_object_open_opts *opts)
7556{
7557 const char *obj_name, *kconfig;
7558 struct bpf_program *prog;
7559 struct bpf_object *obj;
7560 char tmp_name[64];
7561 int err;
7562
7563 if (elf_version(EV_CURRENT) == EV_NONE) {
7564 pr_warn("failed to init libelf for %s\n",
7565 path ? : "(mem buf)");
7566 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7567 }
7568
7569 if (!OPTS_VALID(opts, bpf_object_open_opts))
7570 return ERR_PTR(-EINVAL);
7571
7572 obj_name = OPTS_GET(opts, object_name, NULL);
7573 if (obj_buf) {
7574 if (!obj_name) {
7575 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7576 (unsigned long)obj_buf,
7577 (unsigned long)obj_buf_sz);
7578 obj_name = tmp_name;
7579 }
7580 path = obj_name;
7581 pr_debug("loading object '%s' from buffer\n", obj_name);
7582 }
7583
7584 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7585 if (IS_ERR(obj))
7586 return obj;
7587
7588 kconfig = OPTS_GET(opts, kconfig, NULL);
7589 if (kconfig) {
7590 obj->kconfig = strdup(kconfig);
7591 if (!obj->kconfig)
7592 return ERR_PTR(-ENOMEM);
7593 }
7594
7595 err = bpf_object__elf_init(obj);
7596 err = err ? : bpf_object__check_endianness(obj);
7597 err = err ? : bpf_object__elf_collect(obj);
7598 err = err ? : bpf_object__collect_externs(obj);
7599 err = err ? : bpf_object__finalize_btf(obj);
7600 err = err ? : bpf_object__init_maps(obj, opts);
7601 err = err ? : bpf_object__collect_relos(obj);
7602 if (err)
7603 goto out;
7604 bpf_object__elf_finish(obj);
7605
7606 bpf_object__for_each_program(prog, obj) {
7607 prog->sec_def = find_sec_def(prog->sec_name);
7608 if (!prog->sec_def) {
7609 /* couldn't guess, but user might manually specify */
7610 pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7611 prog->name, prog->sec_name);
7612 continue;
7613 }
7614
7615 if (prog->sec_def->is_sleepable)
7616 prog->prog_flags |= BPF_F_SLEEPABLE;
7617 bpf_program__set_type(prog, prog->sec_def->prog_type);
7618 bpf_program__set_expected_attach_type(prog,
7619 prog->sec_def->expected_attach_type);
7620
7621 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
7622 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
7623 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
7624 }
7625
7626 return obj;
7627out:
7628 bpf_object__close(obj);
7629 return ERR_PTR(err);
7630}
7631
7632static struct bpf_object *
7633__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
7634{
7635 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7636 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
7637 );
7638
7639 /* param validation */
7640 if (!attr->file)
7641 return NULL;
7642
7643 pr_debug("loading %s\n", attr->file);
7644 return __bpf_object__open(attr->file, NULL, 0, &opts);
7645}
7646
7647struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
7648{
7649 return libbpf_ptr(__bpf_object__open_xattr(attr, 0));
7650}
7651
7652struct bpf_object *bpf_object__open(const char *path)
7653{
7654 struct bpf_object_open_attr attr = {
7655 .file = path,
7656 .prog_type = BPF_PROG_TYPE_UNSPEC,
7657 };
7658
7659 return libbpf_ptr(__bpf_object__open_xattr(&attr, 0));
7660}
7661
7662struct bpf_object *
7663bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7664{
7665 if (!path)
7666 return libbpf_err_ptr(-EINVAL);
7667
7668 pr_debug("loading %s\n", path);
7669
7670 return libbpf_ptr(__bpf_object__open(path, NULL, 0, opts));
7671}
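
/*
 * Usage sketch (illustrative only): opening an object file with explicit
 * options. The path "prog.bpf.o" and object name "my_obj" are placeholders;
 * DECLARE_LIBBPF_OPTS() and libbpf_get_error() are assumed from libbpf's
 * public headers.
 *
 *	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
 *		.object_name = "my_obj",
 *	);
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", &open_opts);
 *	if (libbpf_get_error(obj))
 *		return -EINVAL;
 */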
7672
7673struct bpf_object *
7674bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7675 const struct bpf_object_open_opts *opts)
7676{
7677 if (!obj_buf || obj_buf_sz == 0)
7678 return libbpf_err_ptr(-EINVAL);
7679
7680 return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, opts));
7681}
7682
7683struct bpf_object *
7684bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
7685 const char *name)
7686{
7687 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7688 .object_name = name,
7689 /* wrong default, but backwards-compatible */
7690 .relaxed_maps = true,
7691 );
7692
7693 /* returning NULL is wrong, but backwards-compatible */
7694 if (!obj_buf || obj_buf_sz == 0)
7695 return errno = EINVAL, NULL;
7696
7697 return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, &opts));
7698}
7699
7700int bpf_object__unload(struct bpf_object *obj)
7701{
7702 size_t i;
7703
7704 if (!obj)
7705 return libbpf_err(-EINVAL);
7706
7707 for (i = 0; i < obj->nr_maps; i++) {
7708 zclose(obj->maps[i].fd);
7709 if (obj->maps[i].st_ops)
7710 zfree(&obj->maps[i].st_ops->kern_vdata);
7711 }
7712
7713 for (i = 0; i < obj->nr_programs; i++)
7714 bpf_program__unload(&obj->programs[i]);
7715
7716 return 0;
7717}
7718
7719static int bpf_object__sanitize_maps(struct bpf_object *obj)
7720{
7721 struct bpf_map *m;
7722
7723 bpf_object__for_each_map(m, obj) {
7724 if (!bpf_map__is_internal(m))
7725 continue;
7726 if (!kernel_supports(obj, FEAT_GLOBAL_DATA)) {
7727 pr_warn("kernel doesn't support global data\n");
7728 return -ENOTSUP;
7729 }
7730 if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
7731 m->def.map_flags ^= BPF_F_MMAPABLE; /* flag is always set for internal maps, so XOR clears it */
7732 }
7733
7734 return 0;
7735}
7736
7737static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
7738{
7739 char sym_type, sym_name[500];
7740 unsigned long long sym_addr;
7741 const struct btf_type *t;
7742 struct extern_desc *ext;
7743 int ret, err = 0;
7744 FILE *f;
7745
7746 f = fopen("/proc/kallsyms", "r");
7747 if (!f) {
7748 err = -errno;
7749 pr_warn("failed to open /proc/kallsyms: %d\n", err);
7750 return err;
7751 }
7752
7753 while (true) {
7754 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7755 &sym_addr, &sym_type, sym_name);
7756 if (ret == EOF && feof(f))
7757 break;
7758 if (ret != 3) {
7759 pr_warn("failed to read kallsyms entry: %d\n", ret);
7760 err = -EINVAL;
7761 goto out;
7762 }
7763
7764 ext = find_extern_by_name(obj, sym_name);
7765 if (!ext || ext->type != EXT_KSYM)
7766 continue;
7767
7768 t = btf__type_by_id(obj->btf, ext->btf_id);
7769 if (!btf_is_var(t))
7770 continue;
7771
7772 if (ext->is_set && ext->ksym.addr != sym_addr) {
7773 pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
7774 sym_name, ext->ksym.addr, sym_addr);
7775 err = -EINVAL;
7776 goto out;
7777 }
7778 if (!ext->is_set) {
7779 ext->is_set = true;
7780 ext->ksym.addr = sym_addr;
7781 pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
7782 }
7783 }
7784
7785out:
7786 fclose(f);
7787 return err;
7788}
7789
7790static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
7791 __u16 kind, struct btf **res_btf,
7792 int *res_btf_fd)
7793{
7794 int i, id, btf_fd, err;
7795 struct btf *btf;
7796
7797 btf = obj->btf_vmlinux;
7798 btf_fd = 0;
7799 id = btf__find_by_name_kind(btf, ksym_name, kind);
7800
7801 if (id == -ENOENT) {
7802 err = load_module_btfs(obj);
7803 if (err)
7804 return err;
7805
7806 for (i = 0; i < obj->btf_module_cnt; i++) {
7807 btf = obj->btf_modules[i].btf;
7808 /* we assume module BTF FD is always >0 */
7809 btf_fd = obj->btf_modules[i].fd;
7810 id = btf__find_by_name_kind(btf, ksym_name, kind);
7811 if (id != -ENOENT)
7812 break;
7813 }
7814 }
7815 if (id <= 0) {
7816 pr_warn("extern (%s ksym) '%s': failed to find BTF ID in kernel BTF(s).\n",
7817 __btf_kind_str(kind), ksym_name);
7818 return -ESRCH;
7819 }
7820
7821 *res_btf = btf;
7822 *res_btf_fd = btf_fd;
7823 return id;
7824}
7825
7826static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
7827 struct extern_desc *ext)
7828{
7829 const struct btf_type *targ_var, *targ_type;
7830 __u32 targ_type_id, local_type_id;
7831 const char *targ_var_name;
7832 int id, btf_fd = 0, err;
7833 struct btf *btf = NULL;
7834
7835 id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &btf_fd);
7836 if (id < 0)
7837 return id;
7838
7839 /* find local type_id */
7840 local_type_id = ext->ksym.type_id;
7841
7842 /* find target type_id */
7843 targ_var = btf__type_by_id(btf, id);
7844 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
7845 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
7846
7847 err = bpf_core_types_are_compat(obj->btf, local_type_id,
7848 btf, targ_type_id);
7849 if (err <= 0) {
7850 const struct btf_type *local_type;
7851 const char *targ_name, *local_name;
7852
7853 local_type = btf__type_by_id(obj->btf, local_type_id);
7854 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
7855 targ_name = btf__name_by_offset(btf, targ_type->name_off);
7856
7857 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
7858 ext->name, local_type_id,
7859 btf_kind_str(local_type), local_name, targ_type_id,
7860 btf_kind_str(targ_type), targ_name);
7861 return -EINVAL;
7862 }
7863
7864 ext->is_set = true;
7865 ext->ksym.kernel_btf_obj_fd = btf_fd;
7866 ext->ksym.kernel_btf_id = id;
7867 pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
7868 ext->name, id, btf_kind_str(targ_var), targ_var_name);
7869
7870 return 0;
7871}
7872
7873static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
7874 struct extern_desc *ext)
7875{
7876 int local_func_proto_id, kfunc_proto_id, kfunc_id;
7877 const struct btf_type *kern_func;
7878 struct btf *kern_btf = NULL;
7879 int ret, kern_btf_fd = 0;
7880
7881 local_func_proto_id = ext->ksym.type_id;
7882
7883 kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC,
7884 &kern_btf, &kern_btf_fd);
7885 if (kfunc_id < 0) {
7886 pr_warn("extern (func ksym) '%s': not found in kernel BTF\n",
7887 ext->name);
7888 return kfunc_id;
7889 }
7890
7891 if (kern_btf != obj->btf_vmlinux) {
7892 pr_warn("extern (func ksym) '%s': function in kernel module is not supported\n",
7893 ext->name);
7894 return -ENOTSUP;
7895 }
7896
7897 kern_func = btf__type_by_id(kern_btf, kfunc_id);
7898 kfunc_proto_id = kern_func->type;
7899
7900 ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
7901 kern_btf, kfunc_proto_id);
7902 if (ret <= 0) {
7903 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n",
7904 ext->name, local_func_proto_id, kfunc_proto_id);
7905 return -EINVAL;
7906 }
7907
7908 ext->is_set = true;
7909 ext->ksym.kernel_btf_obj_fd = kern_btf_fd;
7910 ext->ksym.kernel_btf_id = kfunc_id;
7911 pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
7912 ext->name, kfunc_id);
7913
7914 return 0;
7915}
7916
7917static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
7918{
7919 const struct btf_type *t;
7920 struct extern_desc *ext;
7921 int i, err;
7922
7923 for (i = 0; i < obj->nr_extern; i++) {
7924 ext = &obj->externs[i];
7925 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
7926 continue;
7927
7928 if (obj->gen_loader) {
7929 ext->is_set = true;
7930 ext->ksym.kernel_btf_obj_fd = 0;
7931 ext->ksym.kernel_btf_id = 0;
7932 continue;
7933 }
7934 t = btf__type_by_id(obj->btf, ext->btf_id);
7935 if (btf_is_var(t))
7936 err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
7937 else
7938 err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
7939 if (err)
7940 return err;
7941 }
7942 return 0;
7943}
7944
7945static int bpf_object__resolve_externs(struct bpf_object *obj,
7946 const char *extra_kconfig)
7947{
7948 bool need_config = false, need_kallsyms = false;
7949 bool need_vmlinux_btf = false;
7950 struct extern_desc *ext;
7951 void *kcfg_data = NULL;
7952 int err, i;
7953
7954 if (obj->nr_extern == 0)
7955 return 0;
7956
7957 if (obj->kconfig_map_idx >= 0)
7958 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
7959
7960 for (i = 0; i < obj->nr_extern; i++) {
7961 ext = &obj->externs[i];
7962
7963 if (ext->type == EXT_KCFG &&
7964 strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
7965 void *ext_val = kcfg_data + ext->kcfg.data_off;
7966 __u32 kver = get_kernel_version();
7967
7968 if (!kver) {
7969 pr_warn("failed to get kernel version\n");
7970 return -EINVAL;
7971 }
7972 err = set_kcfg_value_num(ext, ext_val, kver);
7973 if (err)
7974 return err;
7975 pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
7976 } else if (ext->type == EXT_KCFG &&
7977 strncmp(ext->name, "CONFIG_", 7) == 0) {
7978 need_config = true;
7979 } else if (ext->type == EXT_KSYM) {
7980 if (ext->ksym.type_id)
7981 need_vmlinux_btf = true;
7982 else
7983 need_kallsyms = true;
7984 } else {
7985 pr_warn("unrecognized extern '%s'\n", ext->name);
7986 return -EINVAL;
7987 }
7988 }
7989 if (need_config && extra_kconfig) {
7990 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
7991 if (err)
7992 return -EINVAL;
7993 need_config = false;
7994 for (i = 0; i < obj->nr_extern; i++) {
7995 ext = &obj->externs[i];
7996 if (ext->type == EXT_KCFG && !ext->is_set) {
7997 need_config = true;
7998 break;
7999 }
8000 }
8001 }
8002 if (need_config) {
8003 err = bpf_object__read_kconfig_file(obj, kcfg_data);
8004 if (err)
8005 return -EINVAL;
8006 }
8007 if (need_kallsyms) {
8008 err = bpf_object__read_kallsyms_file(obj);
8009 if (err)
8010 return -EINVAL;
8011 }
8012 if (need_vmlinux_btf) {
8013 err = bpf_object__resolve_ksyms_btf_id(obj);
8014 if (err)
8015 return -EINVAL;
8016 }
8017 for (i = 0; i < obj->nr_extern; i++) {
8018 ext = &obj->externs[i];
8019
8020 if (!ext->is_set && !ext->is_weak) {
8021 pr_warn("extern %s (strong) not resolved\n", ext->name);
8022 return -ESRCH;
8023 } else if (!ext->is_set) {
8024 pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
8025 ext->name);
8026 }
8027 }
8028
8029 return 0;
8030}
8031
8032int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
8033{
8034 struct bpf_object *obj;
8035 int err, i;
8036
8037 if (!attr)
8038 return libbpf_err(-EINVAL);
8039 obj = attr->obj;
8040 if (!obj)
8041 return libbpf_err(-EINVAL);
8042
8043 if (obj->loaded) {
8044 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
8045 return libbpf_err(-EINVAL);
8046 }
8047
8048 if (obj->gen_loader)
8049 bpf_gen__init(obj->gen_loader, attr->log_level);
8050
8051 err = bpf_object__probe_loading(obj);
8052 err = err ? : bpf_object__load_vmlinux_btf(obj, false);
8053 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
8054 err = err ? : bpf_object__sanitize_and_load_btf(obj);
8055 err = err ? : bpf_object__sanitize_maps(obj);
8056 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
8057 err = err ? : bpf_object__create_maps(obj);
8058 err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
8059 err = err ? : bpf_object__load_progs(obj, attr->log_level);
8060
8061 if (obj->gen_loader) {
8062 /* reset FDs */
8063 btf__set_fd(obj->btf, -1);
8064 for (i = 0; i < obj->nr_maps; i++)
8065 obj->maps[i].fd = -1;
8066 if (!err)
8067 err = bpf_gen__finish(obj->gen_loader);
8068 }
8069
8070 /* clean up module BTFs */
8071 for (i = 0; i < obj->btf_module_cnt; i++) {
8072 close(obj->btf_modules[i].fd);
8073 btf__free(obj->btf_modules[i].btf);
8074 free(obj->btf_modules[i].name);
8075 }
8076 free(obj->btf_modules);
8077
8078 /* clean up vmlinux BTF */
8079 btf__free(obj->btf_vmlinux);
8080 obj->btf_vmlinux = NULL;
8081
8082 obj->loaded = true; /* doesn't matter if successfully or not */
8083
8084 if (err)
8085 goto out;
8086
8087 return 0;
8088out:
8089 /* unpin any maps that were auto-pinned during load */
8090 for (i = 0; i < obj->nr_maps; i++)
8091 if (obj->maps[i].pinned && !obj->maps[i].reused)
8092 bpf_map__unpin(&obj->maps[i], NULL);
8093
8094 bpf_object__unload(obj);
8095 pr_warn("failed to load object '%s'\n", obj->path);
8096 return libbpf_err(err);
8097}
8098
8099int bpf_object__load(struct bpf_object *obj)
8100{
8101 struct bpf_object_load_attr attr = {
8102 .obj = obj,
8103 };
8104
8105 return bpf_object__load_xattr(&attr);
8106}
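
/*
 * Usage sketch (illustrative only): the typical open -> load -> attach flow.
 * "prog.bpf.o" and the program name "handle_tp" are placeholders, and
 * bpf_object__find_program_by_name(), bpf_program__attach() and
 * bpf_link__destroy() are assumed from libbpf's public API; error handling is
 * abbreviated.
 *
 *	struct bpf_object *obj;
 *	struct bpf_program *prog;
 *	struct bpf_link *link = NULL;
 *
 *	obj = bpf_object__open("prog.bpf.o");
 *	if (libbpf_get_error(obj))
 *		return -EINVAL;
 *	if (!bpf_object__load(obj)) {
 *		prog = bpf_object__find_program_by_name(obj, "handle_tp");
 *		if (prog)
 *			link = bpf_program__attach(prog);
 *	}
 *	...
 *	bpf_link__destroy(link);
 *	bpf_object__close(obj);
 */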
8107
8108static int make_parent_dir(const char *path)
8109{
8110 char *cp, errmsg[STRERR_BUFSIZE];
8111 char *dname, *dir;
8112 int err = 0;
8113
8114 dname = strdup(path);
8115 if (dname == NULL)
8116 return -ENOMEM;
8117
8118 dir = dirname(dname);
8119 if (mkdir(dir, 0700) && errno != EEXIST)
8120 err = -errno;
8121
8122 free(dname);
8123 if (err) {
8124 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8125 pr_warn("failed to mkdir %s: %s\n", path, cp);
8126 }
8127 return err;
8128}
8129
8130static int check_path(const char *path)
8131{
8132 char *cp, errmsg[STRERR_BUFSIZE];
8133 struct statfs st_fs;
8134 char *dname, *dir;
8135 int err = 0;
8136
8137 if (path == NULL)
8138 return -EINVAL;
8139
8140 dname = strdup(path);
8141 if (dname == NULL)
8142 return -ENOMEM;
8143
8144 dir = dirname(dname);
8145 if (statfs(dir, &st_fs)) {
8146 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
8147 pr_warn("failed to statfs %s: %s\n", dir, cp);
8148 err = -errno;
8149 }
8150 free(dname);
8151
8152 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
8153 pr_warn("specified path %s is not on BPF FS\n", path);
8154 err = -EINVAL;
8155 }
8156
8157 return err;
8158}
8159
8160int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
8161 int instance)
8162{
8163 char *cp, errmsg[STRERR_BUFSIZE];
8164 int err;
8165
8166 err = make_parent_dir(path);
8167 if (err)
8168 return libbpf_err(err);
8169
8170 err = check_path(path);
8171 if (err)
8172 return libbpf_err(err);
8173
8174 if (prog == NULL) {
8175 pr_warn("invalid program pointer\n");
8176 return libbpf_err(-EINVAL);
8177 }
8178
8179 if (instance < 0 || instance >= prog->instances.nr) {
8180 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
8181 instance, prog->name, prog->instances.nr);
8182 return libbpf_err(-EINVAL);
8183 }
8184
8185 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
8186 err = -errno;
8187 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
8188 pr_warn("failed to pin program: %s\n", cp);
8189 return libbpf_err(err);
8190 }
8191 pr_debug("pinned program '%s'\n", path);
8192
8193 return 0;
8194}
8195
8196int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
8197 int instance)
8198{
8199 int err;
8200
8201 err = check_path(path);
8202 if (err)
8203 return libbpf_err(err);
8204
8205 if (prog == NULL) {
8206 pr_warn("invalid program pointer\n");
8207 return libbpf_err(-EINVAL);
8208 }
8209
8210 if (instance < 0 || instance >= prog->instances.nr) {
8211 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
8212 instance, prog->name, prog->instances.nr);
8213 return libbpf_err(-EINVAL);
8214 }
8215
8216 err = unlink(path);
8217 if (err != 0)
8218 return libbpf_err(-errno);
8219
8220 pr_debug("unpinned program '%s'\n", path);
8221
8222 return 0;
8223}
8224
8225int bpf_program__pin(struct bpf_program *prog, const char *path)
8226{
8227 int i, err;
8228
8229 err = make_parent_dir(path);
8230 if (err)
8231 return libbpf_err(err);
8232
8233 err = check_path(path);
8234 if (err)
8235 return libbpf_err(err);
8236
8237 if (prog == NULL) {
8238 pr_warn("invalid program pointer\n");
8239 return libbpf_err(-EINVAL);
8240 }
8241
8242 if (prog->instances.nr <= 0) {
8243 pr_warn("no instances of prog %s to pin\n", prog->name);
8244 return libbpf_err(-EINVAL);
8245 }
8246
8247 if (prog->instances.nr == 1) {
8248 /* don't create subdirs when pinning single instance */
8249 return bpf_program__pin_instance(prog, path, 0);
8250 }
8251
8252 for (i = 0; i < prog->instances.nr; i++) {
8253 char buf[PATH_MAX];
8254 int len;
8255
8256 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
8257 if (len < 0) {
8258 err = -EINVAL;
8259 goto err_unpin;
8260 } else if (len >= PATH_MAX) {
8261 err = -ENAMETOOLONG;
8262 goto err_unpin;
8263 }
8264
8265 err = bpf_program__pin_instance(prog, buf, i);
8266 if (err)
8267 goto err_unpin;
8268 }
8269
8270 return 0;
8271
8272err_unpin:
8273 for (i = i - 1; i >= 0; i--) {
8274 char buf[PATH_MAX];
8275 int len;
8276
8277 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
8278 if (len < 0)
8279 continue;
8280 else if (len >= PATH_MAX)
8281 continue;
8282
8283 bpf_program__unpin_instance(prog, buf, i);
8284 }
8285
8286 rmdir(path);
8287
8288 return libbpf_err(err);
8289}
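
/*
 * Usage sketch (illustrative only): pinning a loaded single-instance program
 * at a BPF FS path and removing the pin later. "/sys/fs/bpf/my_prog" is a
 * placeholder; the path must live on a mounted bpffs (see check_path()).
 *
 *	int err;
 *
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/my_prog");
 *	if (err)
 *		return err;
 *	...
 *	err = bpf_program__unpin(prog, "/sys/fs/bpf/my_prog");
 */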
8290
8291int bpf_program__unpin(struct bpf_program *prog, const char *path)
8292{
8293 int i, err;
8294
8295 err = check_path(path);
8296 if (err)
8297 return libbpf_err(err);
8298
8299 if (prog == NULL) {
8300 pr_warn("invalid program pointer\n");
8301 return libbpf_err(-EINVAL);
8302 }
8303
8304 if (prog->instances.nr <= 0) {
8305 pr_warn("no instances of prog %s to pin\n", prog->name);
8306 return libbpf_err(-EINVAL);
8307 }
8308
8309 if (prog->instances.nr == 1) {
8310 /* don't create subdirs when pinning single instance */
8311 return bpf_program__unpin_instance(prog, path, 0);
8312 }
8313
8314 for (i = 0; i < prog->instances.nr; i++) {
8315 char buf[PATH_MAX];
8316 int len;
8317
8318 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
8319 if (len < 0)
8320 return libbpf_err(-EINVAL);
8321 else if (len >= PATH_MAX)
8322 return libbpf_err(-ENAMETOOLONG);
8323
8324 err = bpf_program__unpin_instance(prog, buf, i);
8325 if (err)
8326 return err;
8327 }
8328
8329 err = rmdir(path);
8330 if (err)
8331 return libbpf_err(-errno);
8332
8333 return 0;
8334}
8335
8336int bpf_map__pin(struct bpf_map *map, const char *path)
8337{
8338 char *cp, errmsg[STRERR_BUFSIZE];
8339 int err;
8340
8341 if (map == NULL) {
8342 pr_warn("invalid map pointer\n");
8343 return libbpf_err(-EINVAL);
8344 }
8345
8346 if (map->pin_path) {
8347 if (path && strcmp(path, map->pin_path)) {
8348 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8349 bpf_map__name(map), map->pin_path, path);
8350 return libbpf_err(-EINVAL);
8351 } else if (map->pinned) {
8352 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
8353 bpf_map__name(map), map->pin_path);
8354 return 0;
8355 }
8356 } else {
8357 if (!path) {
8358 pr_warn("missing a path to pin map '%s' at\n",
8359 bpf_map__name(map));
8360 return libbpf_err(-EINVAL);
8361 } else if (map->pinned) {
8362 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
8363 return libbpf_err(-EEXIST);
8364 }
8365
8366 map->pin_path = strdup(path);
8367 if (!map->pin_path) {
8368 err = -errno;
8369 goto out_err;
8370 }
8371 }
8372
8373 err = make_parent_dir(map->pin_path);
8374 if (err)
8375 return libbpf_err(err);
8376
8377 err = check_path(map->pin_path);
8378 if (err)
8379 return libbpf_err(err);
8380
8381 if (bpf_obj_pin(map->fd, map->pin_path)) {
8382 err = -errno;
8383 goto out_err;
8384 }
8385
8386 map->pinned = true;
8387 pr_debug("pinned map '%s'\n", map->pin_path);
8388
8389 return 0;
8390
8391out_err:
8392 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8393 pr_warn("failed to pin map: %s\n", cp);
8394 return libbpf_err(err);
8395}
8396
8397int bpf_map__unpin(struct bpf_map *map, const char *path)
8398{
8399 int err;
8400
8401 if (map == NULL) {
8402 pr_warn("invalid map pointer\n");
8403 return libbpf_err(-EINVAL);
8404 }
8405
8406 if (map->pin_path) {
8407 if (path && strcmp(path, map->pin_path)) {
8408 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8409 bpf_map__name(map), map->pin_path, path);
8410 return libbpf_err(-EINVAL);
8411 }
8412 path = map->pin_path;
8413 } else if (!path) {
8414 pr_warn("no path to unpin map '%s' from\n",
8415 bpf_map__name(map));
8416 return libbpf_err(-EINVAL);
8417 }
8418
8419 err = check_path(path);
8420 if (err)
8421 return libbpf_err(err);
8422
8423 err = unlink(path);
8424 if (err != 0)
8425 return libbpf_err(-errno);
8426
8427 map->pinned = false;
8428 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
8429
8430 return 0;
8431}
8432
8433int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
8434{
8435 char *new = NULL;
8436
8437 if (path) {
8438 new = strdup(path);
8439 if (!new)
8440 return libbpf_err(-errno);
8441 }
8442
8443 free(map->pin_path);
8444 map->pin_path = new;
8445 return 0;
8446}
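
/*
 * Usage sketch (illustrative only): requesting that a map be pinned at (or
 * reused from) a fixed bpffs location by setting its pin path before load.
 * "my_map" and "/sys/fs/bpf/my_map" are placeholders, and
 * bpf_object__find_map_by_name() is assumed from libbpf's public API.
 *
 *	struct bpf_map *map;
 *	int err = 0;
 *
 *	map = bpf_object__find_map_by_name(obj, "my_map");
 *	if (map)
 *		err = bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *	err = err ?: bpf_object__load(obj);
 */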
8447
8448const char *bpf_map__get_pin_path(const struct bpf_map *map)
8449{
8450 return map->pin_path;
8451}
8452
8453bool bpf_map__is_pinned(const struct bpf_map *map)
8454{
8455 return map->pinned;
8456}
8457
8458static void sanitize_pin_path(char *s)
8459{
8460 /* bpffs disallows periods in path names */
8461 while (*s) {
8462 if (*s == '.')
8463 *s = '_';
8464 s++;
8465 }
8466}
8467
8468int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
8469{
8470 struct bpf_map *map;
8471 int err;
8472
8473 if (!obj)
8474 return libbpf_err(-ENOENT);
8475
8476 if (!obj->loaded) {
8477 pr_warn("object not yet loaded; load it first\n");
8478 return libbpf_err(-ENOENT);
8479 }
8480
8481 bpf_object__for_each_map(map, obj) {
8482 char *pin_path = NULL;
8483 char buf[PATH_MAX];
8484
8485 if (path) {
8486 int len;
8487
8488 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8489 bpf_map__name(map));
8490 if (len < 0) {
8491 err = -EINVAL;
8492 goto err_unpin_maps;
8493 } else if (len >= PATH_MAX) {
8494 err = -ENAMETOOLONG;
8495 goto err_unpin_maps;
8496 }
8497 sanitize_pin_path(buf);
8498 pin_path = buf;
8499 } else if (!map->pin_path) {
8500 continue;
8501 }
8502
8503 err = bpf_map__pin(map, pin_path);
8504 if (err)
8505 goto err_unpin_maps;
8506 }
8507
8508 return 0;
8509
8510err_unpin_maps:
8511 while ((map = bpf_map__prev(map, obj))) {
8512 if (!map->pin_path)
8513 continue;
8514
8515 bpf_map__unpin(map, NULL);
8516 }
8517
8518 return libbpf_err(err);
8519}
8520
8521int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
8522{
8523 struct bpf_map *map;
8524 int err;
8525
8526 if (!obj)
8527 return libbpf_err(-ENOENT);
8528
8529 bpf_object__for_each_map(map, obj) {
8530 char *pin_path = NULL;
8531 char buf[PATH_MAX];
8532
8533 if (path) {
8534 int len;
8535
8536 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8537 bpf_map__name(map));
8538 if (len < 0)
8539 return libbpf_err(-EINVAL);
8540 else if (len >= PATH_MAX)
8541 return libbpf_err(-ENAMETOOLONG);
8542 sanitize_pin_path(buf);
8543 pin_path = buf;
8544 } else if (!map->pin_path) {
8545 continue;
8546 }
8547
8548 err = bpf_map__unpin(map, pin_path);
8549 if (err)
8550 return libbpf_err(err);
8551 }
8552
8553 return 0;
8554}
8555
8556int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8557{
8558 struct bpf_program *prog;
8559 int err;
8560
8561 if (!obj)
8562 return libbpf_err(-ENOENT);
8563
8564 if (!obj->loaded) {
8565 pr_warn("object not yet loaded; load it first\n");
8566 return libbpf_err(-ENOENT);
8567 }
8568
8569 bpf_object__for_each_program(prog, obj) {
8570 char buf[PATH_MAX];
8571 int len;
8572
8573 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8574 prog->pin_name);
8575 if (len < 0) {
8576 err = -EINVAL;
8577 goto err_unpin_programs;
8578 } else if (len >= PATH_MAX) {
8579 err = -ENAMETOOLONG;
8580 goto err_unpin_programs;
8581 }
8582
8583 err = bpf_program__pin(prog, buf);
8584 if (err)
8585 goto err_unpin_programs;
8586 }
8587
8588 return 0;
8589
8590err_unpin_programs:
8591 while ((prog = bpf_program__prev(prog, obj))) {
8592 char buf[PATH_MAX];
8593 int len;
8594
8595 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8596 prog->pin_name);
8597 if (len < 0)
8598 continue;
8599 else if (len >= PATH_MAX)
8600 continue;
8601
8602 bpf_program__unpin(prog, buf);
8603 }
8604
8605 return libbpf_err(err);
8606}
8607
8608int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8609{
8610 struct bpf_program *prog;
8611 int err;
8612
8613 if (!obj)
8614 return libbpf_err(-ENOENT);
8615
8616 bpf_object__for_each_program(prog, obj) {
8617 char buf[PATH_MAX];
8618 int len;
8619
8620 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8621 prog->pin_name);
8622 if (len < 0)
8623 return libbpf_err(-EINVAL);
8624 else if (len >= PATH_MAX)
8625 return libbpf_err(-ENAMETOOLONG);
8626
8627 err = bpf_program__unpin(prog, buf);
8628 if (err)
8629 return libbpf_err(err);
8630 }
8631
8632 return 0;
8633}
8634
8635int bpf_object__pin(struct bpf_object *obj, const char *path)
8636{
8637 int err;
8638
8639 err = bpf_object__pin_maps(obj, path);
8640 if (err)
8641 return libbpf_err(err);
8642
8643 err = bpf_object__pin_programs(obj, path);
8644 if (err) {
8645 bpf_object__unpin_maps(obj, path);
8646 return libbpf_err(err);
8647 }
8648
8649 return 0;
8650}
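
/*
 * Usage sketch (illustrative only): pinning all maps and programs of a loaded
 * object under one bpffs directory, and undoing it on teardown.
 * "/sys/fs/bpf/myobj" is a placeholder path.
 *
 *	int err;
 *
 *	err = bpf_object__pin(obj, "/sys/fs/bpf/myobj");
 *	if (err)
 *		return err;
 *	...
 *	bpf_object__unpin_programs(obj, "/sys/fs/bpf/myobj");
 *	bpf_object__unpin_maps(obj, "/sys/fs/bpf/myobj");
 */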
8651
8652static void bpf_map__destroy(struct bpf_map *map)
8653{
8654 if (map->clear_priv)
8655 map->clear_priv(map, map->priv);
8656 map->priv = NULL;
8657 map->clear_priv = NULL;
8658
8659 if (map->inner_map) {
8660 bpf_map__destroy(map->inner_map);
8661 zfree(&map->inner_map);
8662 }
8663
8664 zfree(&map->init_slots);
8665 map->init_slots_sz = 0;
8666
8667 if (map->mmaped) {
8668 munmap(map->mmaped, bpf_map_mmap_sz(map));
8669 map->mmaped = NULL;
8670 }
8671
8672 if (map->st_ops) {
8673 zfree(&map->st_ops->data);
8674 zfree(&map->st_ops->progs);
8675 zfree(&map->st_ops->kern_func_off);
8676 zfree(&map->st_ops);
8677 }
8678
8679 zfree(&map->name);
8680 zfree(&map->pin_path);
8681
8682 if (map->fd >= 0)
8683 zclose(map->fd);
8684}
8685
8686void bpf_object__close(struct bpf_object *obj)
8687{
8688 size_t i;
8689
8690 if (IS_ERR_OR_NULL(obj))
8691 return;
8692
8693 if (obj->clear_priv)
8694 obj->clear_priv(obj, obj->priv);
8695
8696 bpf_gen__free(obj->gen_loader);
8697 bpf_object__elf_finish(obj);
8698 bpf_object__unload(obj);
8699 btf__free(obj->btf);
8700 btf_ext__free(obj->btf_ext);
8701
8702 for (i = 0; i < obj->nr_maps; i++)
8703 bpf_map__destroy(&obj->maps[i]);
8704
8705 zfree(&obj->kconfig);
8706 zfree(&obj->externs);
8707 obj->nr_extern = 0;
8708
8709 zfree(&obj->maps);
8710 obj->nr_maps = 0;
8711
8712 if (obj->programs && obj->nr_programs) {
8713 for (i = 0; i < obj->nr_programs; i++)
8714 bpf_program__exit(&obj->programs[i]);
8715 }
8716 zfree(&obj->programs);
8717
8718 list_del(&obj->list);
8719 free(obj);
8720}
8721
8722struct bpf_object *
8723bpf_object__next(struct bpf_object *prev)
8724{
8725 struct bpf_object *next;
8726
8727 if (!prev)
8728 next = list_first_entry(&bpf_objects_list,
8729 struct bpf_object,
8730 list);
8731 else
8732 next = list_next_entry(prev, list);
8733
8734 /* The empty-list case is detected here, so no check is needed on entry. */
8735 if (&next->list == &bpf_objects_list)
8736 return NULL;
8737
8738 return next;
8739}
8740
8741const char *bpf_object__name(const struct bpf_object *obj)
8742{
8743 return obj ? obj->name : libbpf_err_ptr(-EINVAL);
8744}
8745
8746unsigned int bpf_object__kversion(const struct bpf_object *obj)
8747{
8748 return obj ? obj->kern_version : 0;
8749}
8750
8751struct btf *bpf_object__btf(const struct bpf_object *obj)
8752{
8753 return obj ? obj->btf : NULL;
8754}
8755
8756int bpf_object__btf_fd(const struct bpf_object *obj)
8757{
8758 return obj->btf ? btf__fd(obj->btf) : -1;
8759}
8760
8761int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
8762{
8763 if (obj->loaded)
8764 return libbpf_err(-EINVAL);
8765
8766 obj->kern_version = kern_version;
8767
8768 return 0;
8769}
8770
8771int bpf_object__set_priv(struct bpf_object *obj, void *priv,
8772 bpf_object_clear_priv_t clear_priv)
8773{
8774 if (obj->priv && obj->clear_priv)
8775 obj->clear_priv(obj, obj->priv);
8776
8777 obj->priv = priv;
8778 obj->clear_priv = clear_priv;
8779 return 0;
8780}
8781
8782void *bpf_object__priv(const struct bpf_object *obj)
8783{
8784 return obj ? obj->priv : libbpf_err_ptr(-EINVAL);
8785}
8786
8787int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
8788{
8789 struct bpf_gen *gen;
8790
8791 if (!opts)
8792 return -EFAULT;
8793 if (!OPTS_VALID(opts, gen_loader_opts))
8794 return -EINVAL;
8795 gen = calloc(1, sizeof(*gen));
8796 if (!gen)
8797 return -ENOMEM;
8798 gen->opts = opts;
8799 obj->gen_loader = gen;
8800 return 0;
8801}
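
/*
 * Usage sketch (illustrative only): switching an object into loader-generation
 * ("light skeleton") mode before load. With a gen_loader attached,
 * bpf_object__load() records loader instructions and data into the opts
 * instead of performing the actual syscalls; the exact output fields of
 * struct gen_loader_opts are not shown here.
 *
 *	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen_opts);
 *	int err;
 *
 *	err = bpf_object__gen_loader(obj, &gen_opts);
 *	err = err ?: bpf_object__load(obj);
 */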
8802
8803static struct bpf_program *
8804__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8805 bool forward)
8806{
8807 size_t nr_programs = obj->nr_programs;
8808 ssize_t idx;
8809
8810 if (!nr_programs)
8811 return NULL;
8812
8813 if (!p)
8814 /* Iter from the beginning */
8815 return forward ? &obj->programs[0] :
8816 &obj->programs[nr_programs - 1];
8817
8818 if (p->obj != obj) {
8819 pr_warn("error: program handler doesn't match object\n");
8820 return errno = EINVAL, NULL;
8821 }
8822
8823 idx = (p - obj->programs) + (forward ? 1 : -1);
8824 if (idx >= obj->nr_programs || idx < 0)
8825 return NULL;
8826 return &obj->programs[idx];
8827}
8828
8829struct bpf_program *
8830bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
8831{
8832 struct bpf_program *prog = prev;
8833
8834 do {
8835 prog = __bpf_program__iter(prog, obj, true);
8836 } while (prog && prog_is_subprog(obj, prog));
8837
8838 return prog;
8839}
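
/*
 * Usage sketch (illustrative only): iterating all entry-point programs of an
 * object (subprograms are skipped by the iterator) via the
 * bpf_object__for_each_program() helper assumed from libbpf.h.
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj) {
 *		printf("%s (section %s)\n", bpf_program__name(prog),
 *		       bpf_program__section_name(prog));
 *	}
 */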
8840
8841struct bpf_program *
8842bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
8843{
8844 struct bpf_program *prog = next;
8845
8846 do {
8847 prog = __bpf_program__iter(prog, obj, false);
8848 } while (prog && prog_is_subprog(obj, prog));
8849
8850 return prog;
8851}
8852
8853int bpf_program__set_priv(struct bpf_program *prog, void *priv,
8854 bpf_program_clear_priv_t clear_priv)
8855{
8856 if (prog->priv && prog->clear_priv)
8857 prog->clear_priv(prog, prog->priv);
8858
8859 prog->priv = priv;
8860 prog->clear_priv = clear_priv;
8861 return 0;
8862}
8863
8864void *bpf_program__priv(const struct bpf_program *prog)
8865{
8866 return prog ? prog->priv : libbpf_err_ptr(-EINVAL);
8867}
8868
8869void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
8870{
8871 prog->prog_ifindex = ifindex;
8872}
8873
8874const char *bpf_program__name(const struct bpf_program *prog)
8875{
8876 return prog->name;
8877}
8878
8879const char *bpf_program__section_name(const struct bpf_program *prog)
8880{
8881 return prog->sec_name;
8882}
8883
8884const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
8885{
8886 const char *title;
8887
8888 title = prog->sec_name;
8889 if (needs_copy) {
8890 title = strdup(title);
8891 if (!title) {
8892 pr_warn("failed to strdup program title\n");
8893 return libbpf_err_ptr(-ENOMEM);
8894 }
8895 }
8896
8897 return title;
8898}
8899
8900bool bpf_program__autoload(const struct bpf_program *prog)
8901{
8902 return prog->load;
8903}
8904
8905int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
8906{
8907 if (prog->obj->loaded)
8908 return libbpf_err(-EINVAL);
8909
8910 prog->load = autoload;
8911 return 0;
8912}
8913
8914int bpf_program__fd(const struct bpf_program *prog)
8915{
8916 return bpf_program__nth_fd(prog, 0);
8917}
8918
8919size_t bpf_program__size(const struct bpf_program *prog)
8920{
8921 return prog->insns_cnt * BPF_INSN_SZ;
8922}
8923
8924int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
8925 bpf_program_prep_t prep)
8926{
8927 int *instances_fds;
8928
8929 if (nr_instances <= 0 || !prep)
8930 return libbpf_err(-EINVAL);
8931
8932 if (prog->instances.nr > 0 || prog->instances.fds) {
8933 pr_warn("Can't set pre-processor after loading\n");
8934 return libbpf_err(-EINVAL);
8935 }
8936
8937 instances_fds = malloc(sizeof(int) * nr_instances);
8938 if (!instances_fds) {
8939 pr_warn("alloc memory failed for fds\n");
8940 return libbpf_err(-ENOMEM);
8941 }
8942
8943 /* fill all fd with -1 */
8944 memset(instances_fds, -1, sizeof(int) * nr_instances);
8945
8946 prog->instances.nr = nr_instances;
8947 prog->instances.fds = instances_fds;
8948 prog->preprocessor = prep;
8949 return 0;
8950}
8951
8952int bpf_program__nth_fd(const struct bpf_program *prog, int n)
8953{
8954 int fd;
8955
8956 if (!prog)
8957 return libbpf_err(-EINVAL);
8958
8959 if (n >= prog->instances.nr || n < 0) {
8960 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
8961 n, prog->name, prog->instances.nr);
8962 return libbpf_err(-EINVAL);
8963 }
8964
8965 fd = prog->instances.fds[n];
8966 if (fd < 0) {
8967 pr_warn("%dth instance of program '%s' is invalid\n",
8968 n, prog->name);
8969 return libbpf_err(-ENOENT);
8970 }
8971
8972 return fd;
8973}
8974
8975enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog)
8976{
8977 return prog->type;
8978}
8979
8980void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
8981{
8982 prog->type = type;
8983}
8984
8985static bool bpf_program__is_type(const struct bpf_program *prog,
8986 enum bpf_prog_type type)
8987{
8988 return prog ? (prog->type == type) : false;
8989}
8990
8991#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
8992int bpf_program__set_##NAME(struct bpf_program *prog) \
8993{ \
8994 if (!prog) \
8995 return libbpf_err(-EINVAL); \
8996 bpf_program__set_type(prog, TYPE); \
8997 return 0; \
8998} \
8999 \
9000bool bpf_program__is_##NAME(const struct bpf_program *prog) \
9001{ \
9002 return bpf_program__is_type(prog, TYPE); \
9003} \
9004
9005BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
9006BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
9007BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
9008BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
9009BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
9010BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
9011BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
9012BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
9013BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
9014BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
9015BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
9016BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
9017BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
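
/*
 * Usage sketch (illustrative only): overriding the program type and expected
 * attach type by hand, e.g. when the ELF section name was not recognized by
 * find_sec_def(). Must happen before the object is loaded.
 *
 *	bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
 *	bpf_program__set_expected_attach_type(prog, BPF_XDP);
 *
 * or, equivalently for the type part, bpf_program__set_xdp(prog).
 */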
9018
9019enum bpf_attach_type
9020bpf_program__get_expected_attach_type(const struct bpf_program *prog)
9021{
9022 return prog->expected_attach_type;
9023}
9024
9025void bpf_program__set_expected_attach_type(struct bpf_program *prog,
9026 enum bpf_attach_type type)
9027{
9028 prog->expected_attach_type = type;
9029}
9030
9031#define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional, \
9032 attachable, attach_btf) \
9033 { \
9034 .sec = string, \
9035 .len = sizeof(string) - 1, \
9036 .prog_type = ptype, \
9037 .expected_attach_type = eatype, \
9038 .is_exp_attach_type_optional = eatype_optional, \
9039 .is_attachable = attachable, \
9040 .is_attach_btf = attach_btf, \
9041 }
9042
9043/* Programs that can NOT be attached. */
9044#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
9045
9046/* Programs that can be attached. */
9047#define BPF_APROG_SEC(string, ptype, atype) \
9048 BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
9049
9050/* Programs that must specify expected attach type at load time. */
9051#define BPF_EAPROG_SEC(string, ptype, eatype) \
9052 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
9053
9054/* Programs that use BTF to identify attach point */
9055#define BPF_PROG_BTF(string, ptype, eatype) \
9056 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
9057
9058/* Programs that can be attached but attach type can't be identified by section
9059 * name. Kept for backward compatibility.
9060 */
9061#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
9062
9063#define SEC_DEF(sec_pfx, ptype, ...) { \
9064 .sec = sec_pfx, \
9065 .len = sizeof(sec_pfx) - 1, \
9066 .prog_type = BPF_PROG_TYPE_##ptype, \
9067 __VA_ARGS__ \
9068}
9069
9070static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
9071 struct bpf_program *prog);
9072static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
9073 struct bpf_program *prog);
9074static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
9075 struct bpf_program *prog);
9076static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
9077 struct bpf_program *prog);
9078static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
9079 struct bpf_program *prog);
9080static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
9081 struct bpf_program *prog);
9082
9083static const struct bpf_sec_def section_defs[] = {
9084 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
9085 BPF_EAPROG_SEC("sk_reuseport/migrate", BPF_PROG_TYPE_SK_REUSEPORT,
9086 BPF_SK_REUSEPORT_SELECT_OR_MIGRATE),
9087 BPF_EAPROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT,
9088 BPF_SK_REUSEPORT_SELECT),
9089 SEC_DEF("kprobe/", KPROBE,
9090 .attach_fn = attach_kprobe),
9091 BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
9092 SEC_DEF("kretprobe/", KPROBE,
9093 .attach_fn = attach_kprobe),
9094 BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
9095 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
9096 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
9097 SEC_DEF("tracepoint/", TRACEPOINT,
9098 .attach_fn = attach_tp),
9099 SEC_DEF("tp/", TRACEPOINT,
9100 .attach_fn = attach_tp),
9101 SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
9102 .attach_fn = attach_raw_tp),
9103 SEC_DEF("raw_tp/", RAW_TRACEPOINT,
9104 .attach_fn = attach_raw_tp),
9105 SEC_DEF("tp_btf/", TRACING,
9106 .expected_attach_type = BPF_TRACE_RAW_TP,
9107 .is_attach_btf = true,
9108 .attach_fn = attach_trace),
9109 SEC_DEF("fentry/", TRACING,
9110 .expected_attach_type = BPF_TRACE_FENTRY,
9111 .is_attach_btf = true,
9112 .attach_fn = attach_trace),
9113 SEC_DEF("fmod_ret/", TRACING,
9114 .expected_attach_type = BPF_MODIFY_RETURN,
9115 .is_attach_btf = true,
9116 .attach_fn = attach_trace),
9117 SEC_DEF("fexit/", TRACING,
9118 .expected_attach_type = BPF_TRACE_FEXIT,
9119 .is_attach_btf = true,
9120 .attach_fn = attach_trace),
9121 SEC_DEF("fentry.s/", TRACING,
9122 .expected_attach_type = BPF_TRACE_FENTRY,
9123 .is_attach_btf = true,
9124 .is_sleepable = true,
9125 .attach_fn = attach_trace),
9126 SEC_DEF("fmod_ret.s/", TRACING,
9127 .expected_attach_type = BPF_MODIFY_RETURN,
9128 .is_attach_btf = true,
9129 .is_sleepable = true,
9130 .attach_fn = attach_trace),
9131 SEC_DEF("fexit.s/", TRACING,
9132 .expected_attach_type = BPF_TRACE_FEXIT,
9133 .is_attach_btf = true,
9134 .is_sleepable = true,
9135 .attach_fn = attach_trace),
9136 SEC_DEF("freplace/", EXT,
9137 .is_attach_btf = true,
9138 .attach_fn = attach_trace),
9139 SEC_DEF("lsm/", LSM,
9140 .is_attach_btf = true,
9141 .expected_attach_type = BPF_LSM_MAC,
9142 .attach_fn = attach_lsm),
9143 SEC_DEF("lsm.s/", LSM,
9144 .is_attach_btf = true,
9145 .is_sleepable = true,
9146 .expected_attach_type = BPF_LSM_MAC,
9147 .attach_fn = attach_lsm),
9148 SEC_DEF("iter/", TRACING,
9149 .expected_attach_type = BPF_TRACE_ITER,
9150 .is_attach_btf = true,
9151 .attach_fn = attach_iter),
9152 SEC_DEF("syscall", SYSCALL,
9153 .is_sleepable = true),
9154 BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP,
9155 BPF_XDP_DEVMAP),
9156 BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP,
9157 BPF_XDP_CPUMAP),
9158 BPF_APROG_SEC("xdp", BPF_PROG_TYPE_XDP,
9159 BPF_XDP),
9160 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
9161 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
9162 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
9163 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
9164 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
9165 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
9166 BPF_CGROUP_INET_INGRESS),
9167 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
9168 BPF_CGROUP_INET_EGRESS),
9169 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
9170 BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK,
9171 BPF_CGROUP_INET_SOCK_CREATE),
9172 BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK,
9173 BPF_CGROUP_INET_SOCK_RELEASE),
9174 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
9175 BPF_CGROUP_INET_SOCK_CREATE),
9176 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
9177 BPF_CGROUP_INET4_POST_BIND),
9178 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
9179 BPF_CGROUP_INET6_POST_BIND),
9180 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
9181 BPF_CGROUP_DEVICE),
9182 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
9183 BPF_CGROUP_SOCK_OPS),
9184 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
9185 BPF_SK_SKB_STREAM_PARSER),
9186 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
9187 BPF_SK_SKB_STREAM_VERDICT),
9188 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
9189 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
9190 BPF_SK_MSG_VERDICT),
9191 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
9192 BPF_LIRC_MODE2),
9193 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
9194 BPF_FLOW_DISSECTOR),
9195 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
9196 BPF_CGROUP_INET4_BIND),
9197 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
9198 BPF_CGROUP_INET6_BIND),
9199 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
9200 BPF_CGROUP_INET4_CONNECT),
9201 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
9202 BPF_CGROUP_INET6_CONNECT),
9203 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
9204 BPF_CGROUP_UDP4_SENDMSG),
9205 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
9206 BPF_CGROUP_UDP6_SENDMSG),
9207 BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
9208 BPF_CGROUP_UDP4_RECVMSG),
9209 BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
9210 BPF_CGROUP_UDP6_RECVMSG),
9211 BPF_EAPROG_SEC("cgroup/getpeername4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
9212 BPF_CGROUP_INET4_GETPEERNAME),
9213 BPF_EAPROG_SEC("cgroup/getpeername6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
9214 BPF_CGROUP_INET6_GETPEERNAME),
9215 BPF_EAPROG_SEC("cgroup/getsockname4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
9216 BPF_CGROUP_INET4_GETSOCKNAME),
9217 BPF_EAPROG_SEC("cgroup/getsockname6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
9218 BPF_CGROUP_INET6_GETSOCKNAME),
9219 BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
9220 BPF_CGROUP_SYSCTL),
9221 BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
9222 BPF_CGROUP_GETSOCKOPT),
9223 BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
9224 BPF_CGROUP_SETSOCKOPT),
9225 BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS),
9226 BPF_EAPROG_SEC("sk_lookup/", BPF_PROG_TYPE_SK_LOOKUP,
9227 BPF_SK_LOOKUP),
9228};
9229
9230#undef BPF_PROG_SEC_IMPL
9231#undef BPF_PROG_SEC
9232#undef BPF_APROG_SEC
9233#undef BPF_EAPROG_SEC
9234#undef BPF_APROG_COMPAT
9235#undef SEC_DEF
9236
9237#define MAX_TYPE_NAME_SIZE 32
9238
9239static const struct bpf_sec_def *find_sec_def(const char *sec_name)
9240{
9241 int i, n = ARRAY_SIZE(section_defs);
9242
9243 for (i = 0; i < n; i++) {
9244 if (strncmp(sec_name,
9245 section_defs[i].sec, section_defs[i].len))
9246 continue;
9247 return &section_defs[i];
9248 }
9249 return NULL;
9250}
9251
9252static char *libbpf_get_type_names(bool attach_type)
9253{
9254 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
9255 char *buf;
9256
9257 buf = malloc(len);
9258 if (!buf)
9259 return NULL;
9260
9261 buf[0] = '\0';
9262 /* Forge string buf with all available names */
9263 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9264 if (attach_type && !section_defs[i].is_attachable)
9265 continue;
9266
9267 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
9268 free(buf);
9269 return NULL;
9270 }
9271 strcat(buf, " ");
9272 strcat(buf, section_defs[i].sec);
9273 }
9274
9275 return buf;
9276}
9277
9278int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
9279 enum bpf_attach_type *expected_attach_type)
9280{
9281 const struct bpf_sec_def *sec_def;
9282 char *type_names;
9283
9284 if (!name)
9285 return libbpf_err(-EINVAL);
9286
9287 sec_def = find_sec_def(name);
9288 if (sec_def) {
9289 *prog_type = sec_def->prog_type;
9290 *expected_attach_type = sec_def->expected_attach_type;
9291 return 0;
9292 }
9293
9294 pr_debug("failed to guess program type from ELF section '%s'\n", name);
9295 type_names = libbpf_get_type_names(false);
9296 if (type_names != NULL) {
9297 pr_debug("supported section(type) names are:%s\n", type_names);
9298 free(type_names);
9299 }
9300
9301 return libbpf_err(-ESRCH);
9302}
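
/*
 * Usage sketch (illustrative only): mapping a section-style name supplied by
 * the user (e.g. from a command-line loader) to program and attach types.
 * "cgroup/connect4" is a placeholder section name.
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *	int err;
 *
 *	err = libbpf_prog_type_by_name("cgroup/connect4", &prog_type, &attach_type);
 *	if (!err) {
 *		bpf_program__set_type(prog, prog_type);
 *		bpf_program__set_expected_attach_type(prog, attach_type);
 *	}
 */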
9303
9304static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
9305 size_t offset)
9306{
9307 struct bpf_map *map;
9308 size_t i;
9309
9310 for (i = 0; i < obj->nr_maps; i++) {
9311 map = &obj->maps[i];
9312 if (!bpf_map__is_struct_ops(map))
9313 continue;
9314 if (map->sec_offset <= offset &&
9315 offset - map->sec_offset < map->def.value_size)
9316 return map;
9317 }
9318
9319 return NULL;
9320}
9321
9322/* Collect the reloc from ELF and populate the st_ops->progs[] */
9323static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
9324 GElf_Shdr *shdr, Elf_Data *data)
9325{
9326 const struct btf_member *member;
9327 struct bpf_struct_ops *st_ops;
9328 struct bpf_program *prog;
9329 unsigned int shdr_idx;
9330 const struct btf *btf;
9331 struct bpf_map *map;
9332 Elf_Data *symbols;
9333 unsigned int moff, insn_idx;
9334 const char *name;
9335 __u32 member_idx;
9336 GElf_Sym sym;
9337 GElf_Rel rel;
9338 int i, nrels;
9339
9340 symbols = obj->efile.symbols;
9341 btf = obj->btf;
9342 nrels = shdr->sh_size / shdr->sh_entsize;
9343 for (i = 0; i < nrels; i++) {
9344 if (!gelf_getrel(data, i, &rel)) {
9345 pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
9346 return -LIBBPF_ERRNO__FORMAT;
9347 }
9348
9349 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
9350 pr_warn("struct_ops reloc: symbol %zx not found\n",
9351 (size_t)GELF_R_SYM(rel.r_info));
9352 return -LIBBPF_ERRNO__FORMAT;
9353 }
9354
9355 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
9356 map = find_struct_ops_map_by_offset(obj, rel.r_offset);
9357 if (!map) {
9358 pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
9359 (size_t)rel.r_offset);
9360 return -EINVAL;
9361 }
9362
9363 moff = rel.r_offset - map->sec_offset;
9364 shdr_idx = sym.st_shndx;
9365 st_ops = map->st_ops;
9366 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
9367 map->name,
9368 (long long)(rel.r_info >> 32),
9369 (long long)sym.st_value,
9370 shdr_idx, (size_t)rel.r_offset,
9371 map->sec_offset, sym.st_name, name);
9372
9373 if (shdr_idx >= SHN_LORESERVE) {
9374 pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
9375 map->name, (size_t)rel.r_offset, shdr_idx);
9376 return -LIBBPF_ERRNO__RELOC;
9377 }
9378 if (sym.st_value % BPF_INSN_SZ) {
9379 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
9380 map->name, (unsigned long long)sym.st_value);
9381 return -LIBBPF_ERRNO__FORMAT;
9382 }
9383 insn_idx = sym.st_value / BPF_INSN_SZ;
9384
9385 member = find_member_by_offset(st_ops->type, moff * 8);
9386 if (!member) {
9387 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
9388 map->name, moff);
9389 return -EINVAL;
9390 }
9391 member_idx = member - btf_members(st_ops->type);
9392 name = btf__name_by_offset(btf, member->name_off);
9393
9394 if (!resolve_func_ptr(btf, member->type, NULL)) {
9395 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
9396 map->name, name);
9397 return -EINVAL;
9398 }
9399
9400 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
9401 if (!prog) {
9402 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
9403 map->name, shdr_idx, name);
9404 return -EINVAL;
9405 }
9406
9407 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
9408 const struct bpf_sec_def *sec_def;
9409
9410 sec_def = find_sec_def(prog->sec_name);
9411 if (sec_def &&
9412 sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
9413 /* for pr_warn */
9414 prog->type = sec_def->prog_type;
9415 goto invalid_prog;
9416 }
9417
9418 prog->type = BPF_PROG_TYPE_STRUCT_OPS;
9419 prog->attach_btf_id = st_ops->type_id;
9420 prog->expected_attach_type = member_idx;
9421 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
9422 prog->attach_btf_id != st_ops->type_id ||
9423 prog->expected_attach_type != member_idx) {
9424 goto invalid_prog;
9425 }
9426 st_ops->progs[member_idx] = prog;
9427 }
9428
9429 return 0;
9430
9431invalid_prog:
9432 pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
9433 map->name, prog->name, prog->sec_name, prog->type,
9434 prog->attach_btf_id, prog->expected_attach_type, name);
9435 return -EINVAL;
9436}
9437
9438#define BTF_TRACE_PREFIX "btf_trace_"
9439#define BTF_LSM_PREFIX "bpf_lsm_"
9440#define BTF_ITER_PREFIX "bpf_iter_"
9441#define BTF_MAX_NAME_SIZE 128
9442
9443void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
9444 const char **prefix, int *kind)
9445{
9446 switch (attach_type) {
9447 case BPF_TRACE_RAW_TP:
9448 *prefix = BTF_TRACE_PREFIX;
9449 *kind = BTF_KIND_TYPEDEF;
9450 break;
9451 case BPF_LSM_MAC:
9452 *prefix = BTF_LSM_PREFIX;
9453 *kind = BTF_KIND_FUNC;
9454 break;
9455 case BPF_TRACE_ITER:
9456 *prefix = BTF_ITER_PREFIX;
9457 *kind = BTF_KIND_FUNC;
9458 break;
9459 default:
9460 *prefix = "";
9461 *kind = BTF_KIND_FUNC;
9462 }
9463}
9464
9465static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
9466 const char *name, __u32 kind)
9467{
9468 char btf_type_name[BTF_MAX_NAME_SIZE];
9469 int ret;
9470
9471 ret = snprintf(btf_type_name, sizeof(btf_type_name),
9472 "%s%s", prefix, name);
9473 /* snprintf returns the number of characters written excluding the
9474 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
9475 * indicates truncation.
9476 */
9477 if (ret < 0 || ret >= sizeof(btf_type_name))
9478 return -ENAMETOOLONG;
9479 return btf__find_by_name_kind(btf, btf_type_name, kind);
9480}
9481
9482static inline int find_attach_btf_id(struct btf *btf, const char *name,
9483 enum bpf_attach_type attach_type)
9484{
9485 const char *prefix;
9486 int kind;
9487
9488 btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
9489 return find_btf_by_prefix_kind(btf, prefix, name, kind);
9490}
9491
9492int libbpf_find_vmlinux_btf_id(const char *name,
9493 enum bpf_attach_type attach_type)
9494{
9495 struct btf *btf;
9496 int err;
9497
9498 btf = libbpf_find_kernel_btf();
9499 err = libbpf_get_error(btf);
9500 if (err) {
9501 pr_warn("vmlinux BTF is not found\n");
9502 return libbpf_err(err);
9503 }
9504
9505 err = find_attach_btf_id(btf, name, attach_type);
9506 if (err <= 0)
9507 pr_warn("%s is not found in vmlinux BTF\n", name);
9508
9509 btf__free(btf);
9510 return libbpf_err(err);
9511}
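
/* Example (hedged sketch): resolving the vmlinux BTF type ID of a kernel
 * function before attaching, e.g., for an fentry-style program. The function
 * name "tcp_connect" is purely illustrative:
 *
 *	int btf_id = libbpf_find_vmlinux_btf_id("tcp_connect", BPF_TRACE_FENTRY);
 *
 *	if (btf_id < 0)
 *		return btf_id;	// lookup failed, negative errno-style code
 */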
9512
9513static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
9514{
9515 struct bpf_prog_info_linear *info_linear;
9516 struct bpf_prog_info *info;
9517 struct btf *btf = NULL;
9518 int err = -EINVAL;
9519
9520 info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
9521 err = libbpf_get_error(info_linear);
9522 if (err) {
9523 pr_warn("failed get_prog_info_linear for FD %d\n",
9524 attach_prog_fd);
9525 return err;
9526 }
9527 info = &info_linear->info;
9528 if (!info->btf_id) {
9529 pr_warn("The target program doesn't have BTF\n");
9530 goto out;
9531 }
9532 if (btf__get_from_id(info->btf_id, &btf)) {
9533 pr_warn("Failed to get BTF of the program\n");
9534 goto out;
9535 }
9536 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
9537 btf__free(btf);
9538 if (err <= 0) {
9539 pr_warn("%s is not found in prog's BTF\n", name);
9540 goto out;
9541 }
9542out:
9543 free(info_linear);
9544 return err;
9545}
9546
9547static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
9548 enum bpf_attach_type attach_type,
9549 int *btf_obj_fd, int *btf_type_id)
9550{
9551 int ret, i;
9552
9553 ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
9554 if (ret > 0) {
9555 *btf_obj_fd = 0; /* vmlinux BTF */
9556 *btf_type_id = ret;
9557 return 0;
9558 }
9559 if (ret != -ENOENT)
9560 return ret;
9561
9562 ret = load_module_btfs(obj);
9563 if (ret)
9564 return ret;
9565
9566 for (i = 0; i < obj->btf_module_cnt; i++) {
9567 const struct module_btf *mod = &obj->btf_modules[i];
9568
9569 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
9570 if (ret > 0) {
9571 *btf_obj_fd = mod->fd;
9572 *btf_type_id = ret;
9573 return 0;
9574 }
9575 if (ret == -ENOENT)
9576 continue;
9577
9578 return ret;
9579 }
9580
9581 return -ESRCH;
9582}
9583
9584static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id)
9585{
9586 enum bpf_attach_type attach_type = prog->expected_attach_type;
9587 __u32 attach_prog_fd = prog->attach_prog_fd;
9588 const char *name = prog->sec_name, *attach_name;
9589 const struct bpf_sec_def *sec = NULL;
9590 int i, err = 0;
9591
9592 if (!name)
9593 return -EINVAL;
9594
9595 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9596 if (!section_defs[i].is_attach_btf)
9597 continue;
9598 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
9599 continue;
9600
9601		sec = &section_defs[i];
9602 break;
9603 }
9604
9605 if (!sec) {
9606 pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name);
9607 return -ESRCH;
9608 }
9609 attach_name = name + sec->len;
9610
9611 /* BPF program's BTF ID */
9612 if (attach_prog_fd) {
9613 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9614 if (err < 0) {
9615 pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9616 attach_prog_fd, attach_name, err);
9617 return err;
9618 }
9619 *btf_obj_fd = 0;
9620 *btf_type_id = err;
9621 return 0;
9622 }
9623
9624 /* kernel/module BTF ID */
9625 if (prog->obj->gen_loader) {
9626 bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
9627 *btf_obj_fd = 0;
9628 *btf_type_id = 1;
9629 } else {
9630 err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
9631 }
9632 if (err) {
9633 pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
9634 return err;
9635 }
9636 return 0;
9637}
9638
9639int libbpf_attach_type_by_name(const char *name,
9640 enum bpf_attach_type *attach_type)
9641{
9642 char *type_names;
9643 int i;
9644
9645 if (!name)
9646 return libbpf_err(-EINVAL);
9647
9648 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9649 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
9650 continue;
9651 if (!section_defs[i].is_attachable)
9652 return libbpf_err(-EINVAL);
9653 *attach_type = section_defs[i].expected_attach_type;
9654 return 0;
9655 }
9656 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9657 type_names = libbpf_get_type_names(true);
9658 if (type_names != NULL) {
9659 pr_debug("attachable section(type) names are:%s\n", type_names);
9660 free(type_names);
9661 }
9662
9663 return libbpf_err(-EINVAL);
9664}
9665
9666int bpf_map__fd(const struct bpf_map *map)
9667{
9668 return map ? map->fd : libbpf_err(-EINVAL);
9669}
9670
9671const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
9672{
9673 return map ? &map->def : libbpf_err_ptr(-EINVAL);
9674}
9675
9676const char *bpf_map__name(const struct bpf_map *map)
9677{
9678 return map ? map->name : NULL;
9679}
9680
9681enum bpf_map_type bpf_map__type(const struct bpf_map *map)
9682{
9683 return map->def.type;
9684}
9685
9686int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9687{
9688 if (map->fd >= 0)
9689 return libbpf_err(-EBUSY);
9690 map->def.type = type;
9691 return 0;
9692}
9693
9694__u32 bpf_map__map_flags(const struct bpf_map *map)
9695{
9696 return map->def.map_flags;
9697}
9698
9699int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
9700{
9701 if (map->fd >= 0)
9702 return libbpf_err(-EBUSY);
9703 map->def.map_flags = flags;
9704 return 0;
9705}
9706
9707__u32 bpf_map__numa_node(const struct bpf_map *map)
9708{
9709 return map->numa_node;
9710}
9711
9712int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
9713{
9714 if (map->fd >= 0)
9715 return libbpf_err(-EBUSY);
9716 map->numa_node = numa_node;
9717 return 0;
9718}
9719
9720__u32 bpf_map__key_size(const struct bpf_map *map)
9721{
9722 return map->def.key_size;
9723}
9724
9725int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
9726{
9727 if (map->fd >= 0)
9728 return libbpf_err(-EBUSY);
9729 map->def.key_size = size;
9730 return 0;
9731}
9732
9733__u32 bpf_map__value_size(const struct bpf_map *map)
9734{
9735 return map->def.value_size;
9736}
9737
9738int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
9739{
9740 if (map->fd >= 0)
9741 return libbpf_err(-EBUSY);
9742 map->def.value_size = size;
9743 return 0;
9744}
9745
9746__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
9747{
9748 return map ? map->btf_key_type_id : 0;
9749}
9750
9751__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
9752{
9753 return map ? map->btf_value_type_id : 0;
9754}
9755
9756int bpf_map__set_priv(struct bpf_map *map, void *priv,
9757 bpf_map_clear_priv_t clear_priv)
9758{
9759 if (!map)
9760 return libbpf_err(-EINVAL);
9761
9762 if (map->priv) {
9763 if (map->clear_priv)
9764 map->clear_priv(map, map->priv);
9765 }
9766
9767 map->priv = priv;
9768 map->clear_priv = clear_priv;
9769 return 0;
9770}
9771
9772void *bpf_map__priv(const struct bpf_map *map)
9773{
9774 return map ? map->priv : libbpf_err_ptr(-EINVAL);
9775}
9776
9777int bpf_map__set_initial_value(struct bpf_map *map,
9778 const void *data, size_t size)
9779{
9780 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
9781 size != map->def.value_size || map->fd >= 0)
9782 return libbpf_err(-EINVAL);
9783
9784 memcpy(map->mmaped, data, size);
9785 return 0;
9786}
9787
9788const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
9789{
9790 if (!map->mmaped)
9791 return NULL;
9792 *psize = map->def.value_size;
9793 return map->mmaped;
9794}
9795
9796bool bpf_map__is_offload_neutral(const struct bpf_map *map)
9797{
9798 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
9799}
9800
9801bool bpf_map__is_internal(const struct bpf_map *map)
9802{
9803 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
9804}
9805
9806__u32 bpf_map__ifindex(const struct bpf_map *map)
9807{
9808 return map->map_ifindex;
9809}
9810
9811int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
9812{
9813 if (map->fd >= 0)
9814 return libbpf_err(-EBUSY);
9815 map->map_ifindex = ifindex;
9816 return 0;
9817}
9818
9819int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
9820{
9821 if (!bpf_map_type__is_map_in_map(map->def.type)) {
9822 pr_warn("error: unsupported map type\n");
9823 return libbpf_err(-EINVAL);
9824 }
9825 if (map->inner_map_fd != -1) {
9826 pr_warn("error: inner_map_fd already specified\n");
9827 return libbpf_err(-EINVAL);
9828 }
9829 zfree(&map->inner_map);
9830 map->inner_map_fd = fd;
9831 return 0;
9832}
9833
9834static struct bpf_map *
9835__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
9836{
9837 ssize_t idx;
9838 struct bpf_map *s, *e;
9839
9840 if (!obj || !obj->maps)
9841 return errno = EINVAL, NULL;
9842
9843 s = obj->maps;
9844 e = obj->maps + obj->nr_maps;
9845
9846 if ((m < s) || (m >= e)) {
9847 pr_warn("error in %s: map handler doesn't belong to object\n",
9848 __func__);
9849 return errno = EINVAL, NULL;
9850 }
9851
9852 idx = (m - obj->maps) + i;
9853 if (idx >= obj->nr_maps || idx < 0)
9854 return NULL;
9855 return &obj->maps[idx];
9856}
9857
9858struct bpf_map *
9859bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
9860{
9861 if (prev == NULL)
9862 return obj->maps;
9863
9864 return __bpf_map__iter(prev, obj, 1);
9865}
9866
9867struct bpf_map *
9868bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
9869{
9870 if (next == NULL) {
9871 if (!obj->nr_maps)
9872 return NULL;
9873 return obj->maps + obj->nr_maps - 1;
9874 }
9875
9876 return __bpf_map__iter(next, obj, -1);
9877}
9878
9879struct bpf_map *
9880bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
9881{
9882 struct bpf_map *pos;
9883
9884 bpf_object__for_each_map(pos, obj) {
9885 if (pos->name && !strcmp(pos->name, name))
9886 return pos;
9887 }
9888 return errno = ENOENT, NULL;
9889}
9890
9891int
9892bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
9893{
9894 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
9895}
9896
9897struct bpf_map *
9898bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
9899{
9900 return libbpf_err_ptr(-ENOTSUP);
9901}
9902
9903long libbpf_get_error(const void *ptr)
9904{
9905 if (!IS_ERR_OR_NULL(ptr))
9906 return 0;
9907
9908 if (IS_ERR(ptr))
9909 errno = -PTR_ERR(ptr);
9910
9911	/* If ptr == NULL, then errno should already be set by the failing
9912	 * API, because libbpf never returns NULL on success and now always
9913	 * sets errno on error. So no extra errno handling is needed for the
9914	 * ptr == NULL case.
9915	 */
9916 return -errno;
9917}
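
/* Example (editor's sketch): the error convention described above means
 * pointer-returning libbpf APIs can be checked with libbpf_get_error().
 * The object file name is hypothetical:
 *
 *	struct bpf_object *obj = bpf_object__open("prog.o");
 *	long err = libbpf_get_error(obj);
 *
 *	if (err) {
 *		// obj is not a usable pointer here; err is a negative errno
 *		return err;
 *	}
 */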
9918
9919int bpf_prog_load(const char *file, enum bpf_prog_type type,
9920 struct bpf_object **pobj, int *prog_fd)
9921{
9922 struct bpf_prog_load_attr attr;
9923
9924 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
9925 attr.file = file;
9926 attr.prog_type = type;
9927 attr.expected_attach_type = 0;
9928
9929 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
9930}
9931
9932int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
9933 struct bpf_object **pobj, int *prog_fd)
9934{
9935 struct bpf_object_open_attr open_attr = {};
9936 struct bpf_program *prog, *first_prog = NULL;
9937 struct bpf_object *obj;
9938 struct bpf_map *map;
9939 int err;
9940
9941 if (!attr)
9942 return libbpf_err(-EINVAL);
9943 if (!attr->file)
9944 return libbpf_err(-EINVAL);
9945
9946 open_attr.file = attr->file;
9947 open_attr.prog_type = attr->prog_type;
9948
9949 obj = bpf_object__open_xattr(&open_attr);
9950 err = libbpf_get_error(obj);
9951 if (err)
9952 return libbpf_err(-ENOENT);
9953
9954 bpf_object__for_each_program(prog, obj) {
9955 enum bpf_attach_type attach_type = attr->expected_attach_type;
9956 /*
9957 * to preserve backwards compatibility, bpf_prog_load treats
9958 * attr->prog_type, if specified, as an override to whatever
9959 * bpf_object__open guessed
9960 */
9961 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
9962 bpf_program__set_type(prog, attr->prog_type);
9963 bpf_program__set_expected_attach_type(prog,
9964 attach_type);
9965 }
9966 if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
9967 /*
9968 * we haven't guessed from section name and user
9969 * didn't provide a fallback type, too bad...
9970 */
9971 bpf_object__close(obj);
9972 return libbpf_err(-EINVAL);
9973 }
9974
9975 prog->prog_ifindex = attr->ifindex;
9976 prog->log_level = attr->log_level;
9977 prog->prog_flags |= attr->prog_flags;
9978 if (!first_prog)
9979 first_prog = prog;
9980 }
9981
9982 bpf_object__for_each_map(map, obj) {
9983 if (!bpf_map__is_offload_neutral(map))
9984 map->map_ifindex = attr->ifindex;
9985 }
9986
9987 if (!first_prog) {
9988 pr_warn("object file doesn't contain bpf program\n");
9989 bpf_object__close(obj);
9990 return libbpf_err(-ENOENT);
9991 }
9992
9993 err = bpf_object__load(obj);
9994 if (err) {
9995 bpf_object__close(obj);
9996 return libbpf_err(err);
9997 }
9998
9999 *pobj = obj;
10000 *prog_fd = bpf_program__fd(first_prog);
10001 return 0;
10002}
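
/* Example (illustrative only): loading an object file and getting the FD of
 * its first program through the legacy bpf_prog_load() wrapper defined above.
 * The file name is hypothetical:
 *
 *	struct bpf_object *obj;
 *	int prog_fd, err;
 *
 *	err = bpf_prog_load("xdp_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd);
 *	if (err)
 *		return err;
 *	// ... use prog_fd; call bpf_object__close(obj) when done
 */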
10003
10004struct bpf_link {
10005 int (*detach)(struct bpf_link *link);
10006 int (*destroy)(struct bpf_link *link);
10007 char *pin_path; /* NULL, if not pinned */
10008 int fd; /* hook FD, -1 if not applicable */
10009 bool disconnected;
10010};
10011
10012/* Replace link's underlying BPF program with the new one */
10013int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
10014{
10015 int ret;
10016
10017 ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
10018 return libbpf_err_errno(ret);
10019}
10020
10021 /* Release "ownership" of the underlying BPF resource (typically, a BPF
10022  * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A
10023  * disconnected link, when destroyed through a bpf_link__destroy() call, won't
10024  * attempt to detach/unregister that BPF resource. This is useful in
10025  * situations where, say, the attached BPF program has to outlive the
10026  * userspace program that attached it. Depending on the type of BPF program,
10027  * though, additional steps (like pinning the BPF program in BPF FS) might be
10028  * necessary to ensure that the exit of the userspace program doesn't trigger
10029  * automatic detachment and cleanup inside the kernel.
10030  */
10031void bpf_link__disconnect(struct bpf_link *link)
10032{
10033 link->disconnected = true;
10034}
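
/* Example (sketch of the pattern described above): keeping a program attached
 * after the loader process exits by pinning the link, then disconnecting it so
 * bpf_link__destroy() only frees the in-memory handle. The pin path is
 * hypothetical:
 *
 *	err = bpf_link__pin(link, "/sys/fs/bpf/my_link");
 *	if (err)
 *		return err;
 *	bpf_link__disconnect(link);
 *	bpf_link__destroy(link);	// does not detach the pinned link
 */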
10035
10036int bpf_link__destroy(struct bpf_link *link)
10037{
10038 int err = 0;
10039
10040 if (IS_ERR_OR_NULL(link))
10041 return 0;
10042
10043 if (!link->disconnected && link->detach)
10044 err = link->detach(link);
10045 if (link->destroy)
10046 link->destroy(link);
10047 if (link->pin_path)
10048 free(link->pin_path);
10049 free(link);
10050
10051 return libbpf_err(err);
10052}
10053
10054int bpf_link__fd(const struct bpf_link *link)
10055{
10056 return link->fd;
10057}
10058
10059const char *bpf_link__pin_path(const struct bpf_link *link)
10060{
10061 return link->pin_path;
10062}
10063
10064static int bpf_link__detach_fd(struct bpf_link *link)
10065{
10066 return libbpf_err_errno(close(link->fd));
10067}
10068
10069struct bpf_link *bpf_link__open(const char *path)
10070{
10071 struct bpf_link *link;
10072 int fd;
10073
10074 fd = bpf_obj_get(path);
10075 if (fd < 0) {
10076 fd = -errno;
10077 pr_warn("failed to open link at %s: %d\n", path, fd);
10078 return libbpf_err_ptr(fd);
10079 }
10080
10081 link = calloc(1, sizeof(*link));
10082 if (!link) {
10083 close(fd);
10084 return libbpf_err_ptr(-ENOMEM);
10085 }
10086 link->detach = &bpf_link__detach_fd;
10087 link->fd = fd;
10088
10089 link->pin_path = strdup(path);
10090 if (!link->pin_path) {
10091 bpf_link__destroy(link);
10092 return libbpf_err_ptr(-ENOMEM);
10093 }
10094
10095 return link;
10096}
10097
10098int bpf_link__detach(struct bpf_link *link)
10099{
10100 return bpf_link_detach(link->fd) ? -errno : 0;
10101}
10102
10103int bpf_link__pin(struct bpf_link *link, const char *path)
10104{
10105 int err;
10106
10107 if (link->pin_path)
10108 return libbpf_err(-EBUSY);
10109 err = make_parent_dir(path);
10110 if (err)
10111 return libbpf_err(err);
10112 err = check_path(path);
10113 if (err)
10114 return libbpf_err(err);
10115
10116 link->pin_path = strdup(path);
10117 if (!link->pin_path)
10118 return libbpf_err(-ENOMEM);
10119
10120 if (bpf_obj_pin(link->fd, link->pin_path)) {
10121 err = -errno;
10122 zfree(&link->pin_path);
10123 return libbpf_err(err);
10124 }
10125
10126 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
10127 return 0;
10128}
10129
10130int bpf_link__unpin(struct bpf_link *link)
10131{
10132 int err;
10133
10134 if (!link->pin_path)
10135 return libbpf_err(-EINVAL);
10136
10137 err = unlink(link->pin_path);
10138 if (err != 0)
10139 return -errno;
10140
10141 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
10142 zfree(&link->pin_path);
10143 return 0;
10144}
10145
10146static int bpf_link__detach_perf_event(struct bpf_link *link)
10147{
10148 int err;
10149
10150 err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
10151 if (err)
10152 err = -errno;
10153
10154 close(link->fd);
10155 return libbpf_err(err);
10156}
10157
10158struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, int pfd)
10159{
10160 char errmsg[STRERR_BUFSIZE];
10161 struct bpf_link *link;
10162 int prog_fd, err;
10163
10164 if (pfd < 0) {
10165 pr_warn("prog '%s': invalid perf event FD %d\n",
10166 prog->name, pfd);
10167 return libbpf_err_ptr(-EINVAL);
10168 }
10169 prog_fd = bpf_program__fd(prog);
10170 if (prog_fd < 0) {
10171 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
10172 prog->name);
10173 return libbpf_err_ptr(-EINVAL);
10174 }
10175
10176 link = calloc(1, sizeof(*link));
10177 if (!link)
10178 return libbpf_err_ptr(-ENOMEM);
10179 link->detach = &bpf_link__detach_perf_event;
10180 link->fd = pfd;
10181
10182 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
10183 err = -errno;
10184 free(link);
10185 pr_warn("prog '%s': failed to attach to pfd %d: %s\n",
10186 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10187 if (err == -EPROTO)
10188 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
10189 prog->name, pfd);
10190 return libbpf_err_ptr(err);
10191 }
10192 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10193 err = -errno;
10194 free(link);
10195 pr_warn("prog '%s': failed to enable pfd %d: %s\n",
10196 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10197 return libbpf_err_ptr(err);
10198 }
10199 return link;
10200}
10201
10202/*
10203 * this function is expected to parse integer in the range of [0, 2^31-1] from
10204 * given file using scanf format string fmt. If actual parsed value is
10205 * negative, the result might be indistinguishable from error
10206 */
10207static int parse_uint_from_file(const char *file, const char *fmt)
10208{
10209 char buf[STRERR_BUFSIZE];
10210 int err, ret;
10211 FILE *f;
10212
10213 f = fopen(file, "r");
10214 if (!f) {
10215 err = -errno;
10216 pr_debug("failed to open '%s': %s\n", file,
10217 libbpf_strerror_r(err, buf, sizeof(buf)));
10218 return err;
10219 }
10220 err = fscanf(f, fmt, &ret);
10221 if (err != 1) {
10222 err = err == EOF ? -EIO : -errno;
10223 pr_debug("failed to parse '%s': %s\n", file,
10224 libbpf_strerror_r(err, buf, sizeof(buf)));
10225 fclose(f);
10226 return err;
10227 }
10228 fclose(f);
10229 return ret;
10230}
10231
10232static int determine_kprobe_perf_type(void)
10233{
10234 const char *file = "/sys/bus/event_source/devices/kprobe/type";
10235
10236 return parse_uint_from_file(file, "%d\n");
10237}
10238
10239static int determine_uprobe_perf_type(void)
10240{
10241 const char *file = "/sys/bus/event_source/devices/uprobe/type";
10242
10243 return parse_uint_from_file(file, "%d\n");
10244}
10245
10246static int determine_kprobe_retprobe_bit(void)
10247{
10248 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
10249
10250 return parse_uint_from_file(file, "config:%d\n");
10251}
10252
10253static int determine_uprobe_retprobe_bit(void)
10254{
10255 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
10256
10257 return parse_uint_from_file(file, "config:%d\n");
10258}
10259
10260static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
10261 uint64_t offset, int pid)
10262{
10263 struct perf_event_attr attr = {};
10264 char errmsg[STRERR_BUFSIZE];
10265 int type, pfd, err;
10266
10267 type = uprobe ? determine_uprobe_perf_type()
10268 : determine_kprobe_perf_type();
10269 if (type < 0) {
10270 pr_warn("failed to determine %s perf type: %s\n",
10271 uprobe ? "uprobe" : "kprobe",
10272 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
10273 return type;
10274 }
10275 if (retprobe) {
10276 int bit = uprobe ? determine_uprobe_retprobe_bit()
10277 : determine_kprobe_retprobe_bit();
10278
10279 if (bit < 0) {
10280 pr_warn("failed to determine %s retprobe bit: %s\n",
10281 uprobe ? "uprobe" : "kprobe",
10282 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
10283 return bit;
10284 }
10285 attr.config |= 1 << bit;
10286 }
10287 attr.size = sizeof(attr);
10288 attr.type = type;
10289 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
10290 attr.config2 = offset; /* kprobe_addr or probe_offset */
10291
10292 /* pid filter is meaningful only for uprobes */
10293 pfd = syscall(__NR_perf_event_open, &attr,
10294 pid < 0 ? -1 : pid /* pid */,
10295 pid == -1 ? 0 : -1 /* cpu */,
10296 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10297 if (pfd < 0) {
10298 err = -errno;
10299 pr_warn("%s perf_event_open() failed: %s\n",
10300 uprobe ? "uprobe" : "kprobe",
10301 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10302 return err;
10303 }
10304 return pfd;
10305}
10306
10307struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
10308 bool retprobe,
10309 const char *func_name)
10310{
10311 char errmsg[STRERR_BUFSIZE];
10312 struct bpf_link *link;
10313 int pfd, err;
10314
10315 pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
10316 0 /* offset */, -1 /* pid */);
10317 if (pfd < 0) {
10318 pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
10319 prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
10320 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10321 return libbpf_err_ptr(pfd);
10322 }
10323 link = bpf_program__attach_perf_event(prog, pfd);
10324 err = libbpf_get_error(link);
10325 if (err) {
10326 close(pfd);
10327 pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
10328 prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
10329 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10330 return libbpf_err_ptr(err);
10331 }
10332 return link;
10333}
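
/* Example (hedged): attaching a loaded program to a kprobe by kernel function
 * name; passing true instead of false would request a kretprobe. The function
 * name "do_unlinkat" is illustrative:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe(prog, false, "do_unlinkat");
 *	if (libbpf_get_error(link))
 *		return -1;	// attachment failed
 */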
10334
10335static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
10336 struct bpf_program *prog)
10337{
10338 const char *func_name;
10339 bool retprobe;
10340
10341 func_name = prog->sec_name + sec->len;
10342 retprobe = strcmp(sec->sec, "kretprobe/") == 0;
10343
10344 return bpf_program__attach_kprobe(prog, retprobe, func_name);
10345}
10346
10347struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
10348 bool retprobe, pid_t pid,
10349 const char *binary_path,
10350 size_t func_offset)
10351{
10352 char errmsg[STRERR_BUFSIZE];
10353 struct bpf_link *link;
10354 int pfd, err;
10355
10356 pfd = perf_event_open_probe(true /* uprobe */, retprobe,
10357 binary_path, func_offset, pid);
10358 if (pfd < 0) {
10359 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
10360 prog->name, retprobe ? "uretprobe" : "uprobe",
10361 binary_path, func_offset,
10362 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10363 return libbpf_err_ptr(pfd);
10364 }
10365 link = bpf_program__attach_perf_event(prog, pfd);
10366 err = libbpf_get_error(link);
10367 if (err) {
10368 close(pfd);
10369 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
10370 prog->name, retprobe ? "uretprobe" : "uprobe",
10371 binary_path, func_offset,
10372 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10373 return libbpf_err_ptr(err);
10374 }
10375 return link;
10376}
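
/* Example (sketch; path and offset are hypothetical): attaching to a uprobe at
 * a given offset inside a user-space binary, for all processes (pid == -1):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe(prog, false, -1,
 *					  "/usr/lib/libc.so.6", 0x12345);
 *	if (libbpf_get_error(link))
 *		return -1;
 */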
10377
10378static int determine_tracepoint_id(const char *tp_category,
10379 const char *tp_name)
10380{
10381 char file[PATH_MAX];
10382 int ret;
10383
10384 ret = snprintf(file, sizeof(file),
10385 "/sys/kernel/debug/tracing/events/%s/%s/id",
10386 tp_category, tp_name);
10387 if (ret < 0)
10388 return -errno;
10389 if (ret >= sizeof(file)) {
10390 pr_debug("tracepoint %s/%s path is too long\n",
10391 tp_category, tp_name);
10392 return -E2BIG;
10393 }
10394 return parse_uint_from_file(file, "%d\n");
10395}
10396
10397static int perf_event_open_tracepoint(const char *tp_category,
10398 const char *tp_name)
10399{
10400 struct perf_event_attr attr = {};
10401 char errmsg[STRERR_BUFSIZE];
10402 int tp_id, pfd, err;
10403
10404 tp_id = determine_tracepoint_id(tp_category, tp_name);
10405 if (tp_id < 0) {
10406 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
10407 tp_category, tp_name,
10408 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
10409 return tp_id;
10410 }
10411
10412 attr.type = PERF_TYPE_TRACEPOINT;
10413 attr.size = sizeof(attr);
10414 attr.config = tp_id;
10415
10416 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
10417 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10418 if (pfd < 0) {
10419 err = -errno;
10420 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
10421 tp_category, tp_name,
10422 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10423 return err;
10424 }
10425 return pfd;
10426}
10427
10428struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
10429 const char *tp_category,
10430 const char *tp_name)
10431{
10432 char errmsg[STRERR_BUFSIZE];
10433 struct bpf_link *link;
10434 int pfd, err;
10435
10436 pfd = perf_event_open_tracepoint(tp_category, tp_name);
10437 if (pfd < 0) {
10438 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
10439 prog->name, tp_category, tp_name,
10440 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10441 return libbpf_err_ptr(pfd);
10442 }
10443 link = bpf_program__attach_perf_event(prog, pfd);
10444 err = libbpf_get_error(link);
10445 if (err) {
10446 close(pfd);
10447 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
10448 prog->name, tp_category, tp_name,
10449 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10450 return libbpf_err_ptr(err);
10451 }
10452 return link;
10453}
10454
10455static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
10456 struct bpf_program *prog)
10457{
10458 char *sec_name, *tp_cat, *tp_name;
10459 struct bpf_link *link;
10460
10461 sec_name = strdup(prog->sec_name);
10462 if (!sec_name)
10463 return libbpf_err_ptr(-ENOMEM);
10464
10465 /* extract "tp/<category>/<name>" */
10466 tp_cat = sec_name + sec->len;
10467 tp_name = strchr(tp_cat, '/');
10468 if (!tp_name) {
10469 free(sec_name);
10470 return libbpf_err_ptr(-EINVAL);
10471 }
10472 *tp_name = '\0';
10473 tp_name++;
10474
10475 link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
10476 free(sec_name);
10477 return link;
10478}
10479
10480struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
10481 const char *tp_name)
10482{
10483 char errmsg[STRERR_BUFSIZE];
10484 struct bpf_link *link;
10485 int prog_fd, pfd;
10486
10487 prog_fd = bpf_program__fd(prog);
10488 if (prog_fd < 0) {
10489 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10490 return libbpf_err_ptr(-EINVAL);
10491 }
10492
10493 link = calloc(1, sizeof(*link));
10494 if (!link)
10495 return libbpf_err_ptr(-ENOMEM);
10496 link->detach = &bpf_link__detach_fd;
10497
10498 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
10499 if (pfd < 0) {
10500 pfd = -errno;
10501 free(link);
10502 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
10503 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10504 return libbpf_err_ptr(pfd);
10505 }
10506 link->fd = pfd;
10507 return link;
10508}
10509
10510static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
10511 struct bpf_program *prog)
10512{
10513 const char *tp_name = prog->sec_name + sec->len;
10514
10515 return bpf_program__attach_raw_tracepoint(prog, tp_name);
10516}
10517
10518/* Common logic for all BPF program types that attach to a btf_id */
10519static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
10520{
10521 char errmsg[STRERR_BUFSIZE];
10522 struct bpf_link *link;
10523 int prog_fd, pfd;
10524
10525 prog_fd = bpf_program__fd(prog);
10526 if (prog_fd < 0) {
10527 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10528 return libbpf_err_ptr(-EINVAL);
10529 }
10530
10531 link = calloc(1, sizeof(*link));
10532 if (!link)
10533 return libbpf_err_ptr(-ENOMEM);
10534 link->detach = &bpf_link__detach_fd;
10535
10536 pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
10537 if (pfd < 0) {
10538 pfd = -errno;
10539 free(link);
10540 pr_warn("prog '%s': failed to attach: %s\n",
10541 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10542 return libbpf_err_ptr(pfd);
10543 }
10544 link->fd = pfd;
10545	return link;
10546}
10547
10548struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
10549{
10550 return bpf_program__attach_btf_id(prog);
10551}
10552
10553struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
10554{
10555 return bpf_program__attach_btf_id(prog);
10556}
10557
10558static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
10559 struct bpf_program *prog)
10560{
10561 return bpf_program__attach_trace(prog);
10562}
10563
10564static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
10565 struct bpf_program *prog)
10566{
10567 return bpf_program__attach_lsm(prog);
10568}
10569
10570static struct bpf_link *
10571bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
10572 const char *target_name)
10573{
10574 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
10575 .target_btf_id = btf_id);
10576 enum bpf_attach_type attach_type;
10577 char errmsg[STRERR_BUFSIZE];
10578 struct bpf_link *link;
10579 int prog_fd, link_fd;
10580
10581 prog_fd = bpf_program__fd(prog);
10582 if (prog_fd < 0) {
10583 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10584 return libbpf_err_ptr(-EINVAL);
10585 }
10586
10587 link = calloc(1, sizeof(*link));
10588 if (!link)
10589 return libbpf_err_ptr(-ENOMEM);
10590 link->detach = &bpf_link__detach_fd;
10591
10592 attach_type = bpf_program__get_expected_attach_type(prog);
10593 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
10594 if (link_fd < 0) {
10595 link_fd = -errno;
10596 free(link);
10597 pr_warn("prog '%s': failed to attach to %s: %s\n",
10598 prog->name, target_name,
10599 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10600 return libbpf_err_ptr(link_fd);
10601 }
10602 link->fd = link_fd;
10603 return link;
10604}
10605
10606struct bpf_link *
10607bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
10608{
10609 return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
10610}
10611
10612struct bpf_link *
10613bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
10614{
10615 return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
10616}
10617
10618struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
10619{
10620 /* target_fd/target_ifindex use the same field in LINK_CREATE */
10621 return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
10622}
10623
10624struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
10625 int target_fd,
10626 const char *attach_func_name)
10627{
10628 int btf_id;
10629
10630 if (!!target_fd != !!attach_func_name) {
10631 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
10632 prog->name);
10633 return libbpf_err_ptr(-EINVAL);
10634 }
10635
10636 if (prog->type != BPF_PROG_TYPE_EXT) {
10637		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
10638 prog->name);
10639 return libbpf_err_ptr(-EINVAL);
10640 }
10641
10642 if (target_fd) {
10643 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
10644 if (btf_id < 0)
10645 return libbpf_err_ptr(btf_id);
10646
10647 return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
10648 } else {
10649 /* no target, so use raw_tracepoint_open for compatibility
10650 * with old kernels
10651 */
10652 return bpf_program__attach_trace(prog);
10653 }
10654}
10655
10656struct bpf_link *
10657bpf_program__attach_iter(struct bpf_program *prog,
10658 const struct bpf_iter_attach_opts *opts)
10659{
10660 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
10661 char errmsg[STRERR_BUFSIZE];
10662 struct bpf_link *link;
10663 int prog_fd, link_fd;
10664 __u32 target_fd = 0;
10665
10666 if (!OPTS_VALID(opts, bpf_iter_attach_opts))
10667 return libbpf_err_ptr(-EINVAL);
10668
10669 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
10670 link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
10671
10672 prog_fd = bpf_program__fd(prog);
10673 if (prog_fd < 0) {
10674 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10675 return libbpf_err_ptr(-EINVAL);
10676 }
10677
10678 link = calloc(1, sizeof(*link));
10679 if (!link)
10680 return libbpf_err_ptr(-ENOMEM);
10681 link->detach = &bpf_link__detach_fd;
10682
10683 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
10684 &link_create_opts);
10685 if (link_fd < 0) {
10686 link_fd = -errno;
10687 free(link);
10688 pr_warn("prog '%s': failed to attach to iterator: %s\n",
10689 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10690 return libbpf_err_ptr(link_fd);
10691 }
10692 link->fd = link_fd;
10693 return link;
10694}
10695
10696static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
10697 struct bpf_program *prog)
10698{
10699 return bpf_program__attach_iter(prog, NULL);
10700}
10701
10702struct bpf_link *bpf_program__attach(struct bpf_program *prog)
10703{
10704 const struct bpf_sec_def *sec_def;
10705
10706 sec_def = find_sec_def(prog->sec_name);
10707 if (!sec_def || !sec_def->attach_fn)
10708 return libbpf_err_ptr(-ESRCH);
10709
10710 return sec_def->attach_fn(sec_def, prog);
10711}
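
/* Example (illustrative): generic auto-attach based on the program's SEC()
 * name, as implemented above; it only works for section types that define an
 * attach_fn (kprobe, tp, raw_tp, lsm, iter, etc.):
 *
 *	struct bpf_link *link = bpf_program__attach(prog);
 *
 *	if (libbpf_get_error(link))
 *		return -1;	// no auto-attach handler, or attach failed
 */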
10712
10713static int bpf_link__detach_struct_ops(struct bpf_link *link)
10714{
10715 __u32 zero = 0;
10716
10717 if (bpf_map_delete_elem(link->fd, &zero))
10718 return -errno;
10719
10720 return 0;
10721}
10722
10723struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
10724{
10725 struct bpf_struct_ops *st_ops;
10726 struct bpf_link *link;
10727 __u32 i, zero = 0;
10728 int err;
10729
10730 if (!bpf_map__is_struct_ops(map) || map->fd == -1)
10731 return libbpf_err_ptr(-EINVAL);
10732
10733 link = calloc(1, sizeof(*link));
10734 if (!link)
10735		return libbpf_err_ptr(-ENOMEM);
10736
10737 st_ops = map->st_ops;
10738 for (i = 0; i < btf_vlen(st_ops->type); i++) {
10739 struct bpf_program *prog = st_ops->progs[i];
10740 void *kern_data;
10741 int prog_fd;
10742
10743 if (!prog)
10744 continue;
10745
10746 prog_fd = bpf_program__fd(prog);
10747 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
10748 *(unsigned long *)kern_data = prog_fd;
10749 }
10750
10751 err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
10752 if (err) {
10753 err = -errno;
10754 free(link);
10755 return libbpf_err_ptr(err);
10756 }
10757
10758 link->detach = bpf_link__detach_struct_ops;
10759 link->fd = map->fd;
10760
10761 return link;
10762}
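
/* Example (hedged sketch): registering a struct_ops map (e.g., a BPF TCP
 * congestion control) after the object has been loaded. The map name "dctcp"
 * is hypothetical:
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "dctcp");
 *	struct bpf_link *link = bpf_map__attach_struct_ops(map);
 *
 *	if (libbpf_get_error(link))
 *		return -1;	// registration failed
 */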
10763
10764enum bpf_perf_event_ret
10765bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
10766 void **copy_mem, size_t *copy_size,
10767 bpf_perf_event_print_t fn, void *private_data)
10768{
10769 struct perf_event_mmap_page *header = mmap_mem;
10770 __u64 data_head = ring_buffer_read_head(header);
10771 __u64 data_tail = header->data_tail;
10772 void *base = ((__u8 *)header) + page_size;
10773 int ret = LIBBPF_PERF_EVENT_CONT;
10774 struct perf_event_header *ehdr;
10775 size_t ehdr_size;
10776
10777 while (data_head != data_tail) {
10778 ehdr = base + (data_tail & (mmap_size - 1));
10779 ehdr_size = ehdr->size;
10780
10781 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
10782 void *copy_start = ehdr;
10783 size_t len_first = base + mmap_size - copy_start;
10784 size_t len_secnd = ehdr_size - len_first;
10785
10786 if (*copy_size < ehdr_size) {
10787 free(*copy_mem);
10788 *copy_mem = malloc(ehdr_size);
10789 if (!*copy_mem) {
10790 *copy_size = 0;
10791 ret = LIBBPF_PERF_EVENT_ERROR;
10792 break;
10793 }
10794 *copy_size = ehdr_size;
10795 }
10796
10797 memcpy(*copy_mem, copy_start, len_first);
10798 memcpy(*copy_mem + len_first, base, len_secnd);
10799 ehdr = *copy_mem;
10800 }
10801
10802 ret = fn(ehdr, private_data);
10803 data_tail += ehdr_size;
10804 if (ret != LIBBPF_PERF_EVENT_CONT)
10805 break;
10806 }
10807
10808 ring_buffer_write_tail(header, data_tail);
10809 return libbpf_err(ret);
10810}
10811
10812struct perf_buffer;
10813
10814struct perf_buffer_params {
10815 struct perf_event_attr *attr;
10816	/* if event_cb is specified, it takes precedence */
10817 perf_buffer_event_fn event_cb;
10818 /* sample_cb and lost_cb are higher-level common-case callbacks */
10819 perf_buffer_sample_fn sample_cb;
10820 perf_buffer_lost_fn lost_cb;
10821 void *ctx;
10822 int cpu_cnt;
10823 int *cpus;
10824 int *map_keys;
10825};
10826
10827struct perf_cpu_buf {
10828 struct perf_buffer *pb;
10829 void *base; /* mmap()'ed memory */
10830 void *buf; /* for reconstructing segmented data */
10831 size_t buf_size;
10832 int fd;
10833 int cpu;
10834 int map_key;
10835};
10836
10837struct perf_buffer {
10838 perf_buffer_event_fn event_cb;
10839 perf_buffer_sample_fn sample_cb;
10840 perf_buffer_lost_fn lost_cb;
10841 void *ctx; /* passed into callbacks */
10842
10843 size_t page_size;
10844 size_t mmap_size;
10845 struct perf_cpu_buf **cpu_bufs;
10846 struct epoll_event *events;
10847 int cpu_cnt; /* number of allocated CPU buffers */
10848	int epoll_fd; /* epoll instance FD */
10849 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
10850};
10851
10852static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
10853 struct perf_cpu_buf *cpu_buf)
10854{
10855 if (!cpu_buf)
10856 return;
10857 if (cpu_buf->base &&
10858 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
10859 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
10860 if (cpu_buf->fd >= 0) {
10861 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
10862 close(cpu_buf->fd);
10863 }
10864 free(cpu_buf->buf);
10865 free(cpu_buf);
10866}
10867
10868void perf_buffer__free(struct perf_buffer *pb)
10869{
10870 int i;
10871
10872 if (IS_ERR_OR_NULL(pb))
10873 return;
10874 if (pb->cpu_bufs) {
10875 for (i = 0; i < pb->cpu_cnt; i++) {
10876 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10877
10878 if (!cpu_buf)
10879 continue;
10880
10881 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
10882 perf_buffer__free_cpu_buf(pb, cpu_buf);
10883 }
10884 free(pb->cpu_bufs);
10885 }
10886 if (pb->epoll_fd >= 0)
10887 close(pb->epoll_fd);
10888 free(pb->events);
10889 free(pb);
10890}
10891
10892static struct perf_cpu_buf *
10893perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
10894 int cpu, int map_key)
10895{
10896 struct perf_cpu_buf *cpu_buf;
10897 char msg[STRERR_BUFSIZE];
10898 int err;
10899
10900 cpu_buf = calloc(1, sizeof(*cpu_buf));
10901 if (!cpu_buf)
10902 return ERR_PTR(-ENOMEM);
10903
10904 cpu_buf->pb = pb;
10905 cpu_buf->cpu = cpu;
10906 cpu_buf->map_key = map_key;
10907
10908 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
10909 -1, PERF_FLAG_FD_CLOEXEC);
10910 if (cpu_buf->fd < 0) {
10911 err = -errno;
10912 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
10913 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10914 goto error;
10915 }
10916
10917 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
10918 PROT_READ | PROT_WRITE, MAP_SHARED,
10919 cpu_buf->fd, 0);
10920 if (cpu_buf->base == MAP_FAILED) {
10921 cpu_buf->base = NULL;
10922 err = -errno;
10923 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
10924 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10925 goto error;
10926 }
10927
10928 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10929 err = -errno;
10930 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
10931 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10932 goto error;
10933 }
10934
10935 return cpu_buf;
10936
10937error:
10938 perf_buffer__free_cpu_buf(pb, cpu_buf);
10939 return (struct perf_cpu_buf *)ERR_PTR(err);
10940}
10941
10942static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10943 struct perf_buffer_params *p);
10944
10945struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
10946 const struct perf_buffer_opts *opts)
10947{
10948 struct perf_buffer_params p = {};
10949 struct perf_event_attr attr = { 0, };
10950
10951 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
10952 attr.type = PERF_TYPE_SOFTWARE;
10953 attr.sample_type = PERF_SAMPLE_RAW;
10954 attr.sample_period = 1;
10955 attr.wakeup_events = 1;
10956
10957 p.attr = &attr;
10958 p.sample_cb = opts ? opts->sample_cb : NULL;
10959 p.lost_cb = opts ? opts->lost_cb : NULL;
10960 p.ctx = opts ? opts->ctx : NULL;
10961
10962 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
10963}
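
/* Example (editor's sketch; callback signature assumed per this libbpf
 * version): creating a perf buffer with 8 pages per CPU over a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map and handling raw samples. The map name
 * "events" is hypothetical:
 *
 *	static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		// one raw sample emitted via bpf_perf_event_output()
 *	}
 *
 *	struct perf_buffer_opts opts = { .sample_cb = handle_sample };
 *	int map_fd = bpf_object__find_map_fd_by_name(obj, "events");
 *	struct perf_buffer *pb = perf_buffer__new(map_fd, 8, &opts);
 *
 *	if (libbpf_get_error(pb))
 *		return -1;
 */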
10964
10965struct perf_buffer *
10966perf_buffer__new_raw(int map_fd, size_t page_cnt,
10967 const struct perf_buffer_raw_opts *opts)
10968{
10969 struct perf_buffer_params p = {};
10970
10971 p.attr = opts->attr;
10972 p.event_cb = opts->event_cb;
10973 p.ctx = opts->ctx;
10974 p.cpu_cnt = opts->cpu_cnt;
10975 p.cpus = opts->cpus;
10976 p.map_keys = opts->map_keys;
10977
10978 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
10979}
10980
10981static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10982 struct perf_buffer_params *p)
10983{
10984 const char *online_cpus_file = "/sys/devices/system/cpu/online";
10985 struct bpf_map_info map;
10986 char msg[STRERR_BUFSIZE];
10987 struct perf_buffer *pb;
10988 bool *online = NULL;
10989 __u32 map_info_len;
10990 int err, i, j, n;
10991
10992 if (page_cnt & (page_cnt - 1)) {
10993 pr_warn("page count should be power of two, but is %zu\n",
10994 page_cnt);
10995 return ERR_PTR(-EINVAL);
10996 }
10997
10998 /* best-effort sanity checks */
10999 memset(&map, 0, sizeof(map));
11000 map_info_len = sizeof(map);
11001 err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
11002 if (err) {
11003 err = -errno;
11004		/* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
11005		 * -EBADFD, -EFAULT, or -E2BIG on a real error
11006		 */
11007 if (err != -EINVAL) {
11008 pr_warn("failed to get map info for map FD %d: %s\n",
11009 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
11010 return ERR_PTR(err);
11011 }
11012 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
11013 map_fd);
11014 } else {
11015 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
11016 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
11017 map.name);
11018 return ERR_PTR(-EINVAL);
11019 }
11020 }
11021
11022 pb = calloc(1, sizeof(*pb));
11023 if (!pb)
11024 return ERR_PTR(-ENOMEM);
11025
11026 pb->event_cb = p->event_cb;
11027 pb->sample_cb = p->sample_cb;
11028 pb->lost_cb = p->lost_cb;
11029 pb->ctx = p->ctx;
11030
11031 pb->page_size = getpagesize();
11032 pb->mmap_size = pb->page_size * page_cnt;
11033 pb->map_fd = map_fd;
11034
11035 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
11036 if (pb->epoll_fd < 0) {
11037 err = -errno;
11038 pr_warn("failed to create epoll instance: %s\n",
11039 libbpf_strerror_r(err, msg, sizeof(msg)));
11040 goto error;
11041 }
11042
11043 if (p->cpu_cnt > 0) {
11044 pb->cpu_cnt = p->cpu_cnt;
11045 } else {
11046 pb->cpu_cnt = libbpf_num_possible_cpus();
11047 if (pb->cpu_cnt < 0) {
11048 err = pb->cpu_cnt;
11049 goto error;
11050 }
11051 if (map.max_entries && map.max_entries < pb->cpu_cnt)
11052 pb->cpu_cnt = map.max_entries;
11053 }
11054
11055 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
11056 if (!pb->events) {
11057 err = -ENOMEM;
11058 pr_warn("failed to allocate events: out of memory\n");
11059 goto error;
11060 }
11061 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
11062 if (!pb->cpu_bufs) {
11063 err = -ENOMEM;
11064 pr_warn("failed to allocate buffers: out of memory\n");
11065 goto error;
11066 }
11067
11068 err = parse_cpu_mask_file(online_cpus_file, &online, &n);
11069 if (err) {
11070 pr_warn("failed to get online CPU mask: %d\n", err);
11071 goto error;
11072 }
11073
11074 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
11075 struct perf_cpu_buf *cpu_buf;
11076 int cpu, map_key;
11077
11078 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
11079 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
11080
11081		/* in case the user didn't explicitly request particular CPUs
11082		 * to be attached to, skip offline/not-present CPUs
11083		 */
11084 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
11085 continue;
11086
11087 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
11088 if (IS_ERR(cpu_buf)) {
11089 err = PTR_ERR(cpu_buf);
11090 goto error;
11091 }
11092
11093 pb->cpu_bufs[j] = cpu_buf;
11094
11095 err = bpf_map_update_elem(pb->map_fd, &map_key,
11096 &cpu_buf->fd, 0);
11097 if (err) {
11098 err = -errno;
11099 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
11100 cpu, map_key, cpu_buf->fd,
11101 libbpf_strerror_r(err, msg, sizeof(msg)));
11102 goto error;
11103 }
11104
11105 pb->events[j].events = EPOLLIN;
11106 pb->events[j].data.ptr = cpu_buf;
11107 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
11108 &pb->events[j]) < 0) {
11109 err = -errno;
11110 pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
11111 cpu, cpu_buf->fd,
11112 libbpf_strerror_r(err, msg, sizeof(msg)));
11113 goto error;
11114 }
11115 j++;
11116 }
11117 pb->cpu_cnt = j;
11118 free(online);
11119
11120 return pb;
11121
11122error:
11123 free(online);
11124 if (pb)
11125 perf_buffer__free(pb);
11126 return ERR_PTR(err);
11127}
11128
11129struct perf_sample_raw {
11130 struct perf_event_header header;
11131 uint32_t size;
11132 char data[];
11133};
11134
11135struct perf_sample_lost {
11136 struct perf_event_header header;
11137 uint64_t id;
11138 uint64_t lost;
11139 uint64_t sample_id;
11140};
11141
11142static enum bpf_perf_event_ret
11143perf_buffer__process_record(struct perf_event_header *e, void *ctx)
11144{
11145 struct perf_cpu_buf *cpu_buf = ctx;
11146 struct perf_buffer *pb = cpu_buf->pb;
11147 void *data = e;
11148
11149 /* user wants full control over parsing perf event */
11150 if (pb->event_cb)
11151 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
11152
11153 switch (e->type) {
11154 case PERF_RECORD_SAMPLE: {
11155 struct perf_sample_raw *s = data;
11156
11157 if (pb->sample_cb)
11158 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
11159 break;
11160 }
11161 case PERF_RECORD_LOST: {
11162 struct perf_sample_lost *s = data;
11163
11164 if (pb->lost_cb)
11165 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
11166 break;
11167 }
11168 default:
11169 pr_warn("unknown perf sample type %d\n", e->type);
11170 return LIBBPF_PERF_EVENT_ERROR;
11171 }
11172 return LIBBPF_PERF_EVENT_CONT;
11173}
11174
11175static int perf_buffer__process_records(struct perf_buffer *pb,
11176 struct perf_cpu_buf *cpu_buf)
11177{
11178 enum bpf_perf_event_ret ret;
11179
11180 ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
11181 pb->page_size, &cpu_buf->buf,
11182 &cpu_buf->buf_size,
11183 perf_buffer__process_record, cpu_buf);
11184 if (ret != LIBBPF_PERF_EVENT_CONT)
11185 return ret;
11186 return 0;
11187}
11188
11189int perf_buffer__epoll_fd(const struct perf_buffer *pb)
11190{
11191 return pb->epoll_fd;
11192}
11193
11194int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
11195{
11196 int i, cnt, err;
11197
11198 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
11199 if (cnt < 0)
11200 return -errno;
11201
11202 for (i = 0; i < cnt; i++) {
11203 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
11204
11205 err = perf_buffer__process_records(pb, cpu_buf);
11206 if (err) {
11207 pr_warn("error while processing records: %d\n", err);
11208 return libbpf_err(err);
11209 }
11210 }
11211 return cnt;
11212}
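
/* Example (illustrative poll loop): epoll-based consumption with a 100ms
 * timeout; perf_buffer__poll() returns the number of ready buffers or a
 * negative error (e.g., -EINTR if interrupted by a signal):
 *
 *	int n;
 *
 *	for (;;) {
 *		n = perf_buffer__poll(pb, 100);
 *		if (n < 0 && n != -EINTR)
 *			break;	// real error
 *	}
 */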
11213
11214/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
11215 * manager.
11216 */
11217size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
11218{
11219 return pb->cpu_cnt;
11220}
11221
11222/*
11223 * Return perf_event FD of a ring buffer in *buf_idx* slot of
11224 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
11225 * select()/poll()/epoll() Linux syscalls.
11226 */
11227int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
11228{
11229 struct perf_cpu_buf *cpu_buf;
11230
11231 if (buf_idx >= pb->cpu_cnt)
11232 return libbpf_err(-EINVAL);
11233
11234 cpu_buf = pb->cpu_bufs[buf_idx];
11235 if (!cpu_buf)
11236 return libbpf_err(-ENOENT);
11237
11238 return cpu_buf->fd;
11239}
11240
11241/*
11242 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
11243 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
11244 * consume, do nothing and return success.
11245 * Returns:
11246 * - 0 on success;
11247 * - <0 on failure.
11248 */
11249int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
11250{
11251 struct perf_cpu_buf *cpu_buf;
11252
11253 if (buf_idx >= pb->cpu_cnt)
11254 return libbpf_err(-EINVAL);
11255
11256 cpu_buf = pb->cpu_bufs[buf_idx];
11257 if (!cpu_buf)
11258 return libbpf_err(-ENOENT);
11259
11260 return perf_buffer__process_records(pb, cpu_buf);
11261}
11262
11263int perf_buffer__consume(struct perf_buffer *pb)
11264{
11265 int i, err;
11266
11267 for (i = 0; i < pb->cpu_cnt; i++) {
11268 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
11269
11270 if (!cpu_buf)
11271 continue;
11272
11273 err = perf_buffer__process_records(pb, cpu_buf);
11274 if (err) {
11275 pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
11276 return libbpf_err(err);
11277 }
11278 }
11279 return 0;
11280}
11281
11282struct bpf_prog_info_array_desc {
11283 int array_offset; /* e.g. offset of jited_prog_insns */
11284 int count_offset; /* e.g. offset of jited_prog_len */
11285 int size_offset; /* > 0: offset of rec size,
11286 * < 0: fix size of -size_offset
11287 */
11288};
11289
11290static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
11291 [BPF_PROG_INFO_JITED_INSNS] = {
11292 offsetof(struct bpf_prog_info, jited_prog_insns),
11293 offsetof(struct bpf_prog_info, jited_prog_len),
11294 -1,
11295 },
11296 [BPF_PROG_INFO_XLATED_INSNS] = {
11297 offsetof(struct bpf_prog_info, xlated_prog_insns),
11298 offsetof(struct bpf_prog_info, xlated_prog_len),
11299 -1,
11300 },
11301 [BPF_PROG_INFO_MAP_IDS] = {
11302 offsetof(struct bpf_prog_info, map_ids),
11303 offsetof(struct bpf_prog_info, nr_map_ids),
11304 -(int)sizeof(__u32),
11305 },
11306 [BPF_PROG_INFO_JITED_KSYMS] = {
11307 offsetof(struct bpf_prog_info, jited_ksyms),
11308 offsetof(struct bpf_prog_info, nr_jited_ksyms),
11309 -(int)sizeof(__u64),
11310 },
11311 [BPF_PROG_INFO_JITED_FUNC_LENS] = {
11312 offsetof(struct bpf_prog_info, jited_func_lens),
11313 offsetof(struct bpf_prog_info, nr_jited_func_lens),
11314 -(int)sizeof(__u32),
11315 },
11316 [BPF_PROG_INFO_FUNC_INFO] = {
11317 offsetof(struct bpf_prog_info, func_info),
11318 offsetof(struct bpf_prog_info, nr_func_info),
11319 offsetof(struct bpf_prog_info, func_info_rec_size),
11320 },
11321 [BPF_PROG_INFO_LINE_INFO] = {
11322 offsetof(struct bpf_prog_info, line_info),
11323 offsetof(struct bpf_prog_info, nr_line_info),
11324 offsetof(struct bpf_prog_info, line_info_rec_size),
11325 },
11326 [BPF_PROG_INFO_JITED_LINE_INFO] = {
11327 offsetof(struct bpf_prog_info, jited_line_info),
11328 offsetof(struct bpf_prog_info, nr_jited_line_info),
11329 offsetof(struct bpf_prog_info, jited_line_info_rec_size),
11330 },
11331 [BPF_PROG_INFO_PROG_TAGS] = {
11332 offsetof(struct bpf_prog_info, prog_tags),
11333 offsetof(struct bpf_prog_info, nr_prog_tags),
11334 -(int)sizeof(__u8) * BPF_TAG_SIZE,
11335 },
11336
11337};
11338
11339static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
11340 int offset)
11341{
11342 __u32 *array = (__u32 *)info;
11343
11344 if (offset >= 0)
11345 return array[offset / sizeof(__u32)];
11346 return -(int)offset;
11347}
11348
11349static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
11350 int offset)
11351{
11352 __u64 *array = (__u64 *)info;
11353
11354 if (offset >= 0)
11355 return array[offset / sizeof(__u64)];
11356 return -(int)offset;
11357}
11358
11359static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
11360 __u32 val)
11361{
11362 __u32 *array = (__u32 *)info;
11363
11364 if (offset >= 0)
11365 array[offset / sizeof(__u32)] = val;
11366}
11367
11368static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
11369 __u64 val)
11370{
11371 __u64 *array = (__u64 *)info;
11372
11373 if (offset >= 0)
11374 array[offset / sizeof(__u64)] = val;
11375}
11376
11377struct bpf_prog_info_linear *
11378bpf_program__get_prog_info_linear(int fd, __u64 arrays)
11379{
11380 struct bpf_prog_info_linear *info_linear;
11381 struct bpf_prog_info info = {};
11382 __u32 info_len = sizeof(info);
11383 __u32 data_len = 0;
11384 int i, err;
11385 void *ptr;
11386
11387 if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
11388 return libbpf_err_ptr(-EINVAL);
11389
11390 /* step 1: get array dimensions */
11391 err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
11392 if (err) {
11393		pr_debug("can't get prog info: %s\n", strerror(errno));
11394 return libbpf_err_ptr(-EFAULT);
11395 }
11396
11397 /* step 2: calculate total size of all arrays */
11398 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11399 bool include_array = (arrays & (1UL << i)) > 0;
11400 struct bpf_prog_info_array_desc *desc;
11401 __u32 count, size;
11402
11403 desc = bpf_prog_info_array_desc + i;
11404
11405 /* kernel is too old to support this field */
11406 if (info_len < desc->array_offset + sizeof(__u32) ||
11407 info_len < desc->count_offset + sizeof(__u32) ||
11408 (desc->size_offset > 0 && info_len < desc->size_offset))
11409 include_array = false;
11410
11411 if (!include_array) {
11412 arrays &= ~(1UL << i); /* clear the bit */
11413 continue;
11414 }
11415
11416 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11417 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11418
11419 data_len += count * size;
11420 }
11421
11422	/* step 3: allocate contiguous memory */
11423 data_len = roundup(data_len, sizeof(__u64));
11424 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
11425 if (!info_linear)
11426 return libbpf_err_ptr(-ENOMEM);
11427
11428 /* step 4: fill data to info_linear->info */
11429 info_linear->arrays = arrays;
11430 memset(&info_linear->info, 0, sizeof(info));
11431 ptr = info_linear->data;
11432
11433 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11434 struct bpf_prog_info_array_desc *desc;
11435 __u32 count, size;
11436
11437 if ((arrays & (1UL << i)) == 0)
11438 continue;
11439
11440 desc = bpf_prog_info_array_desc + i;
11441 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11442 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11443 bpf_prog_info_set_offset_u32(&info_linear->info,
11444 desc->count_offset, count);
11445 bpf_prog_info_set_offset_u32(&info_linear->info,
11446 desc->size_offset, size);
11447 bpf_prog_info_set_offset_u64(&info_linear->info,
11448 desc->array_offset,
11449 ptr_to_u64(ptr));
11450 ptr += count * size;
11451 }
11452
11453 /* step 5: call syscall again to get required arrays */
11454 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
11455 if (err) {
11456		pr_debug("can't get prog info: %s\n", strerror(errno));
11457 free(info_linear);
11458 return libbpf_err_ptr(-EFAULT);
11459 }
11460
11461 /* step 6: verify the data */
11462 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11463 struct bpf_prog_info_array_desc *desc;
11464 __u32 v1, v2;
11465
11466 if ((arrays & (1UL << i)) == 0)
11467 continue;
11468
11469 desc = bpf_prog_info_array_desc + i;
11470 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11471 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
11472 desc->count_offset);
11473 if (v1 != v2)
11474 pr_warn("%s: mismatch in element count\n", __func__);
11475
11476 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11477 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
11478 desc->size_offset);
11479 if (v1 != v2)
11480 pr_warn("%s: mismatch in rec size\n", __func__);
11481 }
11482
11483 /* step 7: update info_len and data_len */
11484 info_linear->info_len = sizeof(struct bpf_prog_info);
11485 info_linear->data_len = data_len;
11486
11487 return info_linear;
11488}
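
/* Example usage (an illustrative sketch; the BPF_PROG_INFO_* enum value and
 * the map_ids/nr_map_ids field names are assumed from libbpf.h and
 * linux/bpf.h):
 *
 *	struct bpf_prog_info_linear *info_linear;
 *	__u32 *map_ids, i;
 *
 *	info_linear = bpf_program__get_prog_info_linear(prog_fd,
 *			1UL << BPF_PROG_INFO_MAP_IDS);
 *	if (libbpf_get_error(info_linear))
 *		return;
 *	map_ids = (__u32 *)(unsigned long)info_linear->info.map_ids;
 *	for (i = 0; i < info_linear->info.nr_map_ids; i++)
 *		printf("map id: %u\n", map_ids[i]);
 *	free(info_linear);
 */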
11489
11490void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
11491{
11492 int i;
11493
11494 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11495 struct bpf_prog_info_array_desc *desc;
11496 __u64 addr, offs;
11497
11498 if ((info_linear->arrays & (1UL << i)) == 0)
11499 continue;
11500
11501 desc = bpf_prog_info_array_desc + i;
11502 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
11503 desc->array_offset);
11504 offs = addr - ptr_to_u64(info_linear->data);
11505 bpf_prog_info_set_offset_u64(&info_linear->info,
11506 desc->array_offset, offs);
11507 }
11508}
11509
11510void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
11511{
11512 int i;
11513
11514 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11515 struct bpf_prog_info_array_desc *desc;
11516 __u64 addr, offs;
11517
11518 if ((info_linear->arrays & (1UL << i)) == 0)
11519 continue;
11520
11521 desc = bpf_prog_info_array_desc + i;
11522 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
11523 desc->array_offset);
11524 addr = offs + ptr_to_u64(info_linear->data);
11525 bpf_prog_info_set_offset_u64(&info_linear->info,
11526 desc->array_offset, addr);
11527 }
11528}
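
/* The two helpers above are meant to be used as a pair when an info_linear
 * blob is copied to another address space (e.g. written to disk, as perf
 * does). A rough sketch, with the file descriptors being illustrative only:
 *
 *	bpf_program__bpil_addr_to_offs(info_linear);	// pointers -> offsets
 *	write(out_fd, info_linear,
 *	      sizeof(*info_linear) + info_linear->data_len);
 *	// ... later, possibly in a different process ...
 *	bpf_program__bpil_offs_to_addr(info_linear);	// offsets -> pointers
 */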
11529
11530int bpf_program__set_attach_target(struct bpf_program *prog,
11531 int attach_prog_fd,
11532 const char *attach_func_name)
11533{
11534 int btf_obj_fd = 0, btf_id = 0, err;
11535
11536 if (!prog || attach_prog_fd < 0 || !attach_func_name)
11537 return libbpf_err(-EINVAL);
11538
11539 if (prog->obj->loaded)
11540 return libbpf_err(-EINVAL);
11541
11542 if (attach_prog_fd) {
11543 btf_id = libbpf_find_prog_btf_id(attach_func_name,
11544 attach_prog_fd);
11545 if (btf_id < 0)
11546 return libbpf_err(btf_id);
11547 } else {
11548 /* load btf_vmlinux, if not yet loaded */
11549 err = bpf_object__load_vmlinux_btf(prog->obj, true);
11550 if (err)
11551 return libbpf_err(err);
11552 err = find_kernel_btf_id(prog->obj, attach_func_name,
11553 prog->expected_attach_type,
11554 &btf_obj_fd, &btf_id);
11555 if (err)
11556 return libbpf_err(err);
11557 }
11558
11559 prog->attach_btf_id = btf_id;
11560 prog->attach_btf_obj_fd = btf_obj_fd;
11561 prog->attach_prog_fd = attach_prog_fd;
11562 return 0;
11563}
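
/* Example (sketch): retargeting a fentry/fexit/freplace program before the
 * object is loaded. The program and kernel function names are hypothetical;
 * passing 0 as attach_prog_fd resolves the target against kernel BTF.
 *
 *	prog = bpf_object__find_program_by_name(obj, "my_fentry_prog");
 *	err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
 *	if (err)
 *		goto out;
 *	err = bpf_object__load(obj);	// target must be set before load
 */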
11564
11565int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
11566{
11567 int err = 0, n, len, start, end = -1;
11568 bool *tmp;
11569
11570 *mask = NULL;
11571 *mask_sz = 0;
11572
11573 /* Each substring separated by ',' has the format \d+-\d+ or \d+ */
11574 while (*s) {
11575 if (*s == ',' || *s == '\n') {
11576 s++;
11577 continue;
11578 }
11579 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
11580 if (n <= 0 || n > 2) {
11581 pr_warn("Failed to get CPU range %s: %d\n", s, n);
11582 err = -EINVAL;
11583 goto cleanup;
11584 } else if (n == 1) {
11585 end = start;
11586 }
11587 if (start < 0 || start > end) {
11588 pr_warn("Invalid CPU range [%d,%d] in %s\n",
11589 start, end, s);
11590 err = -EINVAL;
11591 goto cleanup;
11592 }
11593 tmp = realloc(*mask, end + 1);
11594 if (!tmp) {
11595 err = -ENOMEM;
11596 goto cleanup;
11597 }
11598 *mask = tmp;
11599 memset(tmp + *mask_sz, 0, start - *mask_sz);
11600 memset(tmp + start, 1, end - start + 1);
11601 *mask_sz = end + 1;
11602 s += len;
11603 }
11604 if (!*mask_sz) {
11605 pr_warn("Empty CPU range\n");
11606 return -EINVAL;
11607 }
11608 return 0;
11609cleanup:
11610 free(*mask);
11611 *mask = NULL;
11612 return err;
11613}
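
/* Example usage (sketch):
 *
 *	bool *mask;
 *	int n, err;
 *
 *	err = parse_cpu_mask_str("0-2,4", &mask, &n);
 *	// on success: n == 5 and mask == { true, true, true, false, true }
 *	free(mask);
 */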
11614
11615int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
11616{
11617 int fd, err = 0, len;
11618 char buf[128];
11619
11620 fd = open(fcpu, O_RDONLY);
11621 if (fd < 0) {
11622 err = -errno;
11623 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
11624 return err;
11625 }
11626 len = read(fd, buf, sizeof(buf));
11627 close(fd);
11628 if (len <= 0) {
11629 err = len ? -errno : -EINVAL;
11630 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
11631 return err;
11632 }
11633 if (len >= sizeof(buf)) {
11634 pr_warn("CPU mask is too big in file %s\n", fcpu);
11635 return -E2BIG;
11636 }
11637 buf[len] = '\0';
11638
11639 return parse_cpu_mask_str(buf, mask, mask_sz);
11640}
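
/* The file is expected to contain a cpulist-format string such as "0-7\n",
 * so, for example (sketch),
 * parse_cpu_mask_file("/sys/devices/system/cpu/possible", &mask, &n)
 * would typically yield n == 8 with every entry set on an 8-CPU system.
 */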
11641
11642int libbpf_num_possible_cpus(void)
11643{
11644 static const char *fcpu = "/sys/devices/system/cpu/possible";
11645 static int cpus;
11646 int err, n, i, tmp_cpus;
11647 bool *mask;
11648
11649 tmp_cpus = READ_ONCE(cpus);
11650 if (tmp_cpus > 0)
11651 return tmp_cpus;
11652
11653 err = parse_cpu_mask_file(fcpu, &mask, &n);
11654 if (err)
11655 return libbpf_err(err);
11656
11657 tmp_cpus = 0;
11658 for (i = 0; i < n; i++) {
11659 if (mask[i])
11660 tmp_cpus++;
11661 }
11662 free(mask);
11663
11664 WRITE_ONCE(cpus, tmp_cpus);
11665 return tmp_cpus;
11666}
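
/* Typical usage (sketch): sizing a per-CPU value buffer for map lookups;
 * the map handling details are illustrative only.
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *
 *	if (ncpus < 0)
 *		return ncpus;
 *	values = calloc(ncpus, sizeof(__u64));
 *	// bpf_map_lookup_elem() on a per-CPU map fills one value per possible CPU
 */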
11667
11668int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
11669 const struct bpf_object_open_opts *opts)
11670{
11671 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
11672 .object_name = s->name,
11673 );
11674 struct bpf_object *obj;
11675 int i, err;
11676
11677 /* Attempt to preserve opts->object_name, unless it is explicitly
11678 * overridden by the user. Overwriting the object name for skeletons is
11679 * discouraged, as it breaks global data maps, because their map names are
11680 * prefixed with the object name. When the skeleton is generated, bpftool
11681 * assumes that this name will stay the same.
11682 */
11683 if (opts) {
11684 memcpy(&skel_opts, opts, sizeof(*opts));
11685 if (!opts->object_name)
11686 skel_opts.object_name = s->name;
11687 }
11688
11689 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
11690 err = libbpf_get_error(obj);
11691 if (err) {
11692 pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
11693 s->name, err);
11694 return libbpf_err(err);
11695 }
11696
11697 *s->obj = obj;
11698
11699 for (i = 0; i < s->map_cnt; i++) {
11700 struct bpf_map **map = s->maps[i].map;
11701 const char *name = s->maps[i].name;
11702 void **mmaped = s->maps[i].mmaped;
11703
11704 *map = bpf_object__find_map_by_name(obj, name);
11705 if (!*map) {
11706 pr_warn("failed to find skeleton map '%s'\n", name);
11707 return libbpf_err(-ESRCH);
11708 }
11709
11710 /* externs shouldn't be pre-set up from user code */
11711 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
11712 *mmaped = (*map)->mmaped;
11713 }
11714
11715 for (i = 0; i < s->prog_cnt; i++) {
11716 struct bpf_program **prog = s->progs[i].prog;
11717 const char *name = s->progs[i].name;
11718
11719 *prog = bpf_object__find_program_by_name(obj, name);
11720 if (!*prog) {
11721 pr_warn("failed to find skeleton program '%s'\n", name);
11722 return libbpf_err(-ESRCH);
11723 }
11724 }
11725
11726 return 0;
11727}
11728
11729int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
11730{
11731 int i, err;
11732
11733 err = bpf_object__load(*s->obj);
11734 if (err) {
11735 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
11736 return libbpf_err(err);
11737 }
11738
11739 for (i = 0; i < s->map_cnt; i++) {
11740 struct bpf_map *map = *s->maps[i].map;
11741 size_t mmap_sz = bpf_map_mmap_sz(map);
11742 int prot, map_fd = bpf_map__fd(map);
11743 void **mmaped = s->maps[i].mmaped;
11744
11745 if (!mmaped)
11746 continue;
11747
11748 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
11749 *mmaped = NULL;
11750 continue;
11751 }
11752
11753 if (map->def.map_flags & BPF_F_RDONLY_PROG)
11754 prot = PROT_READ;
11755 else
11756 prot = PROT_READ | PROT_WRITE;
11757
11758 /* Remap the anonymous mmap()-ed "map initialization image" as
11759 * BPF map-backed mmap()-ed memory, preserving the same memory
11760 * address. This causes the kernel to change the process' page
11761 * table to point to a different piece of kernel memory, but from
11762 * the userspace point of view the memory address (and its
11763 * contents, which are identical at this point) stays the same.
11764 * This mapping is released by bpf_object__close() as part of the
11765 * normal cleanup procedure, so we don't need to worry about it
11766 * from the skeleton's cleanup perspective.
11767 */
11768 *mmaped = mmap(map->mmaped, mmap_sz, prot,
11769 MAP_SHARED | MAP_FIXED, map_fd, 0);
11770 if (*mmaped == MAP_FAILED) {
11771 err = -errno;
11772 *mmaped = NULL;
11773 pr_warn("failed to re-mmap() map '%s': %d\n",
11774 bpf_map__name(map), err);
11775 return libbpf_err(err);
11776 }
11777 }
11778
11779 return 0;
11780}
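
/* After a successful load, the skeleton's mmaped pointers reference live,
 * kernel-backed map memory. Assuming a bpftool-generated skeleton with a
 * global variable "my_counter" in .bss (hypothetical name), user code can
 * then do (sketch):
 *
 *	skel->bss->my_counter = 0;	// writes straight into the BPF map
 */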
11781
11782int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
11783{
11784 int i, err;
11785
11786 for (i = 0; i < s->prog_cnt; i++) {
11787 struct bpf_program *prog = *s->progs[i].prog;
11788 struct bpf_link **link = s->progs[i].link;
11789 const struct bpf_sec_def *sec_def;
11790
11791 if (!prog->load)
11792 continue;
11793
11794 sec_def = find_sec_def(prog->sec_name);
11795 if (!sec_def || !sec_def->attach_fn)
11796 continue;
11797
11798 *link = sec_def->attach_fn(sec_def, prog);
11799 err = libbpf_get_error(*link);
11800 if (err) {
11801 pr_warn("failed to auto-attach program '%s': %d\n",
11802 bpf_program__name(prog), err);
11803 return libbpf_err(err);
11804 }
11805 }
11806
11807 return 0;
11808}
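
/* Programs whose section definition has no auto-attach handler are silently
 * skipped above and have to be attached manually, e.g. (sketch):
 *
 *	link = bpf_program__attach(prog);
 *	// or a type-specific variant such as bpf_program__attach_xdp(prog, ifindex)
 */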
11809
11810void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
11811{
11812 int i;
11813
11814 for (i = 0; i < s->prog_cnt; i++) {
11815 struct bpf_link **link = s->progs[i].link;
11816
11817 bpf_link__destroy(*link);
11818 *link = NULL;
11819 }
11820}
11821
11822void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
11823{
11824 if (s->progs)
11825 bpf_object__detach_skeleton(s);
11826 if (s->obj)
11827 bpf_object__close(*s->obj);
11828 free(s->maps);
11829 free(s->progs);
11830 free(s);
11831}
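
/* Putting the skeleton API together: these functions are normally invoked
 * through the bpftool-generated <name>.skel.h wrappers rather than called
 * directly. Assuming a generated skeleton for an object named "my_prog"
 * (hypothetical), the typical lifecycle is (sketch):
 *
 *	struct my_prog_bpf *skel;
 *
 *	skel = my_prog_bpf__open();		// -> bpf_object__open_skeleton()
 *	err = my_prog_bpf__load(skel);		// -> bpf_object__load_skeleton()
 *	err = my_prog_bpf__attach(skel);	// -> bpf_object__attach_skeleton()
 *	// ... run ...
 *	my_prog_bpf__destroy(skel);		// -> bpf_object__destroy_skeleton()
 */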