// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

#define BPF_INSN_SZ (sizeof(struct bpf_insn))

/* vsprintf() in __base_pr() uses nonliteral format string. It may break
 * compilation if user enables corresponding warning. Disable it explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b) __attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}
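
/* Illustrative usage sketch (not part of this file): a libbpf user can
 * install its own printer through the public API above and later restore
 * the previous callback, e.g.:
 *
 *	static int my_print(enum libbpf_print_level level, const char *fmt,
 *			    va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, fmt, args);
 *	}
 *
 *	libbpf_print_fn_t prev = libbpf_set_print(my_print);
 *	...
 *	libbpf_set_print(prev);
 */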

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}

static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	err = getrlimit(RLIMIT_MEMLOCK, &limit);
	if (err)
		return;

	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024*1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}

#define STRERR_BUFSIZE 128

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
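
/* For illustration only: fields of kernel UAPI structs such as union
 * bpf_attr carry user pointers as __u64 values, so call sites typically
 * do something like "attr.pathname = ptr_to_u64(path);" before invoking
 * the bpf() syscall.
 */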

enum kern_feature_id {
	/* v4.14: kernel support for program & map names. */
	FEAT_PROG_NAME,
	/* v5.2: kernel support for global data sections. */
	FEAT_GLOBAL_DATA,
	/* BTF support */
	FEAT_BTF,
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	FEAT_BTF_FUNC,
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	FEAT_BTF_DATASEC,
	/* BTF_FUNC_GLOBAL is supported */
	FEAT_BTF_GLOBAL_FUNC,
	/* BPF_F_MMAPABLE is supported for arrays */
	FEAT_ARRAY_MMAP,
	/* kernel support for expected_attach_type in BPF_PROG_LOAD */
	FEAT_EXP_ATTACH_TYPE,
	/* bpf_probe_read_{kernel,user}[_str] helpers */
	FEAT_PROBE_READ_KERN,
	/* BPF_PROG_BIND_MAP is supported */
	FEAT_PROG_BIND_MAP,
	/* Kernel support for module BTFs */
	FEAT_MODULE_BTF,
	__FEAT_CNT,
};

static bool kernel_supports(enum kern_feature_id feat_id);

enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	int map_idx;
	int sym_off;
	bool processed;
};

struct bpf_sec_def;

typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
					 struct bpf_program *prog);

struct bpf_sec_def {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	bool is_exp_attach_type_optional;
	bool is_attachable;
	bool is_attach_btf;
	bool is_sleepable;
	attach_fn_t attach_fn;
};

/*
 * bpf_prog would be a better name, but it is already used in
 * linux/filter.h.
 */
struct bpf_program {
	const struct bpf_sec_def *sec_def;
	char *sec_name;
	size_t sec_idx;
	/* this program's instruction offset (in number of instructions)
	 * within its containing ELF section
	 */
	size_t sec_insn_off;
	/* number of original instructions in ELF section belonging to this
	 * program, not taking into account subprogram instructions possibly
	 * appended later during relocation
	 */
	size_t sec_insn_cnt;
	/* Offset (in number of instructions) of the start of instructions
	 * belonging to this BPF program within its containing main BPF
	 * program. For the entry-point (main) BPF program, this is always
	 * zero. For a sub-program, this gets reset before each main BPF
	 * program is processed and relocated, and is used to determine
	 * whether the sub-program was already appended to the main program,
	 * and if yes, at which instruction offset.
	 */
	size_t sub_insn_off;

	char *name;
	/* sec_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;

	/* instructions that belong to BPF program; insns[0] is located at
	 * sec_insn_off instruction within its ELF section in ELF file, so
	 * when mapping ELF file instruction index to the local instruction,
	 * one needs to subtract sec_insn_off; and vice versa.
	 */
	struct bpf_insn *insns;
	/* actual number of instructions in this BPF program's image; for
	 * entry-point BPF programs this includes the size of main program
	 * itself plus all the used sub-programs, appended at the end
	 */
	size_t insns_cnt;

	struct reloc_desc *reloc_desc;
	int nr_reloc;
	int log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	bool load;
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;
	int prog_ifindex;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

struct bpf_struct_ops {
	const char *tname;
	const struct btf_type *type;
	struct bpf_program **progs;
	__u32 *kern_func_off;
	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
	void *data;
	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
	 * btf_vmlinux's format.
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[... some other kernel fields ...]
	 *	struct tcp_congestion_ops data;
	 * }
	 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
	 * from "data".
	 */
	void *kern_vdata;
	__u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_KCONFIG,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA] = DATA_SEC,
	[LIBBPF_MAP_BSS] = BSS_SEC,
	[LIBBPF_MAP_RODATA] = RODATA_SEC,
	[LIBBPF_MAP_KCONFIG] = KCONFIG_SEC,
};

struct bpf_map {
	char *name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
};

enum extern_type {
	EXT_UNKNOWN,
	EXT_KCFG,
	EXT_KSYM,
};

enum kcfg_type {
	KCFG_UNKNOWN,
	KCFG_CHAR,
	KCFG_BOOL,
	KCFG_INT,
	KCFG_TRISTATE,
	KCFG_CHAR_ARR,
};

struct extern_desc {
	enum extern_type type;
	int sym_idx;
	int btf_id;
	int sec_btf_id;
	const char *name;
	bool is_set;
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;

			/* target btf_id of the corresponding kernel var. */
			int kernel_btf_obj_fd;
			int kernel_btf_id;

			/* local btf_id of the ksym extern's type. */
			__u32 type_id;
		} ksym;
	};
};

static LIST_HEAD(bpf_objects_list);

struct module_btf {
	struct btf *btf;
	char *name;
	__u32 id;
	int fd;
};

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig;
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;
	int rodata_map_idx;

	bool loaded;
	bool has_subcalls;

	/*
	 * Information used when doing ELF-related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		const void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		Elf_Data *st_ops_data;
		size_t shstrndx; /* section index for section name strings */
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc_sects;
		int nr_reloc_sects;
		int maps_shndx;
		int btf_maps_shndx;
		__u32 btf_maps_sec_btf_id;
		int text_shndx;
		int symbols_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
		int st_ops_shndx;
	} efile;
	/*
	 * Every loaded bpf_object is linked into a list, which is
	 * hidden from the caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	/* Parse and load BTF vmlinux if any of the programs in the object need
	 * it at load time.
	 */
	struct btf *btf_vmlinux;
	/* vmlinux BTF override for CO-RE relocations */
	struct btf *btf_vmlinux_override;
	/* Lazily initialized kernel module BTFs */
	struct module_btf *btf_modules;
	bool btf_modules_loaded;
	size_t btf_module_cnt;
	size_t btf_module_cap;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	char path[];
};
#define obj_elf_valid(o) ((o)->efile.elf)

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
			      size_t off, __u32 sym_type, GElf_Sym *sym);

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warn("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->sec_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->sec_idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	name = p = strdup(prog->sec_name);
	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}
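
/* Example (illustrative): a program placed in SEC("cgroup/skb") gets
 * pin_name "cgroup_skb", so recursive pinning can create plain files
 * instead of nested directories.
 */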

static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_CALL &&
	       BPF_SRC(insn->code) == BPF_K &&
	       insn->src_reg == BPF_PSEUDO_CALL &&
	       insn->dst_reg == 0 &&
	       insn->off == 0;
}

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
	/* insns_cnt can later be increased by appending used subprograms */
	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->load = true;

	prog->instances.fds = NULL;
	prog->instances.nr = -1;

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name)
		goto errout;

	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz;
	int nr_progs, err;
	const char *name;
	GElf_Sym sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	sec_off = 0;

	while (sec_off < sec_sz) {
		if (elf_sym_by_sec_off(obj, sec_idx, sec_off, STT_FUNC, &sym)) {
			pr_warn("sec '%s': failed to find program symbol at offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		prog_sz = sym.st_size;

		name = elf_sym_str(obj, sym.st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_off + prog_sz > sec_sz) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
			/*
			 * In this case the original obj->programs
			 * is still valid, so no special treatment is
			 * needed in bpf_close_object().
			 */
			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
				sec_name, name);
			return -ENOMEM;
		}
		obj->programs = progs;

		prog = &progs[nr_progs];

		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
					    sec_off, data + sec_off, prog_sz);
		if (err)
			return err;

		nr_progs++;
		obj->nr_programs = nr_progs;

		sec_off += prog_sz;
	}

	return 0;
}

static __u32 get_kernel_version(void)
{
	__u32 major, minor, patch;
	struct utsname info;

	uname(&info);
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}
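
/* Example (illustrative): a release string of "5.10.42" yields
 * KERNEL_VERSION(5, 10, 42) == (5 << 16) + (10 << 8) + 42 == 0x050a2a,
 * while a release string that doesn't match "%u.%u.%u" yields 0.
 */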

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (btf_member_bit_offset(t, i) == bit_offset)
			return m;
	}

	return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
		    const char *name)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
			return m;
	}

	return NULL;
}

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind);

static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	__s32 kern_vtype_id, kern_type_id;
	__u32 i;

	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
			tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

	/* Find the corresponding "map_value" type that will be used
	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
	 * btf_vmlinux.
	 */
	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
						tname, BTF_KIND_STRUCT);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
			STRUCT_OPS_VALUE_PREFIX, tname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

	/* Find "struct tcp_congestion_ops" from
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[ ... ]
	 *	struct tcp_congestion_ops data;
	 * }
	 */
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
			tname, STRUCT_OPS_VALUE_PREFIX, tname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
					 const struct btf *btf,
					 const struct btf *kern_btf)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_struct_ops *st_ops;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = st_ops->type;
	tname = st_ops->tname;
	err = find_struct_ops_kern_types(kern_btf, tname,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	kern_data_off = kern_data_member->offset / 8;
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		mname = btf__name_by_offset(btf, member->name_off);
		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		moff = member->offset / 8;
		kern_moff = kern_member->offset / 8;

		mdata = data + moff;
		kern_mdata = kern_data + kern_moff;

		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			struct bpf_program *prog;

			prog = st_ops->progs[i];
			if (!prog)
				continue;

			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);

			/* mtype->type must be a func_proto which was
			 * guaranteed in bpf_object__collect_st_ops_relos(),
			 * so only check kern_mtype for func_proto here.
			 */
			if (!btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
					map->name, mname);
				return -ENOTSUP;
			}

			prog->attach_btf_id = kern_type_id;
			prog->expected_attach_type = kern_member_idx;

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		msize = btf__resolve_size(btf, mtype_id);
		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!bpf_map__is_struct_ops(map))
			continue;

		err = bpf_map__init_kern_struct_ops(map, obj->btf,
						    obj->btf_vmlinux);
		if (err)
			return err;
	}

	return 0;
}

static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	if (obj->efile.st_ops_shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			STRUCT_OPS_SEC);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = obj->efile.st_ops_shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       obj->efile.st_ops_data->d_buf + vsi->offset,
		       type->size);
		st_ops->tname = tname;
		st_ops->type = type;
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
		obj->name[sizeof(obj->name) - 1] = 0;
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		strncpy(obj->name, basename((void *)path),
			sizeof(obj->name) - 1);
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. If not, we would have to duplicate the
	 * buffer to avoid the user freeing it before ELF processing
	 * is finished.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;
	obj->efile.st_ops_shndx = -1;
	obj->kconfig_map_idx = -1;
	obj->rodata_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;
	obj->efile.st_ops_data = NULL;

	zfree(&obj->efile.reloc_sects);
	obj->efile.nr_reloc_sects = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

/* if libelf is old and doesn't support mmap(), fall back to read() */
#ifndef ELF_C_READ_MMAP
#define ELF_C_READ_MMAP ELF_C_READ
#endif

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warn("elf: init internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
	}

	if (!obj->efile.elf) {
		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
		pr_warn("elf: failed to get section names section index for %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
		pr_warn("elf: failed to get section names strings from %s: %s\n",
			obj->path, elf_errmsg(-1));
		return -LIBBPF_ERRNO__FORMAT;
	}

	/* Old LLVM set e_machine to EM_NONE */
	if (ep->e_type != ET_REL ||
	    (ep->e_machine && ep->e_machine != EM_BPF)) {
		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, DATA_SEC)) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, BSS_SEC)) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, RODATA_SEC)) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else if (!strcmp(name, STRUCT_OPS_SEC)) {
		if (obj->efile.st_ops_data)
			*size = obj->efile.st_ops_data->d_size;
	} else {
		Elf_Scn *scn = elf_sec_by_name(obj, name);
		Elf_Data *data = elf_sec_data(obj, scn);

		if (data) {
			ret = 0; /* found it */
			*size = data->d_size;
		}
	}

	return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_sym_str(obj, sym.st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n",
				name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
	if (!new_maps) {
		pr_warn("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * Fill all fds with -1 so we won't close an incorrect fd (fd=0 is
	 * stdin) on failure (zclose won't close a negative fd).
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}
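
/* Example (illustrative): value_size == 7 and max_entries == 100 gives
 * roundup(7, 8) * 100 == 800 bytes, rounded up to a single 4096-byte page
 * on systems with 4K pages.
 */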

static char *internal_map_name(struct bpf_object *obj,
			       enum libbpf_map_type type)
{
	char map_name[BPF_OBJ_NAME_LEN], *p;
	const char *sfx = libbpf_type_to_btf_name[type];
	int sfx_len = max((size_t)7, strlen(sfx));
	int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
			  strlen(obj->name));

	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
		 sfx_len, libbpf_type_to_btf_name[type]);

	/* sanitise map name to characters allowed by kernel */
	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';

	return strdup(map_name);
}
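
/* Example (illustrative): an object named "my_prog" with type
 * LIBBPF_MAP_RODATA produces a map name like "my_prog.rodata"; the object
 * name prefix is truncated as needed to fit BPF_OBJ_NAME_LEN.
 */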

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      int sec_idx, void *data, size_t data_sz)
{
	struct bpf_map_def *def;
	struct bpf_map *map;
	int err;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	map->name = internal_map_name(obj, type);
	if (!map->name) {
		pr_warn("failed to alloc map name\n");
		return -ENOMEM;
	}

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data_sz;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
			 ? BPF_F_RDONLY_PROG : 0;
	def->map_flags |= BPF_F_MMAPABLE;

	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
		 map->name, map->sec_idx, map->sec_offset, def->map_flags);

	map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (map->mmaped == MAP_FAILED) {
		err = -errno;
		map->mmaped = NULL;
		pr_warn("failed to alloc map '%s' content buffer: %d\n",
			map->name, err);
		zfree(&map->name);
		return err;
	}

	if (data)
		memcpy(map->mmaped, data, data_sz);

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	int err;

	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	if (obj->efile.data_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
						    obj->efile.data_shndx,
						    obj->efile.data->d_buf,
						    obj->efile.data->d_size);
		if (err)
			return err;
	}
	if (obj->efile.rodata_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
						    obj->efile.rodata_shndx,
						    obj->efile.rodata->d_buf,
						    obj->efile.rodata->d_size);
		if (err)
			return err;

		obj->rodata_map_idx = obj->nr_maps - 1;
	}
	if (obj->efile.bss_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
						    obj->efile.bss_shndx,
						    NULL,
						    obj->efile.bss->d_size);
		if (err)
			return err;
	}
	return 0;
}

static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
					       const void *name)
{
	int i;

	for (i = 0; i < obj->nr_extern; i++) {
		if (strcmp(obj->externs[i].name, name) == 0)
			return &obj->externs[i];
	}
	return NULL;
}

static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
			      char value)
{
	switch (ext->kcfg.type) {
	case KCFG_BOOL:
		if (value == 'm') {
			pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
				ext->name, value);
			return -EINVAL;
		}
		*(bool *)ext_val = value == 'y' ? true : false;
		break;
	case KCFG_TRISTATE:
		if (value == 'y')
			*(enum libbpf_tristate *)ext_val = TRI_YES;
		else if (value == 'm')
			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
		else /* value == 'n' */
			*(enum libbpf_tristate *)ext_val = TRI_NO;
		break;
	case KCFG_CHAR:
		*(char *)ext_val = value;
		break;
	case KCFG_UNKNOWN:
	case KCFG_INT:
	case KCFG_CHAR_ARR:
	default:
		pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
			ext->name, value);
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
			      const char *value)
{
	size_t len;

	if (ext->kcfg.type != KCFG_CHAR_ARR) {
		pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
		return -EINVAL;
	}

	len = strlen(value);
	if (value[len - 1] != '"') {
		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
			ext->name, value);
		return -EINVAL;
	}

	/* strip quotes */
	len -= 2;
	if (len >= ext->kcfg.sz) {
		pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
			ext->name, value, len, ext->kcfg.sz - 1);
		len = ext->kcfg.sz - 1;
	}
	memcpy(ext_val, value + 1, len);
	ext_val[len] = '\0';
	ext->is_set = true;
	return 0;
}

static int parse_u64(const char *value, __u64 *res)
{
	char *value_end;
	int err;

	errno = 0;
	*res = strtoull(value, &value_end, 0);
	if (errno) {
		err = -errno;
		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
		return err;
	}
	if (*value_end) {
		pr_warn("failed to parse '%s' as integer completely\n", value);
		return -EINVAL;
	}
	return 0;
}

static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
{
	int bit_sz = ext->kcfg.sz * 8;

	if (ext->kcfg.sz == 8)
		return true;

	/* Validate that value stored in u64 fits in integer of `ext->sz`
	 * bytes size without any loss of information. If the target integer
	 * is signed, we rely on the following limits of integer type of
	 * Y bits and subsequent transformation:
	 *
	 *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
	 *      0 <= X + 2^(Y-1)       <= 2^Y - 1
	 *      0 <= X + 2^(Y-1)       <  2^Y
	 *
	 * For unsigned target integer, check that all the (64 - Y) bits are
	 * zero.
	 */
	if (ext->kcfg.is_signed)
		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
	else
		return (v >> bit_sz) == 0;
}
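
/* Worked example (illustrative): for a signed 1-byte target (bit_sz == 8),
 * v == (__u64)-3 == 0xfffffffffffffffd, and v + (1ULL << 7) wraps to 0x7d,
 * which is < (1ULL << 8), so -3 fits. For an unsigned 2-byte target,
 * v == 70000 has non-zero bits above bit 15, so it is rejected.
 */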

static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
			      __u64 value)
{
	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
		pr_warn("extern (kcfg) %s=%llu should be integer\n",
			ext->name, (unsigned long long)value);
		return -EINVAL;
	}
	if (!is_kcfg_value_in_range(ext, value)) {
		pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
			ext->name, (unsigned long long)value, ext->kcfg.sz);
		return -ERANGE;
	}
	switch (ext->kcfg.sz) {
	case 1: *(__u8 *)ext_val = value; break;
	case 2: *(__u16 *)ext_val = value; break;
	case 4: *(__u32 *)ext_val = value; break;
	case 8: *(__u64 *)ext_val = value; break;
	default:
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int bpf_object__process_kconfig_line(struct bpf_object *obj,
					    char *buf, void *data)
{
	struct extern_desc *ext;
	char *sep, *value;
	int len, err = 0;
	void *ext_val;
	__u64 num;

	if (strncmp(buf, "CONFIG_", 7))
		return 0;

	sep = strchr(buf, '=');
	if (!sep) {
		pr_warn("failed to parse '%s': no separator\n", buf);
		return -EINVAL;
	}

	/* Trim ending '\n' */
	len = strlen(buf);
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';
	/* Split on '=' and ensure that a value is present. */
	*sep = '\0';
	if (!sep[1]) {
		*sep = '=';
		pr_warn("failed to parse '%s': no value\n", buf);
		return -EINVAL;
	}

	ext = find_extern_by_name(obj, buf);
	if (!ext || ext->is_set)
		return 0;

	ext_val = data + ext->kcfg.data_off;
	value = sep + 1;

	switch (*value) {
	case 'y': case 'n': case 'm':
		err = set_kcfg_value_tri(ext, ext_val, *value);
		break;
	case '"':
		err = set_kcfg_value_str(ext, ext_val, value);
		break;
	default:
		/* assume integer */
		err = parse_u64(value, &num);
		if (err) {
			pr_warn("extern (kcfg) %s=%s should be integer\n",
				ext->name, value);
			return err;
		}
		err = set_kcfg_value_num(ext, ext_val, num);
		break;
	}
	if (err)
		return err;
	pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
	return 0;
}
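
/* Example input lines (illustrative) and how they are dispatched:
 *
 *	CONFIG_BPF=y                     -> set_kcfg_value_tri()
 *	CONFIG_DEFAULT_HOSTNAME="(none)" -> set_kcfg_value_str()
 *	CONFIG_HZ=250                    -> parse_u64() + set_kcfg_value_num()
 *
 * Lines not starting with "CONFIG_" and options with no matching extern
 * are silently skipped.
 */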

static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
{
	char buf[PATH_MAX];
	struct utsname uts;
	int len, err = 0;
	gzFile file;

	uname(&uts);
	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
	if (len < 0)
		return -EINVAL;
	else if (len >= PATH_MAX)
		return -ENAMETOOLONG;

	/* gzopen also accepts uncompressed files. */
	file = gzopen(buf, "r");
	if (!file)
		file = gzopen("/proc/config.gz", "r");

	if (!file) {
		pr_warn("failed to open system Kconfig\n");
		return -ENOENT;
	}

	while (gzgets(file, buf, sizeof(buf))) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing system Kconfig line '%s': %d\n",
				buf, err);
			goto out;
		}
	}

out:
	gzclose(file);
	return err;
}

static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
					const char *config, void *data)
{
	char buf[PATH_MAX];
	int err = 0;
	FILE *file;

	file = fmemopen((void *)config, strlen(config), "r");
	if (!file) {
		err = -errno;
		pr_warn("failed to open in-memory Kconfig: %d\n", err);
		return err;
	}

	while (fgets(buf, sizeof(buf), file)) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
				buf, err);
			break;
		}
	}

	fclose(file);
	return err;
}

static int bpf_object__init_kconfig_map(struct bpf_object *obj)
{
	struct extern_desc *last_ext = NULL, *ext;
	size_t map_sz;
	int i, err;

	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];
		if (ext->type == EXT_KCFG)
			last_ext = ext;
	}

	if (!last_ext)
		return 0;

	map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
	err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
					    obj->efile.symbols_shndx,
					    NULL, map_sz);
	if (err)
		return err;

	obj->kconfig_map_idx = obj->nr_maps - 1;

	return 0;
}

static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (!symbols)
		return -EINVAL;

	scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
	data = elf_sec_data(obj, scn);
	if (!scn || !data) {
		pr_warn("elf: failed to get legacy map definitions for %s\n",
			obj->path);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	nr_syms = symbols->d_size / sizeof(GElf_Sym);
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}
	/* Assume equally sized map definitions */
	pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
		 nr_maps, data->d_size, obj->path);

	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
		pr_warn("elf: unable to determine legacy map definition size in %s\n",
			obj->path);
		return -EINVAL;
	}
	map_def_sz = data->d_size / nr_maps;

	/* Fill obj->maps using data in "maps" section. */
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_sym_str(obj, sym.st_name);
		if (!map_name) {
			pr_warn("failed to get map #%d name sym string for obj %s\n",
				i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym.st_shndx;
		map->sec_offset = sym.st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
				obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warn("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it. Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
						obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}
	}
	return 0;
}

static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	if (res_id)
		*res_id = id;

	while (btf_is_mod(t) || btf_is_typedef(t)) {
		if (res_id)
			*res_id = t->type;
		t = btf__type_by_id(btf, t->type);
	}

	return t;
}
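
/* Example (illustrative): for a BTF chain like
 * "const volatile my_typedef" -> "struct foo", skip_mods_and_typedefs()
 * walks through the CONST, VOLATILE and TYPEDEF entries and returns the
 * type for "struct foo", optionally reporting its ID through res_id
 * ("my_typedef" and "struct foo" are made-up names).
 */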

static const struct btf_type *
resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t;

	t = skip_mods_and_typedefs(btf, id, NULL);
	if (!btf_is_ptr(t))
		return NULL;

	t = skip_mods_and_typedefs(btf, t->type, res_id);

	return btf_is_func_proto(t) ? t : NULL;
}

static const char *btf_kind_str(const struct btf_type *t)
{
	switch (btf_kind(t)) {
	case BTF_KIND_UNKN: return "void";
	case BTF_KIND_INT: return "int";
	case BTF_KIND_PTR: return "ptr";
	case BTF_KIND_ARRAY: return "array";
	case BTF_KIND_STRUCT: return "struct";
	case BTF_KIND_UNION: return "union";
	case BTF_KIND_ENUM: return "enum";
	case BTF_KIND_FWD: return "fwd";
	case BTF_KIND_TYPEDEF: return "typedef";
	case BTF_KIND_VOLATILE: return "volatile";
	case BTF_KIND_CONST: return "const";
	case BTF_KIND_RESTRICT: return "restrict";
	case BTF_KIND_FUNC: return "func";
	case BTF_KIND_FUNC_PROTO: return "func_proto";
	case BTF_KIND_VAR: return "var";
	case BTF_KIND_DATASEC: return "datasec";
	default: return "unknown";
	}
}

/*
 * Fetch integer attribute of BTF map definition. Such attributes are
 * represented using a pointer to an array, in which dimensionality of array
 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
 * type definition, while using only sizeof(void *) space in ELF data section.
 */
static bool get_map_field_int(const char *map_name, const struct btf *btf,
			      const struct btf_member *m, __u32 *res)
{
	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
	const char *name = btf__name_by_offset(btf, m->name_off);
	const struct btf_array *arr_info;
	const struct btf_type *arr_t;

	if (!btf_is_ptr(t)) {
		pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
			map_name, name, btf_kind_str(t));
		return false;
	}

	arr_t = btf__type_by_id(btf, t->type);
	if (!arr_t) {
		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
			map_name, name, t->type);
		return false;
	}
	if (!btf_is_array(arr_t)) {
		pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
			map_name, name, btf_kind_str(arr_t));
		return false;
	}
	arr_info = btf_array(arr_t);
	*res = arr_info->nelems;
	return true;
}
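
/* Illustrative sketch (not part of this file): BPF program sources usually
 * spell these attributes via the bpf_helpers.h macros, e.g.:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 256);
 *		__type(key, int);
 *		__type(value, struct my_value);
 *	} my_map SEC(".maps");
 *
 * where __uint(name, val) expands to "int (*name)[val]", i.e. exactly the
 * pointer-to-array encoding that get_map_field_int() decodes ("struct
 * my_value" and "my_map" are made-up example names).
 */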
1978
1979static int build_map_pin_path(struct bpf_map *map, const char *path)
1980{
1981 char buf[PATH_MAX];
1982 int len;
1983
1984 if (!path)
1985 path = "/sys/fs/bpf";
1986
1987 len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
1988 if (len < 0)
1989 return -EINVAL;
1990 else if (len >= PATH_MAX)
1991 return -ENAMETOOLONG;
1992
1993 return bpf_map__set_pin_path(map, buf);
1994}
1995
1996
1997static int parse_btf_map_def(struct bpf_object *obj,
1998 struct bpf_map *map,
1999 const struct btf_type *def,
2000 bool strict, bool is_inner,
2001 const char *pin_root_path)
2002{
2003 const struct btf_type *t;
2004 const struct btf_member *m;
2005 int vlen, i;
2006
2007 vlen = btf_vlen(def);
2008 m = btf_members(def);
2009 for (i = 0; i < vlen; i++, m++) {
2010 const char *name = btf__name_by_offset(obj->btf, m->name_off);
2011
2012 if (!name) {
2013 pr_warn("map '%s': invalid field #%d.\n", map->name, i);
2014 return -EINVAL;
2015 }
2016 if (strcmp(name, "type") == 0) {
2017 if (!get_map_field_int(map->name, obj->btf, m,
2018 &map->def.type))
2019 return -EINVAL;
2020 pr_debug("map '%s': found type = %u.\n",
2021 map->name, map->def.type);
2022 } else if (strcmp(name, "max_entries") == 0) {
2023 if (!get_map_field_int(map->name, obj->btf, m,
2024 &map->def.max_entries))
2025 return -EINVAL;
2026 pr_debug("map '%s': found max_entries = %u.\n",
2027 map->name, map->def.max_entries);
2028 } else if (strcmp(name, "map_flags") == 0) {
2029 if (!get_map_field_int(map->name, obj->btf, m,
2030 &map->def.map_flags))
2031 return -EINVAL;
2032 pr_debug("map '%s': found map_flags = %u.\n",
2033 map->name, map->def.map_flags);
2034 } else if (strcmp(name, "numa_node") == 0) {
2035 if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node))
2036 return -EINVAL;
2037 pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node);
2038 } else if (strcmp(name, "key_size") == 0) {
2039 __u32 sz;
2040
2041 if (!get_map_field_int(map->name, obj->btf, m, &sz))
2042 return -EINVAL;
2043 pr_debug("map '%s': found key_size = %u.\n",
2044 map->name, sz);
2045 if (map->def.key_size && map->def.key_size != sz) {
2046 pr_warn("map '%s': conflicting key size %u != %u.\n",
2047 map->name, map->def.key_size, sz);
2048 return -EINVAL;
2049 }
2050 map->def.key_size = sz;
2051 } else if (strcmp(name, "key") == 0) {
2052 __s64 sz;
2053
2054 t = btf__type_by_id(obj->btf, m->type);
2055 if (!t) {
2056 pr_warn("map '%s': key type [%d] not found.\n",
2057 map->name, m->type);
2058 return -EINVAL;
2059 }
2060 if (!btf_is_ptr(t)) {
2061 pr_warn("map '%s': key spec is not PTR: %s.\n",
2062 map->name, btf_kind_str(t));
2063 return -EINVAL;
2064 }
2065 sz = btf__resolve_size(obj->btf, t->type);
2066 if (sz < 0) {
2067 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2068 map->name, t->type, (ssize_t)sz);
2069 return sz;
2070 }
2071 pr_debug("map '%s': found key [%u], sz = %zd.\n",
2072 map->name, t->type, (ssize_t)sz);
2073 if (map->def.key_size && map->def.key_size != sz) {
2074 pr_warn("map '%s': conflicting key size %u != %zd.\n",
2075 map->name, map->def.key_size, (ssize_t)sz);
2076 return -EINVAL;
2077 }
2078 map->def.key_size = sz;
2079 map->btf_key_type_id = t->type;
2080 } else if (strcmp(name, "value_size") == 0) {
2081 __u32 sz;
2082
2083 if (!get_map_field_int(map->name, obj->btf, m, &sz))
2084 return -EINVAL;
2085 pr_debug("map '%s': found value_size = %u.\n",
2086 map->name, sz);
2087 if (map->def.value_size && map->def.value_size != sz) {
2088 pr_warn("map '%s': conflicting value size %u != %u.\n",
2089 map->name, map->def.value_size, sz);
2090 return -EINVAL;
2091 }
2092 map->def.value_size = sz;
2093 } else if (strcmp(name, "value") == 0) {
2094 __s64 sz;
2095
2096 t = btf__type_by_id(obj->btf, m->type);
2097 if (!t) {
2098 pr_warn("map '%s': value type [%d] not found.\n",
2099 map->name, m->type);
2100 return -EINVAL;
2101 }
2102 if (!btf_is_ptr(t)) {
2103 pr_warn("map '%s': value spec is not PTR: %s.\n",
2104 map->name, btf_kind_str(t));
2105 return -EINVAL;
2106 }
2107 sz = btf__resolve_size(obj->btf, t->type);
2108 if (sz < 0) {
2109 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2110 map->name, t->type, (ssize_t)sz);
2111 return sz;
2112 }
2113 pr_debug("map '%s': found value [%u], sz = %zd.\n",
2114 map->name, t->type, (ssize_t)sz);
2115 if (map->def.value_size && map->def.value_size != sz) {
2116 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2117 map->name, map->def.value_size, (ssize_t)sz);
2118 return -EINVAL;
2119 }
2120 map->def.value_size = sz;
2121 map->btf_value_type_id = t->type;
2122 		} else if (strcmp(name, "values") == 0) {
2124 int err;
2125
2126 if (is_inner) {
2127 pr_warn("map '%s': multi-level inner maps not supported.\n",
2128 map->name);
2129 return -ENOTSUP;
2130 }
2131 if (i != vlen - 1) {
2132 pr_warn("map '%s': '%s' member should be last.\n",
2133 map->name, name);
2134 return -EINVAL;
2135 }
2136 if (!bpf_map_type__is_map_in_map(map->def.type)) {
2137 pr_warn("map '%s': should be map-in-map.\n",
2138 map->name);
2139 return -ENOTSUP;
2140 }
2141 if (map->def.value_size && map->def.value_size != 4) {
2142 pr_warn("map '%s': conflicting value size %u != 4.\n",
2143 map->name, map->def.value_size);
2144 return -EINVAL;
2145 }
2146 map->def.value_size = 4;
2147 t = btf__type_by_id(obj->btf, m->type);
2148 if (!t) {
2149 pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
2150 map->name, m->type);
2151 return -EINVAL;
2152 }
2153 if (!btf_is_array(t) || btf_array(t)->nelems) {
2154 pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
2155 map->name);
2156 return -EINVAL;
2157 }
2158 t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type,
2159 NULL);
2160 if (!btf_is_ptr(t)) {
2161 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2162 map->name, btf_kind_str(t));
2163 return -EINVAL;
2164 }
2165 t = skip_mods_and_typedefs(obj->btf, t->type, NULL);
2166 if (!btf_is_struct(t)) {
2167 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2168 map->name, btf_kind_str(t));
2169 return -EINVAL;
2170 }
2171
2172 map->inner_map = calloc(1, sizeof(*map->inner_map));
2173 if (!map->inner_map)
2174 return -ENOMEM;
2175 map->inner_map->sec_idx = obj->efile.btf_maps_shndx;
2176 map->inner_map->name = malloc(strlen(map->name) +
2177 sizeof(".inner") + 1);
2178 if (!map->inner_map->name)
2179 return -ENOMEM;
2180 sprintf(map->inner_map->name, "%s.inner", map->name);
2181
2182 err = parse_btf_map_def(obj, map->inner_map, t, strict,
2183 true /* is_inner */, NULL);
2184 if (err)
2185 return err;
2186 } else if (strcmp(name, "pinning") == 0) {
2187 __u32 val;
2188 int err;
2189
2190 if (is_inner) {
2191 pr_debug("map '%s': inner def can't be pinned.\n",
2192 map->name);
2193 return -EINVAL;
2194 }
2195 if (!get_map_field_int(map->name, obj->btf, m, &val))
2196 return -EINVAL;
2197 pr_debug("map '%s': found pinning = %u.\n",
2198 map->name, val);
2199
2200 if (val != LIBBPF_PIN_NONE &&
2201 val != LIBBPF_PIN_BY_NAME) {
2202 pr_warn("map '%s': invalid pinning value %u.\n",
2203 map->name, val);
2204 return -EINVAL;
2205 }
2206 if (val == LIBBPF_PIN_BY_NAME) {
2207 err = build_map_pin_path(map, pin_root_path);
2208 if (err) {
2209 pr_warn("map '%s': couldn't build pin path.\n",
2210 map->name);
2211 return err;
2212 }
2213 }
2214 } else {
2215 if (strict) {
2216 pr_warn("map '%s': unknown field '%s'.\n",
2217 map->name, name);
2218 return -ENOTSUP;
2219 }
2220 pr_debug("map '%s': ignoring unknown field '%s'.\n",
2221 map->name, name);
2222 }
2223 }
2224
2225 if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
2226 pr_warn("map '%s': map type isn't specified.\n", map->name);
2227 return -EINVAL;
2228 }
2229
2230 return 0;
2231}
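
/*
 * Illustrative only: a BTF-defined map, written with the bpf_helpers.h
 * macros, that exercises most of the fields handled by parse_btf_map_def()
 * above (map and type names are made up):
 *
 *	struct inner_map {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
 *		__uint(max_entries, 16);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__uint(pinning, LIBBPF_PIN_BY_NAME);
 *		__type(key, __u32);
 *		__array(values, struct inner_map);
 *	} outer_map SEC(".maps");
 *
 * 'values' is handled by the map-in-map branch above (a zero-sized array of
 * pointers to the inner map definition), and 'pinning' triggers
 * build_map_pin_path().
 */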
2232
2233static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2234 const struct btf_type *sec,
2235 int var_idx, int sec_idx,
2236 const Elf_Data *data, bool strict,
2237 const char *pin_root_path)
2238{
2239 const struct btf_type *var, *def;
2240 const struct btf_var_secinfo *vi;
2241 const struct btf_var *var_extra;
2242 const char *map_name;
2243 struct bpf_map *map;
2244
2245 vi = btf_var_secinfos(sec) + var_idx;
2246 var = btf__type_by_id(obj->btf, vi->type);
2247 var_extra = btf_var(var);
2248 map_name = btf__name_by_offset(obj->btf, var->name_off);
2249
2250 if (map_name == NULL || map_name[0] == '\0') {
2251 pr_warn("map #%d: empty name.\n", var_idx);
2252 return -EINVAL;
2253 }
2254 if ((__u64)vi->offset + vi->size > data->d_size) {
2255 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2256 return -EINVAL;
2257 }
2258 if (!btf_is_var(var)) {
2259 pr_warn("map '%s': unexpected var kind %s.\n",
2260 map_name, btf_kind_str(var));
2261 return -EINVAL;
2262 }
2263 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2264 var_extra->linkage != BTF_VAR_STATIC) {
2265 pr_warn("map '%s': unsupported var linkage %u.\n",
2266 map_name, var_extra->linkage);
2267 return -EOPNOTSUPP;
2268 }
2269
2270 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2271 if (!btf_is_struct(def)) {
2272 pr_warn("map '%s': unexpected def kind %s.\n",
2273 			map_name, btf_kind_str(def));
2274 return -EINVAL;
2275 }
2276 if (def->size > vi->size) {
2277 pr_warn("map '%s': invalid def size.\n", map_name);
2278 return -EINVAL;
2279 }
2280
2281 map = bpf_object__add_map(obj);
2282 if (IS_ERR(map))
2283 return PTR_ERR(map);
2284 map->name = strdup(map_name);
2285 if (!map->name) {
2286 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2287 return -ENOMEM;
2288 }
2289 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2290 map->def.type = BPF_MAP_TYPE_UNSPEC;
2291 map->sec_idx = sec_idx;
2292 map->sec_offset = vi->offset;
2293 map->btf_var_idx = var_idx;
2294 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2295 map_name, map->sec_idx, map->sec_offset);
2296
2297 return parse_btf_map_def(obj, map, def, strict, false, pin_root_path);
2298}
2299
2300static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2301 const char *pin_root_path)
2302{
2303 const struct btf_type *sec = NULL;
2304 int nr_types, i, vlen, err;
2305 const struct btf_type *t;
2306 const char *name;
2307 Elf_Data *data;
2308 Elf_Scn *scn;
2309
2310 if (obj->efile.btf_maps_shndx < 0)
2311 return 0;
2312
2313 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2314 data = elf_sec_data(obj, scn);
2315 if (!scn || !data) {
2316 pr_warn("elf: failed to get %s map definitions for %s\n",
2317 MAPS_ELF_SEC, obj->path);
2318 return -EINVAL;
2319 }
2320
2321 nr_types = btf__get_nr_types(obj->btf);
2322 for (i = 1; i <= nr_types; i++) {
2323 t = btf__type_by_id(obj->btf, i);
2324 if (!btf_is_datasec(t))
2325 continue;
2326 name = btf__name_by_offset(obj->btf, t->name_off);
2327 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2328 sec = t;
2329 obj->efile.btf_maps_sec_btf_id = i;
2330 break;
2331 }
2332 }
2333
2334 if (!sec) {
2335 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2336 return -ENOENT;
2337 }
2338
2339 vlen = btf_vlen(sec);
2340 for (i = 0; i < vlen; i++) {
2341 err = bpf_object__init_user_btf_map(obj, sec, i,
2342 obj->efile.btf_maps_shndx,
2343 data, strict,
2344 pin_root_path);
2345 if (err)
2346 return err;
2347 }
2348
2349 return 0;
2350}
2351
2352static int bpf_object__init_maps(struct bpf_object *obj,
2353 const struct bpf_object_open_opts *opts)
2354{
2355 const char *pin_root_path;
2356 bool strict;
2357 int err;
2358
2359 strict = !OPTS_GET(opts, relaxed_maps, false);
2360 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2361
2362 err = bpf_object__init_user_maps(obj, strict);
2363 err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2364 err = err ?: bpf_object__init_global_data_maps(obj);
2365 err = err ?: bpf_object__init_kconfig_map(obj);
2366 err = err ?: bpf_object__init_struct_ops_maps(obj);
2367 if (err)
2368 return err;
2369
2370 return 0;
2371}
2372
2373static bool section_have_execinstr(struct bpf_object *obj, int idx)
2374{
2375 GElf_Shdr sh;
2376
2377 if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
2378 return false;
2379
2380 return sh.sh_flags & SHF_EXECINSTR;
2381}
2382
2383static bool btf_needs_sanitization(struct bpf_object *obj)
2384{
2385 bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
2386 bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
2387 bool has_func = kernel_supports(FEAT_BTF_FUNC);
2388
2389 return !has_func || !has_datasec || !has_func_global;
2390}
2391
2392static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2393{
2394 bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
2395 bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
2396 bool has_func = kernel_supports(FEAT_BTF_FUNC);
2397 struct btf_type *t;
2398 int i, j, vlen;
2399
2400 for (i = 1; i <= btf__get_nr_types(btf); i++) {
2401 t = (struct btf_type *)btf__type_by_id(btf, i);
2402
2403 if (!has_datasec && btf_is_var(t)) {
2404 /* replace VAR with INT */
2405 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
2406 /*
2407 * using size = 1 is the safest choice, 4 will be too
2408 * big and cause kernel BTF validation failure if
2409 * original variable took less than 4 bytes
2410 */
2411 t->size = 1;
2412 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
2413 } else if (!has_datasec && btf_is_datasec(t)) {
2414 /* replace DATASEC with STRUCT */
2415 const struct btf_var_secinfo *v = btf_var_secinfos(t);
2416 struct btf_member *m = btf_members(t);
2417 struct btf_type *vt;
2418 char *name;
2419
2420 name = (char *)btf__name_by_offset(btf, t->name_off);
2421 while (*name) {
2422 if (*name == '.')
2423 *name = '_';
2424 name++;
2425 }
2426
2427 vlen = btf_vlen(t);
2428 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2429 for (j = 0; j < vlen; j++, v++, m++) {
2430 /* order of field assignments is important */
2431 m->offset = v->offset * 8;
2432 m->type = v->type;
2433 /* preserve variable name as member name */
2434 vt = (void *)btf__type_by_id(btf, v->type);
2435 m->name_off = vt->name_off;
2436 }
2437 } else if (!has_func && btf_is_func_proto(t)) {
2438 /* replace FUNC_PROTO with ENUM */
2439 vlen = btf_vlen(t);
2440 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2441 t->size = sizeof(__u32); /* kernel enforced */
2442 } else if (!has_func && btf_is_func(t)) {
2443 /* replace FUNC with TYPEDEF */
2444 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2445 } else if (!has_func_global && btf_is_func(t)) {
2446 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2447 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
2448 }
2449 }
2450}
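
/*
 * Sketch of the effect (variable name is made up), assuming a kernel
 * without BTF_KIND_DATASEC support. Original BTF:
 *
 *	VAR 'my_var', type=int, linkage=global
 *	DATASEC '.data', size=4, vars=[my_var at offset 0]
 *
 * is rewritten in the sanitized copy as:
 *
 *	INT 'my_var', size=1, bits=8
 *	STRUCT '_data', size=4, members=[my_var at bit offset 0]
 *
 * so that pre-DATASEC kernels still accept the uploaded BTF blob.
 */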
2451
2452static bool libbpf_needs_btf(const struct bpf_object *obj)
2453{
2454 return obj->efile.btf_maps_shndx >= 0 ||
2455 obj->efile.st_ops_shndx >= 0 ||
2456 obj->nr_extern > 0;
2457}
2458
2459static bool kernel_needs_btf(const struct bpf_object *obj)
2460{
2461 return obj->efile.st_ops_shndx >= 0;
2462}
2463
2464static int bpf_object__init_btf(struct bpf_object *obj,
2465 Elf_Data *btf_data,
2466 Elf_Data *btf_ext_data)
2467{
2468 int err = -ENOENT;
2469
2470 if (btf_data) {
2471 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2472 if (IS_ERR(obj->btf)) {
2473 err = PTR_ERR(obj->btf);
2474 obj->btf = NULL;
2475 pr_warn("Error loading ELF section %s: %d.\n",
2476 BTF_ELF_SEC, err);
2477 goto out;
2478 }
2479 /* enforce 8-byte pointers for BPF-targeted BTFs */
2480 btf__set_pointer_size(obj->btf, 8);
2481 err = 0;
2482 }
2483 if (btf_ext_data) {
2484 if (!obj->btf) {
2485 			pr_debug("Ignoring ELF section %s because its dependency, ELF section %s, was not found.\n",
2486 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2487 goto out;
2488 }
2489 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
2490 btf_ext_data->d_size);
2491 if (IS_ERR(obj->btf_ext)) {
2492 			pr_warn("Error loading ELF section %s: %ld. Ignoring it and continuing.\n",
2493 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
2494 obj->btf_ext = NULL;
2495 goto out;
2496 }
2497 }
2498out:
2499 if (err && libbpf_needs_btf(obj)) {
2500 pr_warn("BTF is required, but is missing or corrupted.\n");
2501 return err;
2502 }
2503 return 0;
2504}
2505
2506static int bpf_object__finalize_btf(struct bpf_object *obj)
2507{
2508 int err;
2509
2510 if (!obj->btf)
2511 return 0;
2512
2513 err = btf__finalize_data(obj, obj->btf);
2514 if (err) {
2515 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2516 return err;
2517 }
2518
2519 return 0;
2520}
2521
2522static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
2523{
2524 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2525 prog->type == BPF_PROG_TYPE_LSM)
2526 return true;
2527
2528 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
2529 * also need vmlinux BTF
2530 */
2531 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2532 return true;
2533
2534 return false;
2535}
2536
2537static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
2538{
2539 struct bpf_program *prog;
2540 int i;
2541
2542 /* CO-RE relocations need kernel BTF */
2543 if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
2544 return true;
2545
2546 /* Support for typed ksyms needs kernel BTF */
2547 for (i = 0; i < obj->nr_extern; i++) {
2548 const struct extern_desc *ext;
2549
2550 ext = &obj->externs[i];
2551 if (ext->type == EXT_KSYM && ext->ksym.type_id)
2552 return true;
2553 }
2554
2555 bpf_object__for_each_program(prog, obj) {
2556 if (!prog->load)
2557 continue;
2558 if (prog_needs_vmlinux_btf(prog))
2559 return true;
2560 }
2561
2562 return false;
2563}
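
/*
 * For example (program and symbol names are only illustrative), any of the
 * following in a BPF object makes this function return true:
 *
 *	SEC("fentry/do_unlinkat")
 *	int BPF_PROG(trace_unlink, ...);          // TRACING prog w/o attach_prog_fd
 *
 *	extern const struct rq runqueues __ksym;  // typed ksym extern
 *
 *	pid = BPF_CORE_READ(task, pid);           // CO-RE relocation
 *
 * (BPF_PROG and BPF_CORE_READ come from bpf_tracing.h / bpf_core_read.h.)
 */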
2564
2565static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
2566{
2567 int err;
2568
2569 /* btf_vmlinux could be loaded earlier */
2570 if (obj->btf_vmlinux)
2571 return 0;
2572
2573 if (!force && !obj_needs_vmlinux_btf(obj))
2574 return 0;
2575
2576 obj->btf_vmlinux = libbpf_find_kernel_btf();
2577 if (IS_ERR(obj->btf_vmlinux)) {
2578 err = PTR_ERR(obj->btf_vmlinux);
2579 pr_warn("Error loading vmlinux BTF: %d\n", err);
2580 obj->btf_vmlinux = NULL;
2581 return err;
2582 }
2583 return 0;
2584}
2585
2586static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2587{
2588 struct btf *kern_btf = obj->btf;
2589 bool btf_mandatory, sanitize;
2590 int err = 0;
2591
2592 if (!obj->btf)
2593 return 0;
2594
2595 if (!kernel_supports(FEAT_BTF)) {
2596 if (kernel_needs_btf(obj)) {
2597 err = -EOPNOTSUPP;
2598 goto report;
2599 }
2600 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
2601 return 0;
2602 }
2603
2604 sanitize = btf_needs_sanitization(obj);
2605 if (sanitize) {
2606 const void *raw_data;
2607 __u32 sz;
2608
2609 /* clone BTF to sanitize a copy and leave the original intact */
2610 raw_data = btf__get_raw_data(obj->btf, &sz);
2611 kern_btf = btf__new(raw_data, sz);
2612 if (IS_ERR(kern_btf))
2613 return PTR_ERR(kern_btf);
2614
2615 /* enforce 8-byte pointers for BPF-targeted BTFs */
2616 btf__set_pointer_size(obj->btf, 8);
2617 bpf_object__sanitize_btf(obj, kern_btf);
2618 }
2619
2620 err = btf__load(kern_btf);
2621 if (sanitize) {
2622 if (!err) {
2623 /* move fd to libbpf's BTF */
2624 btf__set_fd(obj->btf, btf__fd(kern_btf));
2625 btf__set_fd(kern_btf, -1);
2626 }
2627 btf__free(kern_btf);
2628 }
2629report:
2630 if (err) {
2631 btf_mandatory = kernel_needs_btf(obj);
2632 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
2633 btf_mandatory ? "BTF is mandatory, can't proceed."
2634 : "BTF is optional, ignoring.");
2635 if (!btf_mandatory)
2636 err = 0;
2637 }
2638 return err;
2639}
2640
2641static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
2642{
2643 const char *name;
2644
2645 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
2646 if (!name) {
2647 		pr_warn("elf: failed to get symbol name string at offset %zu from %s: %s\n",
2648 off, obj->path, elf_errmsg(-1));
2649 return NULL;
2650 }
2651
2652 return name;
2653}
2654
2655static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
2656{
2657 const char *name;
2658
2659 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
2660 if (!name) {
2661 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
2662 off, obj->path, elf_errmsg(-1));
2663 return NULL;
2664 }
2665
2666 return name;
2667}
2668
2669static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
2670{
2671 Elf_Scn *scn;
2672
2673 scn = elf_getscn(obj->efile.elf, idx);
2674 if (!scn) {
2675 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
2676 idx, obj->path, elf_errmsg(-1));
2677 return NULL;
2678 }
2679 return scn;
2680}
2681
2682static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
2683{
2684 Elf_Scn *scn = NULL;
2685 Elf *elf = obj->efile.elf;
2686 const char *sec_name;
2687
2688 while ((scn = elf_nextscn(elf, scn)) != NULL) {
2689 sec_name = elf_sec_name(obj, scn);
2690 if (!sec_name)
2691 return NULL;
2692
2693 if (strcmp(sec_name, name) != 0)
2694 continue;
2695
2696 return scn;
2697 }
2698 return NULL;
2699}
2700
2701static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
2702{
2703 if (!scn)
2704 return -EINVAL;
2705
2706 if (gelf_getshdr(scn, hdr) != hdr) {
2707 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
2708 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2709 return -EINVAL;
2710 }
2711
2712 return 0;
2713}
2714
2715static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
2716{
2717 const char *name;
2718 GElf_Shdr sh;
2719
2720 if (!scn)
2721 return NULL;
2722
2723 if (elf_sec_hdr(obj, scn, &sh))
2724 return NULL;
2725
2726 name = elf_sec_str(obj, sh.sh_name);
2727 if (!name) {
2728 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
2729 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2730 return NULL;
2731 }
2732
2733 return name;
2734}
2735
2736static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
2737{
2738 Elf_Data *data;
2739
2740 if (!scn)
2741 return NULL;
2742
2743 data = elf_getdata(scn, 0);
2744 if (!data) {
2745 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
2746 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
2747 obj->path, elf_errmsg(-1));
2748 return NULL;
2749 }
2750
2751 return data;
2752}
2753
2754static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
2755 size_t off, __u32 sym_type, GElf_Sym *sym)
2756{
2757 Elf_Data *symbols = obj->efile.symbols;
2758 size_t n = symbols->d_size / sizeof(GElf_Sym);
2759 int i;
2760
2761 for (i = 0; i < n; i++) {
2762 if (!gelf_getsym(symbols, i, sym))
2763 continue;
2764 if (sym->st_shndx != sec_idx || sym->st_value != off)
2765 continue;
2766 if (GELF_ST_TYPE(sym->st_info) != sym_type)
2767 continue;
2768 return 0;
2769 }
2770
2771 return -ENOENT;
2772}
2773
2774static bool is_sec_name_dwarf(const char *name)
2775{
2776 /* approximation, but the actual list is too long */
2777 return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
2778}
2779
2780static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
2781{
2782 /* no special handling of .strtab */
2783 if (hdr->sh_type == SHT_STRTAB)
2784 return true;
2785
2786 /* ignore .llvm_addrsig section as well */
2787 if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */)
2788 return true;
2789
2790 /* no subprograms will lead to an empty .text section, ignore it */
2791 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
2792 strcmp(name, ".text") == 0)
2793 return true;
2794
2795 /* DWARF sections */
2796 if (is_sec_name_dwarf(name))
2797 return true;
2798
2799 if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
2800 name += sizeof(".rel") - 1;
2801 /* DWARF section relocations */
2802 if (is_sec_name_dwarf(name))
2803 return true;
2804
2805 /* .BTF and .BTF.ext don't need relocations */
2806 if (strcmp(name, BTF_ELF_SEC) == 0 ||
2807 strcmp(name, BTF_EXT_ELF_SEC) == 0)
2808 return true;
2809 }
2810
2811 return false;
2812}
2813
2814static int cmp_progs(const void *_a, const void *_b)
2815{
2816 const struct bpf_program *a = _a;
2817 const struct bpf_program *b = _b;
2818
2819 if (a->sec_idx != b->sec_idx)
2820 return a->sec_idx < b->sec_idx ? -1 : 1;
2821
2822 /* sec_insn_off can't be the same within the section */
2823 return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
2824}
2825
2826static int bpf_object__elf_collect(struct bpf_object *obj)
2827{
2828 Elf *elf = obj->efile.elf;
2829 Elf_Data *btf_ext_data = NULL;
2830 Elf_Data *btf_data = NULL;
2831 int idx = 0, err = 0;
2832 const char *name;
2833 Elf_Data *data;
2834 Elf_Scn *scn;
2835 GElf_Shdr sh;
2836
2837 /* a bunch of ELF parsing functionality depends on processing symbols,
2838 * so do the first pass and find the symbol table
2839 */
2840 scn = NULL;
2841 while ((scn = elf_nextscn(elf, scn)) != NULL) {
2842 if (elf_sec_hdr(obj, scn, &sh))
2843 return -LIBBPF_ERRNO__FORMAT;
2844
2845 if (sh.sh_type == SHT_SYMTAB) {
2846 if (obj->efile.symbols) {
2847 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
2848 return -LIBBPF_ERRNO__FORMAT;
2849 }
2850
2851 data = elf_sec_data(obj, scn);
2852 if (!data)
2853 return -LIBBPF_ERRNO__FORMAT;
2854
2855 obj->efile.symbols = data;
2856 obj->efile.symbols_shndx = elf_ndxscn(scn);
2857 obj->efile.strtabidx = sh.sh_link;
2858 }
2859 }
2860
2861 scn = NULL;
2862 while ((scn = elf_nextscn(elf, scn)) != NULL) {
2863 idx++;
2864
2865 if (elf_sec_hdr(obj, scn, &sh))
2866 return -LIBBPF_ERRNO__FORMAT;
2867
2868 name = elf_sec_str(obj, sh.sh_name);
2869 if (!name)
2870 return -LIBBPF_ERRNO__FORMAT;
2871
2872 if (ignore_elf_section(&sh, name))
2873 continue;
2874
2875 data = elf_sec_data(obj, scn);
2876 if (!data)
2877 return -LIBBPF_ERRNO__FORMAT;
2878
2879 		pr_debug("elf: section(%d) %s, size %lu, link %d, flags %lx, type=%d\n",
2880 idx, name, (unsigned long)data->d_size,
2881 (int)sh.sh_link, (unsigned long)sh.sh_flags,
2882 (int)sh.sh_type);
2883
2884 if (strcmp(name, "license") == 0) {
2885 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
2886 if (err)
2887 return err;
2888 } else if (strcmp(name, "version") == 0) {
2889 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
2890 if (err)
2891 return err;
2892 } else if (strcmp(name, "maps") == 0) {
2893 obj->efile.maps_shndx = idx;
2894 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
2895 obj->efile.btf_maps_shndx = idx;
2896 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
2897 btf_data = data;
2898 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
2899 btf_ext_data = data;
2900 } else if (sh.sh_type == SHT_SYMTAB) {
2901 /* already processed during the first pass above */
2902 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
2903 if (sh.sh_flags & SHF_EXECINSTR) {
2904 if (strcmp(name, ".text") == 0)
2905 obj->efile.text_shndx = idx;
2906 err = bpf_object__add_programs(obj, data, name, idx);
2907 if (err)
2908 return err;
2909 } else if (strcmp(name, DATA_SEC) == 0) {
2910 obj->efile.data = data;
2911 obj->efile.data_shndx = idx;
2912 } else if (strcmp(name, RODATA_SEC) == 0) {
2913 obj->efile.rodata = data;
2914 obj->efile.rodata_shndx = idx;
2915 } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
2916 obj->efile.st_ops_data = data;
2917 obj->efile.st_ops_shndx = idx;
2918 } else {
2919 pr_info("elf: skipping unrecognized data section(%d) %s\n",
2920 idx, name);
2921 }
2922 } else if (sh.sh_type == SHT_REL) {
2923 int nr_sects = obj->efile.nr_reloc_sects;
2924 void *sects = obj->efile.reloc_sects;
2925 int sec = sh.sh_info; /* points to other section */
2926
2927 /* Only do relo for section with exec instructions */
2928 if (!section_have_execinstr(obj, sec) &&
2929 strcmp(name, ".rel" STRUCT_OPS_SEC) &&
2930 strcmp(name, ".rel" MAPS_ELF_SEC)) {
2931 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
2932 idx, name, sec,
2933 elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
2934 continue;
2935 }
2936
2937 sects = libbpf_reallocarray(sects, nr_sects + 1,
2938 sizeof(*obj->efile.reloc_sects));
2939 if (!sects)
2940 return -ENOMEM;
2941
2942 obj->efile.reloc_sects = sects;
2943 obj->efile.nr_reloc_sects++;
2944
2945 obj->efile.reloc_sects[nr_sects].shdr = sh;
2946 obj->efile.reloc_sects[nr_sects].data = data;
2947 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
2948 obj->efile.bss = data;
2949 obj->efile.bss_shndx = idx;
2950 } else {
2951 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
2952 (size_t)sh.sh_size);
2953 }
2954 }
2955
2956 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
2957 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
2958 return -LIBBPF_ERRNO__FORMAT;
2959 }
2960
2961 	/* sort BPF programs by section index and in-section instruction offset
2962 * for faster search */
2963 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
2964
2965 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
2966}
2967
2968static bool sym_is_extern(const GElf_Sym *sym)
2969{
2970 int bind = GELF_ST_BIND(sym->st_info);
2971 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
2972 return sym->st_shndx == SHN_UNDEF &&
2973 (bind == STB_GLOBAL || bind == STB_WEAK) &&
2974 GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
2975}
2976
2977static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
2978{
2979 const struct btf_type *t;
2980 const char *var_name;
2981 int i, n;
2982
2983 if (!btf)
2984 return -ESRCH;
2985
2986 n = btf__get_nr_types(btf);
2987 for (i = 1; i <= n; i++) {
2988 t = btf__type_by_id(btf, i);
2989
2990 if (!btf_is_var(t))
2991 continue;
2992
2993 var_name = btf__name_by_offset(btf, t->name_off);
2994 if (strcmp(var_name, ext_name))
2995 continue;
2996
2997 if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
2998 return -EINVAL;
2999
3000 return i;
3001 }
3002
3003 return -ENOENT;
3004}
3005
3006static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
3007 const struct btf_var_secinfo *vs;
3008 const struct btf_type *t;
3009 int i, j, n;
3010
3011 if (!btf)
3012 return -ESRCH;
3013
3014 n = btf__get_nr_types(btf);
3015 for (i = 1; i <= n; i++) {
3016 t = btf__type_by_id(btf, i);
3017
3018 if (!btf_is_datasec(t))
3019 continue;
3020
3021 vs = btf_var_secinfos(t);
3022 for (j = 0; j < btf_vlen(t); j++, vs++) {
3023 if (vs->type == ext_btf_id)
3024 return i;
3025 }
3026 }
3027
3028 return -ENOENT;
3029}
3030
3031static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3032 bool *is_signed)
3033{
3034 const struct btf_type *t;
3035 const char *name;
3036
3037 t = skip_mods_and_typedefs(btf, id, NULL);
3038 name = btf__name_by_offset(btf, t->name_off);
3039
3040 if (is_signed)
3041 *is_signed = false;
3042 switch (btf_kind(t)) {
3043 case BTF_KIND_INT: {
3044 int enc = btf_int_encoding(t);
3045
3046 if (enc & BTF_INT_BOOL)
3047 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3048 if (is_signed)
3049 *is_signed = enc & BTF_INT_SIGNED;
3050 if (t->size == 1)
3051 return KCFG_CHAR;
3052 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
3053 return KCFG_UNKNOWN;
3054 return KCFG_INT;
3055 }
3056 case BTF_KIND_ENUM:
3057 if (t->size != 4)
3058 return KCFG_UNKNOWN;
3059 if (strcmp(name, "libbpf_tristate"))
3060 return KCFG_UNKNOWN;
3061 return KCFG_TRISTATE;
3062 case BTF_KIND_ARRAY:
3063 if (btf_array(t)->nelems == 0)
3064 return KCFG_UNKNOWN;
3065 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
3066 return KCFG_UNKNOWN;
3067 return KCFG_CHAR_ARR;
3068 default:
3069 return KCFG_UNKNOWN;
3070 }
3071}
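
/*
 * Illustrative .kconfig extern declarations and the kcfg_type each one
 * resolves to (the CONFIG_* names are just examples):
 *
 *	extern int CONFIG_HZ __kconfig;                          // KCFG_INT
 *	extern bool CONFIG_BPF_JIT __kconfig;                    // KCFG_BOOL
 *	extern enum libbpf_tristate CONFIG_BTRFS_FS __kconfig;   // KCFG_TRISTATE
 *	extern char CONFIG_LOCALVERSION[64] __kconfig;           // KCFG_CHAR_ARR
 */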
3072
3073static int cmp_externs(const void *_a, const void *_b)
3074{
3075 const struct extern_desc *a = _a;
3076 const struct extern_desc *b = _b;
3077
3078 if (a->type != b->type)
3079 return a->type < b->type ? -1 : 1;
3080
3081 if (a->type == EXT_KCFG) {
3082 /* descending order by alignment requirements */
3083 if (a->kcfg.align != b->kcfg.align)
3084 return a->kcfg.align > b->kcfg.align ? -1 : 1;
3085 /* ascending order by size, within same alignment class */
3086 if (a->kcfg.sz != b->kcfg.sz)
3087 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3088 }
3089
3090 /* resolve ties by name */
3091 return strcmp(a->name, b->name);
3092}
3093
3094static int find_int_btf_id(const struct btf *btf)
3095{
3096 const struct btf_type *t;
3097 int i, n;
3098
3099 n = btf__get_nr_types(btf);
3100 for (i = 1; i <= n; i++) {
3101 t = btf__type_by_id(btf, i);
3102
3103 if (btf_is_int(t) && btf_int_bits(t) == 32)
3104 return i;
3105 }
3106
3107 return 0;
3108}
3109
3110static int bpf_object__collect_externs(struct bpf_object *obj)
3111{
3112 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
3113 const struct btf_type *t;
3114 struct extern_desc *ext;
3115 int i, n, off;
3116 const char *ext_name, *sec_name;
3117 Elf_Scn *scn;
3118 GElf_Shdr sh;
3119
3120 if (!obj->efile.symbols)
3121 return 0;
3122
3123 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
3124 if (elf_sec_hdr(obj, scn, &sh))
3125 return -LIBBPF_ERRNO__FORMAT;
3126
3127 n = sh.sh_size / sh.sh_entsize;
3128 pr_debug("looking for externs among %d symbols...\n", n);
3129
3130 for (i = 0; i < n; i++) {
3131 GElf_Sym sym;
3132
3133 if (!gelf_getsym(obj->efile.symbols, i, &sym))
3134 return -LIBBPF_ERRNO__FORMAT;
3135 if (!sym_is_extern(&sym))
3136 continue;
3137 ext_name = elf_sym_str(obj, sym.st_name);
3138 if (!ext_name || !ext_name[0])
3139 continue;
3140
3141 ext = obj->externs;
3142 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
3143 if (!ext)
3144 return -ENOMEM;
3145 obj->externs = ext;
3146 ext = &ext[obj->nr_extern];
3147 memset(ext, 0, sizeof(*ext));
3148 obj->nr_extern++;
3149
3150 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
3151 if (ext->btf_id <= 0) {
3152 pr_warn("failed to find BTF for extern '%s': %d\n",
3153 ext_name, ext->btf_id);
3154 return ext->btf_id;
3155 }
3156 t = btf__type_by_id(obj->btf, ext->btf_id);
3157 ext->name = btf__name_by_offset(obj->btf, t->name_off);
3158 ext->sym_idx = i;
3159 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
3160
3161 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
3162 if (ext->sec_btf_id <= 0) {
3163 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
3164 ext_name, ext->btf_id, ext->sec_btf_id);
3165 return ext->sec_btf_id;
3166 }
3167 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
3168 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
3169
3170 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
3171 kcfg_sec = sec;
3172 ext->type = EXT_KCFG;
3173 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
3174 if (ext->kcfg.sz <= 0) {
3175 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
3176 ext_name, ext->kcfg.sz);
3177 return ext->kcfg.sz;
3178 }
3179 ext->kcfg.align = btf__align_of(obj->btf, t->type);
3180 if (ext->kcfg.align <= 0) {
3181 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
3182 ext_name, ext->kcfg.align);
3183 return -EINVAL;
3184 }
3185 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
3186 &ext->kcfg.is_signed);
3187 if (ext->kcfg.type == KCFG_UNKNOWN) {
3188 pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
3189 return -ENOTSUP;
3190 }
3191 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
3192 ksym_sec = sec;
3193 ext->type = EXT_KSYM;
3194 skip_mods_and_typedefs(obj->btf, t->type,
3195 &ext->ksym.type_id);
3196 } else {
3197 pr_warn("unrecognized extern section '%s'\n", sec_name);
3198 return -ENOTSUP;
3199 }
3200 }
3201 pr_debug("collected %d externs total\n", obj->nr_extern);
3202
3203 if (!obj->nr_extern)
3204 return 0;
3205
3206 /* sort externs by type, for kcfg ones also by (align, size, name) */
3207 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
3208
3209 /* for .ksyms section, we need to turn all externs into allocated
3210 * variables in BTF to pass kernel verification; we do this by
3211 	 * pretending that each extern is an int-sized (4-byte) variable
3212 */
3213 if (ksym_sec) {
3214 /* find existing 4-byte integer type in BTF to use for fake
3215 * extern variables in DATASEC
3216 */
3217 int int_btf_id = find_int_btf_id(obj->btf);
3218
3219 for (i = 0; i < obj->nr_extern; i++) {
3220 ext = &obj->externs[i];
3221 if (ext->type != EXT_KSYM)
3222 continue;
3223 pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
3224 i, ext->sym_idx, ext->name);
3225 }
3226
3227 sec = ksym_sec;
3228 n = btf_vlen(sec);
3229 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
3230 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3231 struct btf_type *vt;
3232
3233 vt = (void *)btf__type_by_id(obj->btf, vs->type);
3234 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
3235 ext = find_extern_by_name(obj, ext_name);
3236 if (!ext) {
3237 pr_warn("failed to find extern definition for BTF var '%s'\n",
3238 ext_name);
3239 return -ESRCH;
3240 }
3241 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3242 vt->type = int_btf_id;
3243 vs->offset = off;
3244 vs->size = sizeof(int);
3245 }
3246 sec->size = off;
3247 }
3248
3249 if (kcfg_sec) {
3250 sec = kcfg_sec;
3251 /* for kcfg externs calculate their offsets within a .kconfig map */
3252 off = 0;
3253 for (i = 0; i < obj->nr_extern; i++) {
3254 ext = &obj->externs[i];
3255 if (ext->type != EXT_KCFG)
3256 continue;
3257
3258 ext->kcfg.data_off = roundup(off, ext->kcfg.align);
3259 off = ext->kcfg.data_off + ext->kcfg.sz;
3260 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
3261 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
3262 }
3263 sec->size = off;
3264 n = btf_vlen(sec);
3265 for (i = 0; i < n; i++) {
3266 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3267
3268 t = btf__type_by_id(obj->btf, vs->type);
3269 ext_name = btf__name_by_offset(obj->btf, t->name_off);
3270 ext = find_extern_by_name(obj, ext_name);
3271 if (!ext) {
3272 pr_warn("failed to find extern definition for BTF var '%s'\n",
3273 ext_name);
3274 return -ESRCH;
3275 }
3276 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3277 vs->offset = ext->kcfg.data_off;
3278 }
3279 }
3280 return 0;
3281}
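
/*
 * Illustrative extern declarations, as they would appear in BPF program
 * source, and how this function classifies them (symbol names are only
 * examples):
 *
 *	extern int CONFIG_HZ __kconfig;             // EXT_KCFG, from .kconfig
 *	extern const void bpf_prog_active __ksym;   // EXT_KSYM, untyped
 *	extern const struct rq runqueues __ksym;    // EXT_KSYM, typed (needs
 *	                                            // vmlinux BTF to resolve)
 *
 * Kcfg externs are laid out in the .kconfig map at the offsets computed
 * above; ksym externs are rewritten into fake int-sized variables so that
 * the object's BTF passes kernel validation.
 */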
3282
3283struct bpf_program *
3284bpf_object__find_program_by_title(const struct bpf_object *obj,
3285 const char *title)
3286{
3287 struct bpf_program *pos;
3288
3289 bpf_object__for_each_program(pos, obj) {
3290 if (pos->sec_name && !strcmp(pos->sec_name, title))
3291 return pos;
3292 }
3293 return NULL;
3294}
3295
3296static bool prog_is_subprog(const struct bpf_object *obj,
3297 const struct bpf_program *prog)
3298{
3299 	/* For legacy reasons, libbpf supports entry-point BPF programs
3300 * without SEC() attribute, i.e., those in the .text section. But if
3301 * there are 2 or more such programs in the .text section, they all
3302 * must be subprograms called from entry-point BPF programs in
3303 * designated SEC()'tions, otherwise there is no way to distinguish
3304 * which of those programs should be loaded vs which are a subprogram.
3305 * Similarly, if there is a function/program in .text and at least one
3306 * other BPF program with custom SEC() attribute, then we just assume
3307 * .text programs are subprograms (even if they are not called from
3308 * other programs), because libbpf never explicitly supported mixing
3309 * SEC()-designated BPF programs and .text entry-point BPF programs.
3310 */
3311 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
3312}
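
/*
 * Example (function names are made up): in an object containing
 *
 *	__attribute__((noinline))
 *	int helper(struct xdp_md *ctx) { return 0; }   // ends up in .text
 *
 *	SEC("xdp")
 *	int xdp_main(struct xdp_md *ctx) { return helper(ctx); }
 *
 * there is more than one program, so 'helper' (sec_idx == text_shndx) is
 * treated as a subprogram and only 'xdp_main' is loaded as an entry point.
 */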
3313
3314struct bpf_program *
3315bpf_object__find_program_by_name(const struct bpf_object *obj,
3316 const char *name)
3317{
3318 struct bpf_program *prog;
3319
3320 bpf_object__for_each_program(prog, obj) {
3321 if (prog_is_subprog(obj, prog))
3322 continue;
3323 if (!strcmp(prog->name, name))
3324 return prog;
3325 }
3326 return NULL;
3327}
3328
3329static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3330 int shndx)
3331{
3332 return shndx == obj->efile.data_shndx ||
3333 shndx == obj->efile.bss_shndx ||
3334 shndx == obj->efile.rodata_shndx;
3335}
3336
3337static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3338 int shndx)
3339{
3340 return shndx == obj->efile.maps_shndx ||
3341 shndx == obj->efile.btf_maps_shndx;
3342}
3343
3344static enum libbpf_map_type
3345bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3346{
3347 if (shndx == obj->efile.data_shndx)
3348 return LIBBPF_MAP_DATA;
3349 else if (shndx == obj->efile.bss_shndx)
3350 return LIBBPF_MAP_BSS;
3351 else if (shndx == obj->efile.rodata_shndx)
3352 return LIBBPF_MAP_RODATA;
3353 else if (shndx == obj->efile.symbols_shndx)
3354 return LIBBPF_MAP_KCONFIG;
3355 else
3356 return LIBBPF_MAP_UNSPEC;
3357}
3358
3359static int bpf_program__record_reloc(struct bpf_program *prog,
3360 struct reloc_desc *reloc_desc,
3361 __u32 insn_idx, const char *sym_name,
3362 const GElf_Sym *sym, const GElf_Rel *rel)
3363{
3364 struct bpf_insn *insn = &prog->insns[insn_idx];
3365 size_t map_idx, nr_maps = prog->obj->nr_maps;
3366 struct bpf_object *obj = prog->obj;
3367 __u32 shdr_idx = sym->st_shndx;
3368 enum libbpf_map_type type;
3369 const char *sym_sec_name;
3370 struct bpf_map *map;
3371
3372 reloc_desc->processed = false;
3373
3374 /* sub-program call relocation */
3375 if (insn->code == (BPF_JMP | BPF_CALL)) {
3376 if (insn->src_reg != BPF_PSEUDO_CALL) {
3377 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
3378 return -LIBBPF_ERRNO__RELOC;
3379 }
3380 /* text_shndx can be 0, if no default "main" program exists */
3381 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
3382 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3383 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
3384 prog->name, sym_name, sym_sec_name);
3385 return -LIBBPF_ERRNO__RELOC;
3386 }
3387 if (sym->st_value % BPF_INSN_SZ) {
3388 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
3389 prog->name, sym_name, (size_t)sym->st_value);
3390 return -LIBBPF_ERRNO__RELOC;
3391 }
3392 reloc_desc->type = RELO_CALL;
3393 reloc_desc->insn_idx = insn_idx;
3394 reloc_desc->sym_off = sym->st_value;
3395 return 0;
3396 }
3397
3398 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
3399 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
3400 prog->name, sym_name, insn_idx, insn->code);
3401 return -LIBBPF_ERRNO__RELOC;
3402 }
3403
3404 if (sym_is_extern(sym)) {
3405 int sym_idx = GELF_R_SYM(rel->r_info);
3406 int i, n = obj->nr_extern;
3407 struct extern_desc *ext;
3408
3409 for (i = 0; i < n; i++) {
3410 ext = &obj->externs[i];
3411 if (ext->sym_idx == sym_idx)
3412 break;
3413 }
3414 if (i >= n) {
3415 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
3416 prog->name, sym_name, sym_idx);
3417 return -LIBBPF_ERRNO__RELOC;
3418 }
3419 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
3420 prog->name, i, ext->name, ext->sym_idx, insn_idx);
3421 reloc_desc->type = RELO_EXTERN;
3422 reloc_desc->insn_idx = insn_idx;
3423 reloc_desc->sym_off = i; /* sym_off stores extern index */
3424 return 0;
3425 }
3426
3427 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
3428 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
3429 prog->name, sym_name, shdr_idx);
3430 return -LIBBPF_ERRNO__RELOC;
3431 }
3432
3433 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
3434 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3435
3436 /* generic map reference relocation */
3437 if (type == LIBBPF_MAP_UNSPEC) {
3438 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
3439 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
3440 prog->name, sym_name, sym_sec_name);
3441 return -LIBBPF_ERRNO__RELOC;
3442 }
3443 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3444 map = &obj->maps[map_idx];
3445 if (map->libbpf_type != type ||
3446 map->sec_idx != sym->st_shndx ||
3447 map->sec_offset != sym->st_value)
3448 continue;
3449 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
3450 prog->name, map_idx, map->name, map->sec_idx,
3451 map->sec_offset, insn_idx);
3452 break;
3453 }
3454 if (map_idx >= nr_maps) {
3455 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
3456 prog->name, sym_sec_name, (size_t)sym->st_value);
3457 return -LIBBPF_ERRNO__RELOC;
3458 }
3459 reloc_desc->type = RELO_LD64;
3460 reloc_desc->insn_idx = insn_idx;
3461 reloc_desc->map_idx = map_idx;
3462 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
3463 return 0;
3464 }
3465
3466 /* global data map relocation */
3467 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
3468 pr_warn("prog '%s': bad data relo against section '%s'\n",
3469 prog->name, sym_sec_name);
3470 return -LIBBPF_ERRNO__RELOC;
3471 }
3472 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3473 map = &obj->maps[map_idx];
3474 if (map->libbpf_type != type)
3475 continue;
3476 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
3477 prog->name, map_idx, map->name, map->sec_idx,
3478 map->sec_offset, insn_idx);
3479 break;
3480 }
3481 if (map_idx >= nr_maps) {
3482 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
3483 prog->name, sym_sec_name);
3484 return -LIBBPF_ERRNO__RELOC;
3485 }
3486
3487 reloc_desc->type = RELO_DATA;
3488 reloc_desc->insn_idx = insn_idx;
3489 reloc_desc->map_idx = map_idx;
3490 reloc_desc->sym_off = sym->st_value;
3491 return 0;
3492}
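
/*
 * Rough, illustrative mapping from BPF C constructs to the relocation kinds
 * recorded above ('my_map', 'global_counter' and 'CONFIG_HZ' are placeholder
 * names):
 *
 *	ret = subprog(ctx);                         // call insn   -> RELO_CALL
 *	val = bpf_map_lookup_elem(&my_map, &key);   // map ld_imm64 -> RELO_LD64
 *	cnt = global_counter;                       // .data/.bss   -> RELO_DATA
 *	hz  = CONFIG_HZ;                            // extern       -> RELO_EXTERN
 */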
3493
3494static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
3495{
3496 return insn_idx >= prog->sec_insn_off &&
3497 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
3498}
3499
3500static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
3501 size_t sec_idx, size_t insn_idx)
3502{
3503 int l = 0, r = obj->nr_programs - 1, m;
3504 struct bpf_program *prog;
3505
3506 while (l < r) {
3507 m = l + (r - l + 1) / 2;
3508 prog = &obj->programs[m];
3509
3510 if (prog->sec_idx < sec_idx ||
3511 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
3512 l = m;
3513 else
3514 r = m - 1;
3515 }
3516 /* matching program could be at index l, but it still might be the
3517 * wrong one, so we need to double check conditions for the last time
3518 */
3519 prog = &obj->programs[l];
3520 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
3521 return prog;
3522 return NULL;
3523}
3524
3525static int
3526bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
3527{
3528 Elf_Data *symbols = obj->efile.symbols;
3529 const char *relo_sec_name, *sec_name;
3530 size_t sec_idx = shdr->sh_info;
3531 struct bpf_program *prog;
3532 struct reloc_desc *relos;
3533 int err, i, nrels;
3534 const char *sym_name;
3535 __u32 insn_idx;
3536 GElf_Sym sym;
3537 GElf_Rel rel;
3538
3539 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
3540 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
3541 if (!relo_sec_name || !sec_name)
3542 return -EINVAL;
3543
3544 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
3545 relo_sec_name, sec_idx, sec_name);
3546 nrels = shdr->sh_size / shdr->sh_entsize;
3547
3548 for (i = 0; i < nrels; i++) {
3549 if (!gelf_getrel(data, i, &rel)) {
3550 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
3551 return -LIBBPF_ERRNO__FORMAT;
3552 }
3553 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
3554 pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
3555 relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
3556 return -LIBBPF_ERRNO__FORMAT;
3557 }
3558 if (rel.r_offset % BPF_INSN_SZ) {
3559 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
3560 				relo_sec_name, (size_t)rel.r_offset, i);
3561 return -LIBBPF_ERRNO__FORMAT;
3562 }
3563
3564 insn_idx = rel.r_offset / BPF_INSN_SZ;
3565 /* relocations against static functions are recorded as
3566 * relocations against the section that contains a function;
3567 * in such case, symbol will be STT_SECTION and sym.st_name
3568 * will point to empty string (0), so fetch section name
3569 * instead
3570 */
3571 if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
3572 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
3573 else
3574 sym_name = elf_sym_str(obj, sym.st_name);
3575 		sym_name = sym_name ?: "<?>";
3576
3577 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
3578 relo_sec_name, i, insn_idx, sym_name);
3579
3580 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
3581 if (!prog) {
3582 pr_warn("sec '%s': relo #%d: program not found in section '%s' for insn #%u\n",
3583 relo_sec_name, i, sec_name, insn_idx);
3584 return -LIBBPF_ERRNO__RELOC;
3585 }
3586
3587 relos = libbpf_reallocarray(prog->reloc_desc,
3588 prog->nr_reloc + 1, sizeof(*relos));
3589 if (!relos)
3590 return -ENOMEM;
3591 prog->reloc_desc = relos;
3592
3593 /* adjust insn_idx to local BPF program frame of reference */
3594 insn_idx -= prog->sec_insn_off;
3595 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
3596 insn_idx, sym_name, &sym, &rel);
3597 if (err)
3598 return err;
3599
3600 prog->nr_reloc++;
3601 }
3602 return 0;
3603}
3604
3605static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
3606{
3607 struct bpf_map_def *def = &map->def;
3608 __u32 key_type_id = 0, value_type_id = 0;
3609 int ret;
3610
3611 /* if it's BTF-defined map, we don't need to search for type IDs.
3612 * For struct_ops map, it does not need btf_key_type_id and
3613 * btf_value_type_id.
3614 */
3615 if (map->sec_idx == obj->efile.btf_maps_shndx ||
3616 bpf_map__is_struct_ops(map))
3617 return 0;
3618
3619 if (!bpf_map__is_internal(map)) {
3620 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
3621 def->value_size, &key_type_id,
3622 &value_type_id);
3623 } else {
3624 /*
3625 * LLVM annotates global data differently in BTF, that is,
3626 * only as '.data', '.bss' or '.rodata'.
3627 */
3628 ret = btf__find_by_name(obj->btf,
3629 libbpf_type_to_btf_name[map->libbpf_type]);
3630 }
3631 if (ret < 0)
3632 return ret;
3633
3634 map->btf_key_type_id = key_type_id;
3635 map->btf_value_type_id = bpf_map__is_internal(map) ?
3636 ret : value_type_id;
3637 return 0;
3638}
3639
3640int bpf_map__reuse_fd(struct bpf_map *map, int fd)
3641{
3642 struct bpf_map_info info = {};
3643 __u32 len = sizeof(info);
3644 int new_fd, err;
3645 char *new_name;
3646
3647 err = bpf_obj_get_info_by_fd(fd, &info, &len);
3648 if (err)
3649 return err;
3650
3651 new_name = strdup(info.name);
3652 if (!new_name)
3653 return -errno;
3654
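	/* open(2) "/" only to reserve an unused fd number; dup3(2) then
	 * duplicates the caller's map fd onto it with O_CLOEXEC set, so the
	 * copy kept by libbpf doesn't leak across exec.
	 */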
3655 new_fd = open("/", O_RDONLY | O_CLOEXEC);
3656 if (new_fd < 0) {
3657 err = -errno;
3658 goto err_free_new_name;
3659 }
3660
3661 new_fd = dup3(fd, new_fd, O_CLOEXEC);
3662 if (new_fd < 0) {
3663 err = -errno;
3664 goto err_close_new_fd;
3665 }
3666
3667 err = zclose(map->fd);
3668 if (err) {
3669 err = -errno;
3670 goto err_close_new_fd;
3671 }
3672 free(map->name);
3673
3674 map->fd = new_fd;
3675 map->name = new_name;
3676 map->def.type = info.type;
3677 map->def.key_size = info.key_size;
3678 map->def.value_size = info.value_size;
3679 map->def.max_entries = info.max_entries;
3680 map->def.map_flags = info.map_flags;
3681 map->btf_key_type_id = info.btf_key_type_id;
3682 map->btf_value_type_id = info.btf_value_type_id;
3683 map->reused = true;
3684
3685 return 0;
3686
3687err_close_new_fd:
3688 close(new_fd);
3689err_free_new_name:
3690 free(new_name);
3691 return err;
3692}
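
/*
 * Typical (illustrative) usage, reusing a map already pinned in BPF FS
 * before the object is loaded; the pin path is an example:
 *
 *	int pinned_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *
 *	if (pinned_fd >= 0)
 *		err = bpf_map__reuse_fd(map, pinned_fd);
 *
 * bpf_obj_get() is the libbpf wrapper around the BPF_OBJ_GET command.
 */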
3693
3694__u32 bpf_map__max_entries(const struct bpf_map *map)
3695{
3696 return map->def.max_entries;
3697}
3698
3699int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
3700{
3701 if (map->fd >= 0)
3702 return -EBUSY;
3703 map->def.max_entries = max_entries;
3704 return 0;
3705}
3706
3707int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
3708{
3709 if (!map || !max_entries)
3710 return -EINVAL;
3711
3712 return bpf_map__set_max_entries(map, max_entries);
3713}
3714
3715static int
3716bpf_object__probe_loading(struct bpf_object *obj)
3717{
3718 struct bpf_load_program_attr attr;
3719 char *cp, errmsg[STRERR_BUFSIZE];
3720 struct bpf_insn insns[] = {
3721 BPF_MOV64_IMM(BPF_REG_0, 0),
3722 BPF_EXIT_INSN(),
3723 };
3724 int ret;
3725
3726 /* make sure basic loading works */
3727
3728 memset(&attr, 0, sizeof(attr));
3729 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3730 attr.insns = insns;
3731 attr.insns_cnt = ARRAY_SIZE(insns);
3732 attr.license = "GPL";
3733
3734 ret = bpf_load_program_xattr(&attr, NULL, 0);
3735 if (ret < 0) {
3736 ret = errno;
3737 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3738 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
3739 "program. Make sure your kernel supports BPF "
3740 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
3741 				"set to a big enough value.\n", __func__, cp, ret);
3742 return -ret;
3743 }
3744 close(ret);
3745
3746 return 0;
3747}
3748
3749static int probe_fd(int fd)
3750{
3751 if (fd >= 0)
3752 close(fd);
3753 return fd >= 0;
3754}
3755
3756static int probe_kern_prog_name(void)
3757{
3758 struct bpf_load_program_attr attr;
3759 struct bpf_insn insns[] = {
3760 BPF_MOV64_IMM(BPF_REG_0, 0),
3761 BPF_EXIT_INSN(),
3762 };
3763 int ret;
3764
3765 /* make sure loading with name works */
3766
3767 memset(&attr, 0, sizeof(attr));
3768 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3769 attr.insns = insns;
3770 attr.insns_cnt = ARRAY_SIZE(insns);
3771 attr.license = "GPL";
3772 attr.name = "test";
3773 ret = bpf_load_program_xattr(&attr, NULL, 0);
3774 return probe_fd(ret);
3775}
3776
3777static int probe_kern_global_data(void)
3778{
3779 struct bpf_load_program_attr prg_attr;
3780 struct bpf_create_map_attr map_attr;
3781 char *cp, errmsg[STRERR_BUFSIZE];
3782 struct bpf_insn insns[] = {
3783 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
3784 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
3785 BPF_MOV64_IMM(BPF_REG_0, 0),
3786 BPF_EXIT_INSN(),
3787 };
3788 int ret, map;
3789
3790 memset(&map_attr, 0, sizeof(map_attr));
3791 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
3792 map_attr.key_size = sizeof(int);
3793 map_attr.value_size = 32;
3794 map_attr.max_entries = 1;
3795
3796 map = bpf_create_map_xattr(&map_attr);
3797 if (map < 0) {
3798 ret = -errno;
3799 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3800 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
3801 __func__, cp, -ret);
3802 return ret;
3803 }
3804
3805 insns[0].imm = map;
3806
3807 memset(&prg_attr, 0, sizeof(prg_attr));
3808 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3809 prg_attr.insns = insns;
3810 prg_attr.insns_cnt = ARRAY_SIZE(insns);
3811 prg_attr.license = "GPL";
3812
3813 ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
3814 close(map);
3815 return probe_fd(ret);
3816}
3817
3818static int probe_kern_btf(void)
3819{
3820 static const char strs[] = "\0int";
3821 __u32 types[] = {
3822 /* int */
3823 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
3824 };
3825
3826 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3827 strs, sizeof(strs)));
3828}
3829
3830static int probe_kern_btf_func(void)
3831{
3832 static const char strs[] = "\0int\0x\0a";
3833 /* void x(int a) {} */
3834 __u32 types[] = {
3835 /* int */
3836 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3837 /* FUNC_PROTO */ /* [2] */
3838 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3839 BTF_PARAM_ENC(7, 1),
3840 /* FUNC x */ /* [3] */
3841 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
3842 };
3843
3844 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3845 strs, sizeof(strs)));
3846}
3847
3848static int probe_kern_btf_func_global(void)
3849{
3850 static const char strs[] = "\0int\0x\0a";
3851 	/* global function: void x(int a) {} */
3852 __u32 types[] = {
3853 /* int */
3854 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3855 /* FUNC_PROTO */ /* [2] */
3856 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3857 BTF_PARAM_ENC(7, 1),
3858 /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
3859 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
3860 };
3861
3862 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3863 strs, sizeof(strs)));
3864}
3865
3866static int probe_kern_btf_datasec(void)
3867{
3868 static const char strs[] = "\0x\0.data";
3869 	/* static int x; */
3870 __u32 types[] = {
3871 /* int */
3872 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3873 /* VAR x */ /* [2] */
3874 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
3875 BTF_VAR_STATIC,
3876 /* DATASEC val */ /* [3] */
3877 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
3878 BTF_VAR_SECINFO_ENC(2, 0, 4),
3879 };
3880
3881 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3882 strs, sizeof(strs)));
3883}
3884
3885static int probe_kern_array_mmap(void)
3886{
3887 struct bpf_create_map_attr attr = {
3888 .map_type = BPF_MAP_TYPE_ARRAY,
3889 .map_flags = BPF_F_MMAPABLE,
3890 .key_size = sizeof(int),
3891 .value_size = sizeof(int),
3892 .max_entries = 1,
3893 };
3894
3895 return probe_fd(bpf_create_map_xattr(&attr));
3896}
3897
3898static int probe_kern_exp_attach_type(void)
3899{
3900 struct bpf_load_program_attr attr;
3901 struct bpf_insn insns[] = {
3902 BPF_MOV64_IMM(BPF_REG_0, 0),
3903 BPF_EXIT_INSN(),
3904 };
3905
3906 memset(&attr, 0, sizeof(attr));
3907 /* use any valid combination of program type and (optional)
3908 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
3909 * to see if kernel supports expected_attach_type field for
3910 * BPF_PROG_LOAD command
3911 */
3912 attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
3913 attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
3914 attr.insns = insns;
3915 attr.insns_cnt = ARRAY_SIZE(insns);
3916 attr.license = "GPL";
3917
3918 return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
3919}
3920
3921static int probe_kern_probe_read_kernel(void)
3922{
3923 struct bpf_load_program_attr attr;
3924 struct bpf_insn insns[] = {
3925 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
3926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
3927 BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
3928 BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
3929 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
3930 BPF_EXIT_INSN(),
3931 };
3932
3933 memset(&attr, 0, sizeof(attr));
3934 attr.prog_type = BPF_PROG_TYPE_KPROBE;
3935 attr.insns = insns;
3936 attr.insns_cnt = ARRAY_SIZE(insns);
3937 attr.license = "GPL";
3938
3939 return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
3940}
3941
3942static int probe_prog_bind_map(void)
3943{
3944 struct bpf_load_program_attr prg_attr;
3945 struct bpf_create_map_attr map_attr;
3946 char *cp, errmsg[STRERR_BUFSIZE];
3947 struct bpf_insn insns[] = {
3948 BPF_MOV64_IMM(BPF_REG_0, 0),
3949 BPF_EXIT_INSN(),
3950 };
3951 int ret, map, prog;
3952
3953 memset(&map_attr, 0, sizeof(map_attr));
3954 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
3955 map_attr.key_size = sizeof(int);
3956 map_attr.value_size = 32;
3957 map_attr.max_entries = 1;
3958
3959 map = bpf_create_map_xattr(&map_attr);
3960 if (map < 0) {
3961 ret = -errno;
3962 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3963 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
3964 __func__, cp, -ret);
3965 return ret;
3966 }
3967
3968 memset(&prg_attr, 0, sizeof(prg_attr));
3969 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3970 prg_attr.insns = insns;
3971 prg_attr.insns_cnt = ARRAY_SIZE(insns);
3972 prg_attr.license = "GPL";
3973
3974 prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
3975 if (prog < 0) {
3976 close(map);
3977 return 0;
3978 }
3979
3980 ret = bpf_prog_bind_map(prog, map, NULL);
3981
3982 close(map);
3983 close(prog);
3984
3985 return ret >= 0;
3986}
3987
3988static int probe_module_btf(void)
3989{
3990 static const char strs[] = "\0int";
3991 __u32 types[] = {
3992 /* int */
3993 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
3994 };
3995 struct bpf_btf_info info;
3996 __u32 len = sizeof(info);
3997 char name[16];
3998 int fd, err;
3999
4000 fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
4001 if (fd < 0)
4002 return 0; /* BTF not supported at all */
4003
4004 memset(&info, 0, sizeof(info));
4005 info.name = ptr_to_u64(name);
4006 info.name_len = sizeof(name);
4007
4008 /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
4009 * kernel's module BTF support coincides with support for
4010 * name/name_len fields in struct bpf_btf_info.
4011 */
4012 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4013 close(fd);
4014 return !err;
4015}
4016
4017enum kern_feature_result {
4018 FEAT_UNKNOWN = 0,
4019 FEAT_SUPPORTED = 1,
4020 FEAT_MISSING = 2,
4021};
4022
4023typedef int (*feature_probe_fn)(void);
4024
4025static struct kern_feature_desc {
4026 const char *desc;
4027 feature_probe_fn probe;
4028 enum kern_feature_result res;
4029} feature_probes[__FEAT_CNT] = {
4030 [FEAT_PROG_NAME] = {
4031 "BPF program name", probe_kern_prog_name,
4032 },
4033 [FEAT_GLOBAL_DATA] = {
4034 "global variables", probe_kern_global_data,
4035 },
4036 [FEAT_BTF] = {
4037 "minimal BTF", probe_kern_btf,
4038 },
4039 [FEAT_BTF_FUNC] = {
4040 "BTF functions", probe_kern_btf_func,
4041 },
4042 [FEAT_BTF_GLOBAL_FUNC] = {
4043 "BTF global function", probe_kern_btf_func_global,
4044 },
4045 [FEAT_BTF_DATASEC] = {
4046 "BTF data section and variable", probe_kern_btf_datasec,
4047 },
4048 [FEAT_ARRAY_MMAP] = {
4049 "ARRAY map mmap()", probe_kern_array_mmap,
4050 },
4051 [FEAT_EXP_ATTACH_TYPE] = {
4052 "BPF_PROG_LOAD expected_attach_type attribute",
4053 probe_kern_exp_attach_type,
4054 },
4055 [FEAT_PROBE_READ_KERN] = {
4056 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
4057 },
4058 [FEAT_PROG_BIND_MAP] = {
4059 "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
4060 },
4061 [FEAT_MODULE_BTF] = {
4062 "module BTF support", probe_module_btf,
4063 },
4064};
4065
4066static bool kernel_supports(enum kern_feature_id feat_id)
4067{
4068 struct kern_feature_desc *feat = &feature_probes[feat_id];
4069 int ret;
4070
4071 if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
4072 ret = feat->probe();
4073 if (ret > 0) {
4074 WRITE_ONCE(feat->res, FEAT_SUPPORTED);
4075 } else if (ret == 0) {
4076 WRITE_ONCE(feat->res, FEAT_MISSING);
4077 } else {
4078 pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
4079 WRITE_ONCE(feat->res, FEAT_MISSING);
4080 }
4081 }
4082
4083 return READ_ONCE(feat->res) == FEAT_SUPPORTED;
4084}
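
/* Illustrative usage note (a sketch, not part of the original sources):
 * kernel_supports() runs each probe at most once and caches the result, so
 * callers can gate optional behavior cheaply, e.g.:
 *
 *	if (kernel_supports(FEAT_ARRAY_MMAP))
 *		def->map_flags |= BPF_F_MMAPABLE;
 *
 * Wiring up a new detection follows the same pattern: add a value to
 * enum kern_feature_id (say, a hypothetical FEAT_FOO), implement a
 * probe_kern_foo() that returns 1/0/<0, and register it in feature_probes[]:
 *
 *	[FEAT_FOO] = {
 *		"hypothetical foo support", probe_kern_foo,
 *	},
 */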
4085
4086static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4087{
4088 struct bpf_map_info map_info = {};
4089 char msg[STRERR_BUFSIZE];
4090 __u32 map_info_len;
4091
4092 map_info_len = sizeof(map_info);
4093
4094 if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
4095 pr_warn("failed to get map info for map FD %d: %s\n",
4096 map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
4097 return false;
4098 }
4099
4100 return (map_info.type == map->def.type &&
4101 map_info.key_size == map->def.key_size &&
4102 map_info.value_size == map->def.value_size &&
4103 map_info.max_entries == map->def.max_entries &&
4104 map_info.map_flags == map->def.map_flags);
4105}
4106
4107static int
4108bpf_object__reuse_map(struct bpf_map *map)
4109{
4110 char *cp, errmsg[STRERR_BUFSIZE];
4111 int err, pin_fd;
4112
4113 pin_fd = bpf_obj_get(map->pin_path);
4114 if (pin_fd < 0) {
4115 err = -errno;
4116 if (err == -ENOENT) {
4117 pr_debug("found no pinned map to reuse at '%s'\n",
4118 map->pin_path);
4119 return 0;
4120 }
4121
4122 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4123 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4124 map->pin_path, cp);
4125 return err;
4126 }
4127
4128 if (!map_is_reuse_compat(map, pin_fd)) {
4129 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4130 map->pin_path);
4131 close(pin_fd);
4132 return -EINVAL;
4133 }
4134
4135 err = bpf_map__reuse_fd(map, pin_fd);
4136 if (err) {
4137 close(pin_fd);
4138 return err;
4139 }
4140 map->pinned = true;
4141 pr_debug("reused pinned map at '%s'\n", map->pin_path);
4142
4143 return 0;
4144}
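
/* Illustration (a minimal sketch, not part of the original sources): map
 * reuse is driven purely by the pin path. An application that wants several
 * loads to share one map could do roughly:
 *
 *	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, "my_map");
 *
 *	bpf_map__set_pin_path(m, "/sys/fs/bpf/my_map");
 *	bpf_object__load(obj);
 *
 * On the first load the map is created and pinned; on subsequent loads
 * bpf_object__reuse_map() finds the pinned object, checks it with
 * map_is_reuse_compat() and adopts its FD instead of creating a new map.
 * "prog.bpf.o" and "my_map" are hypothetical names.
 */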
4145
4146static int
4147bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4148{
4149 enum libbpf_map_type map_type = map->libbpf_type;
4150 char *cp, errmsg[STRERR_BUFSIZE];
4151 int err, zero = 0;
4152
4153 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4154 if (err) {
4155 err = -errno;
4156 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4157 pr_warn("Error setting initial map(%s) contents: %s\n",
4158 map->name, cp);
4159 return err;
4160 }
4161
4162 /* Freeze .rodata and .kconfig map as read-only from syscall side. */
4163 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
4164 err = bpf_map_freeze(map->fd);
4165 if (err) {
4166 err = -errno;
4167 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4168 pr_warn("Error freezing map(%s) as read-only: %s\n",
4169 map->name, cp);
4170 return err;
4171 }
4172 }
4173 return 0;
4174}
4175
4176static void bpf_map__destroy(struct bpf_map *map);
4177
4178static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
4179{
4180 struct bpf_create_map_attr create_attr;
4181 struct bpf_map_def *def = &map->def;
4182
4183 memset(&create_attr, 0, sizeof(create_attr));
4184
4185 if (kernel_supports(FEAT_PROG_NAME))
4186 create_attr.name = map->name;
4187 create_attr.map_ifindex = map->map_ifindex;
4188 create_attr.map_type = def->type;
4189 create_attr.map_flags = def->map_flags;
4190 create_attr.key_size = def->key_size;
4191 create_attr.value_size = def->value_size;
4192 create_attr.numa_node = map->numa_node;
4193
4194 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
4195 int nr_cpus;
4196
4197 nr_cpus = libbpf_num_possible_cpus();
4198 if (nr_cpus < 0) {
4199 pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
4200 map->name, nr_cpus);
4201 return nr_cpus;
4202 }
4203 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
4204 create_attr.max_entries = nr_cpus;
4205 } else {
4206 create_attr.max_entries = def->max_entries;
4207 }
4208
4209 if (bpf_map__is_struct_ops(map))
4210 create_attr.btf_vmlinux_value_type_id =
4211 map->btf_vmlinux_value_type_id;
4212
4213 create_attr.btf_fd = 0;
4214 create_attr.btf_key_type_id = 0;
4215 create_attr.btf_value_type_id = 0;
4216 if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
4217 create_attr.btf_fd = btf__fd(obj->btf);
4218 create_attr.btf_key_type_id = map->btf_key_type_id;
4219 create_attr.btf_value_type_id = map->btf_value_type_id;
4220 }
4221
4222 if (bpf_map_type__is_map_in_map(def->type)) {
4223 if (map->inner_map) {
4224 int err;
4225
4226 err = bpf_object__create_map(obj, map->inner_map);
4227 if (err) {
4228 pr_warn("map '%s': failed to create inner map: %d\n",
4229 map->name, err);
4230 return err;
4231 }
4232 map->inner_map_fd = bpf_map__fd(map->inner_map);
4233 }
4234 if (map->inner_map_fd >= 0)
4235 create_attr.inner_map_fd = map->inner_map_fd;
4236 }
4237
4238 map->fd = bpf_create_map_xattr(&create_attr);
4239 if (map->fd < 0 && (create_attr.btf_key_type_id ||
4240 create_attr.btf_value_type_id)) {
4241 char *cp, errmsg[STRERR_BUFSIZE];
4242 int err = -errno;
4243
4244 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4245 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
4246 map->name, cp, err);
4247 create_attr.btf_fd = 0;
4248 create_attr.btf_key_type_id = 0;
4249 create_attr.btf_value_type_id = 0;
4250 map->btf_key_type_id = 0;
4251 map->btf_value_type_id = 0;
4252 map->fd = bpf_create_map_xattr(&create_attr);
4253 }
4254
4255 if (map->fd < 0)
4256 return -errno;
4257
4258 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
4259 bpf_map__destroy(map->inner_map);
4260 zfree(&map->inner_map);
4261 }
4262
4263 return 0;
4264}
4265
4266static int init_map_slots(struct bpf_map *map)
4267{
4268 const struct bpf_map *targ_map;
4269 unsigned int i;
4270 int fd, err;
4271
4272 for (i = 0; i < map->init_slots_sz; i++) {
4273 if (!map->init_slots[i])
4274 continue;
4275
4276 targ_map = map->init_slots[i];
4277 fd = bpf_map__fd(targ_map);
4278 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
4279 if (err) {
4280 err = -errno;
4281 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
4282 map->name, i, targ_map->name,
4283 fd, err);
4284 return err;
4285 }
4286 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
4287 map->name, i, targ_map->name, fd);
4288 }
4289
4290 zfree(&map->init_slots);
4291 map->init_slots_sz = 0;
4292
4293 return 0;
4294}
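
/* Illustration (a sketch, not part of the original sources): init_slots is
 * filled in from declarative map-in-map initialization on the BPF side,
 * assuming the usual bpf_helpers.h conventions and hypothetical map names:
 *
 *	struct inner_map {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, int);
 *	} inner SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 4);
 *		__type(key, int);
 *		__array(values, struct inner_map);
 *	} outer SEC(".maps") = {
 *		.values = { [0] = &inner },
 *	};
 *
 * Each non-NULL .values entry becomes an init_slots[] pointer; once the outer
 * map exists, init_map_slots() writes the inner map's FD into the matching
 * slot via bpf_map_update_elem().
 */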
4295
4296static int
4297bpf_object__create_maps(struct bpf_object *obj)
4298{
4299 struct bpf_map *map;
4300 char *cp, errmsg[STRERR_BUFSIZE];
4301 unsigned int i, j;
4302 int err;
4303
4304 for (i = 0; i < obj->nr_maps; i++) {
4305 map = &obj->maps[i];
4306
4307 if (map->pin_path) {
4308 err = bpf_object__reuse_map(map);
4309 if (err) {
4310 pr_warn("map '%s': error reusing pinned map\n",
4311 map->name);
4312 goto err_out;
4313 }
4314 }
4315
4316 if (map->fd >= 0) {
4317 pr_debug("map '%s': skipping creation (preset fd=%d)\n",
4318 map->name, map->fd);
4319 } else {
4320 err = bpf_object__create_map(obj, map);
4321 if (err)
4322 goto err_out;
4323
4324 pr_debug("map '%s': created successfully, fd=%d\n",
4325 map->name, map->fd);
4326
4327 if (bpf_map__is_internal(map)) {
4328 err = bpf_object__populate_internal_map(obj, map);
4329 if (err < 0) {
4330 zclose(map->fd);
4331 goto err_out;
4332 }
4333 }
4334
4335 if (map->init_slots_sz) {
4336 err = init_map_slots(map);
4337 if (err < 0) {
4338 zclose(map->fd);
4339 goto err_out;
4340 }
4341 }
4342 }
4343
4344 if (map->pin_path && !map->pinned) {
4345 err = bpf_map__pin(map, NULL);
4346 if (err) {
4347 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
4348 map->name, map->pin_path, err);
4349 zclose(map->fd);
4350 goto err_out;
4351 }
4352 }
4353 }
4354
4355 return 0;
4356
4357err_out:
4358 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4359 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
4360 pr_perm_msg(err);
4361 for (j = 0; j < i; j++)
4362 zclose(obj->maps[j].fd);
4363 return err;
4364}
4365
4366#define BPF_CORE_SPEC_MAX_LEN 64
4367
4368/* represents BPF CO-RE field or array element accessor */
4369struct bpf_core_accessor {
4370 __u32 type_id; /* struct/union type or array element type */
4371 __u32 idx; /* field index or array index */
4372 const char *name; /* field name or NULL for array accessor */
4373};
4374
4375struct bpf_core_spec {
4376 const struct btf *btf;
4377 /* high-level spec: named fields and array indices only */
4378 struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
4379 /* original unresolved (no skip_mods_and_typedefs) root type ID */
4380 __u32 root_type_id;
4381 /* CO-RE relocation kind */
4382 enum bpf_core_relo_kind relo_kind;
4383 /* high-level spec length */
4384 int len;
4385 /* raw, low-level spec: 1-to-1 with accessor spec string */
4386 int raw_spec[BPF_CORE_SPEC_MAX_LEN];
4387 /* raw spec length */
4388 int raw_len;
4389 /* field bit offset represented by spec */
4390 __u32 bit_offset;
4391};
4392
4393static bool str_is_empty(const char *s)
4394{
4395 return !s || !s[0];
4396}
4397
4398static bool is_flex_arr(const struct btf *btf,
4399 const struct bpf_core_accessor *acc,
4400 const struct btf_array *arr)
4401{
4402 const struct btf_type *t;
4403
4404 /* not a flexible array if it's not inside a struct or has a non-zero size */
4405 if (!acc->name || arr->nelems > 0)
4406 return false;
4407
4408 /* has to be the last member of enclosing struct */
4409 t = btf__type_by_id(btf, acc->type_id);
4410 return acc->idx == btf_vlen(t) - 1;
4411}
4412
4413static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
4414{
4415 switch (kind) {
4416 case BPF_FIELD_BYTE_OFFSET: return "byte_off";
4417 case BPF_FIELD_BYTE_SIZE: return "byte_sz";
4418 case BPF_FIELD_EXISTS: return "field_exists";
4419 case BPF_FIELD_SIGNED: return "signed";
4420 case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
4421 case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
4422 case BPF_TYPE_ID_LOCAL: return "local_type_id";
4423 case BPF_TYPE_ID_TARGET: return "target_type_id";
4424 case BPF_TYPE_EXISTS: return "type_exists";
4425 case BPF_TYPE_SIZE: return "type_size";
4426 case BPF_ENUMVAL_EXISTS: return "enumval_exists";
4427 case BPF_ENUMVAL_VALUE: return "enumval_value";
4428 default: return "unknown";
4429 }
4430}
4431
4432static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
4433{
4434 switch (kind) {
4435 case BPF_FIELD_BYTE_OFFSET:
4436 case BPF_FIELD_BYTE_SIZE:
4437 case BPF_FIELD_EXISTS:
4438 case BPF_FIELD_SIGNED:
4439 case BPF_FIELD_LSHIFT_U64:
4440 case BPF_FIELD_RSHIFT_U64:
4441 return true;
4442 default:
4443 return false;
4444 }
4445}
4446
4447static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
4448{
4449 switch (kind) {
4450 case BPF_TYPE_ID_LOCAL:
4451 case BPF_TYPE_ID_TARGET:
4452 case BPF_TYPE_EXISTS:
4453 case BPF_TYPE_SIZE:
4454 return true;
4455 default:
4456 return false;
4457 }
4458}
4459
4460static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
4461{
4462 switch (kind) {
4463 case BPF_ENUMVAL_EXISTS:
4464 case BPF_ENUMVAL_VALUE:
4465 return true;
4466 default:
4467 return false;
4468 }
4469}
4470
4471/*
4472 * Turn bpf_core_relo into a low- and high-level spec representation,
4473 * validating correctness along the way, as well as calculating resulting
4474 * field bit offset, specified by accessor string. Low-level spec captures
4475 * every single level of nestedness, including traversing anonymous
4476 * struct/union members. High-level one only captures semantically meaningful
4477 * "turning points": named fields and array indicies.
4478 * E.g., for this case:
4479 *
4480 * struct sample {
4481 * int __unimportant;
4482 * struct {
4483 * int __1;
4484 * int __2;
4485 * int a[7];
4486 * };
4487 * };
4488 *
4489 * struct sample *s = ...;
4490 *
4491 * int *x = &s->a[3]; // access string = '0:1:2:3'
4492 *
4493 * Low-level spec has 1:1 mapping with each element of access string (it's
4494 * just a parsed access string representation): [0, 1, 2, 3].
4495 *
4496 * High-level spec will capture only 3 points:
4497 * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
4498 * - field 'a' access (corresponds to '2' in low-level spec);
4499 * - array element #3 access (corresponds to '3' in low-level spec).
4500 *
4501 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
4502 * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
4503 * spec and raw_spec are kept empty.
4504 *
4505 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
4506 * string to specify the enumerator's value index that needs to be relocated.
4507 */
4508static int bpf_core_parse_spec(const struct btf *btf,
4509 __u32 type_id,
4510 const char *spec_str,
4511 enum bpf_core_relo_kind relo_kind,
4512 struct bpf_core_spec *spec)
4513{
4514 int access_idx, parsed_len, i;
4515 struct bpf_core_accessor *acc;
4516 const struct btf_type *t;
4517 const char *name;
4518 __u32 id;
4519 __s64 sz;
4520
4521 if (str_is_empty(spec_str) || *spec_str == ':')
4522 return -EINVAL;
4523
4524 memset(spec, 0, sizeof(*spec));
4525 spec->btf = btf;
4526 spec->root_type_id = type_id;
4527 spec->relo_kind = relo_kind;
4528
4529 /* type-based relocations don't have a field access string */
4530 if (core_relo_is_type_based(relo_kind)) {
4531 if (strcmp(spec_str, "0"))
4532 return -EINVAL;
4533 return 0;
4534 }
4535
4536 /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
4537 while (*spec_str) {
4538 if (*spec_str == ':')
4539 ++spec_str;
4540 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
4541 return -EINVAL;
4542 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
4543 return -E2BIG;
4544 spec_str += parsed_len;
4545 spec->raw_spec[spec->raw_len++] = access_idx;
4546 }
4547
4548 if (spec->raw_len == 0)
4549 return -EINVAL;
4550
4551 t = skip_mods_and_typedefs(btf, type_id, &id);
4552 if (!t)
4553 return -EINVAL;
4554
4555 access_idx = spec->raw_spec[0];
4556 acc = &spec->spec[0];
4557 acc->type_id = id;
4558 acc->idx = access_idx;
4559 spec->len++;
4560
4561 if (core_relo_is_enumval_based(relo_kind)) {
4562 if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
4563 return -EINVAL;
4564
4565 /* record enumerator name in the first accessor */
4566 acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
4567 return 0;
4568 }
4569
4570 if (!core_relo_is_field_based(relo_kind))
4571 return -EINVAL;
4572
4573 sz = btf__resolve_size(btf, id);
4574 if (sz < 0)
4575 return sz;
4576 spec->bit_offset = access_idx * sz * 8;
4577
4578 for (i = 1; i < spec->raw_len; i++) {
4579 t = skip_mods_and_typedefs(btf, id, &id);
4580 if (!t)
4581 return -EINVAL;
4582
4583 access_idx = spec->raw_spec[i];
4584 acc = &spec->spec[spec->len];
4585
4586 if (btf_is_composite(t)) {
4587 const struct btf_member *m;
4588 __u32 bit_offset;
4589
4590 if (access_idx >= btf_vlen(t))
4591 return -EINVAL;
4592
4593 bit_offset = btf_member_bit_offset(t, access_idx);
4594 spec->bit_offset += bit_offset;
4595
4596 m = btf_members(t) + access_idx;
4597 if (m->name_off) {
4598 name = btf__name_by_offset(btf, m->name_off);
4599 if (str_is_empty(name))
4600 return -EINVAL;
4601
4602 acc->type_id = id;
4603 acc->idx = access_idx;
4604 acc->name = name;
4605 spec->len++;
4606 }
4607
4608 id = m->type;
4609 } else if (btf_is_array(t)) {
4610 const struct btf_array *a = btf_array(t);
4611 bool flex;
4612
4613 t = skip_mods_and_typedefs(btf, a->type, &id);
4614 if (!t)
4615 return -EINVAL;
4616
4617 flex = is_flex_arr(btf, acc - 1, a);
4618 if (!flex && access_idx >= a->nelems)
4619 return -EINVAL;
4620
4621 spec->spec[spec->len].type_id = id;
4622 spec->spec[spec->len].idx = access_idx;
4623 spec->len++;
4624
4625 sz = btf__resolve_size(btf, id);
4626 if (sz < 0)
4627 return sz;
4628 spec->bit_offset += access_idx * sz * 8;
4629 } else {
4630 pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
4631 type_id, spec_str, i, id, btf_kind_str(t));
4632 return -EINVAL;
4633 }
4634 }
4635
4636 return 0;
4637}
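
/* Worked example (illustrative, based on the "struct sample" example in the
 * comment above): parsing access string "0:1:2:3" against struct sample
 * produces
 *
 *	raw_spec = [0, 1, 2, 3], raw_len = 4
 *	spec[0] = { struct sample, idx 0 }       (initial pointer dereference)
 *	spec[1] = { anon struct,   idx 2, "a" }  (named field 'a')
 *	spec[2] = { int,           idx 3 }       (array element #3)
 *	len = 3
 *	bit_offset = 192  (byte offset 24 = 12, the offset of 'a', + 3 * 4)
 *
 * The anonymous struct member contributes to bit_offset but adds no
 * high-level accessor, since it has no name.
 */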
4638
4639static bool bpf_core_is_flavor_sep(const char *s)
4640{
4641 /* check X___Y name pattern, where X and Y are not underscores */
4642 return s[0] != '_' && /* X */
4643 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
4644 s[4] != '_'; /* Y */
4645}
4646
4647/* Given 'some_struct_name___with_flavor' return the length of a name prefix
4648 * before last triple underscore. Struct name part after last triple
4649 * underscore is ignored by BPF CO-RE relocation during relocation matching.
4650 */
4651static size_t bpf_core_essential_name_len(const char *name)
4652{
4653 size_t n = strlen(name);
4654 int i;
4655
4656 for (i = n - 5; i >= 0; i--) {
4657 if (bpf_core_is_flavor_sep(name + i))
4658 return i + 1;
4659 }
4660 return n;
4661}
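
/* E.g., bpf_core_essential_name_len("sample___flavor_one") == 6, the length
 * of "sample", while a name without a flavor suffix keeps its full length.
 */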
4662
4663struct core_cand
4664{
4665 const struct btf *btf;
4666 const struct btf_type *t;
4667 const char *name;
4668 __u32 id;
4669};
4670
4671/* dynamically sized list of type IDs and its associated struct btf */
4672struct core_cand_list {
4673 struct core_cand *cands;
4674 int len;
4675};
4676
4677static void bpf_core_free_cands(struct core_cand_list *cands)
4678{
4679 free(cands->cands);
4680 free(cands);
4681}
4682
4683static int bpf_core_add_cands(struct core_cand *local_cand,
4684 size_t local_essent_len,
4685 const struct btf *targ_btf,
4686 const char *targ_btf_name,
4687 int targ_start_id,
4688 struct core_cand_list *cands)
4689{
4690 struct core_cand *new_cands, *cand;
4691 const struct btf_type *t;
4692 const char *targ_name;
4693 size_t targ_essent_len;
4694 int n, i;
4695
4696 n = btf__get_nr_types(targ_btf);
4697 for (i = targ_start_id; i <= n; i++) {
4698 t = btf__type_by_id(targ_btf, i);
4699 if (btf_kind(t) != btf_kind(local_cand->t))
4700 continue;
4701
4702 targ_name = btf__name_by_offset(targ_btf, t->name_off);
4703 if (str_is_empty(targ_name))
4704 continue;
4705
4706 targ_essent_len = bpf_core_essential_name_len(targ_name);
4707 if (targ_essent_len != local_essent_len)
4708 continue;
4709
4710 if (strncmp(local_cand->name, targ_name, local_essent_len) != 0)
4711 continue;
4712
4713 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
4714 local_cand->id, btf_kind_str(local_cand->t),
4715 local_cand->name, i, btf_kind_str(t), targ_name,
4716 targ_btf_name);
4717 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
4718 sizeof(*cands->cands));
4719 if (!new_cands)
4720 return -ENOMEM;
4721
4722 cand = &new_cands[cands->len];
4723 cand->btf = targ_btf;
4724 cand->t = t;
4725 cand->name = targ_name;
4726 cand->id = i;
4727
4728 cands->cands = new_cands;
4729 cands->len++;
4730 }
4731 return 0;
4732}
4733
4734static int load_module_btfs(struct bpf_object *obj)
4735{
4736 struct bpf_btf_info info;
4737 struct module_btf *mod_btf;
4738 struct btf *btf;
4739 char name[64];
4740 __u32 id = 0, len;
4741 int err, fd;
4742
4743 if (obj->btf_modules_loaded)
4744 return 0;
4745
4746 /* don't do this again, even if we find no module BTFs */
4747 obj->btf_modules_loaded = true;
4748
4749 /* kernel too old to support module BTFs */
4750 if (!kernel_supports(FEAT_MODULE_BTF))
4751 return 0;
4752
4753 while (true) {
4754 err = bpf_btf_get_next_id(id, &id);
4755 if (err && errno == ENOENT)
4756 return 0;
4757 if (err) {
4758 err = -errno;
4759 pr_warn("failed to iterate BTF objects: %d\n", err);
4760 return err;
4761 }
4762
4763 fd = bpf_btf_get_fd_by_id(id);
4764 if (fd < 0) {
4765 if (errno == ENOENT)
4766 continue; /* expected race: BTF was unloaded */
4767 err = -errno;
4768 pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
4769 return err;
4770 }
4771
4772 len = sizeof(info);
4773 memset(&info, 0, sizeof(info));
4774 info.name = ptr_to_u64(name);
4775 info.name_len = sizeof(name);
4776
4777 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4778 if (err) {
4779 err = -errno;
4780 pr_warn("failed to get BTF object #%d info: %d\n", id, err);
4781 goto err_out;
4782 }
4783
4784 /* ignore non-module BTFs */
4785 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
4786 close(fd);
4787 continue;
4788 }
4789
4790 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
4791 if (IS_ERR(btf)) {
4792 pr_warn("failed to load module [%s]'s BTF object #%d: %ld\n",
4793 name, id, PTR_ERR(btf));
4794 err = PTR_ERR(btf);
4795 goto err_out;
4796 }
4797
4798 err = btf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
4799 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
4800 if (err)
4801 goto err_out;
4802
4803 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
4804
4805 mod_btf->btf = btf;
4806 mod_btf->id = id;
4807 mod_btf->fd = fd;
4808 mod_btf->name = strdup(name);
4809 if (!mod_btf->name) {
4810 err = -ENOMEM;
4811 goto err_out;
4812 }
4813 continue;
4814
4815err_out:
4816 close(fd);
4817 return err;
4818 }
4819
4820 return 0;
4821}
4822
4823static struct core_cand_list *
4824bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
4825{
4826 struct core_cand local_cand = {};
4827 struct core_cand_list *cands;
4828 const struct btf *main_btf;
4829 size_t local_essent_len;
4830 int err, i;
4831
4832 local_cand.btf = local_btf;
4833 local_cand.t = btf__type_by_id(local_btf, local_type_id);
4834 if (!local_cand.t)
4835 return ERR_PTR(-EINVAL);
4836
4837 local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off);
4838 if (str_is_empty(local_cand.name))
4839 return ERR_PTR(-EINVAL);
4840 local_essent_len = bpf_core_essential_name_len(local_cand.name);
4841
4842 cands = calloc(1, sizeof(*cands));
4843 if (!cands)
4844 return ERR_PTR(-ENOMEM);
4845
4846 /* Attempt to find target candidates in vmlinux BTF first */
4847 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
4848 err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
4849 if (err)
4850 goto err_out;
4851
4852 /* if vmlinux BTF has any candidate, don't go for module BTFs */
4853 if (cands->len)
4854 return cands;
4855
4856 /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
4857 if (obj->btf_vmlinux_override)
4858 return cands;
4859
4860 /* now look through module BTFs, trying to still find candidates */
4861 err = load_module_btfs(obj);
4862 if (err)
4863 goto err_out;
4864
4865 for (i = 0; i < obj->btf_module_cnt; i++) {
4866 err = bpf_core_add_cands(&local_cand, local_essent_len,
4867 obj->btf_modules[i].btf,
4868 obj->btf_modules[i].name,
4869 btf__get_nr_types(obj->btf_vmlinux) + 1,
4870 cands);
4871 if (err)
4872 goto err_out;
4873 }
4874
4875 return cands;
4876err_out:
4877 bpf_core_free_cands(cands);
4878 return ERR_PTR(err);
4879}
4880
4881/* Check two types for compatibility for the purpose of field access
4882 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
4883 * are relocating semantically compatible entities:
4884 * - any two STRUCTs/UNIONs are compatible and can be mixed;
4885 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
4886 * - any two PTRs are always compatible;
4887 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
4888 * least one of the enums should be anonymous;
4889 * - for ENUMs, check sizes, names are ignored;
4890 * - for INT, size and signedness are ignored;
4891 * - for ARRAY, dimensionality is ignored, element types are checked for
4892 * compatibility recursively;
4893 * - everything else shouldn't be ever a target of relocation.
4894 * These rules are not set in stone and probably will be adjusted as we get
4895 * more experience with using BPF CO-RE relocations.
4896 */
4897static int bpf_core_fields_are_compat(const struct btf *local_btf,
4898 __u32 local_id,
4899 const struct btf *targ_btf,
4900 __u32 targ_id)
4901{
4902 const struct btf_type *local_type, *targ_type;
4903
4904recur:
4905 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
4906 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
4907 if (!local_type || !targ_type)
4908 return -EINVAL;
4909
4910 if (btf_is_composite(local_type) && btf_is_composite(targ_type))
4911 return 1;
4912 if (btf_kind(local_type) != btf_kind(targ_type))
4913 return 0;
4914
4915 switch (btf_kind(local_type)) {
4916 case BTF_KIND_PTR:
4917 return 1;
4918 case BTF_KIND_FWD:
4919 case BTF_KIND_ENUM: {
4920 const char *local_name, *targ_name;
4921 size_t local_len, targ_len;
4922
4923 local_name = btf__name_by_offset(local_btf,
4924 local_type->name_off);
4925 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
4926 local_len = bpf_core_essential_name_len(local_name);
4927 targ_len = bpf_core_essential_name_len(targ_name);
4928 /* one of them is anonymous or both w/ same flavor-less names */
4929 return local_len == 0 || targ_len == 0 ||
4930 (local_len == targ_len &&
4931 strncmp(local_name, targ_name, local_len) == 0);
4932 }
4933 case BTF_KIND_INT:
4934 /* just reject deprecated bitfield-like integers; all other
4935 * integers are by default compatible with each other
4936 */
4937 return btf_int_offset(local_type) == 0 &&
4938 btf_int_offset(targ_type) == 0;
4939 case BTF_KIND_ARRAY:
4940 local_id = btf_array(local_type)->type;
4941 targ_id = btf_array(targ_type)->type;
4942 goto recur;
4943 default:
4944 pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
4945 btf_kind(local_type), local_id, targ_id);
4946 return 0;
4947 }
4948}
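
/* Illustrative examples of the rules above (not exhaustive):
 *
 *	local: struct S { ... }   target: union S___v2 { ... }  -> compatible (composites mix)
 *	local: int                target: long                  -> compatible (size/signedness ignored)
 *	local: int a[4]           target: int a[16]             -> compatible (only element type matters)
 *	local: int                target: struct S              -> not compatible (kind mismatch)
 */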
4949
4950/*
4951 * Given single high-level named field accessor in local type, find
4952 * corresponding high-level accessor for a target type. Along the way,
4953 * maintain low-level spec for target as well. Also keep updating target
4954 * bit offset.
4955 *
4956 * Searching is performed through recursive exhaustive enumeration of all
4957 * fields of a struct/union. If there are any anonymous (embedded)
4958 * structs/unions, they are recursively searched as well. If field with
4959 * desired name is found, check compatibility between local and target types,
4960 * before returning result.
4961 *
4962 * 1 is returned, if field is found.
4963 * 0 is returned if no compatible field is found.
4964 * <0 is returned on error.
4965 */
4966static int bpf_core_match_member(const struct btf *local_btf,
4967 const struct bpf_core_accessor *local_acc,
4968 const struct btf *targ_btf,
4969 __u32 targ_id,
4970 struct bpf_core_spec *spec,
4971 __u32 *next_targ_id)
4972{
4973 const struct btf_type *local_type, *targ_type;
4974 const struct btf_member *local_member, *m;
4975 const char *local_name, *targ_name;
4976 __u32 local_id;
4977 int i, n, found;
4978
4979 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
4980 if (!targ_type)
4981 return -EINVAL;
4982 if (!btf_is_composite(targ_type))
4983 return 0;
4984
4985 local_id = local_acc->type_id;
4986 local_type = btf__type_by_id(local_btf, local_id);
4987 local_member = btf_members(local_type) + local_acc->idx;
4988 local_name = btf__name_by_offset(local_btf, local_member->name_off);
4989
4990 n = btf_vlen(targ_type);
4991 m = btf_members(targ_type);
4992 for (i = 0; i < n; i++, m++) {
4993 __u32 bit_offset;
4994
4995 bit_offset = btf_member_bit_offset(targ_type, i);
4996
4997 /* too deep struct/union/array nesting */
4998 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
4999 return -E2BIG;
5000
5001 /* speculate this member will be the good one */
5002 spec->bit_offset += bit_offset;
5003 spec->raw_spec[spec->raw_len++] = i;
5004
5005 targ_name = btf__name_by_offset(targ_btf, m->name_off);
5006 if (str_is_empty(targ_name)) {
5007 /* embedded struct/union, we need to go deeper */
5008 found = bpf_core_match_member(local_btf, local_acc,
5009 targ_btf, m->type,
5010 spec, next_targ_id);
5011 if (found) /* either found or error */
5012 return found;
5013 } else if (strcmp(local_name, targ_name) == 0) {
5014 /* matching named field */
5015 struct bpf_core_accessor *targ_acc;
5016
5017 targ_acc = &spec->spec[spec->len++];
5018 targ_acc->type_id = targ_id;
5019 targ_acc->idx = i;
5020 targ_acc->name = targ_name;
5021
5022 *next_targ_id = m->type;
5023 found = bpf_core_fields_are_compat(local_btf,
5024 local_member->type,
5025 targ_btf, m->type);
5026 if (!found)
5027 spec->len--; /* pop accessor */
5028 return found;
5029 }
5030 /* member turned out not to be what we looked for */
5031 spec->bit_offset -= bit_offset;
5032 spec->raw_len--;
5033 }
5034
5035 return 0;
5036}
5037
5038/* Check local and target types for compatibility. This check is used for
5039 * type-based CO-RE relocations and follows slightly different rules than
5040 * field-based relocations. This function assumes that root types were already
5041 * checked for name match. Beyond that initial root-level name check, names
5042 * are completely ignored. Compatibility rules are as follows:
5043 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5044 * kind should match for local and target types (i.e., STRUCT is not
5045 * compatible with UNION);
5046 * - for ENUMs, the size is ignored;
5047 * - for INT, size and signedness are ignored;
5048 * - for ARRAY, dimensionality is ignored, element types are checked for
5049 * compatibility recursively;
5050 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
5051 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5052 * - FUNC_PROTOs are compatible if they have compatible signature: same
5053 * number of input args and compatible return and argument types.
5054 * These rules are not set in stone and probably will be adjusted as we get
5055 * more experience with using BPF CO-RE relocations.
5056 */
5057static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5058 const struct btf *targ_btf, __u32 targ_id)
5059{
5060 const struct btf_type *local_type, *targ_type;
5061 int depth = 32; /* max recursion depth */
5062
5063 /* caller made sure that names match (ignoring flavor suffix) */
5064 local_type = btf__type_by_id(local_btf, local_id);
5065 targ_type = btf__type_by_id(targ_btf, targ_id);
5066 if (btf_kind(local_type) != btf_kind(targ_type))
5067 return 0;
5068
5069recur:
5070 depth--;
5071 if (depth < 0)
5072 return -EINVAL;
5073
5074 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
5075 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5076 if (!local_type || !targ_type)
5077 return -EINVAL;
5078
5079 if (btf_kind(local_type) != btf_kind(targ_type))
5080 return 0;
5081
5082 switch (btf_kind(local_type)) {
5083 case BTF_KIND_UNKN:
5084 case BTF_KIND_STRUCT:
5085 case BTF_KIND_UNION:
5086 case BTF_KIND_ENUM:
5087 case BTF_KIND_FWD:
5088 return 1;
5089 case BTF_KIND_INT:
5090 /* just reject deprecated bitfield-like integers; all other
5091 * integers are by default compatible with each other
5092 */
5093 return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
5094 case BTF_KIND_PTR:
5095 local_id = local_type->type;
5096 targ_id = targ_type->type;
5097 goto recur;
5098 case BTF_KIND_ARRAY:
5099 local_id = btf_array(local_type)->type;
5100 targ_id = btf_array(targ_type)->type;
5101 goto recur;
5102 case BTF_KIND_FUNC_PROTO: {
5103 struct btf_param *local_p = btf_params(local_type);
5104 struct btf_param *targ_p = btf_params(targ_type);
5105 __u16 local_vlen = btf_vlen(local_type);
5106 __u16 targ_vlen = btf_vlen(targ_type);
5107 int i, err;
5108
5109 if (local_vlen != targ_vlen)
5110 return 0;
5111
5112 for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
5113 skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
5114 skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
5115 err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
5116 if (err <= 0)
5117 return err;
5118 }
5119
5120 /* tail recurse for return type check */
5121 skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
5122 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
5123 goto recur;
5124 }
5125 default:
5126 pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
5127 btf_kind_str(local_type), local_id, targ_id);
5128 return 0;
5129 }
5130}
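
/* Illustrative example (hypothetical prototypes): under these rules a local
 *
 *	int (*)(struct file *f, long arg)
 *
 * is compatible with a target
 *
 *	long (*)(struct file___v2 *f, int arg)
 *
 * (same number of arguments, struct/struct and int/long pairs are compatible,
 * return types are compatible), but not with
 *
 *	int (*)(struct file *f)
 *
 * because the argument counts differ.
 */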
5131
5132/*
5133 * Try to match local spec to a target type and, if successful, produce full
5134 * target spec (high-level, low-level + bit offset).
5135 */
5136static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
5137 const struct btf *targ_btf, __u32 targ_id,
5138 struct bpf_core_spec *targ_spec)
5139{
5140 const struct btf_type *targ_type;
5141 const struct bpf_core_accessor *local_acc;
5142 struct bpf_core_accessor *targ_acc;
5143 int i, sz, matched;
5144
5145 memset(targ_spec, 0, sizeof(*targ_spec));
5146 targ_spec->btf = targ_btf;
5147 targ_spec->root_type_id = targ_id;
5148 targ_spec->relo_kind = local_spec->relo_kind;
5149
5150 if (core_relo_is_type_based(local_spec->relo_kind)) {
5151 return bpf_core_types_are_compat(local_spec->btf,
5152 local_spec->root_type_id,
5153 targ_btf, targ_id);
5154 }
5155
5156 local_acc = &local_spec->spec[0];
5157 targ_acc = &targ_spec->spec[0];
5158
5159 if (core_relo_is_enumval_based(local_spec->relo_kind)) {
5160 size_t local_essent_len, targ_essent_len;
5161 const struct btf_enum *e;
5162 const char *targ_name;
5163
5164 /* has to resolve to an enum */
5165 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
5166 if (!btf_is_enum(targ_type))
5167 return 0;
5168
5169 local_essent_len = bpf_core_essential_name_len(local_acc->name);
5170
5171 for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
5172 targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
5173 targ_essent_len = bpf_core_essential_name_len(targ_name);
5174 if (targ_essent_len != local_essent_len)
5175 continue;
5176 if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
5177 targ_acc->type_id = targ_id;
5178 targ_acc->idx = i;
5179 targ_acc->name = targ_name;
5180 targ_spec->len++;
5181 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
5182 targ_spec->raw_len++;
5183 return 1;
5184 }
5185 }
5186 return 0;
5187 }
5188
5189 if (!core_relo_is_field_based(local_spec->relo_kind))
5190 return -EINVAL;
5191
5192 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
5193 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
5194 &targ_id);
5195 if (!targ_type)
5196 return -EINVAL;
5197
5198 if (local_acc->name) {
5199 matched = bpf_core_match_member(local_spec->btf,
5200 local_acc,
5201 targ_btf, targ_id,
5202 targ_spec, &targ_id);
5203 if (matched <= 0)
5204 return matched;
5205 } else {
5206 /* for i=0, targ_id is already treated as array element
5207 * type (because it's the original struct), for others
5208 * we should find array element type first
5209 */
5210 if (i > 0) {
5211 const struct btf_array *a;
5212 bool flex;
5213
5214 if (!btf_is_array(targ_type))
5215 return 0;
5216
5217 a = btf_array(targ_type);
5218 flex = is_flex_arr(targ_btf, targ_acc - 1, a);
5219 if (!flex && local_acc->idx >= a->nelems)
5220 return 0;
5221 if (!skip_mods_and_typedefs(targ_btf, a->type,
5222 &targ_id))
5223 return -EINVAL;
5224 }
5225
5226 /* too deep struct/union/array nesting */
5227 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
5228 return -E2BIG;
5229
5230 targ_acc->type_id = targ_id;
5231 targ_acc->idx = local_acc->idx;
5232 targ_acc->name = NULL;
5233 targ_spec->len++;
5234 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
5235 targ_spec->raw_len++;
5236
5237 sz = btf__resolve_size(targ_btf, targ_id);
5238 if (sz < 0)
5239 return sz;
5240 targ_spec->bit_offset += local_acc->idx * sz * 8;
5241 }
5242 }
5243
5244 return 1;
5245}
5246
5247static int bpf_core_calc_field_relo(const struct bpf_program *prog,
5248 const struct bpf_core_relo *relo,
5249 const struct bpf_core_spec *spec,
5250 __u32 *val, __u32 *field_sz, __u32 *type_id,
5251 bool *validate)
5252{
5253 const struct bpf_core_accessor *acc;
5254 const struct btf_type *t;
5255 __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
5256 const struct btf_member *m;
5257 const struct btf_type *mt;
5258 bool bitfield;
5259 __s64 sz;
5260
5261 *field_sz = 0;
5262
5263 if (relo->kind == BPF_FIELD_EXISTS) {
5264 *val = spec ? 1 : 0;
5265 return 0;
5266 }
5267
5268 if (!spec)
5269 return -EUCLEAN; /* request instruction poisoning */
5270
5271 acc = &spec->spec[spec->len - 1];
5272 t = btf__type_by_id(spec->btf, acc->type_id);
5273
5274 /* a[n] accessor needs special handling */
5275 if (!acc->name) {
5276 if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
5277 *val = spec->bit_offset / 8;
5278 /* remember field size for load/store mem size */
5279 sz = btf__resolve_size(spec->btf, acc->type_id);
5280 if (sz < 0)
5281 return -EINVAL;
5282 *field_sz = sz;
5283 *type_id = acc->type_id;
5284 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
5285 sz = btf__resolve_size(spec->btf, acc->type_id);
5286 if (sz < 0)
5287 return -EINVAL;
5288 *val = sz;
5289 } else {
5290 pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
5291 prog->name, relo->kind, relo->insn_off / 8);
5292 return -EINVAL;
5293 }
5294 if (validate)
5295 *validate = true;
5296 return 0;
5297 }
5298
5299 m = btf_members(t) + acc->idx;
5300 mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
5301 bit_off = spec->bit_offset;
5302 bit_sz = btf_member_bitfield_size(t, acc->idx);
5303
5304 bitfield = bit_sz > 0;
5305 if (bitfield) {
5306 byte_sz = mt->size;
5307 byte_off = bit_off / 8 / byte_sz * byte_sz;
5308 /* figure out smallest int size necessary for bitfield load */
5309 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
5310 if (byte_sz >= 8) {
5311 /* bitfield can't be read with 64-bit read */
5312 pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
5313 prog->name, relo->kind, relo->insn_off / 8);
5314 return -E2BIG;
5315 }
5316 byte_sz *= 2;
5317 byte_off = bit_off / 8 / byte_sz * byte_sz;
5318 }
5319 } else {
5320 sz = btf__resolve_size(spec->btf, field_type_id);
5321 if (sz < 0)
5322 return -EINVAL;
5323 byte_sz = sz;
5324 byte_off = spec->bit_offset / 8;
5325 bit_sz = byte_sz * 8;
5326 }
5327
5328 /* for bitfields, all the relocatable aspects are ambiguous and we
5329 * might disagree with compiler, so turn off validation of expected
5330 * value, except for signedness
5331 */
5332 if (validate)
5333 *validate = !bitfield;
5334
5335 switch (relo->kind) {
5336 case BPF_FIELD_BYTE_OFFSET:
5337 *val = byte_off;
5338 if (!bitfield) {
5339 *field_sz = byte_sz;
5340 *type_id = field_type_id;
5341 }
5342 break;
5343 case BPF_FIELD_BYTE_SIZE:
5344 *val = byte_sz;
5345 break;
5346 case BPF_FIELD_SIGNED:
5347 /* enums will be assumed unsigned */
5348 *val = btf_is_enum(mt) ||
5349 (btf_int_encoding(mt) & BTF_INT_SIGNED);
5350 if (validate)
5351 *validate = true; /* signedness is never ambiguous */
5352 break;
5353 case BPF_FIELD_LSHIFT_U64:
5354#if __BYTE_ORDER == __LITTLE_ENDIAN
5355 *val = 64 - (bit_off + bit_sz - byte_off * 8);
5356#else
5357 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
5358#endif
5359 break;
5360 case BPF_FIELD_RSHIFT_U64:
5361 *val = 64 - bit_sz;
5362 if (validate)
5363 *validate = true; /* right shift is never ambiguous */
5364 break;
5365 case BPF_FIELD_EXISTS:
5366 default:
5367 return -EOPNOTSUPP;
5368 }
5369
5370 return 0;
5371}
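
/* Worked bitfield example (illustrative): for a 5-bit bitfield declared
 * within a 4-byte int member at bit_off = 30 in the target struct, the loop
 * above settles on byte_off = 0 and byte_sz = 8, because a 4-byte load at
 * byte 0 would only cover bits 0..31, not bit 34. On a little-endian host
 * the resulting relocation values are:
 *
 *	BPF_FIELD_BYTE_OFFSET  -> 0
 *	BPF_FIELD_BYTE_SIZE    -> 8
 *	BPF_FIELD_LSHIFT_U64   -> 64 - (30 + 5 - 0) = 29
 *	BPF_FIELD_RSHIFT_U64   -> 64 - 5 = 59
 *
 * i.e., the generated code loads 8 bytes, shifts left by 29 and right by 59
 * to extract the bitfield value.
 */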
5372
5373static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
5374 const struct bpf_core_spec *spec,
5375 __u32 *val)
5376{
5377 __s64 sz;
5378
5379 /* type-based relos return zero when target type is not found */
5380 if (!spec) {
5381 *val = 0;
5382 return 0;
5383 }
5384
5385 switch (relo->kind) {
5386 case BPF_TYPE_ID_TARGET:
5387 *val = spec->root_type_id;
5388 break;
5389 case BPF_TYPE_EXISTS:
5390 *val = 1;
5391 break;
5392 case BPF_TYPE_SIZE:
5393 sz = btf__resolve_size(spec->btf, spec->root_type_id);
5394 if (sz < 0)
5395 return -EINVAL;
5396 *val = sz;
5397 break;
5398 case BPF_TYPE_ID_LOCAL:
5399 /* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */
5400 default:
5401 return -EOPNOTSUPP;
5402 }
5403
5404 return 0;
5405}
5406
5407static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
5408 const struct bpf_core_spec *spec,
5409 __u32 *val)
5410{
5411 const struct btf_type *t;
5412 const struct btf_enum *e;
5413
5414 switch (relo->kind) {
5415 case BPF_ENUMVAL_EXISTS:
5416 *val = spec ? 1 : 0;
5417 break;
5418 case BPF_ENUMVAL_VALUE:
5419 if (!spec)
5420 return -EUCLEAN; /* request instruction poisoning */
5421 t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
5422 e = btf_enum(t) + spec->spec[0].idx;
5423 *val = e->val;
5424 break;
5425 default:
5426 return -EOPNOTSUPP;
5427 }
5428
5429 return 0;
5430}
5431
5432struct bpf_core_relo_res
5433{
5434 /* expected value in the instruction, unless validate == false */
5435 __u32 orig_val;
5436 /* new value that needs to be patched up to */
5437 __u32 new_val;
5438 /* relocation unsuccessful, poison instruction, but don't fail load */
5439 bool poison;
5440 /* some relocations can't be validated against orig_val */
5441 bool validate;
5442 /* for field byte offset relocations or the forms:
5443 * *(T *)(rX + <off>) = rY
5444 * rX = *(T *)(rY + <off>),
5445 * we remember original and resolved field size to adjust direct
5446 * memory loads of pointers and integers; this is necessary for 32-bit
5447 * host kernel architectures, but also makes it possible to automatically
5448 * relocate fields that were resized from, e.g., u32 to u64, etc.
5449 */
5450 bool fail_memsz_adjust;
5451 __u32 orig_sz;
5452 __u32 orig_type_id;
5453 __u32 new_sz;
5454 __u32 new_type_id;
5455};
5456
5457/* Calculate original and target relocation values, given local and target
5458 * specs and relocation kind. These values are calculated for each candidate.
5459 * If there are multiple candidates, resulting values should all be consistent
5460 * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
5461 * If instruction has to be poisoned, *poison will be set to true.
5462 */
5463static int bpf_core_calc_relo(const struct bpf_program *prog,
5464 const struct bpf_core_relo *relo,
5465 int relo_idx,
5466 const struct bpf_core_spec *local_spec,
5467 const struct bpf_core_spec *targ_spec,
5468 struct bpf_core_relo_res *res)
5469{
5470 int err = -EOPNOTSUPP;
5471
5472 res->orig_val = 0;
5473 res->new_val = 0;
5474 res->poison = false;
5475 res->validate = true;
5476 res->fail_memsz_adjust = false;
5477 res->orig_sz = res->new_sz = 0;
5478 res->orig_type_id = res->new_type_id = 0;
5479
5480 if (core_relo_is_field_based(relo->kind)) {
5481 err = bpf_core_calc_field_relo(prog, relo, local_spec,
5482 &res->orig_val, &res->orig_sz,
5483 &res->orig_type_id, &res->validate);
5484 err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec,
5485 &res->new_val, &res->new_sz,
5486 &res->new_type_id, NULL);
5487 if (err)
5488 goto done;
5489 /* Validate if it's safe to adjust load/store memory size.
5490 * Adjustments are performed only if original and new memory
5491 * sizes differ.
5492 */
5493 res->fail_memsz_adjust = false;
5494 if (res->orig_sz != res->new_sz) {
5495 const struct btf_type *orig_t, *new_t;
5496
5497 orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
5498 new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
5499
5500 /* There are two use cases in which it's safe to
5501 * adjust load/store's mem size:
5502 * - reading a 32-bit kernel pointer, while on BPF
5503 * side pointers are always 64-bit; in this case
5504 * it's safe to "downsize" instruction size due to
5505 * pointer being treated as unsigned integer with
5506 * zero-extended upper 32-bits;
5507 * - reading unsigned integers, again because
5508 * zero-extension preserves the value correctly.
5509 *
5510 * In all other cases it's incorrect to attempt to
5511 * load/store field because read value will be
5512 * incorrect, so we poison relocated instruction.
5513 */
5514 if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
5515 goto done;
5516 if (btf_is_int(orig_t) && btf_is_int(new_t) &&
5517 btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
5518 btf_int_encoding(new_t) != BTF_INT_SIGNED)
5519 goto done;
5520
5521 /* mark as invalid mem size adjustment, but this will
5522 * only be checked for LDX/STX/ST insns
5523 */
5524 res->fail_memsz_adjust = true;
5525 }
5526 } else if (core_relo_is_type_based(relo->kind)) {
5527 err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
5528 err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
5529 } else if (core_relo_is_enumval_based(relo->kind)) {
5530 err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
5531 err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
5532 }
5533
5534done:
5535 if (err == -EUCLEAN) {
5536 /* EUCLEAN is used to signal instruction poisoning request */
5537 res->poison = true;
5538 err = 0;
5539 } else if (err == -EOPNOTSUPP) {
5540 /* EOPNOTSUPP means unknown/unsupported relocation */
5541 pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
5542 prog->name, relo_idx, core_relo_kind_str(relo->kind),
5543 relo->kind, relo->insn_off / 8);
5544 }
5545
5546 return err;
5547}
5548
5549/*
5550 * Turn an instruction for which CO-RE relocation failed into an invalid one
5551 * with a distinct signature.
5552 */
5553static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
5554 int insn_idx, struct bpf_insn *insn)
5555{
5556 pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
5557 prog->name, relo_idx, insn_idx);
5558 insn->code = BPF_JMP | BPF_CALL;
5559 insn->dst_reg = 0;
5560 insn->src_reg = 0;
5561 insn->off = 0;
5562 /* if this instruction is reachable (not dead code),
5563 * the verifier will complain with the following message:
5564 * invalid func unknown#195896080
5565 */
5566 insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
5567}
5568
5569static bool is_ldimm64(struct bpf_insn *insn)
5570{
5571 return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
5572}
5573
5574static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
5575{
5576 switch (BPF_SIZE(insn->code)) {
5577 case BPF_DW: return 8;
5578 case BPF_W: return 4;
5579 case BPF_H: return 2;
5580 case BPF_B: return 1;
5581 default: return -1;
5582 }
5583}
5584
5585static int insn_bytes_to_bpf_size(__u32 sz)
5586{
5587 switch (sz) {
5588 case 8: return BPF_DW;
5589 case 4: return BPF_W;
5590 case 2: return BPF_H;
5591 case 1: return BPF_B;
5592 default: return -1;
5593 }
5594}
5595
5596/*
5597 * Patch relocatable BPF instruction.
5598 *
5599 * Patched value is determined by relocation kind and target specification.
5600 * For existence relocations target spec will be NULL if field/type is not found.
5601 * Expected insn->imm value is determined using relocation kind and local
5602 * spec, and is checked before patching instruction. If actual insn->imm value
5603 * is wrong, bail out with error.
5604 *
5605 * Currently supported classes of BPF instruction are:
5606 * 1. rX = <imm> (assignment with immediate operand);
5607 * 2. rX += <imm> (arithmetic operations with immediate operand);
5608 * 3. rX = <imm64> (load with 64-bit immediate value);
5609 * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
5610 * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
5611 * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
5612 */
5613static int bpf_core_patch_insn(struct bpf_program *prog,
5614 const struct bpf_core_relo *relo,
5615 int relo_idx,
5616 const struct bpf_core_relo_res *res)
5617{
5618 __u32 orig_val, new_val;
5619 struct bpf_insn *insn;
5620 int insn_idx;
5621 __u8 class;
5622
5623 if (relo->insn_off % BPF_INSN_SZ)
5624 return -EINVAL;
5625 insn_idx = relo->insn_off / BPF_INSN_SZ;
5626 /* adjust insn_idx from section frame of reference to the local
5627 * program's frame of reference; (sub-)program code is not yet
5628 * relocated, so it's enough to just subtract in-section offset
5629 */
5630 insn_idx = insn_idx - prog->sec_insn_off;
5631 insn = &prog->insns[insn_idx];
5632 class = BPF_CLASS(insn->code);
5633
5634 if (res->poison) {
5635poison:
5636 /* poison second part of ldimm64 to avoid confusing error from
5637 * verifier about "unknown opcode 00"
5638 */
5639 if (is_ldimm64(insn))
5640 bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1);
5641 bpf_core_poison_insn(prog, relo_idx, insn_idx, insn);
5642 return 0;
5643 }
5644
5645 orig_val = res->orig_val;
5646 new_val = res->new_val;
5647
5648 switch (class) {
5649 case BPF_ALU:
5650 case BPF_ALU64:
5651 if (BPF_SRC(insn->code) != BPF_K)
5652 return -EINVAL;
5653 if (res->validate && insn->imm != orig_val) {
5654 pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
5655 prog->name, relo_idx,
5656 insn_idx, insn->imm, orig_val, new_val);
5657 return -EINVAL;
5658 }
5659 orig_val = insn->imm;
5660 insn->imm = new_val;
5661 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
5662 prog->name, relo_idx, insn_idx,
5663 orig_val, new_val);
5664 break;
5665 case BPF_LDX:
5666 case BPF_ST:
5667 case BPF_STX:
5668 if (res->validate && insn->off != orig_val) {
5669 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
5670 prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val);
5671 return -EINVAL;
5672 }
5673 if (new_val > SHRT_MAX) {
5674 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
5675 prog->name, relo_idx, insn_idx, new_val);
5676 return -ERANGE;
5677 }
5678 if (res->fail_memsz_adjust) {
5679 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
5680 "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
5681 prog->name, relo_idx, insn_idx);
5682 goto poison;
5683 }
5684
5685 orig_val = insn->off;
5686 insn->off = new_val;
5687 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
5688 prog->name, relo_idx, insn_idx, orig_val, new_val);
5689
5690 if (res->new_sz != res->orig_sz) {
5691 int insn_bytes_sz, insn_bpf_sz;
5692
5693 insn_bytes_sz = insn_bpf_size_to_bytes(insn);
5694 if (insn_bytes_sz != res->orig_sz) {
5695 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
5696 prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
5697 return -EINVAL;
5698 }
5699
5700 insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
5701 if (insn_bpf_sz < 0) {
5702 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
5703 prog->name, relo_idx, insn_idx, res->new_sz);
5704 return -EINVAL;
5705 }
5706
5707 insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
5708 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
5709 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
5710 }
5711 break;
5712 case BPF_LD: {
5713 __u64 imm;
5714
5715 if (!is_ldimm64(insn) ||
5716 insn[0].src_reg != 0 || insn[0].off != 0 ||
5717 insn_idx + 1 >= prog->insns_cnt ||
5718 insn[1].code != 0 || insn[1].dst_reg != 0 ||
5719 insn[1].src_reg != 0 || insn[1].off != 0) {
5720 pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
5721 prog->name, relo_idx, insn_idx);
5722 return -EINVAL;
5723 }
5724
5725 imm = insn[0].imm + ((__u64)insn[1].imm << 32);
5726 if (res->validate && imm != orig_val) {
5727 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
5728 prog->name, relo_idx,
5729 insn_idx, (unsigned long long)imm,
5730 orig_val, new_val);
5731 return -EINVAL;
5732 }
5733
5734 insn[0].imm = new_val;
5735 insn[1].imm = 0; /* currently only 32-bit values are supported */
5736 pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
5737 prog->name, relo_idx, insn_idx,
5738 (unsigned long long)imm, new_val);
5739 break;
5740 }
5741 default:
5742 pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
5743 prog->name, relo_idx, insn_idx, insn->code,
5744 insn->src_reg, insn->dst_reg, insn->off, insn->imm);
5745 return -EINVAL;
5746 }
5747
5748 return 0;
5749}
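
/* Illustrative example (hypothetical offsets/sizes): if a field was a u32 at
 * byte offset 8 in local BTF but is an unsigned 64-bit field at byte offset
 * 16 in the target kernel's BTF, a load such as
 *
 *	r2 = *(u32 *)(r1 + 8)
 *
 * is patched into
 *
 *	r2 = *(u64 *)(r1 + 16)
 *
 * by rewriting insn->off (8 -> 16) and switching BPF_SIZE() of insn->code
 * from BPF_W to BPF_DW. Had the size change not been one of the safe cases
 * (pointers or unsigned integers), res->fail_memsz_adjust would be set and
 * the instruction would be poisoned instead.
 */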
5750
5751/* Output spec definition in the format:
5752 * [<type-id>] <kind> <type-name><spec> (<raw-spec> @ offset <byte-offset>),
5753 * where <spec> is a C-syntax view of recorded field access, e.g.: .a[3].b
5754 */
5755static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
5756{
5757 const struct btf_type *t;
5758 const struct btf_enum *e;
5759 const char *s;
5760 __u32 type_id;
5761 int i;
5762
5763 type_id = spec->root_type_id;
5764 t = btf__type_by_id(spec->btf, type_id);
5765 s = btf__name_by_offset(spec->btf, t->name_off);
5766
5767 libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
5768
5769 if (core_relo_is_type_based(spec->relo_kind))
5770 return;
5771
5772 if (core_relo_is_enumval_based(spec->relo_kind)) {
5773 t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
5774 e = btf_enum(t) + spec->raw_spec[0];
5775 s = btf__name_by_offset(spec->btf, e->name_off);
5776
5777 libbpf_print(level, "::%s = %u", s, e->val);
5778 return;
5779 }
5780
5781 if (core_relo_is_field_based(spec->relo_kind)) {
5782 for (i = 0; i < spec->len; i++) {
5783 if (spec->spec[i].name)
5784 libbpf_print(level, ".%s", spec->spec[i].name);
5785 else if (i > 0 || spec->spec[i].idx > 0)
5786 libbpf_print(level, "[%u]", spec->spec[i].idx);
5787 }
5788
5789 libbpf_print(level, " (");
5790 for (i = 0; i < spec->raw_len; i++)
5791 libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
5792
5793 if (spec->bit_offset % 8)
5794 libbpf_print(level, " @ offset %u.%u)",
5795 spec->bit_offset / 8, spec->bit_offset % 8);
5796 else
5797 libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
5798 return;
5799 }
5800}
5801
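/* Hash/equality callbacks and key helper for the CO-RE candidate cache used
 * below: keys are local root type IDs packed into pointers via
 * u32_as_hash_key(), values are core_cand_list pointers shared by all
 * relocations against the same root type.
 */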
5802static size_t bpf_core_hash_fn(const void *key, void *ctx)
5803{
5804 return (size_t)key;
5805}
5806
5807static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
5808{
5809 return k1 == k2;
5810}
5811
5812static void *u32_as_hash_key(__u32 x)
5813{
5814 return (void *)(uintptr_t)x;
5815}
5816
5817/*
5818 * CO-RE relocate single instruction.
5819 *
5820 * The outline and important points of the algorithm:
5821 * 1. For given local type, find corresponding candidate target types.
5822 * Candidate type is a type with the same "essential" name, ignoring
5823 * everything after last triple underscore (___). E.g., `sample`,
5824 * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
5825 * for each other. Names with triple underscore are referred to as
5826 *    "flavors" and are useful, among other things, for specifying and
5827 *    supporting incompatible variations of the same kernel struct, which
5828 *    might differ between different kernel versions and/or build
5829 * configurations.
5830 *
5831 *    N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
5832 *    converter, when deduplicated BTF of a kernel still contains more than
5833 *    one distinct type with the same name. In that case, ___2, ___3, etc.
5834 *    are appended starting from the second name conflict. But struct flavors
5835 *    are also useful when defined "locally", in a BPF program, to extract
5836 *    the same data across incompatible changes between kernel
5837 *    versions/configurations. For instance, to handle field renames between
5838 *    kernel versions, one can define two flavors with the same common name
5839 *    and use conditional relocations to extract that field, depending on the
5840 *    target kernel version (see the illustrative sketch below).
5841 * 2. For each candidate type, try to match local specification to this
5842 * candidate target type. Matching involves finding corresponding
5843 * high-level spec accessors, meaning that all named fields should match,
5844 * as well as all array accesses should be within the actual bounds. Also,
5845 * types should be compatible (see bpf_core_fields_are_compat for details).
5846 * 3. It is supported and expected that there might be multiple flavors
5847 * matching the spec. As long as all the specs resolve to the same set of
5848 * offsets across all candidates, there is no error. If there is any
5849 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
5850 *    imperfection of BTF deduplication, which can cause slight duplication of
5851 * the same BTF type, if some directly or indirectly referenced (by
5852 * pointer) type gets resolved to different actual types in different
5853 * object files. If such situation occurs, deduplicated BTF will end up
5854 * with two (or more) structurally identical types, which differ only in
5855 * types they refer to through pointer. This should be OK in most cases and
5856 * is not an error.
5857 * 4. Candidate types search is performed by linearly scanning through all
5858 * types in target BTF. It is anticipated that this is overall more
5859 * efficient memory-wise and not significantly worse (if not better)
5860 * CPU-wise compared to prebuilding a map from all local type names to
5861 * a list of candidate type names. It's also sped up by caching resolved
5862 * list of matching candidates per each local "root" type ID, that has at
5863 * least one bpf_core_relo associated with it. This list is shared
5864 * between multiple relocations for the same type ID and is updated as some
5865 * of the candidates are pruned due to structural incompatibility.
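 *
 * As an illustrative sketch only (struct and field names below are made up,
 * not taken from any real kernel), a BPF program could combine two local
 * flavors with a field existence check roughly like this:
 *
 *	struct some_struct___old { int legacy_field; };
 *	struct some_struct___new { int renamed_field; };
 *
 *	if (bpf_core_field_exists(((struct some_struct___new *)p)->renamed_field))
 *		val = BPF_CORE_READ((struct some_struct___new *)p, renamed_field);
 *	else
 *		val = BPF_CORE_READ((struct some_struct___old *)p, legacy_field);
 *
 * Both flavors share the essential name "some_struct", so both are matched
 * against target BTF; whichever field actually exists on the running kernel
 * determines which relocation resolves successfully.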
5866 */
5867static int bpf_core_apply_relo(struct bpf_program *prog,
5868 const struct bpf_core_relo *relo,
5869 int relo_idx,
5870 const struct btf *local_btf,
5871 struct hashmap *cand_cache)
5872{
5873 struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
5874 const void *type_key = u32_as_hash_key(relo->type_id);
5875 struct bpf_core_relo_res cand_res, targ_res;
5876 const struct btf_type *local_type;
5877 const char *local_name;
5878 struct core_cand_list *cands = NULL;
5879 __u32 local_id;
5880 const char *spec_str;
5881 int i, j, err;
5882
5883 local_id = relo->type_id;
5884 local_type = btf__type_by_id(local_btf, local_id);
5885 if (!local_type)
5886 return -EINVAL;
5887
5888 local_name = btf__name_by_offset(local_btf, local_type->name_off);
5889 if (!local_name)
5890 return -EINVAL;
5891
5892 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
5893 if (str_is_empty(spec_str))
5894 return -EINVAL;
5895
5896 err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
5897 if (err) {
5898 pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
5899 prog->name, relo_idx, local_id, btf_kind_str(local_type),
5900 str_is_empty(local_name) ? "<anon>" : local_name,
5901 spec_str, err);
5902 return -EINVAL;
5903 }
5904
5905 pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name,
5906 relo_idx, core_relo_kind_str(relo->kind), relo->kind);
5907 bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
5908 libbpf_print(LIBBPF_DEBUG, "\n");
5909
5910 /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
5911 if (relo->kind == BPF_TYPE_ID_LOCAL) {
5912 targ_res.validate = true;
5913 targ_res.poison = false;
5914 targ_res.orig_val = local_spec.root_type_id;
5915 targ_res.new_val = local_spec.root_type_id;
5916 goto patch_insn;
5917 }
5918
5919 /* libbpf doesn't support candidate search for anonymous types */
5920 if (str_is_empty(spec_str)) {
5921 pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
5922 prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
5923 return -EOPNOTSUPP;
5924 }
5925
5926 if (!hashmap__find(cand_cache, type_key, (void **)&cands)) {
5927 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5928 if (IS_ERR(cands)) {
5929 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5930 prog->name, relo_idx, local_id, btf_kind_str(local_type),
5931 local_name, PTR_ERR(cands));
5932 return PTR_ERR(cands);
5933 }
5934 err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
5935 if (err) {
5936 bpf_core_free_cands(cands);
5937 return err;
5938 }
5939 }
5940
5941 for (i = 0, j = 0; i < cands->len; i++) {
5942 err = bpf_core_spec_match(&local_spec, cands->cands[i].btf,
5943 cands->cands[i].id, &cand_spec);
5944 if (err < 0) {
5945 pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
5946 prog->name, relo_idx, i);
5947 bpf_core_dump_spec(LIBBPF_WARN, &cand_spec);
5948 libbpf_print(LIBBPF_WARN, ": %d\n", err);
5949 return err;
5950 }
5951
5952 pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name,
5953 relo_idx, err == 0 ? "non-matching" : "matching", i);
5954 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
5955 libbpf_print(LIBBPF_DEBUG, "\n");
5956
5957 if (err == 0)
5958 continue;
5959
5960 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res);
5961 if (err)
5962 return err;
5963
5964 if (j == 0) {
5965 targ_res = cand_res;
5966 targ_spec = cand_spec;
5967 } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
5968 /* if there are many field relo candidates, they
5969 * should all resolve to the same bit offset
5970 */
5971 pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
5972 prog->name, relo_idx, cand_spec.bit_offset,
5973 targ_spec.bit_offset);
5974 return -EINVAL;
5975 } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
5976 /* all candidates should result in the same relocation
5977 * decision and value, otherwise it's dangerous to
5978 * proceed due to ambiguity
5979 */
5980 pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
5981 prog->name, relo_idx,
5982 cand_res.poison ? "failure" : "success", cand_res.new_val,
5983 targ_res.poison ? "failure" : "success", targ_res.new_val);
5984 return -EINVAL;
5985 }
5986
5987 cands->cands[j++] = cands->cands[i];
5988 }
5989
5990 /*
5991	 * For a BPF_FIELD_EXISTS relo, or when the BPF program has field
5992	 * existence checks or kernel version/config checks, it's expected
5993	 * that we might not find any candidates. In this case, if the field
5994	 * wasn't found in any candidate, the list of candidates shouldn't
5995	 * change at all; we'll just handle the relocation appropriately,
5996	 * depending on the relo's kind.
5997 */
5998 if (j > 0)
5999 cands->len = j;
6000
6001 /*
6002	 * If no candidates were found, it might be either a programmer error
6003	 * or an expected case, depending on whether the instruction with the
6004 * relocation is guarded in some way that makes it unreachable (dead
6005 * code) if relocation can't be resolved. This is handled in
6006 * bpf_core_patch_insn() uniformly by replacing that instruction with
6007 * BPF helper call insn (using invalid helper ID). If that instruction
6008 * is indeed unreachable, then it will be ignored and eliminated by
6009 * verifier. If it was an error, then verifier will complain and point
6010 * to a specific instruction number in its log.
6011 */
6012 if (j == 0) {
6013 pr_debug("prog '%s': relo #%d: no matching targets found\n",
6014 prog->name, relo_idx);
6015
6016 /* calculate single target relo result explicitly */
6017 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res);
6018 if (err)
6019 return err;
6020 }
6021
6022patch_insn:
6023 /* bpf_core_patch_insn() should know how to handle missing targ_spec */
6024 err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
6025 if (err) {
6026 pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
6027 prog->name, relo_idx, relo->insn_off, err);
6028 return -EINVAL;
6029 }
6030
6031 return 0;
6032}
6033
6034static int
6035bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
6036{
6037 const struct btf_ext_info_sec *sec;
6038 const struct bpf_core_relo *rec;
6039 const struct btf_ext_info *seg;
6040 struct hashmap_entry *entry;
6041 struct hashmap *cand_cache = NULL;
6042 struct bpf_program *prog;
6043 const char *sec_name;
6044 int i, err = 0, insn_idx, sec_idx;
6045
6046 if (obj->btf_ext->core_relo_info.len == 0)
6047 return 0;
6048
6049 if (targ_btf_path) {
6050 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
6051 if (IS_ERR_OR_NULL(obj->btf_vmlinux_override)) {
6052 err = PTR_ERR(obj->btf_vmlinux_override);
6053 pr_warn("failed to parse target BTF: %d\n", err);
6054 return err;
6055 }
6056 }
6057
6058 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
6059 if (IS_ERR(cand_cache)) {
6060 err = PTR_ERR(cand_cache);
6061 goto out;
6062 }
6063
6064 seg = &obj->btf_ext->core_relo_info;
6065 for_each_btf_ext_sec(seg, sec) {
6066 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6067 if (str_is_empty(sec_name)) {
6068 err = -EINVAL;
6069 goto out;
6070 }
6071 /* bpf_object's ELF is gone by now so it's not easy to find
6072 * section index by section name, but we can find *any*
6073		 * bpf_program within desired section name and use its
6074		 * prog->sec_idx to do a proper search by section index and
6075 * instruction offset
6076 */
6077 prog = NULL;
6078 for (i = 0; i < obj->nr_programs; i++) {
6079 prog = &obj->programs[i];
6080 if (strcmp(prog->sec_name, sec_name) == 0)
6081 break;
6082 }
6083 if (!prog) {
6084 pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
6085 return -ENOENT;
6086 }
6087 sec_idx = prog->sec_idx;
6088
6089 pr_debug("sec '%s': found %d CO-RE relocations\n",
6090 sec_name, sec->num_info);
6091
6092 for_each_btf_ext_rec(seg, sec, i, rec) {
6093 insn_idx = rec->insn_off / BPF_INSN_SZ;
6094 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
6095 if (!prog) {
6096 pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
6097 sec_name, insn_idx, i);
6098 err = -EINVAL;
6099 goto out;
6100 }
6101 /* no need to apply CO-RE relocation if the program is
6102 * not going to be loaded
6103 */
6104 if (!prog->load)
6105 continue;
6106
6107 err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
6108 if (err) {
6109 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
6110 prog->name, i, err);
6111 goto out;
6112 }
6113 }
6114 }
6115
6116out:
6117 /* obj->btf_vmlinux and module BTFs are freed after object load */
6118 btf__free(obj->btf_vmlinux_override);
6119 obj->btf_vmlinux_override = NULL;
6120
6121 if (!IS_ERR_OR_NULL(cand_cache)) {
6122 hashmap__for_each_entry(cand_cache, entry, i) {
6123 bpf_core_free_cands(entry->value);
6124 }
6125 hashmap__free(cand_cache);
6126 }
6127 return err;
6128}
6129
6130/* Relocate data references within program code:
6131 * - map references;
6132 * - global variable references;
6133 * - extern references.
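 *
 * For example, a RELO_DATA relocation rewrites a two-instruction ldimm64 so
 * that insn[0].src_reg becomes BPF_PSEUDO_MAP_VALUE, insn[0].imm holds the
 * map FD and insn[1].imm holds the offset of the variable within the map
 * value, which is exactly what the corresponding case below does.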
6134 */
6135static int
6136bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
6137{
6138 int i;
6139
6140 for (i = 0; i < prog->nr_reloc; i++) {
6141 struct reloc_desc *relo = &prog->reloc_desc[i];
6142 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6143 struct extern_desc *ext;
6144
6145 switch (relo->type) {
6146 case RELO_LD64:
6147 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
6148 insn[0].imm = obj->maps[relo->map_idx].fd;
6149 relo->processed = true;
6150 break;
6151 case RELO_DATA:
6152 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6153 insn[1].imm = insn[0].imm + relo->sym_off;
6154 insn[0].imm = obj->maps[relo->map_idx].fd;
6155 relo->processed = true;
6156 break;
6157 case RELO_EXTERN:
6158 ext = &obj->externs[relo->sym_off];
6159 if (ext->type == EXT_KCFG) {
6160 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6161 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
6162 insn[1].imm = ext->kcfg.data_off;
6163 } else /* EXT_KSYM */ {
6164 if (ext->ksym.type_id) { /* typed ksyms */
6165 insn[0].src_reg = BPF_PSEUDO_BTF_ID;
6166 insn[0].imm = ext->ksym.kernel_btf_id;
6167 insn[1].imm = ext->ksym.kernel_btf_obj_fd;
6168 } else { /* typeless ksyms */
6169 insn[0].imm = (__u32)ext->ksym.addr;
6170 insn[1].imm = ext->ksym.addr >> 32;
6171 }
6172 }
6173 relo->processed = true;
6174 break;
6175 case RELO_CALL:
6176 /* will be handled as a follow up pass */
6177 break;
6178 default:
6179 pr_warn("prog '%s': relo #%d: bad relo type %d\n",
6180 prog->name, i, relo->type);
6181 return -EINVAL;
6182 }
6183 }
6184
6185 return 0;
6186}
6187
6188static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6189 const struct bpf_program *prog,
6190 const struct btf_ext_info *ext_info,
6191 void **prog_info, __u32 *prog_rec_cnt,
6192 __u32 *prog_rec_sz)
6193{
6194 void *copy_start = NULL, *copy_end = NULL;
6195 void *rec, *rec_end, *new_prog_info;
6196 const struct btf_ext_info_sec *sec;
6197 size_t old_sz, new_sz;
6198 const char *sec_name;
6199 int i, off_adj;
6200
6201 for_each_btf_ext_sec(ext_info, sec) {
6202 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6203 if (!sec_name)
6204 return -EINVAL;
6205 if (strcmp(sec_name, prog->sec_name) != 0)
6206 continue;
6207
6208 for_each_btf_ext_rec(ext_info, sec, i, rec) {
6209 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6210
6211 if (insn_off < prog->sec_insn_off)
6212 continue;
6213 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6214 break;
6215
6216 if (!copy_start)
6217 copy_start = rec;
6218 copy_end = rec + ext_info->rec_size;
6219 }
6220
6221 if (!copy_start)
6222 return -ENOENT;
6223
6224 /* append func/line info of a given (sub-)program to the main
6225 * program func/line info
6226 */
6227 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
6228 new_sz = old_sz + (copy_end - copy_start);
6229 new_prog_info = realloc(*prog_info, new_sz);
6230 if (!new_prog_info)
6231 return -ENOMEM;
6232 *prog_info = new_prog_info;
6233 *prog_rec_cnt = new_sz / ext_info->rec_size;
6234 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6235
6236 /* Kernel instruction offsets are in units of 8-byte
6237 * instructions, while .BTF.ext instruction offsets generated
6238 * by Clang are in units of bytes. So convert Clang offsets
6239 * into kernel offsets and adjust offset according to program
6240 * relocated position.
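		 * For example, a .BTF.ext byte offset of 24 becomes kernel
		 * instruction offset 3, to which off_adj is then added.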
6241 */
6242 off_adj = prog->sub_insn_off - prog->sec_insn_off;
6243 rec = new_prog_info + old_sz;
6244 rec_end = new_prog_info + new_sz;
6245 for (; rec < rec_end; rec += ext_info->rec_size) {
6246 __u32 *insn_off = rec;
6247
6248 *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6249 }
6250 *prog_rec_sz = ext_info->rec_size;
6251 return 0;
6252 }
6253
6254 return -ENOENT;
6255}
6256
6257static int
6258reloc_prog_func_and_line_info(const struct bpf_object *obj,
6259 struct bpf_program *main_prog,
6260 const struct bpf_program *prog)
6261{
6262 int err;
6263
6264 /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
6265	 * support func/line info
6266 */
6267 if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
6268 return 0;
6269
6270 /* only attempt func info relocation if main program's func_info
6271 * relocation was successful
6272 */
6273 if (main_prog != prog && !main_prog->func_info)
6274 goto line_info;
6275
6276 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6277 &main_prog->func_info,
6278 &main_prog->func_info_cnt,
6279 &main_prog->func_info_rec_size);
6280 if (err) {
6281 if (err != -ENOENT) {
6282 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6283 prog->name, err);
6284 return err;
6285 }
6286 if (main_prog->func_info) {
6287 /*
6288			 * Some info has already been found but there was a
6289			 * problem with the last btf_ext reloc. We have to error out.
6290 */
6291 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6292 return err;
6293 }
6294		/* There was a problem loading the very first info. Ignore the rest. */
6295 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6296 prog->name);
6297 }
6298
6299line_info:
6300 /* don't relocate line info if main program's relocation failed */
6301 if (main_prog != prog && !main_prog->line_info)
6302 return 0;
6303
6304 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6305 &main_prog->line_info,
6306 &main_prog->line_info_cnt,
6307 &main_prog->line_info_rec_size);
6308 if (err) {
6309 if (err != -ENOENT) {
6310 pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6311 prog->name, err);
6312 return err;
6313 }
6314 if (main_prog->line_info) {
6315 /*
6316			 * Some info has already been found but there was a
6317			 * problem with the last btf_ext reloc. We have to error out.
6318 */
6319 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6320 return err;
6321 }
6322		/* There was a problem loading the very first info. Ignore the rest. */
6323 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6324 prog->name);
6325 }
6326 return 0;
6327}
6328
6329static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6330{
6331 size_t insn_idx = *(const size_t *)key;
6332 const struct reloc_desc *relo = elem;
6333
6334 if (insn_idx == relo->insn_idx)
6335 return 0;
6336 return insn_idx < relo->insn_idx ? -1 : 1;
6337}
6338
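/* Relocation descriptors are sorted by insn_idx in bpf_object__collect_relos(),
 * so a binary search by instruction index is sufficient here.
 */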
6339static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6340{
6341 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6342 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6343}
6344
6345static int
6346bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6347 struct bpf_program *prog)
6348{
6349 size_t sub_insn_idx, insn_idx, new_cnt;
6350 struct bpf_program *subprog;
6351 struct bpf_insn *insns, *insn;
6352 struct reloc_desc *relo;
6353 int err;
6354
6355 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6356 if (err)
6357 return err;
6358
6359 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6360 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6361 if (!insn_is_subprog_call(insn))
6362 continue;
6363
6364 relo = find_prog_insn_relo(prog, insn_idx);
6365 if (relo && relo->type != RELO_CALL) {
6366 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6367 prog->name, insn_idx, relo->type);
6368 return -LIBBPF_ERRNO__RELOC;
6369 }
6370 if (relo) {
6371 /* sub-program instruction index is a combination of
6372 * an offset of a symbol pointed to by relocation and
6373 * call instruction's imm field; for global functions,
6374 * call always has imm = -1, but for static functions
6375 * relocation is against STT_SECTION and insn->imm
6376 * points to a start of a static function
6377 */
6378 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6379 } else {
6380 /* if subprogram call is to a static function within
6381 * the same ELF section, there won't be any relocation
6382 * emitted, but it also means there is no additional
6383 * offset necessary, insns->imm is relative to
6384 * instruction's original position within the section
6385 */
6386 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6387 }
6388
6389 /* we enforce that sub-programs should be in .text section */
6390 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6391 if (!subprog) {
6392 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6393 prog->name);
6394 return -LIBBPF_ERRNO__RELOC;
6395 }
6396
6397 /* if it's the first call instruction calling into this
6398 * subprogram (meaning this subprog hasn't been processed
6399 * yet) within the context of current main program:
6400		 *   - append it at the end of main program's instruction block;
6401		 *   - process it recursively, while current program is put on hold;
6402		 *   - if that subprogram calls some other not yet processed
6403		 *   subprogram, same thing will happen recursively until
6404		 *   there are no more unprocessed subprograms left to append
6405 * and relocate.
6406 */
6407 if (subprog->sub_insn_off == 0) {
6408 subprog->sub_insn_off = main_prog->insns_cnt;
6409
6410 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6411 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6412 if (!insns) {
6413 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6414 return -ENOMEM;
6415 }
6416 main_prog->insns = insns;
6417 main_prog->insns_cnt = new_cnt;
6418
6419 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6420 subprog->insns_cnt * sizeof(*insns));
6421
6422 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6423 main_prog->name, subprog->insns_cnt, subprog->name);
6424
6425 err = bpf_object__reloc_code(obj, main_prog, subprog);
6426 if (err)
6427 return err;
6428 }
6429
6430 /* main_prog->insns memory could have been re-allocated, so
6431 * calculate pointer again
6432 */
6433 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6434 /* calculate correct instruction position within current main
6435 * prog; each main prog can have a different set of
6436 * subprograms appended (potentially in different order as
6437 * well), so position of any subprog can be different for
6438 * different main programs */
6439 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6440
6441 if (relo)
6442 relo->processed = true;
6443
6444 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6445 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6446 }
6447
6448 return 0;
6449}
6450
6451/*
6452 * Relocate sub-program calls.
6453 *
6454 * Algorithm operates as follows. Each entry-point BPF program (referred to as
6455 * main prog) is processed separately. Each subprog (a non-entry function that
6456 * can be called from either entry progs or other subprogs) gets its
6457 * sub_insn_off reset to zero. This serves as an indicator that this subprogram
6458 * hasn't yet been appended and relocated within the current main prog. Once it
6459 * is relocated, sub_insn_off will point at the position within current main prog
6460 * where given subprog was appended. This will further be used to relocate all
6461 * the call instructions jumping into this subprog.
6462 *
6463 * We start with main program and process all call instructions. If the call
6464 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6465 * is zero), subprog instructions are appended at the end of main program's
6466 * instruction array. Then main program is "put on hold" while we recursively
6467 * process newly appended subprogram. If that subprogram calls into another
6468 * subprogram that hasn't been appended, new subprogram is appended again to
6469 * the *main* prog's instructions (subprog's instructions are always left
6470 * untouched, as they need to be in unmodified state for subsequent main progs
6471 * and subprog instructions are always sent only as part of a main prog) and
6472 * the process continues recursively. Once all the subprogs called from a main
6473 * prog or any of its subprogs are appended (and relocated), all their
6474 * positions within finalized instructions array are known, so it's easy to
6475 * rewrite call instructions with correct relative offsets, corresponding to
6476 * desired target subprog.
6477 *
6478 * It's important to realize that some subprogs might not be called from some
6479 * main prog and any of its called/used subprogs. Those will keep their
6480 * subprog->sub_insn_off as zero at all times and won't be appended to current
6481 * main prog and won't be relocated within the context of current main prog.
6482 * They might still be used from other main progs later.
6483 *
6484 * Visually this process can be shown as below. Suppose we have two main
6485 * programs mainA and mainB and BPF object contains three subprogs: subA,
6486 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6487 * subC both call subB:
6488 *
6489 * +--------+ +-------+
6490 * | v v |
6491 * +--+---+ +--+-+-+ +---+--+
6492 * | subA | | subB | | subC |
6493 * +--+---+ +------+ +---+--+
6494 * ^ ^
6495 * | |
6496 * +---+-------+ +------+----+
6497 * | mainA | | mainB |
6498 * +-----------+ +-----------+
6499 *
6500 * We'll start relocating mainA, will find subA, append it and start
6501 * processing sub A recursively:
6502 *
6503 * +-----------+------+
6504 * | mainA | subA |
6505 * +-----------+------+
6506 *
6507 * At this point we notice that subB is used from subA, so we append it and
6508 * relocate (there are no further subcalls from subB):
6509 *
6510 * +-----------+------+------+
6511 * | mainA | subA | subB |
6512 * +-----------+------+------+
6513 *
6514 * At this point, we relocate subA calls, then go one level up and finish with
6515 * relocating mainA calls. mainA is done.
6516 *
6517 * For mainB, the process is similar but results in a different order. We start with
6518 * mainB and skip subA and subB, as mainB never calls them (at least
6519 * directly), but we see subC is needed, so we append and start processing it:
6520 *
6521 * +-----------+------+
6522 * | mainB | subC |
6523 * +-----------+------+
6524 * Now we see subC needs subB, so we go back to it, append and relocate it:
6525 *
6526 * +-----------+------+------+
6527 * | mainB | subC | subB |
6528 * +-----------+------+------+
6529 *
6530 * At this point we unwind recursion, relocate calls in subC, then in mainB.
6531 */
6532static int
6533bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6534{
6535 struct bpf_program *subprog;
6536 int i, j, err;
6537
6538 /* mark all subprogs as not relocated (yet) within the context of
6539 * current main program
6540 */
6541 for (i = 0; i < obj->nr_programs; i++) {
6542 subprog = &obj->programs[i];
6543 if (!prog_is_subprog(obj, subprog))
6544 continue;
6545
6546 subprog->sub_insn_off = 0;
6547 for (j = 0; j < subprog->nr_reloc; j++)
6548 if (subprog->reloc_desc[j].type == RELO_CALL)
6549 subprog->reloc_desc[j].processed = false;
6550 }
6551
6552 err = bpf_object__reloc_code(obj, prog, prog);
6553 if (err)
6554 return err;
6555
6556
6557 return 0;
6558}
6559
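/* Overall relocation entry point: CO-RE relocations first (if .BTF.ext is
 * present), then data/map/extern references for every program, and finally
 * subprogram call relocation for each entry-point program.
 */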
6560static int
6561bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6562{
6563 struct bpf_program *prog;
6564 size_t i;
6565 int err;
6566
6567 if (obj->btf_ext) {
6568 err = bpf_object__relocate_core(obj, targ_btf_path);
6569 if (err) {
6570 pr_warn("failed to perform CO-RE relocations: %d\n",
6571 err);
6572 return err;
6573 }
6574 }
6575 /* relocate data references first for all programs and sub-programs,
6576 * as they don't change relative to code locations, so subsequent
6577 * subprogram processing won't need to re-calculate any of them
6578 */
6579 for (i = 0; i < obj->nr_programs; i++) {
6580 prog = &obj->programs[i];
6581 err = bpf_object__relocate_data(obj, prog);
6582 if (err) {
6583 pr_warn("prog '%s': failed to relocate data references: %d\n",
6584 prog->name, err);
6585 return err;
6586 }
6587 }
6588 /* now relocate subprogram calls and append used subprograms to main
6589 * programs; each copy of subprogram code needs to be relocated
6590 * differently for each main program, because its code location might
6591 * have changed
6592 */
6593 for (i = 0; i < obj->nr_programs; i++) {
6594 prog = &obj->programs[i];
6595 /* sub-program's sub-calls are relocated within the context of
6596 * its main program only
6597 */
6598 if (prog_is_subprog(obj, prog))
6599 continue;
6600
6601 err = bpf_object__relocate_calls(obj, prog);
6602 if (err) {
6603 pr_warn("prog '%s': failed to relocate calls: %d\n",
6604 prog->name, err);
6605 return err;
6606 }
6607 }
6608 /* free up relocation descriptors */
6609 for (i = 0; i < obj->nr_programs; i++) {
6610 prog = &obj->programs[i];
6611 zfree(&prog->reloc_desc);
6612 prog->nr_reloc = 0;
6613 }
6614 return 0;
6615}
6616
6617static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6618 GElf_Shdr *shdr, Elf_Data *data);
6619
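/* Collect .maps section relocations: these fill in inner map slots (the
 * "values" array) of BTF-defined map-in-map declarations, i.e.
 * BPF_MAP_TYPE_ARRAY_OF_MAPS and BPF_MAP_TYPE_HASH_OF_MAPS outer maps.
 */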
6620static int bpf_object__collect_map_relos(struct bpf_object *obj,
6621 GElf_Shdr *shdr, Elf_Data *data)
6622{
6623 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
6624 int i, j, nrels, new_sz;
6625 const struct btf_var_secinfo *vi = NULL;
6626 const struct btf_type *sec, *var, *def;
6627 struct bpf_map *map = NULL, *targ_map;
6628 const struct btf_member *member;
6629 const char *name, *mname;
6630 Elf_Data *symbols;
6631 unsigned int moff;
6632 GElf_Sym sym;
6633 GElf_Rel rel;
6634 void *tmp;
6635
6636 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
6637 return -EINVAL;
6638 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
6639 if (!sec)
6640 return -EINVAL;
6641
6642 symbols = obj->efile.symbols;
6643 nrels = shdr->sh_size / shdr->sh_entsize;
6644 for (i = 0; i < nrels; i++) {
6645 if (!gelf_getrel(data, i, &rel)) {
6646 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
6647 return -LIBBPF_ERRNO__FORMAT;
6648 }
6649 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
6650 pr_warn(".maps relo #%d: symbol %zx not found\n",
6651 i, (size_t)GELF_R_SYM(rel.r_info));
6652 return -LIBBPF_ERRNO__FORMAT;
6653 }
6654 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
6655 if (sym.st_shndx != obj->efile.btf_maps_shndx) {
6656 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
6657 i, name);
6658 return -LIBBPF_ERRNO__RELOC;
6659 }
6660
6661 pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
6662 i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
6663 (size_t)rel.r_offset, sym.st_name, name);
6664
6665 for (j = 0; j < obj->nr_maps; j++) {
6666 map = &obj->maps[j];
6667 if (map->sec_idx != obj->efile.btf_maps_shndx)
6668 continue;
6669
6670 vi = btf_var_secinfos(sec) + map->btf_var_idx;
6671 if (vi->offset <= rel.r_offset &&
6672 rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
6673 break;
6674 }
6675 if (j == obj->nr_maps) {
6676 pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
6677 i, name, (size_t)rel.r_offset);
6678 return -EINVAL;
6679 }
6680
6681 if (!bpf_map_type__is_map_in_map(map->def.type))
6682 return -EINVAL;
6683 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
6684 map->def.key_size != sizeof(int)) {
6685 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
6686 i, map->name, sizeof(int));
6687 return -EINVAL;
6688 }
6689
6690 targ_map = bpf_object__find_map_by_name(obj, name);
6691 if (!targ_map)
6692 return -ESRCH;
6693
6694 var = btf__type_by_id(obj->btf, vi->type);
6695 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
6696 if (btf_vlen(def) == 0)
6697 return -EINVAL;
6698 member = btf_members(def) + btf_vlen(def) - 1;
6699 mname = btf__name_by_offset(obj->btf, member->name_off);
6700 if (strcmp(mname, "values"))
6701 return -EINVAL;
6702
6703 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
6704 if (rel.r_offset - vi->offset < moff)
6705 return -EINVAL;
6706
6707 moff = rel.r_offset - vi->offset - moff;
6708 /* here we use BPF pointer size, which is always 64 bit, as we
6709 * are parsing ELF that was built for BPF target
6710 */
6711 if (moff % bpf_ptr_sz)
6712 return -EINVAL;
6713 moff /= bpf_ptr_sz;
6714 if (moff >= map->init_slots_sz) {
6715 new_sz = moff + 1;
6716 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
6717 if (!tmp)
6718 return -ENOMEM;
6719 map->init_slots = tmp;
6720 memset(map->init_slots + map->init_slots_sz, 0,
6721 (new_sz - map->init_slots_sz) * host_ptr_sz);
6722 map->init_slots_sz = new_sz;
6723 }
6724 map->init_slots[moff] = targ_map;
6725
6726 pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
6727 i, map->name, moff, name);
6728 }
6729
6730 return 0;
6731}
6732
6733static int cmp_relocs(const void *_a, const void *_b)
6734{
6735 const struct reloc_desc *a = _a;
6736 const struct reloc_desc *b = _b;
6737
6738 if (a->insn_idx != b->insn_idx)
6739 return a->insn_idx < b->insn_idx ? -1 : 1;
6740
6741 /* no two relocations should have the same insn_idx, but ... */
6742 if (a->type != b->type)
6743 return a->type < b->type ? -1 : 1;
6744
6745 return 0;
6746}
6747
6748static int bpf_object__collect_relos(struct bpf_object *obj)
6749{
6750 int i, err;
6751
6752 for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
6753 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
6754 Elf_Data *data = obj->efile.reloc_sects[i].data;
6755 int idx = shdr->sh_info;
6756
6757 if (shdr->sh_type != SHT_REL) {
6758 pr_warn("internal error at %d\n", __LINE__);
6759 return -LIBBPF_ERRNO__INTERNAL;
6760 }
6761
6762 if (idx == obj->efile.st_ops_shndx)
6763 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
6764 else if (idx == obj->efile.btf_maps_shndx)
6765 err = bpf_object__collect_map_relos(obj, shdr, data);
6766 else
6767 err = bpf_object__collect_prog_relos(obj, shdr, data);
6768 if (err)
6769 return err;
6770 }
6771
6772 for (i = 0; i < obj->nr_programs; i++) {
6773 struct bpf_program *p = &obj->programs[i];
6774
6775 if (!p->nr_reloc)
6776 continue;
6777
6778 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6779 }
6780 return 0;
6781}
6782
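/* A helper call is a BPF_JMP|BPF_CALL instruction with BPF_SRC(BPF_K) and
 * both src_reg and dst_reg zeroed; its imm field carries the helper ID.
 */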
6783static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
6784{
6785 if (BPF_CLASS(insn->code) == BPF_JMP &&
6786 BPF_OP(insn->code) == BPF_CALL &&
6787 BPF_SRC(insn->code) == BPF_K &&
6788 insn->src_reg == 0 &&
6789 insn->dst_reg == 0) {
6790 *func_id = insn->imm;
6791 return true;
6792 }
6793 return false;
6794}
6795
6796static int bpf_object__sanitize_prog(struct bpf_object* obj, struct bpf_program *prog)
6797{
6798 struct bpf_insn *insn = prog->insns;
6799 enum bpf_func_id func_id;
6800 int i;
6801
6802 for (i = 0; i < prog->insns_cnt; i++, insn++) {
6803 if (!insn_is_helper_call(insn, &func_id))
6804 continue;
6805
6806 /* on kernels that don't yet support
6807 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
6808 * to bpf_probe_read() which works well for old kernels
6809 */
6810 switch (func_id) {
6811 case BPF_FUNC_probe_read_kernel:
6812 case BPF_FUNC_probe_read_user:
6813 if (!kernel_supports(FEAT_PROBE_READ_KERN))
6814 insn->imm = BPF_FUNC_probe_read;
6815 break;
6816 case BPF_FUNC_probe_read_kernel_str:
6817 case BPF_FUNC_probe_read_user_str:
6818 if (!kernel_supports(FEAT_PROBE_READ_KERN))
6819 insn->imm = BPF_FUNC_probe_read_str;
6820 break;
6821 default:
6822 break;
6823 }
6824 }
6825 return 0;
6826}
6827
6828static int
6829load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
6830 char *license, __u32 kern_version, int *pfd)
6831{
6832 struct bpf_prog_load_params load_attr = {};
6833 char *cp, errmsg[STRERR_BUFSIZE];
6834 size_t log_buf_size = 0;
6835 char *log_buf = NULL;
6836 int btf_fd, ret;
6837
6838 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
6839 /*
6840 * The program type must be set. Most likely we couldn't find a proper
6841 * section definition at load time, and thus we didn't infer the type.
6842 */
6843 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
6844 prog->name, prog->sec_name);
6845 return -EINVAL;
6846 }
6847
6848 if (!insns || !insns_cnt)
6849 return -EINVAL;
6850
6851 load_attr.prog_type = prog->type;
6852 /* old kernels might not support specifying expected_attach_type */
6853 if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
6854 prog->sec_def->is_exp_attach_type_optional)
6855 load_attr.expected_attach_type = 0;
6856 else
6857 load_attr.expected_attach_type = prog->expected_attach_type;
6858 if (kernel_supports(FEAT_PROG_NAME))
6859 load_attr.name = prog->name;
6860 load_attr.insns = insns;
6861 load_attr.insn_cnt = insns_cnt;
6862 load_attr.license = license;
6863 load_attr.attach_btf_id = prog->attach_btf_id;
6864 if (prog->attach_prog_fd)
6865 load_attr.attach_prog_fd = prog->attach_prog_fd;
6866 else
6867 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
6868 load_attr.attach_btf_id = prog->attach_btf_id;
6869 load_attr.kern_version = kern_version;
6870 load_attr.prog_ifindex = prog->prog_ifindex;
6871
6872 /* specify func_info/line_info only if kernel supports them */
6873 btf_fd = bpf_object__btf_fd(prog->obj);
6874 if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
6875 load_attr.prog_btf_fd = btf_fd;
6876 load_attr.func_info = prog->func_info;
6877 load_attr.func_info_rec_size = prog->func_info_rec_size;
6878 load_attr.func_info_cnt = prog->func_info_cnt;
6879 load_attr.line_info = prog->line_info;
6880 load_attr.line_info_rec_size = prog->line_info_rec_size;
6881 load_attr.line_info_cnt = prog->line_info_cnt;
6882 }
6883 load_attr.log_level = prog->log_level;
6884 load_attr.prog_flags = prog->prog_flags;
6885
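	/* The first load attempt is made without a verifier log buffer; on
	 * failure the load is retried with one (BPF_LOG_BUF_SIZE bytes,
	 * doubled on ENOSPC) so that the verifier log can be reported.
	 */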
6886retry_load:
6887 if (log_buf_size) {
6888 log_buf = malloc(log_buf_size);
6889 if (!log_buf)
6890 return -ENOMEM;
6891
6892 *log_buf = 0;
6893 }
6894
6895 load_attr.log_buf = log_buf;
6896 load_attr.log_buf_sz = log_buf_size;
6897 ret = libbpf__bpf_prog_load(&load_attr);
6898
6899 if (ret >= 0) {
6900 if (log_buf && load_attr.log_level)
6901 pr_debug("verifier log:\n%s", log_buf);
6902
6903 if (prog->obj->rodata_map_idx >= 0 &&
6904 kernel_supports(FEAT_PROG_BIND_MAP)) {
6905 struct bpf_map *rodata_map =
6906 &prog->obj->maps[prog->obj->rodata_map_idx];
6907
6908 if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
6909 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6910 pr_warn("prog '%s': failed to bind .rodata map: %s\n",
6911 prog->name, cp);
6912 /* Don't fail hard if can't bind rodata. */
6913 }
6914 }
6915
6916 *pfd = ret;
6917 ret = 0;
6918 goto out;
6919 }
6920
6921 if (!log_buf || errno == ENOSPC) {
6922 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
6923 log_buf_size << 1);
6924
6925 free(log_buf);
6926 goto retry_load;
6927 }
6928 ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
6929 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6930 pr_warn("load bpf program failed: %s\n", cp);
6931 pr_perm_msg(ret);
6932
6933 if (log_buf && log_buf[0] != '\0') {
6934 ret = -LIBBPF_ERRNO__VERIFY;
6935 pr_warn("-- BEGIN DUMP LOG ---\n");
6936 pr_warn("\n%s\n", log_buf);
6937 pr_warn("-- END LOG --\n");
6938 } else if (load_attr.insn_cnt >= BPF_MAXINSNS) {
6939 pr_warn("Program too large (%zu insns), at most %d insns\n",
6940 load_attr.insn_cnt, BPF_MAXINSNS);
6941 ret = -LIBBPF_ERRNO__PROG2BIG;
6942 } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
6943 /* Wrong program type? */
6944 int fd;
6945
6946 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
6947 load_attr.expected_attach_type = 0;
6948 load_attr.log_buf = NULL;
6949 load_attr.log_buf_sz = 0;
6950 fd = libbpf__bpf_prog_load(&load_attr);
6951 if (fd >= 0) {
6952 close(fd);
6953 ret = -LIBBPF_ERRNO__PROGTYPE;
6954 goto out;
6955 }
6956 }
6957
6958out:
6959 free(log_buf);
6960 return ret;
6961}
6962
6963static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
6964
6965int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
6966{
6967 int err = 0, fd, i;
6968
6969 if (prog->obj->loaded) {
6970 pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
6971 return -EINVAL;
6972 }
6973
6974 if ((prog->type == BPF_PROG_TYPE_TRACING ||
6975 prog->type == BPF_PROG_TYPE_LSM ||
6976 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
6977 int btf_obj_fd = 0, btf_type_id = 0;
6978
6979 err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
6980 if (err)
6981 return err;
6982
6983 prog->attach_btf_obj_fd = btf_obj_fd;
6984 prog->attach_btf_id = btf_type_id;
6985 }
6986
6987 if (prog->instances.nr < 0 || !prog->instances.fds) {
6988 if (prog->preprocessor) {
6989 pr_warn("Internal error: can't load program '%s'\n",
6990 prog->name);
6991 return -LIBBPF_ERRNO__INTERNAL;
6992 }
6993
6994 prog->instances.fds = malloc(sizeof(int));
6995 if (!prog->instances.fds) {
6996 pr_warn("Not enough memory for BPF fds\n");
6997 return -ENOMEM;
6998 }
6999 prog->instances.nr = 1;
7000 prog->instances.fds[0] = -1;
7001 }
7002
7003 if (!prog->preprocessor) {
7004 if (prog->instances.nr != 1) {
7005 pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
7006 prog->name, prog->instances.nr);
7007 }
7008 err = load_program(prog, prog->insns, prog->insns_cnt,
7009 license, kern_ver, &fd);
7010 if (!err)
7011 prog->instances.fds[0] = fd;
7012 goto out;
7013 }
7014
7015 for (i = 0; i < prog->instances.nr; i++) {
7016 struct bpf_prog_prep_result result;
7017 bpf_program_prep_t preprocessor = prog->preprocessor;
7018
7019 memset(&result, 0, sizeof(result));
7020 err = preprocessor(prog, i, prog->insns,
7021 prog->insns_cnt, &result);
7022 if (err) {
7023 pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
7024 i, prog->name);
7025 goto out;
7026 }
7027
7028 if (!result.new_insn_ptr || !result.new_insn_cnt) {
7029 pr_debug("Skip loading the %dth instance of program '%s'\n",
7030 i, prog->name);
7031 prog->instances.fds[i] = -1;
7032 if (result.pfd)
7033 *result.pfd = -1;
7034 continue;
7035 }
7036
7037 err = load_program(prog, result.new_insn_ptr,
7038 result.new_insn_cnt, license, kern_ver, &fd);
7039 if (err) {
7040 pr_warn("Loading the %dth instance of program '%s' failed\n",
7041 i, prog->name);
7042 goto out;
7043 }
7044
7045 if (result.pfd)
7046 *result.pfd = fd;
7047 prog->instances.fds[i] = fd;
7048 }
7049out:
7050 if (err)
7051 pr_warn("failed to load program '%s'\n", prog->name);
7052 zfree(&prog->insns);
7053 prog->insns_cnt = 0;
7054 return err;
7055}
7056
7057static int
7058bpf_object__load_progs(struct bpf_object *obj, int log_level)
7059{
7060 struct bpf_program *prog;
7061 size_t i;
7062 int err;
7063
7064 for (i = 0; i < obj->nr_programs; i++) {
7065 prog = &obj->programs[i];
7066 err = bpf_object__sanitize_prog(obj, prog);
7067 if (err)
7068 return err;
7069 }
7070
7071 for (i = 0; i < obj->nr_programs; i++) {
7072 prog = &obj->programs[i];
7073 if (prog_is_subprog(obj, prog))
7074 continue;
7075 if (!prog->load) {
7076 pr_debug("prog '%s': skipped loading\n", prog->name);
7077 continue;
7078 }
7079 prog->log_level |= log_level;
7080 err = bpf_program__load(prog, obj->license, obj->kern_version);
7081 if (err)
7082 return err;
7083 }
7084 return 0;
7085}
7086
7087static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7088
7089static struct bpf_object *
7090__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7091 const struct bpf_object_open_opts *opts)
7092{
7093 const char *obj_name, *kconfig;
7094 struct bpf_program *prog;
7095 struct bpf_object *obj;
7096 char tmp_name[64];
7097 int err;
7098
7099 if (elf_version(EV_CURRENT) == EV_NONE) {
7100 pr_warn("failed to init libelf for %s\n",
7101 path ? : "(mem buf)");
7102 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7103 }
7104
7105 if (!OPTS_VALID(opts, bpf_object_open_opts))
7106 return ERR_PTR(-EINVAL);
7107
7108 obj_name = OPTS_GET(opts, object_name, NULL);
7109 if (obj_buf) {
7110 if (!obj_name) {
7111 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7112 (unsigned long)obj_buf,
7113 (unsigned long)obj_buf_sz);
7114 obj_name = tmp_name;
7115 }
7116 path = obj_name;
7117 pr_debug("loading object '%s' from buffer\n", obj_name);
7118 }
7119
7120 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7121 if (IS_ERR(obj))
7122 return obj;
7123
7124 kconfig = OPTS_GET(opts, kconfig, NULL);
7125 if (kconfig) {
7126 obj->kconfig = strdup(kconfig);
7127 if (!obj->kconfig)
7128 return ERR_PTR(-ENOMEM);
7129 }
7130
7131 err = bpf_object__elf_init(obj);
7132 err = err ? : bpf_object__check_endianness(obj);
7133 err = err ? : bpf_object__elf_collect(obj);
7134 err = err ? : bpf_object__collect_externs(obj);
7135 err = err ? : bpf_object__finalize_btf(obj);
7136 err = err ? : bpf_object__init_maps(obj, opts);
7137 err = err ? : bpf_object__collect_relos(obj);
7138 if (err)
7139 goto out;
7140 bpf_object__elf_finish(obj);
7141
7142 bpf_object__for_each_program(prog, obj) {
7143 prog->sec_def = find_sec_def(prog->sec_name);
7144 if (!prog->sec_def) {
7145 /* couldn't guess, but user might manually specify */
7146 pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7147 prog->name, prog->sec_name);
7148 continue;
7149 }
7150
7151 if (prog->sec_def->is_sleepable)
7152 prog->prog_flags |= BPF_F_SLEEPABLE;
7153 bpf_program__set_type(prog, prog->sec_def->prog_type);
7154 bpf_program__set_expected_attach_type(prog,
7155 prog->sec_def->expected_attach_type);
7156
7157 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
7158 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
7159 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
7160 }
7161
7162 return obj;
7163out:
7164 bpf_object__close(obj);
7165 return ERR_PTR(err);
7166}
7167
7168static struct bpf_object *
7169__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
7170{
7171 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7172 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
7173 );
7174
7175 /* param validation */
7176 if (!attr->file)
7177 return NULL;
7178
7179 pr_debug("loading %s\n", attr->file);
7180 return __bpf_object__open(attr->file, NULL, 0, &opts);
7181}
7182
7183struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
7184{
7185 return __bpf_object__open_xattr(attr, 0);
7186}
7187
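/* Typical usage sketch (error handling abbreviated; the object file name is
 * illustrative):
 *
 *	struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 *
 *	if (libbpf_get_error(obj))
 *		return -1;
 *	if (bpf_object__load(obj)) {
 *		bpf_object__close(obj);
 *		return -1;
 *	}
 *	...
 *	bpf_object__close(obj);
 */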
7188struct bpf_object *bpf_object__open(const char *path)
7189{
7190 struct bpf_object_open_attr attr = {
7191 .file = path,
7192 .prog_type = BPF_PROG_TYPE_UNSPEC,
7193 };
7194
7195 return bpf_object__open_xattr(&attr);
7196}
7197
7198struct bpf_object *
7199bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7200{
7201 if (!path)
7202 return ERR_PTR(-EINVAL);
7203
7204 pr_debug("loading %s\n", path);
7205
7206 return __bpf_object__open(path, NULL, 0, opts);
7207}
7208
7209struct bpf_object *
7210bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7211 const struct bpf_object_open_opts *opts)
7212{
7213 if (!obj_buf || obj_buf_sz == 0)
7214 return ERR_PTR(-EINVAL);
7215
7216 return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
7217}
7218
7219struct bpf_object *
7220bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
7221 const char *name)
7222{
7223 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7224 .object_name = name,
7225 /* wrong default, but backwards-compatible */
7226 .relaxed_maps = true,
7227 );
7228
7229 /* returning NULL is wrong, but backwards-compatible */
7230 if (!obj_buf || obj_buf_sz == 0)
7231 return NULL;
7232
7233 return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
7234}
7235
7236int bpf_object__unload(struct bpf_object *obj)
7237{
7238 size_t i;
7239
7240 if (!obj)
7241 return -EINVAL;
7242
7243 for (i = 0; i < obj->nr_maps; i++) {
7244 zclose(obj->maps[i].fd);
7245 if (obj->maps[i].st_ops)
7246 zfree(&obj->maps[i].st_ops->kern_vdata);
7247 }
7248
7249 for (i = 0; i < obj->nr_programs; i++)
7250 bpf_program__unload(&obj->programs[i]);
7251
7252 return 0;
7253}
7254
7255static int bpf_object__sanitize_maps(struct bpf_object *obj)
7256{
7257 struct bpf_map *m;
7258
7259 bpf_object__for_each_map(m, obj) {
7260 if (!bpf_map__is_internal(m))
7261 continue;
7262 if (!kernel_supports(FEAT_GLOBAL_DATA)) {
7263 pr_warn("kernel doesn't support global data\n");
7264 return -ENOTSUP;
7265 }
7266 if (!kernel_supports(FEAT_ARRAY_MMAP))
7267 m->def.map_flags ^= BPF_F_MMAPABLE;
7268 }
7269
7270 return 0;
7271}
7272
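/* Parse /proc/kallsyms (lines of the form "<addr> <type> <symbol>") to
 * resolve typeless ksym externs to their kernel addresses.
 */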
7273static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
7274{
7275 char sym_type, sym_name[500];
7276 unsigned long long sym_addr;
7277 struct extern_desc *ext;
7278 int ret, err = 0;
7279 FILE *f;
7280
7281 f = fopen("/proc/kallsyms", "r");
7282 if (!f) {
7283 err = -errno;
7284 pr_warn("failed to open /proc/kallsyms: %d\n", err);
7285 return err;
7286 }
7287
7288 while (true) {
7289 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7290 &sym_addr, &sym_type, sym_name);
7291 if (ret == EOF && feof(f))
7292 break;
7293 if (ret != 3) {
7294 pr_warn("failed to read kallsyms entry: %d\n", ret);
7295 err = -EINVAL;
7296 goto out;
7297 }
7298
7299 ext = find_extern_by_name(obj, sym_name);
7300 if (!ext || ext->type != EXT_KSYM)
7301 continue;
7302
7303 if (ext->is_set && ext->ksym.addr != sym_addr) {
7304 pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
7305 sym_name, ext->ksym.addr, sym_addr);
7306 err = -EINVAL;
7307 goto out;
7308 }
7309 if (!ext->is_set) {
7310 ext->is_set = true;
7311 ext->ksym.addr = sym_addr;
7312 pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
7313 }
7314 }
7315
7316out:
7317 fclose(f);
7318 return err;
7319}
7320
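/* Resolve typed ksym externs: look up a VAR with the extern's name in vmlinux
 * BTF (or module BTFs), verify type compatibility against the local BTF, and
 * record the target BTF object FD and type ID for the later
 * BPF_PSEUDO_BTF_ID ldimm64 relocation.
 */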
7321static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
7322{
7323 struct extern_desc *ext;
7324 struct btf *btf;
7325 int i, j, id, btf_fd, err;
7326
7327 for (i = 0; i < obj->nr_extern; i++) {
7328 const struct btf_type *targ_var, *targ_type;
7329 __u32 targ_type_id, local_type_id;
7330 const char *targ_var_name;
7331 int ret;
7332
7333 ext = &obj->externs[i];
7334 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
7335 continue;
7336
7337 btf = obj->btf_vmlinux;
7338 btf_fd = 0;
7339 id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR);
7340 if (id == -ENOENT) {
7341 err = load_module_btfs(obj);
7342 if (err)
7343 return err;
7344
7345 for (j = 0; j < obj->btf_module_cnt; j++) {
7346 btf = obj->btf_modules[j].btf;
7347 /* we assume module BTF FD is always >0 */
7348 btf_fd = obj->btf_modules[j].fd;
7349 id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR);
7350 if (id != -ENOENT)
7351 break;
7352 }
7353 }
7354 if (id <= 0) {
7355 pr_warn("extern (ksym) '%s': failed to find BTF ID in kernel BTF(s).\n",
7356 ext->name);
7357 return -ESRCH;
7358 }
7359
7360 /* find local type_id */
7361 local_type_id = ext->ksym.type_id;
7362
7363 /* find target type_id */
7364 targ_var = btf__type_by_id(btf, id);
7365 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
7366 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
7367
7368 ret = bpf_core_types_are_compat(obj->btf, local_type_id,
7369 btf, targ_type_id);
7370 if (ret <= 0) {
7371 const struct btf_type *local_type;
7372 const char *targ_name, *local_name;
7373
7374 local_type = btf__type_by_id(obj->btf, local_type_id);
7375 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
7376 targ_name = btf__name_by_offset(btf, targ_type->name_off);
7377
7378 pr_warn("extern (ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
7379 ext->name, local_type_id,
7380 btf_kind_str(local_type), local_name, targ_type_id,
7381 btf_kind_str(targ_type), targ_name);
7382 return -EINVAL;
7383 }
7384
7385 ext->is_set = true;
7386 ext->ksym.kernel_btf_obj_fd = btf_fd;
7387 ext->ksym.kernel_btf_id = id;
7388 pr_debug("extern (ksym) '%s': resolved to [%d] %s %s\n",
7389 ext->name, id, btf_kind_str(targ_var), targ_var_name);
7390 }
7391 return 0;
7392}
7393
7394static int bpf_object__resolve_externs(struct bpf_object *obj,
7395 const char *extra_kconfig)
7396{
7397 bool need_config = false, need_kallsyms = false;
7398 bool need_vmlinux_btf = false;
7399 struct extern_desc *ext;
7400 void *kcfg_data = NULL;
7401 int err, i;
7402
7403 if (obj->nr_extern == 0)
7404 return 0;
7405
7406 if (obj->kconfig_map_idx >= 0)
7407 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
7408
7409 for (i = 0; i < obj->nr_extern; i++) {
7410 ext = &obj->externs[i];
7411
7412 if (ext->type == EXT_KCFG &&
7413 strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
7414 void *ext_val = kcfg_data + ext->kcfg.data_off;
7415 __u32 kver = get_kernel_version();
7416
7417 if (!kver) {
7418 pr_warn("failed to get kernel version\n");
7419 return -EINVAL;
7420 }
7421 err = set_kcfg_value_num(ext, ext_val, kver);
7422 if (err)
7423 return err;
7424 pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
7425 } else if (ext->type == EXT_KCFG &&
7426 strncmp(ext->name, "CONFIG_", 7) == 0) {
7427 need_config = true;
7428 } else if (ext->type == EXT_KSYM) {
7429 if (ext->ksym.type_id)
7430 need_vmlinux_btf = true;
7431 else
7432 need_kallsyms = true;
7433 } else {
7434 pr_warn("unrecognized extern '%s'\n", ext->name);
7435 return -EINVAL;
7436 }
7437 }
7438 if (need_config && extra_kconfig) {
7439 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
7440 if (err)
7441 return -EINVAL;
7442 need_config = false;
7443 for (i = 0; i < obj->nr_extern; i++) {
7444 ext = &obj->externs[i];
7445 if (ext->type == EXT_KCFG && !ext->is_set) {
7446 need_config = true;
7447 break;
7448 }
7449 }
7450 }
7451 if (need_config) {
7452 err = bpf_object__read_kconfig_file(obj, kcfg_data);
7453 if (err)
7454 return -EINVAL;
7455 }
7456 if (need_kallsyms) {
7457 err = bpf_object__read_kallsyms_file(obj);
7458 if (err)
7459 return -EINVAL;
7460 }
7461 if (need_vmlinux_btf) {
7462 err = bpf_object__resolve_ksyms_btf_id(obj);
7463 if (err)
7464 return -EINVAL;
7465 }
7466 for (i = 0; i < obj->nr_extern; i++) {
7467 ext = &obj->externs[i];
7468
7469 if (!ext->is_set && !ext->is_weak) {
7470 pr_warn("extern %s (strong) not resolved\n", ext->name);
7471 return -ESRCH;
7472 } else if (!ext->is_set) {
7473 pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
7474 ext->name);
7475 }
7476 }
7477
7478 return 0;
7479}
7480
7481int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
7482{
7483 struct bpf_object *obj;
7484 int err, i;
7485
7486 if (!attr)
7487 return -EINVAL;
7488 obj = attr->obj;
7489 if (!obj)
7490 return -EINVAL;
7491
7492 if (obj->loaded) {
7493 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
7494 return -EINVAL;
7495 }
7496
7497 err = bpf_object__probe_loading(obj);
7498 err = err ? : bpf_object__load_vmlinux_btf(obj, false);
7499 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
7500 err = err ? : bpf_object__sanitize_and_load_btf(obj);
7501 err = err ? : bpf_object__sanitize_maps(obj);
7502 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
7503 err = err ? : bpf_object__create_maps(obj);
7504 err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
7505 err = err ? : bpf_object__load_progs(obj, attr->log_level);
7506
7507 /* clean up module BTFs */
7508 for (i = 0; i < obj->btf_module_cnt; i++) {
7509 close(obj->btf_modules[i].fd);
7510 btf__free(obj->btf_modules[i].btf);
7511 free(obj->btf_modules[i].name);
7512 }
7513 free(obj->btf_modules);
7514
7515 /* clean up vmlinux BTF */
7516 btf__free(obj->btf_vmlinux);
7517 obj->btf_vmlinux = NULL;
7518
7519 obj->loaded = true; /* doesn't matter if successfully or not */
7520
7521 if (err)
7522 goto out;
7523
7524 return 0;
7525out:
7526 /* unpin any maps that were auto-pinned during load */
7527 for (i = 0; i < obj->nr_maps; i++)
7528 if (obj->maps[i].pinned && !obj->maps[i].reused)
7529 bpf_map__unpin(&obj->maps[i], NULL);
7530
7531 bpf_object__unload(obj);
7532 pr_warn("failed to load object '%s'\n", obj->path);
7533 return err;
7534}
7535
7536int bpf_object__load(struct bpf_object *obj)
7537{
7538 struct bpf_object_load_attr attr = {
7539 .obj = obj,
7540 };
7541
7542 return bpf_object__load_xattr(&attr);
7543}
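/* A minimal usage sketch (hypothetical caller code, error handling trimmed):
 * open a BPF object file, load everything into the kernel, and close it on
 * failure:
 *
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open("prog.o");
 *	if (libbpf_get_error(obj))
 *		return -1;
 *	if (bpf_object__load(obj)) {
 *		bpf_object__close(obj);
 *		return -1;
 *	}
 */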
7544
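/* Create the parent directory of a prospective pin path. An already existing
 * directory is not treated as an error.
 */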
7545static int make_parent_dir(const char *path)
7546{
7547 char *cp, errmsg[STRERR_BUFSIZE];
7548 char *dname, *dir;
7549 int err = 0;
7550
7551 dname = strdup(path);
7552 if (dname == NULL)
7553 return -ENOMEM;
7554
7555 dir = dirname(dname);
7556 if (mkdir(dir, 0700) && errno != EEXIST)
7557 err = -errno;
7558
7559 free(dname);
7560 if (err) {
7561 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7562 pr_warn("failed to mkdir %s: %s\n", path, cp);
7563 }
7564 return err;
7565}
7566
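/* Verify that the parent directory of a pin path is on a BPF FS mount;
 * pinning only works on bpffs.
 */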
7567static int check_path(const char *path)
7568{
7569 char *cp, errmsg[STRERR_BUFSIZE];
7570 struct statfs st_fs;
7571 char *dname, *dir;
7572 int err = 0;
7573
7574 if (path == NULL)
7575 return -EINVAL;
7576
7577 dname = strdup(path);
7578 if (dname == NULL)
7579 return -ENOMEM;
7580
7581 dir = dirname(dname);
7582 if (statfs(dir, &st_fs)) {
7583 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7584 pr_warn("failed to statfs %s: %s\n", dir, cp);
7585 err = -errno;
7586 }
7587 free(dname);
7588
7589 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
7590 pr_warn("specified path %s is not on BPF FS\n", path);
7591 err = -EINVAL;
7592 }
7593
7594 return err;
7595}
7596
7597int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
7598 int instance)
7599{
7600 char *cp, errmsg[STRERR_BUFSIZE];
7601 int err;
7602
7603 err = make_parent_dir(path);
7604 if (err)
7605 return err;
7606
7607 err = check_path(path);
7608 if (err)
7609 return err;
7610
7611 if (prog == NULL) {
7612 pr_warn("invalid program pointer\n");
7613 return -EINVAL;
7614 }
7615
7616 if (instance < 0 || instance >= prog->instances.nr) {
7617 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7618 instance, prog->name, prog->instances.nr);
7619 return -EINVAL;
7620 }
7621
7622 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
7623 err = -errno;
7624 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
7625 pr_warn("failed to pin program: %s\n", cp);
7626 return err;
7627 }
7628 pr_debug("pinned program '%s'\n", path);
7629
7630 return 0;
7631}
7632
7633int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
7634 int instance)
7635{
7636 int err;
7637
7638 err = check_path(path);
7639 if (err)
7640 return err;
7641
7642 if (prog == NULL) {
7643 pr_warn("invalid program pointer\n");
7644 return -EINVAL;
7645 }
7646
7647 if (instance < 0 || instance >= prog->instances.nr) {
7648 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7649 instance, prog->name, prog->instances.nr);
7650 return -EINVAL;
7651 }
7652
7653 err = unlink(path);
7654 if (err != 0)
7655 return -errno;
7656 pr_debug("unpinned program '%s'\n", path);
7657
7658 return 0;
7659}
7660
7661int bpf_program__pin(struct bpf_program *prog, const char *path)
7662{
7663 int i, err;
7664
7665 err = make_parent_dir(path);
7666 if (err)
7667 return err;
7668
7669 err = check_path(path);
7670 if (err)
7671 return err;
7672
7673 if (prog == NULL) {
7674 pr_warn("invalid program pointer\n");
7675 return -EINVAL;
7676 }
7677
7678 if (prog->instances.nr <= 0) {
7679 pr_warn("no instances of prog %s to pin\n", prog->name);
7680 return -EINVAL;
7681 }
7682
7683 if (prog->instances.nr == 1) {
7684 /* don't create subdirs when pinning single instance */
7685 return bpf_program__pin_instance(prog, path, 0);
7686 }
7687
7688 for (i = 0; i < prog->instances.nr; i++) {
7689 char buf[PATH_MAX];
7690 int len;
7691
7692 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7693 if (len < 0) {
7694 err = -EINVAL;
7695 goto err_unpin;
7696 } else if (len >= PATH_MAX) {
7697 err = -ENAMETOOLONG;
7698 goto err_unpin;
7699 }
7700
7701 err = bpf_program__pin_instance(prog, buf, i);
7702 if (err)
7703 goto err_unpin;
7704 }
7705
7706 return 0;
7707
7708err_unpin:
7709 for (i = i - 1; i >= 0; i--) {
7710 char buf[PATH_MAX];
7711 int len;
7712
7713 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7714 if (len < 0)
7715 continue;
7716 else if (len >= PATH_MAX)
7717 continue;
7718
7719 bpf_program__unpin_instance(prog, buf, i);
7720 }
7721
7722 rmdir(path);
7723
7724 return err;
7725}
7726
7727int bpf_program__unpin(struct bpf_program *prog, const char *path)
7728{
7729 int i, err;
7730
7731 err = check_path(path);
7732 if (err)
7733 return err;
7734
7735 if (prog == NULL) {
7736 pr_warn("invalid program pointer\n");
7737 return -EINVAL;
7738 }
7739
7740 if (prog->instances.nr <= 0) {
7741 pr_warn("no instances of prog %s to unpin\n", prog->name);
7742 return -EINVAL;
7743 }
7744
7745 if (prog->instances.nr == 1) {
7746 /* don't create subdirs when pinning single instance */
7747 return bpf_program__unpin_instance(prog, path, 0);
7748 }
7749
7750 for (i = 0; i < prog->instances.nr; i++) {
7751 char buf[PATH_MAX];
7752 int len;
7753
7754 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7755 if (len < 0)
7756 return -EINVAL;
7757 else if (len >= PATH_MAX)
7758 return -ENAMETOOLONG;
7759
7760 err = bpf_program__unpin_instance(prog, buf, i);
7761 if (err)
7762 return err;
7763 }
7764
7765 err = rmdir(path);
7766 if (err)
7767 return -errno;
7768
7769 return 0;
7770}
7771
7772int bpf_map__pin(struct bpf_map *map, const char *path)
7773{
7774 char *cp, errmsg[STRERR_BUFSIZE];
7775 int err;
7776
7777 if (map == NULL) {
7778 pr_warn("invalid map pointer\n");
7779 return -EINVAL;
7780 }
7781
7782 if (map->pin_path) {
7783 if (path && strcmp(path, map->pin_path)) {
7784 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7785 bpf_map__name(map), map->pin_path, path);
7786 return -EINVAL;
7787 } else if (map->pinned) {
7788 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
7789 bpf_map__name(map), map->pin_path);
7790 return 0;
7791 }
7792 } else {
7793 if (!path) {
7794 pr_warn("missing a path to pin map '%s' at\n",
7795 bpf_map__name(map));
7796 return -EINVAL;
7797 } else if (map->pinned) {
7798 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
7799 return -EEXIST;
7800 }
7801
7802 map->pin_path = strdup(path);
7803 if (!map->pin_path) {
7804 err = -errno;
7805 goto out_err;
7806 }
7807 }
7808
7809 err = make_parent_dir(map->pin_path);
7810 if (err)
7811 return err;
7812
7813 err = check_path(map->pin_path);
7814 if (err)
7815 return err;
7816
7817 if (bpf_obj_pin(map->fd, map->pin_path)) {
7818 err = -errno;
7819 goto out_err;
7820 }
7821
7822 map->pinned = true;
7823 pr_debug("pinned map '%s'\n", map->pin_path);
7824
7825 return 0;
7826
7827out_err:
7828 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7829 pr_warn("failed to pin map: %s\n", cp);
7830 return err;
7831}
7832
7833int bpf_map__unpin(struct bpf_map *map, const char *path)
7834{
7835 int err;
7836
7837 if (map == NULL) {
7838 pr_warn("invalid map pointer\n");
7839 return -EINVAL;
7840 }
7841
7842 if (map->pin_path) {
7843 if (path && strcmp(path, map->pin_path)) {
7844 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7845 bpf_map__name(map), map->pin_path, path);
7846 return -EINVAL;
7847 }
7848 path = map->pin_path;
7849 } else if (!path) {
7850 pr_warn("no path to unpin map '%s' from\n",
7851 bpf_map__name(map));
7852 return -EINVAL;
7853 }
7854
7855 err = check_path(path);
7856 if (err)
7857 return err;
7858
7859 err = unlink(path);
7860 if (err != 0)
7861 return -errno;
7862
7863 map->pinned = false;
7864 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
7865
7866 return 0;
7867}
7868
7869int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
7870{
7871 char *new = NULL;
7872
7873 if (path) {
7874 new = strdup(path);
7875 if (!new)
7876 return -errno;
7877 }
7878
7879 free(map->pin_path);
7880 map->pin_path = new;
7881 return 0;
7882}
7883
7884const char *bpf_map__get_pin_path(const struct bpf_map *map)
7885{
7886 return map->pin_path;
7887}
7888
7889bool bpf_map__is_pinned(const struct bpf_map *map)
7890{
7891 return map->pinned;
7892}
7893
7894static void sanitize_pin_path(char *s)
7895{
7896 /* bpffs disallows periods in path names */
7897 while (*s) {
7898 if (*s == '.')
7899 *s = '_';
7900 s++;
7901 }
7902}
7903
7904int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
7905{
7906 struct bpf_map *map;
7907 int err;
7908
7909 if (!obj)
7910 return -ENOENT;
7911
7912 if (!obj->loaded) {
7913 pr_warn("object not yet loaded; load it first\n");
7914 return -ENOENT;
7915 }
7916
7917 bpf_object__for_each_map(map, obj) {
7918 char *pin_path = NULL;
7919 char buf[PATH_MAX];
7920
7921 if (path) {
7922 int len;
7923
7924 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7925 bpf_map__name(map));
7926 if (len < 0) {
7927 err = -EINVAL;
7928 goto err_unpin_maps;
7929 } else if (len >= PATH_MAX) {
7930 err = -ENAMETOOLONG;
7931 goto err_unpin_maps;
7932 }
7933 sanitize_pin_path(buf);
7934 pin_path = buf;
7935 } else if (!map->pin_path) {
7936 continue;
7937 }
7938
7939 err = bpf_map__pin(map, pin_path);
7940 if (err)
7941 goto err_unpin_maps;
7942 }
7943
7944 return 0;
7945
7946err_unpin_maps:
7947 while ((map = bpf_map__prev(map, obj))) {
7948 if (!map->pin_path)
7949 continue;
7950
7951 bpf_map__unpin(map, NULL);
7952 }
7953
7954 return err;
7955}
7956
7957int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
7958{
7959 struct bpf_map *map;
7960 int err;
7961
7962 if (!obj)
7963 return -ENOENT;
7964
7965 bpf_object__for_each_map(map, obj) {
7966 char *pin_path = NULL;
7967 char buf[PATH_MAX];
7968
7969 if (path) {
7970 int len;
7971
7972 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7973 bpf_map__name(map));
7974 if (len < 0)
7975 return -EINVAL;
7976 else if (len >= PATH_MAX)
7977 return -ENAMETOOLONG;
7978 sanitize_pin_path(buf);
7979 pin_path = buf;
7980 } else if (!map->pin_path) {
7981 continue;
7982 }
7983
7984 err = bpf_map__unpin(map, pin_path);
7985 if (err)
7986 return err;
7987 }
7988
7989 return 0;
7990}
7991
7992int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
7993{
7994 struct bpf_program *prog;
7995 int err;
7996
7997 if (!obj)
7998 return -ENOENT;
7999
8000 if (!obj->loaded) {
8001 pr_warn("object not yet loaded; load it first\n");
8002 return -ENOENT;
8003 }
8004
8005 bpf_object__for_each_program(prog, obj) {
8006 char buf[PATH_MAX];
8007 int len;
8008
8009 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8010 prog->pin_name);
8011 if (len < 0) {
8012 err = -EINVAL;
8013 goto err_unpin_programs;
8014 } else if (len >= PATH_MAX) {
8015 err = -ENAMETOOLONG;
8016 goto err_unpin_programs;
8017 }
8018
8019 err = bpf_program__pin(prog, buf);
8020 if (err)
8021 goto err_unpin_programs;
8022 }
8023
8024 return 0;
8025
8026err_unpin_programs:
8027 while ((prog = bpf_program__prev(prog, obj))) {
8028 char buf[PATH_MAX];
8029 int len;
8030
8031 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8032 prog->pin_name);
8033 if (len < 0)
8034 continue;
8035 else if (len >= PATH_MAX)
8036 continue;
8037
8038 bpf_program__unpin(prog, buf);
8039 }
8040
8041 return err;
8042}
8043
8044int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8045{
8046 struct bpf_program *prog;
8047 int err;
8048
8049 if (!obj)
8050 return -ENOENT;
8051
8052 bpf_object__for_each_program(prog, obj) {
8053 char buf[PATH_MAX];
8054 int len;
8055
8056 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8057 prog->pin_name);
8058 if (len < 0)
8059 return -EINVAL;
8060 else if (len >= PATH_MAX)
8061 return -ENAMETOOLONG;
8062
8063 err = bpf_program__unpin(prog, buf);
8064 if (err)
8065 return err;
8066 }
8067
8068 return 0;
8069}
8070
8071int bpf_object__pin(struct bpf_object *obj, const char *path)
8072{
8073 int err;
8074
8075 err = bpf_object__pin_maps(obj, path);
8076 if (err)
8077 return err;
8078
8079 err = bpf_object__pin_programs(obj, path);
8080 if (err) {
8081 bpf_object__unpin_maps(obj, path);
8082 return err;
8083 }
8084
8085 return 0;
8086}
8087
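/* Invoke the user's clear_priv callback and release everything owned by a
 * map: inner map definition, mmap'ed data area, struct_ops state, name,
 * pin path, and the map FD.
 */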
8088static void bpf_map__destroy(struct bpf_map *map)
8089{
8090 if (map->clear_priv)
8091 map->clear_priv(map, map->priv);
8092 map->priv = NULL;
8093 map->clear_priv = NULL;
8094
8095 if (map->inner_map) {
8096 bpf_map__destroy(map->inner_map);
8097 zfree(&map->inner_map);
8098 }
8099
8100 zfree(&map->init_slots);
8101 map->init_slots_sz = 0;
8102
8103 if (map->mmaped) {
8104 munmap(map->mmaped, bpf_map_mmap_sz(map));
8105 map->mmaped = NULL;
8106 }
8107
8108 if (map->st_ops) {
8109 zfree(&map->st_ops->data);
8110 zfree(&map->st_ops->progs);
8111 zfree(&map->st_ops->kern_func_off);
8112 zfree(&map->st_ops);
8113 }
8114
8115 zfree(&map->name);
8116 zfree(&map->pin_path);
8117
8118 if (map->fd >= 0)
8119 zclose(map->fd);
8120}
8121
8122void bpf_object__close(struct bpf_object *obj)
8123{
8124 size_t i;
8125
8126 if (IS_ERR_OR_NULL(obj))
8127 return;
8128
8129 if (obj->clear_priv)
8130 obj->clear_priv(obj, obj->priv);
8131
8132 bpf_object__elf_finish(obj);
8133 bpf_object__unload(obj);
8134 btf__free(obj->btf);
8135 btf_ext__free(obj->btf_ext);
8136
8137 for (i = 0; i < obj->nr_maps; i++)
8138 bpf_map__destroy(&obj->maps[i]);
8139
8140 zfree(&obj->kconfig);
8141 zfree(&obj->externs);
8142 obj->nr_extern = 0;
8143
8144 zfree(&obj->maps);
8145 obj->nr_maps = 0;
8146
8147 if (obj->programs && obj->nr_programs) {
8148 for (i = 0; i < obj->nr_programs; i++)
8149 bpf_program__exit(&obj->programs[i]);
8150 }
8151 zfree(&obj->programs);
8152
8153 list_del(&obj->list);
8154 free(obj);
8155}
8156
8157struct bpf_object *
8158bpf_object__next(struct bpf_object *prev)
8159{
8160 struct bpf_object *next;
8161
8162 if (!prev)
8163 next = list_first_entry(&bpf_objects_list,
8164 struct bpf_object,
8165 list);
8166 else
8167 next = list_next_entry(prev, list);
8168
8169 /* An empty list is detected here, so no check is needed on entry. */
8170 if (&next->list == &bpf_objects_list)
8171 return NULL;
8172
8173 return next;
8174}
8175
8176const char *bpf_object__name(const struct bpf_object *obj)
8177{
8178 return obj ? obj->name : ERR_PTR(-EINVAL);
8179}
8180
8181unsigned int bpf_object__kversion(const struct bpf_object *obj)
8182{
8183 return obj ? obj->kern_version : 0;
8184}
8185
8186struct btf *bpf_object__btf(const struct bpf_object *obj)
8187{
8188 return obj ? obj->btf : NULL;
8189}
8190
8191int bpf_object__btf_fd(const struct bpf_object *obj)
8192{
8193 return obj->btf ? btf__fd(obj->btf) : -1;
8194}
8195
8196int bpf_object__set_priv(struct bpf_object *obj, void *priv,
8197 bpf_object_clear_priv_t clear_priv)
8198{
8199 if (obj->priv && obj->clear_priv)
8200 obj->clear_priv(obj, obj->priv);
8201
8202 obj->priv = priv;
8203 obj->clear_priv = clear_priv;
8204 return 0;
8205}
8206
8207void *bpf_object__priv(const struct bpf_object *obj)
8208{
8209 return obj ? obj->priv : ERR_PTR(-EINVAL);
8210}
8211
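/* Step one program forward or backward from 'p' within obj->programs.
 * A NULL 'p' starts iteration from the first (or last) program; NULL is
 * returned once either end is passed.
 */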
8212static struct bpf_program *
8213__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8214 bool forward)
8215{
8216 size_t nr_programs = obj->nr_programs;
8217 ssize_t idx;
8218
8219 if (!nr_programs)
8220 return NULL;
8221
8222 if (!p)
8223 /* Iter from the beginning */
8224 return forward ? &obj->programs[0] :
8225 &obj->programs[nr_programs - 1];
8226
8227 if (p->obj != obj) {
8228 pr_warn("error: program handle doesn't match object\n");
8229 return NULL;
8230 }
8231
8232 idx = (p - obj->programs) + (forward ? 1 : -1);
8233 if (idx >= obj->nr_programs || idx < 0)
8234 return NULL;
8235 return &obj->programs[idx];
8236}
8237
8238struct bpf_program *
8239bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
8240{
8241 struct bpf_program *prog = prev;
8242
8243 do {
8244 prog = __bpf_program__iter(prog, obj, true);
8245 } while (prog && prog_is_subprog(obj, prog));
8246
8247 return prog;
8248}
8249
8250struct bpf_program *
8251bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
8252{
8253 struct bpf_program *prog = next;
8254
8255 do {
8256 prog = __bpf_program__iter(prog, obj, false);
8257 } while (prog && prog_is_subprog(obj, prog));
8258
8259 return prog;
8260}
8261
8262int bpf_program__set_priv(struct bpf_program *prog, void *priv,
8263 bpf_program_clear_priv_t clear_priv)
8264{
8265 if (prog->priv && prog->clear_priv)
8266 prog->clear_priv(prog, prog->priv);
8267
8268 prog->priv = priv;
8269 prog->clear_priv = clear_priv;
8270 return 0;
8271}
8272
8273void *bpf_program__priv(const struct bpf_program *prog)
8274{
8275 return prog ? prog->priv : ERR_PTR(-EINVAL);
8276}
8277
8278void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
8279{
8280 prog->prog_ifindex = ifindex;
8281}
8282
8283const char *bpf_program__name(const struct bpf_program *prog)
8284{
8285 return prog->name;
8286}
8287
8288const char *bpf_program__section_name(const struct bpf_program *prog)
8289{
8290 return prog->sec_name;
8291}
8292
8293const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
8294{
8295 const char *title;
8296
8297 title = prog->sec_name;
8298 if (needs_copy) {
8299 title = strdup(title);
8300 if (!title) {
8301 pr_warn("failed to strdup program title\n");
8302 return ERR_PTR(-ENOMEM);
8303 }
8304 }
8305
8306 return title;
8307}
8308
8309bool bpf_program__autoload(const struct bpf_program *prog)
8310{
8311 return prog->load;
8312}
8313
8314int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
8315{
8316 if (prog->obj->loaded)
8317 return -EINVAL;
8318
8319 prog->load = autoload;
8320 return 0;
8321}
8322
8323int bpf_program__fd(const struct bpf_program *prog)
8324{
8325 return bpf_program__nth_fd(prog, 0);
8326}
8327
8328size_t bpf_program__size(const struct bpf_program *prog)
8329{
8330 return prog->insns_cnt * BPF_INSN_SZ;
8331}
8332
8333int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
8334 bpf_program_prep_t prep)
8335{
8336 int *instances_fds;
8337
8338 if (nr_instances <= 0 || !prep)
8339 return -EINVAL;
8340
8341 if (prog->instances.nr > 0 || prog->instances.fds) {
8342 pr_warn("Can't set pre-processor after loading\n");
8343 return -EINVAL;
8344 }
8345
8346 instances_fds = malloc(sizeof(int) * nr_instances);
8347 if (!instances_fds) {
8348 pr_warn("failed to allocate memory for fds\n");
8349 return -ENOMEM;
8350 }
8351
8352 /* fill all fds with -1 */
8353 memset(instances_fds, -1, sizeof(int) * nr_instances);
8354
8355 prog->instances.nr = nr_instances;
8356 prog->instances.fds = instances_fds;
8357 prog->preprocessor = prep;
8358 return 0;
8359}
8360
8361int bpf_program__nth_fd(const struct bpf_program *prog, int n)
8362{
8363 int fd;
8364
8365 if (!prog)
8366 return -EINVAL;
8367
8368 if (n >= prog->instances.nr || n < 0) {
8369 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
8370 n, prog->name, prog->instances.nr);
8371 return -EINVAL;
8372 }
8373
8374 fd = prog->instances.fds[n];
8375 if (fd < 0) {
8376 pr_warn("%dth instance of program '%s' is invalid\n",
8377 n, prog->name);
8378 return -ENOENT;
8379 }
8380
8381 return fd;
8382}
8383
8384enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
8385{
8386 return prog->type;
8387}
8388
8389void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
8390{
8391 prog->type = type;
8392}
8393
8394static bool bpf_program__is_type(const struct bpf_program *prog,
8395 enum bpf_prog_type type)
8396{
8397 return prog ? (prog->type == type) : false;
8398}
8399
8400#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
8401int bpf_program__set_##NAME(struct bpf_program *prog) \
8402{ \
8403 if (!prog) \
8404 return -EINVAL; \
8405 bpf_program__set_type(prog, TYPE); \
8406 return 0; \
8407} \
8408 \
8409bool bpf_program__is_##NAME(const struct bpf_program *prog) \
8410{ \
8411 return bpf_program__is_type(prog, TYPE); \
8412} \
8413
8414BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
8415BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
8416BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
8417BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
8418BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
8419BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
8420BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
8421BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
8422BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
8423BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
8424BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
8425BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
8426BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
8427
8428enum bpf_attach_type
8429bpf_program__get_expected_attach_type(struct bpf_program *prog)
8430{
8431 return prog->expected_attach_type;
8432}
8433
8434void bpf_program__set_expected_attach_type(struct bpf_program *prog,
8435 enum bpf_attach_type type)
8436{
8437 prog->expected_attach_type = type;
8438}
8439
8440#define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional, \
8441 attachable, attach_btf) \
8442 { \
8443 .sec = string, \
8444 .len = sizeof(string) - 1, \
8445 .prog_type = ptype, \
8446 .expected_attach_type = eatype, \
8447 .is_exp_attach_type_optional = eatype_optional, \
8448 .is_attachable = attachable, \
8449 .is_attach_btf = attach_btf, \
8450 }
8451
8452/* Programs that can NOT be attached. */
8453#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
8454
8455/* Programs that can be attached. */
8456#define BPF_APROG_SEC(string, ptype, atype) \
8457 BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
8458
8459/* Programs that must specify expected attach type at load time. */
8460#define BPF_EAPROG_SEC(string, ptype, eatype) \
8461 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
8462
8463/* Programs that use BTF to identify attach point */
8464#define BPF_PROG_BTF(string, ptype, eatype) \
8465 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
8466
8467/* Programs that can be attached but attach type can't be identified by section
8468 * name. Kept for backward compatibility.
8469 */
8470#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
8471
8472#define SEC_DEF(sec_pfx, ptype, ...) { \
8473 .sec = sec_pfx, \
8474 .len = sizeof(sec_pfx) - 1, \
8475 .prog_type = BPF_PROG_TYPE_##ptype, \
8476 __VA_ARGS__ \
8477}
8478
8479static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
8480 struct bpf_program *prog);
8481static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
8482 struct bpf_program *prog);
8483static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
8484 struct bpf_program *prog);
8485static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
8486 struct bpf_program *prog);
8487static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
8488 struct bpf_program *prog);
8489static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
8490 struct bpf_program *prog);
8491
8492static const struct bpf_sec_def section_defs[] = {
8493 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
8494 BPF_PROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT),
8495 SEC_DEF("kprobe/", KPROBE,
8496 .attach_fn = attach_kprobe),
8497 BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
8498 SEC_DEF("kretprobe/", KPROBE,
8499 .attach_fn = attach_kprobe),
8500 BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
8501 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
8502 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
8503 SEC_DEF("tracepoint/", TRACEPOINT,
8504 .attach_fn = attach_tp),
8505 SEC_DEF("tp/", TRACEPOINT,
8506 .attach_fn = attach_tp),
8507 SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
8508 .attach_fn = attach_raw_tp),
8509 SEC_DEF("raw_tp/", RAW_TRACEPOINT,
8510 .attach_fn = attach_raw_tp),
8511 SEC_DEF("tp_btf/", TRACING,
8512 .expected_attach_type = BPF_TRACE_RAW_TP,
8513 .is_attach_btf = true,
8514 .attach_fn = attach_trace),
8515 SEC_DEF("fentry/", TRACING,
8516 .expected_attach_type = BPF_TRACE_FENTRY,
8517 .is_attach_btf = true,
8518 .attach_fn = attach_trace),
8519 SEC_DEF("fmod_ret/", TRACING,
8520 .expected_attach_type = BPF_MODIFY_RETURN,
8521 .is_attach_btf = true,
8522 .attach_fn = attach_trace),
8523 SEC_DEF("fexit/", TRACING,
8524 .expected_attach_type = BPF_TRACE_FEXIT,
8525 .is_attach_btf = true,
8526 .attach_fn = attach_trace),
8527 SEC_DEF("fentry.s/", TRACING,
8528 .expected_attach_type = BPF_TRACE_FENTRY,
8529 .is_attach_btf = true,
8530 .is_sleepable = true,
8531 .attach_fn = attach_trace),
8532 SEC_DEF("fmod_ret.s/", TRACING,
8533 .expected_attach_type = BPF_MODIFY_RETURN,
8534 .is_attach_btf = true,
8535 .is_sleepable = true,
8536 .attach_fn = attach_trace),
8537 SEC_DEF("fexit.s/", TRACING,
8538 .expected_attach_type = BPF_TRACE_FEXIT,
8539 .is_attach_btf = true,
8540 .is_sleepable = true,
8541 .attach_fn = attach_trace),
8542 SEC_DEF("freplace/", EXT,
8543 .is_attach_btf = true,
8544 .attach_fn = attach_trace),
8545 SEC_DEF("lsm/", LSM,
8546 .is_attach_btf = true,
8547 .expected_attach_type = BPF_LSM_MAC,
8548 .attach_fn = attach_lsm),
8549 SEC_DEF("lsm.s/", LSM,
8550 .is_attach_btf = true,
8551 .is_sleepable = true,
8552 .expected_attach_type = BPF_LSM_MAC,
8553 .attach_fn = attach_lsm),
8554 SEC_DEF("iter/", TRACING,
8555 .expected_attach_type = BPF_TRACE_ITER,
8556 .is_attach_btf = true,
8557 .attach_fn = attach_iter),
8558 BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP,
8559 BPF_XDP_DEVMAP),
8560 BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP,
8561 BPF_XDP_CPUMAP),
8562 BPF_APROG_SEC("xdp", BPF_PROG_TYPE_XDP,
8563 BPF_XDP),
8564 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
8565 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
8566 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
8567 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
8568 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
8569 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
8570 BPF_CGROUP_INET_INGRESS),
8571 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
8572 BPF_CGROUP_INET_EGRESS),
8573 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
8574 BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK,
8575 BPF_CGROUP_INET_SOCK_CREATE),
8576 BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK,
8577 BPF_CGROUP_INET_SOCK_RELEASE),
8578 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
8579 BPF_CGROUP_INET_SOCK_CREATE),
8580 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
8581 BPF_CGROUP_INET4_POST_BIND),
8582 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
8583 BPF_CGROUP_INET6_POST_BIND),
8584 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
8585 BPF_CGROUP_DEVICE),
8586 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
8587 BPF_CGROUP_SOCK_OPS),
8588 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
8589 BPF_SK_SKB_STREAM_PARSER),
8590 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
8591 BPF_SK_SKB_STREAM_VERDICT),
8592 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
8593 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
8594 BPF_SK_MSG_VERDICT),
8595 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
8596 BPF_LIRC_MODE2),
8597 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
8598 BPF_FLOW_DISSECTOR),
8599 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8600 BPF_CGROUP_INET4_BIND),
8601 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8602 BPF_CGROUP_INET6_BIND),
8603 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8604 BPF_CGROUP_INET4_CONNECT),
8605 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8606 BPF_CGROUP_INET6_CONNECT),
8607 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8608 BPF_CGROUP_UDP4_SENDMSG),
8609 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8610 BPF_CGROUP_UDP6_SENDMSG),
8611 BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8612 BPF_CGROUP_UDP4_RECVMSG),
8613 BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8614 BPF_CGROUP_UDP6_RECVMSG),
8615 BPF_EAPROG_SEC("cgroup/getpeername4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8616 BPF_CGROUP_INET4_GETPEERNAME),
8617 BPF_EAPROG_SEC("cgroup/getpeername6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8618 BPF_CGROUP_INET6_GETPEERNAME),
8619 BPF_EAPROG_SEC("cgroup/getsockname4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8620 BPF_CGROUP_INET4_GETSOCKNAME),
8621 BPF_EAPROG_SEC("cgroup/getsockname6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8622 BPF_CGROUP_INET6_GETSOCKNAME),
8623 BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
8624 BPF_CGROUP_SYSCTL),
8625 BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
8626 BPF_CGROUP_GETSOCKOPT),
8627 BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
8628 BPF_CGROUP_SETSOCKOPT),
8629 BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS),
8630 BPF_EAPROG_SEC("sk_lookup/", BPF_PROG_TYPE_SK_LOOKUP,
8631 BPF_SK_LOOKUP),
8632};
8633
8634#undef BPF_PROG_SEC_IMPL
8635#undef BPF_PROG_SEC
8636#undef BPF_APROG_SEC
8637#undef BPF_EAPROG_SEC
8638#undef BPF_APROG_COMPAT
8639#undef SEC_DEF
8640
8641#define MAX_TYPE_NAME_SIZE 32
8642
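/* Match an ELF section name against the known section prefixes above and
 * return the corresponding definition, or NULL if nothing matches.
 */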
8643static const struct bpf_sec_def *find_sec_def(const char *sec_name)
8644{
8645 int i, n = ARRAY_SIZE(section_defs);
8646
8647 for (i = 0; i < n; i++) {
8648 if (strncmp(sec_name,
8649 section_defs[i].sec, section_defs[i].len))
8650 continue;
8651 return &section_defs[i];
8652 }
8653 return NULL;
8654}
8655
8656static char *libbpf_get_type_names(bool attach_type)
8657{
8658 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
8659 char *buf;
8660
8661 buf = malloc(len);
8662 if (!buf)
8663 return NULL;
8664
8665 buf[0] = '\0';
8666 /* Build up a string buffer with all available names */
8667 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8668 if (attach_type && !section_defs[i].is_attachable)
8669 continue;
8670
8671 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
8672 free(buf);
8673 return NULL;
8674 }
8675 strcat(buf, " ");
8676 strcat(buf, section_defs[i].sec);
8677 }
8678
8679 return buf;
8680}
8681
8682int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
8683 enum bpf_attach_type *expected_attach_type)
8684{
8685 const struct bpf_sec_def *sec_def;
8686 char *type_names;
8687
8688 if (!name)
8689 return -EINVAL;
8690
8691 sec_def = find_sec_def(name);
8692 if (sec_def) {
8693 *prog_type = sec_def->prog_type;
8694 *expected_attach_type = sec_def->expected_attach_type;
8695 return 0;
8696 }
8697
8698 pr_debug("failed to guess program type from ELF section '%s'\n", name);
8699 type_names = libbpf_get_type_names(false);
8700 if (type_names != NULL) {
8701 pr_debug("supported section(type) names are:%s\n", type_names);
8702 free(type_names);
8703 }
8704
8705 return -ESRCH;
8706}
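/* A minimal usage sketch (hypothetical caller code): for an ELF section named
 * "tracepoint/syscalls/sys_enter_open" this resolves *prog_type to
 * BPF_PROG_TYPE_TRACEPOINT with a zero expected attach type:
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *
 *	err = libbpf_prog_type_by_name("tracepoint/syscalls/sys_enter_open",
 *				       &prog_type, &attach_type);
 */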
8707
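/* Find the struct_ops map whose variable covers the given offset within its
 * ELF section.
 */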
8708static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
8709 size_t offset)
8710{
8711 struct bpf_map *map;
8712 size_t i;
8713
8714 for (i = 0; i < obj->nr_maps; i++) {
8715 map = &obj->maps[i];
8716 if (!bpf_map__is_struct_ops(map))
8717 continue;
8718 if (map->sec_offset <= offset &&
8719 offset - map->sec_offset < map->def.value_size)
8720 return map;
8721 }
8722
8723 return NULL;
8724}
8725
8726/* Collect the reloc from ELF and populate the st_ops->progs[] */
8727static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
8728 GElf_Shdr *shdr, Elf_Data *data)
8729{
8730 const struct btf_member *member;
8731 struct bpf_struct_ops *st_ops;
8732 struct bpf_program *prog;
8733 unsigned int shdr_idx;
8734 const struct btf *btf;
8735 struct bpf_map *map;
8736 Elf_Data *symbols;
8737 unsigned int moff, insn_idx;
8738 const char *name;
8739 __u32 member_idx;
8740 GElf_Sym sym;
8741 GElf_Rel rel;
8742 int i, nrels;
8743
8744 symbols = obj->efile.symbols;
8745 btf = obj->btf;
8746 nrels = shdr->sh_size / shdr->sh_entsize;
8747 for (i = 0; i < nrels; i++) {
8748 if (!gelf_getrel(data, i, &rel)) {
8749 pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
8750 return -LIBBPF_ERRNO__FORMAT;
8751 }
8752
8753 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
8754 pr_warn("struct_ops reloc: symbol %zx not found\n",
8755 (size_t)GELF_R_SYM(rel.r_info));
8756 return -LIBBPF_ERRNO__FORMAT;
8757 }
8758
8759 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
8760 map = find_struct_ops_map_by_offset(obj, rel.r_offset);
8761 if (!map) {
8762 pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
8763 (size_t)rel.r_offset);
8764 return -EINVAL;
8765 }
8766
8767 moff = rel.r_offset - map->sec_offset;
8768 shdr_idx = sym.st_shndx;
8769 st_ops = map->st_ops;
8770 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
8771 map->name,
8772 (long long)(rel.r_info >> 32),
8773 (long long)sym.st_value,
8774 shdr_idx, (size_t)rel.r_offset,
8775 map->sec_offset, sym.st_name, name);
8776
8777 if (shdr_idx >= SHN_LORESERVE) {
8778 pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
8779 map->name, (size_t)rel.r_offset, shdr_idx);
8780 return -LIBBPF_ERRNO__RELOC;
8781 }
8782 if (sym.st_value % BPF_INSN_SZ) {
8783 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
8784 map->name, (unsigned long long)sym.st_value);
8785 return -LIBBPF_ERRNO__FORMAT;
8786 }
8787 insn_idx = sym.st_value / BPF_INSN_SZ;
8788
8789 member = find_member_by_offset(st_ops->type, moff * 8);
8790 if (!member) {
8791 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
8792 map->name, moff);
8793 return -EINVAL;
8794 }
8795 member_idx = member - btf_members(st_ops->type);
8796 name = btf__name_by_offset(btf, member->name_off);
8797
8798 if (!resolve_func_ptr(btf, member->type, NULL)) {
8799 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
8800 map->name, name);
8801 return -EINVAL;
8802 }
8803
8804 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
8805 if (!prog) {
8806 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
8807 map->name, shdr_idx, name);
8808 return -EINVAL;
8809 }
8810
8811 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
8812 const struct bpf_sec_def *sec_def;
8813
8814 sec_def = find_sec_def(prog->sec_name);
8815 if (sec_def &&
8816 sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
8817 /* for pr_warn */
8818 prog->type = sec_def->prog_type;
8819 goto invalid_prog;
8820 }
8821
8822 prog->type = BPF_PROG_TYPE_STRUCT_OPS;
8823 prog->attach_btf_id = st_ops->type_id;
8824 prog->expected_attach_type = member_idx;
8825 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
8826 prog->attach_btf_id != st_ops->type_id ||
8827 prog->expected_attach_type != member_idx) {
8828 goto invalid_prog;
8829 }
8830 st_ops->progs[member_idx] = prog;
8831 }
8832
8833 return 0;
8834
8835invalid_prog:
8836 pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
8837 map->name, prog->name, prog->sec_name, prog->type,
8838 prog->attach_btf_id, prog->expected_attach_type, name);
8839 return -EINVAL;
8840}
8841
8842#define BTF_TRACE_PREFIX "btf_trace_"
8843#define BTF_LSM_PREFIX "bpf_lsm_"
8844#define BTF_ITER_PREFIX "bpf_iter_"
8845#define BTF_MAX_NAME_SIZE 128
8846
8847static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
8848 const char *name, __u32 kind)
8849{
8850 char btf_type_name[BTF_MAX_NAME_SIZE];
8851 int ret;
8852
8853 ret = snprintf(btf_type_name, sizeof(btf_type_name),
8854 "%s%s", prefix, name);
8855 /* snprintf returns the number of characters written excluding the
8856 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
8857 * indicates truncation.
8858 */
8859 if (ret < 0 || ret >= sizeof(btf_type_name))
8860 return -ENAMETOOLONG;
8861 return btf__find_by_name_kind(btf, btf_type_name, kind);
8862}
8863
8864static inline int find_attach_btf_id(struct btf *btf, const char *name,
8865 enum bpf_attach_type attach_type)
8866{
8867 int err;
8868
8869 if (attach_type == BPF_TRACE_RAW_TP)
8870 err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
8871 BTF_KIND_TYPEDEF);
8872 else if (attach_type == BPF_LSM_MAC)
8873 err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
8874 BTF_KIND_FUNC);
8875 else if (attach_type == BPF_TRACE_ITER)
8876 err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name,
8877 BTF_KIND_FUNC);
8878 else
8879 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
8880
8881 return err;
8882}
8883
8884int libbpf_find_vmlinux_btf_id(const char *name,
8885 enum bpf_attach_type attach_type)
8886{
8887 struct btf *btf;
8888 int err;
8889
8890 btf = libbpf_find_kernel_btf();
8891 if (IS_ERR(btf)) {
8892 pr_warn("vmlinux BTF is not found\n");
8893 return -EINVAL;
8894 }
8895
8896 err = find_attach_btf_id(btf, name, attach_type);
8897 if (err <= 0)
8898 pr_warn("%s is not found in vmlinux BTF\n", name);
8899
8900 btf__free(btf);
8901 return err;
8902}
8903
8904static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
8905{
8906 struct bpf_prog_info_linear *info_linear;
8907 struct bpf_prog_info *info;
8908 struct btf *btf = NULL;
8909 int err = -EINVAL;
8910
8911 info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
8912 if (IS_ERR_OR_NULL(info_linear)) {
8913 pr_warn("failed get_prog_info_linear for FD %d\n",
8914 attach_prog_fd);
8915 return -EINVAL;
8916 }
8917 info = &info_linear->info;
8918 if (!info->btf_id) {
8919 pr_warn("The target program doesn't have BTF\n");
8920 goto out;
8921 }
8922 if (btf__get_from_id(info->btf_id, &btf)) {
8923 pr_warn("Failed to get BTF of the program\n");
8924 goto out;
8925 }
8926 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
8927 btf__free(btf);
8928 if (err <= 0) {
8929 pr_warn("%s is not found in prog's BTF\n", name);
8930 goto out;
8931 }
8932out:
8933 free(info_linear);
8934 return err;
8935}
8936
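/* Resolve the attach BTF type ID for 'attach_name': search vmlinux BTF first,
 * then the BTF of each loaded kernel module. On success, *btf_obj_fd is 0 for
 * vmlinux or the module's BTF FD, and *btf_type_id is the matching type ID.
 */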
8937static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
8938 enum bpf_attach_type attach_type,
8939 int *btf_obj_fd, int *btf_type_id)
8940{
8941 int ret, i;
8942
8943 ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
8944 if (ret > 0) {
8945 *btf_obj_fd = 0; /* vmlinux BTF */
8946 *btf_type_id = ret;
8947 return 0;
8948 }
8949 if (ret != -ENOENT)
8950 return ret;
8951
8952 ret = load_module_btfs(obj);
8953 if (ret)
8954 return ret;
8955
8956 for (i = 0; i < obj->btf_module_cnt; i++) {
8957 const struct module_btf *mod = &obj->btf_modules[i];
8958
8959 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
8960 if (ret > 0) {
8961 *btf_obj_fd = mod->fd;
8962 *btf_type_id = ret;
8963 return 0;
8964 }
8965 if (ret == -ENOENT)
8966 continue;
8967
8968 return ret;
8969 }
8970
8971 return -ESRCH;
8972}
8973
8974static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id)
8975{
8976 enum bpf_attach_type attach_type = prog->expected_attach_type;
8977 __u32 attach_prog_fd = prog->attach_prog_fd;
8978 const char *name = prog->sec_name, *attach_name;
8979 const struct bpf_sec_def *sec = NULL;
8980 int i, err;
8981
8982 if (!name)
8983 return -EINVAL;
8984
8985 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8986 if (!section_defs[i].is_attach_btf)
8987 continue;
8988 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
8989 continue;
8990
8991 sec = &section_defs[i];
8992 break;
8993 }
8994
8995 if (!sec) {
8996 pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name);
8997 return -ESRCH;
8998 }
8999 attach_name = name + sec->len;
9000
9001 /* BPF program's BTF ID */
9002 if (attach_prog_fd) {
9003 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9004 if (err < 0) {
9005 pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9006 attach_prog_fd, attach_name, err);
9007 return err;
9008 }
9009 *btf_obj_fd = 0;
9010 *btf_type_id = err;
9011 return 0;
9012 }
9013
9014 /* kernel/module BTF ID */
9015 err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
9016 if (err) {
9017 pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
9018 return err;
9019 }
9020 return 0;
9021}
9022
9023int libbpf_attach_type_by_name(const char *name,
9024 enum bpf_attach_type *attach_type)
9025{
9026 char *type_names;
9027 int i;
9028
9029 if (!name)
9030 return -EINVAL;
9031
9032 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9033 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
9034 continue;
9035 if (!section_defs[i].is_attachable)
9036 return -EINVAL;
9037 *attach_type = section_defs[i].expected_attach_type;
9038 return 0;
9039 }
9040 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9041 type_names = libbpf_get_type_names(true);
9042 if (type_names != NULL) {
9043 pr_debug("attachable section(type) names are:%s\n", type_names);
9044 free(type_names);
9045 }
9046
9047 return -EINVAL;
9048}
9049
9050int bpf_map__fd(const struct bpf_map *map)
9051{
9052 return map ? map->fd : -EINVAL;
9053}
9054
9055const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
9056{
9057 return map ? &map->def : ERR_PTR(-EINVAL);
9058}
9059
9060const char *bpf_map__name(const struct bpf_map *map)
9061{
9062 return map ? map->name : NULL;
9063}
9064
9065enum bpf_map_type bpf_map__type(const struct bpf_map *map)
9066{
9067 return map->def.type;
9068}
9069
9070int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9071{
9072 if (map->fd >= 0)
9073 return -EBUSY;
9074 map->def.type = type;
9075 return 0;
9076}
9077
9078__u32 bpf_map__map_flags(const struct bpf_map *map)
9079{
9080 return map->def.map_flags;
9081}
9082
9083int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
9084{
9085 if (map->fd >= 0)
9086 return -EBUSY;
9087 map->def.map_flags = flags;
9088 return 0;
9089}
9090
9091__u32 bpf_map__numa_node(const struct bpf_map *map)
9092{
9093 return map->numa_node;
9094}
9095
9096int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
9097{
9098 if (map->fd >= 0)
9099 return -EBUSY;
9100 map->numa_node = numa_node;
9101 return 0;
9102}
9103
9104__u32 bpf_map__key_size(const struct bpf_map *map)
9105{
9106 return map->def.key_size;
9107}
9108
9109int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
9110{
9111 if (map->fd >= 0)
9112 return -EBUSY;
9113 map->def.key_size = size;
9114 return 0;
9115}
9116
9117__u32 bpf_map__value_size(const struct bpf_map *map)
9118{
9119 return map->def.value_size;
9120}
9121
9122int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
9123{
9124 if (map->fd >= 0)
9125 return -EBUSY;
9126 map->def.value_size = size;
9127 return 0;
9128}
9129
9130__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
9131{
9132 return map ? map->btf_key_type_id : 0;
9133}
9134
9135__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
9136{
9137 return map ? map->btf_value_type_id : 0;
9138}
9139
9140int bpf_map__set_priv(struct bpf_map *map, void *priv,
9141 bpf_map_clear_priv_t clear_priv)
9142{
9143 if (!map)
9144 return -EINVAL;
9145
9146 if (map->priv) {
9147 if (map->clear_priv)
9148 map->clear_priv(map, map->priv);
9149 }
9150
9151 map->priv = priv;
9152 map->clear_priv = clear_priv;
9153 return 0;
9154}
9155
9156void *bpf_map__priv(const struct bpf_map *map)
9157{
9158 return map ? map->priv : ERR_PTR(-EINVAL);
9159}
9160
9161int bpf_map__set_initial_value(struct bpf_map *map,
9162 const void *data, size_t size)
9163{
9164 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
9165 size != map->def.value_size || map->fd >= 0)
9166 return -EINVAL;
9167
9168 memcpy(map->mmaped, data, size);
9169 return 0;
9170}
9171
9172bool bpf_map__is_offload_neutral(const struct bpf_map *map)
9173{
9174 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
9175}
9176
9177bool bpf_map__is_internal(const struct bpf_map *map)
9178{
9179 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
9180}
9181
9182__u32 bpf_map__ifindex(const struct bpf_map *map)
9183{
9184 return map->map_ifindex;
9185}
9186
9187int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
9188{
9189 if (map->fd >= 0)
9190 return -EBUSY;
9191 map->map_ifindex = ifindex;
9192 return 0;
9193}
9194
9195int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
9196{
9197 if (!bpf_map_type__is_map_in_map(map->def.type)) {
9198 pr_warn("error: unsupported map type\n");
9199 return -EINVAL;
9200 }
9201 if (map->inner_map_fd != -1) {
9202 pr_warn("error: inner_map_fd already specified\n");
9203 return -EINVAL;
9204 }
9205 map->inner_map_fd = fd;
9206 return 0;
9207}
9208
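/* Step 'i' entries from map 'm' within obj->maps, after validating that 'm'
 * actually belongs to 'obj'; returns NULL when stepping out of bounds.
 */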
9209static struct bpf_map *
9210__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
9211{
9212 ssize_t idx;
9213 struct bpf_map *s, *e;
9214
9215 if (!obj || !obj->maps)
9216 return NULL;
9217
9218 s = obj->maps;
9219 e = obj->maps + obj->nr_maps;
9220
9221 if ((m < s) || (m >= e)) {
9222 pr_warn("error in %s: map handle doesn't belong to object\n",
9223 __func__);
9224 return NULL;
9225 }
9226
9227 idx = (m - obj->maps) + i;
9228 if (idx >= obj->nr_maps || idx < 0)
9229 return NULL;
9230 return &obj->maps[idx];
9231}
9232
9233struct bpf_map *
9234bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
9235{
9236 if (prev == NULL)
9237 return obj->maps;
9238
9239 return __bpf_map__iter(prev, obj, 1);
9240}
9241
9242struct bpf_map *
9243bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
9244{
9245 if (next == NULL) {
9246 if (!obj->nr_maps)
9247 return NULL;
9248 return obj->maps + obj->nr_maps - 1;
9249 }
9250
9251 return __bpf_map__iter(next, obj, -1);
9252}
9253
9254struct bpf_map *
9255bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
9256{
9257 struct bpf_map *pos;
9258
9259 bpf_object__for_each_map(pos, obj) {
9260 if (pos->name && !strcmp(pos->name, name))
9261 return pos;
9262 }
9263 return NULL;
9264}
9265
9266int
9267bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
9268{
9269 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
9270}
9271
9272struct bpf_map *
9273bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
9274{
9275 return ERR_PTR(-ENOTSUP);
9276}
9277
9278long libbpf_get_error(const void *ptr)
9279{
9280 return PTR_ERR_OR_ZERO(ptr);
9281}
9282
9283int bpf_prog_load(const char *file, enum bpf_prog_type type,
9284 struct bpf_object **pobj, int *prog_fd)
9285{
9286 struct bpf_prog_load_attr attr;
9287
9288 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
9289 attr.file = file;
9290 attr.prog_type = type;
9291 attr.expected_attach_type = 0;
9292
9293 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
9294}
9295
9296int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
9297 struct bpf_object **pobj, int *prog_fd)
9298{
9299 struct bpf_object_open_attr open_attr = {};
9300 struct bpf_program *prog, *first_prog = NULL;
9301 struct bpf_object *obj;
9302 struct bpf_map *map;
9303 int err;
9304
9305 if (!attr)
9306 return -EINVAL;
9307 if (!attr->file)
9308 return -EINVAL;
9309
9310 open_attr.file = attr->file;
9311 open_attr.prog_type = attr->prog_type;
9312
9313 obj = bpf_object__open_xattr(&open_attr);
9314 if (IS_ERR_OR_NULL(obj))
9315 return -ENOENT;
9316
9317 bpf_object__for_each_program(prog, obj) {
9318 enum bpf_attach_type attach_type = attr->expected_attach_type;
9319 /*
9320 * to preserve backwards compatibility, bpf_prog_load treats
9321 * attr->prog_type, if specified, as an override to whatever
9322 * bpf_object__open guessed
9323 */
9324 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
9325 bpf_program__set_type(prog, attr->prog_type);
9326 bpf_program__set_expected_attach_type(prog,
9327 attach_type);
9328 }
9329 if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
9330 /*
9331 * we haven't guessed from section name and user
9332 * didn't provide a fallback type, too bad...
9333 */
9334 bpf_object__close(obj);
9335 return -EINVAL;
9336 }
9337
9338 prog->prog_ifindex = attr->ifindex;
9339 prog->log_level = attr->log_level;
9340 prog->prog_flags |= attr->prog_flags;
9341 if (!first_prog)
9342 first_prog = prog;
9343 }
9344
9345 bpf_object__for_each_map(map, obj) {
9346 if (!bpf_map__is_offload_neutral(map))
9347 map->map_ifindex = attr->ifindex;
9348 }
9349
9350 if (!first_prog) {
9351 pr_warn("object file doesn't contain bpf program\n");
9352 bpf_object__close(obj);
9353 return -ENOENT;
9354 }
9355
9356 err = bpf_object__load(obj);
9357 if (err) {
9358 bpf_object__close(obj);
9359 return err;
9360 }
9361
9362 *pobj = obj;
9363 *prog_fd = bpf_program__fd(first_prog);
9364 return 0;
9365}
9366
9367struct bpf_link {
9368 int (*detach)(struct bpf_link *link);
9369 int (*destroy)(struct bpf_link *link);
9370 char *pin_path; /* NULL, if not pinned */
9371 int fd; /* hook FD, -1 if not applicable */
9372 bool disconnected;
9373};
9374
9375/* Replace link's underlying BPF program with the new one */
9376int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
9377{
9378 return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
9379}
9380
9381/* Release "ownership" of the underlying BPF resource (typically, a BPF
9382 * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A
9383 * disconnected link, when destroyed through a bpf_link__destroy() call, won't
9384 * attempt to detach/unregister that BPF resource. This is useful in
9385 * situations where, say, the attached BPF program has to outlive the
9386 * userspace program that attached it. Depending on the type of BPF program,
9387 * though, additional steps (like pinning the BPF program in BPF FS) may be
9388 * necessary to ensure that the exit of the userspace program doesn't trigger
9389 * automatic detachment and cleanup inside the kernel.
9390 */
9391void bpf_link__disconnect(struct bpf_link *link)
9392{
9393 link->disconnected = true;
9394}
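
/* A minimal usage sketch (hypothetical caller code): release ownership so
 * that destroying the local link handle does not detach the program from,
 * e.g., its kprobe:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe(prog, false, "do_sys_open");
 *	if (!libbpf_get_error(link)) {
 *		bpf_link__disconnect(link);
 *		bpf_link__destroy(link);   // frees the handle, skips detach
 *	}
 */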
9395
9396int bpf_link__destroy(struct bpf_link *link)
9397{
9398 int err = 0;
9399
9400 if (IS_ERR_OR_NULL(link))
9401 return 0;
9402
9403 if (!link->disconnected && link->detach)
9404 err = link->detach(link);
9405 if (link->destroy)
9406 link->destroy(link);
9407 if (link->pin_path)
9408 free(link->pin_path);
9409 free(link);
9410
9411 return err;
9412}
9413
9414int bpf_link__fd(const struct bpf_link *link)
9415{
9416 return link->fd;
9417}
9418
9419const char *bpf_link__pin_path(const struct bpf_link *link)
9420{
9421 return link->pin_path;
9422}
9423
9424static int bpf_link__detach_fd(struct bpf_link *link)
9425{
9426 return close(link->fd);
9427}
9428
9429struct bpf_link *bpf_link__open(const char *path)
9430{
9431 struct bpf_link *link;
9432 int fd;
9433
9434 fd = bpf_obj_get(path);
9435 if (fd < 0) {
9436 fd = -errno;
9437 pr_warn("failed to open link at %s: %d\n", path, fd);
9438 return ERR_PTR(fd);
9439 }
9440
9441 link = calloc(1, sizeof(*link));
9442 if (!link) {
9443 close(fd);
9444 return ERR_PTR(-ENOMEM);
9445 }
9446 link->detach = &bpf_link__detach_fd;
9447 link->fd = fd;
9448
9449 link->pin_path = strdup(path);
9450 if (!link->pin_path) {
9451 bpf_link__destroy(link);
9452 return ERR_PTR(-ENOMEM);
9453 }
9454
9455 return link;
9456}
9457
9458int bpf_link__detach(struct bpf_link *link)
9459{
9460 return bpf_link_detach(link->fd) ? -errno : 0;
9461}
9462
9463int bpf_link__pin(struct bpf_link *link, const char *path)
9464{
9465 int err;
9466
9467 if (link->pin_path)
9468 return -EBUSY;
9469 err = make_parent_dir(path);
9470 if (err)
9471 return err;
9472 err = check_path(path);
9473 if (err)
9474 return err;
9475
9476 link->pin_path = strdup(path);
9477 if (!link->pin_path)
9478 return -ENOMEM;
9479
9480 if (bpf_obj_pin(link->fd, link->pin_path)) {
9481 err = -errno;
9482 zfree(&link->pin_path);
9483 return err;
9484 }
9485
9486 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
9487 return 0;
9488}
9489
9490int bpf_link__unpin(struct bpf_link *link)
9491{
9492 int err;
9493
9494 if (!link->pin_path)
9495 return -EINVAL;
9496
9497 err = unlink(link->pin_path);
9498 if (err != 0)
9499 return -errno;
9500
9501 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
9502 zfree(&link->pin_path);
9503 return 0;
9504}
9505
9506static int bpf_link__detach_perf_event(struct bpf_link *link)
9507{
9508 int err;
9509
9510 err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
9511 if (err)
9512 err = -errno;
9513
9514 close(link->fd);
9515 return err;
9516}
9517
9518struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
9519 int pfd)
9520{
9521 char errmsg[STRERR_BUFSIZE];
9522 struct bpf_link *link;
9523 int prog_fd, err;
9524
9525 if (pfd < 0) {
9526 pr_warn("prog '%s': invalid perf event FD %d\n",
9527 prog->name, pfd);
9528 return ERR_PTR(-EINVAL);
9529 }
9530 prog_fd = bpf_program__fd(prog);
9531 if (prog_fd < 0) {
9532 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
9533 prog->name);
9534 return ERR_PTR(-EINVAL);
9535 }
9536
9537 link = calloc(1, sizeof(*link));
9538 if (!link)
9539 return ERR_PTR(-ENOMEM);
9540 link->detach = &bpf_link__detach_perf_event;
9541 link->fd = pfd;
9542
9543 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
9544 err = -errno;
9545 free(link);
9546 pr_warn("prog '%s': failed to attach to pfd %d: %s\n",
9547 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9548 if (err == -EPROTO)
9549 pr_warn("prog '%s': try adding PERF_SAMPLE_CALLCHAIN to or removing exclude_callchain_[kernel|user] from pfd %d\n",
9550 prog->name, pfd);
9551 return ERR_PTR(err);
9552 }
9553 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
9554 err = -errno;
9555 free(link);
9556 pr_warn("prog '%s': failed to enable pfd %d: %s\n",
9557 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9558 return ERR_PTR(err);
9559 }
9560 return link;
9561}
9562
9563/*
9564 * this function is expected to parse an integer in the range of [0, 2^31-1]
9565 * from the given file using scanf format string fmt. If the actual parsed
9566 * value is negative, the result might be indistinguishable from an error
9567 */
9568static int parse_uint_from_file(const char *file, const char *fmt)
9569{
9570 char buf[STRERR_BUFSIZE];
9571 int err, ret;
9572 FILE *f;
9573
9574 f = fopen(file, "r");
9575 if (!f) {
9576 err = -errno;
9577 pr_debug("failed to open '%s': %s\n", file,
9578 libbpf_strerror_r(err, buf, sizeof(buf)));
9579 return err;
9580 }
9581 err = fscanf(f, fmt, &ret);
9582 if (err != 1) {
9583 err = err == EOF ? -EIO : -errno;
9584 pr_debug("failed to parse '%s': %s\n", file,
9585 libbpf_strerror_r(err, buf, sizeof(buf)));
9586 fclose(f);
9587 return err;
9588 }
9589 fclose(f);
9590 return ret;
9591}
9592
9593static int determine_kprobe_perf_type(void)
9594{
9595 const char *file = "/sys/bus/event_source/devices/kprobe/type";
9596
9597 return parse_uint_from_file(file, "%d\n");
9598}
9599
9600static int determine_uprobe_perf_type(void)
9601{
9602 const char *file = "/sys/bus/event_source/devices/uprobe/type";
9603
9604 return parse_uint_from_file(file, "%d\n");
9605}
9606
9607static int determine_kprobe_retprobe_bit(void)
9608{
9609 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
9610
9611 return parse_uint_from_file(file, "config:%d\n");
9612}
9613
9614static int determine_uprobe_retprobe_bit(void)
9615{
9616 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
9617
9618 return parse_uint_from_file(file, "config:%d\n");
9619}
9620
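/* Create a k[ret]probe or u[ret]probe perf event via perf_event_open();
 * 'name' is the kernel function name or binary path, 'offset' the probe
 * offset within it. Returns the perf event FD or a negative error.
 */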
9621static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
9622 uint64_t offset, int pid)
9623{
9624 struct perf_event_attr attr = {};
9625 char errmsg[STRERR_BUFSIZE];
9626 int type, pfd, err;
9627
9628 type = uprobe ? determine_uprobe_perf_type()
9629 : determine_kprobe_perf_type();
9630 if (type < 0) {
9631 pr_warn("failed to determine %s perf type: %s\n",
9632 uprobe ? "uprobe" : "kprobe",
9633 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
9634 return type;
9635 }
9636 if (retprobe) {
9637 int bit = uprobe ? determine_uprobe_retprobe_bit()
9638 : determine_kprobe_retprobe_bit();
9639
9640 if (bit < 0) {
9641 pr_warn("failed to determine %s retprobe bit: %s\n",
9642 uprobe ? "uprobe" : "kprobe",
9643 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
9644 return bit;
9645 }
9646 attr.config |= 1 << bit;
9647 }
9648 attr.size = sizeof(attr);
9649 attr.type = type;
9650 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
9651 attr.config2 = offset; /* kprobe_addr or probe_offset */
9652
9653 /* pid filter is meaningful only for uprobes */
9654 pfd = syscall(__NR_perf_event_open, &attr,
9655 pid < 0 ? -1 : pid /* pid */,
9656 pid == -1 ? 0 : -1 /* cpu */,
9657 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9658 if (pfd < 0) {
9659 err = -errno;
9660 pr_warn("%s perf_event_open() failed: %s\n",
9661 uprobe ? "uprobe" : "kprobe",
9662 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9663 return err;
9664 }
9665 return pfd;
9666}
9667
9668struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
9669 bool retprobe,
9670 const char *func_name)
9671{
9672 char errmsg[STRERR_BUFSIZE];
9673 struct bpf_link *link;
9674 int pfd, err;
9675
9676 pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
9677 0 /* offset */, -1 /* pid */);
9678 if (pfd < 0) {
9679 pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
9680 prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
9681 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9682 return ERR_PTR(pfd);
9683 }
9684 link = bpf_program__attach_perf_event(prog, pfd);
9685 if (IS_ERR(link)) {
9686 close(pfd);
9687 err = PTR_ERR(link);
9688 pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
9689 prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
9690 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9691 return link;
9692 }
9693 return link;
9694}
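
/* Usage sketch for bpf_program__attach_kprobe() (illustrative only; the
 * kernel function name is an example):
 *
 *	struct bpf_link *link;
 *	long err;
 *
 *	link = bpf_program__attach_kprobe(prog, false, "do_unlinkat");
 *	err = libbpf_get_error(link);
 *	if (err)
 *		return err;
 *	...
 *	bpf_link__destroy(link);
 */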
9695
9696static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
9697 struct bpf_program *prog)
9698{
9699 const char *func_name;
9700 bool retprobe;
9701
9702 func_name = prog->sec_name + sec->len;
9703 retprobe = strcmp(sec->sec, "kretprobe/") == 0;
9704
9705 return bpf_program__attach_kprobe(prog, retprobe, func_name);
9706}
9707
9708struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
9709 bool retprobe, pid_t pid,
9710 const char *binary_path,
9711 size_t func_offset)
9712{
9713 char errmsg[STRERR_BUFSIZE];
9714 struct bpf_link *link;
9715 int pfd, err;
9716
9717 pfd = perf_event_open_probe(true /* uprobe */, retprobe,
9718 binary_path, func_offset, pid);
9719 if (pfd < 0) {
9720 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
9721 prog->name, retprobe ? "uretprobe" : "uprobe",
9722 binary_path, func_offset,
9723 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9724 return ERR_PTR(pfd);
9725 }
9726 link = bpf_program__attach_perf_event(prog, pfd);
9727 if (IS_ERR(link)) {
9728 close(pfd);
9729 err = PTR_ERR(link);
9730 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
9731 prog->name, retprobe ? "uretprobe" : "uprobe",
9732 binary_path, func_offset,
9733 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9734 return link;
9735 }
9736 return link;
9737}
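
/* Usage sketch for bpf_program__attach_uprobe() (illustrative; the binary
 * path and offset are made up). Unlike kprobes, the caller is responsible
 * for resolving func_offset within binary_path (e.g. from the ELF symbol
 * table); pid -1 traces all processes:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe(prog, true, -1,
 *					  "/usr/lib/libc.so.6", 0x12345);
 *	if (libbpf_get_error(link))
 *		return -EINVAL;
 */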
9738
9739static int determine_tracepoint_id(const char *tp_category,
9740 const char *tp_name)
9741{
9742 char file[PATH_MAX];
9743 int ret;
9744
9745 ret = snprintf(file, sizeof(file),
9746 "/sys/kernel/debug/tracing/events/%s/%s/id",
9747 tp_category, tp_name);
9748 if (ret < 0)
9749 return -errno;
9750 if (ret >= sizeof(file)) {
9751 pr_debug("tracepoint %s/%s path is too long\n",
9752 tp_category, tp_name);
9753 return -E2BIG;
9754 }
9755 return parse_uint_from_file(file, "%d\n");
9756}
9757
9758static int perf_event_open_tracepoint(const char *tp_category,
9759 const char *tp_name)
9760{
9761 struct perf_event_attr attr = {};
9762 char errmsg[STRERR_BUFSIZE];
9763 int tp_id, pfd, err;
9764
9765 tp_id = determine_tracepoint_id(tp_category, tp_name);
9766 if (tp_id < 0) {
9767 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
9768 tp_category, tp_name,
9769 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
9770 return tp_id;
9771 }
9772
9773 attr.type = PERF_TYPE_TRACEPOINT;
9774 attr.size = sizeof(attr);
9775 attr.config = tp_id;
9776
9777 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
9778 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9779 if (pfd < 0) {
9780 err = -errno;
9781 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
9782 tp_category, tp_name,
9783 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9784 return err;
9785 }
9786 return pfd;
9787}
9788
9789struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
9790 const char *tp_category,
9791 const char *tp_name)
9792{
9793 char errmsg[STRERR_BUFSIZE];
9794 struct bpf_link *link;
9795 int pfd, err;
9796
9797 pfd = perf_event_open_tracepoint(tp_category, tp_name);
9798 if (pfd < 0) {
9799 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
9800 prog->name, tp_category, tp_name,
9801 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9802 return ERR_PTR(pfd);
9803 }
9804 link = bpf_program__attach_perf_event(prog, pfd);
9805 if (IS_ERR(link)) {
9806 close(pfd);
9807 err = PTR_ERR(link);
9808 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
9809 prog->name, tp_category, tp_name,
9810 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9811 return link;
9812 }
9813 return link;
9814}
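
/* Usage sketch for bpf_program__attach_tracepoint() (illustrative); the
 * category/name pair corresponds to
 * /sys/kernel/debug/tracing/events/<category>/<name>:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_tracepoint(prog, "syscalls",
 *					      "sys_enter_openat");
 *	if (libbpf_get_error(link))
 *		return -EINVAL;
 */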
9815
9816static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
9817 struct bpf_program *prog)
9818{
9819 char *sec_name, *tp_cat, *tp_name;
9820 struct bpf_link *link;
9821
9822 sec_name = strdup(prog->sec_name);
9823 if (!sec_name)
9824 return ERR_PTR(-ENOMEM);
9825
9826 /* extract "tp/<category>/<name>" */
9827 tp_cat = sec_name + sec->len;
9828 tp_name = strchr(tp_cat, '/');
9829 if (!tp_name) {
9830 link = ERR_PTR(-EINVAL);
9831 goto out;
9832 }
9833 *tp_name = '\0';
9834 tp_name++;
9835
9836 link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
9837out:
9838 free(sec_name);
9839 return link;
9840}
9841
9842struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
9843 const char *tp_name)
9844{
9845 char errmsg[STRERR_BUFSIZE];
9846 struct bpf_link *link;
9847 int prog_fd, pfd;
9848
9849 prog_fd = bpf_program__fd(prog);
9850 if (prog_fd < 0) {
9851 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9852 return ERR_PTR(-EINVAL);
9853 }
9854
9855 link = calloc(1, sizeof(*link));
9856 if (!link)
9857 return ERR_PTR(-ENOMEM);
9858 link->detach = &bpf_link__detach_fd;
9859
9860 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
9861 if (pfd < 0) {
9862 pfd = -errno;
9863 free(link);
9864 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
9865 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9866 return ERR_PTR(pfd);
9867 }
9868 link->fd = pfd;
9869 return link;
9870}
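
/* Usage sketch for bpf_program__attach_raw_tracepoint() (illustrative): raw
 * tracepoints are attached by name only, with no per-event format parsing:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
 *	if (libbpf_get_error(link))
 *		return -EINVAL;
 */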
9871
9872static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
9873 struct bpf_program *prog)
9874{
9875 const char *tp_name = prog->sec_name + sec->len;
9876
9877 return bpf_program__attach_raw_tracepoint(prog, tp_name);
9878}
9879
9880/* Common logic for all BPF program types that attach to a btf_id */
9881static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
9882{
9883 char errmsg[STRERR_BUFSIZE];
9884 struct bpf_link *link;
9885 int prog_fd, pfd;
9886
9887 prog_fd = bpf_program__fd(prog);
9888 if (prog_fd < 0) {
9889 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9890 return ERR_PTR(-EINVAL);
9891 }
9892
9893 link = calloc(1, sizeof(*link));
9894 if (!link)
9895 return ERR_PTR(-ENOMEM);
9896 link->detach = &bpf_link__detach_fd;
9897
9898 pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
9899 if (pfd < 0) {
9900 pfd = -errno;
9901 free(link);
9902 pr_warn("prog '%s': failed to attach: %s\n",
9903 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9904 return ERR_PTR(pfd);
9905 }
9906 link->fd = pfd;
9907 return (struct bpf_link *)link;
9908}
9909
9910struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
9911{
9912 return bpf_program__attach_btf_id(prog);
9913}
9914
9915struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
9916{
9917 return bpf_program__attach_btf_id(prog);
9918}
9919
9920static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
9921 struct bpf_program *prog)
9922{
9923 return bpf_program__attach_trace(prog);
9924}
9925
9926static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
9927 struct bpf_program *prog)
9928{
9929 return bpf_program__attach_lsm(prog);
9930}
9931
9932static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
9933 struct bpf_program *prog)
9934{
9935 return bpf_program__attach_iter(prog, NULL);
9936}
9937
9938static struct bpf_link *
9939bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
9940 const char *target_name)
9941{
9942 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
9943 .target_btf_id = btf_id);
9944 enum bpf_attach_type attach_type;
9945 char errmsg[STRERR_BUFSIZE];
9946 struct bpf_link *link;
9947 int prog_fd, link_fd;
9948
9949 prog_fd = bpf_program__fd(prog);
9950 if (prog_fd < 0) {
9951 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9952 return ERR_PTR(-EINVAL);
9953 }
9954
9955 link = calloc(1, sizeof(*link));
9956 if (!link)
9957 return ERR_PTR(-ENOMEM);
9958 link->detach = &bpf_link__detach_fd;
9959
9960 attach_type = bpf_program__get_expected_attach_type(prog);
9961 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
9962 if (link_fd < 0) {
9963 link_fd = -errno;
9964 free(link);
9965 pr_warn("prog '%s': failed to attach to %s: %s\n",
9966 prog->name, target_name,
9967 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
9968 return ERR_PTR(link_fd);
9969 }
9970 link->fd = link_fd;
9971 return link;
9972}
9973
9974struct bpf_link *
9975bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
9976{
9977 return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
9978}
9979
9980struct bpf_link *
9981bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
9982{
9983 return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
9984}
9985
9986struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
9987{
9988 /* target_fd/target_ifindex use the same field in LINK_CREATE */
9989 return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
9990}
9991
9992struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
9993 int target_fd,
9994 const char *attach_func_name)
9995{
9996 int btf_id;
9997
9998 if (!!target_fd != !!attach_func_name) {
9999 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
10000 prog->name);
10001 return ERR_PTR(-EINVAL);
10002 }
10003
10004 if (prog->type != BPF_PROG_TYPE_EXT) {
10005 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
10006 prog->name);
10007 return ERR_PTR(-EINVAL);
10008 }
10009
10010 if (target_fd) {
10011 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
10012 if (btf_id < 0)
10013 return ERR_PTR(btf_id);
10014
10015 return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
10016 } else {
10017 /* no target, so use raw_tracepoint_open for compatibility
10018 * with old kernels
10019 */
10020 return bpf_program__attach_trace(prog);
10021 }
10022}
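
/* Usage sketch for bpf_program__attach_freplace() (illustrative; the target
 * program and function name are hypothetical): replace a subprogram of an
 * already-loaded target program with this BPF_PROG_TYPE_EXT program:
 *
 *	int target_fd = bpf_program__fd(target_prog);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_freplace(prog, target_fd, "xdp_dispatch");
 *	if (libbpf_get_error(link))
 *		return -EINVAL;
 *
 * Passing target_fd == 0 and attach_func_name == NULL falls back to the
 * attach target recorded at program load time.
 */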
10023
10024struct bpf_link *
10025bpf_program__attach_iter(struct bpf_program *prog,
10026 const struct bpf_iter_attach_opts *opts)
10027{
10028 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
10029 char errmsg[STRERR_BUFSIZE];
10030 struct bpf_link *link;
10031 int prog_fd, link_fd;
10032 __u32 target_fd = 0;
10033
10034 if (!OPTS_VALID(opts, bpf_iter_attach_opts))
10035 return ERR_PTR(-EINVAL);
10036
10037 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
10038 link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
10039
10040 prog_fd = bpf_program__fd(prog);
10041 if (prog_fd < 0) {
10042 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10043 return ERR_PTR(-EINVAL);
10044 }
10045
10046 link = calloc(1, sizeof(*link));
10047 if (!link)
10048 return ERR_PTR(-ENOMEM);
10049 link->detach = &bpf_link__detach_fd;
10050
10051 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
10052 &link_create_opts);
10053 if (link_fd < 0) {
10054 link_fd = -errno;
10055 free(link);
10056 pr_warn("prog '%s': failed to attach to iterator: %s\n",
10057 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10058 return ERR_PTR(link_fd);
10059 }
10060 link->fd = link_fd;
10061 return link;
10062}
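
/* Usage sketch for bpf_program__attach_iter() (illustrative): parameterize
 * a map-element iterator program with a target map FD via
 * union bpf_iter_link_info:
 *
 *	union bpf_iter_link_info linfo = {};
 *	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, iter_opts);
 *	struct bpf_link *link;
 *
 *	linfo.map.map_fd = bpf_map__fd(map);
 *	iter_opts.link_info = &linfo;
 *	iter_opts.link_info_len = sizeof(linfo);
 *	link = bpf_program__attach_iter(prog, &iter_opts);
 *	if (libbpf_get_error(link))
 *		return -EINVAL;
 */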
10063
10064struct bpf_link *bpf_program__attach(struct bpf_program *prog)
10065{
10066 const struct bpf_sec_def *sec_def;
10067
10068 sec_def = find_sec_def(prog->sec_name);
10069 if (!sec_def || !sec_def->attach_fn)
10070 return ERR_PTR(-ESRCH);
10071
10072 return sec_def->attach_fn(sec_def, prog);
10073}
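
/* Usage sketch for bpf_program__attach() (illustrative): the attach method
 * is picked from the program's SEC() name, so a program defined under
 * SEC("tp/sched/sched_switch") ends up in attach_tp() above:
 *
 *	struct bpf_link *link = bpf_program__attach(prog);
 *	long err = libbpf_get_error(link);
 *
 *	if (err)
 *		return err;
 */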
10074
10075static int bpf_link__detach_struct_ops(struct bpf_link *link)
10076{
10077 __u32 zero = 0;
10078
10079 if (bpf_map_delete_elem(link->fd, &zero))
10080 return -errno;
10081
10082 return 0;
10083}
10084
10085struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
10086{
10087 struct bpf_struct_ops *st_ops;
10088 struct bpf_link *link;
10089 __u32 i, zero = 0;
10090 int err;
10091
10092 if (!bpf_map__is_struct_ops(map) || map->fd == -1)
10093 return ERR_PTR(-EINVAL);
10094
10095 link = calloc(1, sizeof(*link));
10096 if (!link)
10097 return ERR_PTR(-ENOMEM);
10098
10099 st_ops = map->st_ops;
10100 for (i = 0; i < btf_vlen(st_ops->type); i++) {
10101 struct bpf_program *prog = st_ops->progs[i];
10102 void *kern_data;
10103 int prog_fd;
10104
10105 if (!prog)
10106 continue;
10107
10108 prog_fd = bpf_program__fd(prog);
10109 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
10110 *(unsigned long *)kern_data = prog_fd;
10111 }
10112
10113 err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
10114 if (err) {
10115 err = -errno;
10116 free(link);
10117 return ERR_PTR(err);
10118 }
10119
10120 link->detach = bpf_link__detach_struct_ops;
10121 link->fd = map->fd;
10122
10123 return link;
10124}
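
/* Usage sketch for bpf_map__attach_struct_ops() (illustrative; the map name
 * is hypothetical):
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "dctcp");
 *	struct bpf_link *link = bpf_map__attach_struct_ops(map);
 *
 *	if (libbpf_get_error(link))
 *		return -EINVAL;
 *
 * "Detaching" deletes the single element of the struct_ops map (see
 * bpf_link__detach_struct_ops() above), unregistering it from the kernel.
 */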
10125
10126enum bpf_perf_event_ret
10127bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
10128 void **copy_mem, size_t *copy_size,
10129 bpf_perf_event_print_t fn, void *private_data)
10130{
10131 struct perf_event_mmap_page *header = mmap_mem;
10132 __u64 data_head = ring_buffer_read_head(header);
10133 __u64 data_tail = header->data_tail;
10134 void *base = ((__u8 *)header) + page_size;
10135 int ret = LIBBPF_PERF_EVENT_CONT;
10136 struct perf_event_header *ehdr;
10137 size_t ehdr_size;
10138
10139 while (data_head != data_tail) {
10140 ehdr = base + (data_tail & (mmap_size - 1));
10141 ehdr_size = ehdr->size;
10142
10143 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
10144 void *copy_start = ehdr;
10145 size_t len_first = base + mmap_size - copy_start;
10146 size_t len_secnd = ehdr_size - len_first;
10147
10148 if (*copy_size < ehdr_size) {
10149 free(*copy_mem);
10150 *copy_mem = malloc(ehdr_size);
10151 if (!*copy_mem) {
10152 *copy_size = 0;
10153 ret = LIBBPF_PERF_EVENT_ERROR;
10154 break;
10155 }
10156 *copy_size = ehdr_size;
10157 }
10158
10159 memcpy(*copy_mem, copy_start, len_first);
10160 memcpy(*copy_mem + len_first, base, len_secnd);
10161 ehdr = *copy_mem;
10162 }
10163
10164 ret = fn(ehdr, private_data);
10165 data_tail += ehdr_size;
10166 if (ret != LIBBPF_PERF_EVENT_CONT)
10167 break;
10168 }
10169
10170 ring_buffer_write_tail(header, data_tail);
10171 return ret;
10172}
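
/* Usage sketch for bpf_perf_event_read_simple() (illustrative; the callback
 * name, ring address, and sizes are hypothetical): one read pass over an
 * already-mmap()-ed ring of 8 data pages with 4 KiB pages:
 *
 *	static enum bpf_perf_event_ret
 *	process_record(struct perf_event_header *hdr, void *private_data)
 *	{
 *		// hdr points at a contiguous record; records wrapping the
 *		// ring boundary are reconstructed in the bounce buffer
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 *
 *	void *copy_mem = NULL;
 *	size_t copy_size = 0;
 *
 *	bpf_perf_event_read_simple(mmap_mem, 8 * 4096, 4096,
 *				   &copy_mem, &copy_size,
 *				   process_record, NULL);
 *	free(copy_mem);
 */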
10173
10174struct perf_buffer;
10175
10176struct perf_buffer_params {
10177 struct perf_event_attr *attr;
10178 /* if event_cb is specified, it takes precedence */
10179 perf_buffer_event_fn event_cb;
10180 /* sample_cb and lost_cb are higher-level common-case callbacks */
10181 perf_buffer_sample_fn sample_cb;
10182 perf_buffer_lost_fn lost_cb;
10183 void *ctx;
10184 int cpu_cnt;
10185 int *cpus;
10186 int *map_keys;
10187};
10188
10189struct perf_cpu_buf {
10190 struct perf_buffer *pb;
10191 void *base; /* mmap()'ed memory */
10192 void *buf; /* for reconstructing segmented data */
10193 size_t buf_size;
10194 int fd;
10195 int cpu;
10196 int map_key;
10197};
10198
10199struct perf_buffer {
10200 perf_buffer_event_fn event_cb;
10201 perf_buffer_sample_fn sample_cb;
10202 perf_buffer_lost_fn lost_cb;
10203 void *ctx; /* passed into callbacks */
10204
10205 size_t page_size;
10206 size_t mmap_size;
10207 struct perf_cpu_buf **cpu_bufs;
10208 struct epoll_event *events;
10209 int cpu_cnt; /* number of allocated CPU buffers */
10210 int epoll_fd; /* epoll instance FD */
10211 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
10212};
10213
10214static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
10215 struct perf_cpu_buf *cpu_buf)
10216{
10217 if (!cpu_buf)
10218 return;
10219 if (cpu_buf->base &&
10220 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
10221 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
10222 if (cpu_buf->fd >= 0) {
10223 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
10224 close(cpu_buf->fd);
10225 }
10226 free(cpu_buf->buf);
10227 free(cpu_buf);
10228}
10229
10230void perf_buffer__free(struct perf_buffer *pb)
10231{
10232 int i;
10233
10234 if (IS_ERR_OR_NULL(pb))
10235 return;
10236 if (pb->cpu_bufs) {
10237 for (i = 0; i < pb->cpu_cnt; i++) {
10238 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10239
10240 if (!cpu_buf)
10241 continue;
10242
10243 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
10244 perf_buffer__free_cpu_buf(pb, cpu_buf);
10245 }
10246 free(pb->cpu_bufs);
10247 }
10248 if (pb->epoll_fd >= 0)
10249 close(pb->epoll_fd);
10250 free(pb->events);
10251 free(pb);
10252}
10253
10254static struct perf_cpu_buf *
10255perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
10256 int cpu, int map_key)
10257{
10258 struct perf_cpu_buf *cpu_buf;
10259 char msg[STRERR_BUFSIZE];
10260 int err;
10261
10262 cpu_buf = calloc(1, sizeof(*cpu_buf));
10263 if (!cpu_buf)
10264 return ERR_PTR(-ENOMEM);
10265
10266 cpu_buf->pb = pb;
10267 cpu_buf->cpu = cpu;
10268 cpu_buf->map_key = map_key;
10269
10270 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
10271 -1, PERF_FLAG_FD_CLOEXEC);
10272 if (cpu_buf->fd < 0) {
10273 err = -errno;
10274 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
10275 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10276 goto error;
10277 }
10278
10279 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
10280 PROT_READ | PROT_WRITE, MAP_SHARED,
10281 cpu_buf->fd, 0);
10282 if (cpu_buf->base == MAP_FAILED) {
10283 cpu_buf->base = NULL;
10284 err = -errno;
10285 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
10286 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10287 goto error;
10288 }
10289
10290 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10291 err = -errno;
10292 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
10293 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10294 goto error;
10295 }
10296
10297 return cpu_buf;
10298
10299error:
10300 perf_buffer__free_cpu_buf(pb, cpu_buf);
10301 return (struct perf_cpu_buf *)ERR_PTR(err);
10302}
10303
10304static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10305 struct perf_buffer_params *p);
10306
10307struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
10308 const struct perf_buffer_opts *opts)
10309{
10310 struct perf_buffer_params p = {};
10311 struct perf_event_attr attr = { 0, };
10312
10313 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
10314 attr.type = PERF_TYPE_SOFTWARE;
10315 attr.sample_type = PERF_SAMPLE_RAW;
10316 attr.sample_period = 1;
10317 attr.wakeup_events = 1;
10318
10319 p.attr = &attr;
10320 p.sample_cb = opts ? opts->sample_cb : NULL;
10321 p.lost_cb = opts ? opts->lost_cb : NULL;
10322 p.ctx = opts ? opts->ctx : NULL;
10323
10324 return __perf_buffer__new(map_fd, page_cnt, &p);
10325}
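
/* Usage sketch for perf_buffer__new() (illustrative; the callbacks and map
 * are hypothetical): consume a BPF_MAP_TYPE_PERF_EVENT_ARRAY map with 64
 * pages of ring buffer per CPU:
 *
 *	static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *	}
 *
 *	static void handle_lost(void *ctx, int cpu, __u64 cnt)
 *	{
 *	}
 *
 *	struct perf_buffer_opts pb_opts = {
 *		.sample_cb = handle_sample,
 *		.lost_cb = handle_lost,
 *	};
 *	struct perf_buffer *pb;
 *
 *	pb = perf_buffer__new(bpf_map__fd(map), 64, &pb_opts);
 *	if (libbpf_get_error(pb))
 *		return -EINVAL;
 *	while (perf_buffer__poll(pb, 100) >= 0)
 *		;
 *	perf_buffer__free(pb);
 */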
10326
10327struct perf_buffer *
10328perf_buffer__new_raw(int map_fd, size_t page_cnt,
10329 const struct perf_buffer_raw_opts *opts)
10330{
10331 struct perf_buffer_params p = {};
10332
10333 p.attr = opts->attr;
10334 p.event_cb = opts->event_cb;
10335 p.ctx = opts->ctx;
10336 p.cpu_cnt = opts->cpu_cnt;
10337 p.cpus = opts->cpus;
10338 p.map_keys = opts->map_keys;
10339
10340 return __perf_buffer__new(map_fd, page_cnt, &p);
10341}
10342
10343static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10344 struct perf_buffer_params *p)
10345{
10346 const char *online_cpus_file = "/sys/devices/system/cpu/online";
10347 struct bpf_map_info map;
10348 char msg[STRERR_BUFSIZE];
10349 struct perf_buffer *pb;
10350 bool *online = NULL;
10351 __u32 map_info_len;
10352 int err, i, j, n;
10353
10354 if (page_cnt & (page_cnt - 1)) {
10355 pr_warn("page count should be power of two, but is %zu\n",
10356 page_cnt);
10357 return ERR_PTR(-EINVAL);
10358 }
10359
10360 /* best-effort sanity checks */
10361 memset(&map, 0, sizeof(map));
10362 map_info_len = sizeof(map);
10363 err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
10364 if (err) {
10365 err = -errno;
10366 /* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
10367 * -EBADFD, -EFAULT, or -E2BIG on a real error
10368 */
10369 if (err != -EINVAL) {
10370 pr_warn("failed to get map info for map FD %d: %s\n",
10371 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
10372 return ERR_PTR(err);
10373 }
10374 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
10375 map_fd);
10376 } else {
10377 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
10378 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
10379 map.name);
10380 return ERR_PTR(-EINVAL);
10381 }
10382 }
10383
10384 pb = calloc(1, sizeof(*pb));
10385 if (!pb)
10386 return ERR_PTR(-ENOMEM);
10387
10388 pb->event_cb = p->event_cb;
10389 pb->sample_cb = p->sample_cb;
10390 pb->lost_cb = p->lost_cb;
10391 pb->ctx = p->ctx;
10392
10393 pb->page_size = getpagesize();
10394 pb->mmap_size = pb->page_size * page_cnt;
10395 pb->map_fd = map_fd;
10396
10397 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
10398 if (pb->epoll_fd < 0) {
10399 err = -errno;
10400 pr_warn("failed to create epoll instance: %s\n",
10401 libbpf_strerror_r(err, msg, sizeof(msg)));
10402 goto error;
10403 }
10404
10405 if (p->cpu_cnt > 0) {
10406 pb->cpu_cnt = p->cpu_cnt;
10407 } else {
10408 pb->cpu_cnt = libbpf_num_possible_cpus();
10409 if (pb->cpu_cnt < 0) {
10410 err = pb->cpu_cnt;
10411 goto error;
10412 }
10413 if (map.max_entries && map.max_entries < pb->cpu_cnt)
10414 pb->cpu_cnt = map.max_entries;
10415 }
10416
10417 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
10418 if (!pb->events) {
10419 err = -ENOMEM;
10420 pr_warn("failed to allocate events: out of memory\n");
10421 goto error;
10422 }
10423 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
10424 if (!pb->cpu_bufs) {
10425 err = -ENOMEM;
10426 pr_warn("failed to allocate buffers: out of memory\n");
10427 goto error;
10428 }
10429
10430 err = parse_cpu_mask_file(online_cpus_file, &online, &n);
10431 if (err) {
10432 pr_warn("failed to get online CPU mask: %d\n", err);
10433 goto error;
10434 }
10435
10436 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
10437 struct perf_cpu_buf *cpu_buf;
10438 int cpu, map_key;
10439
10440 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
10441 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
10442
10443 /* if the user didn't explicitly request particular CPUs to
10444 * attach to, skip offline/not-present CPUs
10445 */
10446 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
10447 continue;
10448
10449 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
10450 if (IS_ERR(cpu_buf)) {
10451 err = PTR_ERR(cpu_buf);
10452 goto error;
10453 }
10454
10455 pb->cpu_bufs[j] = cpu_buf;
10456
10457 err = bpf_map_update_elem(pb->map_fd, &map_key,
10458 &cpu_buf->fd, 0);
10459 if (err) {
10460 err = -errno;
10461 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
10462 cpu, map_key, cpu_buf->fd,
10463 libbpf_strerror_r(err, msg, sizeof(msg)));
10464 goto error;
10465 }
10466
10467 pb->events[j].events = EPOLLIN;
10468 pb->events[j].data.ptr = cpu_buf;
10469 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
10470 &pb->events[j]) < 0) {
10471 err = -errno;
10472 pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
10473 cpu, cpu_buf->fd,
10474 libbpf_strerror_r(err, msg, sizeof(msg)));
10475 goto error;
10476 }
10477 j++;
10478 }
10479 pb->cpu_cnt = j;
10480 free(online);
10481
10482 return pb;
10483
10484error:
10485 free(online);
10486 if (pb)
10487 perf_buffer__free(pb);
10488 return ERR_PTR(err);
10489}
10490
10491struct perf_sample_raw {
10492 struct perf_event_header header;
10493 uint32_t size;
10494 char data[];
10495};
10496
10497struct perf_sample_lost {
10498 struct perf_event_header header;
10499 uint64_t id;
10500 uint64_t lost;
10501 uint64_t sample_id;
10502};
10503
10504static enum bpf_perf_event_ret
10505perf_buffer__process_record(struct perf_event_header *e, void *ctx)
10506{
10507 struct perf_cpu_buf *cpu_buf = ctx;
10508 struct perf_buffer *pb = cpu_buf->pb;
10509 void *data = e;
10510
10511 /* user wants full control over parsing perf event */
10512 if (pb->event_cb)
10513 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
10514
10515 switch (e->type) {
10516 case PERF_RECORD_SAMPLE: {
10517 struct perf_sample_raw *s = data;
10518
10519 if (pb->sample_cb)
10520 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
10521 break;
10522 }
10523 case PERF_RECORD_LOST: {
10524 struct perf_sample_lost *s = data;
10525
10526 if (pb->lost_cb)
10527 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
10528 break;
10529 }
10530 default:
10531 pr_warn("unknown perf sample type %d\n", e->type);
10532 return LIBBPF_PERF_EVENT_ERROR;
10533 }
10534 return LIBBPF_PERF_EVENT_CONT;
10535}
10536
10537static int perf_buffer__process_records(struct perf_buffer *pb,
10538 struct perf_cpu_buf *cpu_buf)
10539{
10540 enum bpf_perf_event_ret ret;
10541
10542 ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
10543 pb->page_size, &cpu_buf->buf,
10544 &cpu_buf->buf_size,
10545 perf_buffer__process_record, cpu_buf);
10546 if (ret != LIBBPF_PERF_EVENT_CONT)
10547 return ret;
10548 return 0;
10549}
10550
10551int perf_buffer__epoll_fd(const struct perf_buffer *pb)
10552{
10553 return pb->epoll_fd;
10554}
10555
10556int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
10557{
10558 int i, cnt, err;
10559
10560 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
10561 for (i = 0; i < cnt; i++) {
10562 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
10563
10564 err = perf_buffer__process_records(pb, cpu_buf);
10565 if (err) {
10566 pr_warn("error while processing records: %d\n", err);
10567 return err;
10568 }
10569 }
10570 return cnt < 0 ? -errno : cnt;
10571}
10572
10573/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
10574 * manager.
10575 */
10576size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
10577{
10578 return pb->cpu_cnt;
10579}
10580
10581/*
10582 * Return perf_event FD of a ring buffer in *buf_idx* slot of
10583 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
10584 * select()/poll()/epoll() Linux syscalls.
10585 */
10586int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
10587{
10588 struct perf_cpu_buf *cpu_buf;
10589
10590 if (buf_idx >= pb->cpu_cnt)
10591 return -EINVAL;
10592
10593 cpu_buf = pb->cpu_bufs[buf_idx];
10594 if (!cpu_buf)
10595 return -ENOENT;
10596
10597 return cpu_buf->fd;
10598}
10599
10600/*
10601 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
10602 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
10603 * consume, do nothing and return success.
10604 * Returns:
10605 * - 0 on success;
10606 * - <0 on failure.
10607 */
10608int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
10609{
10610 struct perf_cpu_buf *cpu_buf;
10611
10612 if (buf_idx >= pb->cpu_cnt)
10613 return -EINVAL;
10614
10615 cpu_buf = pb->cpu_bufs[buf_idx];
10616 if (!cpu_buf)
10617 return -ENOENT;
10618
10619 return perf_buffer__process_records(pb, cpu_buf);
10620}
10621
10622int perf_buffer__consume(struct perf_buffer *pb)
10623{
10624 int i, err;
10625
10626 for (i = 0; i < pb->cpu_cnt; i++) {
10627 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10628
10629 if (!cpu_buf)
10630 continue;
10631
10632 err = perf_buffer__process_records(pb, cpu_buf);
10633 if (err) {
10634 pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
10635 return err;
10636 }
10637 }
10638 return 0;
10639}
10640
10641struct bpf_prog_info_array_desc {
10642 int array_offset; /* e.g. offset of jited_prog_insns */
10643 int count_offset; /* e.g. offset of jited_prog_len */
10644 int size_offset; /* > 0: offset of rec size,
10645 * < 0: fixed size of -size_offset
10646 */
10647};
10648
10649static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
10650 [BPF_PROG_INFO_JITED_INSNS] = {
10651 offsetof(struct bpf_prog_info, jited_prog_insns),
10652 offsetof(struct bpf_prog_info, jited_prog_len),
10653 -1,
10654 },
10655 [BPF_PROG_INFO_XLATED_INSNS] = {
10656 offsetof(struct bpf_prog_info, xlated_prog_insns),
10657 offsetof(struct bpf_prog_info, xlated_prog_len),
10658 -1,
10659 },
10660 [BPF_PROG_INFO_MAP_IDS] = {
10661 offsetof(struct bpf_prog_info, map_ids),
10662 offsetof(struct bpf_prog_info, nr_map_ids),
10663 -(int)sizeof(__u32),
10664 },
10665 [BPF_PROG_INFO_JITED_KSYMS] = {
10666 offsetof(struct bpf_prog_info, jited_ksyms),
10667 offsetof(struct bpf_prog_info, nr_jited_ksyms),
10668 -(int)sizeof(__u64),
10669 },
10670 [BPF_PROG_INFO_JITED_FUNC_LENS] = {
10671 offsetof(struct bpf_prog_info, jited_func_lens),
10672 offsetof(struct bpf_prog_info, nr_jited_func_lens),
10673 -(int)sizeof(__u32),
10674 },
10675 [BPF_PROG_INFO_FUNC_INFO] = {
10676 offsetof(struct bpf_prog_info, func_info),
10677 offsetof(struct bpf_prog_info, nr_func_info),
10678 offsetof(struct bpf_prog_info, func_info_rec_size),
10679 },
10680 [BPF_PROG_INFO_LINE_INFO] = {
10681 offsetof(struct bpf_prog_info, line_info),
10682 offsetof(struct bpf_prog_info, nr_line_info),
10683 offsetof(struct bpf_prog_info, line_info_rec_size),
10684 },
10685 [BPF_PROG_INFO_JITED_LINE_INFO] = {
10686 offsetof(struct bpf_prog_info, jited_line_info),
10687 offsetof(struct bpf_prog_info, nr_jited_line_info),
10688 offsetof(struct bpf_prog_info, jited_line_info_rec_size),
10689 },
10690 [BPF_PROG_INFO_PROG_TAGS] = {
10691 offsetof(struct bpf_prog_info, prog_tags),
10692 offsetof(struct bpf_prog_info, nr_prog_tags),
10693 -(int)sizeof(__u8) * BPF_TAG_SIZE,
10694 },
10695
10696};
10697
10698static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
10699 int offset)
10700{
10701 __u32 *array = (__u32 *)info;
10702
10703 if (offset >= 0)
10704 return array[offset / sizeof(__u32)];
10705 return -(int)offset;
10706}
10707
10708static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
10709 int offset)
10710{
10711 __u64 *array = (__u64 *)info;
10712
10713 if (offset >= 0)
10714 return array[offset / sizeof(__u64)];
10715 return -(int)offset;
10716}
10717
10718static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
10719 __u32 val)
10720{
10721 __u32 *array = (__u32 *)info;
10722
10723 if (offset >= 0)
10724 array[offset / sizeof(__u32)] = val;
10725}
10726
10727static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
10728 __u64 val)
10729{
10730 __u64 *array = (__u64 *)info;
10731
10732 if (offset >= 0)
10733 array[offset / sizeof(__u64)] = val;
10734}
10735
10736struct bpf_prog_info_linear *
10737bpf_program__get_prog_info_linear(int fd, __u64 arrays)
10738{
10739 struct bpf_prog_info_linear *info_linear;
10740 struct bpf_prog_info info = {};
10741 __u32 info_len = sizeof(info);
10742 __u32 data_len = 0;
10743 int i, err;
10744 void *ptr;
10745
10746 if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
10747 return ERR_PTR(-EINVAL);
10748
10749 /* step 1: get array dimensions */
10750 err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
10751 if (err) {
10752 pr_debug("can't get prog info: %s\n", strerror(errno));
10753 return ERR_PTR(-EFAULT);
10754 }
10755
10756 /* step 2: calculate total size of all arrays */
10757 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10758 bool include_array = (arrays & (1UL << i)) > 0;
10759 struct bpf_prog_info_array_desc *desc;
10760 __u32 count, size;
10761
10762 desc = bpf_prog_info_array_desc + i;
10763
10764 /* kernel is too old to support this field */
10765 if (info_len < desc->array_offset + sizeof(__u32) ||
10766 info_len < desc->count_offset + sizeof(__u32) ||
10767 (desc->size_offset > 0 && info_len < desc->size_offset))
10768 include_array = false;
10769
10770 if (!include_array) {
10771 arrays &= ~(1UL << i); /* clear the bit */
10772 continue;
10773 }
10774
10775 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
10776 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
10777
10778 data_len += count * size;
10779 }
10780
10781 /* step 3: allocate continuous memory */
10782 data_len = roundup(data_len, sizeof(__u64));
10783 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
10784 if (!info_linear)
10785 return ERR_PTR(-ENOMEM);
10786
10787 /* step 4: fill data to info_linear->info */
10788 info_linear->arrays = arrays;
10789 memset(&info_linear->info, 0, sizeof(info));
10790 ptr = info_linear->data;
10791
10792 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10793 struct bpf_prog_info_array_desc *desc;
10794 __u32 count, size;
10795
10796 if ((arrays & (1UL << i)) == 0)
10797 continue;
10798
10799 desc = bpf_prog_info_array_desc + i;
10800 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
10801 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
10802 bpf_prog_info_set_offset_u32(&info_linear->info,
10803 desc->count_offset, count);
10804 bpf_prog_info_set_offset_u32(&info_linear->info,
10805 desc->size_offset, size);
10806 bpf_prog_info_set_offset_u64(&info_linear->info,
10807 desc->array_offset,
10808 ptr_to_u64(ptr));
10809 ptr += count * size;
10810 }
10811
10812 /* step 5: call syscall again to get required arrays */
10813 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
10814 if (err) {
10815 pr_debug("can't get prog info: %s\n", strerror(errno));
10816 free(info_linear);
10817 return ERR_PTR(-EFAULT);
10818 }
10819
10820 /* step 6: verify the data */
10821 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10822 struct bpf_prog_info_array_desc *desc;
10823 __u32 v1, v2;
10824
10825 if ((arrays & (1UL << i)) == 0)
10826 continue;
10827
10828 desc = bpf_prog_info_array_desc + i;
10829 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
10830 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
10831 desc->count_offset);
10832 if (v1 != v2)
10833 pr_warn("%s: mismatch in element count\n", __func__);
10834
10835 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
10836 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
10837 desc->size_offset);
10838 if (v1 != v2)
10839 pr_warn("%s: mismatch in rec size\n", __func__);
10840 }
10841
10842 /* step 7: update info_len and data_len */
10843 info_linear->info_len = sizeof(struct bpf_prog_info);
10844 info_linear->data_len = data_len;
10845
10846 return info_linear;
10847}
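
/* Usage sketch for bpf_program__get_prog_info_linear() (illustrative): fetch
 * the map IDs of a loaded program in a single self-contained allocation;
 * only the array bits requested in "arrays" are populated:
 *
 *	struct bpf_prog_info_linear *info;
 *	__u32 *map_ids, i;
 *
 *	info = bpf_program__get_prog_info_linear(prog_fd,
 *						 1UL << BPF_PROG_INFO_MAP_IDS);
 *	if (libbpf_get_error(info))
 *		return -EFAULT;
 *	map_ids = (__u32 *)(unsigned long)info->info.map_ids;
 *	for (i = 0; i < info->info.nr_map_ids; i++)
 *		; // map_ids[i] is usable here
 *	free(info);
 */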
10848
10849void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
10850{
10851 int i;
10852
10853 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10854 struct bpf_prog_info_array_desc *desc;
10855 __u64 addr, offs;
10856
10857 if ((info_linear->arrays & (1UL << i)) == 0)
10858 continue;
10859
10860 desc = bpf_prog_info_array_desc + i;
10861 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
10862 desc->array_offset);
10863 offs = addr - ptr_to_u64(info_linear->data);
10864 bpf_prog_info_set_offset_u64(&info_linear->info,
10865 desc->array_offset, offs);
10866 }
10867}
10868
10869void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
10870{
10871 int i;
10872
10873 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10874 struct bpf_prog_info_array_desc *desc;
10875 __u64 addr, offs;
10876
10877 if ((info_linear->arrays & (1UL << i)) == 0)
10878 continue;
10879
10880 desc = bpf_prog_info_array_desc + i;
10881 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
10882 desc->array_offset);
10883 addr = offs + ptr_to_u64(info_linear->data);
10884 bpf_prog_info_set_offset_u64(&info_linear->info,
10885 desc->array_offset, addr);
10886 }
10887}
10888
10889int bpf_program__set_attach_target(struct bpf_program *prog,
10890 int attach_prog_fd,
10891 const char *attach_func_name)
10892{
10893 int btf_obj_fd = 0, btf_id = 0, err;
10894
10895 if (!prog || attach_prog_fd < 0 || !attach_func_name)
10896 return -EINVAL;
10897
10898 if (prog->obj->loaded)
10899 return -EINVAL;
10900
10901 if (attach_prog_fd) {
10902 btf_id = libbpf_find_prog_btf_id(attach_func_name,
10903 attach_prog_fd);
10904 if (btf_id < 0)
10905 return btf_id;
10906 } else {
10907 /* load btf_vmlinux, if not yet */
10908 err = bpf_object__load_vmlinux_btf(prog->obj, true);
10909 if (err)
10910 return err;
10911 err = find_kernel_btf_id(prog->obj, attach_func_name,
10912 prog->expected_attach_type,
10913 &btf_obj_fd, &btf_id);
10914 if (err)
10915 return err;
10916 }
10917
10918 prog->attach_btf_id = btf_id;
10919 prog->attach_btf_obj_fd = btf_obj_fd;
10920 prog->attach_prog_fd = attach_prog_fd;
10921 return 0;
10922}
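
/* Usage sketch for bpf_program__set_attach_target() (illustrative; the
 * kernel function name is an example): re-target a fentry/fexit or freplace
 * program before the object is loaded, instead of relying on the name baked
 * into its SEC() definition:
 *
 *	int err;
 *
 *	err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
 *	if (err)
 *		return err;
 *	err = bpf_object__load(obj);
 *
 * attach_prog_fd == 0 resolves the name against kernel (vmlinux or module)
 * BTF; a non-zero FD resolves it against that BPF program's BTF instead.
 */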
10923
10924int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
10925{
10926 int err = 0, n, len, start, end = -1;
10927 bool *tmp;
10928
10929 *mask = NULL;
10930 *mask_sz = 0;
10931
10932 /* Each substring separated by ',' has the format \d+-\d+ or \d+ */
10933 while (*s) {
10934 if (*s == ',' || *s == '\n') {
10935 s++;
10936 continue;
10937 }
10938 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
10939 if (n <= 0 || n > 2) {
10940 pr_warn("Failed to get CPU range %s: %d\n", s, n);
10941 err = -EINVAL;
10942 goto cleanup;
10943 } else if (n == 1) {
10944 end = start;
10945 }
10946 if (start < 0 || start > end) {
10947 pr_warn("Invalid CPU range [%d,%d] in %s\n",
10948 start, end, s);
10949 err = -EINVAL;
10950 goto cleanup;
10951 }
10952 tmp = realloc(*mask, end + 1);
10953 if (!tmp) {
10954 err = -ENOMEM;
10955 goto cleanup;
10956 }
10957 *mask = tmp;
10958 memset(tmp + *mask_sz, 0, start - *mask_sz);
10959 memset(tmp + start, 1, end - start + 1);
10960 *mask_sz = end + 1;
10961 s += len;
10962 }
10963 if (!*mask_sz) {
10964 pr_warn("Empty CPU range\n");
10965 return -EINVAL;
10966 }
10967 return 0;
10968cleanup:
10969 free(*mask);
10970 *mask = NULL;
10971 return err;
10972}
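
/* Example of the accepted syntax (illustrative): parsing "0-3,5" yields
 * mask_sz == 6 with mask[0..3] and mask[5] set and mask[4] clear:
 *
 *	bool *mask;
 *	int n, err;
 *
 *	err = parse_cpu_mask_str("0-3,5\n", &mask, &n);
 *	if (!err)
 *		free(mask);
 */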
10973
10974int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
10975{
10976 int fd, err = 0, len;
10977 char buf[128];
10978
10979 fd = open(fcpu, O_RDONLY);
10980 if (fd < 0) {
10981 err = -errno;
10982 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
10983 return err;
10984 }
10985 len = read(fd, buf, sizeof(buf));
10986 close(fd);
10987 if (len <= 0) {
10988 err = len ? -errno : -EINVAL;
10989 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
10990 return err;
10991 }
10992 if (len >= sizeof(buf)) {
10993 pr_warn("CPU mask is too big in file %s\n", fcpu);
10994 return -E2BIG;
10995 }
10996 buf[len] = '\0';
10997
10998 return parse_cpu_mask_str(buf, mask, mask_sz);
10999}
11000
11001int libbpf_num_possible_cpus(void)
11002{
11003 static const char *fcpu = "/sys/devices/system/cpu/possible";
11004 static int cpus;
11005 int err, n, i, tmp_cpus;
11006 bool *mask;
11007
11008 tmp_cpus = READ_ONCE(cpus);
11009 if (tmp_cpus > 0)
11010 return tmp_cpus;
11011
11012 err = parse_cpu_mask_file(fcpu, &mask, &n);
11013 if (err)
11014 return err;
11015
11016 tmp_cpus = 0;
11017 for (i = 0; i < n; i++) {
11018 if (mask[i])
11019 tmp_cpus++;
11020 }
11021 free(mask);
11022
11023 WRITE_ONCE(cpus, tmp_cpus);
11024 return tmp_cpus;
11025}
11026
11027int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
11028 const struct bpf_object_open_opts *opts)
11029{
11030 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
11031 .object_name = s->name,
11032 );
11033 struct bpf_object *obj;
11034 int i;
11035
11036 /* Attempt to preserve opts->object_name, unless overridden by user
11037 * explicitly. Overwriting object name for skeletons is discouraged,
11038 * as it breaks global data maps, because they contain the object name
11039 * as their own map name prefix. When the skeleton is generated,
11040 * bpftool assumes that this name will stay the same.
11041 */
11042 if (opts) {
11043 memcpy(&skel_opts, opts, sizeof(*opts));
11044 if (!opts->object_name)
11045 skel_opts.object_name = s->name;
11046 }
11047
11048 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
11049 if (IS_ERR(obj)) {
11050 pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
11051 s->name, PTR_ERR(obj));
11052 return PTR_ERR(obj);
11053 }
11054
11055 *s->obj = obj;
11056
11057 for (i = 0; i < s->map_cnt; i++) {
11058 struct bpf_map **map = s->maps[i].map;
11059 const char *name = s->maps[i].name;
11060 void **mmaped = s->maps[i].mmaped;
11061
11062 *map = bpf_object__find_map_by_name(obj, name);
11063 if (!*map) {
11064 pr_warn("failed to find skeleton map '%s'\n", name);
11065 return -ESRCH;
11066 }
11067
11068 /* externs shouldn't be pre-setup from user code */
11069 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
11070 *mmaped = (*map)->mmaped;
11071 }
11072
11073 for (i = 0; i < s->prog_cnt; i++) {
11074 struct bpf_program **prog = s->progs[i].prog;
11075 const char *name = s->progs[i].name;
11076
11077 *prog = bpf_object__find_program_by_name(obj, name);
11078 if (!*prog) {
11079 pr_warn("failed to find skeleton program '%s'\n", name);
11080 return -ESRCH;
11081 }
11082 }
11083
11084 return 0;
11085}
11086
11087int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
11088{
11089 int i, err;
11090
11091 err = bpf_object__load(*s->obj);
11092 if (err) {
11093 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
11094 return err;
11095 }
11096
11097 for (i = 0; i < s->map_cnt; i++) {
11098 struct bpf_map *map = *s->maps[i].map;
11099 size_t mmap_sz = bpf_map_mmap_sz(map);
11100 int prot, map_fd = bpf_map__fd(map);
11101 void **mmaped = s->maps[i].mmaped;
11102
11103 if (!mmaped)
11104 continue;
11105
11106 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
11107 *mmaped = NULL;
11108 continue;
11109 }
11110
11111 if (map->def.map_flags & BPF_F_RDONLY_PROG)
11112 prot = PROT_READ;
11113 else
11114 prot = PROT_READ | PROT_WRITE;
11115
11116 /* Remap the anonymous mmap()-ed "map initialization image" as
11117 * BPF map-backed mmap()-ed memory, preserving the same memory
11118 * address. This will cause the kernel to change the process's
11119 * page table to point to a different piece of kernel memory,
11120 * but from the userspace point of view the memory address (and
11121 * its contents, identical at this point) will stay the same.
11122 * This mapping will be released by bpf_object__close() as part
11123 * of the normal clean-up procedure, so we don't need to worry
11124 * about it from the skeleton's clean-up perspective.
11125 */
11126 *mmaped = mmap(map->mmaped, mmap_sz, prot,
11127 MAP_SHARED | MAP_FIXED, map_fd, 0);
11128 if (*mmaped == MAP_FAILED) {
11129 err = -errno;
11130 *mmaped = NULL;
11131 pr_warn("failed to re-mmap() map '%s': %d\n",
11132 bpf_map__name(map), err);
11133 return err;
11134 }
11135 }
11136
11137 return 0;
11138}
11139
11140int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
11141{
11142 int i;
11143
11144 for (i = 0; i < s->prog_cnt; i++) {
11145 struct bpf_program *prog = *s->progs[i].prog;
11146 struct bpf_link **link = s->progs[i].link;
11147 const struct bpf_sec_def *sec_def;
11148
11149 if (!prog->load)
11150 continue;
11151
11152 sec_def = find_sec_def(prog->sec_name);
11153 if (!sec_def || !sec_def->attach_fn)
11154 continue;
11155
11156 *link = sec_def->attach_fn(sec_def, prog);
11157 if (IS_ERR(*link)) {
11158 pr_warn("failed to auto-attach program '%s': %ld\n",
11159 bpf_program__name(prog), PTR_ERR(*link));
11160 return PTR_ERR(*link);
11161 }
11162 }
11163
11164 return 0;
11165}
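
/* Usage sketch (illustrative): the skeleton helpers above are normally
 * driven through a bpftool-generated header; for a hypothetical
 * "minimal.bpf.o" object, "bpftool gen skeleton" emits wrappers roughly
 * like this:
 *
 *	struct minimal_bpf *skel;
 *	int err;
 *
 *	skel = minimal_bpf__open();	// bpf_object__open_skeleton()
 *	if (!skel)
 *		return -ENOMEM;
 *	err = minimal_bpf__load(skel);	// bpf_object__load_skeleton()
 *	if (!err)
 *		err = minimal_bpf__attach(skel); // bpf_object__attach_skeleton()
 *	...
 *	minimal_bpf__destroy(skel);	// detach + bpf_object__close()
 */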
11166
11167void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
11168{
11169 int i;
11170
11171 for (i = 0; i < s->prog_cnt; i++) {
11172 struct bpf_link **link = s->progs[i].link;
11173
11174 bpf_link__destroy(*link);
11175 *link = NULL;
11176 }
11177}
11178
11179void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
11180{
11181 if (s->progs)
11182 bpf_object__detach_skeleton(s);
11183 if (s->obj)
11184 bpf_object__close(*s->obj);
11185 free(s->maps);
11186 free(s->progs);
11187 free(s);
11188}