// SPDX-License-Identifier: GPL-2.0
/*
 * bpf-loader.c
 *
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <linux/filter.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <stdlib.h>
#include "debug.h"
#include "evlist.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
#include "parse-events.h"
#include "strfilter.h"
#include "util.h"
#include "llvm-utils.h"
#include "c++/clang-c.h"
#include "hashmap.h"
#include "asm/bug.h"

#include <internal/xyarray.h>

/* temporarily disable libbpf deprecation warnings */
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"

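/* Funnel libbpf's internal log output through perf's verbose debug printing. */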
static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
                             const char *fmt, va_list args)
{
        return veprintf(1, verbose, pr_fmt(fmt), args);
}

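/*
 * Per-program private data: the parsed probe spec, prologue bookkeeping
 * and, for tracepoint programs, the system/event names.
 */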
struct bpf_prog_priv {
        bool is_tp;
        char *sys_name;
        char *evt_name;
        struct perf_probe_event pev;
        bool need_prologue;
        struct bpf_insn *insns_buf;
        int nr_types;
        int *type_mapping;
        int *prologue_fds;
};

struct bpf_perf_object {
        struct list_head list;
        struct bpf_object *obj;
};

struct bpf_preproc_result {
        struct bpf_insn *new_insn_ptr;
        int new_insn_cnt;
};

static LIST_HEAD(bpf_objects_list);
static struct hashmap *bpf_program_hash;
static struct hashmap *bpf_map_hash;

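/* Walk the global list of loaded objects; returns NULL past the last entry. */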
static struct bpf_perf_object *
bpf_perf_object__next(struct bpf_perf_object *prev)
{
        if (!prev) {
                if (list_empty(&bpf_objects_list))
                        return NULL;

                return list_first_entry(&bpf_objects_list, struct bpf_perf_object, list);
        }
        if (list_is_last(&prev->list, &bpf_objects_list))
                return NULL;

        return list_next_entry(prev, list);
}

#define bpf_perf_object__for_each(perf_obj, tmp)                        \
        for ((perf_obj) = bpf_perf_object__next(NULL),                  \
             (tmp) = bpf_perf_object__next(perf_obj);                   \
             (perf_obj) != NULL;                                        \
             (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))

static bool libbpf_initialized;
static int libbpf_sec_handler;

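/* Wrap a freshly opened libbpf object and add it to the global list. */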
static int bpf_perf_object__add(struct bpf_object *obj)
{
        struct bpf_perf_object *perf_obj = zalloc(sizeof(*perf_obj));

        if (perf_obj) {
                INIT_LIST_HEAD(&perf_obj->list);
                perf_obj->obj = obj;
                list_add_tail(&perf_obj->list, &bpf_objects_list);
        }
        return perf_obj ? 0 : -ENOMEM;
}

static void *program_priv(const struct bpf_program *prog)
{
        void *priv;

        if (IS_ERR_OR_NULL(bpf_program_hash))
                return NULL;
        if (!hashmap__find(bpf_program_hash, prog, &priv))
                return NULL;
        return priv;
}

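/*
 * Placeholder instructions zeroing r2-r5.  They are prepended to programs
 * that need a prologue and are later replaced by the generated argument
 * fetcher (see bpf_object__load_prologue()).
 */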
static struct bpf_insn prologue_init_insn[] = {
        BPF_MOV64_IMM(BPF_REG_2, 0),
        BPF_MOV64_IMM(BPF_REG_3, 0),
        BPF_MOV64_IMM(BPF_REG_4, 0),
        BPF_MOV64_IMM(BPF_REG_5, 0),
};

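/*
 * libbpf pre-load callback: prepend the placeholder init instructions to
 * every program that needs a prologue.
 */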
static int libbpf_prog_prepare_load_fn(struct bpf_program *prog,
                                       struct bpf_prog_load_opts *opts __maybe_unused,
                                       long cookie __maybe_unused)
{
        size_t init_size_cnt = ARRAY_SIZE(prologue_init_insn);
        size_t orig_insn_cnt, insn_cnt, init_size, orig_size;
        struct bpf_prog_priv *priv = program_priv(prog);
        const struct bpf_insn *orig_insn;
        struct bpf_insn *insn;

        if (IS_ERR_OR_NULL(priv)) {
                pr_debug("bpf: failed to get private field\n");
                return -BPF_LOADER_ERRNO__INTERNAL;
        }

        if (!priv->need_prologue)
                return 0;

        /* prepend initialization code to program instructions */
        orig_insn = bpf_program__insns(prog);
        orig_insn_cnt = bpf_program__insn_cnt(prog);
        init_size = init_size_cnt * sizeof(*insn);
        orig_size = orig_insn_cnt * sizeof(*insn);

        insn_cnt = orig_insn_cnt + init_size_cnt;
        insn = malloc(insn_cnt * sizeof(*insn));
        if (!insn)
                return -ENOMEM;

        memcpy(insn, prologue_init_insn, init_size);
        memcpy((char *) insn + init_size, orig_insn, orig_size);
        bpf_program__set_insns(prog, insn, insn_cnt);
        return 0;
}

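/* One-time libbpf setup: install the print callback and a kprobe section handler. */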
static int libbpf_init(void)
{
        LIBBPF_OPTS(libbpf_prog_handler_opts, handler_opts,
                    .prog_prepare_load_fn = libbpf_prog_prepare_load_fn,
        );

        if (libbpf_initialized)
                return 0;

        libbpf_set_print(libbpf_perf_print);
        libbpf_sec_handler = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_KPROBE,
                                                          0, &handler_opts);
        if (libbpf_sec_handler < 0) {
                pr_debug("bpf: failed to register libbpf section handler: %d\n",
                         libbpf_sec_handler);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }
        libbpf_initialized = true;
        return 0;
}

struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
        LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = name);
        struct bpf_object *obj;
        int err;

        err = libbpf_init();
        if (err)
                return ERR_PTR(err);

        obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
        if (IS_ERR_OR_NULL(obj)) {
                pr_debug("bpf: failed to load buffer\n");
                return ERR_PTR(-EINVAL);
        }

        if (bpf_perf_object__add(obj)) {
                bpf_object__close(obj);
                return ERR_PTR(-ENOMEM);
        }

        return obj;
}

static void bpf_perf_object__close(struct bpf_perf_object *perf_obj)
{
        list_del(&perf_obj->list);
        bpf_object__close(perf_obj->obj);
        free(perf_obj);
}

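/*
 * Open a BPF object from a file.  With 'source' set, compile the C source
 * first: try the builtin clang and fall back to the external LLVM helper.
 */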
struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
        LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = filename);
        struct bpf_object *obj;
        int err;

        err = libbpf_init();
        if (err)
                return ERR_PTR(err);

        if (source) {
                void *obj_buf;
                size_t obj_buf_sz;

                perf_clang__init();
                err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
                perf_clang__cleanup();
                if (err) {
                        pr_debug("bpf: builtin compilation failed: %d, trying external compiler\n", err);
                        err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
                        if (err)
                                return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
                } else {
                        pr_debug("bpf: successful builtin compilation\n");
                }
                obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);

                if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
                        llvm__dump_obj(filename, obj_buf, obj_buf_sz);

                free(obj_buf);
        } else {
                obj = bpf_object__open(filename);
        }

        if (IS_ERR_OR_NULL(obj)) {
                pr_debug("bpf: failed to load %s\n", filename);
                return obj;
        }

        if (bpf_perf_object__add(obj)) {
                bpf_object__close(obj);
                return ERR_PTR(-ENOMEM);
        }

        return obj;
}

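/* Close the per-tev prologue program fds opened by bpf_object__load_prologue(). */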
static void close_prologue_programs(struct bpf_prog_priv *priv)
{
        struct perf_probe_event *pev;
        int i, fd;

        if (!priv->need_prologue)
                return;
        pev = &priv->pev;
        for (i = 0; i < pev->ntevs; i++) {
                fd = priv->prologue_fds[i];
                if (fd != -1)
                        close(fd);
        }
}

static void
clear_prog_priv(const struct bpf_program *prog __maybe_unused,
                void *_priv)
{
        struct bpf_prog_priv *priv = _priv;

        close_prologue_programs(priv);
        cleanup_perf_probe_events(&priv->pev, 1);
        zfree(&priv->insns_buf);
        zfree(&priv->prologue_fds);
        zfree(&priv->type_mapping);
        zfree(&priv->sys_name);
        zfree(&priv->evt_name);
        free(priv);
}

static void bpf_program_hash_free(void)
{
        struct hashmap_entry *cur;
        size_t bkt;

        if (IS_ERR_OR_NULL(bpf_program_hash))
                return;

        hashmap__for_each_entry(bpf_program_hash, cur, bkt)
                clear_prog_priv(cur->key, cur->value);

        hashmap__free(bpf_program_hash);
        bpf_program_hash = NULL;
}

static void bpf_map_hash_free(void);

void bpf__clear(void)
{
        struct bpf_perf_object *perf_obj, *tmp;

        bpf_perf_object__for_each(perf_obj, tmp) {
                bpf__unprobe(perf_obj->obj);
                bpf_perf_object__close(perf_obj);
        }

        bpf_program_hash_free();
        bpf_map_hash_free();
}

static size_t ptr_hash(const void *__key, void *ctx __maybe_unused)
{
        return (size_t) __key;
}

static bool ptr_equal(const void *key1, const void *key2,
                      void *ctx __maybe_unused)
{
        return key1 == key2;
}

static int program_set_priv(struct bpf_program *prog, void *priv)
{
        void *old_priv;

        /*
         * Should not happen; we warn about it in the caller,
         * config_bpf_program().
         */
        if (IS_ERR(bpf_program_hash))
                return PTR_ERR(bpf_program_hash);

        if (!bpf_program_hash) {
                bpf_program_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
                if (IS_ERR(bpf_program_hash))
                        return PTR_ERR(bpf_program_hash);
        }

        old_priv = program_priv(prog);
        if (old_priv) {
                clear_prog_priv(prog, old_priv);
                return hashmap__set(bpf_program_hash, prog, priv, NULL, NULL);
        }
        return hashmap__add(bpf_program_hash, prog, priv);
}

static int
prog_config__exec(const char *value, struct perf_probe_event *pev)
{
        pev->uprobes = true;
        pev->target = strdup(value);
        if (!pev->target)
                return -ENOMEM;
        return 0;
}

static int
prog_config__module(const char *value, struct perf_probe_event *pev)
{
        pev->uprobes = false;
        pev->target = strdup(value);
        if (!pev->target)
                return -ENOMEM;
        return 0;
}

static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
        int err;
        bool bool_value;

        if (!pbool)
                return -EINVAL;

        err = strtobool(value, &bool_value);
        if (err)
                return err;

        *pbool = invert ? !bool_value : bool_value;
        return 0;
}

static int
prog_config__inlines(const char *value,
                     struct perf_probe_event *pev __maybe_unused)
{
        return prog_config__bool(value, &probe_conf.no_inlines, true);
}

static int
prog_config__force(const char *value,
                   struct perf_probe_event *pev __maybe_unused)
{
        return prog_config__bool(value, &probe_conf.force_add, false);
}

static struct {
        const char *key;
        const char *usage;
        const char *desc;
        int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
        {
                .key    = "exec",
                .usage  = "exec=<full path of file>",
                .desc   = "Set uprobe target",
                .func   = prog_config__exec,
        },
        {
                .key    = "module",
                .usage  = "module=<module name>    ",
                .desc   = "Set kprobe module",
                .func   = prog_config__module,
        },
        {
                .key    = "inlines",
                .usage  = "inlines=[yes|no]        ",
                .desc   = "Probe at inline symbol",
                .func   = prog_config__inlines,
        },
        {
                .key    = "force",
                .usage  = "force=[yes|no]          ",
                .desc   = "Forcibly add events with existing name",
                .func   = prog_config__force,
        },
};

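/* Dispatch one key=value term to its handler; print the valid options on failure. */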
static int
do_prog_config(const char *key, const char *value,
               struct perf_probe_event *pev)
{
        unsigned int i;

        pr_debug("config bpf program: %s=%s\n", key, value);
        for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
                if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
                        return bpf_prog_config_terms[i].func(value, pev);

        pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
                 key, value);

        pr_debug("\nHint: Valid options are:\n");
        for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
                pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
                         bpf_prog_config_terms[i].desc);
        pr_debug("\n");

        return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}

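/*
 * Strip the leading "key=value;" terms from a section name, applying each
 * to 'pev', and return a pointer to the remaining main config string.
 */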
static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
        char *text = strdup(config_str);
        char *sep, *line;
        const char *main_str = NULL;
        int err = 0;

        if (!text) {
                pr_debug("Not enough memory: dup config_str failed\n");
                return ERR_PTR(-ENOMEM);
        }

        line = text;
        while ((sep = strchr(line, ';'))) {
                char *equ;

                *sep = '\0';
                equ = strchr(line, '=');
                if (!equ) {
                        pr_warning("WARNING: invalid config in BPF object: %s\n",
                                   line);
                        pr_warning("\tShould be 'key=value'.\n");
                        goto nextline;
                }
                *equ = '\0';

                err = do_prog_config(line, equ + 1, pev);
                if (err)
                        break;
nextline:
                line = sep + 1;
        }

        if (!err)
                main_str = config_str + (line - text);
        free(text);

        return err ? ERR_PTR(err) : main_str;
}

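/* Parse a section name into either a tracepoint spec or a perf probe event. */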
static int
parse_prog_config(const char *config_str, const char **p_main_str,
                  bool *is_tp, struct perf_probe_event *pev)
{
        int err;
        const char *main_str = parse_prog_config_kvpair(config_str, pev);

        if (IS_ERR(main_str))
                return PTR_ERR(main_str);

        *p_main_str = main_str;
        if (!strchr(main_str, '=')) {
                /* Is a tracepoint event? */
                const char *s = strchr(main_str, ':');

                if (!s) {
                        pr_debug("bpf: '%s' is not a valid tracepoint\n",
                                 config_str);
                        return -BPF_LOADER_ERRNO__CONFIG;
                }

                *is_tp = true;
                return 0;
        }

        *is_tp = false;
        err = parse_perf_probe_command(main_str, pev);
        if (err < 0) {
                pr_debug("bpf: '%s' is not a valid config string\n",
                         config_str);
                /* parse failed, no need to clear pev */
                return -BPF_LOADER_ERRNO__CONFIG;
        }
        return 0;
}

static int
config_bpf_program(struct bpf_program *prog)
{
        struct perf_probe_event *pev = NULL;
        struct bpf_prog_priv *priv = NULL;
        const char *config_str, *main_str;
        bool is_tp = false;
        int err;

        /* Initialize per-program probing setting */
        probe_conf.no_inlines = false;
        probe_conf.force_add = false;

        priv = calloc(1, sizeof(*priv));
        if (!priv) {
                pr_debug("bpf: failed to alloc priv\n");
                return -ENOMEM;
        }
        pev = &priv->pev;

        config_str = bpf_program__section_name(prog);
        pr_debug("bpf: config program '%s'\n", config_str);
        err = parse_prog_config(config_str, &main_str, &is_tp, pev);
        if (err)
                goto errout;

        if (is_tp) {
                char *s = strchr(main_str, ':');

                priv->is_tp = true;
                priv->sys_name = strndup(main_str, s - main_str);
                priv->evt_name = strdup(s + 1);
                goto set_priv;
        }

        if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
                pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
                         config_str, PERF_BPF_PROBE_GROUP);
                err = -BPF_LOADER_ERRNO__GROUP;
                goto errout;
        } else if (!pev->group) {
                pev->group = strdup(PERF_BPF_PROBE_GROUP);
        }

        if (!pev->group) {
                pr_debug("bpf: strdup failed\n");
                err = -ENOMEM;
                goto errout;
        }

        if (!pev->event) {
                pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
                         config_str);
                err = -BPF_LOADER_ERRNO__EVENTNAME;
                goto errout;
        }
        pr_debug("bpf: config '%s' is ok\n", config_str);

set_priv:
        err = program_set_priv(prog, priv);
        if (err) {
                pr_debug("Failed to set priv for program '%s'\n", config_str);
                goto errout;
        }

        return 0;

errout:
        if (pev)
                clear_perf_probe_event(pev);
        free(priv);
        return err;
}

static int bpf__prepare_probe(void)
{
        static int err = 0;
        static bool initialized = false;

        /*
         * Make err static, so if init fails the first time,
         * bpf__prepare_probe() fails on each later call as well,
         * without invoking init_probe_symbol_maps() multiple times.
         */
        if (initialized)
                return err;

        initialized = true;
        err = init_probe_symbol_maps(false);
        if (err < 0)
                pr_debug("Failed to init_probe_symbol_maps\n");
        probe_conf.max_probes = MAX_PROBES;
        return err;
}

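/*
 * Generate the argument-fetcher prologue for prologue type 'n' and emit
 * the prologue followed by the original instructions into 'res'.
 */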
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
                     const struct bpf_insn *orig_insns, int orig_insns_cnt,
                     struct bpf_preproc_result *res)
{
        struct bpf_prog_priv *priv = program_priv(prog);
        struct probe_trace_event *tev;
        struct perf_probe_event *pev;
        struct bpf_insn *buf;
        size_t prologue_cnt = 0;
        int i, err;

        if (IS_ERR_OR_NULL(priv) || priv->is_tp)
                goto errout;

        pev = &priv->pev;

        if (n < 0 || n >= priv->nr_types)
                goto errout;

        /* Find a tev that belongs to that type */
        for (i = 0; i < pev->ntevs; i++) {
                if (priv->type_mapping[i] == n)
                        break;
        }

        if (i >= pev->ntevs) {
                pr_debug("Internal error: prologue type %d not found\n", n);
                return -BPF_LOADER_ERRNO__PROLOGUE;
        }

        tev = &pev->tevs[i];

        buf = priv->insns_buf;
        err = bpf__gen_prologue(tev->args, tev->nargs,
                                buf, &prologue_cnt,
                                BPF_MAXINSNS - orig_insns_cnt);
        if (err) {
                const char *title;

                title = bpf_program__section_name(prog);
                pr_debug("Failed to generate prologue for program %s\n",
                         title);
                return err;
        }

        memcpy(&buf[prologue_cnt], orig_insns,
               sizeof(struct bpf_insn) * orig_insns_cnt);

        res->new_insn_ptr = buf;
        res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
        return 0;

errout:
        pr_debug("Internal error in preproc_gen_prologue\n");
        return -BPF_LOADER_ERRNO__PROLOGUE;
}

/*
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can prove it, but this margin is too narrow to contain the proof.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
        int i, ret;
        const struct probe_trace_event *tev1 =
                *(const struct probe_trace_event **)ptev1;
        const struct probe_trace_event *tev2 =
                *(const struct probe_trace_event **)ptev2;

        ret = tev2->nargs - tev1->nargs;
        if (ret)
                return ret;

        for (i = 0; i < tev1->nargs; i++) {
                struct probe_trace_arg *arg1, *arg2;
                struct probe_trace_arg_ref *ref1, *ref2;

                arg1 = &tev1->args[i];
                arg2 = &tev2->args[i];

                ret = strcmp(arg1->value, arg2->value);
                if (ret)
                        return ret;

                ref1 = arg1->ref;
                ref2 = arg2->ref;

                while (ref1 && ref2) {
                        ret = ref2->offset - ref1->offset;
                        if (ret)
                                return ret;

                        ref1 = ref1->next;
                        ref2 = ref2->next;
                }

                if (ref1 || ref2)
                        return ref2 ? 1 : -1;
        }

        return 0;
}

/*
 * Assign a type number to each tev in a pev.
 * 'mapping' is an array with the same number of slots as tevs in that pev.
 * 'nr_types' will be set to the number of distinct types.
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
                        int *nr_types)
{
        int i, type = 0;
        struct probe_trace_event **ptevs;

        size_t array_sz = sizeof(*ptevs) * pev->ntevs;

        ptevs = malloc(array_sz);
        if (!ptevs) {
                pr_debug("Not enough memory: alloc ptevs failed\n");
                return -ENOMEM;
        }

        pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
        for (i = 0; i < pev->ntevs; i++)
                ptevs[i] = &pev->tevs[i];

        qsort(ptevs, pev->ntevs, sizeof(*ptevs),
              compare_tev_args);

        for (i = 0; i < pev->ntevs; i++) {
                int n;

                n = ptevs[i] - pev->tevs;
                if (i == 0) {
                        mapping[n] = type;
                        pr_debug("mapping[%d]=%d\n", n, type);
                        continue;
                }

                if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
                        mapping[n] = type;
                else
                        mapping[n] = ++type;

                pr_debug("mapping[%d]=%d\n", n, mapping[n]);
        }
        free(ptevs);
        *nr_types = type + 1;

        return 0;
}

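/*
 * Decide whether a program needs a prologue (i.e. any of its tevs has
 * arguments) and allocate the buffers and type mapping used to build it.
 */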
static int hook_load_preprocessor(struct bpf_program *prog)
{
        struct bpf_prog_priv *priv = program_priv(prog);
        struct perf_probe_event *pev;
        bool need_prologue = false;
        int i;

        if (IS_ERR_OR_NULL(priv)) {
                pr_debug("Internal error when hooking preprocessor\n");
                return -BPF_LOADER_ERRNO__INTERNAL;
        }

        if (priv->is_tp) {
                priv->need_prologue = false;
                return 0;
        }

        pev = &priv->pev;
        for (i = 0; i < pev->ntevs; i++) {
                struct probe_trace_event *tev = &pev->tevs[i];

                if (tev->nargs > 0) {
                        need_prologue = true;
                        break;
                }
        }

        /*
         * Since none of the tevs has arguments, we don't need to
         * generate a prologue.
         */
        if (!need_prologue) {
                priv->need_prologue = false;
                return 0;
        }

        priv->need_prologue = true;
        priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
        if (!priv->insns_buf) {
                pr_debug("Not enough memory: alloc insns_buf failed\n");
                return -ENOMEM;
        }

        priv->prologue_fds = malloc(sizeof(int) * pev->ntevs);
        if (!priv->prologue_fds) {
                pr_debug("Not enough memory: alloc prologue fds failed\n");
                return -ENOMEM;
        }
        memset(priv->prologue_fds, -1, sizeof(int) * pev->ntevs);

        priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
        if (!priv->type_mapping) {
                pr_debug("Not enough memory: alloc type_mapping failed\n");
                return -ENOMEM;
        }
        memset(priv->type_mapping, -1,
               sizeof(int) * pev->ntevs);

        return map_prologue(pev, priv->type_mapping, &priv->nr_types);
}

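/* Configure every program in the object, then convert and apply its probe events. */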
int bpf__probe(struct bpf_object *obj)
{
        int err = 0;
        struct bpf_program *prog;
        struct bpf_prog_priv *priv;
        struct perf_probe_event *pev;

        err = bpf__prepare_probe();
        if (err) {
                pr_debug("bpf__prepare_probe failed\n");
                return err;
        }

        bpf_object__for_each_program(prog, obj) {
                err = config_bpf_program(prog);
                if (err)
                        goto out;

                priv = program_priv(prog);
                if (IS_ERR_OR_NULL(priv)) {
                        if (!priv)
                                err = -BPF_LOADER_ERRNO__INTERNAL;
                        else
                                err = PTR_ERR(priv);
                        goto out;
                }

                if (priv->is_tp) {
                        bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
                        continue;
                }

                bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
                pev = &priv->pev;

                err = convert_perf_probe_events(pev, 1);
                if (err < 0) {
                        pr_debug("bpf_probe: failed to convert perf probe events\n");
                        goto out;
                }

                err = apply_perf_probe_events(pev, 1);
                if (err < 0) {
                        pr_debug("bpf_probe: failed to apply perf probe events\n");
                        goto out;
                }

                /*
                 * After probing, consider the prologue, which adds an
                 * argument fetcher to BPF programs.
                 *
                 * hook_load_preprocessor() hooks a pre-processor onto the
                 * bpf_program, so the prologue is generated dynamically
                 * during loading.
                 */
                err = hook_load_preprocessor(prog);
                if (err)
                        goto out;
        }
out:
        return err < 0 ? err : 0;
}

#define EVENTS_WRITE_BUFSIZE 4096
int bpf__unprobe(struct bpf_object *obj)
{
        int err, ret = 0;
        struct bpf_program *prog;

        bpf_object__for_each_program(prog, obj) {
                struct bpf_prog_priv *priv = program_priv(prog);
                int i;

                if (IS_ERR_OR_NULL(priv) || priv->is_tp)
                        continue;

                for (i = 0; i < priv->pev.ntevs; i++) {
                        struct probe_trace_event *tev = &priv->pev.tevs[i];
                        char name_buf[EVENTS_WRITE_BUFSIZE];
                        struct strfilter *delfilter;

                        snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
                                 "%s:%s", tev->group, tev->event);
                        name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

                        delfilter = strfilter__new(name_buf, NULL);
                        if (!delfilter) {
                                pr_debug("Failed to create filter for unprobing\n");
                                ret = -ENOMEM;
                                continue;
                        }

                        err = del_perf_probe_events(delfilter);
                        strfilter__delete(delfilter);
                        if (err) {
                                pr_debug("Failed to delete %s\n", name_buf);
                                ret = err;
                                continue;
                        }
                }
        }
        return ret;
}

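/*
 * For each tev of a program that needs a prologue, generate the prologue
 * in place of the placeholder instructions, load the result with
 * bpf_prog_load() and stash the fd in prologue_fds.
 */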
static int bpf_object__load_prologue(struct bpf_object *obj)
{
        int init_cnt = ARRAY_SIZE(prologue_init_insn);
        const struct bpf_insn *orig_insns;
        struct bpf_preproc_result res;
        struct perf_probe_event *pev;
        struct bpf_program *prog;
        int orig_insns_cnt;

        bpf_object__for_each_program(prog, obj) {
                struct bpf_prog_priv *priv = program_priv(prog);
                int err, i, fd;

                if (IS_ERR_OR_NULL(priv)) {
                        pr_debug("bpf: failed to get private field\n");
                        return -BPF_LOADER_ERRNO__INTERNAL;
                }

                if (!priv->need_prologue)
                        continue;

                /*
                 * For each program that needs a prologue we do the following:
                 *
                 *   - take its current instructions and use them
                 *     to generate the new code with prologue
                 *   - load the new instructions with bpf_prog_load
                 *     and keep the fd in prologue_fds
                 *   - the new fd will be used in bpf__foreach_event
                 *     to connect this program with a perf evsel
                 */
                orig_insns = bpf_program__insns(prog);
                orig_insns_cnt = bpf_program__insn_cnt(prog);

                pev = &priv->pev;
                for (i = 0; i < pev->ntevs; i++) {
                        /*
                         * Skip the artificial prologue_init_insn
                         * instructions (init_cnt), so the prologue can be
                         * generated in their place.
                         */
                        err = preproc_gen_prologue(prog, i,
                                                   orig_insns + init_cnt,
                                                   orig_insns_cnt - init_cnt,
                                                   &res);
                        if (err)
                                return err;

                        fd = bpf_prog_load(bpf_program__get_type(prog),
                                           bpf_program__name(prog), "GPL",
                                           res.new_insn_ptr,
                                           res.new_insn_cnt, NULL);
                        if (fd < 0) {
                                char bf[128];

                                libbpf_strerror(-errno, bf, sizeof(bf));
                                pr_debug("bpf: load objects with prologue failed: err=%d: (%s)\n",
                                         -errno, bf);
                                return -errno;
                        }
                        priv->prologue_fds[i] = fd;
                }
                /*
                 * We no longer need the original program,
                 * so we can unload it.
                 */
                bpf_program__unload(prog);
        }
        return 0;
}

int bpf__load(struct bpf_object *obj)
{
        int err;

        err = bpf_object__load(obj);
        if (err) {
                char bf[128];

                libbpf_strerror(err, bf, sizeof(bf));
                pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
                return err;
        }
        return bpf_object__load_prologue(obj);
}

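/*
 * Invoke 'func' once per probed event, passing the fd of the program (or
 * of its per-tev prologue clone) that should be attached to it.
 */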
int bpf__foreach_event(struct bpf_object *obj,
                       bpf_prog_iter_callback_t func,
                       void *arg)
{
        struct bpf_program *prog;
        int err;

        bpf_object__for_each_program(prog, obj) {
                struct bpf_prog_priv *priv = program_priv(prog);
                struct probe_trace_event *tev;
                struct perf_probe_event *pev;
                int i, fd;

                if (IS_ERR_OR_NULL(priv)) {
                        pr_debug("bpf: failed to get private field\n");
                        return -BPF_LOADER_ERRNO__INTERNAL;
                }

                if (priv->is_tp) {
                        fd = bpf_program__fd(prog);
                        err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
                        if (err) {
                                pr_debug("bpf: tracepoint callback failed, stopping iteration\n");
                                return err;
                        }
                        continue;
                }

                pev = &priv->pev;
                for (i = 0; i < pev->ntevs; i++) {
                        tev = &pev->tevs[i];

                        if (priv->need_prologue)
                                fd = priv->prologue_fds[i];
                        else
                                fd = bpf_program__fd(prog);

                        if (fd < 0) {
                                pr_debug("bpf: failed to get file descriptor\n");
                                return fd;
                        }

                        err = (*func)(tev->group, tev->event, fd, obj, arg);
                        if (err) {
                                pr_debug("bpf: callback failed, stopping iteration\n");
                                return err;
                        }
                }
        }
        return 0;
}

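/*
 * Map configuration: each "map:<name>.<opt>" term is recorded as a
 * bpf_map_op (set a value or wire up an evsel, for all keys or for given
 * index ranges) and applied later by bpf__apply_obj_config().
 */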
enum bpf_map_op_type {
        BPF_MAP_OP_SET_VALUE,
        BPF_MAP_OP_SET_EVSEL,
};

enum bpf_map_key_type {
        BPF_MAP_KEY_ALL,
        BPF_MAP_KEY_RANGES,
};

struct bpf_map_op {
        struct list_head list;
        enum bpf_map_op_type op_type;
        enum bpf_map_key_type key_type;
        union {
                struct parse_events_array array;
        } k;
        union {
                u64 value;
                struct evsel *evsel;
        } v;
};

struct bpf_map_priv {
        struct list_head ops_list;
};

static void
bpf_map_op__delete(struct bpf_map_op *op)
{
        if (!list_empty(&op->list))
                list_del_init(&op->list);
        if (op->key_type == BPF_MAP_KEY_RANGES)
                parse_events__clear_array(&op->k.array);
        free(op);
}

static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
{
        struct bpf_map_op *pos, *n;

        list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
                list_del_init(&pos->list);
                bpf_map_op__delete(pos);
        }
}

static void
bpf_map_priv__clear(const struct bpf_map *map __maybe_unused,
                    void *_priv)
{
        struct bpf_map_priv *priv = _priv;

        bpf_map_priv__purge(priv);
        free(priv);
}

static void *map_priv(const struct bpf_map *map)
{
        void *priv;

        if (IS_ERR_OR_NULL(bpf_map_hash))
                return NULL;
        if (!hashmap__find(bpf_map_hash, map, &priv))
                return NULL;
        return priv;
}

static void bpf_map_hash_free(void)
{
        struct hashmap_entry *cur;
        size_t bkt;

        if (IS_ERR_OR_NULL(bpf_map_hash))
                return;

        hashmap__for_each_entry(bpf_map_hash, cur, bkt)
                bpf_map_priv__clear(cur->key, cur->value);

        hashmap__free(bpf_map_hash);
        bpf_map_hash = NULL;
}

static int map_set_priv(struct bpf_map *map, void *priv)
{
        void *old_priv;

        if (WARN_ON_ONCE(IS_ERR(bpf_map_hash)))
                return PTR_ERR(bpf_map_hash);

        if (!bpf_map_hash) {
                bpf_map_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
                if (IS_ERR(bpf_map_hash))
                        return PTR_ERR(bpf_map_hash);
        }

        old_priv = map_priv(map);
        if (old_priv) {
                bpf_map_priv__clear(map, old_priv);
                return hashmap__set(bpf_map_hash, map, priv, NULL, NULL);
        }
        return hashmap__add(bpf_map_hash, map, priv);
}

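/* Fill in the key part of an op: all keys, or a copy of the given index ranges. */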
static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
{
        op->key_type = BPF_MAP_KEY_ALL;
        if (!term)
                return 0;

        if (term->array.nr_ranges) {
                size_t memsz = term->array.nr_ranges *
                               sizeof(op->k.array.ranges[0]);

                op->k.array.ranges = memdup(term->array.ranges, memsz);
                if (!op->k.array.ranges) {
                        pr_debug("Not enough memory to alloc indices for map\n");
                        return -ENOMEM;
                }
                op->key_type = BPF_MAP_KEY_RANGES;
                op->k.array.nr_ranges = term->array.nr_ranges;
        }
        return 0;
}

static struct bpf_map_op *
bpf_map_op__new(struct parse_events_term *term)
{
        struct bpf_map_op *op;
        int err;

        op = zalloc(sizeof(*op));
        if (!op) {
                pr_debug("Failed to alloc bpf_map_op\n");
                return ERR_PTR(-ENOMEM);
        }
        INIT_LIST_HEAD(&op->list);

        err = bpf_map_op_setkey(op, term);
        if (err) {
                free(op);
                return ERR_PTR(err);
        }
        return op;
}

static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
        struct bpf_map_op *newop;

        newop = memdup(op, sizeof(*op));
        if (!newop) {
                pr_debug("Failed to alloc bpf_map_op\n");
                return NULL;
        }

        INIT_LIST_HEAD(&newop->list);
        if (op->key_type == BPF_MAP_KEY_RANGES) {
                size_t memsz = op->k.array.nr_ranges *
                               sizeof(op->k.array.ranges[0]);

                newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
                if (!newop->k.array.ranges) {
                        pr_debug("Failed to alloc indices for map\n");
                        free(newop);
                        return NULL;
                }
        }

        return newop;
}

static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
        struct bpf_map_priv *newpriv;
        struct bpf_map_op *pos, *newop;

        newpriv = zalloc(sizeof(*newpriv));
        if (!newpriv) {
                pr_debug("Not enough memory to alloc map private\n");
                return NULL;
        }
        INIT_LIST_HEAD(&newpriv->ops_list);

        list_for_each_entry(pos, &priv->ops_list, list) {
                newop = bpf_map_op__clone(pos);
                if (!newop) {
                        bpf_map_priv__purge(newpriv);
                        return NULL;
                }
                list_add_tail(&newop->list, &newpriv->ops_list);
        }

        return newpriv;
}

static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
        const char *map_name = bpf_map__name(map);
        struct bpf_map_priv *priv = map_priv(map);

        if (IS_ERR(priv)) {
                pr_debug("Failed to get private from map %s\n", map_name);
                return PTR_ERR(priv);
        }

        if (!priv) {
                priv = zalloc(sizeof(*priv));
                if (!priv) {
                        pr_debug("Not enough memory to alloc map private\n");
                        return -ENOMEM;
                }
                INIT_LIST_HEAD(&priv->ops_list);

                if (map_set_priv(map, priv)) {
                        free(priv);
                        return -BPF_LOADER_ERRNO__INTERNAL;
                }
        }

        list_add_tail(&op->list, &priv->ops_list);
        return 0;
}

static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
        struct bpf_map_op *op;
        int err;

        op = bpf_map_op__new(term);
        if (IS_ERR(op))
                return op;

        err = bpf_map__add_op(map, op);
        if (err) {
                bpf_map_op__delete(op);
                return ERR_PTR(err);
        }
        return op;
}

static int
__bpf_map__config_value(struct bpf_map *map,
                        struct parse_events_term *term)
{
        struct bpf_map_op *op;
        const char *map_name = bpf_map__name(map);

        if (!map) {
                pr_debug("Map '%s' is invalid\n", map_name);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }

        if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
                pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
                         map_name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
        }
        if (bpf_map__key_size(map) < sizeof(unsigned int)) {
                pr_debug("Map %s has incorrect key size\n", map_name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
        }
        switch (bpf_map__value_size(map)) {
        case 1:
        case 2:
        case 4:
        case 8:
                break;
        default:
                pr_debug("Map %s has incorrect value size\n", map_name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
        }

        op = bpf_map__add_newop(map, term);
        if (IS_ERR(op))
                return PTR_ERR(op);
        op->op_type = BPF_MAP_OP_SET_VALUE;
        op->v.value = term->val.num;
        return 0;
}

static int
bpf_map__config_value(struct bpf_map *map,
                      struct parse_events_term *term,
                      struct evlist *evlist __maybe_unused)
{
        if (!term->err_val) {
                pr_debug("Config value not set\n");
                return -BPF_LOADER_ERRNO__OBJCONF_CONF;
        }

        if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
                pr_debug("ERROR: wrong value type for 'value'\n");
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
        }

        return __bpf_map__config_value(map, term);
}

static int
__bpf_map__config_event(struct bpf_map *map,
                        struct parse_events_term *term,
                        struct evlist *evlist)
{
        struct bpf_map_op *op;
        const char *map_name = bpf_map__name(map);
        struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);

        if (!evsel) {
                pr_debug("Event (for '%s') '%s' doesn't exist\n",
                         map_name, term->val.str);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
        }

        if (!map) {
                pr_debug("Map '%s' is invalid\n", map_name);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }

        /*
         * No need to check key_size and value_size:
         * the kernel has already checked them.
         */
        if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
                pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
                         map_name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
        }

        op = bpf_map__add_newop(map, term);
        if (IS_ERR(op))
                return PTR_ERR(op);
        op->op_type = BPF_MAP_OP_SET_EVSEL;
        op->v.evsel = evsel;
        return 0;
}

static int
bpf_map__config_event(struct bpf_map *map,
                      struct parse_events_term *term,
                      struct evlist *evlist)
{
        if (!term->err_val) {
                pr_debug("Config value not set\n");
                return -BPF_LOADER_ERRNO__OBJCONF_CONF;
        }

        if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
                pr_debug("ERROR: wrong value type for 'event'\n");
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
        }

        return __bpf_map__config_event(map, term, evlist);
}

struct bpf_obj_config__map_func {
        const char *config_opt;
        int (*config_func)(struct bpf_map *, struct parse_events_term *,
                           struct evlist *);
};

static struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
        {"value", bpf_map__config_value},
        {"event", bpf_map__config_event},
};

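/* Ensure every configured index range fits within the map's max_entries. */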
static int
config_map_indices_range_check(struct parse_events_term *term,
                               struct bpf_map *map,
                               const char *map_name)
{
        struct parse_events_array *array = &term->array;
        unsigned int i;

        if (!array->nr_ranges)
                return 0;
        if (!array->ranges) {
                pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
                         map_name, (int)array->nr_ranges);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }

        if (!map) {
                pr_debug("Map '%s' is invalid\n", map_name);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }

        for (i = 0; i < array->nr_ranges; i++) {
                unsigned int start = array->ranges[i].start;
                size_t length = array->ranges[i].length;
                unsigned int idx = start + length - 1;

                if (idx >= bpf_map__max_entries(map)) {
                        pr_debug("ERROR: index %d too large\n", idx);
                        return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
                }
        }
        return 0;
}

static int
bpf__obj_config_map(struct bpf_object *obj,
                    struct parse_events_term *term,
                    struct evlist *evlist,
                    int *key_scan_pos)
{
        /* key is "map:<mapname>.<config opt>" */
        char *map_name = strdup(term->config + sizeof("map:") - 1);
        struct bpf_map *map;
        int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
        char *map_opt;
        size_t i;

        if (!map_name)
                return -ENOMEM;

        map_opt = strchr(map_name, '.');
        if (!map_opt) {
                pr_debug("ERROR: Invalid map config: %s\n", map_name);
                goto out;
        }

        *map_opt++ = '\0';
        if (*map_opt == '\0') {
                pr_debug("ERROR: Invalid map option: %s\n", term->config);
                goto out;
        }

        map = bpf_object__find_map_by_name(obj, map_name);
        if (!map) {
                pr_debug("ERROR: Map %s doesn't exist\n", map_name);
                err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
                goto out;
        }

        *key_scan_pos += strlen(map_opt);
        err = config_map_indices_range_check(term, map, map_name);
        if (err)
                goto out;
        *key_scan_pos -= strlen(map_opt);

        for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
                struct bpf_obj_config__map_func *func =
                                &bpf_obj_config__map_funcs[i];

                if (strcmp(map_opt, func->config_opt) == 0) {
                        err = func->config_func(map, term, evlist);
                        goto out;
                }
        }

        pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
        err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
        if (!err)
                *key_scan_pos += strlen(map_opt);

        free(map_name);
        return err;
}

int bpf__config_obj(struct bpf_object *obj,
                    struct parse_events_term *term,
                    struct evlist *evlist,
                    int *error_pos)
{
        int key_scan_pos = 0;
        int err;

        if (!obj || !term || !term->config)
                return -EINVAL;

        if (strstarts(term->config, "map:")) {
                key_scan_pos = sizeof("map:") - 1;
                err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
                goto out;
        }
        err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
out:
        if (error_pos)
                *error_pos = key_scan_pos;
        return err;
}

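/* Callback invoked once per key an op applies to; 'pkey' points at the key. */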
typedef int (*map_config_func_t)(const char *name, int map_fd,
                                 const struct bpf_map *map,
                                 struct bpf_map_op *op,
                                 void *pkey, void *arg);

static int
foreach_key_array_all(map_config_func_t func,
                      void *arg, const char *name,
                      int map_fd, const struct bpf_map *map,
                      struct bpf_map_op *op)
{
        unsigned int i;
        int err;

        for (i = 0; i < bpf_map__max_entries(map); i++) {
                err = func(name, map_fd, map, op, &i, arg);
                if (err) {
                        pr_debug("ERROR: failed to insert value to %s[%u]\n",
                                 name, i);
                        return err;
                }
        }
        return 0;
}

static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
                         const char *name, int map_fd,
                         const struct bpf_map *map,
                         struct bpf_map_op *op)
{
        unsigned int i, j;
        int err;

        for (i = 0; i < op->k.array.nr_ranges; i++) {
                unsigned int start = op->k.array.ranges[i].start;
                size_t length = op->k.array.ranges[i].length;

                for (j = 0; j < length; j++) {
                        unsigned int idx = start + j;

                        err = func(name, map_fd, map, op, &idx, arg);
                        if (err) {
                                pr_debug("ERROR: failed to insert value to %s[%u]\n",
                                         name, idx);
                                return err;
                        }
                }
        }
        return 0;
}

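/* Run every recorded op of a map through 'func', expanding its key set. */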
static int
bpf_map_config_foreach_key(struct bpf_map *map,
                           map_config_func_t func,
                           void *arg)
{
        int err, map_fd, type;
        struct bpf_map_op *op;
        const char *name = bpf_map__name(map);
        struct bpf_map_priv *priv = map_priv(map);

        if (IS_ERR(priv)) {
                pr_debug("ERROR: failed to get private from map %s\n", name);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }
        if (!priv || list_empty(&priv->ops_list)) {
                pr_debug("INFO: nothing to config for map %s\n", name);
                return 0;
        }

        if (!map) {
                pr_debug("Map '%s' is invalid\n", name);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }
        map_fd = bpf_map__fd(map);
        if (map_fd < 0) {
                pr_debug("ERROR: failed to get fd from map %s\n", name);
                return map_fd;
        }

        type = bpf_map__type(map);
        list_for_each_entry(op, &priv->ops_list, list) {
                switch (type) {
                case BPF_MAP_TYPE_ARRAY:
                case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
                        switch (op->key_type) {
                        case BPF_MAP_KEY_ALL:
                                err = foreach_key_array_all(func, arg, name,
                                                            map_fd, map, op);
                                break;
                        case BPF_MAP_KEY_RANGES:
                                err = foreach_key_array_ranges(func, arg, name,
                                                               map_fd, map, op);
                                break;
                        default:
                                pr_debug("ERROR: keytype for map '%s' invalid\n",
                                         name);
                                return -BPF_LOADER_ERRNO__INTERNAL;
                        }
                        if (err)
                                return err;
                        break;
                default:
                        pr_debug("ERROR: type of '%s' incorrect\n", name);
                        return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
                }
        }

        return 0;
}

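/* Truncate the configured u64 to the map's value size and update the element. */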
static int
apply_config_value_for_key(int map_fd, void *pkey,
                           size_t val_size, u64 val)
{
        int err = 0;

        switch (val_size) {
        case 1: {
                u8 _val = (u8)(val);

                err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
                break;
        }
        case 2: {
                u16 _val = (u16)(val);

                err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
                break;
        }
        case 4: {
                u32 _val = (u32)(val);

                err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
                break;
        }
        case 8: {
                err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
                break;
        }
        default:
                pr_debug("ERROR: invalid value size\n");
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
        }
        if (err && errno)
                err = -errno;
        return err;
}

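/*
 * Sanity-check the evsel (single dimension, no inherit, suitable event
 * type) and store its perf event fd in the map slot for 'pkey'.
 */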
static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
                           struct evsel *evsel)
{
        struct xyarray *xy = evsel->core.fd;
        struct perf_event_attr *attr;
        unsigned int key, events;
        bool check_pass = false;
        int *evt_fd;
        int err;

        if (!xy) {
                pr_debug("ERROR: evsel not ready for map %s\n", name);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }

        if (xy->row_size / xy->entry_size != 1) {
                pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
                         name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
        }

        attr = &evsel->core.attr;
        if (attr->inherit) {
                pr_debug("ERROR: Can't put inherit event into map %s\n", name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
        }

        if (evsel__is_bpf_output(evsel))
                check_pass = true;
        if (attr->type == PERF_TYPE_RAW)
                check_pass = true;
        if (attr->type == PERF_TYPE_HARDWARE)
                check_pass = true;
        if (!check_pass) {
                pr_debug("ERROR: Event type is wrong for map %s\n", name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
        }

        events = xy->entries / (xy->row_size / xy->entry_size);
        key = *((unsigned int *)pkey);
        if (key >= events) {
                pr_debug("ERROR: there is no event %d for map %s\n",
                         key, name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
        }
        evt_fd = xyarray__entry(xy, key, 0);
        err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
        if (err && errno)
                err = -errno;
        return err;
}

static int
apply_obj_config_map_for_key(const char *name, int map_fd,
                             const struct bpf_map *map,
                             struct bpf_map_op *op,
                             void *pkey, void *arg __maybe_unused)
{
        int err;

        switch (op->op_type) {
        case BPF_MAP_OP_SET_VALUE:
                err = apply_config_value_for_key(map_fd, pkey,
                                                 bpf_map__value_size(map),
                                                 op->v.value);
                break;
        case BPF_MAP_OP_SET_EVSEL:
                err = apply_config_evsel_for_key(name, map_fd, pkey,
                                                 op->v.evsel);
                break;
        default:
                pr_debug("ERROR: unknown value type for '%s'\n", name);
                err = -BPF_LOADER_ERRNO__INTERNAL;
        }
        return err;
}

static int
apply_obj_config_map(struct bpf_map *map)
{
        return bpf_map_config_foreach_key(map,
                                          apply_obj_config_map_for_key,
                                          NULL);
}

static int
apply_obj_config_object(struct bpf_object *obj)
{
        struct bpf_map *map;
        int err;

        bpf_object__for_each_map(map, obj) {
                err = apply_obj_config_map(map);
                if (err)
                        return err;
        }
        return 0;
}

int bpf__apply_obj_config(void)
{
        struct bpf_perf_object *perf_obj, *tmp;
        int err;

        bpf_perf_object__for_each(perf_obj, tmp) {
                err = apply_obj_config_object(perf_obj->obj);
                if (err)
                        return err;
        }

        return 0;
}

#define bpf__perf_for_each_map(map, pobj, tmp)                  \
        bpf_perf_object__for_each(pobj, tmp)                    \
                bpf_object__for_each_map(map, pobj->obj)

#define bpf__perf_for_each_map_named(map, pobj, pobjtmp, name)  \
        bpf__perf_for_each_map(map, pobj, pobjtmp)              \
                if (bpf_map__name(map) && (strcmp(name, bpf_map__name(map)) == 0))

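/*
 * Make sure every map called 'name' is wired to a perf-output evsel:
 * reuse an existing map's ops as a template, or create a new bpf-output
 * event and attach it to the still-unconfigured maps.
 */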
struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
{
        struct bpf_map_priv *tmpl_priv = NULL;
        struct bpf_perf_object *perf_obj, *tmp;
        struct evsel *evsel = NULL;
        struct bpf_map *map;
        int err;
        bool need_init = false;

        bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
                struct bpf_map_priv *priv = map_priv(map);

                if (IS_ERR(priv))
                        return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);

                /*
                 * No need to check map type: the type should have been
                 * verified by the kernel.
                 */
                if (!priv)
                        need_init = true;
                if (!tmpl_priv && priv)
                        tmpl_priv = priv;
        }

        if (!need_init)
                return NULL;

        if (!tmpl_priv) {
                char *event_definition = NULL;

                if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
                        return ERR_PTR(-ENOMEM);

                err = parse_event(evlist, event_definition);
                free(event_definition);

                if (err) {
                        pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
                        return ERR_PTR(-err);
                }

                evsel = evlist__last(evlist);
        }

        bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
                struct bpf_map_priv *priv = map_priv(map);

                if (IS_ERR(priv))
                        return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
                if (priv)
                        continue;

                if (tmpl_priv) {
                        priv = bpf_map_priv__clone(tmpl_priv);
                        if (!priv)
                                return ERR_PTR(-ENOMEM);

                        err = map_set_priv(map, priv);
                        if (err) {
                                bpf_map_priv__clear(map, priv);
                                return ERR_PTR(err);
                        }
                } else if (evsel) {
                        struct bpf_map_op *op;

                        op = bpf_map__add_newop(map, NULL);
                        if (IS_ERR(op))
                                return ERR_CAST(op);
                        op->op_type = BPF_MAP_OP_SET_EVSEL;
                        op->v.evsel = evsel;
                }
        }

        return evsel;
}

int bpf__setup_stdout(struct evlist *evlist)
{
        struct evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");

        return PTR_ERR_OR_ZERO(evsel);
}

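/* Human-readable messages for the BPF_LOADER_ERRNO__* error codes. */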
#define ERRNO_OFFSET(e)         ((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)       ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO        (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

static const char *bpf_loader_strerror_table[NR_ERRNO] = {
        [ERRCODE_OFFSET(CONFIG)]        = "Invalid config string",
        [ERRCODE_OFFSET(GROUP)]         = "Invalid group name",
        [ERRCODE_OFFSET(EVENTNAME)]     = "No event name found in config string",
        [ERRCODE_OFFSET(INTERNAL)]      = "BPF loader internal error",
        [ERRCODE_OFFSET(COMPILE)]       = "Error when compiling BPF scriptlet",
        [ERRCODE_OFFSET(PROGCONF_TERM)] = "Invalid program config term in config string",
        [ERRCODE_OFFSET(PROLOGUE)]      = "Failed to generate prologue",
        [ERRCODE_OFFSET(PROLOGUE2BIG)]  = "Prologue too big for program",
        [ERRCODE_OFFSET(PROLOGUEOOB)]   = "Offset out of bounds for prologue",
        [ERRCODE_OFFSET(OBJCONF_OPT)]   = "Invalid object config option",
        [ERRCODE_OFFSET(OBJCONF_CONF)]  = "Config value not set (missing '=')",
        [ERRCODE_OFFSET(OBJCONF_MAP_OPT)]       = "Invalid object map config option",
        [ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]  = "Target map doesn't exist",
        [ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]     = "Incorrect value type for map",
        [ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]      = "Incorrect map type",
        [ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]   = "Incorrect map key size",
        [ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)] = "Incorrect map value size",
        [ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]     = "Event not found for map setting",
        [ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]   = "Invalid map size for event setting",
        [ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]    = "Event dimension too large",
        [ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]    = "Doesn't support inherit event",
        [ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]   = "Wrong event type for map",
        [ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]   = "Index too large",
};

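/* Render 'err' via the loader table, libbpf's strerror, or libc's. */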
static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
        char sbuf[STRERR_BUFSIZE];
        const char *msg;

        if (!buf || !size)
                return -1;

        err = err > 0 ? err : -err;

        if (err >= __LIBBPF_ERRNO__START)
                return libbpf_strerror(err, buf, size);

        if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
                msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
                snprintf(buf, size, "%s", msg);
                buf[size - 1] = '\0';
                return 0;
        }

        if (err >= __BPF_LOADER_ERRNO__END)
                snprintf(buf, size, "Unknown bpf loader error %d", err);
        else
                snprintf(buf, size, "%s",
                         str_error_r(err, sbuf, sizeof(sbuf)));

        buf[size - 1] = '\0';
        return -1;
}

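/*
 * Helpers for the bpf__strerror_*() functions below: open a switch on the
 * (positive) error code, add per-errno cases, and terminate the buffer.
 */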
#define bpf__strerror_head(err, buf, size) \
        char sbuf[STRERR_BUFSIZE], *emsg;\
        if (!size)\
                return 0;\
        if (err < 0)\
                err = -err;\
        bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
        emsg = sbuf;\
        switch (err) {\
        default:\
                scnprintf(buf, size, "%s", emsg);\
                break;

#define bpf__strerror_entry(val, fmt...)\
        case val: {\
                scnprintf(buf, size, fmt);\
                break;\
        }

#define bpf__strerror_end(buf, size)\
        }\
        buf[size - 1] = '\0';

int bpf__strerror_prepare_load(const char *filename, bool source,
                               int err, char *buf, size_t size)
{
        size_t n;
        int ret;

        n = snprintf(buf, size, "Failed to load %s%s: ",
                     filename, source ? " from source" : "");
        if (n >= size) {
                buf[size - 1] = '\0';
                return 0;
        }
        buf += n;
        size -= n;

        ret = bpf_loader_strerror(err, buf, size);
        buf[size - 1] = '\0';
        return ret;
}

int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
                        int err, char *buf, size_t size)
{
        bpf__strerror_head(err, buf, size);
        case BPF_LOADER_ERRNO__PROGCONF_TERM: {
                scnprintf(buf, size, "%s (add -v to see details)", emsg);
                break;
        }
        bpf__strerror_entry(EEXIST, "Probe point exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
        bpf__strerror_entry(EACCES, "You need to be root");
        bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
        bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
        bpf__strerror_end(buf, size);
        return 0;
}

int bpf__strerror_load(struct bpf_object *obj,
                       int err, char *buf, size_t size)
{
        bpf__strerror_head(err, buf, size);
        case LIBBPF_ERRNO__KVER: {
                unsigned int obj_kver = bpf_object__kversion(obj);
                unsigned int real_kver;

                if (fetch_kernel_version(&real_kver, NULL, 0)) {
                        scnprintf(buf, size, "Unable to fetch kernel version");
                        break;
                }

                if (obj_kver != real_kver) {
                        scnprintf(buf, size,
                                  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
                                  KVER_PARAM(obj_kver),
                                  KVER_PARAM(real_kver));
                        break;
                }

                scnprintf(buf, size, "Failed to load program for unknown reason");
                break;
        }
        bpf__strerror_end(buf, size);
        return 0;
}

int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
                             struct parse_events_term *term __maybe_unused,
                             struct evlist *evlist __maybe_unused,
                             int *error_pos __maybe_unused, int err,
                             char *buf, size_t size)
{
        bpf__strerror_head(err, buf, size);
        bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
                            "Can't use this config term with this map type");
        bpf__strerror_end(buf, size);
        return 0;
}

int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
        bpf__strerror_head(err, buf, size);
        bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
                            "Cannot set event to BPF map in multi-thread tracing");
        bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
                            "%s (Hint: use -i to turn off inherit)", emsg);
        bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
                            "Can only put raw, hardware and BPF output events into a BPF map");
        bpf__strerror_end(buf, size);
        return 0;
}

int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
                                     int err, char *buf, size_t size)
{
        bpf__strerror_head(err, buf, size);
        bpf__strerror_end(buf, size);
        return 0;
}