// SPDX-License-Identifier: GPL-2.0
/*
 * bpf-loader.c
 *
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <stdlib.h>
#include "debug.h"
#include "evlist.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
#include "parse-events.h"
#include "strfilter.h"
#include "util.h"
#include "llvm-utils.h"
#include "c++/clang-c.h"
#include "hashmap.h"
#include "asm/bug.h"

#include <internal/xyarray.h>

/* temporarily disable libbpf deprecation warnings */
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"

static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
			     const char *fmt, va_list args)
{
	return veprintf(1, verbose, pr_fmt(fmt), args);
}

struct bpf_prog_priv {
	bool is_tp;
	char *sys_name;
	char *evt_name;
	struct perf_probe_event pev;
	bool need_prologue;
	struct bpf_insn *insns_buf;
	int nr_types;
	int *type_mapping;
};

struct bpf_perf_object {
	struct list_head list;
	struct bpf_object *obj;
};

static LIST_HEAD(bpf_objects_list);
static struct hashmap *bpf_program_hash;
static struct hashmap *bpf_map_hash;

static struct bpf_perf_object *
bpf_perf_object__next(struct bpf_perf_object *prev)
{
	struct bpf_perf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_perf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* An empty list is detected here, so no check is needed on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}

#define bpf_perf_object__for_each(perf_obj, tmp)	\
	for ((perf_obj) = bpf_perf_object__next(NULL),	\
	     (tmp) = bpf_perf_object__next(perf_obj);	\
	     (perf_obj) != NULL;			\
	     (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))

static bool libbpf_initialized;

static int bpf_perf_object__add(struct bpf_object *obj)
{
	struct bpf_perf_object *perf_obj = zalloc(sizeof(*perf_obj));

	if (perf_obj) {
		INIT_LIST_HEAD(&perf_obj->list);
		perf_obj->obj = obj;
		list_add_tail(&perf_obj->list, &bpf_objects_list);
	}
	return perf_obj ? 0 : -ENOMEM;
}

struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = name);
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_perf_print);
		libbpf_initialized = true;
	}

	obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load buffer\n");
		return ERR_PTR(-EINVAL);
	}

	if (bpf_perf_object__add(obj)) {
		bpf_object__close(obj);
		return ERR_PTR(-ENOMEM);
	}

	return obj;
}

static void bpf_perf_object__close(struct bpf_perf_object *perf_obj)
{
	list_del(&perf_obj->list);
	bpf_object__close(perf_obj->obj);
	free(perf_obj);
}

struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = filename);
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_perf_print);
		libbpf_initialized = true;
	}

	if (source) {
		int err;
		void *obj_buf;
		size_t obj_buf_sz;

		perf_clang__init();
		err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
		perf_clang__cleanup();
		if (err) {
			pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
			err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
			if (err)
				return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
		} else
			pr_debug("bpf: successful builtin compilation\n");
		obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);

		if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
			llvm__dump_obj(filename, obj_buf, obj_buf_sz);

		free(obj_buf);
	} else {
		obj = bpf_object__open(filename);
	}

	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load %s\n", filename);
		return obj;
	}

	if (bpf_perf_object__add(obj)) {
		bpf_object__close(obj);
		return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
	}

	return obj;
}

static void
clear_prog_priv(const struct bpf_program *prog __maybe_unused,
		void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->type_mapping);
	zfree(&priv->sys_name);
	zfree(&priv->evt_name);
	free(priv);
}

static void bpf_program_hash_free(void)
{
	struct hashmap_entry *cur;
	size_t bkt;

	if (IS_ERR_OR_NULL(bpf_program_hash))
		return;

	hashmap__for_each_entry(bpf_program_hash, cur, bkt)
		clear_prog_priv(cur->key, cur->value);

	hashmap__free(bpf_program_hash);
	bpf_program_hash = NULL;
}

static void bpf_map_hash_free(void);

void bpf__clear(void)
{
	struct bpf_perf_object *perf_obj, *tmp;

	bpf_perf_object__for_each(perf_obj, tmp) {
		bpf__unprobe(perf_obj->obj);
		bpf_perf_object__close(perf_obj);
	}

	bpf_program_hash_free();
	bpf_map_hash_free();
}

static size_t ptr_hash(const void *__key, void *ctx __maybe_unused)
{
	return (size_t) __key;
}

static bool ptr_equal(const void *key1, const void *key2,
		      void *ctx __maybe_unused)
{
	return key1 == key2;
}

static void *program_priv(const struct bpf_program *prog)
{
	void *priv;

	if (IS_ERR_OR_NULL(bpf_program_hash))
		return NULL;
	if (!hashmap__find(bpf_program_hash, prog, &priv))
		return NULL;
	return priv;
}

static int program_set_priv(struct bpf_program *prog, void *priv)
{
	void *old_priv;

	/*
	 * This should not happen; we warn about it in the caller,
	 * config_bpf_program().
	 */
	if (IS_ERR(bpf_program_hash))
		return PTR_ERR(bpf_program_hash);

	if (!bpf_program_hash) {
		bpf_program_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
		if (IS_ERR(bpf_program_hash))
			return PTR_ERR(bpf_program_hash);
	}

	old_priv = program_priv(prog);
	if (old_priv) {
		clear_prog_priv(prog, old_priv);
		return hashmap__set(bpf_program_hash, prog, priv, NULL, NULL);
	}
	return hashmap__add(bpf_program_hash, prog, priv);
}

static int
prog_config__exec(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = true;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__module(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = false;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
	int err;
	bool bool_value;

	if (!pbool)
		return -EINVAL;

	err = strtobool(value, &bool_value);
	if (err)
		return err;

	*pbool = invert ? !bool_value : bool_value;
	return 0;
}

static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}

static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}

static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key	= "exec",
		.usage	= "exec=<full path of file>",
		.desc	= "Set uprobe target",
		.func	= prog_config__exec,
	},
	{
		.key	= "module",
		.usage	= "module=<module name> ",
		.desc	= "Set kprobe module",
		.func	= prog_config__module,
	},
	{
		.key	= "inlines",
		.usage	= "inlines=[yes|no] ",
		.desc	= "Probe at inline symbol",
		.func	= prog_config__inlines,
	},
	{
		.key	= "force",
		.usage	= "force=[yes|no] ",
		.desc	= "Forcibly add events with existing name",
		.func	= prog_config__force,
	},
};
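
/*
 * Example (hypothetical, for illustration only): the config string is the
 * ELF section name of a BPF program, so a scriptlet probing glibc's
 * malloc() could use something like
 *
 *	SEC("exec=/usr/lib64/libc.so.6;force=yes;malloc")
 *	int probe_malloc(void *ctx)
 *	{
 *		return 0;
 *	}
 *
 * The "key=value;" pairs are consumed by do_prog_config() below, and the
 * remainder ("malloc") is handed to parse_perf_probe_command() as the
 * probe definition.
 */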

static int
do_prog_config(const char *key, const char *value,
	       struct perf_probe_event *pev)
{
	unsigned int i;

	pr_debug("config bpf program: %s=%s\n", key, value);
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
			return bpf_prog_config_terms[i].func(value, pev);

	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
		 key, value);

	pr_debug("\nHint: Valid options are:\n");
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
			 bpf_prog_config_terms[i].desc);
	pr_debug("\n");

	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}

static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("Not enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}

static int
parse_prog_config(const char *config_str, const char **p_main_str,
		  bool *is_tp, struct perf_probe_event *pev)
{
	int err;
	const char *main_str = parse_prog_config_kvpair(config_str, pev);

	if (IS_ERR(main_str))
		return PTR_ERR(main_str);

	*p_main_str = main_str;
	if (!strchr(main_str, '=')) {
		/* Is this a tracepoint event? */
		const char *s = strchr(main_str, ':');

		if (!s) {
			pr_debug("bpf: '%s' is not a valid tracepoint\n",
				 config_str);
			return -BPF_LOADER_ERRNO__CONFIG;
		}

		*is_tp = true;
		return 0;
	}

	*is_tp = false;
	err = parse_perf_probe_command(main_str, pev);
	if (err < 0) {
		pr_debug("bpf: '%s' is not a valid config string\n",
			 config_str);
		/* Parsing failed; no need to clear pev. */
		return -BPF_LOADER_ERRNO__CONFIG;
	}
	return 0;
}

static int
config_bpf_program(struct bpf_program *prog)
{
	struct perf_probe_event *pev = NULL;
	struct bpf_prog_priv *priv = NULL;
	const char *config_str, *main_str;
	bool is_tp = false;
	int err;

	/* Initialize per-program probing settings. */
	probe_conf.no_inlines = false;
	probe_conf.force_add = false;

	priv = calloc(1, sizeof(*priv));
	if (!priv) {
		pr_debug("bpf: failed to alloc priv\n");
		return -ENOMEM;
	}
	pev = &priv->pev;

	config_str = bpf_program__section_name(prog);
	pr_debug("bpf: config program '%s'\n", config_str);
	err = parse_prog_config(config_str, &main_str, &is_tp, pev);
	if (err)
		goto errout;

	if (is_tp) {
		char *s = strchr(main_str, ':');

		priv->is_tp = true;
		priv->sys_name = strndup(main_str, s - main_str);
		priv->evt_name = strdup(s + 1);
		goto set_priv;
	}

	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
			 config_str, PERF_BPF_PROBE_GROUP);
		err = -BPF_LOADER_ERRNO__GROUP;
		goto errout;
	} else if (!pev->group)
		pev->group = strdup(PERF_BPF_PROBE_GROUP);

	if (!pev->group) {
		pr_debug("bpf: strdup failed\n");
		err = -ENOMEM;
		goto errout;
	}

	if (!pev->event) {
		pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
			 config_str);
		err = -BPF_LOADER_ERRNO__EVENTNAME;
		goto errout;
	}
	pr_debug("bpf: config '%s' is ok\n", config_str);

set_priv:
	err = program_set_priv(prog, priv);
	if (err) {
		pr_debug("Failed to set priv for program '%s'\n", config_str);
		goto errout;
	}

	return 0;

errout:
	if (pev)
		clear_perf_probe_event(pev);
	free(priv);
	return err;
}

static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

	/*
	 * err is static so that if init fails the first time,
	 * bpf__prepare_probe() fails on every later call without invoking
	 * init_probe_symbol_maps() multiple times.
	 */
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}

static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
{
	struct bpf_prog_priv *priv = program_priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	if (IS_ERR_OR_NULL(priv) || priv->is_tp)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev that belongs to that type. */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__section_name(prog);
		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}

/*
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can prove it, but this margin is too narrow to contain the proof.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}

	return 0;
}
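
/*
 * Note: compare_tev_args() is handed to qsort() below over an array of
 * 'struct probe_trace_event *', hence the double indirection above: each
 * comparator argument is a pointer into that pointer array. Sorting
 * places tevs with identical argument lists next to each other so
 * map_prologue() can assign them the same type number.
 */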

/*
 * Assign a type number to each tev in a pev. 'mapping' is an array with
 * the same number of slots as there are tevs in that pev; 'nr_types' is
 * set to the number of distinct types.
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
			int *nr_types)
{
	int i, type = 0;
	struct probe_trace_event **ptevs;

	size_t array_sz = sizeof(*ptevs) * pev->ntevs;

	ptevs = malloc(array_sz);
	if (!ptevs) {
		pr_debug("Not enough memory: alloc ptevs failed\n");
		return -ENOMEM;
	}

	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
	for (i = 0; i < pev->ntevs; i++)
		ptevs[i] = &pev->tevs[i];

	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
	      compare_tev_args);

	for (i = 0; i < pev->ntevs; i++) {
		int n;

		n = ptevs[i] - pev->tevs;
		if (i == 0) {
			mapping[n] = type;
			pr_debug("mapping[%d]=%d\n", n, type);
			continue;
		}

		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
			mapping[n] = type;
		else
			mapping[n] = ++type;

		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
	}
	free(ptevs);
	*nr_types = type + 1;

	return 0;
}
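
/*
 * Worked example (hypothetical): if a pev expanded to four tevs whose
 * argument lists are A, B, A, A, sorting makes the identical lists
 * adjacent and map_prologue() yields
 *
 *	mapping = { 0, 1, 0, 0 }  (or { 1, 0, 1, 1 }, depending on the
 *	sort order), with nr_types = 2
 *
 * so tevs 0, 2 and 3 share one prologue variant while tev 1 gets its own.
 */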

static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = program_priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int err, i;

	if (IS_ERR_OR_NULL(priv)) {
		pr_debug("Internal error when hooking preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (priv->is_tp) {
		priv->need_prologue = false;
		return 0;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since none of the tevs have arguments, there is no need to
	 * generate a prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("Not enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
}

int bpf__probe(struct bpf_object *obj)
{
	int err = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;
	struct perf_probe_event *pev;

	err = bpf__prepare_probe();
	if (err) {
		pr_debug("bpf__prepare_probe failed\n");
		return err;
	}

	bpf_object__for_each_program(prog, obj) {
		err = config_bpf_program(prog);
		if (err)
			goto out;

		priv = program_priv(prog);
		if (IS_ERR_OR_NULL(priv)) {
			if (!priv)
				err = -BPF_LOADER_ERRNO__INTERNAL;
			else
				err = PTR_ERR(priv);
			goto out;
		}

		if (priv->is_tp) {
			bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
			continue;
		}

		bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
		pev = &priv->pev;

		err = convert_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to convert perf probe events\n");
			goto out;
		}

		err = apply_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to apply perf probe events\n");
			goto out;
		}

		/*
		 * After probing, consider the prologue, which adds
		 * argument fetchers to BPF programs.
		 *
		 * hook_load_preprocessor() hooks a pre-processor to the
		 * bpf_program so it can generate the prologue dynamically
		 * during loading.
		 */
		err = hook_load_preprocessor(prog);
		if (err)
			goto out;
	}
out:
	return err < 0 ? err : 0;
}

#define EVENTS_WRITE_BUFSIZE 4096
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = program_priv(prog);
		int i;

		if (IS_ERR_OR_NULL(priv) || priv->is_tp)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}

int bpf__load(struct bpf_object *obj)
{
	int err;

	err = bpf_object__load(obj);
	if (err) {
		char bf[128];

		libbpf_strerror(err, bf, sizeof(bf));
		pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
		return err;
	}
	return 0;
}

int bpf__foreach_event(struct bpf_object *obj,
		       bpf_prog_iter_callback_t func,
		       void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = program_priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i, fd;

		if (IS_ERR_OR_NULL(priv)) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (priv->is_tp) {
			fd = bpf_program__fd(prog);
			err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
			if (err) {
				pr_debug("bpf: tracepoint callback failed, stop iterating\n");
				return err;
			}
			continue;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			if (priv->need_prologue) {
				int type = priv->type_mapping[i];

				fd = bpf_program__nth_fd(prog, type);
			} else {
				fd = bpf_program__fd(prog);
			}

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev->group, tev->event, fd, obj, arg);
			if (err) {
				pr_debug("bpf: callback failed, stop iterating\n");
				return err;
			}
		}
	}
	return 0;
}

enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,
	BPF_MAP_OP_SET_EVSEL,
};

enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,
	BPF_MAP_KEY_RANGES,
};

struct bpf_map_op {
	struct list_head list;
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		struct parse_events_array array;
	} k;
	union {
		u64 value;
		struct evsel *evsel;
	} v;
};

struct bpf_map_priv {
	struct list_head ops_list;
};

static void
bpf_map_op__delete(struct bpf_map_op *op)
{
	if (!list_empty(&op->list))
		list_del_init(&op->list);
	if (op->key_type == BPF_MAP_KEY_RANGES)
		parse_events__clear_array(&op->k.array);
	free(op);
}

static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
{
	struct bpf_map_op *pos, *n;

	list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
		list_del_init(&pos->list);
		bpf_map_op__delete(pos);
	}
}

static void
bpf_map_priv__clear(const struct bpf_map *map __maybe_unused,
		    void *_priv)
{
	struct bpf_map_priv *priv = _priv;

	bpf_map_priv__purge(priv);
	free(priv);
}

static void *map_priv(const struct bpf_map *map)
{
	void *priv;

	if (IS_ERR_OR_NULL(bpf_map_hash))
		return NULL;
	if (!hashmap__find(bpf_map_hash, map, &priv))
		return NULL;
	return priv;
}

static void bpf_map_hash_free(void)
{
	struct hashmap_entry *cur;
	size_t bkt;

	if (IS_ERR_OR_NULL(bpf_map_hash))
		return;

	hashmap__for_each_entry(bpf_map_hash, cur, bkt)
		bpf_map_priv__clear(cur->key, cur->value);

	hashmap__free(bpf_map_hash);
	bpf_map_hash = NULL;
}

static int map_set_priv(struct bpf_map *map, void *priv)
{
	void *old_priv;

	if (WARN_ON_ONCE(IS_ERR(bpf_map_hash)))
		return PTR_ERR(bpf_map_hash);

	if (!bpf_map_hash) {
		bpf_map_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
		if (IS_ERR(bpf_map_hash))
			return PTR_ERR(bpf_map_hash);
	}

	old_priv = map_priv(map);
	if (old_priv) {
		bpf_map_priv__clear(map, old_priv);
		return hashmap__set(bpf_map_hash, map, priv, NULL, NULL);
	}
	return hashmap__add(bpf_map_hash, map, priv);
}

static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
{
	op->key_type = BPF_MAP_KEY_ALL;
	if (!term)
		return 0;

	if (term->array.nr_ranges) {
		size_t memsz = term->array.nr_ranges *
				sizeof(op->k.array.ranges[0]);

		op->k.array.ranges = memdup(term->array.ranges, memsz);
		if (!op->k.array.ranges) {
			pr_debug("Not enough memory to alloc indices for map\n");
			return -ENOMEM;
		}
		op->key_type = BPF_MAP_KEY_RANGES;
		op->k.array.nr_ranges = term->array.nr_ranges;
	}
	return 0;
}

static struct bpf_map_op *
bpf_map_op__new(struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = zalloc(sizeof(*op));
	if (!op) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&op->list);

	err = bpf_map_op_setkey(op, term);
	if (err) {
		free(op);
		return ERR_PTR(err);
	}
	return op;
}

static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
	struct bpf_map_op *newop;

	newop = memdup(op, sizeof(*op));
	if (!newop) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return NULL;
	}

	INIT_LIST_HEAD(&newop->list);
	if (op->key_type == BPF_MAP_KEY_RANGES) {
		size_t memsz = op->k.array.nr_ranges *
			       sizeof(op->k.array.ranges[0]);

		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
		if (!newop->k.array.ranges) {
			pr_debug("Failed to alloc indices for map\n");
			free(newop);
			return NULL;
		}
	}

	return newop;
}

static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
	struct bpf_map_priv *newpriv;
	struct bpf_map_op *pos, *newop;

	newpriv = zalloc(sizeof(*newpriv));
	if (!newpriv) {
		pr_debug("Not enough memory to alloc map private\n");
		return NULL;
	}
	INIT_LIST_HEAD(&newpriv->ops_list);

	list_for_each_entry(pos, &priv->ops_list, list) {
		newop = bpf_map_op__clone(pos);
		if (!newop) {
			bpf_map_priv__purge(newpriv);
			return NULL;
		}
		list_add_tail(&newop->list, &newpriv->ops_list);
	}

	return newpriv;
}

static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
	const char *map_name = bpf_map__name(map);
	struct bpf_map_priv *priv = map_priv(map);

	if (IS_ERR(priv)) {
		pr_debug("Failed to get private from map %s\n", map_name);
		return PTR_ERR(priv);
	}

	if (!priv) {
		priv = zalloc(sizeof(*priv));
		if (!priv) {
			pr_debug("Not enough memory to alloc map private\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&priv->ops_list);

		if (map_set_priv(map, priv)) {
			free(priv);
			return -BPF_LOADER_ERRNO__INTERNAL;
		}
	}

	list_add_tail(&op->list, &priv->ops_list);
	return 0;
}

static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = bpf_map_op__new(term);
	if (IS_ERR(op))
		return op;

	err = bpf_map__add_op(map, op);
	if (err) {
		bpf_map_op__delete(op);
		return ERR_PTR(err);
	}
	return op;
}

static int
__bpf_map__config_value(struct bpf_map *map,
			struct parse_events_term *term)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);

	if (!map) {
		pr_debug("Map '%s' is invalid\n", map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (bpf_map__key_size(map) < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	switch (bpf_map__value_size(map)) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		pr_debug("Map %s has incorrect value size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_VALUE;
	op->v.value = term->val.num;
	return 0;
}

static int
bpf_map__config_value(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist __maybe_unused)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
		pr_debug("ERROR: wrong value type for 'value'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_value(map, term);
}

static int
__bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct evlist *evlist)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);

	if (!evsel) {
		pr_debug("Event (for '%s') '%s' doesn't exist\n",
			 map_name, term->val.str);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	if (!map) {
		pr_debug("Map '%s' is invalid\n", map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	/*
	 * No need to check key_size and value_size:
	 * the kernel has already checked them.
	 */
	if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_EVSEL;
	op->v.evsel = evsel;
	return 0;
}

static int
bpf_map__config_event(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
		pr_debug("ERROR: wrong value type for 'event'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_event(map, term, evlist);
}

struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct evlist *);
};

struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};

static int
config_map_indices_range_check(struct parse_events_term *term,
			       struct bpf_map *map,
			       const char *map_name)
{
	struct parse_events_array *array = &term->array;
	unsigned int i;

	if (!array->nr_ranges)
		return 0;
	if (!array->ranges) {
		pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
			 map_name, (int)array->nr_ranges);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (!map) {
		pr_debug("Map '%s' is invalid\n", map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	for (i = 0; i < array->nr_ranges; i++) {
		unsigned int start = array->ranges[i].start;
		size_t length = array->ranges[i].length;
		unsigned int idx = start + length - 1;

		if (idx >= bpf_map__max_entries(map)) {
			pr_debug("ERROR: index %d too large\n", idx);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
		}
	}
	return 0;
}

static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *key_scan_pos)
{
	/* key is "map:<mapname>.<config opt>" */
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
			&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	if (!err)
		*key_scan_pos += strlen(map_opt);

	free(map_name);
	return err;
}
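
/*
 * For illustration (hypothetical names): with a map declared as "channel"
 * in the object, event config terms such as
 *
 *	map:channel.value=10
 *	map:channel.value[0...5]=10
 *	map:channel.event=cycles
 *
 * would be routed through bpf__obj_config_map() above, with any index
 * ranges carried in term->array and validated by
 * config_map_indices_range_check().
 */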

int bpf__config_obj(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *error_pos)
{
	int key_scan_pos = 0;
	int err;

	if (!obj || !term || !term->config)
		return -EINVAL;

	if (strstarts(term->config, "map:")) {
		key_scan_pos = sizeof("map:") - 1;
		err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
		goto out;
	}
	err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
out:
	if (error_pos)
		*error_pos = key_scan_pos;
	return err;
}

typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map *map,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);

static int
foreach_key_array_all(map_config_func_t func,
		      void *arg, const char *name,
		      int map_fd, const struct bpf_map *map,
		      struct bpf_map_op *op)
{
	unsigned int i;
	int err;

	for (i = 0; i < bpf_map__max_entries(map); i++) {
		err = func(name, map_fd, map, op, &i, arg);
		if (err) {
			pr_debug("ERROR: failed to insert value to %s[%u]\n",
				 name, i);
			return err;
		}
	}
	return 0;
}

static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
			 const char *name, int map_fd,
			 const struct bpf_map *map,
			 struct bpf_map_op *op)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < op->k.array.nr_ranges; i++) {
		unsigned int start = op->k.array.ranges[i].start;
		size_t length = op->k.array.ranges[i].length;

		for (j = 0; j < length; j++) {
			unsigned int idx = start + j;

			err = func(name, map_fd, map, op, &idx, arg);
			if (err) {
				pr_debug("ERROR: failed to insert value to %s[%u]\n",
					 name, idx);
				return err;
			}
		}
	}
	return 0;
}

static int
bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd, type;
	struct bpf_map_op *op;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = map_priv(map);

	if (IS_ERR(priv)) {
		pr_debug("ERROR: failed to get private from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	if (!priv || list_empty(&priv->ops_list)) {
		pr_debug("INFO: nothing to config for map %s\n", name);
		return 0;
	}

	if (!map) {
		pr_debug("Map '%s' is invalid\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
	if (map_fd < 0) {
		pr_debug("ERROR: failed to get fd from map %s\n", name);
		return map_fd;
	}

	type = bpf_map__type(map);
	list_for_each_entry(op, &priv->ops_list, list) {
		switch (type) {
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, map, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, map, op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
					 name);
				return -BPF_LOADER_ERRNO__INTERNAL;
			}
			if (err)
				return err;
			break;
		default:
			pr_debug("ERROR: type of '%s' incorrect\n", name);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
		}
	}

	return 0;
}

static int
apply_config_value_for_key(int map_fd, void *pkey,
			   size_t val_size, u64 val)
{
	int err = 0;

	switch (val_size) {
	case 1: {
		u8 _val = (u8)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 2: {
		u16 _val = (u16)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 4: {
		u32 _val = (u32)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 8: {
		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
		break;
	}
	default:
		pr_debug("ERROR: invalid value size\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct evsel *evsel)
{
	struct xyarray *xy = evsel->core.fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->core.attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	if (evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_obj_config_map_for_key(const char *name, int map_fd,
			     const struct bpf_map *map,
			     struct bpf_map_op *op,
			     void *pkey, void *arg __maybe_unused)
{
	int err;

	switch (op->op_type) {
	case BPF_MAP_OP_SET_VALUE:
		err = apply_config_value_for_key(map_fd, pkey,
						 bpf_map__value_size(map),
						 op->v.value);
		break;
	case BPF_MAP_OP_SET_EVSEL:
		err = apply_config_evsel_for_key(name, map_fd, pkey,
						 op->v.evsel);
		break;
	default:
		pr_debug("ERROR: unknown value type for '%s'\n", name);
		err = -BPF_LOADER_ERRNO__INTERNAL;
	}
	return err;
}

static int
apply_obj_config_map(struct bpf_map *map)
{
	return bpf_map_config_foreach_key(map,
					  apply_obj_config_map_for_key,
					  NULL);
}

static int
apply_obj_config_object(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

	bpf_object__for_each_map(map, obj) {
		err = apply_obj_config_map(map);
		if (err)
			return err;
	}
	return 0;
}

int bpf__apply_obj_config(void)
{
	struct bpf_perf_object *perf_obj, *tmp;
	int err;

	bpf_perf_object__for_each(perf_obj, tmp) {
		err = apply_obj_config_object(perf_obj->obj);
		if (err)
			return err;
	}

	return 0;
}

#define bpf__perf_for_each_map(map, pobj, tmp)			\
	bpf_perf_object__for_each(pobj, tmp)			\
		bpf_object__for_each_map(map, pobj->obj)

#define bpf__perf_for_each_map_named(map, pobj, pobjtmp, name)	\
	bpf__perf_for_each_map(map, pobj, pobjtmp)		\
		if (bpf_map__name(map) && (strcmp(name, bpf_map__name(map)) == 0))

struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_perf_object *perf_obj, *tmp;
	struct evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
		struct bpf_map_priv *priv = map_priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);

		/*
		 * No need to check the map type: the type should have
		 * been verified by the kernel.
		 */
		if (!need_init && !priv)
			need_init = true;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return NULL;

	if (!tmpl_priv) {
		char *event_definition = NULL;

		if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
			return ERR_PTR(-ENOMEM);

		err = parse_events(evlist, event_definition, NULL);
		free(event_definition);

		if (err) {
			pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
			return ERR_PTR(-err);
		}

		evsel = evlist__last(evlist);
	}

	bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
		struct bpf_map_priv *priv = map_priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return ERR_PTR(-ENOMEM);

			err = map_set_priv(map, priv);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return ERR_PTR(err);
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return ERR_CAST(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return evsel;
}

int bpf__setup_stdout(struct evlist *evlist)
{
	struct evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
	return PTR_ERR_OR_ZERO(evsel);
}
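
/*
 * A sketch for illustration (not part of this file): the "__bpf_stdout__"
 * channel set up above pairs with a perf event array declared on the BPF
 * side, roughly
 *
 *	struct bpf_map_def SEC("maps") __bpf_stdout__ = {
 *		.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(u32),
 *		.max_entries = __NR_CPUS__,
 *	};
 *
 * bpf__setup_output_event() then drops the fd of the bpf-output evsel
 * into each slot so the script can emit records with
 * bpf_perf_event_output().
 */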

#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)] = "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};

static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		snprintf(buf, size, "%s",
			 str_error_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}

#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';

int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
		     filename, source ? " from source" : "");
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}

int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see details)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point already exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in the BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
				     int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}