// SPDX-License-Identifier: GPL-2.0
/*
 * bpf-loader.c
 *
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <stdlib.h>
#include "debug.h"
#include "evlist.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
#include "parse-events.h"
#include "strfilter.h"
#include "util.h"
#include "llvm-utils.h"
#include "c++/clang-c.h"

#include <internal/xyarray.h>

static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
			     const char *fmt, va_list args)
{
	return veprintf(1, verbose, pr_fmt(fmt), args);
}

struct bpf_prog_priv {
	bool is_tp;
	char *sys_name;
	char *evt_name;
	struct perf_probe_event pev;
	bool need_prologue;
	struct bpf_insn *insns_buf;
	int nr_types;
	int *type_mapping;
};

static bool libbpf_initialized;

struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_perf_print);
		libbpf_initialized = true;
	}

	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load buffer\n");
		return ERR_PTR(-EINVAL);
	}

	return obj;
}

struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_perf_print);
		libbpf_initialized = true;
	}

	if (source) {
		int err;
		void *obj_buf;
		size_t obj_buf_sz;

		perf_clang__init();
		err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
		perf_clang__cleanup();
		if (err) {
			pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
			err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
			if (err)
				return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
		} else
			pr_debug("bpf: successful builtin compilation\n");
		obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);

		if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
			llvm__dump_obj(filename, obj_buf, obj_buf_sz);

		free(obj_buf);
	} else
		obj = bpf_object__open(filename);

	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load %s\n", filename);
		return obj;
	}

	return obj;
}

void bpf__clear(void)
{
	struct bpf_object *obj, *tmp;

	bpf_object__for_each_safe(obj, tmp) {
		bpf__unprobe(obj);
		bpf_object__close(obj);
	}
}

static void
clear_prog_priv(struct bpf_program *prog __maybe_unused,
		void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->type_mapping);
	zfree(&priv->sys_name);
	zfree(&priv->evt_name);
	free(priv);
}
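
/*
 * The prog_config__* handlers below parse "key=value" pairs embedded in
 * a program's ELF section name. A hedged illustration of accepted
 * section names (the paths, module and symbol names here are made up,
 * not taken from this file):
 *
 *   SEC("func=do_sys_open")                        kprobe
 *   SEC("sched:sched_switch")                      tracepoint
 *   SEC("exec=/lib64/libc.so.6;func=malloc")       uprobe
 *   SEC("module=i915;force=yes;func=i915_irq_handler")  module kprobe
 *
 * Everything before the last ';' must be "key=value" pairs handled by
 * do_prog_config(); the remainder is the probe definition itself.
 */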
static int
prog_config__exec(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = true;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__module(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = false;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
	int err;
	bool bool_value;

	if (!pbool)
		return -EINVAL;

	err = strtobool(value, &bool_value);
	if (err)
		return err;

	*pbool = invert ? !bool_value : bool_value;
	return 0;
}

static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}

static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}

static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key = "exec",
		.usage = "exec=<full path of file>",
		.desc = "Set uprobe target",
		.func = prog_config__exec,
	},
	{
		.key = "module",
		.usage = "module=<module name> ",
		.desc = "Set kprobe module",
		.func = prog_config__module,
	},
	{
		.key = "inlines",
		.usage = "inlines=[yes|no] ",
		.desc = "Probe at inline symbol",
		.func = prog_config__inlines,
	},
	{
		.key = "force",
		.usage = "force=[yes|no] ",
		.desc = "Forcibly add events with existing name",
		.func = prog_config__force,
	},
};

static int
do_prog_config(const char *key, const char *value,
	       struct perf_probe_event *pev)
{
	unsigned int i;

	pr_debug("config bpf program: %s=%s\n", key, value);
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
			return bpf_prog_config_terms[i].func(value, pev);

	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
		 key, value);

	pr_debug("\nHint: Valid options are:\n");
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
			 bpf_prog_config_terms[i].desc);
	pr_debug("\n");

	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}

static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("Not enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}

static int
parse_prog_config(const char *config_str, const char **p_main_str,
		  bool *is_tp, struct perf_probe_event *pev)
{
	int err;
	const char *main_str = parse_prog_config_kvpair(config_str, pev);

	if (IS_ERR(main_str))
		return PTR_ERR(main_str);

	*p_main_str = main_str;
	if (!strchr(main_str, '=')) {
		/* Is it a tracepoint event? */
		const char *s = strchr(main_str, ':');

		if (!s) {
			pr_debug("bpf: '%s' is not a valid tracepoint\n",
				 config_str);
			return -BPF_LOADER_ERRNO__CONFIG;
		}

		*is_tp = true;
		return 0;
	}

	*is_tp = false;
	err = parse_perf_probe_command(main_str, pev);
	if (err < 0) {
		pr_debug("bpf: '%s' is not a valid config string\n",
			 config_str);
		/* parse failed, no need to clear pev. */
		return -BPF_LOADER_ERRNO__CONFIG;
	}
	return 0;
}

static int
config_bpf_program(struct bpf_program *prog)
{
	struct perf_probe_event *pev = NULL;
	struct bpf_prog_priv *priv = NULL;
	const char *config_str, *main_str;
	bool is_tp = false;
	int err;

	/* Initialize per-program probing settings */
	probe_conf.no_inlines = false;
	probe_conf.force_add = false;

	priv = calloc(1, sizeof(*priv));
	if (!priv) {
		pr_debug("bpf: failed to alloc priv\n");
		return -ENOMEM;
	}
	pev = &priv->pev;

	config_str = bpf_program__section_name(prog);
	pr_debug("bpf: config program '%s'\n", config_str);
	err = parse_prog_config(config_str, &main_str, &is_tp, pev);
	if (err)
		goto errout;

	if (is_tp) {
		char *s = strchr(main_str, ':');

		priv->is_tp = true;
		priv->sys_name = strndup(main_str, s - main_str);
		priv->evt_name = strdup(s + 1);
		goto set_priv;
	}

	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
			 config_str, PERF_BPF_PROBE_GROUP);
		err = -BPF_LOADER_ERRNO__GROUP;
		goto errout;
	} else if (!pev->group)
		pev->group = strdup(PERF_BPF_PROBE_GROUP);

	if (!pev->group) {
		pr_debug("bpf: strdup failed\n");
		err = -ENOMEM;
		goto errout;
	}

	if (!pev->event) {
		pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
			 config_str);
		err = -BPF_LOADER_ERRNO__EVENTNAME;
		goto errout;
	}
	pr_debug("bpf: config '%s' is ok\n", config_str);

set_priv:
	err = bpf_program__set_priv(prog, priv, clear_prog_priv);
	if (err) {
		pr_debug("Failed to set priv for program '%s'\n", config_str);
		goto errout;
	}

	return 0;

errout:
	if (pev)
		clear_perf_probe_event(pev);
	free(priv);
	return err;
}

static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

	/*
	 * Make err static, so if init failed the first time,
	 * bpf__prepare_probe() fails each time without calling
	 * init_probe_symbol_maps multiple times.
	 */
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}

static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	if (IS_ERR(priv) || !priv || priv->is_tp)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev that belongs to that type */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__section_name(prog);
		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}

/*
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * It can be proved, but this margin is too narrow to contain the proof.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}

	return 0;
}

/*
 * Assign a type number to each tev in a pev. mapping is an array with
 * the same number of slots as there are tevs in that pev; nr_types
 * will be set to the number of distinct types. tevs whose argument
 * lists compare equal share a type, e.g. argument layouts A, B, A, C
 * yield three types, with the two A tevs mapped to the same number
 * (the exact numbering follows the sort order).
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
			int *nr_types)
{
	int i, type = 0;
	struct probe_trace_event **ptevs;

	size_t array_sz = sizeof(*ptevs) * pev->ntevs;

	ptevs = malloc(array_sz);
	if (!ptevs) {
		pr_debug("Not enough memory: alloc ptevs failed\n");
		return -ENOMEM;
	}

	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
	for (i = 0; i < pev->ntevs; i++)
		ptevs[i] = &pev->tevs[i];

	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
	      compare_tev_args);

	for (i = 0; i < pev->ntevs; i++) {
		int n;

		n = ptevs[i] - pev->tevs;
		if (i == 0) {
			mapping[n] = type;
			pr_debug("mapping[%d]=%d\n", n, type);
			continue;
		}

		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
			mapping[n] = type;
		else
			mapping[n] = ++type;

		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
	}
	free(ptevs);
	*nr_types = type + 1;

	return 0;
}

static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int err, i;

	if (IS_ERR(priv) || !priv) {
		pr_debug("Internal error when hooking preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (priv->is_tp) {
		priv->need_prologue = false;
		return 0;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since no tev has an argument, there is no need to
	 * generate a prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("Not enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
}

int bpf__probe(struct bpf_object *obj)
{
	int err = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;
	struct perf_probe_event *pev;

	err = bpf__prepare_probe();
	if (err) {
		pr_debug("bpf__prepare_probe failed\n");
		return err;
	}

	bpf_object__for_each_program(prog, obj) {
		err = config_bpf_program(prog);
		if (err)
			goto out;

		priv = bpf_program__priv(prog);
		if (IS_ERR(priv) || !priv) {
			err = PTR_ERR(priv);
			goto out;
		}

		if (priv->is_tp) {
			bpf_program__set_tracepoint(prog);
			continue;
		}

		bpf_program__set_kprobe(prog);
		pev = &priv->pev;

		err = convert_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to convert perf probe events\n");
			goto out;
		}

		err = apply_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to apply perf probe events\n");
			goto out;
		}

		/*
		 * After probing, let's consider the prologue, which
		 * adds argument fetchers to BPF programs.
		 *
		 * hook_load_preprocessor() hooks a pre-processor
		 * to the bpf_program, letting it generate the
		 * prologue dynamically during loading.
		 */
		err = hook_load_preprocessor(prog);
		if (err)
			goto out;
	}
out:
	return err < 0 ? err : 0;
}

#define EVENTS_WRITE_BUFSIZE 4096
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		int i;

		if (IS_ERR(priv) || !priv || priv->is_tp)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}

int bpf__load(struct bpf_object *obj)
{
	int err;

	err = bpf_object__load(obj);
	if (err) {
		char bf[128];

		libbpf_strerror(err, bf, sizeof(bf));
		pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
		return err;
	}
	return 0;
}

int bpf__foreach_event(struct bpf_object *obj,
		       bpf_prog_iter_callback_t func,
		       void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i, fd;

		if (IS_ERR(priv) || !priv) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (priv->is_tp) {
			fd = bpf_program__fd(prog);
			err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
			if (err) {
				pr_debug("bpf: tracepoint callback failed, stop iterating\n");
				return err;
			}
			continue;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			if (priv->need_prologue) {
				int type = priv->type_mapping[i];

				fd = bpf_program__nth_fd(prog, type);
			} else {
				fd = bpf_program__fd(prog);
			}

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev->group, tev->event, fd, obj, arg);
			if (err) {
				pr_debug("bpf: callback failed, stop iterating\n");
				return err;
			}
		}
	}
	return 0;
}
enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,
	BPF_MAP_OP_SET_EVSEL,
};

enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,
	BPF_MAP_KEY_RANGES,
};

struct bpf_map_op {
	struct list_head list;
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		struct parse_events_array array;
	} k;
	union {
		u64 value;
		struct evsel *evsel;
	} v;
};

struct bpf_map_priv {
	struct list_head ops_list;
};

static void
bpf_map_op__delete(struct bpf_map_op *op)
{
	if (!list_empty(&op->list))
		list_del_init(&op->list);
	if (op->key_type == BPF_MAP_KEY_RANGES)
		parse_events__clear_array(&op->k.array);
	free(op);
}

static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
{
	struct bpf_map_op *pos, *n;

	list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
		list_del_init(&pos->list);
		bpf_map_op__delete(pos);
	}
}

static void
bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
		    void *_priv)
{
	struct bpf_map_priv *priv = _priv;

	bpf_map_priv__purge(priv);
	free(priv);
}

static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
{
	op->key_type = BPF_MAP_KEY_ALL;
	if (!term)
		return 0;

	if (term->array.nr_ranges) {
		size_t memsz = term->array.nr_ranges *
				sizeof(op->k.array.ranges[0]);

		op->k.array.ranges = memdup(term->array.ranges, memsz);
		if (!op->k.array.ranges) {
			pr_debug("Not enough memory to alloc indices for map\n");
			return -ENOMEM;
		}
		op->key_type = BPF_MAP_KEY_RANGES;
		op->k.array.nr_ranges = term->array.nr_ranges;
	}
	return 0;
}

static struct bpf_map_op *
bpf_map_op__new(struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = zalloc(sizeof(*op));
	if (!op) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&op->list);

	err = bpf_map_op_setkey(op, term);
	if (err) {
		free(op);
		return ERR_PTR(err);
	}
	return op;
}

static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
	struct bpf_map_op *newop;

	newop = memdup(op, sizeof(*op));
	if (!newop) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return NULL;
	}

	INIT_LIST_HEAD(&newop->list);
	if (op->key_type == BPF_MAP_KEY_RANGES) {
		size_t memsz = op->k.array.nr_ranges *
			       sizeof(op->k.array.ranges[0]);

		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
		if (!newop->k.array.ranges) {
			pr_debug("Failed to alloc indices for map\n");
			free(newop);
			return NULL;
		}
	}

	return newop;
}

static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
	struct bpf_map_priv *newpriv;
	struct bpf_map_op *pos, *newop;

	newpriv = zalloc(sizeof(*newpriv));
	if (!newpriv) {
		pr_debug("Not enough memory to alloc map private\n");
		return NULL;
	}
	INIT_LIST_HEAD(&newpriv->ops_list);

	list_for_each_entry(pos, &priv->ops_list, list) {
		newop = bpf_map_op__clone(pos);
		if (!newop) {
			bpf_map_priv__purge(newpriv);
			return NULL;
		}
		list_add_tail(&newop->list, &newpriv->ops_list);
	}

	return newpriv;
}

static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
	const char *map_name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("Failed to get private from map %s\n", map_name);
		return PTR_ERR(priv);
	}

	if (!priv) {
		priv = zalloc(sizeof(*priv));
		if (!priv) {
			pr_debug("Not enough memory to alloc map private\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&priv->ops_list);

		if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
			free(priv);
			return -BPF_LOADER_ERRNO__INTERNAL;
		}
	}

	list_add_tail(&op->list, &priv->ops_list);
	return 0;
}
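
/*
 * The ops queued above originate from perf command-line terms. A
 * hedged illustration (the source file and map name are made up, not
 * taken from this file):
 *
 *   perf record -e './prog.c/map:counters.value=42/' ...
 *
 * routes through bpf__config_obj() below and queues a
 * BPF_MAP_OP_SET_VALUE op that later writes 42 into "counters".
 */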
static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = bpf_map_op__new(term);
	if (IS_ERR(op))
		return op;

	err = bpf_map__add_op(map, op);
	if (err) {
		bpf_map_op__delete(op);
		return ERR_PTR(err);
	}
	return op;
}

static int
__bpf_map__config_value(struct bpf_map *map,
			struct parse_events_term *term)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	const struct bpf_map_def *def = bpf_map__def(map);

	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (def->type != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (def->key_size < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	switch (def->value_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		pr_debug("Map %s has incorrect value size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_VALUE;
	op->v.value = term->val.num;
	return 0;
}

static int
bpf_map__config_value(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist __maybe_unused)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
		pr_debug("ERROR: wrong value type for 'value'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_value(map, term);
}

static int
__bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct evlist *evlist)
{
	const struct bpf_map_def *def;
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);

	if (!evsel) {
		pr_debug("Event (for '%s') '%s' doesn't exist\n",
			 map_name, term->val.str);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return PTR_ERR(def);
	}

	/*
	 * No need to check key_size and value_size:
	 * kernel has already checked them.
	 */
	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_EVSEL;
	op->v.evsel = evsel;
	return 0;
}

static int
bpf_map__config_event(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
		pr_debug("ERROR: wrong value type for 'event'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_event(map, term, evlist);
}

struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct evlist *);
};

struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};

static int
config_map_indices_range_check(struct parse_events_term *term,
			       struct bpf_map *map,
			       const char *map_name)
{
	struct parse_events_array *array = &term->array;
	const struct bpf_map_def *def;
	unsigned int i;

	if (!array->nr_ranges)
		return 0;
	if (!array->ranges) {
		pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
			 map_name, (int)array->nr_ranges);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	for (i = 0; i < array->nr_ranges; i++) {
		unsigned int start = array->ranges[i].start;
		size_t length = array->ranges[i].length;
		unsigned int idx = start + length - 1;

		if (idx >= def->max_entries) {
			pr_debug("ERROR: index %d too large\n", idx);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
		}
	}
	return 0;
}

static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *key_scan_pos)
{
	/* key is "map:<mapname>.<config opt>" */
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
				&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	free(map_name);
	if (!err)
		*key_scan_pos += strlen(map_opt);
	return err;
}

int bpf__config_obj(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *error_pos)
{
	int key_scan_pos = 0;
	int err;

	if (!obj || !term || !term->config)
		return -EINVAL;

	if (strstarts(term->config, "map:")) {
		key_scan_pos = sizeof("map:") - 1;
		err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
		goto out;
	}
	err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
out:
	if (error_pos)
		*error_pos = key_scan_pos;
	return err;
}

typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map_def *pdef,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);

static int
foreach_key_array_all(map_config_func_t func,
		      void *arg, const char *name,
		      int map_fd, const struct bpf_map_def *pdef,
		      struct bpf_map_op *op)
{
	unsigned int i;
	int err;

	for (i = 0; i < pdef->max_entries; i++) {
		err = func(name, map_fd, pdef, op, &i, arg);
		if (err) {
			pr_debug("ERROR: failed to insert value to %s[%u]\n",
				 name, i);
			return err;
		}
	}
	return 0;
}

static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
			 const char *name, int map_fd,
			 const struct bpf_map_def *pdef,
			 struct bpf_map_op *op)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < op->k.array.nr_ranges; i++) {
		unsigned int start = op->k.array.ranges[i].start;
		size_t length = op->k.array.ranges[i].length;

		for (j = 0; j < length; j++) {
			unsigned int idx = start + j;

			err = func(name, map_fd, pdef, op, &idx, arg);
			if (err) {
				pr_debug("ERROR: failed to insert value to %s[%u]\n",
					 name, idx);
				return err;
			}
		}
	}
	return 0;
}
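
/*
 * Hedged illustration of the two key forms dispatched below ("mymap"
 * is a made-up name, shown under the assumption that perf's array
 * index syntax applies to map config terms):
 *
 *   map:mymap.value=1            every index (BPF_MAP_KEY_ALL)
 *   map:mymap.value[0,3...6]=1   indices 0 and 3-6 (BPF_MAP_KEY_RANGES)
 *
 * For ranges, config_map_indices_range_check() has already ensured
 * that each start + length - 1 stays below the map's max_entries.
 */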
static int
bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd;
	struct bpf_map_op *op;
	const struct bpf_map_def *def;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("ERROR: failed to get private from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	if (!priv || list_empty(&priv->ops_list)) {
		pr_debug("INFO: nothing to config for map %s\n", name);
		return 0;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: failed to get definition from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
	if (map_fd < 0) {
		pr_debug("ERROR: failed to get fd from map %s\n", name);
		return map_fd;
	}

	list_for_each_entry(op, &priv->ops_list, list) {
		switch (def->type) {
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, def, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, def,
							       op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
					 name);
				return -BPF_LOADER_ERRNO__INTERNAL;
			}
			if (err)
				return err;
			break;
		default:
			pr_debug("ERROR: type of '%s' incorrect\n", name);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
		}
	}

	return 0;
}

static int
apply_config_value_for_key(int map_fd, void *pkey,
			   size_t val_size, u64 val)
{
	int err = 0;

	switch (val_size) {
	case 1: {
		u8 _val = (u8)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 2: {
		u16 _val = (u16)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 4: {
		u32 _val = (u32)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 8: {
		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
		break;
	}
	default:
		pr_debug("ERROR: invalid value size\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct evsel *evsel)
{
	struct xyarray *xy = evsel->core.fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->core.attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	if (evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_obj_config_map_for_key(const char *name, int map_fd,
			     const struct bpf_map_def *pdef,
			     struct bpf_map_op *op,
			     void *pkey, void *arg __maybe_unused)
{
	int err;

	switch (op->op_type) {
	case BPF_MAP_OP_SET_VALUE:
		err = apply_config_value_for_key(map_fd, pkey,
						 pdef->value_size,
						 op->v.value);
		break;
	case BPF_MAP_OP_SET_EVSEL:
		err = apply_config_evsel_for_key(name, map_fd, pkey,
						 op->v.evsel);
		break;
	default:
		pr_debug("ERROR: unknown value type for '%s'\n", name);
		err = -BPF_LOADER_ERRNO__INTERNAL;
	}
	return err;
}

static int
apply_obj_config_map(struct bpf_map *map)
{
	return bpf_map_config_foreach_key(map,
					  apply_obj_config_map_for_key,
					  NULL);
}

static int
apply_obj_config_object(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

	bpf_object__for_each_map(map, obj) {
		err = apply_obj_config_map(map);
		if (err)
			return err;
	}
	return 0;
}

int bpf__apply_obj_config(void)
{
	struct bpf_object *obj, *tmp;
	int err;

	bpf_object__for_each_safe(obj, tmp) {
		err = apply_obj_config_object(obj);
		if (err)
			return err;
	}

	return 0;
}

#define bpf__for_each_map(pos, obj, objtmp)	\
	bpf_object__for_each_safe(obj, objtmp)	\
		bpf_object__for_each_map(pos, obj)

#define bpf__for_each_map_named(pos, obj, objtmp, name)	\
	bpf__for_each_map(pos, obj, objtmp)		\
		if (bpf_map__name(pos) &&		\
			(strcmp(name,			\
				bpf_map__name(pos)) == 0))

struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_object *obj, *tmp;
	struct evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	bpf__for_each_map_named(map, obj, tmp, name) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);

		/*
		 * No need to check map type: type should have been
		 * verified by kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return NULL;

	if (!tmpl_priv) {
		char *event_definition = NULL;

		if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
			return ERR_PTR(-ENOMEM);

		err = parse_events(evlist, event_definition, NULL);
		free(event_definition);

		if (err) {
			pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
			return ERR_PTR(-err);
		}

		evsel = evlist__last(evlist);
	}

	bpf__for_each_map_named(map, obj, tmp, name) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return ERR_PTR(-ENOMEM);

			err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return ERR_PTR(err);
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return ERR_CAST(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return evsel;
}

int bpf__setup_stdout(struct evlist *evlist)
{
	struct evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
	return PTR_ERR_OR_ZERO(evsel);
}
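
/*
 * A minimal sketch of the map a BPF scriptlet declares so that
 * bpf__setup_stdout() above can wire it to a bpf-output event; this
 * follows the convention of perf's BPF helper headers and is
 * illustrative, not code from this file:
 *
 *   struct bpf_map SEC("maps") __bpf_stdout__ = {
 *           .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 *           .key_size = sizeof(int),
 *           .value_size = sizeof(u32),
 *           .max_entries = __NR_CPUS__,
 *   };
 */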
#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bounds for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)]	= "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};

static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		snprintf(buf, size, "%s",
			 str_error_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}

#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';

int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
		     filename, source ? " from source" : "");
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}

int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
				     int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}