/*
 * bpf-loader.c
 *
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <errno.h>
#include "perf.h"
#include "debug.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
#include "parse-events.h"
#include "strfilter.h"
#include "llvm-utils.h"
#include "c++/clang-c.h"

#define DEFINE_PRINT_FN(name, level) \
static int libbpf_##name(const char *fmt, ...)	\
{						\
	va_list args;				\
	int ret;				\
						\
	va_start(args, fmt);			\
	ret = veprintf(level, verbose, pr_fmt(fmt), args);\
	va_end(args);				\
	return ret;				\
}

DEFINE_PRINT_FN(warning, 1)
DEFINE_PRINT_FN(info, 1)
DEFINE_PRINT_FN(debug, 1)

struct bpf_prog_priv {
	bool is_tp;
	char *sys_name;
	char *evt_name;
	struct perf_probe_event pev;
	bool need_prologue;
	struct bpf_insn *insns_buf;
	int nr_types;
	int *type_mapping;
};

static bool libbpf_initialized;

struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_warning,
				 libbpf_info,
				 libbpf_debug);
		libbpf_initialized = true;
	}

	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR(obj)) {
		pr_debug("bpf: failed to load buffer\n");
		return ERR_PTR(-EINVAL);
	}

	return obj;
}

struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_warning,
				 libbpf_info,
				 libbpf_debug);
		libbpf_initialized = true;
	}

	if (source) {
		int err;
		void *obj_buf;
		size_t obj_buf_sz;

		perf_clang__init();
		err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
		perf_clang__cleanup();
		if (err) {
			pr_warning("bpf: builtin compilation failed: %d, try external compiler\n", err);
			err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
			if (err)
				return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
		} else
			pr_debug("bpf: successful builtin compilation\n");
		obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);

		if (!IS_ERR(obj) && llvm_param.dump_obj)
			llvm__dump_obj(filename, obj_buf, obj_buf_sz);

		free(obj_buf);
	} else
		obj = bpf_object__open(filename);

	if (IS_ERR(obj)) {
		pr_debug("bpf: failed to load %s\n", filename);
		return obj;
	}

	return obj;
}

void bpf__clear(void)
{
	struct bpf_object *obj, *tmp;

	bpf_object__for_each_safe(obj, tmp) {
		bpf__unprobe(obj);
		bpf_object__close(obj);
	}
}

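/*
 * A rough sketch of how the entry points above and below are driven
 * (the real callers live in perf's event parsing and record code; the
 * ordering here is illustrative only):
 *
 *	struct bpf_object *obj = bpf__prepare_load("prog.c", true);
 *
 *	if (!IS_ERR(obj)) {
 *		bpf__probe(obj);	- set up probe points
 *		bpf__load(obj);		- load programs into the kernel
 *		bpf__foreach_event(obj, attach_cb, arg);
 *	}
 *	...
 *	bpf__clear();			- unprobe and close all objects
 */
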
static void
clear_prog_priv(struct bpf_program *prog __maybe_unused,
		void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->type_mapping);
	zfree(&priv->sys_name);
	zfree(&priv->evt_name);
	free(priv);
}

static int
prog_config__exec(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = true;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__module(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = false;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
	int err;
	bool bool_value;

	if (!pbool)
		return -EINVAL;

	err = strtobool(value, &bool_value);
	if (err)
		return err;

	*pbool = invert ? !bool_value : bool_value;
	return 0;
}

static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}

static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}

static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key	= "exec",
		.usage	= "exec=<full path of file>",
		.desc	= "Set uprobe target",
		.func	= prog_config__exec,
	},
	{
		.key	= "module",
		.usage	= "module=<module name> ",
		.desc	= "Set kprobe module",
		.func	= prog_config__module,
	},
	{
		.key	= "inlines",
		.usage	= "inlines=[yes|no] ",
		.desc	= "Probe at inline symbol",
		.func	= prog_config__inlines,
	},
	{
		.key	= "force",
		.usage	= "force=[yes|no] ",
		.desc	= "Forcibly add events with existing name",
		.func	= prog_config__force,
	},
};

static int
do_prog_config(const char *key, const char *value,
	       struct perf_probe_event *pev)
{
	unsigned int i;

	pr_debug("config bpf program: %s=%s\n", key, value);
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
			return bpf_prog_config_terms[i].func(value, pev);

	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
		 key, value);

	pr_debug("\nHint: Valid options are:\n");
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
			 bpf_prog_config_terms[i].desc);
	pr_debug("\n");

	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}

static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("Not enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}

static int
parse_prog_config(const char *config_str, const char **p_main_str,
		  bool *is_tp, struct perf_probe_event *pev)
{
	int err;
	const char *main_str = parse_prog_config_kvpair(config_str, pev);

	if (IS_ERR(main_str))
		return PTR_ERR(main_str);

	*p_main_str = main_str;
	if (!strchr(main_str, '=')) {
		/* Is it a tracepoint event? */
		const char *s = strchr(main_str, ':');

		if (!s) {
			pr_debug("bpf: '%s' is not a valid tracepoint\n",
				 config_str);
			return -BPF_LOADER_ERRNO__CONFIG;
		}

		*is_tp = true;
		return 0;
	}

	*is_tp = false;
	err = parse_perf_probe_command(main_str, pev);
	if (err < 0) {
		pr_debug("bpf: '%s' is not a valid config string\n",
			 config_str);
		/* parse failed, no need to clear pev. */
		return -BPF_LOADER_ERRNO__CONFIG;
	}
	return 0;
}

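/*
 * Examples of section names this parser accepts (probe points and
 * binary paths are hypothetical, for illustration only):
 *
 *	"open=do_sys_open filename"		kprobe event "open", one arg
 *	"exec=/bin/bash;readline=readline"	uprobe on bash's readline
 *	"raw_syscalls:sys_enter"		tracepoint (no '=', has ':')
 *
 * Leading "key=value;" pairs are consumed by do_prog_config(); the
 * remainder is either a perf-probe definition or a "subsystem:event"
 * tracepoint name.
 */
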
static int
config_bpf_program(struct bpf_program *prog)
{
	struct perf_probe_event *pev = NULL;
	struct bpf_prog_priv *priv = NULL;
	const char *config_str, *main_str;
	bool is_tp = false;
	int err;

	/* Initialize per-program probing setting */
	probe_conf.no_inlines = false;
	probe_conf.force_add = false;

	config_str = bpf_program__title(prog, false);
	if (IS_ERR(config_str)) {
		pr_debug("bpf: unable to get title for program\n");
		return PTR_ERR(config_str);
	}

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		pr_debug("bpf: failed to alloc priv\n");
		return -ENOMEM;
	}
	pev = &priv->pev;

	pr_debug("bpf: config program '%s'\n", config_str);
	err = parse_prog_config(config_str, &main_str, &is_tp, pev);
	if (err)
		goto errout;

	if (is_tp) {
		char *s = strchr(main_str, ':');

		priv->is_tp = true;
		priv->sys_name = strndup(main_str, s - main_str);
		priv->evt_name = strdup(s + 1);
		goto set_priv;
	}

	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
			 config_str, PERF_BPF_PROBE_GROUP);
		err = -BPF_LOADER_ERRNO__GROUP;
		goto errout;
	} else if (!pev->group)
		pev->group = strdup(PERF_BPF_PROBE_GROUP);

	if (!pev->group) {
		pr_debug("bpf: strdup failed\n");
		err = -ENOMEM;
		goto errout;
	}

	if (!pev->event) {
		pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
			 config_str);
		err = -BPF_LOADER_ERRNO__EVENTNAME;
		goto errout;
	}
	pr_debug("bpf: config '%s' is ok\n", config_str);

set_priv:
	err = bpf_program__set_priv(prog, priv, clear_prog_priv);
	if (err) {
		pr_debug("Failed to set priv for program '%s'\n", config_str);
		goto errout;
	}

	return 0;

errout:
	if (pev)
		clear_perf_probe_event(pev);
	free(priv);
	return err;
}

static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

	/*
	 * Make err static, so if init failed the first time,
	 * bpf__prepare_probe() fails each time without calling
	 * init_probe_symbol_maps multiple times.
	 */
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}

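/*
 * Resulting private data for the two flavors of config string
 * (section names are hypothetical, for illustration only):
 *
 *	"raw_syscalls:sys_enter"	-> priv->is_tp = true,
 *					   sys_name = "raw_syscalls",
 *					   evt_name = "sys_enter"
 *	"open=do_sys_open filename"	-> priv->pev with group
 *					   PERF_BPF_PROBE_GROUP and
 *					   event name "open"
 */
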
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	if (IS_ERR(priv) || !priv || priv->is_tp)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev that belongs to that type */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__title(prog, false);
		if (!title)
			title = "[unknown]";

		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}

/*
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can prove it, but this margin is too narrow to contain the proof.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}

	return 0;
}

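/*
 * Worked example for the type mapping built below (argument lists are
 * hypothetical): if a pev expanded to
 *
 *	tevs[0]: args (+0(%di)),  tevs[1]: args (%si),
 *	tevs[2]: args (+0(%di)),  tevs[3]: no args,
 *
 * then compare_tev_args() treats tevs[0] and tevs[2] as equal, so
 * map_prologue() produces something like mapping = {0, 1, 0, 2} and
 * nr_types = 3 (exact numbering depends on the sort order): only three
 * distinct prologues are needed for four probe points.
 */
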
/*
 * Assign a type number to each tev in a pev.
 * mapping is an array with the same number of slots as tevs in that pev.
 * nr_types will be set to the number of types.
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
			int *nr_types)
{
	int i, type = 0;
	struct probe_trace_event **ptevs;

	size_t array_sz = sizeof(*ptevs) * pev->ntevs;

	ptevs = malloc(array_sz);
	if (!ptevs) {
		pr_debug("Not enough memory: alloc ptevs failed\n");
		return -ENOMEM;
	}

	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
	for (i = 0; i < pev->ntevs; i++)
		ptevs[i] = &pev->tevs[i];

	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
	      compare_tev_args);

	for (i = 0; i < pev->ntevs; i++) {
		int n;

		n = ptevs[i] - pev->tevs;
		if (i == 0) {
			mapping[n] = type;
			pr_debug("mapping[%d]=%d\n", n, type);
			continue;
		}

		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
			mapping[n] = type;
		else
			mapping[n] = ++type;

		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
	}
	free(ptevs);
	*nr_types = type + 1;

	return 0;
}

static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int err, i;

	if (IS_ERR(priv) || !priv) {
		pr_debug("Internal error when hooking preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (priv->is_tp) {
		priv->need_prologue = false;
		return 0;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since no tev has an argument, we don't need to generate
	 * a prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("Not enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
}

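/*
 * Resulting instruction buffer layout once preproc_gen_prologue() has
 * run for one type (sketch):
 *
 *	priv->insns_buf:
 *	+--------------------------+--------------------------------+
 *	| generated prologue       | original program instructions  |
 *	| (argument fetchers)      |                                |
 *	+--------------------------+--------------------------------+
 *	  prologue_cnt insns         orig_insns_cnt insns
 *
 * How arguments are actually fetched is bpf__gen_prologue()'s business
 * (see bpf-prologue.c); this diagram is only an illustration.
 */
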
int bpf__probe(struct bpf_object *obj)
{
	int err = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;
	struct perf_probe_event *pev;

	err = bpf__prepare_probe();
	if (err) {
		pr_debug("bpf__prepare_probe failed\n");
		return err;
	}

	bpf_object__for_each_program(prog, obj) {
		err = config_bpf_program(prog);
		if (err)
			goto out;

		priv = bpf_program__priv(prog);
		if (IS_ERR(priv) || !priv) {
			err = PTR_ERR(priv);
			goto out;
		}

		if (priv->is_tp) {
			bpf_program__set_tracepoint(prog);
			continue;
		}

		bpf_program__set_kprobe(prog);
		pev = &priv->pev;

		err = convert_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to convert perf probe events\n");
			goto out;
		}

		err = apply_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to apply perf probe events\n");
			goto out;
		}

		/*
		 * After probing, let's consider the prologue, which
		 * adds argument fetchers to BPF programs.
		 *
		 * hook_load_preprocessor() hooks a pre-processor
		 * to the bpf_program, letting it generate the prologue
		 * dynamically during loading.
		 */
		err = hook_load_preprocessor(prog);
		if (err)
			goto out;
	}
out:
	return err < 0 ? err : 0;
}

#define EVENTS_WRITE_BUFSIZE 4096
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		int i;

		if (IS_ERR(priv) || !priv || priv->is_tp)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}

int bpf__load(struct bpf_object *obj)
{
	int err;

	err = bpf_object__load(obj);
	if (err) {
		pr_debug("bpf: load objects failed\n");
		return err;
	}
	return 0;
}

int bpf__foreach_event(struct bpf_object *obj,
		       bpf_prog_iter_callback_t func,
		       void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i, fd;

		if (IS_ERR(priv) || !priv) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (priv->is_tp) {
			fd = bpf_program__fd(prog);
			err = (*func)(priv->sys_name, priv->evt_name, fd, arg);
			if (err) {
				pr_debug("bpf: tracepoint callback failed, stop iterating\n");
				return err;
			}
			continue;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			if (priv->need_prologue) {
				int type = priv->type_mapping[i];

				fd = bpf_program__nth_fd(prog, type);
			} else {
				fd = bpf_program__fd(prog);
			}

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev->group, tev->event, fd, arg);
			if (err) {
				pr_debug("bpf: callback failed, stop iterating\n");
				return err;
			}
		}
	}
	return 0;
}

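/*
 * Minimal callback sketch for bpf__foreach_event() (name and body are
 * hypothetical; perf's real callback attaches each program fd to its
 * probe event):
 *
 *	static int print_event(const char *group, const char *event,
 *			       int fd, void *arg __maybe_unused)
 *	{
 *		pr_debug("%s:%s -> prog fd %d\n", group, event, fd);
 *		return 0;	(a non-zero return stops the iteration)
 *	}
 *
 *	err = bpf__foreach_event(obj, print_event, NULL);
 */
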
enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,
	BPF_MAP_OP_SET_EVSEL,
};

enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,
	BPF_MAP_KEY_RANGES,
};

struct bpf_map_op {
	struct list_head list;
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		struct parse_events_array array;
	} k;
	union {
		u64 value;
		struct perf_evsel *evsel;
	} v;
};

struct bpf_map_priv {
	struct list_head ops_list;
};

static void
bpf_map_op__delete(struct bpf_map_op *op)
{
	if (!list_empty(&op->list))
		list_del(&op->list);
	if (op->key_type == BPF_MAP_KEY_RANGES)
		parse_events__clear_array(&op->k.array);
	free(op);
}

static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
{
	struct bpf_map_op *pos, *n;

	list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
		list_del_init(&pos->list);
		bpf_map_op__delete(pos);
	}
}

static void
bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
		    void *_priv)
{
	struct bpf_map_priv *priv = _priv;

	bpf_map_priv__purge(priv);
	free(priv);
}

static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
{
	op->key_type = BPF_MAP_KEY_ALL;
	if (!term)
		return 0;

	if (term->array.nr_ranges) {
		size_t memsz = term->array.nr_ranges *
				sizeof(op->k.array.ranges[0]);

		op->k.array.ranges = memdup(term->array.ranges, memsz);
		if (!op->k.array.ranges) {
			pr_debug("Not enough memory to alloc indices for map\n");
			return -ENOMEM;
		}
		op->key_type = BPF_MAP_KEY_RANGES;
		op->k.array.nr_ranges = term->array.nr_ranges;
	}
	return 0;
}

static struct bpf_map_op *
bpf_map_op__new(struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = zalloc(sizeof(*op));
	if (!op) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&op->list);

	err = bpf_map_op_setkey(op, term);
	if (err) {
		free(op);
		return ERR_PTR(err);
	}
	return op;
}

static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
	struct bpf_map_op *newop;

	newop = memdup(op, sizeof(*op));
	if (!newop) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return NULL;
	}

	INIT_LIST_HEAD(&newop->list);
	if (op->key_type == BPF_MAP_KEY_RANGES) {
		size_t memsz = op->k.array.nr_ranges *
			       sizeof(op->k.array.ranges[0]);

		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
		if (!newop->k.array.ranges) {
			pr_debug("Failed to alloc indices for map\n");
			free(newop);
			return NULL;
		}
	}

	return newop;
}

static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
	struct bpf_map_priv *newpriv;
	struct bpf_map_op *pos, *newop;

	newpriv = zalloc(sizeof(*newpriv));
	if (!newpriv) {
		pr_debug("Not enough memory to alloc map private\n");
		return NULL;
	}
	INIT_LIST_HEAD(&newpriv->ops_list);

	list_for_each_entry(pos, &priv->ops_list, list) {
		newop = bpf_map_op__clone(pos);
		if (!newop) {
			bpf_map_priv__purge(newpriv);
			return NULL;
		}
		list_add_tail(&newop->list, &newpriv->ops_list);
	}

	return newpriv;
}

static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
	const char *map_name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("Failed to get private from map %s\n", map_name);
		return PTR_ERR(priv);
	}

	if (!priv) {
		priv = zalloc(sizeof(*priv));
		if (!priv) {
			pr_debug("Not enough memory to alloc map private\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&priv->ops_list);

		if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
			free(priv);
			return -BPF_LOADER_ERRNO__INTERNAL;
		}
	}

	list_add_tail(&op->list, &priv->ops_list);
	return 0;
}

static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = bpf_map_op__new(term);
	if (IS_ERR(op))
		return op;

	err = bpf_map__add_op(map, op);
	if (err) {
		bpf_map_op__delete(op);
		return ERR_PTR(err);
	}
	return op;
}

static int
__bpf_map__config_value(struct bpf_map *map,
			struct parse_events_term *term)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	const struct bpf_map_def *def = bpf_map__def(map);

	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (def->type != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (def->key_size < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	switch (def->value_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		pr_debug("Map %s has incorrect value size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_VALUE;
	op->v.value = term->val.num;
	return 0;
}

static int
bpf_map__config_value(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct perf_evlist *evlist __maybe_unused)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
		pr_debug("ERROR: wrong value type for 'value'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_value(map, term);
}

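/*
 * An array map passing the checks in __bpf_map__config_value() would
 * be declared in the BPF object roughly like this (illustrative):
 *
 *	struct bpf_map_def SEC("maps") flag = {
 *		.type        = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(int),
 *		.value_size  = sizeof(int),
 *		.max_entries = 1,
 *	};
 */
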
static int
__bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	const struct bpf_map_def *def;
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);

	evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
	if (!evsel) {
		pr_debug("Event (for '%s') '%s' doesn't exist\n",
			 map_name, term->val.str);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return PTR_ERR(def);
	}

	/*
	 * No need to check key_size and value_size:
	 * kernel has already checked them.
	 */
	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_EVSEL;
	op->v.evsel = evsel;
	return 0;
}

static int
bpf_map__config_event(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct perf_evlist *evlist)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
		pr_debug("ERROR: wrong value type for 'event'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_event(map, term, evlist);
}

struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct perf_evlist *);
};

struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};

static int
config_map_indices_range_check(struct parse_events_term *term,
			       struct bpf_map *map,
			       const char *map_name)
{
	struct parse_events_array *array = &term->array;
	const struct bpf_map_def *def;
	unsigned int i;

	if (!array->nr_ranges)
		return 0;
	if (!array->ranges) {
		pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
			 map_name, (int)array->nr_ranges);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	for (i = 0; i < array->nr_ranges; i++) {
		unsigned int start = array->ranges[i].start;
		size_t length = array->ranges[i].length;
		unsigned int idx = start + length - 1;

		if (idx >= def->max_entries) {
			pr_debug("ERROR: index %d too large\n", idx);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
		}
	}
	return 0;
}

static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct perf_evlist *evlist,
		    int *key_scan_pos)
{
	/* key is "map:<mapname>.<config opt>" */
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
				&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	if (!err)
		*key_scan_pos += strlen(map_opt);
	free(map_name);
	return err;
}

int bpf__config_obj(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct perf_evlist *evlist,
		    int *error_pos)
{
	int key_scan_pos = 0;
	int err;

	if (!obj || !term || !term->config)
		return -EINVAL;

	if (!prefixcmp(term->config, "map:")) {
		key_scan_pos = sizeof("map:") - 1;
		err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
		goto out;
	}
	err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
out:
	if (error_pos)
		*error_pos = key_scan_pos;
	return err;
}

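/*
 * Config terms routed here come from perf's event parser; the syntax
 * is roughly as follows (map and event names are hypothetical):
 *
 *	perf record -e './prog.c/map:flag.value=1/' ...
 *	perf record -e './prog.c/map:flag.value[0,2...4]=1/' ...
 *	perf record -e './prog.c/map:channel.event=cycles/' ...
 *
 * "map:" selects bpf__obj_config_map(); "<mapname>.<opt>" picks the
 * target map and one of the bpf_obj_config__map_funcs handlers.
 */
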
typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map_def *pdef,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);

static int
foreach_key_array_all(map_config_func_t func,
		      void *arg, const char *name,
		      int map_fd, const struct bpf_map_def *pdef,
		      struct bpf_map_op *op)
{
	unsigned int i;
	int err;

	for (i = 0; i < pdef->max_entries; i++) {
		err = func(name, map_fd, pdef, op, &i, arg);
		if (err) {
			pr_debug("ERROR: failed to insert value to %s[%u]\n",
				 name, i);
			return err;
		}
	}
	return 0;
}

static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
			 const char *name, int map_fd,
			 const struct bpf_map_def *pdef,
			 struct bpf_map_op *op)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < op->k.array.nr_ranges; i++) {
		unsigned int start = op->k.array.ranges[i].start;
		size_t length = op->k.array.ranges[i].length;

		for (j = 0; j < length; j++) {
			unsigned int idx = start + j;

			err = func(name, map_fd, pdef, op, &idx, arg);
			if (err) {
				pr_debug("ERROR: failed to insert value to %s[%u]\n",
					 name, idx);
				return err;
			}
		}
	}
	return 0;
}

static int
bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd;
	struct bpf_map_op *op;
	const struct bpf_map_def *def;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("ERROR: failed to get private from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	if (!priv || list_empty(&priv->ops_list)) {
		pr_debug("INFO: nothing to config for map %s\n", name);
		return 0;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: failed to get definition from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
	if (map_fd < 0) {
		pr_debug("ERROR: failed to get fd from map %s\n", name);
		return map_fd;
	}

	list_for_each_entry(op, &priv->ops_list, list) {
		switch (def->type) {
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, def, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, def,
							       op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
					 name);
				return -BPF_LOADER_ERRNO__INTERNAL;
			}
			if (err)
				return err;
			break;
		default:
			pr_debug("ERROR: type of '%s' incorrect\n", name);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
		}
	}

	return 0;
}

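/*
 * Key expansion illustration (hypothetical op on a 4-entry array map):
 * BPF_MAP_KEY_ALL makes bpf_map_config_foreach_key() call func for
 * keys 0,1,2,3, while BPF_MAP_KEY_RANGES with ranges "0,2...3" calls
 * it for keys 0, 2 and 3 only.
 */
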
static int
apply_config_value_for_key(int map_fd, void *pkey,
			   size_t val_size, u64 val)
{
	int err = 0;

	switch (val_size) {
	case 1: {
		u8 _val = (u8)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 2: {
		u16 _val = (u16)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 4: {
		u32 _val = (u32)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 8: {
		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
		break;
	}
	default:
		pr_debug("ERROR: invalid value size\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct perf_evsel *evsel)
{
	struct xyarray *xy = evsel->fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	if (perf_evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_obj_config_map_for_key(const char *name, int map_fd,
			     const struct bpf_map_def *pdef,
			     struct bpf_map_op *op,
			     void *pkey, void *arg __maybe_unused)
{
	int err;

	switch (op->op_type) {
	case BPF_MAP_OP_SET_VALUE:
		err = apply_config_value_for_key(map_fd, pkey,
						 pdef->value_size,
						 op->v.value);
		break;
	case BPF_MAP_OP_SET_EVSEL:
		err = apply_config_evsel_for_key(name, map_fd, pkey,
						 op->v.evsel);
		break;
	default:
		pr_debug("ERROR: unknown value type for '%s'\n", name);
		err = -BPF_LOADER_ERRNO__INTERNAL;
	}
	return err;
}

static int
apply_obj_config_map(struct bpf_map *map)
{
	return bpf_map_config_foreach_key(map,
					  apply_obj_config_map_for_key,
					  NULL);
}

static int
apply_obj_config_object(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

	bpf_map__for_each(map, obj) {
		err = apply_obj_config_map(map);
		if (err)
			return err;
	}
	return 0;
}

int bpf__apply_obj_config(void)
{
	struct bpf_object *obj, *tmp;
	int err;

	bpf_object__for_each_safe(obj, tmp) {
		err = apply_obj_config_object(obj);
		if (err)
			return err;
	}

	return 0;
}

#define bpf__for_each_map(pos, obj, objtmp)	\
	bpf_object__for_each_safe(obj, objtmp)	\
		bpf_map__for_each(pos, obj)

#define bpf__for_each_stdout_map(pos, obj, objtmp)	\
	bpf__for_each_map(pos, obj, objtmp)		\
		if (bpf_map__name(pos) &&		\
			(strcmp("__bpf_stdout__",	\
				bpf_map__name(pos)) == 0))

int bpf__setup_stdout(struct perf_evlist *evlist)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_object *obj, *tmp;
	struct perf_evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	bpf__for_each_stdout_map(map, obj, tmp) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return -BPF_LOADER_ERRNO__INTERNAL;

		/*
		 * No need to check map type: type should have been
		 * verified by kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return 0;

	if (!tmpl_priv) {
		err = parse_events(evlist, "bpf-output/no-inherit=1,name=__bpf_stdout__/",
				   NULL);
		if (err) {
			pr_debug("ERROR: failed to create bpf-output event\n");
			return -err;
		}

		evsel = perf_evlist__last(evlist);
	}

	bpf__for_each_stdout_map(map, obj, tmp) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return -BPF_LOADER_ERRNO__INTERNAL;
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return -ENOMEM;

			err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return err;
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return PTR_ERR(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return 0;
}

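/*
 * BPF-side counterpart of the "__bpf_stdout__" convention (sketch, not
 * part of this file): a scriptlet wanting "stdout" declares a
 * PERF_EVENT_ARRAY map with this exact name and writes records into it:
 *
 *	struct bpf_map_def SEC("maps") __bpf_stdout__ = {
 *		.type        = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 *		.key_size    = sizeof(int),
 *		.value_size  = sizeof(u32),
 *		.max_entries = __NR_CPUS__,
 *	};
 *
 * bpf__setup_stdout() then binds a bpf-output evsel to every key of
 * every such map.
 */
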
#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)]	= "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};

static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		snprintf(buf, size, "%s",
			 str_error_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}

#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';

int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
		     filename, source ? " from source" : "");
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}

int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct perf_evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}