Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpftool: Use syscall/loader program in "prog load" and "gen skeleton" command.

Add -L flag to bpftool to use libbpf gen_trace facility and syscall/loader program
for skeleton generation and program loading.

"bpftool gen skeleton -L" command will generate a "light skeleton" or "loader skeleton"
that is similar to the existing skeleton, but has one major difference:
$ bpftool gen skeleton lsm.o > lsm.skel.h
$ bpftool gen skeleton -L lsm.o > lsm.lskel.h
$ diff lsm.skel.h lsm.lskel.h
@@ -5,34 +4,34 @@
#define __LSM_SKEL_H__

#include <stdlib.h>
-#include <bpf/libbpf.h>
+#include <bpf/bpf.h>

The light skeleton does not use the majority of libbpf infrastructure.
It doesn't need libelf. It doesn't parse the .o file.
It only needs a few sys_bpf wrappers. All of them are in the bpf/bpf.h file.
In the future, libbpf/bpf.c can be inlined into bpf.h, so not even libbpf.a would be
needed to work with the light skeleton.

"bpftool prog load -L file.o" command is introduced for debugging of syscall/loader
program generation. Just like the same command without -L, it will try to load
the programs from file.o into the kernel. It won't even try to pin them.

"bpftool prog load -L -d file.o" command will provide additional debug messages
on how the syscall/loader program was generated.
Also, the execution of the syscall/loader program will use bpf_trace_printk() for
each step of loading BTF, creating maps, and loading programs.
The user can do "cat /.../trace_pipe" for further debugging.

An example of fexit_sleep.lskel.h generated from progs/fexit_sleep.c:
struct fexit_sleep {
struct bpf_loader_ctx ctx;
struct {
struct bpf_map_desc bss;
} maps;
struct {
struct bpf_prog_desc nanosleep_fentry;
struct bpf_prog_desc nanosleep_fexit;
} progs;
struct {
int nanosleep_fentry_fd;
int nanosleep_fexit_fd;
} links;
struct fexit_sleep__bss {
int pid;
int fentry_cnt;
int fexit_cnt;
} *bss;
};

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210514003623.28033-18-alexei.starovoitov@gmail.com

authored by

Alexei Starovoitov and committed by
Daniel Borkmann
d510296d 7723256b

+482 -24
+1 -1
tools/bpf/bpftool/Makefile
··· 136 136 137 137 BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool 138 138 139 - BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o) 139 + BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o xlated_dumper.o btf_dumper.o) $(OUTPUT)disasm.o 140 140 OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o 141 141 142 142 VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
+365 -21
tools/bpf/bpftool/gen.c
··· 18 18 #include <sys/stat.h> 19 19 #include <sys/mman.h> 20 20 #include <bpf/btf.h> 21 + #include <bpf/bpf_gen_internal.h> 21 22 22 23 #include "json_writer.h" 23 24 #include "main.h" ··· 275 274 free(s); 276 275 } 277 276 277 + static void print_hex(const char *data, int data_sz) 278 + { 279 + int i, len; 280 + 281 + for (i = 0, len = 0; i < data_sz; i++) { 282 + int w = data[i] ? 4 : 2; 283 + 284 + len += w; 285 + if (len > 78) { 286 + printf("\\\n"); 287 + len = w; 288 + } 289 + if (!data[i]) 290 + printf("\\0"); 291 + else 292 + printf("\\x%02x", (unsigned char)data[i]); 293 + } 294 + } 295 + 296 + static size_t bpf_map_mmap_sz(const struct bpf_map *map) 297 + { 298 + long page_sz = sysconf(_SC_PAGE_SIZE); 299 + size_t map_sz; 300 + 301 + map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map); 302 + map_sz = roundup(map_sz, page_sz); 303 + return map_sz; 304 + } 305 + 306 + static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name) 307 + { 308 + struct bpf_program *prog; 309 + 310 + bpf_object__for_each_program(prog, obj) { 311 + const char *tp_name; 312 + 313 + codegen("\ 314 + \n\ 315 + \n\ 316 + static inline int \n\ 317 + %1$s__%2$s__attach(struct %1$s *skel) \n\ 318 + { \n\ 319 + int prog_fd = skel->progs.%2$s.prog_fd; \n\ 320 + ", obj_name, bpf_program__name(prog)); 321 + 322 + switch (bpf_program__get_type(prog)) { 323 + case BPF_PROG_TYPE_RAW_TRACEPOINT: 324 + tp_name = strchr(bpf_program__section_name(prog), '/') + 1; 325 + printf("\tint fd = bpf_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name); 326 + break; 327 + case BPF_PROG_TYPE_TRACING: 328 + printf("\tint fd = bpf_raw_tracepoint_open(NULL, prog_fd);\n"); 329 + break; 330 + default: 331 + printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n"); 332 + break; 333 + } 334 + codegen("\ 335 + \n\ 336 + \n\ 337 + if (fd > 0) \n\ 338 + skel->links.%1$s_fd = fd; \n\ 339 + return fd; \n\ 340 + } \n\ 341 + ", bpf_program__name(prog)); 
342 + } 343 + 344 + codegen("\ 345 + \n\ 346 + \n\ 347 + static inline int \n\ 348 + %1$s__attach(struct %1$s *skel) \n\ 349 + { \n\ 350 + int ret = 0; \n\ 351 + \n\ 352 + ", obj_name); 353 + 354 + bpf_object__for_each_program(prog, obj) { 355 + codegen("\ 356 + \n\ 357 + ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\ 358 + ", obj_name, bpf_program__name(prog)); 359 + } 360 + 361 + codegen("\ 362 + \n\ 363 + return ret < 0 ? ret : 0; \n\ 364 + } \n\ 365 + \n\ 366 + static inline void \n\ 367 + %1$s__detach(struct %1$s *skel) \n\ 368 + { \n\ 369 + ", obj_name); 370 + 371 + bpf_object__for_each_program(prog, obj) { 372 + codegen("\ 373 + \n\ 374 + skel_closenz(skel->links.%1$s_fd); \n\ 375 + ", bpf_program__name(prog)); 376 + } 377 + 378 + codegen("\ 379 + \n\ 380 + } \n\ 381 + "); 382 + } 383 + 384 + static void codegen_destroy(struct bpf_object *obj, const char *obj_name) 385 + { 386 + struct bpf_program *prog; 387 + struct bpf_map *map; 388 + 389 + codegen("\ 390 + \n\ 391 + static void \n\ 392 + %1$s__destroy(struct %1$s *skel) \n\ 393 + { \n\ 394 + if (!skel) \n\ 395 + return; \n\ 396 + %1$s__detach(skel); \n\ 397 + ", 398 + obj_name); 399 + 400 + bpf_object__for_each_program(prog, obj) { 401 + codegen("\ 402 + \n\ 403 + skel_closenz(skel->progs.%1$s.prog_fd); \n\ 404 + ", bpf_program__name(prog)); 405 + } 406 + 407 + bpf_object__for_each_map(map, obj) { 408 + const char * ident; 409 + 410 + ident = get_map_ident(map); 411 + if (!ident) 412 + continue; 413 + if (bpf_map__is_internal(map) && 414 + (bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) 415 + printf("\tmunmap(skel->%1$s, %2$zd);\n", 416 + ident, bpf_map_mmap_sz(map)); 417 + codegen("\ 418 + \n\ 419 + skel_closenz(skel->maps.%1$s.map_fd); \n\ 420 + ", ident); 421 + } 422 + codegen("\ 423 + \n\ 424 + free(skel); \n\ 425 + } \n\ 426 + ", 427 + obj_name); 428 + } 429 + 430 + static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard) 431 + { 432 + struct 
bpf_object_load_attr load_attr = {}; 433 + DECLARE_LIBBPF_OPTS(gen_loader_opts, opts); 434 + struct bpf_map *map; 435 + int err = 0; 436 + 437 + err = bpf_object__gen_loader(obj, &opts); 438 + if (err) 439 + return err; 440 + 441 + load_attr.obj = obj; 442 + if (verifier_logs) 443 + /* log_level1 + log_level2 + stats, but not stable UAPI */ 444 + load_attr.log_level = 1 + 2 + 4; 445 + 446 + err = bpf_object__load_xattr(&load_attr); 447 + if (err) { 448 + p_err("failed to load object file"); 449 + goto out; 450 + } 451 + /* If there was no error during load then gen_loader_opts 452 + * are populated with the loader program. 453 + */ 454 + 455 + /* finish generating 'struct skel' */ 456 + codegen("\ 457 + \n\ 458 + }; \n\ 459 + ", obj_name); 460 + 461 + 462 + codegen_attach_detach(obj, obj_name); 463 + 464 + codegen_destroy(obj, obj_name); 465 + 466 + codegen("\ 467 + \n\ 468 + static inline struct %1$s * \n\ 469 + %1$s__open(void) \n\ 470 + { \n\ 471 + struct %1$s *skel; \n\ 472 + \n\ 473 + skel = calloc(sizeof(*skel), 1); \n\ 474 + if (!skel) \n\ 475 + goto cleanup; \n\ 476 + skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\ 477 + ", 478 + obj_name, opts.data_sz); 479 + bpf_object__for_each_map(map, obj) { 480 + const char *ident; 481 + const void *mmap_data = NULL; 482 + size_t mmap_size = 0; 483 + 484 + ident = get_map_ident(map); 485 + if (!ident) 486 + continue; 487 + 488 + if (!bpf_map__is_internal(map) || 489 + !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) 490 + continue; 491 + 492 + codegen("\ 493 + \n\ 494 + skel->%1$s = \n\ 495 + mmap(NULL, %2$zd, PROT_READ | PROT_WRITE,\n\ 496 + MAP_SHARED | MAP_ANONYMOUS, -1, 0); \n\ 497 + if (skel->%1$s == (void *) -1) \n\ 498 + goto cleanup; \n\ 499 + memcpy(skel->%1$s, (void *)\"\\ \n\ 500 + ", ident, bpf_map_mmap_sz(map)); 501 + mmap_data = bpf_map__initial_value(map, &mmap_size); 502 + print_hex(mmap_data, mmap_size); 503 + printf("\", %2$zd);\n" 504 + "\tskel->maps.%1$s.initial_value = 
(__u64)(long)skel->%1$s;\n", 505 + ident, mmap_size); 506 + } 507 + codegen("\ 508 + \n\ 509 + return skel; \n\ 510 + cleanup: \n\ 511 + %1$s__destroy(skel); \n\ 512 + return NULL; \n\ 513 + } \n\ 514 + \n\ 515 + static inline int \n\ 516 + %1$s__load(struct %1$s *skel) \n\ 517 + { \n\ 518 + struct bpf_load_and_run_opts opts = {}; \n\ 519 + int err; \n\ 520 + \n\ 521 + opts.ctx = (struct bpf_loader_ctx *)skel; \n\ 522 + opts.data_sz = %2$d; \n\ 523 + opts.data = (void *)\"\\ \n\ 524 + ", 525 + obj_name, opts.data_sz); 526 + print_hex(opts.data, opts.data_sz); 527 + codegen("\ 528 + \n\ 529 + \"; \n\ 530 + "); 531 + 532 + codegen("\ 533 + \n\ 534 + opts.insns_sz = %d; \n\ 535 + opts.insns = (void *)\"\\ \n\ 536 + ", 537 + opts.insns_sz); 538 + print_hex(opts.insns, opts.insns_sz); 539 + codegen("\ 540 + \n\ 541 + \"; \n\ 542 + err = bpf_load_and_run(&opts); \n\ 543 + if (err < 0) \n\ 544 + return err; \n\ 545 + ", obj_name); 546 + bpf_object__for_each_map(map, obj) { 547 + const char *ident, *mmap_flags; 548 + 549 + ident = get_map_ident(map); 550 + if (!ident) 551 + continue; 552 + 553 + if (!bpf_map__is_internal(map) || 554 + !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) 555 + continue; 556 + if (bpf_map__def(map)->map_flags & BPF_F_RDONLY_PROG) 557 + mmap_flags = "PROT_READ"; 558 + else 559 + mmap_flags = "PROT_READ | PROT_WRITE"; 560 + 561 + printf("\tskel->%1$s =\n" 562 + "\t\tmmap(skel->%1$s, %2$zd, %3$s, MAP_SHARED | MAP_FIXED,\n" 563 + "\t\t\tskel->maps.%1$s.map_fd, 0);\n", 564 + ident, bpf_map_mmap_sz(map), mmap_flags); 565 + } 566 + codegen("\ 567 + \n\ 568 + return 0; \n\ 569 + } \n\ 570 + \n\ 571 + static inline struct %1$s * \n\ 572 + %1$s__open_and_load(void) \n\ 573 + { \n\ 574 + struct %1$s *skel; \n\ 575 + \n\ 576 + skel = %1$s__open(); \n\ 577 + if (!skel) \n\ 578 + return NULL; \n\ 579 + if (%1$s__load(skel)) { \n\ 580 + %1$s__destroy(skel); \n\ 581 + return NULL; \n\ 582 + } \n\ 583 + return skel; \n\ 584 + } \n\ 585 + ", obj_name); 586 + 587 
+ codegen("\ 588 + \n\ 589 + \n\ 590 + #endif /* %s */ \n\ 591 + ", 592 + header_guard); 593 + err = 0; 594 + out: 595 + return err; 596 + } 597 + 278 598 static int do_skeleton(int argc, char **argv) 279 599 { 280 600 char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")]; ··· 605 283 struct bpf_object *obj = NULL; 606 284 const char *file, *ident; 607 285 struct bpf_program *prog; 608 - int fd, len, err = -1; 286 + int fd, err = -1; 609 287 struct bpf_map *map; 610 288 struct btf *btf; 611 289 struct stat st; ··· 687 365 } 688 366 689 367 get_header_guard(header_guard, obj_name); 690 - codegen("\ 368 + if (use_loader) { 369 + codegen("\ 370 + \n\ 371 + /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\ 372 + /* THIS FILE IS AUTOGENERATED! */ \n\ 373 + #ifndef %2$s \n\ 374 + #define %2$s \n\ 375 + \n\ 376 + #include <stdlib.h> \n\ 377 + #include <bpf/bpf.h> \n\ 378 + #include <bpf/skel_internal.h> \n\ 379 + \n\ 380 + struct %1$s { \n\ 381 + struct bpf_loader_ctx ctx; \n\ 382 + ", 383 + obj_name, header_guard 384 + ); 385 + } else { 386 + codegen("\ 691 387 \n\ 692 388 /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\ 693 389 \n\ ··· 721 381 struct bpf_object *obj; \n\ 722 382 ", 723 383 obj_name, header_guard 724 - ); 384 + ); 385 + } 725 386 726 387 if (map_cnt) { 727 388 printf("\tstruct {\n"); ··· 730 389 ident = get_map_ident(map); 731 390 if (!ident) 732 391 continue; 733 - printf("\t\tstruct bpf_map *%s;\n", ident); 392 + if (use_loader) 393 + printf("\t\tstruct bpf_map_desc %s;\n", ident); 394 + else 395 + printf("\t\tstruct bpf_map *%s;\n", ident); 734 396 } 735 397 printf("\t} maps;\n"); 736 398 } ··· 741 397 if (prog_cnt) { 742 398 printf("\tstruct {\n"); 743 399 bpf_object__for_each_program(prog, obj) { 744 - printf("\t\tstruct bpf_program *%s;\n", 745 - bpf_program__name(prog)); 400 + if (use_loader) 401 + printf("\t\tstruct bpf_prog_desc %s;\n", 402 + bpf_program__name(prog)); 403 + else 404 + printf("\t\tstruct bpf_program 
*%s;\n", 405 + bpf_program__name(prog)); 746 406 } 747 407 printf("\t} progs;\n"); 748 408 printf("\tstruct {\n"); 749 409 bpf_object__for_each_program(prog, obj) { 750 - printf("\t\tstruct bpf_link *%s;\n", 751 - bpf_program__name(prog)); 410 + if (use_loader) 411 + printf("\t\tint %s_fd;\n", 412 + bpf_program__name(prog)); 413 + else 414 + printf("\t\tstruct bpf_link *%s;\n", 415 + bpf_program__name(prog)); 752 416 } 753 417 printf("\t} links;\n"); 754 418 } ··· 766 414 err = codegen_datasecs(obj, obj_name); 767 415 if (err) 768 416 goto out; 417 + } 418 + if (use_loader) { 419 + err = gen_trace(obj, obj_name, header_guard); 420 + goto out; 769 421 } 770 422 771 423 codegen("\ ··· 940 584 file_sz); 941 585 942 586 /* embed contents of BPF object file */ 943 - for (i = 0, len = 0; i < file_sz; i++) { 944 - int w = obj_data[i] ? 4 : 2; 945 - 946 - len += w; 947 - if (len > 78) { 948 - printf("\\\n"); 949 - len = w; 950 - } 951 - if (!obj_data[i]) 952 - printf("\\0"); 953 - else 954 - printf("\\x%02x", (unsigned char)obj_data[i]); 955 - } 587 + print_hex(obj_data, file_sz); 956 588 957 589 codegen("\ 958 590 \n\
+6 -1
tools/bpf/bpftool/main.c
··· 29 29 bool block_mount; 30 30 bool verifier_logs; 31 31 bool relaxed_maps; 32 + bool use_loader; 32 33 struct btf *base_btf; 33 34 struct pinned_obj_table prog_table; 34 35 struct pinned_obj_table map_table; ··· 393 392 { "mapcompat", no_argument, NULL, 'm' }, 394 393 { "nomount", no_argument, NULL, 'n' }, 395 394 { "debug", no_argument, NULL, 'd' }, 395 + { "use-loader", no_argument, NULL, 'L' }, 396 396 { "base-btf", required_argument, NULL, 'B' }, 397 397 { 0 } 398 398 }; ··· 411 409 hash_init(link_table.table); 412 410 413 411 opterr = 0; 414 - while ((opt = getopt_long(argc, argv, "VhpjfmndB:", 412 + while ((opt = getopt_long(argc, argv, "VhpjfLmndB:", 415 413 options, NULL)) >= 0) { 416 414 switch (opt) { 417 415 case 'V': ··· 453 451 base_btf = NULL; 454 452 return -1; 455 453 } 454 + break; 455 + case 'L': 456 + use_loader = true; 456 457 break; 457 458 default: 458 459 p_err("unrecognized option '%s'", argv[optind - 1]);
+1
tools/bpf/bpftool/main.h
··· 90 90 extern bool block_mount; 91 91 extern bool verifier_logs; 92 92 extern bool relaxed_maps; 93 + extern bool use_loader; 93 94 extern struct btf *base_btf; 94 95 extern struct pinned_obj_table prog_table; 95 96 extern struct pinned_obj_table map_table;
+106 -1
tools/bpf/bpftool/prog.c
··· 16 16 #include <sys/types.h> 17 17 #include <sys/stat.h> 18 18 #include <sys/syscall.h> 19 + #include <dirent.h> 19 20 20 21 #include <linux/err.h> 21 22 #include <linux/perf_event.h> ··· 25 24 #include <bpf/bpf.h> 26 25 #include <bpf/btf.h> 27 26 #include <bpf/libbpf.h> 27 + #include <bpf/bpf_gen_internal.h> 28 + #include <bpf/skel_internal.h> 28 29 29 30 #include "cfg.h" 30 31 #include "main.h" ··· 1502 1499 set_max_rlimit(); 1503 1500 1504 1501 obj = bpf_object__open_file(file, &open_opts); 1505 - if (IS_ERR_OR_NULL(obj)) { 1502 + if (libbpf_get_error(obj)) { 1506 1503 p_err("failed to open object file"); 1507 1504 goto err_free_reuse_maps; 1508 1505 } ··· 1648 1645 return -1; 1649 1646 } 1650 1647 1648 + static int count_open_fds(void) 1649 + { 1650 + DIR *dp = opendir("/proc/self/fd"); 1651 + struct dirent *de; 1652 + int cnt = -3; 1653 + 1654 + if (!dp) 1655 + return -1; 1656 + 1657 + while ((de = readdir(dp))) 1658 + cnt++; 1659 + 1660 + closedir(dp); 1661 + return cnt; 1662 + } 1663 + 1664 + static int try_loader(struct gen_loader_opts *gen) 1665 + { 1666 + struct bpf_load_and_run_opts opts = {}; 1667 + struct bpf_loader_ctx *ctx; 1668 + int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc), 1669 + sizeof(struct bpf_prog_desc)); 1670 + int log_buf_sz = (1u << 24) - 1; 1671 + int err, fds_before, fd_delta; 1672 + char *log_buf; 1673 + 1674 + ctx = alloca(ctx_sz); 1675 + memset(ctx, 0, ctx_sz); 1676 + ctx->sz = ctx_sz; 1677 + ctx->log_level = 1; 1678 + ctx->log_size = log_buf_sz; 1679 + log_buf = malloc(log_buf_sz); 1680 + if (!log_buf) 1681 + return -ENOMEM; 1682 + ctx->log_buf = (long) log_buf; 1683 + opts.ctx = ctx; 1684 + opts.data = gen->data; 1685 + opts.data_sz = gen->data_sz; 1686 + opts.insns = gen->insns; 1687 + opts.insns_sz = gen->insns_sz; 1688 + fds_before = count_open_fds(); 1689 + err = bpf_load_and_run(&opts); 1690 + fd_delta = count_open_fds() - fds_before; 1691 + if (err < 0) { 1692 + fprintf(stderr, "err %d\n%s\n%s", err, 
opts.errstr, log_buf); 1693 + if (fd_delta) 1694 + fprintf(stderr, "loader prog leaked %d FDs\n", 1695 + fd_delta); 1696 + } 1697 + free(log_buf); 1698 + return err; 1699 + } 1700 + 1701 + static int do_loader(int argc, char **argv) 1702 + { 1703 + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts); 1704 + DECLARE_LIBBPF_OPTS(gen_loader_opts, gen); 1705 + struct bpf_object_load_attr load_attr = {}; 1706 + struct bpf_object *obj; 1707 + const char *file; 1708 + int err = 0; 1709 + 1710 + if (!REQ_ARGS(1)) 1711 + return -1; 1712 + file = GET_ARG(); 1713 + 1714 + obj = bpf_object__open_file(file, &open_opts); 1715 + if (libbpf_get_error(obj)) { 1716 + p_err("failed to open object file"); 1717 + goto err_close_obj; 1718 + } 1719 + 1720 + err = bpf_object__gen_loader(obj, &gen); 1721 + if (err) 1722 + goto err_close_obj; 1723 + 1724 + load_attr.obj = obj; 1725 + if (verifier_logs) 1726 + /* log_level1 + log_level2 + stats, but not stable UAPI */ 1727 + load_attr.log_level = 1 + 2 + 4; 1728 + 1729 + err = bpf_object__load_xattr(&load_attr); 1730 + if (err) { 1731 + p_err("failed to load object file"); 1732 + goto err_close_obj; 1733 + } 1734 + 1735 + if (verifier_logs) { 1736 + struct dump_data dd = {}; 1737 + 1738 + kernel_syms_load(&dd); 1739 + dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false); 1740 + kernel_syms_destroy(&dd); 1741 + } 1742 + err = try_loader(&gen); 1743 + err_close_obj: 1744 + bpf_object__close(obj); 1745 + return err; 1746 + } 1747 + 1651 1748 static int do_load(int argc, char **argv) 1652 1749 { 1750 + if (use_loader) 1751 + return do_loader(argc, argv); 1653 1752 return load_with_options(argc, argv, true); 1654 1753 } 1655 1754
+3
tools/bpf/bpftool/xlated_dumper.c
··· 196 196 else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) 197 197 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 198 198 "map[id:%u][0]+%u", insn->imm, (insn + 1)->imm); 199 + else if (insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) 200 + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 201 + "map[idx:%u]+%u", insn->imm, (insn + 1)->imm); 199 202 else if (insn->src_reg == BPF_PSEUDO_FUNC) 200 203 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 201 204 "subprog[%+d]", insn->imm);