Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf trace: Split BPF skel code to util/bpf_trace_augment.c

And make builtin-trace.c less conditional. Dummy functions will be
called when BUILD_BPF_SKEL=0 is used. This makes the builtin-trace.c
slightly smaller and simpler by removing the skeleton and its helpers.

The conditional guard of trace__init_syscalls_bpf_prog_array_maps() is
changed from HAVE_BPF_SKEL to HAVE_LIBBPF_SUPPORT as it doesn't
have a skeleton in the code directly. And a dummy function is added so
that it can be called unconditionally. The function will succeed only
if both conditions are true.

Do not include trace_augment.h from the BPF code and move the definition
of TRACE_AUG_MAX_BUF to the BPF source file directly.

Reviewed-by: Howard Chu <howardchu95@gmail.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Link: https://lore.kernel.org/r/20250623225721.21553-1-namhyung@kernel.org
Signed-off-by: Namhyung Kim <namhyung@kernel.org>

+255 -141
+48 -139
tools/perf/builtin-trace.c
··· 20 20 #include <bpf/bpf.h> 21 21 #include <bpf/libbpf.h> 22 22 #include <bpf/btf.h> 23 - #ifdef HAVE_BPF_SKEL 24 - #include "bpf_skel/augmented_raw_syscalls.skel.h" 25 - #endif 26 23 #endif 27 24 #include "util/bpf_map.h" 28 25 #include "util/rlimit.h" ··· 152 155 *bpf_output; 153 156 } events; 154 157 } syscalls; 155 - #ifdef HAVE_BPF_SKEL 156 - struct augmented_raw_syscalls_bpf *skel; 157 - #endif 158 158 #ifdef HAVE_LIBBPF_SUPPORT 159 159 struct btf *btf; 160 160 #endif ··· 3697 3703 goto out; 3698 3704 } 3699 3705 3700 - #ifdef HAVE_BPF_SKEL 3706 + #ifdef HAVE_LIBBPF_SUPPORT 3707 + 3708 + static struct bpf_program *unaugmented_prog; 3709 + 3701 3710 static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type) 3702 3711 { 3703 3712 int id; ··· 3718 3721 return 0; 3719 3722 } 3720 3723 3721 - static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name) 3722 - { 3723 - struct bpf_program *pos, *prog = NULL; 3724 - const char *sec_name; 3725 - 3726 - if (trace->skel->obj == NULL) 3727 - return NULL; 3728 - 3729 - bpf_object__for_each_program(pos, trace->skel->obj) { 3730 - sec_name = bpf_program__section_name(pos); 3731 - if (sec_name && !strcmp(sec_name, name)) { 3732 - prog = pos; 3733 - break; 3734 - } 3735 - } 3736 - 3737 - return prog; 3738 - } 3739 - 3740 - static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc, 3724 + static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace __maybe_unused, 3725 + struct syscall *sc, 3741 3726 const char *prog_name, const char *type) 3742 3727 { 3743 3728 struct bpf_program *prog; ··· 3727 3748 if (prog_name == NULL) { 3728 3749 char default_prog_name[256]; 3729 3750 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name); 3730 - prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3751 + prog = 
augmented_syscalls__find_by_title(default_prog_name); 3731 3752 if (prog != NULL) 3732 3753 goto out_found; 3733 3754 if (sc->fmt && sc->fmt->alias) { 3734 3755 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias); 3735 - prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3756 + prog = augmented_syscalls__find_by_title(default_prog_name); 3736 3757 if (prog != NULL) 3737 3758 goto out_found; 3738 3759 } 3739 3760 goto out_unaugmented; 3740 3761 } 3741 3762 3742 - prog = trace__find_bpf_program_by_title(trace, prog_name); 3763 + prog = augmented_syscalls__find_by_title(prog_name); 3743 3764 3744 3765 if (prog != NULL) { 3745 3766 out_found: ··· 3749 3770 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n", 3750 3771 prog_name, type, sc->name); 3751 3772 out_unaugmented: 3752 - return trace->skel->progs.syscall_unaugmented; 3773 + return unaugmented_prog; 3753 3774 } 3754 3775 3755 3776 static void trace__init_syscall_bpf_progs(struct trace *trace, int e_machine, int id) ··· 3766 3787 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int e_machine, int id) 3767 3788 { 3768 3789 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id); 3769 - return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented); 3790 + return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(unaugmented_prog); 3770 3791 } 3771 3792 3772 3793 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int e_machine, int id) 3773 3794 { 3774 3795 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id); 3775 - return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented); 3796 + return sc ? 
bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(unaugmented_prog); 3776 3797 } 3777 3798 3778 3799 static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int e_machine, int key, unsigned int *beauty_array) ··· 3882 3903 bool is_candidate = false; 3883 3904 3884 3905 if (pair == NULL || pair->id == sc->id || 3885 - pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented) 3906 + pair->bpf_prog.sys_enter == unaugmented_prog) 3886 3907 continue; 3887 3908 3888 3909 for (field = sc->args, candidate_field = pair->args; ··· 3948 3969 */ 3949 3970 if (pair_prog == NULL) { 3950 3971 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3951 - if (pair_prog == trace->skel->progs.syscall_unaugmented) 3972 + if (pair_prog == unaugmented_prog) 3952 3973 goto next_candidate; 3953 3974 } 3954 3975 ··· 3964 3985 3965 3986 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_machine) 3966 3987 { 3967 - int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter); 3968 - int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit); 3969 - int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter); 3988 + int map_enter_fd; 3989 + int map_exit_fd; 3990 + int beauty_map_fd; 3970 3991 int err = 0; 3971 3992 unsigned int beauty_array[6]; 3993 + 3994 + if (augmented_syscalls__get_map_fds(&map_enter_fd, &map_exit_fd, &beauty_map_fd) < 0) 3995 + return -1; 3996 + 3997 + unaugmented_prog = augmented_syscalls__unaugmented(); 3972 3998 3973 3999 for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) { 3974 4000 int prog_fd, key = syscalltbl__id_at_idx(e_machine, i); ··· 4044 4060 * For now we're just reusing the sys_enter prog, and if it 4045 4061 * already has an augmenter, we don't need to find one. 
4046 4062 */ 4047 - if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented) 4063 + if (sc->bpf_prog.sys_enter != unaugmented_prog) 4048 4064 continue; 4049 4065 4050 4066 /* ··· 4069 4085 4070 4086 return err; 4071 4087 } 4072 - #endif // HAVE_BPF_SKEL 4088 + #else // !HAVE_LIBBPF_SUPPORT 4089 + static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused, 4090 + int e_machine __maybe_unused) 4091 + { 4092 + return -1; 4093 + } 4094 + #endif // HAVE_LIBBPF_SUPPORT 4073 4095 4074 4096 static int trace__set_ev_qualifier_filter(struct trace *trace) 4075 4097 { 4076 4098 if (trace->syscalls.events.sys_enter) 4077 4099 return trace__set_ev_qualifier_tp_filter(trace); 4078 4100 return 0; 4079 - } 4080 - 4081 - static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused, 4082 - size_t npids __maybe_unused, pid_t *pids __maybe_unused) 4083 - { 4084 - int err = 0; 4085 - #ifdef HAVE_LIBBPF_SUPPORT 4086 - bool value = true; 4087 - int map_fd = bpf_map__fd(map); 4088 - size_t i; 4089 - 4090 - for (i = 0; i < npids; ++i) { 4091 - err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY); 4092 - if (err) 4093 - break; 4094 - } 4095 - #endif 4096 - return err; 4097 4101 } 4098 4102 4099 4103 static int trace__set_filter_loop_pids(struct trace *trace) ··· 4112 4140 thread__put(thread); 4113 4141 4114 4142 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids); 4115 - if (!err && trace->filter_pids.map) 4116 - err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids); 4143 + if (!err) 4144 + err = augmented_syscalls__set_filter_pids(nr, pids); 4117 4145 4118 4146 return err; 4119 4147 } ··· 4130 4158 if (trace->filter_pids.nr > 0) { 4131 4159 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr, 4132 4160 trace->filter_pids.entries); 4133 - if (!err && trace->filter_pids.map) { 4134 - err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr, 4161 + if (!err) { 4162 + err = 
augmented_syscalls__set_filter_pids(trace->filter_pids.nr, 4135 4163 trace->filter_pids.entries); 4136 4164 } 4137 4165 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) { ··· 4454 4482 err = evlist__open(evlist); 4455 4483 if (err < 0) 4456 4484 goto out_error_open; 4457 - #ifdef HAVE_BPF_SKEL 4458 - if (trace->syscalls.events.bpf_output) { 4459 - struct perf_cpu cpu; 4460 4485 4461 - /* 4462 - * Set up the __augmented_syscalls__ BPF map to hold for each 4463 - * CPU the bpf-output event's file descriptor. 4464 - */ 4465 - perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) { 4466 - int mycpu = cpu.cpu; 4486 + augmented_syscalls__setup_bpf_output(); 4467 4487 4468 - bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__, 4469 - &mycpu, sizeof(mycpu), 4470 - xyarray__entry(trace->syscalls.events.bpf_output->core.fd, 4471 - mycpu, 0), 4472 - sizeof(__u32), BPF_ANY); 4473 - } 4474 - } 4475 - 4476 - if (trace->skel) 4477 - trace->filter_pids.map = trace->skel->maps.pids_filtered; 4478 - #endif 4479 4488 err = trace__set_filter_pids(trace); 4480 4489 if (err < 0) 4481 4490 goto out_error_mem; 4482 4491 4483 - #ifdef HAVE_BPF_SKEL 4484 - if (trace->skel && trace->skel->progs.sys_enter) { 4485 - /* 4486 - * TODO: Initialize for all host binary machine types, not just 4487 - * those matching the perf binary. 4488 - */ 4489 - trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST); 4490 - } 4491 - #endif 4492 + /* 4493 + * TODO: Initialize for all host binary machine types, not just 4494 + * those matching the perf binary. 
4495 + */ 4496 + trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST); 4492 4497 4493 4498 if (trace->ev_qualifier_ids.nr > 0) { 4494 4499 err = trace__set_ev_qualifier_filter(trace); ··· 5328 5379 #endif 5329 5380 } 5330 5381 5331 - #ifdef HAVE_BPF_SKEL 5332 - static int bpf__setup_bpf_output(struct evlist *evlist) 5333 - { 5334 - int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/"); 5335 - 5336 - if (err) 5337 - pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n"); 5338 - 5339 - return err; 5340 - } 5341 - #endif 5342 - 5343 5382 int cmd_trace(int argc, const char **argv) 5344 5383 { 5345 5384 const char *trace_usage[] = { ··· 5524 5587 "cgroup monitoring only available in system-wide mode"); 5525 5588 } 5526 5589 5527 - #ifdef HAVE_BPF_SKEL 5528 5590 if (!trace.trace_syscalls) 5529 5591 goto skip_augmentation; 5530 5592 ··· 5542 5606 goto skip_augmentation; 5543 5607 } 5544 5608 5545 - trace.skel = augmented_raw_syscalls_bpf__open(); 5546 - if (!trace.skel) { 5547 - pr_debug("Failed to open augmented syscalls BPF skeleton"); 5548 - } else { 5549 - /* 5550 - * Disable attaching the BPF programs except for sys_enter and 5551 - * sys_exit that tail call into this as necessary. 
5552 - */ 5553 - struct bpf_program *prog; 5609 + err = augmented_syscalls__prepare(); 5610 + if (err < 0) 5611 + goto skip_augmentation; 5554 5612 5555 - bpf_object__for_each_program(prog, trace.skel->obj) { 5556 - if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit) 5557 - bpf_program__set_autoattach(prog, /*autoattach=*/false); 5558 - } 5613 + trace__add_syscall_newtp(&trace); 5559 5614 5560 - err = augmented_raw_syscalls_bpf__load(trace.skel); 5615 + err = augmented_syscalls__create_bpf_output(trace.evlist); 5616 + if (err == 0) 5617 + trace.syscalls.events.bpf_output = evlist__last(trace.evlist); 5561 5618 5562 - if (err < 0) { 5563 - libbpf_strerror(err, bf, sizeof(bf)); 5564 - pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf); 5565 - } else { 5566 - augmented_raw_syscalls_bpf__attach(trace.skel); 5567 - trace__add_syscall_newtp(&trace); 5568 - } 5569 - } 5570 - 5571 - err = bpf__setup_bpf_output(trace.evlist); 5572 - if (err) { 5573 - libbpf_strerror(err, bf, sizeof(bf)); 5574 - pr_err("ERROR: Setup BPF output event failed: %s\n", bf); 5575 - goto out; 5576 - } 5577 - trace.syscalls.events.bpf_output = evlist__last(trace.evlist); 5578 - assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__")); 5579 5619 skip_augmentation: 5580 - #endif 5581 5620 err = -1; 5582 5621 5583 5622 if (trace.trace_pgfaults) { ··· 5744 5833 fclose(trace.output); 5745 5834 out: 5746 5835 trace__exit(&trace); 5747 - #ifdef HAVE_BPF_SKEL 5748 - augmented_raw_syscalls_bpf__destroy(trace.skel); 5749 - #endif 5836 + augmented_syscalls__cleanup(); 5750 5837 return err; 5751 5838 }
+1
tools/perf/util/Build
··· 176 176 177 177 ifeq ($(CONFIG_TRACE),y) 178 178 perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf-trace-summary.o 179 + perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_trace_augment.o 179 180 endif 180 181 181 182 ifeq ($(CONFIG_LIBTRACEEVENT),y)
+2 -1
tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
··· 7 7 */ 8 8 9 9 #include "vmlinux.h" 10 - #include "../trace_augment.h" 11 10 12 11 #include <bpf/bpf_helpers.h> 13 12 #include <linux/limits.h> ··· 25 26 #define is_power_of_2(n) (n != 0 && ((n & (n - 1)) == 0)) 26 27 27 28 #define MAX_CPUS 4096 29 + 30 + #define TRACE_AUG_MAX_BUF 32 /* for buffer augmentation in perf trace */ 28 31 29 32 /* bpf-output associated map */ 30 33 struct __augmented_syscalls__ {
+143
tools/perf/util/bpf_trace_augment.c
··· 1 + #include <bpf/libbpf.h> 2 + #include <internal/xyarray.h> 3 + 4 + #include "util/debug.h" 5 + #include "util/evlist.h" 6 + #include "util/trace_augment.h" 7 + 8 + #include "bpf_skel/augmented_raw_syscalls.skel.h" 9 + 10 + static struct augmented_raw_syscalls_bpf *skel; 11 + static struct evsel *bpf_output; 12 + 13 + int augmented_syscalls__prepare(void) 14 + { 15 + struct bpf_program *prog; 16 + char buf[128]; 17 + int err; 18 + 19 + skel = augmented_raw_syscalls_bpf__open(); 20 + if (!skel) { 21 + pr_debug("Failed to open augmented syscalls BPF skeleton\n"); 22 + return -errno; 23 + } 24 + 25 + /* 26 + * Disable attaching the BPF programs except for sys_enter and 27 + * sys_exit that tail call into this as necessary. 28 + */ 29 + bpf_object__for_each_program(prog, skel->obj) { 30 + if (prog != skel->progs.sys_enter && prog != skel->progs.sys_exit) 31 + bpf_program__set_autoattach(prog, /*autoattach=*/false); 32 + } 33 + 34 + err = augmented_raw_syscalls_bpf__load(skel); 35 + if (err < 0) { 36 + libbpf_strerror(err, buf, sizeof(buf)); 37 + pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", buf); 38 + return err; 39 + } 40 + 41 + augmented_raw_syscalls_bpf__attach(skel); 42 + return 0; 43 + } 44 + 45 + int augmented_syscalls__create_bpf_output(struct evlist *evlist) 46 + { 47 + int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/"); 48 + 49 + if (err) { 50 + pr_err("ERROR: Setup BPF output event failed: %d\n", err); 51 + return err; 52 + } 53 + 54 + bpf_output = evlist__last(evlist); 55 + assert(evsel__name_is(bpf_output, "__augmented_syscalls__")); 56 + 57 + return 0; 58 + } 59 + 60 + void augmented_syscalls__setup_bpf_output(void) 61 + { 62 + struct perf_cpu cpu; 63 + int i; 64 + 65 + if (bpf_output == NULL) 66 + return; 67 + 68 + /* 69 + * Set up the __augmented_syscalls__ BPF map to hold for each 70 + * CPU the bpf-output event's file descriptor. 
71 + */ 72 + perf_cpu_map__for_each_cpu(cpu, i, bpf_output->core.cpus) { 73 + int mycpu = cpu.cpu; 74 + 75 + bpf_map__update_elem(skel->maps.__augmented_syscalls__, 76 + &mycpu, sizeof(mycpu), 77 + xyarray__entry(bpf_output->core.fd, 78 + mycpu, 0), 79 + sizeof(__u32), BPF_ANY); 80 + } 81 + } 82 + 83 + int augmented_syscalls__set_filter_pids(unsigned int nr, pid_t *pids) 84 + { 85 + bool value = true; 86 + int err = 0; 87 + 88 + if (skel == NULL) 89 + return 0; 90 + 91 + for (size_t i = 0; i < nr; ++i) { 92 + err = bpf_map__update_elem(skel->maps.pids_filtered, &pids[i], 93 + sizeof(*pids), &value, sizeof(value), 94 + BPF_ANY); 95 + if (err) 96 + break; 97 + } 98 + return err; 99 + } 100 + 101 + int augmented_syscalls__get_map_fds(int *enter_fd, int *exit_fd, int *beauty_fd) 102 + { 103 + if (skel == NULL) 104 + return -1; 105 + 106 + *enter_fd = bpf_map__fd(skel->maps.syscalls_sys_enter); 107 + *exit_fd = bpf_map__fd(skel->maps.syscalls_sys_exit); 108 + *beauty_fd = bpf_map__fd(skel->maps.beauty_map_enter); 109 + 110 + if (*enter_fd < 0 || *exit_fd < 0 || *beauty_fd < 0) { 111 + pr_err("Error: failed to get syscall or beauty map fd\n"); 112 + return -1; 113 + } 114 + 115 + return 0; 116 + } 117 + 118 + struct bpf_program *augmented_syscalls__unaugmented(void) 119 + { 120 + return skel->progs.syscall_unaugmented; 121 + } 122 + 123 + struct bpf_program *augmented_syscalls__find_by_title(const char *name) 124 + { 125 + struct bpf_program *pos; 126 + const char *sec_name; 127 + 128 + if (skel->obj == NULL) 129 + return NULL; 130 + 131 + bpf_object__for_each_program(pos, skel->obj) { 132 + sec_name = bpf_program__section_name(pos); 133 + if (sec_name && !strcmp(sec_name, name)) 134 + return pos; 135 + } 136 + 137 + return NULL; 138 + } 139 + 140 + void augmented_syscalls__cleanup(void) 141 + { 142 + augmented_raw_syscalls_bpf__destroy(skel); 143 + }
+61 -1
tools/perf/util/trace_augment.h
··· 1 1 #ifndef TRACE_AUGMENT_H 2 2 #define TRACE_AUGMENT_H 3 3 4 - #define TRACE_AUG_MAX_BUF 32 /* for buffer augmentation in perf trace */ 4 + #include <linux/compiler.h> 5 + 6 + struct bpf_program; 7 + struct evlist; 8 + 9 + #ifdef HAVE_BPF_SKEL 10 + 11 + int augmented_syscalls__prepare(void); 12 + int augmented_syscalls__create_bpf_output(struct evlist *evlist); 13 + void augmented_syscalls__setup_bpf_output(void); 14 + int augmented_syscalls__set_filter_pids(unsigned int nr, pid_t *pids); 15 + int augmented_syscalls__get_map_fds(int *enter_fd, int *exit_fd, int *beauty_fd); 16 + struct bpf_program *augmented_syscalls__find_by_title(const char *name); 17 + struct bpf_program *augmented_syscalls__unaugmented(void); 18 + void augmented_syscalls__cleanup(void); 19 + 20 + #else /* !HAVE_BPF_SKEL */ 21 + 22 + static inline int augmented_syscalls__prepare(void) 23 + { 24 + return -1; 25 + } 26 + 27 + static inline int augmented_syscalls__create_bpf_output(struct evlist *evlist __maybe_unused) 28 + { 29 + return -1; 30 + } 31 + 32 + static inline void augmented_syscalls__setup_bpf_output(void) 33 + { 34 + } 35 + 36 + static inline int augmented_syscalls__set_filter_pids(unsigned int nr __maybe_unused, 37 + pid_t *pids __maybe_unused) 38 + { 39 + return 0; 40 + } 41 + 42 + static inline int augmented_syscalls__get_map_fds(int *enter_fd __maybe_unused, 43 + int *exit_fd __maybe_unused, 44 + int *beauty_fd __maybe_unused) 45 + { 46 + return -1; 47 + } 48 + 49 + static inline struct bpf_program * 50 + augmented_syscalls__find_by_title(const char *name __maybe_unused) 51 + { 52 + return NULL; 53 + } 54 + 55 + static inline struct bpf_program *augmented_syscalls__unaugmented(void) 56 + { 57 + return NULL; 58 + } 59 + 60 + static inline void augmented_syscalls__cleanup(void) 61 + { 62 + } 63 + 64 + #endif /* HAVE_BPF_SKEL */ 5 65 6 66 #endif