Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: lazy-load trigger bench BPF programs

Instead of front-loading all possible benchmarking BPF programs for
trigger benchmarks, explicitly specify which BPF programs are used by
a specific benchmark and load only those.

This allows us to be more flexible in supporting older kernels, where some
program types might not be possible to load (e.g., those that rely on
a newly added kfunc).

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20240326162151.3981687-5-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Andrii Nakryiko and committed by
Alexei Starovoitov
b4ccf915 208c4391

+42 -12
+33 -3
tools/testing/selftests/bpf/benchs/bench_trigger.c
··· 133 133 134 134 static void setup_ctx(void) 135 135 { 136 - int err; 137 - 138 136 setup_libbpf(); 139 137 140 138 ctx.skel = trigger_bench__open(); ··· 141 143 exit(1); 142 144 } 143 145 146 + /* default "driver" BPF program */ 147 + bpf_program__set_autoload(ctx.skel->progs.trigger_driver, true); 148 + 144 149 ctx.skel->rodata->batch_iters = args.batch_iters; 150 + } 151 + 152 + static void load_ctx(void) 153 + { 154 + int err; 145 155 146 156 err = trigger_bench__load(ctx.skel); 147 157 if (err) { ··· 178 172 static void trigger_kernel_count_setup(void) 179 173 { 180 174 setup_ctx(); 175 + bpf_program__set_autoload(ctx.skel->progs.trigger_driver, false); 176 + bpf_program__set_autoload(ctx.skel->progs.trigger_count, true); 177 + load_ctx(); 181 178 /* override driver program */ 182 179 ctx.driver_prog_fd = bpf_program__fd(ctx.skel->progs.trigger_count); 183 180 } ··· 188 179 static void trigger_kprobe_setup(void) 189 180 { 190 181 setup_ctx(); 182 + bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kprobe, true); 183 + load_ctx(); 191 184 attach_bpf(ctx.skel->progs.bench_trigger_kprobe); 192 185 } 193 186 194 187 static void trigger_kretprobe_setup(void) 195 188 { 196 189 setup_ctx(); 190 + bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kretprobe, true); 191 + load_ctx(); 197 192 attach_bpf(ctx.skel->progs.bench_trigger_kretprobe); 198 193 } 199 194 200 195 static void trigger_kprobe_multi_setup(void) 201 196 { 202 197 setup_ctx(); 198 + bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kprobe_multi, true); 199 + load_ctx(); 203 200 attach_bpf(ctx.skel->progs.bench_trigger_kprobe_multi); 204 201 } 205 202 206 203 static void trigger_kretprobe_multi_setup(void) 207 204 { 208 205 setup_ctx(); 206 + bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kretprobe_multi, true); 207 + load_ctx(); 209 208 attach_bpf(ctx.skel->progs.bench_trigger_kretprobe_multi); 210 209 } 211 210 212 211 static void trigger_fentry_setup(void) 213 212 { 214 
213 setup_ctx(); 214 + bpf_program__set_autoload(ctx.skel->progs.bench_trigger_fentry, true); 215 + load_ctx(); 215 216 attach_bpf(ctx.skel->progs.bench_trigger_fentry); 216 217 } 217 218 218 219 static void trigger_fexit_setup(void) 219 220 { 220 221 setup_ctx(); 222 + bpf_program__set_autoload(ctx.skel->progs.bench_trigger_fexit, true); 223 + load_ctx(); 221 224 attach_bpf(ctx.skel->progs.bench_trigger_fexit); 222 225 } 223 226 ··· 300 279 { 301 280 size_t uprobe_offset; 302 281 struct bpf_link *link; 282 + int err; 303 283 304 284 setup_libbpf(); 305 285 306 - ctx.skel = trigger_bench__open_and_load(); 286 + ctx.skel = trigger_bench__open(); 307 287 if (!ctx.skel) { 308 288 fprintf(stderr, "failed to open skeleton\n"); 289 + exit(1); 290 + } 291 + 292 + bpf_program__set_autoload(ctx.skel->progs.bench_trigger_uprobe, true); 293 + 294 + err = trigger_bench__load(ctx.skel); 295 + if (err) { 296 + fprintf(stderr, "failed to load skeleton\n"); 309 297 exit(1); 310 298 } 311 299
+9 -9
tools/testing/selftests/bpf/progs/trigger_bench.c
··· 25 25 __sync_add_and_fetch(&hits[cpu & CPU_MASK].value, 1); 26 26 } 27 27 28 - SEC("uprobe") 28 + SEC("?uprobe") 29 29 int bench_trigger_uprobe(void *ctx) 30 30 { 31 31 inc_counter(); ··· 34 34 35 35 const volatile int batch_iters = 0; 36 36 37 - SEC("raw_tp") 37 + SEC("?raw_tp") 38 38 int trigger_count(void *ctx) 39 39 { 40 40 int i; ··· 45 45 return 0; 46 46 } 47 47 48 - SEC("raw_tp") 48 + SEC("?raw_tp") 49 49 int trigger_driver(void *ctx) 50 50 { 51 51 int i; ··· 56 56 return 0; 57 57 } 58 58 59 - SEC("kprobe/bpf_get_numa_node_id") 59 + SEC("?kprobe/bpf_get_numa_node_id") 60 60 int bench_trigger_kprobe(void *ctx) 61 61 { 62 62 inc_counter(); 63 63 return 0; 64 64 } 65 65 66 - SEC("kretprobe/bpf_get_numa_node_id") 66 + SEC("?kretprobe/bpf_get_numa_node_id") 67 67 int bench_trigger_kretprobe(void *ctx) 68 68 { 69 69 inc_counter(); 70 70 return 0; 71 71 } 72 72 73 - SEC("kprobe.multi/bpf_get_numa_node_id") 73 + SEC("?kprobe.multi/bpf_get_numa_node_id") 74 74 int bench_trigger_kprobe_multi(void *ctx) 75 75 { 76 76 inc_counter(); 77 77 return 0; 78 78 } 79 79 80 - SEC("kretprobe.multi/bpf_get_numa_node_id") 80 + SEC("?kretprobe.multi/bpf_get_numa_node_id") 81 81 int bench_trigger_kretprobe_multi(void *ctx) 82 82 { 83 83 inc_counter(); 84 84 return 0; 85 85 } 86 86 87 - SEC("fentry/bpf_get_numa_node_id") 87 + SEC("?fentry/bpf_get_numa_node_id") 88 88 int bench_trigger_fentry(void *ctx) 89 89 { 90 90 inc_counter(); 91 91 return 0; 92 92 } 93 93 94 - SEC("fexit/bpf_get_numa_node_id") 94 + SEC("?fexit/bpf_get_numa_node_id") 95 95 int bench_trigger_fexit(void *ctx) 96 96 { 97 97 inc_counter();