Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Add stacktrace ips test for kprobe_multi/kretprobe_multi

Add a test that attaches kprobe_multi/kretprobe_multi programs and
verifies that the ORC stacktrace matches the expected functions.

Add a bpf_testmod_stacktrace_test function to the bpf_testmod kernel
module which is called through several nested functions, so that we get
a reliable, known call path for the stacktrace.

The test is only for ORC unwinder to keep it simple.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/r/20251104215405.168643-4-jolsa@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>

authored by

Jiri Olsa and committed by
Alexei Starovoitov
c9e208fa 20a0bc10

+171
+104
tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <test_progs.h> 3 + #include "stacktrace_ips.skel.h" 4 + 5 + #ifdef __x86_64__ 6 + static int check_stacktrace_ips(int fd, __u32 key, int cnt, ...) 7 + { 8 + __u64 ips[PERF_MAX_STACK_DEPTH]; 9 + struct ksyms *ksyms = NULL; 10 + int i, err = 0; 11 + va_list args; 12 + 13 + /* sorted by addr */ 14 + ksyms = load_kallsyms_local(); 15 + if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local")) 16 + return -1; 17 + 18 + /* unlikely, but... */ 19 + if (!ASSERT_LT(cnt, PERF_MAX_STACK_DEPTH, "check_max")) 20 + return -1; 21 + 22 + err = bpf_map_lookup_elem(fd, &key, ips); 23 + if (err) 24 + goto out; 25 + 26 + /* 27 + * Compare all symbols provided via arguments with stacktrace ips, 28 + * and their related symbol addresses.t 29 + */ 30 + va_start(args, cnt); 31 + 32 + for (i = 0; i < cnt; i++) { 33 + unsigned long val; 34 + struct ksym *ksym; 35 + 36 + val = va_arg(args, unsigned long); 37 + ksym = ksym_search_local(ksyms, ips[i]); 38 + if (!ASSERT_OK_PTR(ksym, "ksym_search_local")) 39 + break; 40 + ASSERT_EQ(ksym->addr, val, "stack_cmp"); 41 + } 42 + 43 + va_end(args); 44 + 45 + out: 46 + free_kallsyms_local(ksyms); 47 + return err; 48 + } 49 + 50 + static void test_stacktrace_ips_kprobe_multi(bool retprobe) 51 + { 52 + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, 53 + .retprobe = retprobe 54 + ); 55 + LIBBPF_OPTS(bpf_test_run_opts, topts); 56 + struct stacktrace_ips *skel; 57 + 58 + skel = stacktrace_ips__open_and_load(); 59 + if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load")) 60 + return; 61 + 62 + if (!skel->kconfig->CONFIG_UNWINDER_ORC) { 63 + test__skip(); 64 + goto cleanup; 65 + } 66 + 67 + skel->links.kprobe_multi_test = bpf_program__attach_kprobe_multi_opts( 68 + skel->progs.kprobe_multi_test, 69 + "bpf_testmod_stacktrace_test", &opts); 70 + if (!ASSERT_OK_PTR(skel->links.kprobe_multi_test, "bpf_program__attach_kprobe_multi_opts")) 71 + goto cleanup; 72 + 73 + trigger_module_test_read(1); 74 + 75 + 
load_kallsyms(); 76 + 77 + check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4, 78 + ksym_get_addr("bpf_testmod_stacktrace_test_3"), 79 + ksym_get_addr("bpf_testmod_stacktrace_test_2"), 80 + ksym_get_addr("bpf_testmod_stacktrace_test_1"), 81 + ksym_get_addr("bpf_testmod_test_read")); 82 + 83 + cleanup: 84 + stacktrace_ips__destroy(skel); 85 + } 86 + 87 + static void __test_stacktrace_ips(void) 88 + { 89 + if (test__start_subtest("kprobe_multi")) 90 + test_stacktrace_ips_kprobe_multi(false); 91 + if (test__start_subtest("kretprobe_multi")) 92 + test_stacktrace_ips_kprobe_multi(true); 93 + } 94 + #else 95 + static void __test_stacktrace_ips(void) 96 + { 97 + test__skip(); 98 + } 99 + #endif 100 + 101 + void test_stacktrace_ips(void) 102 + { 103 + __test_stacktrace_ips(); 104 + }
+41
tools/testing/selftests/bpf/progs/stacktrace_ips.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif

/* One stack trace entry: up to PERF_MAX_STACK_DEPTH instruction pointers. */
typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];

/* Stack traces captured by bpf_get_stackid(), keyed by the returned id. */
struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 16384);
	__type(key, __u32);
	__type(value, stack_trace_t);
} stackmap SEC(".maps");

/* Resolved at load time from the running kernel's config (weak: may be 0). */
extern bool CONFIG_UNWINDER_ORC __kconfig __weak;

/*
 * This function is here to have CONFIG_UNWINDER_ORC
 * used and added to object BTF.
 */
int unused(void)
{
	return CONFIG_UNWINDER_ORC ? 0 : 1;
}

/* Id of the last captured stack trace; read by the user space test. */
__u32 stack_key;

SEC("kprobe.multi")
int kprobe_multi_test(struct pt_regs *ctx)
{
	/* Capture the current kernel stack and publish its map key. */
	stack_key = bpf_get_stackid(ctx, &stackmap, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";
+26
tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
/*
 * Fixed call chain used as an attach target for the stacktrace test:
 * _test_1() -> _test_2() -> _test_3() -> _test(). Each function is
 * noinline and ends with an empty asm statement — presumably to keep the
 * compiler from inlining or tail-calling the chain away, so every frame
 * stays visible to the unwinder (NOTE(review): confirm the asm is needed
 * on all supported compilers).
 */
noinline void bpf_testmod_stacktrace_test(void)
{
	/* used for stacktrace test as attach function */
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_3(void)
{
	bpf_testmod_stacktrace_test();
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_2(void)
{
	bpf_testmod_stacktrace_test_3();
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_1(void)
{
	bpf_testmod_stacktrace_test_2();
	asm volatile ("");
}