Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Add uprobe_regs_equal test

Change uretprobe_regs_trigger to allow testing both
uprobe and uretprobe, and rename it to uprobe_regs_equal.

We check that both uprobe and uretprobe probes (bpf programs)
see the expected registers, with a few exceptions.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20250720112133.244369-19-jolsa@kernel.org

authored by

Jiri Olsa and committed by
Peter Zijlstra
275eae67 875e1705

+44 -16
+42 -14
tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
··· 22 22 23 23 #pragma GCC diagnostic ignored "-Wattributes" 24 24 25 - __naked unsigned long uretprobe_regs_trigger(void) 25 + __attribute__((aligned(16))) 26 + __nocf_check __weak __naked unsigned long uprobe_regs_trigger(void) 26 27 { 27 28 asm volatile ( 29 + ".byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n" /* nop5 */ 28 30 "movq $0xdeadbeef, %rax\n" 29 31 "ret\n" 30 32 ); 31 33 } 32 34 33 - __naked void uretprobe_regs(struct pt_regs *before, struct pt_regs *after) 35 + __naked void uprobe_regs(struct pt_regs *before, struct pt_regs *after) 34 36 { 35 37 asm volatile ( 36 38 "movq %r15, 0(%rdi)\n" ··· 53 51 "movq $0, 120(%rdi)\n" /* orig_rax */ 54 52 "movq $0, 128(%rdi)\n" /* rip */ 55 53 "movq $0, 136(%rdi)\n" /* cs */ 54 + "pushq %rax\n" 56 55 "pushf\n" 57 56 "pop %rax\n" 58 57 "movq %rax, 144(%rdi)\n" /* eflags */ 58 + "pop %rax\n" 59 59 "movq %rsp, 152(%rdi)\n" /* rsp */ 60 60 "movq $0, 160(%rdi)\n" /* ss */ 61 61 62 62 /* save 2nd argument */ 63 63 "pushq %rsi\n" 64 - "call uretprobe_regs_trigger\n" 64 + "call uprobe_regs_trigger\n" 65 65 66 66 /* save return value and load 2nd argument pointer to rax */ 67 67 "pushq %rax\n" ··· 103 99 ); 104 100 } 105 101 106 - static void test_uretprobe_regs_equal(void) 102 + static void test_uprobe_regs_equal(bool retprobe) 107 103 { 104 + LIBBPF_OPTS(bpf_uprobe_opts, opts, 105 + .retprobe = retprobe, 106 + ); 108 107 struct uprobe_syscall *skel = NULL; 109 108 struct pt_regs before = {}, after = {}; 110 109 unsigned long *pb = (unsigned long *) &before; 111 110 unsigned long *pa = (unsigned long *) &after; 112 111 unsigned long *pp; 112 + unsigned long offset; 113 113 unsigned int i, cnt; 114 - int err; 114 + 115 + offset = get_uprobe_offset(&uprobe_regs_trigger); 116 + if (!ASSERT_GE(offset, 0, "get_uprobe_offset")) 117 + return; 115 118 116 119 skel = uprobe_syscall__open_and_load(); 117 120 if (!ASSERT_OK_PTR(skel, "uprobe_syscall__open_and_load")) 118 121 goto cleanup; 119 122 120 - err = uprobe_syscall__attach(skel); 121 - 
if (!ASSERT_OK(err, "uprobe_syscall__attach")) 123 + skel->links.probe = bpf_program__attach_uprobe_opts(skel->progs.probe, 124 + 0, "/proc/self/exe", offset, &opts); 125 + if (!ASSERT_OK_PTR(skel->links.probe, "bpf_program__attach_uprobe_opts")) 122 126 goto cleanup; 123 127 124 - uretprobe_regs(&before, &after); 128 + /* make sure uprobe gets optimized */ 129 + if (!retprobe) 130 + uprobe_regs_trigger(); 131 + 132 + uprobe_regs(&before, &after); 125 133 126 134 pp = (unsigned long *) &skel->bss->regs; 127 135 cnt = sizeof(before)/sizeof(*pb); ··· 142 126 unsigned int offset = i * sizeof(unsigned long); 143 127 144 128 /* 145 - * Check register before and after uretprobe_regs_trigger call 129 + * Check register before and after uprobe_regs_trigger call 146 130 * that triggers the uretprobe. 147 131 */ 148 132 switch (offset) { ··· 156 140 157 141 /* 158 142 * Check register seen from bpf program and register after 159 - * uretprobe_regs_trigger call 143 + * uprobe_regs_trigger call (with rax exception, check below). 
160 144 */ 161 145 switch (offset) { 162 146 /* ··· 169 153 case offsetof(struct pt_regs, rsp): 170 154 case offsetof(struct pt_regs, ss): 171 155 break; 156 + /* 157 + * uprobe does not see return value in rax, it needs to see the 158 + * original (before) rax value 159 + */ 160 + case offsetof(struct pt_regs, rax): 161 + if (!retprobe) { 162 + ASSERT_EQ(pp[i], pb[i], "uprobe rax prog-before value check"); 163 + break; 164 + } 172 165 default: 173 166 if (!ASSERT_EQ(pp[i], pa[i], "register prog-after value check")) 174 167 fprintf(stdout, "failed register offset %u\n", offset); ··· 215 190 unsigned long cnt = sizeof(before)/sizeof(*pb); 216 191 unsigned int i, err, offset; 217 192 218 - offset = get_uprobe_offset(uretprobe_regs_trigger); 193 + offset = get_uprobe_offset(uprobe_regs_trigger); 219 194 220 195 err = write_bpf_testmod_uprobe(offset); 221 196 if (!ASSERT_OK(err, "register_uprobe")) 222 197 return; 223 198 224 - uretprobe_regs(&before, &after); 199 + uprobe_regs(&before, &after); 225 200 226 201 err = write_bpf_testmod_uprobe(0); 227 202 if (!ASSERT_OK(err, "unregister_uprobe")) ··· 641 616 /* Run all the tests with shadow stack in place. 
*/ 642 617 shstk_is_enabled = true; 643 618 644 - test_uretprobe_regs_equal(); 619 + test_uprobe_regs_equal(false); 620 + test_uprobe_regs_equal(true); 645 621 test_uretprobe_regs_change(); 646 622 test_uretprobe_syscall_call(); 647 623 ··· 798 772 static void __test_uprobe_syscall(void) 799 773 { 800 774 if (test__start_subtest("uretprobe_regs_equal")) 801 - test_uretprobe_regs_equal(); 775 + test_uprobe_regs_equal(true); 802 776 if (test__start_subtest("uretprobe_regs_change")) 803 777 test_uretprobe_regs_change(); 804 778 if (test__start_subtest("uretprobe_syscall_call")) ··· 817 791 test_uprobe_race(); 818 792 if (test__start_subtest("uprobe_sigill")) 819 793 test_uprobe_sigill(); 794 + if (test__start_subtest("uprobe_regs_equal")) 795 + test_uprobe_regs_equal(false); 820 796 } 821 797 #else 822 798 static void __test_uprobe_syscall(void)
+2 -2
tools/testing/selftests/bpf/progs/uprobe_syscall.c
··· 7 7 8 8 char _license[] SEC("license") = "GPL"; 9 9 10 - SEC("uretprobe//proc/self/exe:uretprobe_regs_trigger") 11 - int uretprobe(struct pt_regs *ctx) 10 + SEC("uprobe") 11 + int probe(struct pt_regs *ctx) 12 12 { 13 13 __builtin_memcpy(&regs, ctx, sizeof(regs)); 14 14 return 0;