Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Split test_attach_probe into multi subtests

In order to adapt to older kernels, we now split the "attach_probe"
test into multiple subtests:

manual // manual attach tests for kprobe/uprobe
auto // auto-attach tests for kprobe and uprobe
kprobe-sleepable // kprobe sleepable test
uprobe-lib // uprobe tests for library function by name
uprobe-sleepable // uprobe sleepable test
uprobe-ref_ctr // uprobe ref_ctr test

As a sleepable kprobe needs the BPF_F_SLEEPABLE flag set before loading,
we need to move it to a standalone skel file, in case it is not
supported by the kernel, which would otherwise make the whole load fail.

Therefore, we can enable only a subset of the subtests on older kernels.

Signed-off-by: Menglong Dong <imagedong@tencent.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Reviewed-by: Biao Jiang <benbjiang@tencent.com>
Link: https://lore.kernel.org/bpf/20230306064833.7932-3-imagedong@tencent.com

authored by

Menglong Dong and committed by
Andrii Nakryiko
7391ec63 f8b299bc

+218 -114
+183 -103
tools/testing/selftests/bpf/prog_tests/attach_probe.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <test_progs.h> 3 + #include "test_attach_kprobe_sleepable.skel.h" 3 4 #include "test_attach_probe.skel.h" 4 5 5 6 /* this is how USDT semaphore is actually defined, except volatile modifier */ ··· 24 23 asm volatile (""); 25 24 } 26 25 26 + /* attach point for ref_ctr */ 27 + static noinline void trigger_func4(void) 28 + { 29 + asm volatile (""); 30 + } 31 + 27 32 static char test_data[] = "test_data"; 28 33 29 - void test_attach_probe(void) 34 + /* manual attach kprobe/kretprobe/uprobe/uretprobe testings */ 35 + static void test_attach_probe_manual(struct test_attach_probe *skel) 30 36 { 31 37 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); 32 38 struct bpf_link *kprobe_link, *kretprobe_link; 33 39 struct bpf_link *uprobe_link, *uretprobe_link; 34 - struct test_attach_probe* skel; 35 - ssize_t uprobe_offset, ref_ctr_offset; 36 - struct bpf_link *uprobe_err_link; 37 - FILE *devnull; 38 - bool legacy; 39 - 40 - /* Check if new-style kprobe/uprobe API is supported. 41 - * Kernels that support new FD-based kprobe and uprobe BPF attachment 42 - * through perf_event_open() syscall expose 43 - * /sys/bus/event_source/devices/kprobe/type and 44 - * /sys/bus/event_source/devices/uprobe/type files, respectively. They 45 - * contain magic numbers that are passed as "type" field of 46 - * perf_event_attr. Lack of such file in the system indicates legacy 47 - * kernel with old-style kprobe/uprobe attach interface through 48 - * creating per-probe event through tracefs. For such cases 49 - * ref_ctr_offset feature is not supported, so we don't test it. 
50 - */ 51 - legacy = access("/sys/bus/event_source/devices/kprobe/type", F_OK) != 0; 40 + ssize_t uprobe_offset; 52 41 53 42 uprobe_offset = get_uprobe_offset(&trigger_func); 54 43 if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset")) 55 - return; 56 - 57 - ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr); 58 - if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset")) 59 - return; 60 - 61 - skel = test_attach_probe__open(); 62 - if (!ASSERT_OK_PTR(skel, "skel_open")) 63 - return; 64 - 65 - /* sleepable kprobe test case needs flags set before loading */ 66 - if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable, 67 - BPF_F_SLEEPABLE), "kprobe_sleepable_flags")) 68 - goto cleanup; 69 - 70 - if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load")) 71 - goto cleanup; 72 - if (!ASSERT_OK_PTR(skel->bss, "check_bss")) 73 44 goto cleanup; 74 45 75 46 /* manual-attach kprobe/kretprobe */ ··· 59 86 goto cleanup; 60 87 skel->links.handle_kretprobe = kretprobe_link; 61 88 62 - /* auto-attachable kprobe and kretprobe */ 63 - skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto); 64 - ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto"); 65 - 66 - skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto); 67 - ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto"); 68 - 69 - if (!legacy) 70 - ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before"); 71 - 89 + /* manual-attach uprobe/uretprobe */ 90 + uprobe_opts.ref_ctr_offset = 0; 72 91 uprobe_opts.retprobe = false; 73 - uprobe_opts.ref_ctr_offset = legacy ? 
0 : ref_ctr_offset; 74 92 uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 75 93 0 /* self pid */, 76 94 "/proc/self/exe", ··· 71 107 goto cleanup; 72 108 skel->links.handle_uprobe = uprobe_link; 73 109 74 - if (!legacy) 75 - ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after"); 76 - 77 - /* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */ 78 110 uprobe_opts.retprobe = true; 79 - uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset; 80 111 uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, 81 112 -1 /* any pid */, 82 113 "/proc/self/exe", ··· 80 121 goto cleanup; 81 122 skel->links.handle_uretprobe = uretprobe_link; 82 123 83 - /* verify auto-attach fails for old-style uprobe definition */ 84 - uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname); 85 - if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP, 86 - "auto-attach should fail for old-style name")) 87 - goto cleanup; 88 - 124 + /* attach uprobe by function name manually */ 89 125 uprobe_opts.func_name = "trigger_func2"; 90 126 uprobe_opts.retprobe = false; 91 127 uprobe_opts.ref_ctr_offset = 0; ··· 92 138 if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname")) 93 139 goto cleanup; 94 140 141 + /* trigger & validate kprobe && kretprobe */ 142 + usleep(1); 143 + 144 + /* trigger & validate uprobe & uretprobe */ 145 + trigger_func(); 146 + 147 + /* trigger & validate uprobe attached by name */ 148 + trigger_func2(); 149 + 150 + ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res"); 151 + ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res"); 152 + ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res"); 153 + ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res"); 154 + ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res"); 155 + 156 + cleanup: 157 + } 158 + 159 + static void test_attach_probe_auto(struct test_attach_probe *skel) 160 + { 161 + struct bpf_link 
*uprobe_err_link; 162 + 163 + /* auto-attachable kprobe and kretprobe */ 164 + skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto); 165 + ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto"); 166 + 167 + skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto); 168 + ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto"); 169 + 170 + /* verify auto-attach fails for old-style uprobe definition */ 171 + uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname); 172 + if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP, 173 + "auto-attach should fail for old-style name")) 174 + return; 175 + 95 176 /* verify auto-attach works */ 96 177 skel->links.handle_uretprobe_byname = 97 178 bpf_program__attach(skel->progs.handle_uretprobe_byname); 98 179 if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname")) 99 - goto cleanup; 180 + return; 181 + 182 + /* trigger & validate kprobe && kretprobe */ 183 + usleep(1); 184 + 185 + /* trigger & validate uprobe attached by name */ 186 + trigger_func2(); 187 + 188 + ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res"); 189 + ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res"); 190 + ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res"); 191 + } 192 + 193 + static void test_uprobe_lib(struct test_attach_probe *skel) 194 + { 195 + DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); 196 + FILE *devnull; 100 197 101 198 /* test attach by name for a library function, using the library 102 199 * as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo(). 
··· 160 155 "libc.so.6", 161 156 0, &uprobe_opts); 162 157 if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2")) 163 - goto cleanup; 158 + return; 164 159 165 160 uprobe_opts.func_name = "fclose"; 166 161 uprobe_opts.retprobe = true; ··· 170 165 "libc.so.6", 171 166 0, &uprobe_opts); 172 167 if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2")) 173 - goto cleanup; 174 - 175 - /* sleepable kprobes should not attach successfully */ 176 - skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable); 177 - if (!ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable")) 178 - goto cleanup; 179 - 180 - /* test sleepable uprobe and uretprobe variants */ 181 - skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable); 182 - if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable")) 183 - goto cleanup; 184 - 185 - skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3); 186 - if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3")) 187 - goto cleanup; 188 - 189 - skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable); 190 - if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable")) 191 - goto cleanup; 192 - 193 - skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3); 194 - if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3")) 195 - goto cleanup; 196 - 197 - skel->bss->user_ptr = test_data; 198 - 199 - /* trigger & validate kprobe && kretprobe */ 200 - usleep(1); 168 + return; 201 169 202 170 /* trigger & validate shared library u[ret]probes attached by name */ 203 171 devnull = fopen("/dev/null", "r"); 204 172 fclose(devnull); 205 
173 206 - /* trigger & validate uprobe & uretprobe */ 207 - trigger_func(); 174 + ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res"); 175 + ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res"); 176 + } 208 177 209 - /* trigger & validate uprobe attached by name */ 210 - trigger_func2(); 178 + static void test_uprobe_ref_ctr(struct test_attach_probe *skel) 179 + { 180 + DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); 181 + struct bpf_link *uprobe_link, *uretprobe_link; 182 + ssize_t uprobe_offset, ref_ctr_offset; 183 + 184 + uprobe_offset = get_uprobe_offset(&trigger_func4); 185 + if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset_ref_ctr")) 186 + return; 187 + 188 + ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr); 189 + if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset")) 190 + return; 191 + 192 + ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before"); 193 + 194 + uprobe_opts.retprobe = false; 195 + uprobe_opts.ref_ctr_offset = ref_ctr_offset; 196 + uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_ref_ctr, 197 + 0 /* self pid */, 198 + "/proc/self/exe", 199 + uprobe_offset, 200 + &uprobe_opts); 201 + if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_ref_ctr")) 202 + return; 203 + skel->links.handle_uprobe_ref_ctr = uprobe_link; 204 + 205 + ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after"); 206 + 207 + /* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */ 208 + uprobe_opts.retprobe = true; 209 + uprobe_opts.ref_ctr_offset = ref_ctr_offset; 210 + uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_ref_ctr, 211 + -1 /* any pid */, 212 + "/proc/self/exe", 213 + uprobe_offset, &uprobe_opts); 214 + if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_ref_ctr")) 215 + return; 216 + skel->links.handle_uretprobe_ref_ctr = uretprobe_link; 217 + } 218 + 219 + static void test_kprobe_sleepable(void) 220 + { 221 + struct test_attach_kprobe_sleepable *skel; 222 
+ 223 + skel = test_attach_kprobe_sleepable__open(); 224 + if (!ASSERT_OK_PTR(skel, "skel_kprobe_sleepable_open")) 225 + return; 226 + 227 + /* sleepable kprobe test case needs flags set before loading */ 228 + if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable, 229 + BPF_F_SLEEPABLE), "kprobe_sleepable_flags")) 230 + goto cleanup; 231 + 232 + if (!ASSERT_OK(test_attach_kprobe_sleepable__load(skel), 233 + "skel_kprobe_sleepable_load")) 234 + goto cleanup; 235 + 236 + /* sleepable kprobes should not attach successfully */ 237 + skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable); 238 + ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable"); 239 + 240 + cleanup: 241 + test_attach_kprobe_sleepable__destroy(skel); 242 + } 243 + 244 + static void test_uprobe_sleepable(struct test_attach_probe *skel) 245 + { 246 + /* test sleepable uprobe and uretprobe variants */ 247 + skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable); 248 + if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable")) 249 + return; 250 + 251 + skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3); 252 + if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3")) 253 + return; 254 + 255 + skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable); 256 + if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable")) 257 + return; 258 + 259 + skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3); 260 + if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3")) 261 + return; 262 + 263 + skel->bss->user_ptr = test_data; 211 264 212 265 /* trigger & validate sleepable uprobe attached by name */ 213 
266 trigger_func3(); 214 267 215 - ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res"); 216 - ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res"); 217 - ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res"); 218 - ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res"); 219 - ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res"); 220 - ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res"); 221 - ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res"); 222 - ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res"); 223 - ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res"); 224 - ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res"); 225 268 ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res"); 226 269 ASSERT_EQ(skel->bss->uprobe_byname3_res, 10, "check_uprobe_byname3_res"); 227 270 ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 11, "check_uretprobe_byname3_sleepable_res"); 228 271 ASSERT_EQ(skel->bss->uretprobe_byname3_res, 12, "check_uretprobe_byname3_res"); 272 + } 273 + 274 + void test_attach_probe(void) 275 + { 276 + struct test_attach_probe *skel; 277 + 278 + skel = test_attach_probe__open(); 279 + if (!ASSERT_OK_PTR(skel, "skel_open")) 280 + return; 281 + 282 + if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load")) 283 + goto cleanup; 284 + if (!ASSERT_OK_PTR(skel->bss, "check_bss")) 285 + goto cleanup; 286 + 287 + if (test__start_subtest("manual")) 288 + test_attach_probe_manual(skel); 289 + if (test__start_subtest("auto")) 290 + test_attach_probe_auto(skel); 291 + if (test__start_subtest("kprobe-sleepable")) 292 + test_kprobe_sleepable(); 293 + if (test__start_subtest("uprobe-lib")) 294 + test_uprobe_lib(skel); 295 + if (test__start_subtest("uprobe-sleepable")) 296 + test_uprobe_sleepable(skel); 297 + if (test__start_subtest("uprobe-ref_ctr")) 298 + test_uprobe_ref_ctr(skel); 229 299 230 
300 cleanup: 231 301 test_attach_probe__destroy(skel);
+23
tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (c) 2017 Facebook 3 + 4 + #include "vmlinux.h" 5 + #include <bpf/bpf_helpers.h> 6 + #include <bpf/bpf_tracing.h> 7 + #include <bpf/bpf_core_read.h> 8 + #include "bpf_misc.h" 9 + 10 + int kprobe_res = 0; 11 + 12 + /** 13 + * This program will be manually made sleepable on the userspace side 14 + * and should thus be unattachable. 15 + */ 16 + SEC("kprobe/" SYS_PREFIX "sys_nanosleep") 17 + int handle_kprobe_sleepable(struct pt_regs *ctx) 18 + { 19 + kprobe_res = 1; 20 + return 0; 21 + } 22 + 23 + char _license[] SEC("license") = "GPL";
+12 -11
tools/testing/selftests/bpf/progs/test_attach_probe.c
··· 37 37 return 0; 38 38 } 39 39 40 - /** 41 - * This program will be manually made sleepable on the userspace side 42 - * and should thus be unattachable. 43 - */ 44 - SEC("kprobe/" SYS_PREFIX "sys_nanosleep") 45 - int handle_kprobe_sleepable(struct pt_regs *ctx) 46 - { 47 - kprobe_res = 2; 48 - return 0; 49 - } 50 - 51 40 SEC("kretprobe") 52 41 int handle_kretprobe(struct pt_regs *ctx) 53 42 { ··· 62 73 int handle_uretprobe(struct pt_regs *ctx) 63 74 { 64 75 uretprobe_res = 4; 76 + return 0; 77 + } 78 + 79 + SEC("uprobe") 80 + int handle_uprobe_ref_ctr(struct pt_regs *ctx) 81 + { 82 + return 0; 83 + } 84 + 85 + SEC("uretprobe") 86 + int handle_uretprobe_ref_ctr(struct pt_regs *ctx) 87 + { 65 88 return 0; 66 89 } 67 90