Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Add uprobe/usdt syscall tests

Adding tests for optimized uprobe/usdt probes.

Checking that we get expected trampoline and attached bpf programs
get executed properly.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20250720112133.244369-15-jolsa@kernel.org

Authored by Jiri Olsa and committed by Peter Zijlstra
d5c86c33 7932c4cf

+335 -1
+283 -1
tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
··· 8 8 #include <asm/ptrace.h> 9 9 #include <linux/compiler.h> 10 10 #include <linux/stringify.h> 11 + #include <linux/kernel.h> 11 12 #include <sys/wait.h> 12 13 #include <sys/syscall.h> 13 14 #include <sys/prctl.h> 14 15 #include <asm/prctl.h> 15 16 #include "uprobe_syscall.skel.h" 16 17 #include "uprobe_syscall_executed.skel.h" 18 + 19 + #define USDT_NOP .byte 0x0f, 0x1f, 0x44, 0x00, 0x00 20 + #include "usdt.h" 21 + 22 + #pragma GCC diagnostic ignored "-Wattributes" 17 23 18 24 __naked unsigned long uretprobe_regs_trigger(void) 19 25 { ··· 311 305 close(go[0]); 312 306 } 313 307 308 + #define TRAMP "[uprobes-trampoline]" 309 + 310 + __attribute__((aligned(16))) 311 + __nocf_check __weak __naked void uprobe_test(void) 312 + { 313 + asm volatile (" \n" 314 + ".byte 0x0f, 0x1f, 0x44, 0x00, 0x00 \n" 315 + "ret \n" 316 + ); 317 + } 318 + 319 + __attribute__((aligned(16))) 320 + __nocf_check __weak void usdt_test(void) 321 + { 322 + USDT(optimized_uprobe, usdt); 323 + } 324 + 325 + static int find_uprobes_trampoline(void *tramp_addr) 326 + { 327 + void *start, *end; 328 + char line[128]; 329 + int ret = -1; 330 + FILE *maps; 331 + 332 + maps = fopen("/proc/self/maps", "r"); 333 + if (!maps) { 334 + fprintf(stderr, "cannot open maps\n"); 335 + return -1; 336 + } 337 + 338 + while (fgets(line, sizeof(line), maps)) { 339 + int m = -1; 340 + 341 + /* We care only about private r-x mappings. 
*/ 342 + if (sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n", &start, &end, &m) != 2) 343 + continue; 344 + if (m < 0) 345 + continue; 346 + if (!strncmp(&line[m], TRAMP, sizeof(TRAMP)-1) && (start == tramp_addr)) { 347 + ret = 0; 348 + break; 349 + } 350 + } 351 + 352 + fclose(maps); 353 + return ret; 354 + } 355 + 356 + static unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 }; 357 + 358 + static void *find_nop5(void *fn) 359 + { 360 + int i; 361 + 362 + for (i = 0; i < 10; i++) { 363 + if (!memcmp(nop5, fn + i, 5)) 364 + return fn + i; 365 + } 366 + return NULL; 367 + } 368 + 369 + typedef void (__attribute__((nocf_check)) *trigger_t)(void); 370 + 371 + static bool shstk_is_enabled; 372 + 373 + static void *check_attach(struct uprobe_syscall_executed *skel, trigger_t trigger, 374 + void *addr, int executed) 375 + { 376 + struct __arch_relative_insn { 377 + __u8 op; 378 + __s32 raddr; 379 + } __packed *call; 380 + void *tramp = NULL; 381 + __u8 *bp; 382 + 383 + /* Uprobe gets optimized after first trigger, so let's press twice. */ 384 + trigger(); 385 + trigger(); 386 + 387 + /* Make sure bpf program got executed.. */ 388 + ASSERT_EQ(skel->bss->executed, executed, "executed"); 389 + 390 + if (shstk_is_enabled) { 391 + /* .. and check optimization is disabled under shadow stack. */ 392 + bp = (__u8 *) addr; 393 + ASSERT_EQ(*bp, 0xcc, "int3"); 394 + } else { 395 + /* .. and check the trampoline is as expected. 
*/ 396 + call = (struct __arch_relative_insn *) addr; 397 + tramp = (void *) (call + 1) + call->raddr; 398 + ASSERT_EQ(call->op, 0xe8, "call"); 399 + ASSERT_OK(find_uprobes_trampoline(tramp), "uprobes_trampoline"); 400 + } 401 + 402 + return tramp; 403 + } 404 + 405 + static void check_detach(void *addr, void *tramp) 406 + { 407 + /* [uprobes_trampoline] stays after detach */ 408 + ASSERT_OK(!shstk_is_enabled && find_uprobes_trampoline(tramp), "uprobes_trampoline"); 409 + ASSERT_OK(memcmp(addr, nop5, 5), "nop5"); 410 + } 411 + 412 + static void check(struct uprobe_syscall_executed *skel, struct bpf_link *link, 413 + trigger_t trigger, void *addr, int executed) 414 + { 415 + void *tramp; 416 + 417 + tramp = check_attach(skel, trigger, addr, executed); 418 + bpf_link__destroy(link); 419 + check_detach(addr, tramp); 420 + } 421 + 422 + static void test_uprobe_legacy(void) 423 + { 424 + struct uprobe_syscall_executed *skel = NULL; 425 + LIBBPF_OPTS(bpf_uprobe_opts, opts, 426 + .retprobe = true, 427 + ); 428 + struct bpf_link *link; 429 + unsigned long offset; 430 + 431 + offset = get_uprobe_offset(&uprobe_test); 432 + if (!ASSERT_GE(offset, 0, "get_uprobe_offset")) 433 + goto cleanup; 434 + 435 + /* uprobe */ 436 + skel = uprobe_syscall_executed__open_and_load(); 437 + if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load")) 438 + return; 439 + 440 + skel->bss->pid = getpid(); 441 + 442 + link = bpf_program__attach_uprobe_opts(skel->progs.test_uprobe, 443 + 0, "/proc/self/exe", offset, NULL); 444 + if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_opts")) 445 + goto cleanup; 446 + 447 + check(skel, link, uprobe_test, uprobe_test, 2); 448 + 449 + /* uretprobe */ 450 + skel->bss->executed = 0; 451 + 452 + link = bpf_program__attach_uprobe_opts(skel->progs.test_uretprobe, 453 + 0, "/proc/self/exe", offset, &opts); 454 + if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_opts")) 455 + goto cleanup; 456 + 457 + check(skel, link, uprobe_test, uprobe_test, 2); 
458 + 459 + cleanup: 460 + uprobe_syscall_executed__destroy(skel); 461 + } 462 + 463 + static void test_uprobe_multi(void) 464 + { 465 + struct uprobe_syscall_executed *skel = NULL; 466 + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); 467 + struct bpf_link *link; 468 + unsigned long offset; 469 + 470 + offset = get_uprobe_offset(&uprobe_test); 471 + if (!ASSERT_GE(offset, 0, "get_uprobe_offset")) 472 + goto cleanup; 473 + 474 + opts.offsets = &offset; 475 + opts.cnt = 1; 476 + 477 + skel = uprobe_syscall_executed__open_and_load(); 478 + if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load")) 479 + return; 480 + 481 + skel->bss->pid = getpid(); 482 + 483 + /* uprobe.multi */ 484 + link = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe_multi, 485 + 0, "/proc/self/exe", NULL, &opts); 486 + if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi")) 487 + goto cleanup; 488 + 489 + check(skel, link, uprobe_test, uprobe_test, 2); 490 + 491 + /* uretprobe.multi */ 492 + skel->bss->executed = 0; 493 + opts.retprobe = true; 494 + link = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe_multi, 495 + 0, "/proc/self/exe", NULL, &opts); 496 + if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi")) 497 + goto cleanup; 498 + 499 + check(skel, link, uprobe_test, uprobe_test, 2); 500 + 501 + cleanup: 502 + uprobe_syscall_executed__destroy(skel); 503 + } 504 + 505 + static void test_uprobe_session(void) 506 + { 507 + struct uprobe_syscall_executed *skel = NULL; 508 + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts, 509 + .session = true, 510 + ); 511 + struct bpf_link *link; 512 + unsigned long offset; 513 + 514 + offset = get_uprobe_offset(&uprobe_test); 515 + if (!ASSERT_GE(offset, 0, "get_uprobe_offset")) 516 + goto cleanup; 517 + 518 + opts.offsets = &offset; 519 + opts.cnt = 1; 520 + 521 + skel = uprobe_syscall_executed__open_and_load(); 522 + if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load")) 523 + return; 524 + 525 + skel->bss->pid = 
getpid(); 526 + 527 + link = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe_session, 528 + 0, "/proc/self/exe", NULL, &opts); 529 + if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi")) 530 + goto cleanup; 531 + 532 + check(skel, link, uprobe_test, uprobe_test, 4); 533 + 534 + cleanup: 535 + uprobe_syscall_executed__destroy(skel); 536 + } 537 + 538 + static void test_uprobe_usdt(void) 539 + { 540 + struct uprobe_syscall_executed *skel; 541 + struct bpf_link *link; 542 + void *addr; 543 + 544 + errno = 0; 545 + addr = find_nop5(usdt_test); 546 + if (!ASSERT_OK_PTR(addr, "find_nop5")) 547 + return; 548 + 549 + skel = uprobe_syscall_executed__open_and_load(); 550 + if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load")) 551 + return; 552 + 553 + skel->bss->pid = getpid(); 554 + 555 + link = bpf_program__attach_usdt(skel->progs.test_usdt, 556 + -1 /* all PIDs */, "/proc/self/exe", 557 + "optimized_uprobe", "usdt", NULL); 558 + if (!ASSERT_OK_PTR(link, "bpf_program__attach_usdt")) 559 + goto cleanup; 560 + 561 + check(skel, link, usdt_test, addr, 2); 562 + 563 + cleanup: 564 + uprobe_syscall_executed__destroy(skel); 565 + } 566 + 314 567 /* 315 568 * Borrowed from tools/testing/selftests/x86/test_shadow_stack.c. 316 569 * ··· 612 347 return; 613 348 } 614 349 615 - /* Run all of the uretprobe tests. */ 350 + /* Run all the tests with shadow stack in place. 
*/ 351 + shstk_is_enabled = true; 352 + 616 353 test_uretprobe_regs_equal(); 617 354 test_uretprobe_regs_change(); 618 355 test_uretprobe_syscall_call(); 356 + 357 + test_uprobe_legacy(); 358 + test_uprobe_multi(); 359 + test_uprobe_session(); 360 + test_uprobe_usdt(); 361 + 362 + shstk_is_enabled = false; 619 363 620 364 ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK); 621 365 } ··· 639 365 test_uretprobe_syscall_call(); 640 366 if (test__start_subtest("uretprobe_shadow_stack")) 641 367 test_uretprobe_shadow_stack(); 368 + if (test__start_subtest("uprobe_legacy")) 369 + test_uprobe_legacy(); 370 + if (test__start_subtest("uprobe_multi")) 371 + test_uprobe_multi(); 372 + if (test__start_subtest("uprobe_session")) 373 + test_uprobe_session(); 374 + if (test__start_subtest("uprobe_usdt")) 375 + test_uprobe_usdt(); 642 376 } 643 377 #else 644 378 static void __test_uprobe_syscall(void)
+52
tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include "vmlinux.h" 3 3 #include <bpf/bpf_helpers.h> 4 + #include <bpf/bpf_tracing.h> 5 + #include <bpf/usdt.bpf.h> 4 6 #include <string.h> 5 7 6 8 struct pt_regs regs; ··· 12 10 int executed = 0; 13 11 int pid; 14 12 13 + SEC("uprobe") 14 + int BPF_UPROBE(test_uprobe) 15 + { 16 + if (bpf_get_current_pid_tgid() >> 32 != pid) 17 + return 0; 18 + 19 + executed++; 20 + return 0; 21 + } 22 + 23 + SEC("uretprobe") 24 + int BPF_URETPROBE(test_uretprobe) 25 + { 26 + if (bpf_get_current_pid_tgid() >> 32 != pid) 27 + return 0; 28 + 29 + executed++; 30 + return 0; 31 + } 32 + 33 + SEC("uprobe.multi") 34 + int test_uprobe_multi(struct pt_regs *ctx) 35 + { 36 + if (bpf_get_current_pid_tgid() >> 32 != pid) 37 + return 0; 38 + 39 + executed++; 40 + return 0; 41 + } 42 + 15 43 SEC("uretprobe.multi") 16 44 int test_uretprobe_multi(struct pt_regs *ctx) 45 + { 46 + if (bpf_get_current_pid_tgid() >> 32 != pid) 47 + return 0; 48 + 49 + executed++; 50 + return 0; 51 + } 52 + 53 + SEC("uprobe.session") 54 + int test_uprobe_session(struct pt_regs *ctx) 55 + { 56 + if (bpf_get_current_pid_tgid() >> 32 != pid) 57 + return 0; 58 + 59 + executed++; 60 + return 0; 61 + } 62 + 63 + SEC("usdt") 64 + int test_usdt(struct pt_regs *ctx) 17 65 { 18 66 if (bpf_get_current_pid_tgid() >> 32 != pid) 19 67 return 0;