Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Add test for bpf_get_branch_snapshot

This test uses bpf_get_branch_snapshot from a fexit program. The test uses
a target function (bpf_testmod_loop_test) and compares the recorded branches
against kallsyms. If there aren't enough records matching kallsyms, the test fails.

Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20210910183352.3151445-4-songliubraving@fb.com

Authored by Song Liu; committed by Alexei Starovoitov.
025bd7c7 856c02db

+243 -52
+17 -2
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
··· 13 13 14 14 DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123; 15 15 16 + noinline int bpf_testmod_loop_test(int n) 17 + { 18 + int i, sum = 0; 19 + 20 + /* the primary goal of this test is to test LBR. Create a lot of 21 + * branches in the function, so we can catch it easily. 22 + */ 23 + for (i = 0; i < n; i++) 24 + sum += i; 25 + return sum; 26 + } 27 + 16 28 noinline ssize_t 17 29 bpf_testmod_test_read(struct file *file, struct kobject *kobj, 18 30 struct bin_attribute *bin_attr, ··· 36 24 .len = len, 37 25 }; 38 26 39 - trace_bpf_testmod_test_read(current, &ctx); 27 + /* This is always true. Use the check to make sure the compiler 28 + * doesn't remove bpf_testmod_loop_test. 29 + */ 30 + if (bpf_testmod_loop_test(101) > 100) 31 + trace_bpf_testmod_test_read(current, &ctx); 40 32 41 33 return -EIO; /* always fail */ 42 34 } ··· 87 71 MODULE_AUTHOR("Andrii Nakryiko"); 88 72 MODULE_DESCRIPTION("BPF selftests module"); 89 73 MODULE_LICENSE("Dual BSD/GPL"); 90 -
+3 -11
tools/testing/selftests/bpf/prog_tests/core_reloc.c
··· 30 30 .output_len = sizeof(struct core_reloc_module_output), \ 31 31 .prog_sec_name = sec_name, \ 32 32 .raw_tp_name = tp_name, \ 33 - .trigger = trigger_module_test_read, \ 33 + .trigger = __trigger_module_test_read, \ 34 34 .needs_testmod = true, \ 35 35 } 36 36 ··· 475 475 return 0; 476 476 } 477 477 478 - static int trigger_module_test_read(const struct core_reloc_test_case *test) 478 + static int __trigger_module_test_read(const struct core_reloc_test_case *test) 479 479 { 480 480 struct core_reloc_module_output *exp = (void *)test->output; 481 - int fd, err; 482 481 483 - fd = open("/sys/kernel/bpf_testmod", O_RDONLY); 484 - err = -errno; 485 - if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err)) 486 - return err; 487 - 488 - read(fd, NULL, exp->len); /* request expected number of bytes */ 489 - close(fd); 490 - 482 + trigger_module_test_read(exp->len); 491 483 return 0; 492 484 } 493 485
+100
tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2021 Facebook */ 3 + #include <test_progs.h> 4 + #include "get_branch_snapshot.skel.h" 5 + 6 + static int *pfd_array; 7 + static int cpu_cnt; 8 + 9 + static int create_perf_events(void) 10 + { 11 + struct perf_event_attr attr = {0}; 12 + int cpu; 13 + 14 + /* create perf event */ 15 + attr.size = sizeof(attr); 16 + attr.type = PERF_TYPE_RAW; 17 + attr.config = 0x1b00; 18 + attr.sample_type = PERF_SAMPLE_BRANCH_STACK; 19 + attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL | 20 + PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY; 21 + 22 + cpu_cnt = libbpf_num_possible_cpus(); 23 + pfd_array = malloc(sizeof(int) * cpu_cnt); 24 + if (!pfd_array) { 25 + cpu_cnt = 0; 26 + return 1; 27 + } 28 + 29 + for (cpu = 0; cpu < cpu_cnt; cpu++) { 30 + pfd_array[cpu] = syscall(__NR_perf_event_open, &attr, 31 + -1, cpu, -1, PERF_FLAG_FD_CLOEXEC); 32 + if (pfd_array[cpu] < 0) 33 + break; 34 + } 35 + 36 + return cpu == 0; 37 + } 38 + 39 + static void close_perf_events(void) 40 + { 41 + int cpu = 0; 42 + int fd; 43 + 44 + while (cpu++ < cpu_cnt) { 45 + fd = pfd_array[cpu]; 46 + if (fd < 0) 47 + break; 48 + close(fd); 49 + } 50 + free(pfd_array); 51 + } 52 + 53 + void test_get_branch_snapshot(void) 54 + { 55 + struct get_branch_snapshot *skel = NULL; 56 + int err; 57 + 58 + if (create_perf_events()) { 59 + test__skip(); /* system doesn't support LBR */ 60 + goto cleanup; 61 + } 62 + 63 + skel = get_branch_snapshot__open_and_load(); 64 + if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load")) 65 + goto cleanup; 66 + 67 + err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low); 68 + if (!ASSERT_OK(err, "kallsyms_find")) 69 + goto cleanup; 70 + 71 + err = kallsyms_find_next("bpf_testmod_loop_test", &skel->bss->address_high); 72 + if (!ASSERT_OK(err, "kallsyms_find_next")) 73 + goto cleanup; 74 + 75 + err = get_branch_snapshot__attach(skel); 76 + if (!ASSERT_OK(err, "get_branch_snapshot__attach")) 77 + 
goto cleanup; 78 + 79 + trigger_module_test_read(100); 80 + 81 + if (skel->bss->total_entries < 16) { 82 + /* too few entries for the hit/waste test */ 83 + test__skip(); 84 + goto cleanup; 85 + } 86 + 87 + ASSERT_GT(skel->bss->test1_hits, 6, "find_looptest_in_lbr"); 88 + 89 + /* Given we stop LBR in software, we will waste a few entries. 90 + * But we should try to waste as few as possible entries. We are at 91 + * about 7 on x86_64 systems. 92 + * Add a check for < 10 so that we get heads-up when something 93 + * changes and wastes too many entries. 94 + */ 95 + ASSERT_LT(skel->bss->wasted_entries, 10, "check_wasted_entries"); 96 + 97 + cleanup: 98 + get_branch_snapshot__destroy(skel); 99 + close_perf_events(); 100 + }
-39
tools/testing/selftests/bpf/prog_tests/module_attach.c
··· 6 6 7 7 static int duration; 8 8 9 - static int trigger_module_test_read(int read_sz) 10 - { 11 - int fd, err; 12 - 13 - fd = open("/sys/kernel/bpf_testmod", O_RDONLY); 14 - err = -errno; 15 - if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err)) 16 - return err; 17 - 18 - read(fd, NULL, read_sz); 19 - close(fd); 20 - 21 - return 0; 22 - } 23 - 24 - static int trigger_module_test_write(int write_sz) 25 - { 26 - int fd, err; 27 - char *buf = malloc(write_sz); 28 - 29 - if (!buf) 30 - return -ENOMEM; 31 - 32 - memset(buf, 'a', write_sz); 33 - buf[write_sz-1] = '\0'; 34 - 35 - fd = open("/sys/kernel/bpf_testmod", O_WRONLY); 36 - err = -errno; 37 - if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err)) { 38 - free(buf); 39 - return err; 40 - } 41 - 42 - write(fd, buf, write_sz); 43 - close(fd); 44 - free(buf); 45 - return 0; 46 - } 47 - 48 9 static int delete_module(const char *name, int flags) 49 10 { 50 11 return syscall(__NR_delete_module, name, flags);
+40
tools/testing/selftests/bpf/progs/get_branch_snapshot.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2021 Facebook */ 3 + #include "vmlinux.h" 4 + #include <bpf/bpf_helpers.h> 5 + #include <bpf/bpf_tracing.h> 6 + 7 + char _license[] SEC("license") = "GPL"; 8 + 9 + __u64 test1_hits = 0; 10 + __u64 address_low = 0; 11 + __u64 address_high = 0; 12 + int wasted_entries = 0; 13 + long total_entries = 0; 14 + 15 + #define ENTRY_CNT 32 16 + struct perf_branch_entry entries[ENTRY_CNT] = {}; 17 + 18 + static inline bool in_range(__u64 val) 19 + { 20 + return (val >= address_low) && (val < address_high); 21 + } 22 + 23 + SEC("fexit/bpf_testmod_loop_test") 24 + int BPF_PROG(test1, int n, int ret) 25 + { 26 + long i; 27 + 28 + total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0); 29 + total_entries /= sizeof(struct perf_branch_entry); 30 + 31 + for (i = 0; i < ENTRY_CNT; i++) { 32 + if (i >= total_entries) 33 + break; 34 + if (in_range(entries[i].from) && in_range(entries[i].to)) 35 + test1_hits++; 36 + else if (!test1_hits) 37 + wasted_entries++; 38 + } 39 + return 0; 40 + }
+39
tools/testing/selftests/bpf/test_progs.c
··· 743 743 return chdir(flavor); 744 744 } 745 745 746 + int trigger_module_test_read(int read_sz) 747 + { 748 + int fd, err; 749 + 750 + fd = open("/sys/kernel/bpf_testmod", O_RDONLY); 751 + err = -errno; 752 + if (!ASSERT_GE(fd, 0, "testmod_file_open")) 753 + return err; 754 + 755 + read(fd, NULL, read_sz); 756 + close(fd); 757 + 758 + return 0; 759 + } 760 + 761 + int trigger_module_test_write(int write_sz) 762 + { 763 + int fd, err; 764 + char *buf = malloc(write_sz); 765 + 766 + if (!buf) 767 + return -ENOMEM; 768 + 769 + memset(buf, 'a', write_sz); 770 + buf[write_sz-1] = '\0'; 771 + 772 + fd = open("/sys/kernel/bpf_testmod", O_WRONLY); 773 + err = -errno; 774 + if (!ASSERT_GE(fd, 0, "testmod_file_open")) { 775 + free(buf); 776 + return err; 777 + } 778 + 779 + write(fd, buf, write_sz); 780 + close(fd); 781 + free(buf); 782 + return 0; 783 + } 784 + 746 785 #define MAX_BACKTRACE_SZ 128 747 786 void crash_handler(int signum) 748 787 {
+2
tools/testing/selftests/bpf/test_progs.h
··· 291 291 int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len); 292 292 int extract_build_id(char *build_id, size_t size); 293 293 int kern_sync_rcu(void); 294 + int trigger_module_test_read(int read_sz); 295 + int trigger_module_test_write(int write_sz); 294 296 295 297 #ifdef __x86_64__ 296 298 #define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep"
+37
tools/testing/selftests/bpf/trace_helpers.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #include <ctype.h> 2 3 #include <stdio.h> 3 4 #include <stdlib.h> 4 5 #include <string.h> ··· 109 108 if (strcmp(name, sym) == 0) { 110 109 *addr = value; 111 110 goto out; 111 + } 112 + } 113 + err = -ENOENT; 114 + 115 + out: 116 + fclose(f); 117 + return err; 118 + } 119 + 120 + /* find the address of the next symbol of the same type, this can be used 121 + * to determine the end of a function. 122 + */ 123 + int kallsyms_find_next(const char *sym, unsigned long long *addr) 124 + { 125 + char type, found_type, name[500]; 126 + unsigned long long value; 127 + bool found = false; 128 + int err = 0; 129 + FILE *f; 130 + 131 + f = fopen("/proc/kallsyms", "r"); 132 + if (!f) 133 + return -EINVAL; 134 + 135 + while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) { 136 + /* Different types of symbols in kernel modules are mixed 137 + * in /proc/kallsyms. Only return the next matching type. 138 + * Use tolower() for type so that 'T' matches 't'. 139 + */ 140 + if (found && found_type == tolower(type)) { 141 + *addr = value; 142 + goto out; 143 + } 144 + if (strcmp(name, sym) == 0) { 145 + found = true; 146 + found_type = tolower(type); 112 147 } 113 148 } 114 149 err = -ENOENT;
+5
tools/testing/selftests/bpf/trace_helpers.h
··· 16 16 /* open kallsyms and find addresses on the fly, faster than load + search. */ 17 17 int kallsyms_find(const char *sym, unsigned long long *addr); 18 18 19 + /* find the address of the next symbol, this can be used to determine the 20 + * end of a function 21 + */ 22 + int kallsyms_find_next(const char *sym, unsigned long long *addr); 23 + 19 24 void read_trace_pipe(void); 20 25 21 26 ssize_t get_uprobe_offset(const void *addr, ssize_t base);