Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: convert get_current_cgroup_id_user to test_progs

get_current_cgroup_id_user allows testing the bpf_get_current_cgroup_id()
BPF API, but it is not integrated into test_progs, and so it is not run
automatically in CI.

Convert it to the test_progs framework to allow running it automatically.
The most notable differences with the old test are the following:
- the new test relies on autoattach instead of manually hooking/enabling
the targeted tracepoint through perf_event, which significantly reduces the
size of the test code
- it also accesses bpf prog data through global variables instead of maps
- the sleep duration passed to the nanosleep syscall has been reduced to its
minimum so as not to impact overall CI duration (we only care about the
syscall being properly triggered, not about the requested duration)

Signed-off-by: Alexis Lothoré (eBPF Foundation) <alexis.lothore@bootlin.com>
Link: https://lore.kernel.org/r/20240813-convert_cgroup_tests-v4-1-a33c03458cf6@bootlin.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>

authored by

Alexis Lothoré (eBPF Foundation) and committed by
Martin KaFai Lau
a4ae5c31 4a4c013d

+51 -176
-1
tools/testing/selftests/bpf/.gitignore
··· 19 19 urandom_read 20 20 test_sockmap 21 21 test_lirc_mode2_user 22 - get_cgroup_id_user 23 22 test_skb_cgroup_id_user 24 23 test_cgroup_storage 25 24 test_flow_dissector
+1 -2
tools/testing/selftests/bpf/Makefile
··· 67 67 68 68 # Order correspond to 'make run_tests' order 69 69 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ 70 - test_sock test_sockmap get_cgroup_id_user \ 70 + test_sock test_sockmap \ 71 71 test_cgroup_storage \ 72 72 test_tcpnotify_user test_sysctl \ 73 73 test_progs-no_alu32 ··· 295 295 $(OUTPUT)/test_sock: $(CGROUP_HELPERS) $(TESTING_HELPERS) 296 296 $(OUTPUT)/test_sockmap: $(CGROUP_HELPERS) $(TESTING_HELPERS) 297 297 $(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS) 298 - $(OUTPUT)/get_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) 299 298 $(OUTPUT)/test_cgroup_storage: $(CGROUP_HELPERS) $(TESTING_HELPERS) 300 299 $(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS) 301 300 $(OUTPUT)/test_sysctl: $(CGROUP_HELPERS) $(TESTING_HELPERS)
-151
tools/testing/selftests/bpf/get_cgroup_id_user.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - // Copyright (c) 2018 Facebook 3 - 4 - #include <stdio.h> 5 - #include <stdlib.h> 6 - #include <string.h> 7 - #include <errno.h> 8 - #include <fcntl.h> 9 - #include <syscall.h> 10 - #include <unistd.h> 11 - #include <linux/perf_event.h> 12 - #include <sys/ioctl.h> 13 - #include <sys/time.h> 14 - #include <sys/types.h> 15 - #include <sys/stat.h> 16 - 17 - #include <linux/bpf.h> 18 - #include <bpf/bpf.h> 19 - #include <bpf/libbpf.h> 20 - 21 - #include "cgroup_helpers.h" 22 - #include "testing_helpers.h" 23 - 24 - #define CHECK(condition, tag, format...) ({ \ 25 - int __ret = !!(condition); \ 26 - if (__ret) { \ 27 - printf("%s:FAIL:%s ", __func__, tag); \ 28 - printf(format); \ 29 - } else { \ 30 - printf("%s:PASS:%s\n", __func__, tag); \ 31 - } \ 32 - __ret; \ 33 - }) 34 - 35 - static int bpf_find_map(const char *test, struct bpf_object *obj, 36 - const char *name) 37 - { 38 - struct bpf_map *map; 39 - 40 - map = bpf_object__find_map_by_name(obj, name); 41 - if (!map) 42 - return -1; 43 - return bpf_map__fd(map); 44 - } 45 - 46 - #define TEST_CGROUP "/test-bpf-get-cgroup-id/" 47 - 48 - int main(int argc, char **argv) 49 - { 50 - const char *probe_name = "syscalls/sys_enter_nanosleep"; 51 - const char *file = "get_cgroup_id_kern.bpf.o"; 52 - int err, bytes, efd, prog_fd, pmu_fd; 53 - int cgroup_fd, cgidmap_fd, pidmap_fd; 54 - struct perf_event_attr attr = {}; 55 - struct bpf_object *obj; 56 - __u64 kcgid = 0, ucgid; 57 - __u32 key = 0, pid; 58 - int exit_code = 1; 59 - char buf[256]; 60 - const struct timespec req = { 61 - .tv_sec = 1, 62 - .tv_nsec = 0, 63 - }; 64 - 65 - cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); 66 - if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno)) 67 - return 1; 68 - 69 - /* Use libbpf 1.0 API mode */ 70 - libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 71 - 72 - err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); 73 - if (CHECK(err, 
"bpf_prog_test_load", "err %d errno %d\n", err, errno)) 74 - goto cleanup_cgroup_env; 75 - 76 - cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids"); 77 - if (CHECK(cgidmap_fd < 0, "bpf_find_map", "err %d errno %d\n", 78 - cgidmap_fd, errno)) 79 - goto close_prog; 80 - 81 - pidmap_fd = bpf_find_map(__func__, obj, "pidmap"); 82 - if (CHECK(pidmap_fd < 0, "bpf_find_map", "err %d errno %d\n", 83 - pidmap_fd, errno)) 84 - goto close_prog; 85 - 86 - pid = getpid(); 87 - bpf_map_update_elem(pidmap_fd, &key, &pid, 0); 88 - 89 - if (access("/sys/kernel/tracing/trace", F_OK) == 0) { 90 - snprintf(buf, sizeof(buf), 91 - "/sys/kernel/tracing/events/%s/id", probe_name); 92 - } else { 93 - snprintf(buf, sizeof(buf), 94 - "/sys/kernel/debug/tracing/events/%s/id", probe_name); 95 - } 96 - efd = open(buf, O_RDONLY, 0); 97 - if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) 98 - goto close_prog; 99 - bytes = read(efd, buf, sizeof(buf)); 100 - close(efd); 101 - if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read", 102 - "bytes %d errno %d\n", bytes, errno)) 103 - goto close_prog; 104 - 105 - attr.config = strtol(buf, NULL, 0); 106 - attr.type = PERF_TYPE_TRACEPOINT; 107 - attr.sample_type = PERF_SAMPLE_RAW; 108 - attr.sample_period = 1; 109 - attr.wakeup_events = 1; 110 - 111 - /* attach to this pid so the all bpf invocations will be in the 112 - * cgroup associated with this pid. 
113 - */ 114 - pmu_fd = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0); 115 - if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd, 116 - errno)) 117 - goto close_prog; 118 - 119 - err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); 120 - if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err, 121 - errno)) 122 - goto close_pmu; 123 - 124 - err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); 125 - if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err, 126 - errno)) 127 - goto close_pmu; 128 - 129 - /* trigger some syscalls */ 130 - syscall(__NR_nanosleep, &req, NULL); 131 - 132 - err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid); 133 - if (CHECK(err, "bpf_map_lookup_elem", "err %d errno %d\n", err, errno)) 134 - goto close_pmu; 135 - 136 - ucgid = get_cgroup_id(TEST_CGROUP); 137 - if (CHECK(kcgid != ucgid, "compare_cgroup_id", 138 - "kern cgid %llx user cgid %llx", kcgid, ucgid)) 139 - goto close_pmu; 140 - 141 - exit_code = 0; 142 - printf("%s:PASS\n", argv[0]); 143 - 144 - close_pmu: 145 - close(pmu_fd); 146 - close_prog: 147 - bpf_object__close(obj); 148 - cleanup_cgroup_env: 149 - cleanup_cgroup_environment(); 150 - return exit_code; 151 - }
+46
tools/testing/selftests/bpf/prog_tests/cgroup_get_current_cgroup_id.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <sys/stat.h> 4 + #include <sys/sysmacros.h> 5 + #include "test_progs.h" 6 + #include "cgroup_helpers.h" 7 + #include "get_cgroup_id_kern.skel.h" 8 + 9 + #define TEST_CGROUP "/test-bpf-get-cgroup-id/" 10 + 11 + void test_cgroup_get_current_cgroup_id(void) 12 + { 13 + struct get_cgroup_id_kern *skel; 14 + const struct timespec req = { 15 + .tv_sec = 0, 16 + .tv_nsec = 1, 17 + }; 18 + int cgroup_fd; 19 + __u64 ucgid; 20 + 21 + cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); 22 + if (!ASSERT_OK_FD(cgroup_fd, "cgroup switch")) 23 + return; 24 + 25 + skel = get_cgroup_id_kern__open_and_load(); 26 + if (!ASSERT_OK_PTR(skel, "load program")) 27 + goto cleanup_cgroup; 28 + 29 + if (!ASSERT_OK(get_cgroup_id_kern__attach(skel), "attach bpf program")) 30 + goto cleanup_progs; 31 + 32 + skel->bss->expected_pid = getpid(); 33 + /* trigger the syscall on which is attached the tested prog */ 34 + if (!ASSERT_OK(syscall(__NR_nanosleep, &req, NULL), "nanosleep")) 35 + goto cleanup_progs; 36 + 37 + ucgid = get_cgroup_id(TEST_CGROUP); 38 + 39 + ASSERT_EQ(skel->bss->cg_id, ucgid, "compare cgroup ids"); 40 + 41 + cleanup_progs: 42 + get_cgroup_id_kern__destroy(skel); 43 + cleanup_cgroup: 44 + close(cgroup_fd); 45 + cleanup_cgroup_environment(); 46 + }
+4 -22
tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
··· 4 4 #include <linux/bpf.h> 5 5 #include <bpf/bpf_helpers.h> 6 6 7 - struct { 8 - __uint(type, BPF_MAP_TYPE_ARRAY); 9 - __uint(max_entries, 1); 10 - __type(key, __u32); 11 - __type(value, __u64); 12 - } cg_ids SEC(".maps"); 13 - 14 - struct { 15 - __uint(type, BPF_MAP_TYPE_ARRAY); 16 - __uint(max_entries, 1); 17 - __type(key, __u32); 18 - __type(value, __u32); 19 - } pidmap SEC(".maps"); 7 + __u64 cg_id; 8 + __u64 expected_pid; 20 9 21 10 SEC("tracepoint/syscalls/sys_enter_nanosleep") 22 11 int trace(void *ctx) 23 12 { 24 13 __u32 pid = bpf_get_current_pid_tgid(); 25 - __u32 key = 0, *expected_pid; 26 - __u64 *val; 27 14 28 - expected_pid = bpf_map_lookup_elem(&pidmap, &key); 29 - if (!expected_pid || *expected_pid != pid) 30 - return 0; 31 - 32 - val = bpf_map_lookup_elem(&cg_ids, &key); 33 - if (val) 34 - *val = bpf_get_current_cgroup_id(); 15 + if (expected_pid == pid) 16 + cg_id = bpf_get_current_cgroup_id(); 35 17 36 18 return 0; 37 19 }