Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: BPF task work scheduling tests

Introduce selftests that check the BPF task work scheduling mechanism.
Validate that the verifier does not accept incorrect calls to the
bpf_task_work_schedule kfunc.

Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250923112404.668720-9-mykyta.yatsenko5@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Mykyta Yatsenko and committed by
Alexei Starovoitov
39fd74df 38aa7003

+353
+150
tools/testing/selftests/bpf/prog_tests/test_task_work.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ 3 + #include <test_progs.h> 4 + #include <string.h> 5 + #include <stdio.h> 6 + #include "task_work.skel.h" 7 + #include "task_work_fail.skel.h" 8 + #include <linux/bpf.h> 9 + #include <linux/perf_event.h> 10 + #include <sys/syscall.h> 11 + #include <time.h> 12 + 13 + static int perf_event_open(__u32 type, __u64 config, int pid) 14 + { 15 + struct perf_event_attr attr = { 16 + .type = type, 17 + .config = config, 18 + .size = sizeof(struct perf_event_attr), 19 + .sample_period = 100000, 20 + }; 21 + 22 + return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0); 23 + } 24 + 25 + struct elem { 26 + char data[128]; 27 + struct bpf_task_work tw; 28 + }; 29 + 30 + static int verify_map(struct bpf_map *map, const char *expected_data) 31 + { 32 + int err; 33 + struct elem value; 34 + int processed_values = 0; 35 + int k, sz; 36 + 37 + sz = bpf_map__max_entries(map); 38 + for (k = 0; k < sz; ++k) { 39 + err = bpf_map__lookup_elem(map, &k, sizeof(int), &value, sizeof(struct elem), 0); 40 + if (err) 41 + continue; 42 + if (!ASSERT_EQ(strcmp(expected_data, value.data), 0, "map data")) { 43 + fprintf(stderr, "expected '%s', found '%s' in %s map", expected_data, 44 + value.data, bpf_map__name(map)); 45 + return 2; 46 + } 47 + processed_values++; 48 + } 49 + 50 + return processed_values == 0; 51 + } 52 + 53 + static void task_work_run(const char *prog_name, const char *map_name) 54 + { 55 + struct task_work *skel; 56 + struct bpf_program *prog; 57 + struct bpf_map *map; 58 + struct bpf_link *link; 59 + int err, pe_fd = 0, pid, status, pipefd[2]; 60 + char user_string[] = "hello world"; 61 + 62 + if (!ASSERT_NEQ(pipe(pipefd), -1, "pipe")) 63 + return; 64 + 65 + pid = fork(); 66 + if (pid == 0) { 67 + __u64 num = 1; 68 + int i; 69 + char buf; 70 + 71 + close(pipefd[1]); 72 + read(pipefd[0], &buf, sizeof(buf)); 73 + close(pipefd[0]); 74 + 75 + for (i = 0; i < 10000; ++i) 
76 + num *= time(0) % 7; 77 + (void)num; 78 + exit(0); 79 + } 80 + ASSERT_GT(pid, 0, "fork() failed"); 81 + 82 + skel = task_work__open(); 83 + if (!ASSERT_OK_PTR(skel, "task_work__open")) 84 + return; 85 + 86 + bpf_object__for_each_program(prog, skel->obj) { 87 + bpf_program__set_autoload(prog, false); 88 + } 89 + 90 + prog = bpf_object__find_program_by_name(skel->obj, prog_name); 91 + if (!ASSERT_OK_PTR(prog, "prog_name")) 92 + goto cleanup; 93 + bpf_program__set_autoload(prog, true); 94 + skel->bss->user_ptr = (char *)user_string; 95 + 96 + err = task_work__load(skel); 97 + if (!ASSERT_OK(err, "skel_load")) 98 + goto cleanup; 99 + 100 + pe_fd = perf_event_open(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES, pid); 101 + if (pe_fd == -1 && (errno == ENOENT || errno == EOPNOTSUPP)) { 102 + printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__); 103 + test__skip(); 104 + goto cleanup; 105 + } 106 + if (!ASSERT_NEQ(pe_fd, -1, "pe_fd")) { 107 + fprintf(stderr, "perf_event_open errno: %d, pid: %d\n", errno, pid); 108 + goto cleanup; 109 + } 110 + 111 + link = bpf_program__attach_perf_event(prog, pe_fd); 112 + if (!ASSERT_OK_PTR(link, "attach_perf_event")) 113 + goto cleanup; 114 + 115 + close(pipefd[0]); 116 + write(pipefd[1], user_string, 1); 117 + close(pipefd[1]); 118 + /* Wait to collect some samples */ 119 + waitpid(pid, &status, 0); 120 + pid = 0; 121 + map = bpf_object__find_map_by_name(skel->obj, map_name); 122 + if (!ASSERT_OK_PTR(map, "find map_name")) 123 + goto cleanup; 124 + if (!ASSERT_OK(verify_map(map, user_string), "verify map")) 125 + goto cleanup; 126 + cleanup: 127 + if (pe_fd >= 0) 128 + close(pe_fd); 129 + task_work__destroy(skel); 130 + if (pid) { 131 + close(pipefd[0]); 132 + write(pipefd[1], user_string, 1); 133 + close(pipefd[1]); 134 + waitpid(pid, &status, 0); 135 + } 136 + } 137 + 138 + void test_task_work(void) 139 + { 140 + if (test__start_subtest("test_task_work_hash_map")) 141 + task_work_run("oncpu_hash_map", "hmap"); 142 + 143 + if 
(test__start_subtest("test_task_work_array_map")) 144 + task_work_run("oncpu_array_map", "arrmap"); 145 + 146 + if (test__start_subtest("test_task_work_lru_map")) 147 + task_work_run("oncpu_lru_map", "lrumap"); 148 + 149 + RUN_TESTS(task_work_fail); 150 + }
+107
tools/testing/selftests/bpf/progs/task_work.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <string.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "errno.h"

char _license[] SEC("license") = "GPL";

/* User-space string pointer; set by the test runner before skeleton load. */
const void *user_ptr = NULL;

/*
 * Map value: payload buffer plus the bpf_task_work scheduling context.
 * The schedule kfuncs require the context to reside inside a map value.
 */
struct elem {
	char data[128];
	struct bpf_task_work tw;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} hmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} arrmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} lrumap SEC(".maps");

/*
 * Task work callback: runs in the context of the target task and copies
 * the user-space string (user_ptr) into the map element's data buffer.
 * The user-space side later asserts the map contains this string.
 */
static int process_work(struct bpf_map *map, void *key, void *value)
{
	struct elem *work = value;

	bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);
	return 0;
}

int key = 0;

/*
 * Perf-event program using a (non-prealloc) hash map: insert a zeroed
 * element, look it up, then schedule task work on the current task.
 */
SEC("perf_event")
int oncpu_hash_map(struct pt_regs *args)
{
	struct elem empty_work = { .data = { 0 } };
	struct elem *work;
	struct task_struct *task;
	int err;

	task = bpf_get_current_task_btf();
	/* BPF_NOEXIST: only the first sample creates the element */
	err = bpf_map_update_elem(&hmap, &key, &empty_work, BPF_NOEXIST);
	if (err)
		return 0;
	work = bpf_map_lookup_elem(&hmap, &key);
	if (!work)
		return 0;

	bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL);
	return 0;
}

/*
 * Perf-event program using an array map: elements pre-exist, so a plain
 * lookup suffices; schedules via the _signal variant of the kfunc.
 */
SEC("perf_event")
int oncpu_array_map(struct pt_regs *args)
{
	struct elem *work;
	struct task_struct *task;

	task = bpf_get_current_task_btf();
	work = bpf_map_lookup_elem(&arrmap, &key);
	if (!work)
		return 0;
	bpf_task_work_schedule_signal(task, &work->tw, &arrmap, process_work, NULL);
	return 0;
}

/*
 * Perf-event program using an LRU hash map: bail out if the element
 * already exists (schedule at most once), otherwise create it and
 * schedule task work on the current task.
 */
SEC("perf_event")
int oncpu_lru_map(struct pt_regs *args)
{
	struct elem empty_work = { .data = { 0 } };
	struct elem *work;
	struct task_struct *task;
	int err;

	task = bpf_get_current_task_btf();
	work = bpf_map_lookup_elem(&lrumap, &key);
	if (work)
		return 0;
	err = bpf_map_update_elem(&lrumap, &key, &empty_work, BPF_NOEXIST);
	if (err)
		return 0;
	work = bpf_map_lookup_elem(&lrumap, &key);
	/* data[0] non-zero means the callback already filled this element */
	if (!work || work->data[0])
		return 0;
	bpf_task_work_schedule_resume(task, &work->tw, &lrumap, process_work, NULL);
	return 0;
}
+96
tools/testing/selftests/bpf/progs/task_work_fail.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <string.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"

char _license[] SEC("license") = "GPL";

/* User-space string pointer; set by the test runner before skeleton load. */
const void *user_ptr = NULL;

/* Map value: payload buffer plus the bpf_task_work scheduling context. */
struct elem {
	char data[128];
	struct bpf_task_work tw;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} hmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} arrmap SEC(".maps");

/* Task work callback; never actually runs — these progs must not load. */
static int process_work(struct bpf_map *map, void *key, void *value)
{
	struct elem *work = value;

	bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);
	return 0;
}

int key = 0;

/*
 * Negative test: bpf_task_work context comes from arrmap but hmap is
 * passed as the owning map — the verifier must reject the mismatch.
 */
SEC("perf_event")
__failure __msg("doesn't match map pointer in R3")
int mismatch_map(struct pt_regs *args)
{
	struct elem *work;
	struct task_struct *task;

	task = bpf_get_current_task_btf();
	work = bpf_map_lookup_elem(&arrmap, &key);
	if (!work)
		return 0;
	bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL);
	return 0;
}

/*
 * Negative test: the bpf_task_work context is on the BPF stack instead of
 * inside a map value — must be rejected.
 */
SEC("perf_event")
__failure __msg("arg#1 doesn't point to a map value")
int no_map_task_work(struct pt_regs *args)
{
	struct task_struct *task;
	struct bpf_task_work tw;

	task = bpf_get_current_task_btf();
	bpf_task_work_schedule_resume(task, &tw, &hmap, process_work, NULL);
	return 0;
}

/* Negative test: NULL bpf_task_work context pointer — must be rejected. */
SEC("perf_event")
__failure __msg("Possibly NULL pointer passed to trusted arg1")
int task_work_null(struct pt_regs *args)
{
	struct task_struct *task;

	task = bpf_get_current_task_btf();
	bpf_task_work_schedule_resume(task, NULL, &hmap, process_work, NULL);
	return 0;
}

/* Negative test: NULL map pointer — must be rejected. */
SEC("perf_event")
__failure __msg("Possibly NULL pointer passed to trusted arg2")
int map_null(struct pt_regs *args)
{
	struct elem *work;
	struct task_struct *task;

	task = bpf_get_current_task_btf();
	work = bpf_map_lookup_elem(&arrmap, &key);
	if (!work)
		return 0;
	bpf_task_work_schedule_resume(task, &work->tw, NULL, process_work, NULL);
	return 0;
}