Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Test racing between bpf_timer_cancel_and_free and bpf_timer_cancel

This selftest is based on Alexei's test, adapted from an internal
user to troubleshoot another bug. During this exercise, a separate
racing bug was discovered between bpf_timer_cancel_and_free
and bpf_timer_cancel. The details can be found in the previous
patch.

This patch is to add a selftest that can trigger the bug.
is to add a selftest that can trigger the bug.
I can trigger the UAF every time in my qemu setup with KASAN. The idea
is to have multiple user space threads running in a tight loop to exercise
both bpf_map_update_elem (which calls into bpf_timer_cancel_and_free)
and bpf_timer_cancel.

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20240215211218.990808-2-martin.lau@linux.dev

authored by

Martin KaFai Lau and committed by
Daniel Borkmann
3f00e4a9 0281b919

+67 -2
+34 -1
tools/testing/selftests/bpf/prog_tests/timer.c
···
  4   4   #include "timer.skel.h"
  5   5   #include "timer_failure.skel.h"
  6   6
      7 + #define NUM_THR 8
      8 +
      9 + static void *spin_lock_thread(void *arg)
     10 + {
     11 + 	int i, err, prog_fd = *(int *)arg;
     12 + 	LIBBPF_OPTS(bpf_test_run_opts, topts);
     13 +
     14 + 	for (i = 0; i < 10000; i++) {
     15 + 		err = bpf_prog_test_run_opts(prog_fd, &topts);
     16 + 		if (!ASSERT_OK(err, "test_run_opts err") ||
     17 + 		    !ASSERT_OK(topts.retval, "test_run_opts retval"))
     18 + 			break;
     19 + 	}
     20 +
     21 + 	pthread_exit(arg);
     22 + }
     23 +
  7  24   static int timer(struct timer *timer_skel)
  8  25   {
  9     - 	int err, prog_fd;
     26 + 	int i, err, prog_fd;
 10  27   	LIBBPF_OPTS(bpf_test_run_opts, topts);
     28 + 	pthread_t thread_id[NUM_THR];
     29 + 	void *ret;
 11  30
 12  31   	err = timer__attach(timer_skel);
 13  32   	if (!ASSERT_OK(err, "timer_attach"))
···
 61  42
 62  43   	/* check that code paths completed */
 63  44   	ASSERT_EQ(timer_skel->bss->ok, 1 | 2 | 4, "ok");
     45 +
     46 + 	prog_fd = bpf_program__fd(timer_skel->progs.race);
     47 + 	for (i = 0; i < NUM_THR; i++) {
     48 + 		err = pthread_create(&thread_id[i], NULL,
     49 + 				     &spin_lock_thread, &prog_fd);
     50 + 		if (!ASSERT_OK(err, "pthread_create"))
     51 + 			break;
     52 + 	}
     53 +
     54 + 	while (i) {
     55 + 		err = pthread_join(thread_id[--i], &ret);
     56 + 		if (ASSERT_OK(err, "pthread_join"))
     57 + 			ASSERT_EQ(ret, (void *)&prog_fd, "pthread_join");
     58 + 	}
 64  59
 65  60   	return 0;
 66  61   }
+33 -1
tools/testing/selftests/bpf/progs/timer.c
···
 51  51   	__uint(max_entries, 1);
 52  52   	__type(key, int);
 53  53   	__type(value, struct elem);
 54     - } abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps");
     54 + } abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps"),
     55 + race_array SEC(".maps");
 55  56
 56  57   __u64 bss_data;
 57  58   __u64 abs_data;
···
388 387   {
389 388   	bpf_printk("test5");
390 389   	test_pinned_timer(false);
    390 +
    391 + 	return 0;
    392 + }
    393 +
    394 + static int race_timer_callback(void *race_array, int *race_key, struct bpf_timer *timer)
    395 + {
    396 + 	bpf_timer_start(timer, 1000000, 0);
    397 + 	return 0;
    398 + }
    399 +
    400 + SEC("syscall")
    401 + int race(void *ctx)
    402 + {
    403 + 	struct bpf_timer *timer;
    404 + 	int err, race_key = 0;
    405 + 	struct elem init;
    406 +
    407 + 	__builtin_memset(&init, 0, sizeof(struct elem));
    408 + 	bpf_map_update_elem(&race_array, &race_key, &init, BPF_ANY);
    409 +
    410 + 	timer = bpf_map_lookup_elem(&race_array, &race_key);
    411 + 	if (!timer)
    412 + 		return 1;
    413 +
    414 + 	err = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
    415 + 	if (err && err != -EBUSY)
    416 + 		return 1;
    417 +
    418 + 	bpf_timer_set_callback(timer, race_timer_callback);
    419 + 	bpf_timer_start(timer, 0, 0);
    420 + 	bpf_timer_cancel(timer);
391 421
392 422   	return 0;
393 423   }