Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf/selftests: coverage for bpf_map_ops errors

These tests expose the issue that errors returned from inlined bpf map
helpers which call into the bpf_map_ops functions cannot be checked
properly. At best, a zero/non-zero check can be done, but these tests
show it is not possible to check for a negative value or for a specific
error value.

Signed-off-by: JP Kobryn <inwardvessel@gmail.com>
Tested-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230322194754.185781-2-inwardvessel@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by JP Kobryn and committed by Alexei Starovoitov
(commits 830154cd, d9d93f3b).

+300
+162
tools/testing/selftests/bpf/prog_tests/map_ops.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ 3 + 4 + #include <errno.h> 5 + #include <sys/syscall.h> 6 + #include <unistd.h> 7 + 8 + #include "test_map_ops.skel.h" 9 + #include "test_progs.h" 10 + 11 + static void map_update(void) 12 + { 13 + (void)syscall(__NR_getpid); 14 + } 15 + 16 + static void map_delete(void) 17 + { 18 + (void)syscall(__NR_getppid); 19 + } 20 + 21 + static void map_push(void) 22 + { 23 + (void)syscall(__NR_getuid); 24 + } 25 + 26 + static void map_pop(void) 27 + { 28 + (void)syscall(__NR_geteuid); 29 + } 30 + 31 + static void map_peek(void) 32 + { 33 + (void)syscall(__NR_getgid); 34 + } 35 + 36 + static void map_for_each_pass(void) 37 + { 38 + (void)syscall(__NR_gettid); 39 + } 40 + 41 + static void map_for_each_fail(void) 42 + { 43 + (void)syscall(__NR_getpgid); 44 + } 45 + 46 + static int setup(struct test_map_ops **skel) 47 + { 48 + int err = 0; 49 + 50 + if (!skel) 51 + return -1; 52 + 53 + *skel = test_map_ops__open(); 54 + if (!ASSERT_OK_PTR(*skel, "test_map_ops__open")) 55 + return -1; 56 + 57 + (*skel)->rodata->pid = getpid(); 58 + 59 + err = test_map_ops__load(*skel); 60 + if (!ASSERT_OK(err, "test_map_ops__load")) 61 + return err; 62 + 63 + err = test_map_ops__attach(*skel); 64 + if (!ASSERT_OK(err, "test_map_ops__attach")) 65 + return err; 66 + 67 + return err; 68 + } 69 + 70 + static void teardown(struct test_map_ops **skel) 71 + { 72 + if (skel && *skel) 73 + test_map_ops__destroy(*skel); 74 + } 75 + 76 + static void map_ops_update_delete_subtest(void) 77 + { 78 + struct test_map_ops *skel; 79 + 80 + if (setup(&skel)) 81 + goto teardown; 82 + 83 + map_update(); 84 + ASSERT_OK(skel->bss->err, "map_update_initial"); 85 + 86 + map_update(); 87 + ASSERT_LT(skel->bss->err, 0, "map_update_existing"); 88 + ASSERT_EQ(skel->bss->err, -EEXIST, "map_update_existing"); 89 + 90 + map_delete(); 91 + ASSERT_OK(skel->bss->err, "map_delete_existing"); 92 + 93 + map_delete(); 94 + 
ASSERT_LT(skel->bss->err, 0, "map_delete_non_existing"); 95 + ASSERT_EQ(skel->bss->err, -ENOENT, "map_delete_non_existing"); 96 + 97 + teardown: 98 + teardown(&skel); 99 + } 100 + 101 + static void map_ops_push_peek_pop_subtest(void) 102 + { 103 + struct test_map_ops *skel; 104 + 105 + if (setup(&skel)) 106 + goto teardown; 107 + 108 + map_push(); 109 + ASSERT_OK(skel->bss->err, "map_push_initial"); 110 + 111 + map_push(); 112 + ASSERT_LT(skel->bss->err, 0, "map_push_when_full"); 113 + ASSERT_EQ(skel->bss->err, -E2BIG, "map_push_when_full"); 114 + 115 + map_peek(); 116 + ASSERT_OK(skel->bss->err, "map_peek"); 117 + 118 + map_pop(); 119 + ASSERT_OK(skel->bss->err, "map_pop"); 120 + 121 + map_peek(); 122 + ASSERT_LT(skel->bss->err, 0, "map_peek_when_empty"); 123 + ASSERT_EQ(skel->bss->err, -ENOENT, "map_peek_when_empty"); 124 + 125 + map_pop(); 126 + ASSERT_LT(skel->bss->err, 0, "map_pop_when_empty"); 127 + ASSERT_EQ(skel->bss->err, -ENOENT, "map_pop_when_empty"); 128 + 129 + teardown: 130 + teardown(&skel); 131 + } 132 + 133 + static void map_ops_for_each_subtest(void) 134 + { 135 + struct test_map_ops *skel; 136 + 137 + if (setup(&skel)) 138 + goto teardown; 139 + 140 + map_for_each_pass(); 141 + /* expect to iterate over 1 element */ 142 + ASSERT_EQ(skel->bss->err, 1, "map_for_each_no_flags"); 143 + 144 + map_for_each_fail(); 145 + ASSERT_LT(skel->bss->err, 0, "map_for_each_with_flags"); 146 + ASSERT_EQ(skel->bss->err, -EINVAL, "map_for_each_with_flags"); 147 + 148 + teardown: 149 + teardown(&skel); 150 + } 151 + 152 + void test_map_ops(void) 153 + { 154 + if (test__start_subtest("map_ops_update_delete")) 155 + map_ops_update_delete_subtest(); 156 + 157 + if (test__start_subtest("map_ops_push_peek_pop")) 158 + map_ops_push_peek_pop_subtest(); 159 + 160 + if (test__start_subtest("map_ops_for_each")) 161 + map_ops_for_each_subtest(); 162 + }
+138
tools/testing/selftests/bpf/progs/test_map_ops.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

/* One-entry hash map: a second BPF_NOEXIST update on the same key is
 * expected to fail with -EEXIST.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} hash_map SEC(".maps");

/* One-slot stack map: a second push is expected to fail with -E2BIG,
 * and peek/pop on an empty stack with -ENOENT.
 */
struct {
	__uint(type, BPF_MAP_TYPE_STACK);
	__uint(max_entries, 1);
	__type(value, int);
} stack_map SEC(".maps");

/* One-entry array map used by the bpf_for_each_map_elem() programs. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} array_map SEC(".maps");

/* pid of the test process; written by userspace before load so that
 * events from other processes are ignored.
 */
const volatile pid_t pid;
/* Result of the most recent map helper call; read by the userspace
 * side of the test from .bss.
 */
long err = 0;

/* No-op iterator callback for bpf_for_each_map_elem(); returning 0
 * continues iteration over the remaining elements.
 */
static u64 callback(u64 map, u64 key, u64 val, u64 ctx, u64 flags)
{
	return 0;
}

/* Triggered by getpid(); records the bpf_map_update_elem() result. */
SEC("tp/syscalls/sys_enter_getpid")
int map_update(void *ctx)
{
	const int key = 0;
	const int val = 1;

	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

	err = bpf_map_update_elem(&hash_map, &key, &val, BPF_NOEXIST);

	return 0;
}

/* Triggered by getppid(); records the bpf_map_delete_elem() result. */
SEC("tp/syscalls/sys_enter_getppid")
int map_delete(void *ctx)
{
	const int key = 0;

	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

	err = bpf_map_delete_elem(&hash_map, &key);

	return 0;
}

/* Triggered by getuid(); records the bpf_map_push_elem() result. */
SEC("tp/syscalls/sys_enter_getuid")
int map_push(void *ctx)
{
	const int val = 1;

	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

	err = bpf_map_push_elem(&stack_map, &val, 0);

	return 0;
}

/* Triggered by geteuid(); records the bpf_map_pop_elem() result. */
SEC("tp/syscalls/sys_enter_geteuid")
int map_pop(void *ctx)
{
	int val;

	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

	err = bpf_map_pop_elem(&stack_map, &val);

	return 0;
}

/* Triggered by getgid(); records the bpf_map_peek_elem() result. */
SEC("tp/syscalls/sys_enter_getgid")
int map_peek(void *ctx)
{
	int val;

	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

	err = bpf_map_peek_elem(&stack_map, &val);

	return 0;
}

/* Triggered by gettid(); with flags == 0, bpf_for_each_map_elem()
 * returns the number of elements iterated (expected: 1).
 */
SEC("tp/syscalls/sys_enter_gettid")
int map_for_each_pass(void *ctx)
{
	const int key = 0;
	const int val = 1;
	const u64 flags = 0;
	int callback_ctx;

	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

	bpf_map_update_elem(&array_map, &key, &val, flags);

	err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags);

	return 0;
}

/* Triggered by getpgid(); passes non-zero flags to
 * bpf_for_each_map_elem(), which is expected to fail with -EINVAL.
 */
SEC("tp/syscalls/sys_enter_getpgid")
int map_for_each_fail(void *ctx)
{
	const int key = 0;
	const int val = 1;
	const u64 flags = BPF_NOEXIST;
	int callback_ctx;

	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;

	bpf_map_update_elem(&array_map, &key, &val, flags);

	/* calling for_each with non-zero flags will return error */
	err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags);

	return 0;
}