Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Add tests for kptr_ref refcounting

Check at runtime how various operations for kptr_ref affect its refcount
and verify against the actual count.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20220511194654.765705-5-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Kumar Kartikeya Dwivedi and committed by Alexei Starovoitov (commit hashes: 0ef6740e, 04accf79).

Total diffstat: +128 -5 (2 files changed)
+24 -3
tools/testing/selftests/bpf/prog_tests/map_kptr.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <test_progs.h> 3 + #include <network_helpers.h> 3 4 4 5 #include "map_kptr.skel.h" 5 6 #include "map_kptr_fail.skel.h" ··· 82 81 } 83 82 } 84 83 85 - static void test_map_kptr_success(void) 84 + static void test_map_kptr_success(bool test_run) 86 85 { 86 + LIBBPF_OPTS(bpf_test_run_opts, opts, 87 + .data_in = &pkt_v4, 88 + .data_size_in = sizeof(pkt_v4), 89 + .repeat = 1, 90 + ); 87 91 struct map_kptr *skel; 88 92 int key = 0, ret; 89 93 char buf[24]; 90 94 91 95 skel = map_kptr__open_and_load(); 92 96 if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load")) 97 + return; 98 + 99 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref), &opts); 100 + ASSERT_OK(ret, "test_map_kptr_ref refcount"); 101 + ASSERT_OK(opts.retval, "test_map_kptr_ref retval"); 102 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts); 103 + ASSERT_OK(ret, "test_map_kptr_ref2 refcount"); 104 + ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval"); 105 + 106 + if (test_run) 93 107 return; 94 108 95 109 ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0); ··· 132 116 133 117 void test_map_kptr(void) 134 118 { 135 - if (test__start_subtest("success")) 136 - test_map_kptr_success(); 119 + if (test__start_subtest("success")) { 120 + test_map_kptr_success(false); 121 + /* Do test_run twice, so that we see refcount going back to 1 122 + * after we leave it in map from first iteration. 123 + */ 124 + test_map_kptr_success(true); 125 + } 137 126 test_map_kptr_fail(); 138 127 }
+104 -2
tools/testing/selftests/bpf/progs/map_kptr.c
··· 141 141 int test_map_kptr(struct __sk_buff *ctx) 142 142 { 143 143 struct map_value *v; 144 - int i, key = 0; 144 + int key = 0; 145 145 146 146 #define TEST(map) \ 147 147 v = bpf_map_lookup_elem(&map, &key); \ ··· 162 162 int test_map_in_map_kptr(struct __sk_buff *ctx) 163 163 { 164 164 struct map_value *v; 165 - int i, key = 0; 165 + int key = 0; 166 166 void *map; 167 167 168 168 #define TEST(map_in_map) \ ··· 184 184 TEST(hash_of_lru_hash_maps); 185 185 186 186 #undef TEST 187 + return 0; 188 + } 189 + 190 + SEC("tc") 191 + int test_map_kptr_ref(struct __sk_buff *ctx) 192 + { 193 + struct prog_test_ref_kfunc *p, *p_st; 194 + unsigned long arg = 0; 195 + struct map_value *v; 196 + int key = 0, ret; 197 + 198 + p = bpf_kfunc_call_test_acquire(&arg); 199 + if (!p) 200 + return 1; 201 + 202 + p_st = p->next; 203 + if (p_st->cnt.refs.counter != 2) { 204 + ret = 2; 205 + goto end; 206 + } 207 + 208 + v = bpf_map_lookup_elem(&array_map, &key); 209 + if (!v) { 210 + ret = 3; 211 + goto end; 212 + } 213 + 214 + p = bpf_kptr_xchg(&v->ref_ptr, p); 215 + if (p) { 216 + ret = 4; 217 + goto end; 218 + } 219 + if (p_st->cnt.refs.counter != 2) 220 + return 5; 221 + 222 + p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0); 223 + if (!p) 224 + return 6; 225 + if (p_st->cnt.refs.counter != 3) { 226 + ret = 7; 227 + goto end; 228 + } 229 + bpf_kfunc_call_test_release(p); 230 + if (p_st->cnt.refs.counter != 2) 231 + return 8; 232 + 233 + p = bpf_kptr_xchg(&v->ref_ptr, NULL); 234 + if (!p) 235 + return 9; 236 + bpf_kfunc_call_test_release(p); 237 + if (p_st->cnt.refs.counter != 1) 238 + return 10; 239 + 240 + p = bpf_kfunc_call_test_acquire(&arg); 241 + if (!p) 242 + return 11; 243 + p = bpf_kptr_xchg(&v->ref_ptr, p); 244 + if (p) { 245 + ret = 12; 246 + goto end; 247 + } 248 + if (p_st->cnt.refs.counter != 2) 249 + return 13; 250 + /* Leave in map */ 251 + 252 + return 0; 253 + end: 254 + bpf_kfunc_call_test_release(p); 255 + return ret; 256 + } 257 + 258 + SEC("tc") 259 + 
/*
 * Second stage of the kptr_ref refcount test, run after test_map_kptr_ref
 * has deliberately left a referenced kptr (refcount == 2) in array_map.
 * Verifies that xchg-ing the pointer out of the map and back in does not
 * change the object's refcount. Returns 0 on success, or a distinct
 * non-zero code identifying the failing check (read back by userspace
 * via the test_run retval).
 *
 * NOTE(review): p_st is an untrusted (non-owning) read of v->ref_ptr used
 * only to observe the refcount; p carries the actual reference ownership.
 */
int test_map_kptr_ref2(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p, *p_st;
	struct map_value *v;
	int key = 0;

	v = bpf_map_lookup_elem(&array_map, &key);
	if (!v)
		return 1;

	/* Previous program left the object in the map with refcount 2. */
	p_st = v->ref_ptr;
	if (!p_st || p_st->cnt.refs.counter != 2)
		return 2;

	/* Moving the pointer out of the map transfers, not drops, the ref. */
	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
	if (!p)
		return 3;
	if (p_st->cnt.refs.counter != 2) {
		bpf_kfunc_call_test_release(p);
		return 4;
	}

	/* Swap it back in; slot must have been empty (xchg returns NULL). */
	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p) {
		bpf_kfunc_call_test_release(p);
		return 5;
	}
	/* Refcount unchanged after the round-trip; object stays in the map. */
	if (p_st->cnt.refs.counter != 2)
		return 6;

	return 0;
}