Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Add test verifying bpf_ringbuf_reserve retval use in map ops

Add a test_ringbuf_map_key test prog, borrowing heavily from extant
test_ringbuf.c. The program tries to use the result of
bpf_ringbuf_reserve as map_key, which was not possible before previous
commits in this series. The test runner added to prog_tests/ringbuf.c
verifies that the program loads and does basic sanity checks to confirm
that it runs as expected.

Also, refactor test_ringbuf such that runners for existing test_ringbuf
and newly-added test_ringbuf_map_key are subtests of 'ringbuf' top-level
test.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Acked-by: Yonghong Song <yhs@fb.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20221020160721.4030492-3-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Dave Marchevsky and committed by
Alexei Starovoitov
51ee71d3 d1673304

+140 -4
+5 -3
tools/testing/selftests/bpf/Makefile
··· 359 359 test_subskeleton.skel.h test_subskeleton_lib.skel.h \ 360 360 test_usdt.skel.h 361 361 362 - LSKELS := fentry_test.c fexit_test.c fexit_sleep.c \ 363 - test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \ 364 - map_ptr_kern.c core_kern.c core_kern_overflow.c 362 + LSKELS := fentry_test.c fexit_test.c fexit_sleep.c atomics.c \ 363 + trace_printk.c trace_vprintk.c map_ptr_kern.c \ 364 + core_kern.c core_kern_overflow.c test_ringbuf.c \ 365 + test_ringbuf_map_key.c 366 + 365 367 # Generate both light skeleton and libbpf skeleton for these 366 368 LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test.c \ 367 369 kfunc_call_test_subprog.c
+65 -1
tools/testing/selftests/bpf/prog_tests/ringbuf.c
··· 13 13 #include <linux/perf_event.h> 14 14 #include <linux/ring_buffer.h> 15 15 #include "test_ringbuf.lskel.h" 16 + #include "test_ringbuf_map_key.lskel.h" 16 17 17 18 #define EDONE 7777 18 19 ··· 59 58 } 60 59 } 61 60 61 + static struct test_ringbuf_map_key_lskel *skel_map_key; 62 62 static struct test_ringbuf_lskel *skel; 63 63 static struct ring_buffer *ringbuf; 64 64 ··· 83 81 return (void *)(long)ring_buffer__poll(ringbuf, timeout); 84 82 } 85 83 86 - void test_ringbuf(void) 84 + static void ringbuf_subtest(void) 87 85 { 88 86 const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample); 89 87 pthread_t thread; ··· 298 296 cleanup: 299 297 ring_buffer__free(ringbuf); 300 298 test_ringbuf_lskel__destroy(skel); 299 + } 300 + 301 + static int process_map_key_sample(void *ctx, void *data, size_t len) 302 + { 303 + struct sample *s; 304 + int err, val; 305 + 306 + s = data; 307 + switch (s->seq) { 308 + case 1: 309 + ASSERT_EQ(s->value, 42, "sample_value"); 310 + err = bpf_map_lookup_elem(skel_map_key->maps.hash_map.map_fd, 311 + s, &val); 312 + ASSERT_OK(err, "hash_map bpf_map_lookup_elem"); 313 + ASSERT_EQ(val, 1, "hash_map val"); 314 + return -EDONE; 315 + default: 316 + return 0; 317 + } 318 + } 319 + 320 + static void ringbuf_map_key_subtest(void) 321 + { 322 + int err; 323 + 324 + skel_map_key = test_ringbuf_map_key_lskel__open(); 325 + if (!ASSERT_OK_PTR(skel_map_key, "test_ringbuf_map_key_lskel__open")) 326 + return; 327 + 328 + skel_map_key->maps.ringbuf.max_entries = getpagesize(); 329 + skel_map_key->bss->pid = getpid(); 330 + 331 + err = test_ringbuf_map_key_lskel__load(skel_map_key); 332 + if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__load")) 333 + goto cleanup; 334 + 335 + ringbuf = ring_buffer__new(skel_map_key->maps.ringbuf.map_fd, 336 + process_map_key_sample, NULL, NULL); 337 + if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new")) 338 + goto cleanup; 339 + 340 + err = test_ringbuf_map_key_lskel__attach(skel_map_key); 341 + if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__attach")) 342 + goto cleanup_ringbuf; 343 + 344 + syscall(__NR_getpgid); 345 + ASSERT_EQ(skel_map_key->bss->seq, 1, "skel_map_key->bss->seq"); 346 + err = ring_buffer__poll(ringbuf, -1); 347 + ASSERT_EQ(err, -EDONE, "ring_buffer__poll"); 348 + 349 + cleanup_ringbuf: 350 + ring_buffer__free(ringbuf); 351 + cleanup: 352 + test_ringbuf_map_key_lskel__destroy(skel_map_key); 353 + } 354 + 355 + void test_ringbuf(void) 356 + { 357 + if (test__start_subtest("ringbuf")) 358 + ringbuf_subtest(); 359 + if (test__start_subtest("ringbuf_map_key")) 360 + ringbuf_map_key_subtest(); 301 361 }
+70
tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ 3 + 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + #include "bpf_misc.h" 7 + 8 + char _license[] SEC("license") = "GPL"; 9 + 10 + struct sample { 11 + int pid; 12 + int seq; 13 + long value; 14 + char comm[16]; 15 + }; 16 + 17 + struct { 18 + __uint(type, BPF_MAP_TYPE_RINGBUF); 19 + } ringbuf SEC(".maps"); 20 + 21 + struct { 22 + __uint(type, BPF_MAP_TYPE_HASH); 23 + __uint(max_entries, 1000); 24 + __type(key, struct sample); 25 + __type(value, int); 26 + } hash_map SEC(".maps"); 27 + 28 + /* inputs */ 29 + int pid = 0; 30 + 31 + /* inner state */ 32 + long seq = 0; 33 + 34 + SEC("fentry/" SYS_PREFIX "sys_getpgid") 35 + int test_ringbuf_mem_map_key(void *ctx) 36 + { 37 + int cur_pid = bpf_get_current_pid_tgid() >> 32; 38 + struct sample *sample, sample_copy; 39 + int *lookup_val; 40 + 41 + if (cur_pid != pid) 42 + return 0; 43 + 44 + sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0); 45 + if (!sample) 46 + return 0; 47 + 48 + sample->pid = pid; 49 + bpf_get_current_comm(sample->comm, sizeof(sample->comm)); 50 + sample->seq = ++seq; 51 + sample->value = 42; 52 + 53 + /* test using 'sample' (PTR_TO_MEM | MEM_ALLOC) as map key arg 54 + */ 55 + lookup_val = (int *)bpf_map_lookup_elem(&hash_map, sample); 56 + 57 + /* workaround - memcpy is necessary so that verifier doesn't 58 + * complain with: 59 + * verifier internal error: more than one arg with ref_obj_id R3 60 + * when trying to do bpf_map_update_elem(&hash_map, sample, &sample->seq, BPF_ANY); 61 + * 62 + * Since bpf_map_lookup_elem above uses 'sample' as key, test using 63 + * sample field as value below 64 + */ 65 + __builtin_memcpy(&sample_copy, sample, sizeof(struct sample)); 66 + bpf_map_update_elem(&hash_map, &sample_copy, &sample->seq, BPF_ANY); 67 + 68 + bpf_ringbuf_submit(sample, 0); 69 + return 0; 70 + }