Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: mmap: Use runtime page size

Replace hardcoded 4096 with runtime value in the userspace part of
the test and set bpf table sizes dynamically according to the value.

Do not switch to ASSERT macros, keep CHECK, for consistency with the
rest of the test. Can be a separate cleanup patch.

Signed-off-by: Yauheni Kaliuta <yauheni.kaliuta@redhat.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210408061310.95877-5-yauheni.kaliuta@redhat.com

Authored by Yauheni Kaliuta; committed by Andrii Nakryiko.
34090aaf 7a85e4df

Total: +19 -7
+19 -5
tools/testing/selftests/bpf/prog_tests/mmap.c
@@ -29,22 +29,36 @@
 	struct test_mmap *skel;
 	__u64 val = 0;
 
-	skel = test_mmap__open_and_load();
-	if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
+	skel = test_mmap__open();
+	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
 		return;
+
+	err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size);
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	/* at least 4 pages of data */
+	err = bpf_map__set_max_entries(skel->maps.data_map,
+				       4 * (page_size / sizeof(u64)));
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	err = test_mmap__load(skel);
+	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+		goto cleanup;
 
 	bss_map = skel->maps.bss;
 	data_map = skel->maps.data_map;
 	data_map_fd = bpf_map__fd(data_map);
 
 	rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
-	tmp1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
+	tmp1 = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
 	if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
-		munmap(tmp1, 4096);
+		munmap(tmp1, page_size);
 		goto cleanup;
 	}
 	/* now double-check if it's mmap()'able at all */
-	tmp1 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, rdmap_fd, 0);
+	tmp1 = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rdmap_fd, 0);
 	if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
 		goto cleanup;
 
-2
tools/testing/selftests/bpf/progs/test_mmap.c
@@ -9,7 +9,6 @@
 
 struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 4096);
 	__uint(map_flags, BPF_F_MMAPABLE | BPF_F_RDONLY_PROG);
 	__type(key, __u32);
 	__type(value, char);
@@ -17,7 +16,6 @@
 
 struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 512 * 4); /* at least 4 pages of data */
 	__uint(map_flags, BPF_F_MMAPABLE);
 	__type(key, __u32);
 	__type(value, __u64);