Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Validate frozen map contents stay frozen

Test that frozen and mmap()'ed BPF map can't be mprotect()'ed as writable or
executable memory. Also validate that "downgrading" from writable to read-only
doesn't screw up internal writable count accounting for the purposes of map
freezing.

Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20200410202613.3679837-2-andriin@fb.com

Authored by Andrii Nakryiko; committed by Daniel Borkmann.
642c1654 1f6cb19b

+60 -2
+60 -2
tools/testing/selftests/bpf/prog_tests/mmap.c
··· 19 19 const size_t map_sz = roundup_page(sizeof(struct map_data)); 20 20 const int zero = 0, one = 1, two = 2, far = 1500; 21 21 const long page_size = sysconf(_SC_PAGE_SIZE); 22 - int err, duration = 0, i, data_map_fd; 22 + int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd; 23 23 struct bpf_map *data_map, *bss_map; 24 24 void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2; 25 25 struct test_mmap__bss *bss_data; 26 + struct bpf_map_info map_info; 27 + __u32 map_info_sz = sizeof(map_info); 26 28 struct map_data *map_data; 27 29 struct test_mmap *skel; 28 30 __u64 val = 0; 29 - 30 31 31 32 skel = test_mmap__open_and_load(); 32 33 if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n")) ··· 37 36 data_map = skel->maps.data_map; 38 37 data_map_fd = bpf_map__fd(data_map); 39 38 39 + /* get map's ID */ 40 + memset(&map_info, 0, map_info_sz); 41 + err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz); 42 + if (CHECK(err, "map_get_info", "failed %d\n", errno)) 43 + goto cleanup; 44 + data_map_id = map_info.id; 45 + 46 + /* mmap BSS map */ 40 47 bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED, 41 48 bpf_map__fd(bss_map), 0); 42 49 if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap", ··· 107 98 "data_map freeze succeeded: err=%d, errno=%d\n", err, errno)) 108 99 goto cleanup; 109 100 101 + err = mprotect(map_mmaped, map_sz, PROT_READ); 102 + if (CHECK(err, "mprotect_ro", "mprotect to r/o failed %d\n", errno)) 103 + goto cleanup; 104 + 110 105 /* unmap R/W mapping */ 111 106 err = munmap(map_mmaped, map_sz); 112 107 map_mmaped = NULL; ··· 124 111 map_mmaped = NULL; 125 112 goto cleanup; 126 113 } 114 + err = mprotect(map_mmaped, map_sz, PROT_WRITE); 115 + if (CHECK(!err, "mprotect_wr", "mprotect() succeeded unexpectedly!\n")) 116 + goto cleanup; 117 + err = mprotect(map_mmaped, map_sz, PROT_EXEC); 118 + if (CHECK(!err, "mprotect_ex", "mprotect() succeeded unexpectedly!\n")) 119 + goto cleanup; 127 120 map_data = 
map_mmaped; 128 121 129 122 /* map/unmap in a loop to test ref counting */ ··· 216 197 CHECK_FAIL(map_data->val[far] != 3 * 321); 217 198 218 199 munmap(tmp2, 4 * page_size); 200 + 201 + tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0); 202 + if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno)) 203 + goto cleanup; 204 + 205 + test_mmap__destroy(skel); 206 + skel = NULL; 207 + CHECK_FAIL(munmap(bss_mmaped, bss_sz)); 208 + bss_mmaped = NULL; 209 + CHECK_FAIL(munmap(map_mmaped, map_sz)); 210 + map_mmaped = NULL; 211 + 212 + /* map should be still held by active mmap */ 213 + tmp_fd = bpf_map_get_fd_by_id(data_map_id); 214 + if (CHECK(tmp_fd < 0, "get_map_by_id", "failed %d\n", errno)) { 215 + munmap(tmp1, map_sz); 216 + goto cleanup; 217 + } 218 + close(tmp_fd); 219 + 220 + /* this should release data map finally */ 221 + munmap(tmp1, map_sz); 222 + 223 + /* we need to wait for RCU grace period */ 224 + for (i = 0; i < 10000; i++) { 225 + __u32 id = data_map_id - 1; 226 + if (bpf_map_get_next_id(id, &id) || id > data_map_id) 227 + break; 228 + usleep(1); 229 + } 230 + 231 + /* should fail to get map FD by non-existing ID */ 232 + tmp_fd = bpf_map_get_fd_by_id(data_map_id); 233 + if (CHECK(tmp_fd >= 0, "get_map_by_id_after", 234 + "unexpectedly succeeded %d\n", tmp_fd)) { 235 + close(tmp_fd); 236 + goto cleanup; 237 + } 238 + 219 239 cleanup: 220 240 if (bss_mmaped) 221 241 CHECK_FAIL(munmap(bss_mmaped, bss_sz));