Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libbpf: Add support for bpf_arena.

mmap() bpf_arena right after creation, since the kernel needs to
remember the address returned from mmap. This is user_vm_start.
LLVM will generate bpf_arena_cast_user() instructions where
necessary and JIT will add upper 32-bit of user_vm_start
to such pointers.

Fix up bpf_map_mmap_sz() to compute mmap size as
map->value_size * map->max_entries for arrays and
PAGE_SIZE * map->max_entries for arena.

Don't set BTF at arena creation time, since the arena map type doesn't support BTF.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240308010812.89848-9-alexei.starovoitov@gmail.com

Authored by Alexei Starovoitov and committed by Andrii Nakryiko
79ff13e9 4d2b5608

+46 -8
+39 -8
tools/lib/bpf/libbpf.c
··· 185 185 [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter", 186 186 [BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf", 187 187 [BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage", 188 + [BPF_MAP_TYPE_ARENA] = "arena", 188 189 }; 189 190 190 191 static const char * const prog_type_name[] = { ··· 1685 1684 return map; 1686 1685 } 1687 1686 1688 - static size_t bpf_map_mmap_sz(unsigned int value_sz, unsigned int max_entries) 1687 + static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries) 1689 1688 { 1690 1689 const long page_sz = sysconf(_SC_PAGE_SIZE); 1691 1690 size_t map_sz; ··· 1693 1692 map_sz = (size_t)roundup(value_sz, 8) * max_entries; 1694 1693 map_sz = roundup(map_sz, page_sz); 1695 1694 return map_sz; 1695 + } 1696 + 1697 + static size_t bpf_map_mmap_sz(const struct bpf_map *map) 1698 + { 1699 + const long page_sz = sysconf(_SC_PAGE_SIZE); 1700 + 1701 + switch (map->def.type) { 1702 + case BPF_MAP_TYPE_ARRAY: 1703 + return array_map_mmap_sz(map->def.value_size, map->def.max_entries); 1704 + case BPF_MAP_TYPE_ARENA: 1705 + return page_sz * map->def.max_entries; 1706 + default: 1707 + return 0; /* not supported */ 1708 + } 1696 1709 } 1697 1710 1698 1711 static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz) ··· 1862 1847 pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n", 1863 1848 map->name, map->sec_idx, map->sec_offset, def->map_flags); 1864 1849 1865 - mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries); 1850 + mmap_sz = bpf_map_mmap_sz(map); 1866 1851 map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, 1867 1852 MAP_SHARED | MAP_ANONYMOUS, -1, 0); 1868 1853 if (map->mmaped == MAP_FAILED) { ··· 5032 5017 case BPF_MAP_TYPE_SOCKHASH: 5033 5018 case BPF_MAP_TYPE_QUEUE: 5034 5019 case BPF_MAP_TYPE_STACK: 5020 + case BPF_MAP_TYPE_ARENA: 5035 5021 create_attr.btf_fd = 0; 5036 5022 create_attr.btf_key_type_id = 0; 5037 5023 create_attr.btf_value_type_id = 0; ··· 5277 5261 if (err 
< 0) 5278 5262 goto err_out; 5279 5263 } 5280 - 5264 + if (map->def.type == BPF_MAP_TYPE_ARENA) { 5265 + map->mmaped = mmap((void *)map->map_extra, bpf_map_mmap_sz(map), 5266 + PROT_READ | PROT_WRITE, 5267 + map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED, 5268 + map->fd, 0); 5269 + if (map->mmaped == MAP_FAILED) { 5270 + err = -errno; 5271 + map->mmaped = NULL; 5272 + pr_warn("map '%s': failed to mmap arena: %d\n", 5273 + map->name, err); 5274 + return err; 5275 + } 5276 + } 5281 5277 if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) { 5282 5278 err = init_map_in_map_slots(obj, map); 5283 5279 if (err < 0) ··· 8789 8761 if (map->mmaped) { 8790 8762 size_t mmap_sz; 8791 8763 8792 - mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries); 8764 + mmap_sz = bpf_map_mmap_sz(map); 8793 8765 munmap(map->mmaped, mmap_sz); 8794 8766 map->mmaped = NULL; 8795 8767 } ··· 10023 9995 return libbpf_err(-EBUSY); 10024 9996 10025 9997 if (map->mmaped) { 10026 - int err; 10027 9998 size_t mmap_old_sz, mmap_new_sz; 9999 + int err; 10028 10000 10029 - mmap_old_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries); 10030 - mmap_new_sz = bpf_map_mmap_sz(size, map->def.max_entries); 10001 + if (map->def.type != BPF_MAP_TYPE_ARRAY) 10002 + return -EOPNOTSUPP; 10003 + 10004 + mmap_old_sz = bpf_map_mmap_sz(map); 10005 + mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries); 10031 10006 err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz); 10032 10007 if (err) { 10033 10008 pr_warn("map '%s': failed to resize memory-mapped region: %d\n", ··· 13561 13530 13562 13531 for (i = 0; i < s->map_cnt; i++) { 13563 13532 struct bpf_map *map = *s->maps[i].map; 13564 - size_t mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries); 13533 + size_t mmap_sz = bpf_map_mmap_sz(map); 13565 13534 int prot, map_fd = map->fd; 13566 13535 void **mmaped = s->maps[i].mmaped; 13567 13536
+7
tools/lib/bpf/libbpf_probes.c
··· 338 338 key_size = 0; 339 339 max_entries = 1; 340 340 break; 341 + case BPF_MAP_TYPE_ARENA: 342 + key_size = 0; 343 + value_size = 0; 344 + max_entries = 1; /* one page */ 345 + opts.map_extra = 0; /* can mmap() at any address */ 346 + opts.map_flags = BPF_F_MMAPABLE; 347 + break; 341 348 case BPF_MAP_TYPE_HASH: 342 349 case BPF_MAP_TYPE_ARRAY: 343 350 case BPF_MAP_TYPE_PROG_ARRAY: