Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf/arena: add bpf_arena_reserve_pages kfunc

Add a new BPF arena kfunc for reserving a range of arena virtual
addresses without backing them with pages. This prevents the range from
being populated using bpf_arena_alloc_pages().

Acked-by: Yonghong Song <yonghong.song@linux.dev>
Signed-off-by: Emil Tsalapatis <emil@etsalapatis.com>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250709191312.29840-2-emil@etsalapatis.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Emil Tsalapatis; committed by Alexei Starovoitov.
8fc3d2d8 ad97cb2e

+43
+43
kernel/bpf/arena.c
··· 550 550 } 551 551 } 552 552 553 + /* 554 + * Reserve an arena virtual address range without populating it. This call stops 555 + * bpf_arena_alloc_pages from adding pages to this range. 556 + */ 557 + static int arena_reserve_pages(struct bpf_arena *arena, long uaddr, u32 page_cnt) 558 + { 559 + long page_cnt_max = (arena->user_vm_end - arena->user_vm_start) >> PAGE_SHIFT; 560 + long pgoff; 561 + int ret; 562 + 563 + if (uaddr & ~PAGE_MASK) 564 + return 0; 565 + 566 + pgoff = compute_pgoff(arena, uaddr); 567 + if (pgoff + page_cnt > page_cnt_max) 568 + return -EINVAL; 569 + 570 + guard(mutex)(&arena->lock); 571 + 572 + /* Cannot guard already allocated pages. */ 573 + ret = is_range_tree_set(&arena->rt, pgoff, page_cnt); 574 + if (ret) 575 + return -EBUSY; 576 + 577 + /* "Allocate" the region to prevent it from being allocated. */ 578 + return range_tree_clear(&arena->rt, pgoff, page_cnt); 579 + } 580 + 553 581 __bpf_kfunc_start_defs(); 554 582 555 583 __bpf_kfunc void *bpf_arena_alloc_pages(void *p__map, void *addr__ign, u32 page_cnt, ··· 601 573 return; 602 574 arena_free_pages(arena, (long)ptr__ign, page_cnt); 603 575 } 576 + 577 + __bpf_kfunc int bpf_arena_reserve_pages(void *p__map, void *ptr__ign, u32 page_cnt) 578 + { 579 + struct bpf_map *map = p__map; 580 + struct bpf_arena *arena = container_of(map, struct bpf_arena, map); 581 + 582 + if (map->map_type != BPF_MAP_TYPE_ARENA) 583 + return -EINVAL; 584 + 585 + if (!page_cnt) 586 + return 0; 587 + 588 + return arena_reserve_pages(arena, (long)ptr__ign, page_cnt); 589 + } 604 590 __bpf_kfunc_end_defs(); 605 591 606 592 BTF_KFUNCS_START(arena_kfuncs) 607 593 BTF_ID_FLAGS(func, bpf_arena_alloc_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE | KF_ARENA_RET | KF_ARENA_ARG2) 608 594 BTF_ID_FLAGS(func, bpf_arena_free_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE | KF_ARENA_ARG2) 595 + BTF_ID_FLAGS(func, bpf_arena_reserve_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE | KF_ARENA_ARG2) 609 596 BTF_KFUNCS_END(arena_kfuncs) 610 597 611 598 
static const struct btf_kfunc_id_set common_kfunc_set = {