Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'introduce-bpf_object__prepare'

Mykyta Yatsenko says:

====================
Introduce bpf_object__prepare

From: Mykyta Yatsenko <yatsenko@meta.com>

We are introducing a new function in the libbpf API, bpf_object__prepare,
which provides more granular control over the process of loading a
bpf_object. bpf_object__prepare performs ELF processing, relocations,
prepares final state of BPF program instructions (accessible with
bpf_program__insns()), creates and potentially pins maps, and stops short
of loading BPF programs.

There are a couple of anticipated use cases for this API:
* Use BPF token for freplace programs that might need to lookup BTF of
other programs (BPF token creation can't be moved to the open step, as the
open step is a "no privilege assumption" step, so that tools like bpftool
can generate a skeleton, discover the structure of a BPF object, etc).
* Stopping at prepare gives users finalized BPF program
instructions (with subprogs appended, everything relocated and
finalized, etc). And that property can be taken advantage of by
veristat (and similar tools) that might want to process one program at
a time, but would like to avoid relatively slow ELF parsing and
processing; and even BPF selftests itself (RUN_TESTS part of it at
least) would benefit from this by eliminating waste of re-processing
ELF many times.
====================

Link: https://patch.msgid.link/20250303135752.158343-1-mykyta.yatsenko5@gmail.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Andrii Nakryiko and committed by
Alexei Starovoitov
2941e215 19856a52

+273 -67
+132 -67
tools/lib/bpf/libbpf.c
··· 670 670 671 671 struct usdt_manager; 672 672 673 + enum bpf_object_state { 674 + OBJ_OPEN, 675 + OBJ_PREPARED, 676 + OBJ_LOADED, 677 + }; 678 + 673 679 struct bpf_object { 674 680 char name[BPF_OBJ_NAME_LEN]; 675 681 char license[64]; 676 682 __u32 kern_version; 677 683 684 + enum bpf_object_state state; 678 685 struct bpf_program *programs; 679 686 size_t nr_programs; 680 687 struct bpf_map *maps; ··· 693 686 int nr_extern; 694 687 int kconfig_map_idx; 695 688 696 - bool loaded; 697 689 bool has_subcalls; 698 690 bool has_rodata; 699 691 ··· 1517 1511 obj->kconfig_map_idx = -1; 1518 1512 1519 1513 obj->kern_version = get_kernel_version(); 1520 - obj->loaded = false; 1514 + obj->state = OBJ_OPEN; 1521 1515 1522 1516 return obj; 1523 1517 } ··· 4851 4845 return 0; 4852 4846 } 4853 4847 4848 + static bool map_is_created(const struct bpf_map *map) 4849 + { 4850 + return map->obj->state >= OBJ_PREPARED || map->reused; 4851 + } 4852 + 4854 4853 bool bpf_map__autocreate(const struct bpf_map *map) 4855 4854 { 4856 4855 return map->autocreate; ··· 4863 4852 4864 4853 int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate) 4865 4854 { 4866 - if (map->obj->loaded) 4855 + if (map_is_created(map)) 4867 4856 return libbpf_err(-EBUSY); 4868 4857 4869 4858 map->autocreate = autocreate; ··· 4957 4946 4958 4947 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) 4959 4948 { 4960 - if (map->obj->loaded) 4949 + if (map_is_created(map)) 4961 4950 return libbpf_err(-EBUSY); 4962 4951 4963 4952 map->def.max_entries = max_entries; ··· 5201 5190 } 5202 5191 5203 5192 static void bpf_map__destroy(struct bpf_map *map); 5204 - 5205 - static bool map_is_created(const struct bpf_map *map) 5206 - { 5207 - return map->obj->loaded || map->reused; 5208 - } 5209 5193 5210 5194 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner) 5211 5195 { ··· 7903 7897 7904 7898 for (i = 0; i < obj->nr_programs; i++) { 7905 7899 prog = 
&obj->programs[i]; 7906 - err = bpf_object__sanitize_prog(obj, prog); 7907 - if (err) 7908 - return err; 7909 - } 7910 - 7911 - for (i = 0; i < obj->nr_programs; i++) { 7912 - prog = &obj->programs[i]; 7913 7900 if (prog_is_subprog(obj, prog)) 7914 7901 continue; 7915 7902 if (!prog->autoload) { ··· 7923 7924 } 7924 7925 7925 7926 bpf_object__free_relocs(obj); 7927 + return 0; 7928 + } 7929 + 7930 + static int bpf_object_prepare_progs(struct bpf_object *obj) 7931 + { 7932 + struct bpf_program *prog; 7933 + size_t i; 7934 + int err; 7935 + 7936 + for (i = 0; i < obj->nr_programs; i++) { 7937 + prog = &obj->programs[i]; 7938 + err = bpf_object__sanitize_prog(obj, prog); 7939 + if (err) 7940 + return err; 7941 + } 7926 7942 return 0; 7927 7943 } 7928 7944 ··· 8557 8543 return 0; 8558 8544 } 8559 8545 8546 + static void bpf_object_unpin(struct bpf_object *obj) 8547 + { 8548 + int i; 8549 + 8550 + /* unpin any maps that were auto-pinned during load */ 8551 + for (i = 0; i < obj->nr_maps; i++) 8552 + if (obj->maps[i].pinned && !obj->maps[i].reused) 8553 + bpf_map__unpin(&obj->maps[i], NULL); 8554 + } 8555 + 8556 + static void bpf_object_post_load_cleanup(struct bpf_object *obj) 8557 + { 8558 + int i; 8559 + 8560 + /* clean up fd_array */ 8561 + zfree(&obj->fd_array); 8562 + 8563 + /* clean up module BTFs */ 8564 + for (i = 0; i < obj->btf_module_cnt; i++) { 8565 + close(obj->btf_modules[i].fd); 8566 + btf__free(obj->btf_modules[i].btf); 8567 + free(obj->btf_modules[i].name); 8568 + } 8569 + obj->btf_module_cnt = 0; 8570 + zfree(&obj->btf_modules); 8571 + 8572 + /* clean up vmlinux BTF */ 8573 + btf__free(obj->btf_vmlinux); 8574 + obj->btf_vmlinux = NULL; 8575 + } 8576 + 8577 + static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path) 8578 + { 8579 + int err; 8580 + 8581 + if (obj->state >= OBJ_PREPARED) { 8582 + pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name); 8583 + return -EINVAL; 8584 + } 8585 + 8586 + err = 
bpf_object_prepare_token(obj); 8587 + err = err ? : bpf_object__probe_loading(obj); 8588 + err = err ? : bpf_object__load_vmlinux_btf(obj, false); 8589 + err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); 8590 + err = err ? : bpf_object__sanitize_maps(obj); 8591 + err = err ? : bpf_object__init_kern_struct_ops_maps(obj); 8592 + err = err ? : bpf_object_adjust_struct_ops_autoload(obj); 8593 + err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); 8594 + err = err ? : bpf_object__sanitize_and_load_btf(obj); 8595 + err = err ? : bpf_object__create_maps(obj); 8596 + err = err ? : bpf_object_prepare_progs(obj); 8597 + 8598 + if (err) { 8599 + bpf_object_unpin(obj); 8600 + bpf_object_unload(obj); 8601 + obj->state = OBJ_LOADED; 8602 + return err; 8603 + } 8604 + 8605 + obj->state = OBJ_PREPARED; 8606 + return 0; 8607 + } 8608 + 8560 8609 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path) 8561 8610 { 8562 - int err, i; 8611 + int err; 8563 8612 8564 8613 if (!obj) 8565 8614 return libbpf_err(-EINVAL); 8566 8615 8567 - if (obj->loaded) { 8616 + if (obj->state >= OBJ_LOADED) { 8568 8617 pr_warn("object '%s': load can't be attempted twice\n", obj->name); 8569 8618 return libbpf_err(-EINVAL); 8570 8619 } ··· 8642 8565 return libbpf_err(-LIBBPF_ERRNO__ENDIAN); 8643 8566 } 8644 8567 8645 - err = bpf_object_prepare_token(obj); 8646 - err = err ? : bpf_object__probe_loading(obj); 8647 - err = err ? : bpf_object__load_vmlinux_btf(obj, false); 8648 - err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); 8649 - err = err ? : bpf_object__sanitize_maps(obj); 8650 - err = err ? : bpf_object__init_kern_struct_ops_maps(obj); 8651 - err = err ? : bpf_object_adjust_struct_ops_autoload(obj); 8652 - err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); 8653 - err = err ? : bpf_object__sanitize_and_load_btf(obj); 8654 - err = err ? 
: bpf_object__create_maps(obj); 8655 - err = err ? : bpf_object__load_progs(obj, extra_log_level); 8568 + if (obj->state < OBJ_PREPARED) { 8569 + err = bpf_object_prepare(obj, target_btf_path); 8570 + if (err) 8571 + return libbpf_err(err); 8572 + } 8573 + err = bpf_object__load_progs(obj, extra_log_level); 8656 8574 err = err ? : bpf_object_init_prog_arrays(obj); 8657 8575 err = err ? : bpf_object_prepare_struct_ops(obj); 8658 8576 ··· 8659 8587 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps); 8660 8588 } 8661 8589 8662 - /* clean up fd_array */ 8663 - zfree(&obj->fd_array); 8590 + bpf_object_post_load_cleanup(obj); 8591 + obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */ 8664 8592 8665 - /* clean up module BTFs */ 8666 - for (i = 0; i < obj->btf_module_cnt; i++) { 8667 - close(obj->btf_modules[i].fd); 8668 - btf__free(obj->btf_modules[i].btf); 8669 - free(obj->btf_modules[i].name); 8593 + if (err) { 8594 + bpf_object_unpin(obj); 8595 + bpf_object_unload(obj); 8596 + pr_warn("failed to load object '%s'\n", obj->path); 8597 + return libbpf_err(err); 8670 8598 } 8671 - free(obj->btf_modules); 8672 - 8673 - /* clean up vmlinux BTF */ 8674 - btf__free(obj->btf_vmlinux); 8675 - obj->btf_vmlinux = NULL; 8676 - 8677 - obj->loaded = true; /* doesn't matter if successfully or not */ 8678 - 8679 - if (err) 8680 - goto out; 8681 8599 8682 8600 return 0; 8683 - out: 8684 - /* unpin any maps that were auto-pinned during load */ 8685 - for (i = 0; i < obj->nr_maps; i++) 8686 - if (obj->maps[i].pinned && !obj->maps[i].reused) 8687 - bpf_map__unpin(&obj->maps[i], NULL); 8601 + } 8688 8602 8689 - bpf_object_unload(obj); 8690 - pr_warn("failed to load object '%s'\n", obj->path); 8691 - return libbpf_err(err); 8603 + int bpf_object__prepare(struct bpf_object *obj) 8604 + { 8605 + return libbpf_err(bpf_object_prepare(obj, NULL)); 8692 8606 } 8693 8607 8694 8608 int bpf_object__load(struct bpf_object *obj) ··· 8924 8866 if (!obj) 8925 8867 
return libbpf_err(-ENOENT); 8926 8868 8927 - if (!obj->loaded) { 8869 + if (obj->state < OBJ_PREPARED) { 8928 8870 pr_warn("object not yet loaded; load it first\n"); 8929 8871 return libbpf_err(-ENOENT); 8930 8872 } ··· 9003 8945 if (!obj) 9004 8946 return libbpf_err(-ENOENT); 9005 8947 9006 - if (!obj->loaded) { 8948 + if (obj->state < OBJ_LOADED) { 9007 8949 pr_warn("object not yet loaded; load it first\n"); 9008 8950 return libbpf_err(-ENOENT); 9009 8951 } ··· 9122 9064 if (IS_ERR_OR_NULL(obj)) 9123 9065 return; 9124 9066 9067 + /* 9068 + * if user called bpf_object__prepare() without ever getting to 9069 + * bpf_object__load(), we need to clean up stuff that is normally 9070 + * cleaned up at the end of loading step 9071 + */ 9072 + bpf_object_post_load_cleanup(obj); 9073 + 9125 9074 usdt_manager_free(obj->usdt_man); 9126 9075 obj->usdt_man = NULL; 9127 9076 ··· 9197 9132 9198 9133 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version) 9199 9134 { 9200 - if (obj->loaded) 9135 + if (obj->state >= OBJ_LOADED) 9201 9136 return libbpf_err(-EINVAL); 9202 9137 9203 9138 obj->kern_version = kern_version; ··· 9294 9229 9295 9230 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) 9296 9231 { 9297 - if (prog->obj->loaded) 9232 + if (prog->obj->state >= OBJ_LOADED) 9298 9233 return libbpf_err(-EINVAL); 9299 9234 9300 9235 prog->autoload = autoload; ··· 9326 9261 { 9327 9262 struct bpf_insn *insns; 9328 9263 9329 - if (prog->obj->loaded) 9264 + if (prog->obj->state >= OBJ_LOADED) 9330 9265 return libbpf_err(-EBUSY); 9331 9266 9332 9267 insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns)); ··· 9369 9304 9370 9305 int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) 9371 9306 { 9372 - if (prog->obj->loaded) 9307 + if (prog->obj->state >= OBJ_LOADED) 9373 9308 return libbpf_err(-EBUSY); 9374 9309 9375 9310 /* if type is not changed, do nothing */ ··· 9400 9335 int 
bpf_program__set_expected_attach_type(struct bpf_program *prog, 9401 9336 enum bpf_attach_type type) 9402 9337 { 9403 - if (prog->obj->loaded) 9338 + if (prog->obj->state >= OBJ_LOADED) 9404 9339 return libbpf_err(-EBUSY); 9405 9340 9406 9341 prog->expected_attach_type = type; ··· 9414 9349 9415 9350 int bpf_program__set_flags(struct bpf_program *prog, __u32 flags) 9416 9351 { 9417 - if (prog->obj->loaded) 9352 + if (prog->obj->state >= OBJ_LOADED) 9418 9353 return libbpf_err(-EBUSY); 9419 9354 9420 9355 prog->prog_flags = flags; ··· 9428 9363 9429 9364 int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level) 9430 9365 { 9431 - if (prog->obj->loaded) 9366 + if (prog->obj->state >= OBJ_LOADED) 9432 9367 return libbpf_err(-EBUSY); 9433 9368 9434 9369 prog->log_level = log_level; ··· 9447 9382 return libbpf_err(-EINVAL); 9448 9383 if (prog->log_size > UINT_MAX) 9449 9384 return libbpf_err(-EINVAL); 9450 - if (prog->obj->loaded) 9385 + if (prog->obj->state >= OBJ_LOADED) 9451 9386 return libbpf_err(-EBUSY); 9452 9387 9453 9388 prog->log_buf = log_buf; ··· 10364 10299 10365 10300 int bpf_map__set_value_size(struct bpf_map *map, __u32 size) 10366 10301 { 10367 - if (map->obj->loaded || map->reused) 10302 + if (map_is_created(map)) 10368 10303 return libbpf_err(-EBUSY); 10369 10304 10370 10305 if (map->mmaped) { ··· 10410 10345 { 10411 10346 size_t actual_sz; 10412 10347 10413 - if (map->obj->loaded || map->reused) 10348 + if (map_is_created(map)) 10414 10349 return libbpf_err(-EBUSY); 10415 10350 10416 10351 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG) ··· 13731 13666 if (!prog || attach_prog_fd < 0) 13732 13667 return libbpf_err(-EINVAL); 13733 13668 13734 - if (prog->obj->loaded) 13669 + if (prog->obj->state >= OBJ_LOADED) 13735 13670 return libbpf_err(-EINVAL); 13736 13671 13737 13672 if (attach_prog_fd && !attach_func_name) {
+13
tools/lib/bpf/libbpf.h
··· 242 242 const struct bpf_object_open_opts *opts); 243 243 244 244 /** 245 + * @brief **bpf_object__prepare()** prepares BPF object for loading: 246 + * performs ELF processing, relocations, prepares final state of BPF program 247 + * instructions (accessible with bpf_program__insns()), creates and 248 + * (potentially) pins maps. Leaves BPF object in the state ready for program 249 + * loading. 250 + * @param obj Pointer to a valid BPF object instance returned by 251 + * **bpf_object__open*()** API 252 + * @return 0, on success; negative error code, otherwise, error code is 253 + * stored in errno 254 + */ 255 + int bpf_object__prepare(struct bpf_object *obj); 256 + 257 + /** 245 258 * @brief **bpf_object__load()** loads BPF object into kernel. 246 259 * @param obj Pointer to a valid BPF object instance returned by 247 260 * **bpf_object__open*()** APIs
+1
tools/lib/bpf/libbpf.map
··· 436 436 bpf_linker__add_buf; 437 437 bpf_linker__add_fd; 438 438 bpf_linker__new_fd; 439 + bpf_object__prepare; 439 440 btf__add_decl_attr; 440 441 btf__add_type_attr; 441 442 } LIBBPF_1.5.0;
+99
tools/testing/selftests/bpf/prog_tests/prepare.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2025 Meta */ 3 + 4 + #include <test_progs.h> 5 + #include <network_helpers.h> 6 + #include "prepare.skel.h" 7 + 8 + static bool check_prepared(struct bpf_object *obj) 9 + { 10 + bool is_prepared = true; 11 + const struct bpf_map *map; 12 + 13 + bpf_object__for_each_map(map, obj) { 14 + if (bpf_map__fd(map) < 0) 15 + is_prepared = false; 16 + } 17 + 18 + return is_prepared; 19 + } 20 + 21 + static void test_prepare_no_load(void) 22 + { 23 + struct prepare *skel; 24 + int err; 25 + LIBBPF_OPTS(bpf_test_run_opts, topts, 26 + .data_in = &pkt_v4, 27 + .data_size_in = sizeof(pkt_v4), 28 + ); 29 + 30 + skel = prepare__open(); 31 + if (!ASSERT_OK_PTR(skel, "prepare__open")) 32 + return; 33 + 34 + if (!ASSERT_FALSE(check_prepared(skel->obj), "not check_prepared")) 35 + goto cleanup; 36 + 37 + err = bpf_object__prepare(skel->obj); 38 + 39 + if (!ASSERT_TRUE(check_prepared(skel->obj), "check_prepared")) 40 + goto cleanup; 41 + 42 + if (!ASSERT_OK(err, "bpf_object__prepare")) 43 + goto cleanup; 44 + 45 + cleanup: 46 + prepare__destroy(skel); 47 + } 48 + 49 + static void test_prepare_load(void) 50 + { 51 + struct prepare *skel; 52 + int err, prog_fd; 53 + LIBBPF_OPTS(bpf_test_run_opts, topts, 54 + .data_in = &pkt_v4, 55 + .data_size_in = sizeof(pkt_v4), 56 + ); 57 + 58 + skel = prepare__open(); 59 + if (!ASSERT_OK_PTR(skel, "prepare__open")) 60 + return; 61 + 62 + if (!ASSERT_FALSE(check_prepared(skel->obj), "not check_prepared")) 63 + goto cleanup; 64 + 65 + err = bpf_object__prepare(skel->obj); 66 + if (!ASSERT_OK(err, "bpf_object__prepare")) 67 + goto cleanup; 68 + 69 + err = prepare__load(skel); 70 + if (!ASSERT_OK(err, "prepare__load")) 71 + goto cleanup; 72 + 73 + if (!ASSERT_TRUE(check_prepared(skel->obj), "check_prepared")) 74 + goto cleanup; 75 + 76 + prog_fd = bpf_program__fd(skel->progs.program); 77 + if (!ASSERT_GE(prog_fd, 0, "prog_fd")) 78 + goto cleanup; 79 + 80 + err = 
bpf_prog_test_run_opts(prog_fd, &topts); 81 + if (!ASSERT_OK(err, "test_run_opts err")) 82 + goto cleanup; 83 + 84 + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) 85 + goto cleanup; 86 + 87 + ASSERT_EQ(skel->bss->err, 0, "err"); 88 + 89 + cleanup: 90 + prepare__destroy(skel); 91 + } 92 + 93 + void test_prepare(void) 94 + { 95 + if (test__start_subtest("prepare_load")) 96 + test_prepare_load(); 97 + if (test__start_subtest("prepare_no_load")) 98 + test_prepare_no_load(); 99 + }
+28
tools/testing/selftests/bpf/progs/prepare.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2025 Meta */ 3 + #include <vmlinux.h> 4 + #include <bpf/bpf_helpers.h> 5 + //#include <bpf/bpf_tracing.h> 6 + 7 + char _license[] SEC("license") = "GPL"; 8 + 9 + int err; 10 + 11 + struct { 12 + __uint(type, BPF_MAP_TYPE_RINGBUF); 13 + __uint(max_entries, 4096); 14 + } ringbuf SEC(".maps"); 15 + 16 + struct { 17 + __uint(type, BPF_MAP_TYPE_ARRAY); 18 + __uint(max_entries, 1); 19 + __type(key, __u32); 20 + __type(value, __u32); 21 + } array_map SEC(".maps"); 22 + 23 + SEC("cgroup_skb/egress") 24 + int program(struct __sk_buff *skb) 25 + { 26 + err = 0; 27 + return 0; 28 + }