Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libbpf: Split bpf object load into prepare/load

Introduce bpf_object__prepare API: additional intermediate preparation
step that performs ELF processing, relocations, prepares final state of
BPF program instructions (accessible with bpf_program__insns()), creates
and (potentially) pins maps, and stops short of loading BPF programs.

We anticipate a few use cases for this API, such as:
* Use prepare to initialize bpf_token, without loading freplace
programs, unlocking the possibility to look up BTF of other programs.
* Execute prepare to obtain finalized BPF program instructions without
loading programs, enabling tools like veristat to process one program at
a time, without incurring the cost of ELF parsing and processing.

Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20250303135752.158343-4-mykyta.yatsenko5@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Mykyta Yatsenko and committed by
Alexei Starovoitov
1315c28e 9a9e3478

+118 -44
+104 -44
tools/lib/bpf/libbpf.c
··· 7903 7903 7904 7904 for (i = 0; i < obj->nr_programs; i++) { 7905 7905 prog = &obj->programs[i]; 7906 - err = bpf_object__sanitize_prog(obj, prog); 7907 - if (err) 7908 - return err; 7909 - } 7910 - 7911 - for (i = 0; i < obj->nr_programs; i++) { 7912 - prog = &obj->programs[i]; 7913 7906 if (prog_is_subprog(obj, prog)) 7914 7907 continue; 7915 7908 if (!prog->autoload) { ··· 7923 7930 } 7924 7931 7925 7932 bpf_object__free_relocs(obj); 7933 + return 0; 7934 + } 7935 + 7936 + static int bpf_object_prepare_progs(struct bpf_object *obj) 7937 + { 7938 + struct bpf_program *prog; 7939 + size_t i; 7940 + int err; 7941 + 7942 + for (i = 0; i < obj->nr_programs; i++) { 7943 + prog = &obj->programs[i]; 7944 + err = bpf_object__sanitize_prog(obj, prog); 7945 + if (err) 7946 + return err; 7947 + } 7926 7948 return 0; 7927 7949 } 7928 7950 ··· 8557 8549 return 0; 8558 8550 } 8559 8551 8552 + static void bpf_object_unpin(struct bpf_object *obj) 8553 + { 8554 + int i; 8555 + 8556 + /* unpin any maps that were auto-pinned during load */ 8557 + for (i = 0; i < obj->nr_maps; i++) 8558 + if (obj->maps[i].pinned && !obj->maps[i].reused) 8559 + bpf_map__unpin(&obj->maps[i], NULL); 8560 + } 8561 + 8562 + static void bpf_object_post_load_cleanup(struct bpf_object *obj) 8563 + { 8564 + int i; 8565 + 8566 + /* clean up fd_array */ 8567 + zfree(&obj->fd_array); 8568 + 8569 + /* clean up module BTFs */ 8570 + for (i = 0; i < obj->btf_module_cnt; i++) { 8571 + close(obj->btf_modules[i].fd); 8572 + btf__free(obj->btf_modules[i].btf); 8573 + free(obj->btf_modules[i].name); 8574 + } 8575 + obj->btf_module_cnt = 0; 8576 + zfree(&obj->btf_modules); 8577 + 8578 + /* clean up vmlinux BTF */ 8579 + btf__free(obj->btf_vmlinux); 8580 + obj->btf_vmlinux = NULL; 8581 + } 8582 + 8583 + static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path) 8584 + { 8585 + int err; 8586 + 8587 + if (obj->state >= OBJ_PREPARED) { 8588 + pr_warn("object '%s': prepare loading can't be 
attempted twice\n", obj->name); 8589 + return -EINVAL; 8590 + } 8591 + 8592 + err = bpf_object_prepare_token(obj); 8593 + err = err ? : bpf_object__probe_loading(obj); 8594 + err = err ? : bpf_object__load_vmlinux_btf(obj, false); 8595 + err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); 8596 + err = err ? : bpf_object__sanitize_maps(obj); 8597 + err = err ? : bpf_object__init_kern_struct_ops_maps(obj); 8598 + err = err ? : bpf_object_adjust_struct_ops_autoload(obj); 8599 + err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); 8600 + err = err ? : bpf_object__sanitize_and_load_btf(obj); 8601 + err = err ? : bpf_object__create_maps(obj); 8602 + err = err ? : bpf_object_prepare_progs(obj); 8603 + 8604 + if (err) { 8605 + bpf_object_unpin(obj); 8606 + bpf_object_unload(obj); 8607 + obj->state = OBJ_LOADED; 8608 + return err; 8609 + } 8610 + 8611 + obj->state = OBJ_PREPARED; 8612 + return 0; 8613 + } 8614 + 8560 8615 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path) 8561 8616 { 8562 - int err, i; 8617 + int err; 8563 8618 8564 8619 if (!obj) 8565 8620 return libbpf_err(-EINVAL); ··· 8642 8571 return libbpf_err(-LIBBPF_ERRNO__ENDIAN); 8643 8572 } 8644 8573 8645 - err = bpf_object_prepare_token(obj); 8646 - err = err ? : bpf_object__probe_loading(obj); 8647 - err = err ? : bpf_object__load_vmlinux_btf(obj, false); 8648 - err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); 8649 - err = err ? : bpf_object__sanitize_maps(obj); 8650 - err = err ? : bpf_object__init_kern_struct_ops_maps(obj); 8651 - err = err ? : bpf_object_adjust_struct_ops_autoload(obj); 8652 - err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); 8653 - err = err ? : bpf_object__sanitize_and_load_btf(obj); 8654 - err = err ? : bpf_object__create_maps(obj); 8655 - err = err ? 
: bpf_object__load_progs(obj, extra_log_level); 8574 + if (obj->state < OBJ_PREPARED) { 8575 + err = bpf_object_prepare(obj, target_btf_path); 8576 + if (err) 8577 + return libbpf_err(err); 8578 + } 8579 + err = bpf_object__load_progs(obj, extra_log_level); 8656 8580 err = err ? : bpf_object_init_prog_arrays(obj); 8657 8581 err = err ? : bpf_object_prepare_struct_ops(obj); 8658 8582 ··· 8659 8593 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps); 8660 8594 } 8661 8595 8662 - /* clean up fd_array */ 8663 - zfree(&obj->fd_array); 8664 - 8665 - /* clean up module BTFs */ 8666 - for (i = 0; i < obj->btf_module_cnt; i++) { 8667 - close(obj->btf_modules[i].fd); 8668 - btf__free(obj->btf_modules[i].btf); 8669 - free(obj->btf_modules[i].name); 8670 - } 8671 - free(obj->btf_modules); 8672 - 8673 - /* clean up vmlinux BTF */ 8674 - btf__free(obj->btf_vmlinux); 8675 - obj->btf_vmlinux = NULL; 8676 - 8596 + bpf_object_post_load_cleanup(obj); 8677 8597 obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */ 8678 - if (err) 8679 - goto out; 8598 + 8599 + if (err) { 8600 + bpf_object_unpin(obj); 8601 + bpf_object_unload(obj); 8602 + pr_warn("failed to load object '%s'\n", obj->path); 8603 + return libbpf_err(err); 8604 + } 8680 8605 8681 8606 return 0; 8682 - out: 8683 - /* unpin any maps that were auto-pinned during load */ 8684 - for (i = 0; i < obj->nr_maps; i++) 8685 - if (obj->maps[i].pinned && !obj->maps[i].reused) 8686 - bpf_map__unpin(&obj->maps[i], NULL); 8607 + } 8687 8608 8688 - bpf_object_unload(obj); 8689 - pr_warn("failed to load object '%s'\n", obj->path); 8690 - return libbpf_err(err); 8609 + int bpf_object__prepare(struct bpf_object *obj) 8610 + { 8611 + return libbpf_err(bpf_object_prepare(obj, NULL)); 8691 8612 } 8692 8613 8693 8614 int bpf_object__load(struct bpf_object *obj) ··· 9121 9068 9122 9069 if (IS_ERR_OR_NULL(obj)) 9123 9070 return; 9071 + 9072 + /* 9073 + * if user called bpf_object__prepare() without ever getting to 
9074 + * bpf_object__load(), we need to clean up stuff that is normally 9075 + * cleaned up at the end of loading step 9076 + */ 9077 + bpf_object_post_load_cleanup(obj); 9124 9078 9125 9079 usdt_manager_free(obj->usdt_man); 9126 9080 obj->usdt_man = NULL;
+13
tools/lib/bpf/libbpf.h
··· 242 242 const struct bpf_object_open_opts *opts); 243 243 244 244 /** 245 + * @brief **bpf_object__prepare()** prepares BPF object for loading: 246 + * performs ELF processing, relocations, prepares final state of BPF program 247 + * instructions (accessible with bpf_program__insns()), creates and 248 + * (potentially) pins maps. Leaves BPF object in the state ready for program 249 + * loading. 250 + * @param obj Pointer to a valid BPF object instance returned by 251 + * **bpf_object__open*()** API 252 + * @return 0, on success; negative error code, otherwise, error code is 253 + * stored in errno 254 + */ 255 + int bpf_object__prepare(struct bpf_object *obj); 256 + 257 + /** 245 258 * @brief **bpf_object__load()** loads BPF object into kernel. 246 259 * @param obj Pointer to a valid BPF object instance returned by 247 260 * **bpf_object__open*()** APIs
+1
tools/lib/bpf/libbpf.map
··· 436 436 bpf_linker__add_buf; 437 437 bpf_linker__add_fd; 438 438 bpf_linker__new_fd; 439 + bpf_object__prepare; 439 440 btf__add_decl_attr; 440 441 btf__add_type_attr; 441 442 } LIBBPF_1.5.0;