Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libbpf: Update light skeleton for signing

* The metadata map is created as an exclusive map (with an
excl_prog_hash). This restricts map access exclusively to the signed
loader program, preventing tampering by other processes.

* The map is then frozen, making it read-only from userspace.

* BPF_OBJ_GET_INFO_BY_FD instructs the kernel to compute the hash of the
metadata map (H') and store it in bpf_map->sha.

* The loader is then loaded with the signature which is then verified by
the kernel.

Loading signed programs prebuilt into the kernel is not currently
supported. These can be supported by enabling BPF_OBJ_GET_INFO_BY_ID to be
called from the kernel.

Signed-off-by: KP Singh <kpsingh@kernel.org>
Link: https://lore.kernel.org/r/20250921160120.9711-3-kpsingh@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

KP Singh and committed by
Alexei Starovoitov
fb2b0e29 34927156

+72 -4
+72 -4
tools/lib/bpf/skel_internal.h
··· 13 13 #include <unistd.h> 14 14 #include <sys/syscall.h> 15 15 #include <sys/mman.h> 16 + #include <linux/keyctl.h> 16 17 #include <stdlib.h> 17 18 #include "bpf.h" 19 + #endif 20 + 21 + #ifndef SHA256_DIGEST_LENGTH 22 + #define SHA256_DIGEST_LENGTH 32 18 23 #endif 19 24 20 25 #ifndef __NR_bpf ··· 69 64 __u32 data_sz; 70 65 __u32 insns_sz; 71 66 const char *errstr; 67 + void *signature; 68 + __u32 signature_sz; 69 + __s32 keyring_id; 70 + void *excl_prog_hash; 71 + __u32 excl_prog_hash_sz; 72 72 }; 73 73 74 74 long kern_sys_bpf(__u32 cmd, void *attr, __u32 attr_size); ··· 230 220 const char *map_name, 231 221 __u32 key_size, 232 222 __u32 value_size, 233 - __u32 max_entries) 223 + __u32 max_entries, 224 + const void *excl_prog_hash, 225 + __u32 excl_prog_hash_sz) 234 226 { 235 - const size_t attr_sz = offsetofend(union bpf_attr, map_extra); 227 + const size_t attr_sz = offsetofend(union bpf_attr, excl_prog_hash_size); 236 228 union bpf_attr attr; 237 229 238 230 memset(&attr, 0, attr_sz); 239 231 240 232 attr.map_type = map_type; 233 + attr.excl_prog_hash = (unsigned long) excl_prog_hash; 234 + attr.excl_prog_hash_size = excl_prog_hash_sz; 235 + 241 236 strncpy(attr.map_name, map_name, sizeof(attr.map_name)); 242 237 attr.key_size = key_size; 243 238 attr.value_size = value_size; ··· 315 300 return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz); 316 301 } 317 302 303 + static inline int skel_obj_get_info_by_fd(int fd) 304 + { 305 + const size_t attr_sz = offsetofend(union bpf_attr, info); 306 + __u8 sha[SHA256_DIGEST_LENGTH]; 307 + struct bpf_map_info info; 308 + __u32 info_len = sizeof(info); 309 + union bpf_attr attr; 310 + 311 + memset(&info, 0, sizeof(info)); 312 + info.hash = (long) &sha; 313 + info.hash_size = SHA256_DIGEST_LENGTH; 314 + 315 + memset(&attr, 0, attr_sz); 316 + attr.info.bpf_fd = fd; 317 + attr.info.info = (long) &info; 318 + attr.info.info_len = info_len; 319 + return skel_sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz); 320 + } 321 + 322 + 
static inline int skel_map_freeze(int fd) 323 + { 324 + const size_t attr_sz = offsetofend(union bpf_attr, map_fd); 325 + union bpf_attr attr; 326 + 327 + memset(&attr, 0, attr_sz); 328 + attr.map_fd = fd; 329 + 330 + return skel_sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz); 331 + } 318 332 #ifdef __KERNEL__ 319 333 #define set_err 320 334 #else ··· 352 308 353 309 static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) 354 310 { 355 - const size_t prog_load_attr_sz = offsetofend(union bpf_attr, fd_array); 311 + const size_t prog_load_attr_sz = offsetofend(union bpf_attr, keyring_id); 356 312 const size_t test_run_attr_sz = offsetofend(union bpf_attr, test); 357 313 int map_fd = -1, prog_fd = -1, key = 0, err; 358 314 union bpf_attr attr; 359 315 360 - err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1); 316 + err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1, 317 + opts->excl_prog_hash, opts->excl_prog_hash_sz); 361 318 if (map_fd < 0) { 362 319 opts->errstr = "failed to create loader map"; 363 320 set_err; ··· 372 327 goto out; 373 328 } 374 329 330 + #ifndef __KERNEL__ 331 + err = skel_map_freeze(map_fd); 332 + if (err < 0) { 333 + opts->errstr = "failed to freeze map"; 334 + set_err; 335 + goto out; 336 + } 337 + err = skel_obj_get_info_by_fd(map_fd); 338 + if (err < 0) { 339 + opts->errstr = "failed to fetch obj info"; 340 + set_err; 341 + goto out; 342 + } 343 + #endif 344 + 375 345 memset(&attr, 0, prog_load_attr_sz); 376 346 attr.prog_type = BPF_PROG_TYPE_SYSCALL; 377 347 attr.insns = (long) opts->insns; 378 348 attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn); 379 349 attr.license = (long) "Dual BSD/GPL"; 350 + #ifndef __KERNEL__ 351 + attr.signature = (long) opts->signature; 352 + attr.signature_size = opts->signature_sz; 353 + #else 354 + if (opts->signature || opts->signature_sz) 355 + pr_warn("signatures are not supported from bpf_preload\n"); 356 + #endif 357 + 
attr.keyring_id = opts->keyring_id; 380 358 memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog")); 381 359 attr.fd_array = (long) &map_fd; 382 360 attr.log_level = opts->ctx->log_level;