Repository: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

selftests/bpf: Test gen_pro/epilogue that generate kfuncs

Test gen_prologue and gen_epilogue that generate calls to kfuncs that
have not been seen in the main program.

The main bpf program and return value checks are identical to those in
pro_epilogue.c, introduced in commit 47e69431b57a ("selftests/bpf: Test
gen_prologue and gen_epilogue"). However, when bpf_testmod_st_ops now
detects a program name with the prefix "test_kfunc_", it generates a
slightly different prologue and epilogue: they still add 1000 to args->a
in the prologue, and add 10000 to args->a and set r0 to 2 * args->a in
the epilogue, but they now involve kfuncs.

At a high level, the alternative versions of the prologue and epilogue
look like this:

	cgrp = bpf_cgroup_from_id(0);
	if (cgrp)
		bpf_cgroup_release(cgrp);
	else
		/* Perform what the original bpf_testmod_st_ops prologue or
		 * epilogue does
		 */

Since 0 is never a valid cgroup id, the original prologue or epilogue
logic will be performed. As a result, the __retval check should expect
the exact same return value.
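Concretely, the expected return value of the syscall test works out to:

	(1000 [prologue] + 10 [kfunc inc10] + 1 [subprog] + 10000 [epilogue]) * 2 = 22022

which is what the __retval() annotation in pro_epilogue_with_kfunc.c
below checks, and the same value the original pro_epilogue test expects.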

Signed-off-by: Amery Hung <ameryhung@gmail.com>
Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20250225233545.285481-2-ameryhung@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Author:    Amery Hung <ameryhung@gmail.com>
Committer: Alexei Starovoitov <ast@kernel.org>
Commit:    4e4136c6 (parent d519594e)

5 files changed, 193 insertions(+)
include/linux/filter.h (+10)

···
 		.off = 0,					\
 		.imm = BPF_CALL_IMM(FUNC) })
 
+/* Kfunc call */
+
+#define BPF_CALL_KFUNC(OFF, IMM)			\
+	((struct bpf_insn) {				\
+		.code = BPF_JMP | BPF_CALL,		\
+		.dst_reg = 0,				\
+		.src_reg = BPF_PSEUDO_KFUNC_CALL,	\
+		.off = OFF,				\
+		.imm = IMM })
+
 /* Raw code statement block */
 
 #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)		\
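For reference, a minimal sketch of how an instruction rewriter is meant
to use the new macro, mirroring the bpf_testmod.c hunk further below
(the insn_buf and kfunc_btf_id names here are illustrative, not part of
the patch):

	/* Emit "r1 = 0; r0 = kfunc(r1);" into an instruction buffer.
	 * kfunc_btf_id is assumed to have been resolved beforehand,
	 * e.g. via bpf_find_btf_id().
	 */
	struct bpf_insn *insn = insn_buf;

	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
	*insn++ = BPF_CALL_KFUNC(0, kfunc_btf_id);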
kernel/bpf/btf.c (+1)

···
 	spin_unlock_bh(&btf_idr_lock);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(bpf_find_btf_id);
 
 const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
 					       u32 id, u32 *res_id)
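Exporting bpf_find_btf_id() lets a module such as bpf_testmod resolve
the BTF ids of the kfuncs it wants to emit. A minimal sketch of that
lookup, mirroring the st_ops_init() change in bpf_testmod.c below (the
variable names are illustrative):

	struct btf *kfunc_btf;
	int kfunc_id;

	/* Resolve the kfunc's BTF id by name; the id is later placed in
	 * the imm field of a BPF_CALL_KFUNC() instruction.
	 */
	kfunc_id = bpf_find_btf_id("bpf_cgroup_from_id", BTF_KIND_FUNC, &kfunc_btf);
	if (kfunc_id < 0)
		return -EINVAL;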
tools/testing/selftests/bpf/prog_tests/pro_epilogue.c (+2)

···
 #include "epilogue_tailcall.skel.h"
 #include "pro_epilogue_goto_start.skel.h"
 #include "epilogue_exit.skel.h"
+#include "pro_epilogue_with_kfunc.skel.h"
 
 struct st_ops_args {
 	__u64 a;
···
 	RUN_TESTS(pro_epilogue);
 	RUN_TESTS(pro_epilogue_goto_start);
 	RUN_TESTS(epilogue_exit);
+	RUN_TESTS(pro_epilogue_with_kfunc);
 	if (test__start_subtest("tailcall"))
 		test_tailcall();
 }
tools/testing/selftests/bpf/progs/pro_epilogue_with_kfunc.c (new file, +88)

+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+void __kfunc_btf_root(void)
+{
+	bpf_kfunc_st_ops_inc10(NULL);
+}
+
+static __noinline __used int subprog(struct st_ops_args *args)
+{
+	args->a += 1;
+	return args->a;
+}
+
+__success
+/* prologue */
+__xlated("0: r8 = r1")
+__xlated("1: r1 = 0")
+__xlated("2: call kernel-function")
+__xlated("3: if r0 != 0x0 goto pc+5")
+__xlated("4: r6 = *(u64 *)(r8 +0)")
+__xlated("5: r7 = *(u64 *)(r6 +0)")
+__xlated("6: r7 += 1000")
+__xlated("7: *(u64 *)(r6 +0) = r7")
+__xlated("8: goto pc+2")
+__xlated("9: r1 = r0")
+__xlated("10: call kernel-function")
+__xlated("11: r1 = r8")
+/* save __u64 *ctx to stack */
+__xlated("12: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("13: r1 = *(u64 *)(r1 +0)")
+__xlated("14: r6 = r1")
+__xlated("15: call kernel-function")
+__xlated("16: r1 = r6")
+__xlated("17: call pc+")
+/* epilogue */
+__xlated("18: r1 = 0")
+__xlated("19: r6 = 0")
+__xlated("20: call kernel-function")
+__xlated("21: if r0 != 0x0 goto pc+6")
+__xlated("22: r1 = *(u64 *)(r10 -8)")
+__xlated("23: r1 = *(u64 *)(r1 +0)")
+__xlated("24: r6 = *(u64 *)(r1 +0)")
+__xlated("25: r6 += 10000")
+__xlated("26: *(u64 *)(r1 +0) = r6")
+__xlated("27: goto pc+2")
+__xlated("28: r1 = r0")
+__xlated("29: call kernel-function")
+__xlated("30: r0 = r6")
+__xlated("31: r0 *= 2")
+__xlated("32: exit")
+SEC("struct_ops/test_pro_epilogue")
+__naked int test_kfunc_pro_epilogue(void)
+{
+	asm volatile (
+	"r1 = *(u64 *)(r1 +0);"
+	"r6 = r1;"
+	"call %[bpf_kfunc_st_ops_inc10];"
+	"r1 = r6;"
+	"call subprog;"
+	"exit;"
+	:
+	: __imm(bpf_kfunc_st_ops_inc10)
+	: __clobber_all);
+}
+
+SEC("syscall")
+__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
+int syscall_pro_epilogue(void *ctx)
+{
+	struct st_ops_args args = {};
+
+	return bpf_kfunc_st_ops_test_pro_epilogue(&args);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops pro_epilogue_with_kfunc = {
+	.test_pro_epilogue = (void *)test_kfunc_pro_epilogue,
+};
tools/testing/selftests/bpf/test_kmods/bpf_testmod.c (+92)

···
 	return 0;
 }
 
+static int bpf_cgroup_from_id_id;
+static int bpf_cgroup_release_id;
+
+static int st_ops_gen_prologue_with_kfunc(struct bpf_insn *insn_buf, bool direct_write,
+					   const struct bpf_prog *prog)
+{
+	struct bpf_insn *insn = insn_buf;
+
+	/* r8 = r1; // r8 will be "u64 *ctx".
+	 * r1 = 0;
+	 * r0 = bpf_cgroup_from_id(r1);
+	 * if r0 != 0 goto pc+5;
+	 * r6 = r8[0]; // r6 will be "struct st_ops *args".
+	 * r7 = r6->a;
+	 * r7 += 1000;
+	 * r6->a = r7;
+	 * goto pc+2;
+	 * r1 = r0;
+	 * bpf_cgroup_release(r1);
+	 * r1 = r8;
+	 */
+	*insn++ = BPF_MOV64_REG(BPF_REG_8, BPF_REG_1);
+	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
+	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
+	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 5);
+	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_8, 0);
+	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
+	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
+	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
+	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
+	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
+	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
+	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_8);
+	*insn++ = prog->insnsi[0];
+
+	return insn - insn_buf;
+}
+
+static int st_ops_gen_epilogue_with_kfunc(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
+					   s16 ctx_stack_off)
+{
+	struct bpf_insn *insn = insn_buf;
+
+	/* r1 = 0;
+	 * r6 = 0;
+	 * r0 = bpf_cgroup_from_id(r1);
+	 * if r0 != 0 goto pc+6;
+	 * r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
+	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
+	 * r6 = r1->a;
+	 * r6 += 10000;
+	 * r1->a = r6;
+	 * goto pc+2;
+	 * r1 = r0;
+	 * bpf_cgroup_release(r1);
+	 * r0 = r6;
+	 * r0 *= 2;
+	 * BPF_EXIT;
+	 */
+	*insn++ = BPF_MOV64_IMM(BPF_REG_1, 0);
+	*insn++ = BPF_MOV64_IMM(BPF_REG_6, 0);
+	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_from_id_id);
+	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 6);
+	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
+	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
+	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
+	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
+	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
+	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2);
+	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
+	*insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id);
+	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
+	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
+	*insn++ = BPF_EXIT_INSN();
+
+	return insn - insn_buf;
+}
+
+#define KFUNC_PRO_EPI_PREFIX "test_kfunc_"
 static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
 			       const struct bpf_prog *prog)
 {
···
 	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
 	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
 		return 0;
+
+	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
+		return st_ops_gen_prologue_with_kfunc(insn_buf, direct_write, prog);
 
 	/* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx".
 	 * r7 = r6->a;
···
 	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
 	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
 		return 0;
+
+	if (!strncmp(prog->aux->name, KFUNC_PRO_EPI_PREFIX, strlen(KFUNC_PRO_EPI_PREFIX)))
+		return st_ops_gen_epilogue_with_kfunc(insn_buf, prog, ctx_stack_off);
 
 	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
 	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
···
 
 static int st_ops_init(struct btf *btf)
 {
+	struct btf *kfunc_btf;
+
+	bpf_cgroup_from_id_id = bpf_find_btf_id("bpf_cgroup_from_id", BTF_KIND_FUNC, &kfunc_btf);
+	bpf_cgroup_release_id = bpf_find_btf_id("bpf_cgroup_release", BTF_KIND_FUNC, &kfunc_btf);
+	if (bpf_cgroup_from_id_id < 0 || bpf_cgroup_release_id < 0)
+		return -EINVAL;
+
 	return 0;
 }