Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Test gen_prologue and gen_epilogue

This test adds a new struct_ops "bpf_testmod_st_ops" in bpf_testmod.
The ops of the bpf_testmod_st_ops is triggered by new kfunc calls
"bpf_kfunc_st_ops_test_*logue". These new kfunc calls are
primarily used by the SEC("syscall") program. The test triggering
sequence is like:
SEC("syscall")
syscall_prologue(struct st_ops_args *args)
bpf_kfunc_st_ops_test_prologue(args)
st_ops->test_prologue(args)

.gen_prologue adds 1000 to args->a
.gen_epilogue adds 10000 to args->a
.gen_epilogue will also set the r0 to 2 * args->a.

The .gen_prologue and .gen_epilogue of the bpf_testmod_st_ops
will test the prog->aux->attach_func_name to decide if
it needs to generate codes.

The main programs in pro_epilogue.c call a
new kfunc bpf_kfunc_st_ops_inc10 which does "args->a += 10".
It will also call a subprog() which does "args->a += 1".

This patch uses the test_loader infra to check the __xlated
instructions patched after gen_prologue and/or gen_epilogue.
The __xlated check is based on Eduard's example (Thanks!) in v1.

args->a is returned by the struct_ops prog (either the main prog
or the epilogue). Thus, the __retval of the SEC("syscall") prog
is checked. For example, when triggering the ops in the
'SEC("struct_ops/test_epilogue") int test_epilogue'
The expected args->a is +1 (subprog call) + 10 (kfunc call)
+ 10000 (.gen_epilogue) = 10011.
The expected return value is 2 * 10011 (.gen_epilogue).

Suggested-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20240829210833.388152-7-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Martin KaFai Lau and committed by
Alexei Starovoitov
47e69431 a0dbf6d0

+371
+190
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
··· 17 17 #include <linux/in.h> 18 18 #include <linux/in6.h> 19 19 #include <linux/un.h> 20 + #include <linux/filter.h> 20 21 #include <net/sock.h> 21 22 #include <linux/namei.h> 22 23 #include "bpf_testmod.h" ··· 946 945 return err; 947 946 } 948 947 948 + static DEFINE_MUTEX(st_ops_mutex); 949 + static struct bpf_testmod_st_ops *st_ops; 950 + 951 + __bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) 952 + { 953 + int ret = -1; 954 + 955 + mutex_lock(&st_ops_mutex); 956 + if (st_ops && st_ops->test_prologue) 957 + ret = st_ops->test_prologue(args); 958 + mutex_unlock(&st_ops_mutex); 959 + 960 + return ret; 961 + } 962 + 963 + __bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args) 964 + { 965 + int ret = -1; 966 + 967 + mutex_lock(&st_ops_mutex); 968 + if (st_ops && st_ops->test_epilogue) 969 + ret = st_ops->test_epilogue(args); 970 + mutex_unlock(&st_ops_mutex); 971 + 972 + return ret; 973 + } 974 + 975 + __bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args) 976 + { 977 + int ret = -1; 978 + 979 + mutex_lock(&st_ops_mutex); 980 + if (st_ops && st_ops->test_pro_epilogue) 981 + ret = st_ops->test_pro_epilogue(args); 982 + mutex_unlock(&st_ops_mutex); 983 + 984 + return ret; 985 + } 986 + 987 + __bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args) 988 + { 989 + args->a += 10; 990 + return args->a; 991 + } 992 + 949 993 BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids) 950 994 BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) 951 995 BTF_ID_FLAGS(func, bpf_kfunc_call_test1) ··· 1027 981 BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE) 1028 982 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE) 1029 983 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE) 984 + BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE) 985 + BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE) 986 + BTF_ID_FLAGS(func, 
bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE) 987 + BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS) 1030 988 BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids) 1031 989 1032 990 static int bpf_testmod_ops_init(struct btf *btf) ··· 1150 1100 .owner = THIS_MODULE, 1151 1101 }; 1152 1102 1103 + static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args) 1104 + { 1105 + return 0; 1106 + } 1107 + 1108 + static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args) 1109 + { 1110 + return 0; 1111 + } 1112 + 1113 + static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args) 1114 + { 1115 + return 0; 1116 + } 1117 + 1118 + static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write, 1119 + const struct bpf_prog *prog) 1120 + { 1121 + struct bpf_insn *insn = insn_buf; 1122 + 1123 + if (strcmp(prog->aux->attach_func_name, "test_prologue") && 1124 + strcmp(prog->aux->attach_func_name, "test_pro_epilogue")) 1125 + return 0; 1126 + 1127 + /* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx". 
1128 + * r7 = r6->a; 1129 + * r7 += 1000; 1130 + * r6->a = r7; 1131 + */ 1132 + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0); 1133 + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a)); 1134 + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000); 1135 + *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a)); 1136 + *insn++ = prog->insnsi[0]; 1137 + 1138 + return insn - insn_buf; 1139 + } 1140 + 1141 + static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog, 1142 + s16 ctx_stack_off) 1143 + { 1144 + struct bpf_insn *insn = insn_buf; 1145 + 1146 + if (strcmp(prog->aux->attach_func_name, "test_epilogue") && 1147 + strcmp(prog->aux->attach_func_name, "test_pro_epilogue")) 1148 + return 0; 1149 + 1150 + /* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx" 1151 + * r1 = r1[0]; // r1 will be "struct st_ops *args" 1152 + * r6 = r1->a; 1153 + * r6 += 10000; 1154 + * r1->a = r6; 1155 + * r0 = r6; 1156 + * r0 *= 2; 1157 + * BPF_EXIT; 1158 + */ 1159 + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off); 1160 + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0); 1161 + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a)); 1162 + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000); 1163 + *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a)); 1164 + *insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6); 1165 + *insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2); 1166 + *insn++ = BPF_EXIT_INSN(); 1167 + 1168 + return insn - insn_buf; 1169 + } 1170 + 1171 + static int st_ops_btf_struct_access(struct bpf_verifier_log *log, 1172 + const struct bpf_reg_state *reg, 1173 + int off, int size) 1174 + { 1175 + if (off < 0 || off + size > sizeof(struct st_ops_args)) 1176 + return -EACCES; 1177 + return 0; 1178 + } 1179 + 1180 + static const struct bpf_verifier_ops st_ops_verifier_ops = { 1181 + .is_valid_access = 
bpf_testmod_ops_is_valid_access, 1182 + .btf_struct_access = st_ops_btf_struct_access, 1183 + .gen_prologue = st_ops_gen_prologue, 1184 + .gen_epilogue = st_ops_gen_epilogue, 1185 + .get_func_proto = bpf_base_func_proto, 1186 + }; 1187 + 1188 + static struct bpf_testmod_st_ops st_ops_cfi_stubs = { 1189 + .test_prologue = bpf_test_mod_st_ops__test_prologue, 1190 + .test_epilogue = bpf_test_mod_st_ops__test_epilogue, 1191 + .test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue, 1192 + }; 1193 + 1194 + static int st_ops_reg(void *kdata, struct bpf_link *link) 1195 + { 1196 + int err = 0; 1197 + 1198 + mutex_lock(&st_ops_mutex); 1199 + if (st_ops) { 1200 + pr_err("st_ops has already been registered\n"); 1201 + err = -EEXIST; 1202 + goto unlock; 1203 + } 1204 + st_ops = kdata; 1205 + 1206 + unlock: 1207 + mutex_unlock(&st_ops_mutex); 1208 + return err; 1209 + } 1210 + 1211 + static void st_ops_unreg(void *kdata, struct bpf_link *link) 1212 + { 1213 + mutex_lock(&st_ops_mutex); 1214 + st_ops = NULL; 1215 + mutex_unlock(&st_ops_mutex); 1216 + } 1217 + 1218 + static int st_ops_init(struct btf *btf) 1219 + { 1220 + return 0; 1221 + } 1222 + 1223 + static int st_ops_init_member(const struct btf_type *t, 1224 + const struct btf_member *member, 1225 + void *kdata, const void *udata) 1226 + { 1227 + return 0; 1228 + } 1229 + 1230 + static struct bpf_struct_ops testmod_st_ops = { 1231 + .verifier_ops = &st_ops_verifier_ops, 1232 + .init = st_ops_init, 1233 + .init_member = st_ops_init_member, 1234 + .reg = st_ops_reg, 1235 + .unreg = st_ops_unreg, 1236 + .cfi_stubs = &st_ops_cfi_stubs, 1237 + .name = "bpf_testmod_st_ops", 1238 + .owner = THIS_MODULE, 1239 + }; 1240 + 1153 1241 extern int bpf_fentry_test1(int a); 1154 1242 1155 1243 static int bpf_testmod_init(void) ··· 1305 1117 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set); 1306 1118 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set); 1307 1119 
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set); 1120 + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set); 1308 1121 ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops); 1309 1122 ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2); 1123 + ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops); 1310 1124 ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors, 1311 1125 ARRAY_SIZE(bpf_testmod_dtors), 1312 1126 THIS_MODULE);
+11
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
··· 94 94 int (*test_1)(void); 95 95 }; 96 96 97 + struct st_ops_args { 98 + u64 a; 99 + }; 100 + 101 + struct bpf_testmod_st_ops { 102 + int (*test_prologue)(struct st_ops_args *args); 103 + int (*test_epilogue)(struct st_ops_args *args); 104 + int (*test_pro_epilogue)(struct st_ops_args *args); 105 + struct module *owner; 106 + }; 107 + 97 108 #endif /* _BPF_TESTMOD_H */
+6
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
··· 148 148 struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr) __ksym; 149 149 void bpf_kfunc_nested_release_test(struct sk_buff *ptr) __ksym; 150 150 151 + struct st_ops_args; 152 + int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) __ksym; 153 + int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args) __ksym; 154 + int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args) __ksym; 155 + int bpf_kfunc_st_ops_inc10(struct st_ops_args *args) __ksym; 156 + 151 157 #endif /* _BPF_TESTMOD_KFUNC_H */
+10
tools/testing/selftests/bpf/prog_tests/pro_epilogue.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ 3 + 4 + #include <test_progs.h> 5 + #include "pro_epilogue.skel.h" 6 + 7 + void test_pro_epilogue(void) 8 + { 9 + RUN_TESTS(pro_epilogue); 10 + }
+154
tools/testing/selftests/bpf/progs/pro_epilogue.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ 3 + 4 + #include <vmlinux.h> 5 + #include <bpf/bpf_tracing.h> 6 + #include "bpf_misc.h" 7 + #include "../bpf_testmod/bpf_testmod.h" 8 + #include "../bpf_testmod/bpf_testmod_kfunc.h" 9 + 10 + char _license[] SEC("license") = "GPL"; 11 + 12 + void __kfunc_btf_root(void) 13 + { 14 + bpf_kfunc_st_ops_inc10(NULL); 15 + } 16 + 17 + static __noinline __used int subprog(struct st_ops_args *args) 18 + { 19 + args->a += 1; 20 + return args->a; 21 + } 22 + 23 + __success 24 + /* prologue */ 25 + __xlated("0: r6 = *(u64 *)(r1 +0)") 26 + __xlated("1: r7 = *(u64 *)(r6 +0)") 27 + __xlated("2: r7 += 1000") 28 + __xlated("3: *(u64 *)(r6 +0) = r7") 29 + /* main prog */ 30 + __xlated("4: r1 = *(u64 *)(r1 +0)") 31 + __xlated("5: r6 = r1") 32 + __xlated("6: call kernel-function") 33 + __xlated("7: r1 = r6") 34 + __xlated("8: call pc+1") 35 + __xlated("9: exit") 36 + SEC("struct_ops/test_prologue") 37 + __naked int test_prologue(void) 38 + { 39 + asm volatile ( 40 + "r1 = *(u64 *)(r1 +0);" 41 + "r6 = r1;" 42 + "call %[bpf_kfunc_st_ops_inc10];" 43 + "r1 = r6;" 44 + "call subprog;" 45 + "exit;" 46 + : 47 + : __imm(bpf_kfunc_st_ops_inc10) 48 + : __clobber_all); 49 + } 50 + 51 + __success 52 + /* save __u64 *ctx to stack */ 53 + __xlated("0: *(u64 *)(r10 -8) = r1") 54 + /* main prog */ 55 + __xlated("1: r1 = *(u64 *)(r1 +0)") 56 + __xlated("2: r6 = r1") 57 + __xlated("3: call kernel-function") 58 + __xlated("4: r1 = r6") 59 + __xlated("5: call pc+") 60 + /* epilogue */ 61 + __xlated("6: r1 = *(u64 *)(r10 -8)") 62 + __xlated("7: r1 = *(u64 *)(r1 +0)") 63 + __xlated("8: r6 = *(u64 *)(r1 +0)") 64 + __xlated("9: r6 += 10000") 65 + __xlated("10: *(u64 *)(r1 +0) = r6") 66 + __xlated("11: r0 = r6") 67 + __xlated("12: r0 *= 2") 68 + __xlated("13: exit") 69 + SEC("struct_ops/test_epilogue") 70 + __naked int test_epilogue(void) 71 + { 72 + asm volatile ( 73 + "r1 = *(u64 *)(r1 +0);" 74 + "r6 
= r1;" 75 + "call %[bpf_kfunc_st_ops_inc10];" 76 + "r1 = r6;" 77 + "call subprog;" 78 + "exit;" 79 + : 80 + : __imm(bpf_kfunc_st_ops_inc10) 81 + : __clobber_all); 82 + } 83 + 84 + __success 85 + /* prologue */ 86 + __xlated("0: r6 = *(u64 *)(r1 +0)") 87 + __xlated("1: r7 = *(u64 *)(r6 +0)") 88 + __xlated("2: r7 += 1000") 89 + __xlated("3: *(u64 *)(r6 +0) = r7") 90 + /* save __u64 *ctx to stack */ 91 + __xlated("4: *(u64 *)(r10 -8) = r1") 92 + /* main prog */ 93 + __xlated("5: r1 = *(u64 *)(r1 +0)") 94 + __xlated("6: r6 = r1") 95 + __xlated("7: call kernel-function") 96 + __xlated("8: r1 = r6") 97 + __xlated("9: call pc+") 98 + /* epilogue */ 99 + __xlated("10: r1 = *(u64 *)(r10 -8)") 100 + __xlated("11: r1 = *(u64 *)(r1 +0)") 101 + __xlated("12: r6 = *(u64 *)(r1 +0)") 102 + __xlated("13: r6 += 10000") 103 + __xlated("14: *(u64 *)(r1 +0) = r6") 104 + __xlated("15: r0 = r6") 105 + __xlated("16: r0 *= 2") 106 + __xlated("17: exit") 107 + SEC("struct_ops/test_pro_epilogue") 108 + __naked int test_pro_epilogue(void) 109 + { 110 + asm volatile ( 111 + "r1 = *(u64 *)(r1 +0);" 112 + "r6 = r1;" 113 + "call %[bpf_kfunc_st_ops_inc10];" 114 + "r1 = r6;" 115 + "call subprog;" 116 + "exit;" 117 + : 118 + : __imm(bpf_kfunc_st_ops_inc10) 119 + : __clobber_all); 120 + } 121 + 122 + SEC("syscall") 123 + __retval(1011) /* PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] */ 124 + int syscall_prologue(void *ctx) 125 + { 126 + struct st_ops_args args = {}; 127 + 128 + return bpf_kfunc_st_ops_test_prologue(&args); 129 + } 130 + 131 + SEC("syscall") 132 + __retval(20022) /* (KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */ 133 + int syscall_epilogue(void *ctx) 134 + { 135 + struct st_ops_args args = {}; 136 + 137 + return bpf_kfunc_st_ops_test_epilogue(&args); 138 + } 139 + 140 + SEC("syscall") 141 + __retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */ 142 + int syscall_pro_epilogue(void *ctx) 143 + { 144 + struct st_ops_args args = {}; 
145 + 146 + return bpf_kfunc_st_ops_test_pro_epilogue(&args); 147 + } 148 + 149 + SEC(".struct_ops.link") 150 + struct bpf_testmod_st_ops pro_epilogue = { 151 + .test_prologue = (void *)test_prologue, 152 + .test_epilogue = (void *)test_epilogue, 153 + .test_pro_epilogue = (void *)test_pro_epilogue, 154 + };