Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Add struct_ops prog private stack tests

Add three tests for struct_ops using private stack.
./test_progs -t struct_ops_private_stack
#336/1 struct_ops_private_stack/private_stack:OK
#336/2 struct_ops_private_stack/private_stack_fail:OK
#336/3 struct_ops_private_stack/private_stack_recur:OK
#336 struct_ops_private_stack:OK

The following is a snippet of a struct_ops check_member() implementation:

u32 moff = __btf_member_bit_offset(t, member) / 8;
switch (moff) {
case offsetof(struct bpf_testmod_ops3, test_1):
prog->aux->priv_stack_requested = true;
prog->aux->recursion_detected = test_1_recursion_detected;
fallthrough;
default:
break;
}
return 0;

The first test nests two different callback functions, where the first
prog has more than a 512-byte stack size (including subprogs) with
private stack enabled.

The second test is a negative test where the second prog has more than a
512-byte stack size without private stack enabled, so loading must fail.

The third test has the same callback function recursing into itself. At run
time, the jit trampoline recursion check kicks in to prevent the recursion.
The recursion_detected() callback function is implemented by bpf_testmod;
the following message in dmesg:
bpf_testmod: oh no, recursing into test_1, recursion_misses 1
demonstrates that the callback function is indeed triggered when a
recursion miss happens.

Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20241112163938.2225528-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Yonghong Song and committed by Alexei Starovoitov.
becfe32b 5bd36da1

+389
+104
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
··· 245 245 call_rcu(&ctx->rcu, testmod_free_cb); 246 246 } 247 247 248 + static struct bpf_testmod_ops3 *st_ops3; 249 + 250 + static int bpf_testmod_test_3(void) 251 + { 252 + return 0; 253 + } 254 + 255 + static int bpf_testmod_test_4(void) 256 + { 257 + return 0; 258 + } 259 + 260 + static struct bpf_testmod_ops3 __bpf_testmod_ops3 = { 261 + .test_1 = bpf_testmod_test_3, 262 + .test_2 = bpf_testmod_test_4, 263 + }; 264 + 265 + static void bpf_testmod_test_struct_ops3(void) 266 + { 267 + if (st_ops3) 268 + st_ops3->test_1(); 269 + } 270 + 271 + __bpf_kfunc void bpf_testmod_ops3_call_test_1(void) 272 + { 273 + st_ops3->test_1(); 274 + } 275 + 276 + __bpf_kfunc void bpf_testmod_ops3_call_test_2(void) 277 + { 278 + st_ops3->test_2(); 279 + } 280 + 248 281 struct bpf_testmod_btf_type_tag_1 { 249 282 int a; 250 283 }; ··· 414 381 (void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2); 415 382 416 383 (void)trace_bpf_testmod_test_raw_tp_null(NULL); 384 + 385 + bpf_testmod_test_struct_ops3(); 417 386 418 387 struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) + 419 388 sizeof(int)), GFP_KERNEL); ··· 621 586 BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU) 622 587 BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL) 623 588 BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE) 589 + BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1) 590 + BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2) 624 591 BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids) 625 592 626 593 BTF_ID_LIST(bpf_testmod_dtor_ids) ··· 1133 1096 .is_valid_access = bpf_testmod_ops_is_valid_access, 1134 1097 }; 1135 1098 1099 + static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = { 1100 + .is_valid_access = bpf_testmod_ops_is_valid_access, 1101 + }; 1102 + 1136 1103 static int bpf_dummy_reg(void *kdata, struct bpf_link *link) 1137 1104 { 1138 1105 struct bpf_testmod_ops *ops = kdata; ··· 1213 1172 .unreg = bpf_dummy_unreg, 1214 1173 .cfi_stubs = &__bpf_testmod_ops2, 1215 
1174 .name = "bpf_testmod_ops2", 1175 + .owner = THIS_MODULE, 1176 + }; 1177 + 1178 + static int st_ops3_reg(void *kdata, struct bpf_link *link) 1179 + { 1180 + int err = 0; 1181 + 1182 + mutex_lock(&st_ops_mutex); 1183 + if (st_ops3) { 1184 + pr_err("st_ops has already been registered\n"); 1185 + err = -EEXIST; 1186 + goto unlock; 1187 + } 1188 + st_ops3 = kdata; 1189 + 1190 + unlock: 1191 + mutex_unlock(&st_ops_mutex); 1192 + return err; 1193 + } 1194 + 1195 + static void st_ops3_unreg(void *kdata, struct bpf_link *link) 1196 + { 1197 + mutex_lock(&st_ops_mutex); 1198 + st_ops3 = NULL; 1199 + mutex_unlock(&st_ops_mutex); 1200 + } 1201 + 1202 + static void test_1_recursion_detected(struct bpf_prog *prog) 1203 + { 1204 + struct bpf_prog_stats *stats; 1205 + 1206 + stats = this_cpu_ptr(prog->stats); 1207 + printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu", 1208 + u64_stats_read(&stats->misses)); 1209 + } 1210 + 1211 + static int st_ops3_check_member(const struct btf_type *t, 1212 + const struct btf_member *member, 1213 + const struct bpf_prog *prog) 1214 + { 1215 + u32 moff = __btf_member_bit_offset(t, member) / 8; 1216 + 1217 + switch (moff) { 1218 + case offsetof(struct bpf_testmod_ops3, test_1): 1219 + prog->aux->priv_stack_requested = true; 1220 + prog->aux->recursion_detected = test_1_recursion_detected; 1221 + fallthrough; 1222 + default: 1223 + break; 1224 + } 1225 + return 0; 1226 + } 1227 + 1228 + struct bpf_struct_ops bpf_testmod_ops3 = { 1229 + .verifier_ops = &bpf_testmod_verifier_ops3, 1230 + .init = bpf_testmod_ops_init, 1231 + .init_member = bpf_testmod_ops_init_member, 1232 + .reg = st_ops3_reg, 1233 + .unreg = st_ops3_unreg, 1234 + .check_member = st_ops3_check_member, 1235 + .cfi_stubs = &__bpf_testmod_ops3, 1236 + .name = "bpf_testmod_ops3", 1216 1237 .owner = THIS_MODULE, 1217 1238 }; 1218 1239 ··· 1436 1333 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set); 1437 1334 ret = ret ?: 
register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops); 1438 1335 ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2); 1336 + ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3); 1439 1337 ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops); 1440 1338 ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors, 1441 1339 ARRAY_SIZE(bpf_testmod_dtors),
+5
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
··· 94 94 int (*test_1)(void); 95 95 }; 96 96 97 + struct bpf_testmod_ops3 { 98 + int (*test_1)(void); 99 + int (*test_2)(void); 100 + }; 101 + 97 102 struct st_ops_args { 98 103 u64 a; 99 104 };
+106
tools/testing/selftests/bpf/prog_tests/struct_ops_private_stack.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <test_progs.h> 4 + #include "struct_ops_private_stack.skel.h" 5 + #include "struct_ops_private_stack_fail.skel.h" 6 + #include "struct_ops_private_stack_recur.skel.h" 7 + 8 + static void test_private_stack(void) 9 + { 10 + struct struct_ops_private_stack *skel; 11 + struct bpf_link *link; 12 + int err; 13 + 14 + skel = struct_ops_private_stack__open(); 15 + if (!ASSERT_OK_PTR(skel, "struct_ops_private_stack__open")) 16 + return; 17 + 18 + if (skel->data->skip) { 19 + test__skip(); 20 + goto cleanup; 21 + } 22 + 23 + err = struct_ops_private_stack__load(skel); 24 + if (!ASSERT_OK(err, "struct_ops_private_stack__load")) 25 + goto cleanup; 26 + 27 + link = bpf_map__attach_struct_ops(skel->maps.testmod_1); 28 + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) 29 + goto cleanup; 30 + 31 + ASSERT_OK(trigger_module_test_read(256), "trigger_read"); 32 + 33 + ASSERT_EQ(skel->bss->val_i, 3, "val_i"); 34 + ASSERT_EQ(skel->bss->val_j, 8, "val_j"); 35 + 36 + bpf_link__destroy(link); 37 + 38 + cleanup: 39 + struct_ops_private_stack__destroy(skel); 40 + } 41 + 42 + static void test_private_stack_fail(void) 43 + { 44 + struct struct_ops_private_stack_fail *skel; 45 + int err; 46 + 47 + skel = struct_ops_private_stack_fail__open(); 48 + if (!ASSERT_OK_PTR(skel, "struct_ops_private_stack_fail__open")) 49 + return; 50 + 51 + if (skel->data->skip) { 52 + test__skip(); 53 + goto cleanup; 54 + } 55 + 56 + err = struct_ops_private_stack_fail__load(skel); 57 + if (!ASSERT_ERR(err, "struct_ops_private_stack_fail__load")) 58 + goto cleanup; 59 + return; 60 + 61 + cleanup: 62 + struct_ops_private_stack_fail__destroy(skel); 63 + } 64 + 65 + static void test_private_stack_recur(void) 66 + { 67 + struct struct_ops_private_stack_recur *skel; 68 + struct bpf_link *link; 69 + int err; 70 + 71 + skel = struct_ops_private_stack_recur__open(); 72 + if (!ASSERT_OK_PTR(skel, "struct_ops_private_stack_recur__open")) 73 + return; 74 + 75 + if 
(skel->data->skip) { 76 + test__skip(); 77 + goto cleanup; 78 + } 79 + 80 + err = struct_ops_private_stack_recur__load(skel); 81 + if (!ASSERT_OK(err, "struct_ops_private_stack_recur__load")) 82 + goto cleanup; 83 + 84 + link = bpf_map__attach_struct_ops(skel->maps.testmod_1); 85 + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) 86 + goto cleanup; 87 + 88 + ASSERT_OK(trigger_module_test_read(256), "trigger_read"); 89 + 90 + ASSERT_EQ(skel->bss->val_j, 3, "val_j"); 91 + 92 + bpf_link__destroy(link); 93 + 94 + cleanup: 95 + struct_ops_private_stack_recur__destroy(skel); 96 + } 97 + 98 + void test_struct_ops_private_stack(void) 99 + { 100 + if (test__start_subtest("private_stack")) 101 + test_private_stack(); 102 + if (test__start_subtest("private_stack_fail")) 103 + test_private_stack_fail(); 104 + if (test__start_subtest("private_stack_recur")) 105 + test_private_stack_recur(); 106 + }
+62
tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <vmlinux.h> 4 + #include <bpf/bpf_helpers.h> 5 + #include <bpf/bpf_tracing.h> 6 + #include "../bpf_testmod/bpf_testmod.h" 7 + 8 + char _license[] SEC("license") = "GPL"; 9 + 10 + #if defined(__TARGET_ARCH_x86) 11 + bool skip __attribute((__section__(".data"))) = false; 12 + #else 13 + bool skip = true; 14 + #endif 15 + 16 + void bpf_testmod_ops3_call_test_2(void) __ksym; 17 + 18 + int val_i, val_j; 19 + 20 + __noinline static int subprog2(int *a, int *b) 21 + { 22 + return val_i + a[10] + b[20]; 23 + } 24 + 25 + __noinline static int subprog1(int *a) 26 + { 27 + /* stack size 200 bytes */ 28 + int b[50] = {}; 29 + 30 + b[20] = 2; 31 + return subprog2(a, b); 32 + } 33 + 34 + 35 + SEC("struct_ops") 36 + int BPF_PROG(test_1) 37 + { 38 + /* stack size 400 bytes */ 39 + int a[100] = {}; 40 + 41 + a[10] = 1; 42 + val_i = subprog1(a); 43 + bpf_testmod_ops3_call_test_2(); 44 + return 0; 45 + } 46 + 47 + SEC("struct_ops") 48 + int BPF_PROG(test_2) 49 + { 50 + /* stack size 200 bytes */ 51 + int a[50] = {}; 52 + 53 + a[10] = 3; 54 + val_j = subprog1(a); 55 + return 0; 56 + } 57 + 58 + SEC(".struct_ops") 59 + struct bpf_testmod_ops3 testmod_1 = { 60 + .test_1 = (void *)test_1, 61 + .test_2 = (void *)test_2, 62 + };
+62
tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <vmlinux.h> 4 + #include <bpf/bpf_helpers.h> 5 + #include <bpf/bpf_tracing.h> 6 + #include "../bpf_testmod/bpf_testmod.h" 7 + 8 + char _license[] SEC("license") = "GPL"; 9 + 10 + #if defined(__TARGET_ARCH_x86) 11 + bool skip __attribute((__section__(".data"))) = false; 12 + #else 13 + bool skip = true; 14 + #endif 15 + 16 + void bpf_testmod_ops3_call_test_2(void) __ksym; 17 + 18 + int val_i, val_j; 19 + 20 + __noinline static int subprog2(int *a, int *b) 21 + { 22 + return val_i + a[10] + b[20]; 23 + } 24 + 25 + __noinline static int subprog1(int *a) 26 + { 27 + /* stack size 200 bytes */ 28 + int b[50] = {}; 29 + 30 + b[20] = 2; 31 + return subprog2(a, b); 32 + } 33 + 34 + 35 + SEC("struct_ops") 36 + int BPF_PROG(test_1) 37 + { 38 + /* stack size 100 bytes */ 39 + int a[25] = {}; 40 + 41 + a[10] = 1; 42 + val_i = subprog1(a); 43 + bpf_testmod_ops3_call_test_2(); 44 + return 0; 45 + } 46 + 47 + SEC("struct_ops") 48 + int BPF_PROG(test_2) 49 + { 50 + /* stack size 400 bytes */ 51 + int a[100] = {}; 52 + 53 + a[10] = 3; 54 + val_j = subprog1(a); 55 + return 0; 56 + } 57 + 58 + SEC(".struct_ops") 59 + struct bpf_testmod_ops3 testmod_1 = { 60 + .test_1 = (void *)test_1, 61 + .test_2 = (void *)test_2, 62 + };
+50
tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <vmlinux.h> 4 + #include <bpf/bpf_helpers.h> 5 + #include <bpf/bpf_tracing.h> 6 + #include "../bpf_testmod/bpf_testmod.h" 7 + 8 + char _license[] SEC("license") = "GPL"; 9 + 10 + #if defined(__TARGET_ARCH_x86) 11 + bool skip __attribute((__section__(".data"))) = false; 12 + #else 13 + bool skip = true; 14 + #endif 15 + 16 + void bpf_testmod_ops3_call_test_1(void) __ksym; 17 + 18 + int val_i, val_j; 19 + 20 + __noinline static int subprog2(int *a, int *b) 21 + { 22 + return val_i + a[1] + b[20]; 23 + } 24 + 25 + __noinline static int subprog1(int *a) 26 + { 27 + /* stack size 400 bytes */ 28 + int b[100] = {}; 29 + 30 + b[20] = 2; 31 + return subprog2(a, b); 32 + } 33 + 34 + 35 + SEC("struct_ops") 36 + int BPF_PROG(test_1) 37 + { 38 + /* stack size 20 bytes */ 39 + int a[5] = {}; 40 + 41 + a[1] = 1; 42 + val_j += subprog1(a); 43 + bpf_testmod_ops3_call_test_1(); 44 + return 0; 45 + } 46 + 47 + SEC(".struct_ops") 48 + struct bpf_testmod_ops3 testmod_1 = { 49 + .test_1 = (void *)test_1, 50 + };