Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Add testcases for tailcall infinite loop fixing

Add 4 test cases to confirm the tailcall infinite loop bug has been fixed.

Like the tailcall_bpf2bpf cases, attach fentry/fexit programs to the
bpf2bpf-called subprog, and then check the final count result.

tools/testing/selftests/bpf/test_progs -t tailcalls
226/13 tailcalls/tailcall_bpf2bpf_fentry:OK
226/14 tailcalls/tailcall_bpf2bpf_fexit:OK
226/15 tailcalls/tailcall_bpf2bpf_fentry_fexit:OK
226/16 tailcalls/tailcall_bpf2bpf_fentry_entry:OK
226 tailcalls:OK
Summary: 1/16 PASSED, 0 SKIPPED, 0 FAILED

Signed-off-by: Leon Hwang <hffilwlqm@gmail.com>
Link: https://lore.kernel.org/r/20230912150442.2009-4-hffilwlqm@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Leon Hwang and committed by Alexei Starovoitov
(commit e13b5f2f, parent 2b5dcb31)

+269 -4
+233 -4
tools/testing/selftests/bpf/prog_tests/tailcalls.c
··· 218 218 bpf_object__close(obj); 219 219 } 220 220 221 - static void test_tailcall_count(const char *which) 221 + static void test_tailcall_count(const char *which, bool test_fentry, 222 + bool test_fexit) 222 223 { 224 + struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL; 225 + struct bpf_link *fentry_link = NULL, *fexit_link = NULL; 223 226 int err, map_fd, prog_fd, main_fd, data_fd, i, val; 224 227 struct bpf_map *prog_array, *data_map; 225 228 struct bpf_program *prog; 226 - struct bpf_object *obj; 227 229 char buff[128] = {}; 228 230 LIBBPF_OPTS(bpf_test_run_opts, topts, 229 231 .data_in = buff, ··· 267 265 if (CHECK_FAIL(err)) 268 266 goto out; 269 267 268 + if (test_fentry) { 269 + fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o", 270 + NULL); 271 + if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file")) 272 + goto out; 273 + 274 + prog = bpf_object__find_program_by_name(fentry_obj, "fentry"); 275 + if (!ASSERT_OK_PTR(prog, "find fentry prog")) 276 + goto out; 277 + 278 + err = bpf_program__set_attach_target(prog, prog_fd, 279 + "subprog_tail"); 280 + if (!ASSERT_OK(err, "set_attach_target subprog_tail")) 281 + goto out; 282 + 283 + err = bpf_object__load(fentry_obj); 284 + if (!ASSERT_OK(err, "load fentry_obj")) 285 + goto out; 286 + 287 + fentry_link = bpf_program__attach_trace(prog); 288 + if (!ASSERT_OK_PTR(fentry_link, "attach_trace")) 289 + goto out; 290 + } 291 + 292 + if (test_fexit) { 293 + fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o", 294 + NULL); 295 + if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file")) 296 + goto out; 297 + 298 + prog = bpf_object__find_program_by_name(fexit_obj, "fexit"); 299 + if (!ASSERT_OK_PTR(prog, "find fexit prog")) 300 + goto out; 301 + 302 + err = bpf_program__set_attach_target(prog, prog_fd, 303 + "subprog_tail"); 304 + if (!ASSERT_OK(err, "set_attach_target subprog_tail")) 305 + goto out; 306 + 307 + err = bpf_object__load(fexit_obj); 308 + if 
(!ASSERT_OK(err, "load fexit_obj")) 309 + goto out; 310 + 311 + fexit_link = bpf_program__attach_trace(prog); 312 + if (!ASSERT_OK_PTR(fexit_link, "attach_trace")) 313 + goto out; 314 + } 315 + 270 316 err = bpf_prog_test_run_opts(main_fd, &topts); 271 317 ASSERT_OK(err, "tailcall"); 272 318 ASSERT_EQ(topts.retval, 1, "tailcall retval"); ··· 332 282 ASSERT_OK(err, "tailcall count"); 333 283 ASSERT_EQ(val, 33, "tailcall count"); 334 284 285 + if (test_fentry) { 286 + data_map = bpf_object__find_map_by_name(fentry_obj, ".bss"); 287 + if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map), 288 + "find tailcall_bpf2bpf_fentry.bss map")) 289 + goto out; 290 + 291 + data_fd = bpf_map__fd(data_map); 292 + if (!ASSERT_FALSE(data_fd < 0, 293 + "find tailcall_bpf2bpf_fentry.bss map fd")) 294 + goto out; 295 + 296 + i = 0; 297 + err = bpf_map_lookup_elem(data_fd, &i, &val); 298 + ASSERT_OK(err, "fentry count"); 299 + ASSERT_EQ(val, 33, "fentry count"); 300 + } 301 + 302 + if (test_fexit) { 303 + data_map = bpf_object__find_map_by_name(fexit_obj, ".bss"); 304 + if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map), 305 + "find tailcall_bpf2bpf_fexit.bss map")) 306 + goto out; 307 + 308 + data_fd = bpf_map__fd(data_map); 309 + if (!ASSERT_FALSE(data_fd < 0, 310 + "find tailcall_bpf2bpf_fexit.bss map fd")) 311 + goto out; 312 + 313 + i = 0; 314 + err = bpf_map_lookup_elem(data_fd, &i, &val); 315 + ASSERT_OK(err, "fexit count"); 316 + ASSERT_EQ(val, 33, "fexit count"); 317 + } 318 + 335 319 i = 0; 336 320 err = bpf_map_delete_elem(map_fd, &i); 337 321 if (CHECK_FAIL(err)) ··· 375 291 ASSERT_OK(err, "tailcall"); 376 292 ASSERT_OK(topts.retval, "tailcall retval"); 377 293 out: 294 + bpf_link__destroy(fentry_link); 295 + bpf_link__destroy(fexit_link); 296 + bpf_object__close(fentry_obj); 297 + bpf_object__close(fexit_obj); 378 298 bpf_object__close(obj); 379 299 } 380 300 ··· 387 299 */ 388 300 static void test_tailcall_3(void) 389 301 { 390 - 
test_tailcall_count("tailcall3.bpf.o"); 302 + test_tailcall_count("tailcall3.bpf.o", false, false); 391 303 } 392 304 393 305 /* test_tailcall_6 checks that the count value of the tail call limit ··· 395 307 */ 396 308 static void test_tailcall_6(void) 397 309 { 398 - test_tailcall_count("tailcall6.bpf.o"); 310 + test_tailcall_count("tailcall6.bpf.o", false, false); 399 311 } 400 312 401 313 /* test_tailcall_4 checks that the kernel properly selects indirect jump ··· 972 884 tailcall_bpf2bpf6__destroy(obj); 973 885 } 974 886 887 + /* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call 888 + * limit enforcement matches with expectations when tailcall is preceded with 889 + * bpf2bpf call, and the bpf2bpf call is traced by fentry. 890 + */ 891 + static void test_tailcall_bpf2bpf_fentry(void) 892 + { 893 + test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false); 894 + } 895 + 896 + /* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call 897 + * limit enforcement matches with expectations when tailcall is preceded with 898 + * bpf2bpf call, and the bpf2bpf call is traced by fexit. 899 + */ 900 + static void test_tailcall_bpf2bpf_fexit(void) 901 + { 902 + test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true); 903 + } 904 + 905 + /* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail 906 + * call limit enforcement matches with expectations when tailcall is preceded 907 + * with bpf2bpf call, and the bpf2bpf call is traced by both fentry and fexit. 908 + */ 909 + static void test_tailcall_bpf2bpf_fentry_fexit(void) 910 + { 911 + test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true); 912 + } 913 + 914 + /* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail 915 + * call limit enforcement matches with expectations when tailcall is preceded 916 + * with bpf2bpf call, and the bpf2bpf caller is traced by fentry. 
917 + */ 918 + static void test_tailcall_bpf2bpf_fentry_entry(void) 919 + { 920 + struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL; 921 + int err, map_fd, prog_fd, data_fd, i, val; 922 + struct bpf_map *prog_array, *data_map; 923 + struct bpf_link *fentry_link = NULL; 924 + struct bpf_program *prog; 925 + char buff[128] = {}; 926 + 927 + LIBBPF_OPTS(bpf_test_run_opts, topts, 928 + .data_in = buff, 929 + .data_size_in = sizeof(buff), 930 + .repeat = 1, 931 + ); 932 + 933 + err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", 934 + BPF_PROG_TYPE_SCHED_CLS, 935 + &tgt_obj, &prog_fd); 936 + if (!ASSERT_OK(err, "load tgt_obj")) 937 + return; 938 + 939 + prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table"); 940 + if (!ASSERT_OK_PTR(prog_array, "find jmp_table map")) 941 + goto out; 942 + 943 + map_fd = bpf_map__fd(prog_array); 944 + if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd")) 945 + goto out; 946 + 947 + prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0"); 948 + if (!ASSERT_OK_PTR(prog, "find classifier_0 prog")) 949 + goto out; 950 + 951 + prog_fd = bpf_program__fd(prog); 952 + if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd")) 953 + goto out; 954 + 955 + i = 0; 956 + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); 957 + if (!ASSERT_OK(err, "update jmp_table")) 958 + goto out; 959 + 960 + fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o", 961 + NULL); 962 + if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file")) 963 + goto out; 964 + 965 + prog = bpf_object__find_program_by_name(fentry_obj, "fentry"); 966 + if (!ASSERT_OK_PTR(prog, "find fentry prog")) 967 + goto out; 968 + 969 + err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0"); 970 + if (!ASSERT_OK(err, "set_attach_target classifier_0")) 971 + goto out; 972 + 973 + err = bpf_object__load(fentry_obj); 974 + if (!ASSERT_OK(err, "load fentry_obj")) 975 + goto out; 976 + 977 + fentry_link = bpf_program__attach_trace(prog); 978 + 
if (!ASSERT_OK_PTR(fentry_link, "attach_trace")) 979 + goto out; 980 + 981 + err = bpf_prog_test_run_opts(prog_fd, &topts); 982 + ASSERT_OK(err, "tailcall"); 983 + ASSERT_EQ(topts.retval, 1, "tailcall retval"); 984 + 985 + data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss"); 986 + if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map), 987 + "find tailcall.bss map")) 988 + goto out; 989 + 990 + data_fd = bpf_map__fd(data_map); 991 + if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd")) 992 + goto out; 993 + 994 + i = 0; 995 + err = bpf_map_lookup_elem(data_fd, &i, &val); 996 + ASSERT_OK(err, "tailcall count"); 997 + ASSERT_EQ(val, 34, "tailcall count"); 998 + 999 + data_map = bpf_object__find_map_by_name(fentry_obj, ".bss"); 1000 + if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map), 1001 + "find tailcall_bpf2bpf_fentry.bss map")) 1002 + goto out; 1003 + 1004 + data_fd = bpf_map__fd(data_map); 1005 + if (!ASSERT_FALSE(data_fd < 0, 1006 + "find tailcall_bpf2bpf_fentry.bss map fd")) 1007 + goto out; 1008 + 1009 + i = 0; 1010 + err = bpf_map_lookup_elem(data_fd, &i, &val); 1011 + ASSERT_OK(err, "fentry count"); 1012 + ASSERT_EQ(val, 1, "fentry count"); 1013 + 1014 + out: 1015 + bpf_link__destroy(fentry_link); 1016 + bpf_object__close(fentry_obj); 1017 + bpf_object__close(tgt_obj); 1018 + } 1019 + 975 1020 void test_tailcalls(void) 976 1021 { 977 1022 if (test__start_subtest("tailcall_1")) ··· 1131 910 test_tailcall_bpf2bpf_4(true); 1132 911 if (test__start_subtest("tailcall_bpf2bpf_6")) 1133 912 test_tailcall_bpf2bpf_6(); 913 + if (test__start_subtest("tailcall_bpf2bpf_fentry")) 914 + test_tailcall_bpf2bpf_fentry(); 915 + if (test__start_subtest("tailcall_bpf2bpf_fexit")) 916 + test_tailcall_bpf2bpf_fexit(); 917 + if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit")) 918 + test_tailcall_bpf2bpf_fentry_fexit(); 919 + if (test__start_subtest("tailcall_bpf2bpf_fentry_entry")) 920 + test_tailcall_bpf2bpf_fentry_entry(); 1134 921 }
+18
tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright Leon Hwang */ 3 + 4 + #include "vmlinux.h" 5 + #include <bpf/bpf_helpers.h> 6 + #include <bpf/bpf_tracing.h> 7 + 8 + int count = 0; 9 + 10 + SEC("fentry/subprog_tail") 11 + int BPF_PROG(fentry, struct sk_buff *skb) 12 + { 13 + count++; 14 + 15 + return 0; 16 + } 17 + 18 + char _license[] SEC("license") = "GPL";
+18
tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright Leon Hwang */ 3 + 4 + #include "vmlinux.h" 5 + #include <bpf/bpf_helpers.h> 6 + #include <bpf/bpf_tracing.h> 7 + 8 + int count = 0; 9 + 10 + SEC("fexit/subprog_tail") 11 + int BPF_PROG(fexit, struct sk_buff *skb) 12 + { 13 + count++; 14 + 15 + return 0; 16 + } 17 + 18 + char _license[] SEC("license") = "GPL";