Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Test ldsx with more complex cases

The following ldsx cases are tested:
- signed readonly map value
- read/write map value
- probed memory
- not-narrowed ctx field access
- narrowed ctx field access.

Without proper verifier/jit handling, the test will fail.

If cpuv4 is not supported either by compiler or by jit,
the test will be skipped.

# ./test_progs -t ldsx_insn
#113/1 ldsx_insn/map_val and probed_memory:SKIP
#113/2 ldsx_insn/ctx_member_sign_ext:SKIP
#113/3 ldsx_insn/ctx_member_narrow_sign_ext:SKIP
#113 ldsx_insn:SKIP
Summary: 1/0 PASSED, 3 SKIPPED, 0 FAILED

Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20230728011336.3723434-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Yonghong Song and committed by
Alexei Starovoitov
0c606571 613dad49

+265 -1
+8 -1
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
··· 98 98 return bpf_testmod_test_struct_arg_result; 99 99 } 100 100 101 + noinline int 102 + bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) { 103 + bpf_testmod_test_struct_arg_result = a->a; 104 + return bpf_testmod_test_struct_arg_result; 105 + } 106 + 101 107 __bpf_kfunc void 102 108 bpf_testmod_test_mod_kfunc(int i) 103 109 { ··· 246 240 .off = off, 247 241 .len = len, 248 242 }; 249 - struct bpf_testmod_struct_arg_1 struct_arg1 = {10}; 243 + struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1}; 250 244 struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3}; 251 245 struct bpf_testmod_struct_arg_3 *struct_arg3; 252 246 struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22}; ··· 265 259 (void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19, 266 260 (void *)20, struct_arg4, 23); 267 261 262 + (void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2); 268 263 269 264 struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) + 270 265 sizeof(int)), GFP_KERNEL);
+139
tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.*/ 3 + 4 + #include <test_progs.h> 5 + #include <network_helpers.h> 6 + #include "test_ldsx_insn.skel.h" 7 + 8 + static void test_map_val_and_probed_memory(void) 9 + { 10 + struct test_ldsx_insn *skel; 11 + int err; 12 + 13 + skel = test_ldsx_insn__open(); 14 + if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) 15 + return; 16 + 17 + if (skel->rodata->skip) { 18 + test__skip(); 19 + goto out; 20 + } 21 + 22 + bpf_program__set_autoload(skel->progs.rdonly_map_prog, true); 23 + bpf_program__set_autoload(skel->progs.map_val_prog, true); 24 + bpf_program__set_autoload(skel->progs.test_ptr_struct_arg, true); 25 + 26 + err = test_ldsx_insn__load(skel); 27 + if (!ASSERT_OK(err, "test_ldsx_insn__load")) 28 + goto out; 29 + 30 + err = test_ldsx_insn__attach(skel); 31 + if (!ASSERT_OK(err, "test_ldsx_insn__attach")) 32 + goto out; 33 + 34 + ASSERT_OK(trigger_module_test_read(256), "trigger_read"); 35 + 36 + ASSERT_EQ(skel->bss->done1, 1, "done1"); 37 + ASSERT_EQ(skel->bss->ret1, 1, "ret1"); 38 + ASSERT_EQ(skel->bss->done2, 1, "done2"); 39 + ASSERT_EQ(skel->bss->ret2, 1, "ret2"); 40 + ASSERT_EQ(skel->bss->int_member, -1, "int_member"); 41 + 42 + out: 43 + test_ldsx_insn__destroy(skel); 44 + } 45 + 46 + static void test_ctx_member_sign_ext(void) 47 + { 48 + struct test_ldsx_insn *skel; 49 + int err, fd, cgroup_fd; 50 + char buf[16] = {0}; 51 + socklen_t optlen; 52 + 53 + cgroup_fd = test__join_cgroup("/ldsx_test"); 54 + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /ldsx_test")) 55 + return; 56 + 57 + skel = test_ldsx_insn__open(); 58 + if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) 59 + goto close_cgroup_fd; 60 + 61 + if (skel->rodata->skip) { 62 + test__skip(); 63 + goto destroy_skel; 64 + } 65 + 66 + bpf_program__set_autoload(skel->progs._getsockopt, true); 67 + 68 + err = test_ldsx_insn__load(skel); 69 + if (!ASSERT_OK(err, "test_ldsx_insn__load")) 70 + goto destroy_skel; 
71 + 72 + skel->links._getsockopt = 73 + bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd); 74 + if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link")) 75 + goto destroy_skel; 76 + 77 + fd = socket(AF_INET, SOCK_STREAM, 0); 78 + if (!ASSERT_GE(fd, 0, "socket")) 79 + goto destroy_skel; 80 + 81 + optlen = sizeof(buf); 82 + (void)getsockopt(fd, SOL_IP, IP_TTL, buf, &optlen); 83 + 84 + ASSERT_EQ(skel->bss->set_optlen, -1, "optlen"); 85 + ASSERT_EQ(skel->bss->set_retval, -1, "retval"); 86 + 87 + close(fd); 88 + destroy_skel: 89 + test_ldsx_insn__destroy(skel); 90 + close_cgroup_fd: 91 + close(cgroup_fd); 92 + } 93 + 94 + static void test_ctx_member_narrow_sign_ext(void) 95 + { 96 + struct test_ldsx_insn *skel; 97 + struct __sk_buff skb = {}; 98 + LIBBPF_OPTS(bpf_test_run_opts, topts, 99 + .data_in = &pkt_v4, 100 + .data_size_in = sizeof(pkt_v4), 101 + .ctx_in = &skb, 102 + .ctx_size_in = sizeof(skb), 103 + ); 104 + int err, prog_fd; 105 + 106 + skel = test_ldsx_insn__open(); 107 + if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) 108 + return; 109 + 110 + if (skel->rodata->skip) { 111 + test__skip(); 112 + goto out; 113 + } 114 + 115 + bpf_program__set_autoload(skel->progs._tc, true); 116 + 117 + err = test_ldsx_insn__load(skel); 118 + if (!ASSERT_OK(err, "test_ldsx_insn__load")) 119 + goto out; 120 + 121 + prog_fd = bpf_program__fd(skel->progs._tc); 122 + err = bpf_prog_test_run_opts(prog_fd, &topts); 123 + ASSERT_OK(err, "test_run"); 124 + 125 + ASSERT_EQ(skel->bss->set_mark, -2, "set_mark"); 126 + 127 + out: 128 + test_ldsx_insn__destroy(skel); 129 + } 130 + 131 + void test_ldsx_insn(void) 132 + { 133 + if (test__start_subtest("map_val and probed_memory")) 134 + test_map_val_and_probed_memory(); 135 + if (test__start_subtest("ctx_member_sign_ext")) 136 + test_ctx_member_sign_ext(); 137 + if (test__start_subtest("ctx_member_narrow_sign_ext")) 138 + test_ctx_member_narrow_sign_ext(); 139 + }
+118
tools/testing/selftests/bpf/progs/test_ldsx_insn.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ 3 + 4 + #include "vmlinux.h" 5 + #include <bpf/bpf_helpers.h> 6 + #include <bpf/bpf_tracing.h> 7 + 8 + #if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18 9 + const volatile int skip = 0; 10 + #else 11 + const volatile int skip = 1; 12 + #endif 13 + 14 + volatile const short val1 = -1; 15 + volatile const int val2 = -1; 16 + short val3 = -1; 17 + int val4 = -1; 18 + int done1, done2, ret1, ret2; 19 + 20 + SEC("?raw_tp/sys_enter") 21 + int rdonly_map_prog(const void *ctx) 22 + { 23 + if (done1) 24 + return 0; 25 + 26 + done1 = 1; 27 + /* val1/val2 readonly map */ 28 + if (val1 == val2) 29 + ret1 = 1; 30 + return 0; 31 + 32 + } 33 + 34 + SEC("?raw_tp/sys_enter") 35 + int map_val_prog(const void *ctx) 36 + { 37 + if (done2) 38 + return 0; 39 + 40 + done2 = 1; 41 + /* val1/val2 regular read/write map */ 42 + if (val3 == val4) 43 + ret2 = 1; 44 + return 0; 45 + 46 + } 47 + 48 + struct bpf_testmod_struct_arg_1 { 49 + int a; 50 + }; 51 + 52 + long long int_member; 53 + 54 + SEC("?fentry/bpf_testmod_test_arg_ptr_to_struct") 55 + int BPF_PROG2(test_ptr_struct_arg, struct bpf_testmod_struct_arg_1 *, p) 56 + { 57 + /* probed memory access */ 58 + int_member = p->a; 59 + return 0; 60 + } 61 + 62 + long long set_optlen, set_retval; 63 + 64 + SEC("?cgroup/getsockopt") 65 + int _getsockopt(volatile struct bpf_sockopt *ctx) 66 + { 67 + int old_optlen, old_retval; 68 + 69 + old_optlen = ctx->optlen; 70 + old_retval = ctx->retval; 71 + 72 + ctx->optlen = -1; 73 + ctx->retval = -1; 74 + 75 + /* sign extension for ctx member */ 76 + set_optlen = ctx->optlen; 77 + set_retval = ctx->retval; 78 + 79 + ctx->optlen = old_optlen; 80 + ctx->retval = old_retval; 81 + 82 + return 0; 83 + } 84 + 85 + long long set_mark; 86 + 87 + SEC("?tc") 88 + int _tc(volatile struct __sk_buff *skb) 89 + { 90 + long long tmp_mark; 91 + int old_mark; 92 + 93 + old_mark = skb->mark; 94 + 95 + 
skb->mark = 0xf6fe; 96 + 97 + /* narrowed sign extension for ctx member */ 98 + #if __clang_major__ >= 18 99 + /* force narrow one-byte signed load. Otherwise, compiler may 100 + * generate a 32-bit unsigned load followed by an s8 movsx. 101 + */ 102 + asm volatile ("r1 = *(s8 *)(%[ctx] + %[off_mark])\n\t" 103 + "%[tmp_mark] = r1" 104 + : [tmp_mark]"=r"(tmp_mark) 105 + : [ctx]"r"(skb), 106 + [off_mark]"i"(offsetof(struct __sk_buff, mark)) 107 + : "r1"); 108 + #else 109 + tmp_mark = (char)skb->mark; 110 + #endif 111 + set_mark = tmp_mark; 112 + 113 + skb->mark = old_mark; 114 + 115 + return 0; 116 + } 117 + 118 + char _license[] SEC("license") = "GPL";