Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Test the inlining of bpf_kptr_xchg()

The test uses bpf_prog_get_info_by_fd() to obtain the xlated
instructions of the program first. Since these instructions have
already been rewritten by the verifier, the test then checks whether
the rewritten instructions are as expected. And to ensure LLVM generates
code exactly as expected, use inline assembly and a naked function.

Suggested-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20240105104819.3916743-4-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Hou Tao and committed by
Alexei Starovoitov
17bda53e b4b7a409

+99
+51
tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2023. Huawei Technologies Co., Ltd */ 3 + #include <test_progs.h> 4 + 5 + #include "linux/filter.h" 6 + #include "kptr_xchg_inline.skel.h" 7 + 8 + void test_kptr_xchg_inline(void) 9 + { 10 + struct kptr_xchg_inline *skel; 11 + struct bpf_insn *insn = NULL; 12 + struct bpf_insn exp; 13 + unsigned int cnt; 14 + int err; 15 + 16 + #if !defined(__x86_64__) 17 + test__skip(); 18 + return; 19 + #endif 20 + 21 + skel = kptr_xchg_inline__open_and_load(); 22 + if (!ASSERT_OK_PTR(skel, "open_load")) 23 + return; 24 + 25 + err = get_xlated_program(bpf_program__fd(skel->progs.kptr_xchg_inline), &insn, &cnt); 26 + if (!ASSERT_OK(err, "prog insn")) 27 + goto out; 28 + 29 + /* The original instructions are: 30 + * r1 = map[id:xxx][0]+0 31 + * r2 = 0 32 + * call bpf_kptr_xchg#yyy 33 + * 34 + * call bpf_kptr_xchg#yyy will be inlined as: 35 + * r0 = r2 36 + * r0 = atomic64_xchg((u64 *)(r1 +0), r0) 37 + */ 38 + if (!ASSERT_GT(cnt, 5, "insn cnt")) 39 + goto out; 40 + 41 + exp = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2); 42 + if (!ASSERT_OK(memcmp(&insn[3], &exp, sizeof(exp)), "mov")) 43 + goto out; 44 + 45 + exp = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0); 46 + if (!ASSERT_OK(memcmp(&insn[4], &exp, sizeof(exp)), "xchg")) 47 + goto out; 48 + out: 49 + free(insn); 50 + kptr_xchg_inline__destroy(skel); 51 + }
+48
tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <linux/types.h>
#include <bpf/bpf_helpers.h>

#include "bpf_experimental.h"
#include "bpf_misc.h"

char _license[] SEC("license") = "GPL";

/* Dummy object type; only the kptr slot holding it matters for this test. */
struct bin_data {
	char blob[32];
};

/* Place each kptr in its own named .bss.* section, hidden and 8-byte aligned. */
#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
private(kptr) struct bin_data __kptr * ptr;

SEC("tc")
__naked int kptr_xchg_inline(void)
{
	/* Naked function with hand-written assembly so LLVM emits exactly
	 * the instruction sequence the companion prog_tests checker expects:
	 *   r1 = &ptr; r2 = 0; call bpf_kptr_xchg
	 * A non-NULL old value returned by the exchange is released through
	 * bpf_obj_drop_impl before the program exits with 0.
	 */
	asm volatile (
	"r1 = %[ptr] ll;"
	"r2 = 0;"
	"call %[bpf_kptr_xchg];"
	"if r0 == 0 goto 1f;"
	"r1 = r0;"
	"r2 = 0;"
	"call %[bpf_obj_drop_impl];"
	"1:"
	"r0 = 0;"
	"exit;"
	:
	: __imm_addr(ptr),
	  __imm(bpf_kptr_xchg),
	  __imm(bpf_obj_drop_impl)
	: __clobber_all
	);
}

/* BTF FUNC records are not generated for kfuncs referenced
 * from inline assembly. These records are necessary for
 * libbpf to link the program. The function below is a hack
 * to ensure that BTF FUNC records are generated.
 */
void __btf_root(void)
{
	bpf_obj_drop(NULL);
}