Repository: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (Linux kernel mirror, for testing)

selftests/bpf: verifier/xadd.c converted to inline assembly

Test verifier/xadd.c automatically converted to use inline assembly.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230325025524.144043-42-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Eduard Zingerman, committed by Alexei Starovoitov
a8036aea d15f5b68
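
At a glance, the conversion replaces each hand-assembled instruction macro with its BPF inline-assembly equivalent. For example, the atomic word-sized add from the first test appears in both forms in the diff below:

    /* Old encoding in verifier/xadd.c */
    BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -7),

    /* Converted inline assembly in progs/verifier_xadd.c */
    lock *(u32 *)(r10 - 7) += w0;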

3 files changed, 126 insertions(+), 97 deletions(-)

tools/testing/selftests/bpf/prog_tests/verifier.c (+2)
···
 #include "verifier_value.skel.h"
 #include "verifier_value_or_null.skel.h"
 #include "verifier_var_off.skel.h"
+#include "verifier_xadd.skel.h"

 __maybe_unused
 static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory)
···
 void test_verifier_value(void) { RUN(verifier_value); }
 void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); }
 void test_verifier_var_off(void) { RUN(verifier_var_off); }
+void test_verifier_xadd(void) { RUN(verifier_xadd); }
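
The RUN() macro used by the new test_verifier_xadd() entry is defined earlier in verifier.c and is not part of this hunk. A minimal sketch consistent with the run_tests_aux() signature visible above (assumed shape, for illustration only):

    /* Assumed shape of RUN(): stringify the skeleton name and pass the
     * skeleton's generated ELF-bytes accessor to the common runner. */
    #define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes)

Once registered, the converted test is selected like any other test_progs test, e.g. ./test_progs -t verifier_xadd.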
tools/testing/selftests/bpf/progs/verifier_xadd.c (+124, new file)
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/xadd.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

SEC("tc")
__description("xadd/w check unaligned stack")
__failure __msg("misaligned stack access off")
__naked void xadd_w_check_unaligned_stack(void)
{
	asm volatile ("					\
	r0 = 1;						\
	*(u64*)(r10 - 8) = r0;				\
	lock *(u32 *)(r10 - 7) += w0;			\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
}

SEC("tc")
__description("xadd/w check unaligned map")
__failure __msg("misaligned value access off")
__naked void xadd_w_check_unaligned_map(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = 1;						\
	lock *(u32 *)(r0 + 3) += w1;			\
	r0 = *(u32*)(r0 + 3);				\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("xdp")
__description("xadd/w check unaligned pkt")
__failure __msg("BPF_ATOMIC stores into R2 pkt is not allowed")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void xadd_w_check_unaligned_pkt(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data]);		\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);		\
	r1 = r2;					\
	r1 += 8;					\
	if r1 < r3 goto l0_%=;				\
	r0 = 99;					\
	goto l1_%=;					\
l0_%=:	r0 = 1;						\
	r1 = 0;						\
	*(u32*)(r2 + 0) = r1;				\
	r1 = 0;						\
	*(u32*)(r2 + 3) = r1;				\
	lock *(u32 *)(r2 + 1) += w0;			\
	lock *(u32 *)(r2 + 2) += w0;			\
	r0 = *(u32*)(r2 + 1);				\
l1_%=:	exit;						\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}

SEC("tc")
__description("xadd/w check whether src/dst got mangled, 1")
__success __retval(3)
__naked void src_dst_got_mangled_1(void)
{
	asm volatile ("					\
	r0 = 1;						\
	r6 = r0;					\
	r7 = r10;					\
	*(u64*)(r10 - 8) = r0;				\
	lock *(u64 *)(r10 - 8) += r0;			\
	lock *(u64 *)(r10 - 8) += r0;			\
	if r6 != r0 goto l0_%=;				\
	if r7 != r10 goto l0_%=;			\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
l0_%=:	r0 = 42;					\
	exit;						\
"	::: __clobber_all);
}

SEC("tc")
__description("xadd/w check whether src/dst got mangled, 2")
__success __retval(3)
__naked void src_dst_got_mangled_2(void)
{
	asm volatile ("					\
	r0 = 1;						\
	r6 = r0;					\
	r7 = r10;					\
	*(u32*)(r10 - 8) = r0;				\
	lock *(u32 *)(r10 - 8) += w0;			\
	lock *(u32 *)(r10 - 8) += w0;			\
	if r6 != r0 goto l0_%=;				\
	if r7 != r10 goto l0_%=;			\
	r0 = *(u32*)(r10 - 8);				\
	exit;						\
l0_%=:	r0 = 42;					\
	exit;						\
"	::: __clobber_all);
}

char _license[] SEC("license") = "GPL";
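
The __description()/__failure/__msg()/__success/__retval()/__flag() annotations above come from bpf_misc.h: each expands to a BTF declaration tag that the selftests loader reads back from the compiled object to learn the expected verdict, verifier message, and return value. A sketch of their assumed definitions (not part of this diff):

    /* Assumed bpf_misc.h shapes: each annotation becomes a BTF decl tag
     * parsed by the test loader. */
    #define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc)))
    #define __failure           __attribute__((btf_decl_tag("comment:test_expect_failure")))
    #define __msg(msg)          __attribute__((btf_decl_tag("comment:test_expect_msg=" msg)))
    #define __retval(val)       __attribute__((btf_decl_tag("comment:test_retval=" #val)))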
tools/testing/selftests/bpf/verifier/xadd.c (-97, deleted)
{
	"xadd/w check unaligned stack",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -7),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "misaligned stack access off",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"xadd/w check unaligned map",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 3),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = REJECT,
	.errstr = "misaligned value access off",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"xadd/w check unaligned pkt",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_0, 99),
	BPF_JMP_IMM(BPF_JA, 0, 0, 6),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 1),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 2),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "BPF_ATOMIC stores into R2 pkt is not allowed",
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"xadd/w check whether src/dst got mangled, 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
	BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
	BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = 3,
},
{
	"xadd/w check whether src/dst got mangled, 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
	BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
	BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = 3,
},
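
One structural difference worth noting: the macro version above references its map through a BPF_LD_MAP_FD(BPF_REG_1, 0) placeholder that the test harness patches via the .fixup_map_hash_8b table, while the inline-assembly version declares map_hash_8b as an ordinary BTF-defined map and loads its address with a 64-bit immediate that libbpf relocates at load time:

    /* Old: placeholder map FD, patched by .fixup_map_hash_8b = { 3 } */
    BPF_LD_MAP_FD(BPF_REG_1, 0),

    /* New: ld64 immediate relocated against the declared .maps object */
    r1 = %[map_hash_8b] ll;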