Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: verifier/jeq_infer_not_null converted to inline assembly

Test verifier/jeq_infer_not_null automatically converted to use inline assembly.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230421174234.2391278-9-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Eduard Zingerman and committed by Alexei Starovoitov.
a5828e31 0a372c9c

+215 -174
+2
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 29 29 #include "verifier_helper_restricted.skel.h" 30 30 #include "verifier_helper_value_access.skel.h" 31 31 #include "verifier_int_ptr.skel.h" 32 + #include "verifier_jeq_infer_not_null.skel.h" 32 33 #include "verifier_ld_ind.skel.h" 33 34 #include "verifier_leak_ptr.skel.h" 34 35 #include "verifier_map_ptr.skel.h" ··· 112 111 void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); } 113 112 void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); } 114 113 void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } 114 + void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); } 115 115 void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } 116 116 void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } 117 117 void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); }
+213
tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Converted from tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c */ 3 + 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + #include "bpf_misc.h" 7 + 8 + struct { 9 + __uint(type, BPF_MAP_TYPE_XSKMAP); 10 + __uint(max_entries, 1); 11 + __type(key, int); 12 + __type(value, int); 13 + } map_xskmap SEC(".maps"); 14 + 15 + /* This is equivalent to the following program: 16 + * 17 + * r6 = skb->sk; 18 + * r7 = sk_fullsock(r6); 19 + * r0 = sk_fullsock(r6); 20 + * if (r0 == 0) return 0; (a) 21 + * if (r0 != r7) return 0; (b) 22 + * *r7->type; (c) 23 + * return 0; 24 + * 25 + * It is safe to dereference r7 at point (c), because of (a) and (b). 26 + * The test verifies that relation r0 == r7 is propagated from (b) to (c). 27 + */ 28 + SEC("cgroup/skb") 29 + __description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JNE false branch") 30 + __success __failure_unpriv __msg_unpriv("R7 pointer comparison") 31 + __retval(0) 32 + __naked void socket_for_jne_false_branch(void) 33 + { 34 + asm volatile (" \ 35 + /* r6 = skb->sk; */ \ 36 + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ 37 + /* if (r6 == 0) return 0; */ \ 38 + if r6 == 0 goto l0_%=; \ 39 + /* r7 = sk_fullsock(skb); */ \ 40 + r1 = r6; \ 41 + call %[bpf_sk_fullsock]; \ 42 + r7 = r0; \ 43 + /* r0 = sk_fullsock(skb); */ \ 44 + r1 = r6; \ 45 + call %[bpf_sk_fullsock]; \ 46 + /* if (r0 == null) return 0; */ \ 47 + if r0 == 0 goto l0_%=; \ 48 + /* if (r0 == r7) r0 = *(r7->type); */ \ 49 + if r0 != r7 goto l0_%=; /* Use ! JNE ! */\ 50 + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ 51 + l0_%=: /* return 0 */ \ 52 + r0 = 0; \ 53 + exit; \ 54 + " : 55 + : __imm(bpf_sk_fullsock), 56 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 57 + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 58 + : __clobber_all); 59 + } 60 + 61 + /* Same as above, but verify that another branch of JNE still 62 + * prohibits access to PTR_MAYBE_NULL. 
63 + */ 64 + SEC("cgroup/skb") 65 + __description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JNE true branch") 66 + __failure __msg("R7 invalid mem access 'sock_or_null'") 67 + __failure_unpriv __msg_unpriv("R7 pointer comparison") 68 + __naked void unchanged_for_jne_true_branch(void) 69 + { 70 + asm volatile (" \ 71 + /* r6 = skb->sk */ \ 72 + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ 73 + /* if (r6 == 0) return 0; */ \ 74 + if r6 == 0 goto l0_%=; \ 75 + /* r7 = sk_fullsock(skb); */ \ 76 + r1 = r6; \ 77 + call %[bpf_sk_fullsock]; \ 78 + r7 = r0; \ 79 + /* r0 = sk_fullsock(skb); */ \ 80 + r1 = r6; \ 81 + call %[bpf_sk_fullsock]; \ 82 + /* if (r0 == null) return 0; */ \ 83 + if r0 != 0 goto l0_%=; \ 84 + /* if (r0 == r7) return 0; */ \ 85 + if r0 != r7 goto l1_%=; /* Use ! JNE ! */\ 86 + goto l0_%=; \ 87 + l1_%=: /* r0 = *(r7->type); */ \ 88 + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ 89 + l0_%=: /* return 0 */ \ 90 + r0 = 0; \ 91 + exit; \ 92 + " : 93 + : __imm(bpf_sk_fullsock), 94 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 95 + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 96 + : __clobber_all); 97 + } 98 + 99 + /* Same as a first test, but not null should be inferred for JEQ branch */ 100 + SEC("cgroup/skb") 101 + __description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JEQ true branch") 102 + __success __failure_unpriv __msg_unpriv("R7 pointer comparison") 103 + __retval(0) 104 + __naked void socket_for_jeq_true_branch(void) 105 + { 106 + asm volatile (" \ 107 + /* r6 = skb->sk; */ \ 108 + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ 109 + /* if (r6 == null) return 0; */ \ 110 + if r6 == 0 goto l0_%=; \ 111 + /* r7 = sk_fullsock(skb); */ \ 112 + r1 = r6; \ 113 + call %[bpf_sk_fullsock]; \ 114 + r7 = r0; \ 115 + /* r0 = sk_fullsock(skb); */ \ 116 + r1 = r6; \ 117 + call %[bpf_sk_fullsock]; \ 118 + /* if (r0 == null) return 0; */ \ 119 + if r0 == 0 goto l0_%=; \ 120 + /* if (r0 != r7) return 0; */ \ 121 
+ if r0 == r7 goto l1_%=; /* Use ! JEQ ! */\ 122 + goto l0_%=; \ 123 + l1_%=: /* r0 = *(r7->type); */ \ 124 + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ 125 + l0_%=: /* return 0; */ \ 126 + r0 = 0; \ 127 + exit; \ 128 + " : 129 + : __imm(bpf_sk_fullsock), 130 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 131 + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 132 + : __clobber_all); 133 + } 134 + 135 + /* Same as above, but verify that another branch of JNE still 136 + * prohibits access to PTR_MAYBE_NULL. 137 + */ 138 + SEC("cgroup/skb") 139 + __description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JEQ false branch") 140 + __failure __msg("R7 invalid mem access 'sock_or_null'") 141 + __failure_unpriv __msg_unpriv("R7 pointer comparison") 142 + __naked void unchanged_for_jeq_false_branch(void) 143 + { 144 + asm volatile (" \ 145 + /* r6 = skb->sk; */ \ 146 + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ 147 + /* if (r6 == null) return 0; */ \ 148 + if r6 == 0 goto l0_%=; \ 149 + /* r7 = sk_fullsock(skb); */ \ 150 + r1 = r6; \ 151 + call %[bpf_sk_fullsock]; \ 152 + r7 = r0; \ 153 + /* r0 = sk_fullsock(skb); */ \ 154 + r1 = r6; \ 155 + call %[bpf_sk_fullsock]; \ 156 + /* if (r0 == null) return 0; */ \ 157 + if r0 == 0 goto l0_%=; \ 158 + /* if (r0 != r7) r0 = *(r7->type); */ \ 159 + if r0 == r7 goto l0_%=; /* Use ! JEQ ! */\ 160 + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ 161 + l0_%=: /* return 0; */ \ 162 + r0 = 0; \ 163 + exit; \ 164 + " : 165 + : __imm(bpf_sk_fullsock), 166 + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), 167 + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) 168 + : __clobber_all); 169 + } 170 + 171 + /* Maps are treated in a different branch of `mark_ptr_not_null_reg`, 172 + * so separate test for maps case. 
173 + */ 174 + SEC("xdp") 175 + __description("jne/jeq infer not null, PTR_TO_MAP_VALUE_OR_NULL -> PTR_TO_MAP_VALUE") 176 + __success __retval(0) 177 + __naked void null_ptr_to_map_value(void) 178 + { 179 + asm volatile (" \ 180 + /* r9 = &some stack to use as key */ \ 181 + r1 = 0; \ 182 + *(u32*)(r10 - 8) = r1; \ 183 + r9 = r10; \ 184 + r9 += -8; \ 185 + /* r8 = process local map */ \ 186 + r8 = %[map_xskmap] ll; \ 187 + /* r6 = map_lookup_elem(r8, r9); */ \ 188 + r1 = r8; \ 189 + r2 = r9; \ 190 + call %[bpf_map_lookup_elem]; \ 191 + r6 = r0; \ 192 + /* r7 = map_lookup_elem(r8, r9); */ \ 193 + r1 = r8; \ 194 + r2 = r9; \ 195 + call %[bpf_map_lookup_elem]; \ 196 + r7 = r0; \ 197 + /* if (r6 == 0) return 0; */ \ 198 + if r6 == 0 goto l0_%=; \ 199 + /* if (r6 != r7) return 0; */ \ 200 + if r6 != r7 goto l0_%=; \ 201 + /* read *r7; */ \ 202 + r0 = *(u32*)(r7 + %[bpf_xdp_sock_queue_id]); \ 203 + l0_%=: /* return 0; */ \ 204 + r0 = 0; \ 205 + exit; \ 206 + " : 207 + : __imm(bpf_map_lookup_elem), 208 + __imm_addr(map_xskmap), 209 + __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id)) 210 + : __clobber_all); 211 + } 212 + 213 + char _license[] SEC("license") = "GPL";
-174
tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c
··· 1 - { 2 - /* This is equivalent to the following program: 3 - * 4 - * r6 = skb->sk; 5 - * r7 = sk_fullsock(r6); 6 - * r0 = sk_fullsock(r6); 7 - * if (r0 == 0) return 0; (a) 8 - * if (r0 != r7) return 0; (b) 9 - * *r7->type; (c) 10 - * return 0; 11 - * 12 - * It is safe to dereference r7 at point (c), because of (a) and (b). 13 - * The test verifies that relation r0 == r7 is propagated from (b) to (c). 14 - */ 15 - "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JNE false branch", 16 - .insns = { 17 - /* r6 = skb->sk; */ 18 - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)), 19 - /* if (r6 == 0) return 0; */ 20 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 8), 21 - /* r7 = sk_fullsock(skb); */ 22 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 23 - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), 24 - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 25 - /* r0 = sk_fullsock(skb); */ 26 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 27 - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), 28 - /* if (r0 == null) return 0; */ 29 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 30 - /* if (r0 == r7) r0 = *(r7->type); */ 31 - BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_7, 1), /* Use ! JNE ! */ 32 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), 33 - /* return 0 */ 34 - BPF_MOV64_IMM(BPF_REG_0, 0), 35 - BPF_EXIT_INSN(), 36 - }, 37 - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 38 - .result = ACCEPT, 39 - .result_unpriv = REJECT, 40 - .errstr_unpriv = "R7 pointer comparison", 41 - }, 42 - { 43 - /* Same as above, but verify that another branch of JNE still 44 - * prohibits access to PTR_MAYBE_NULL. 
45 - */ 46 - "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JNE true branch", 47 - .insns = { 48 - /* r6 = skb->sk */ 49 - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)), 50 - /* if (r6 == 0) return 0; */ 51 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 9), 52 - /* r7 = sk_fullsock(skb); */ 53 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 54 - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), 55 - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 56 - /* r0 = sk_fullsock(skb); */ 57 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 58 - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), 59 - /* if (r0 == null) return 0; */ 60 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), 61 - /* if (r0 == r7) return 0; */ 62 - BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_7, 1), /* Use ! JNE ! */ 63 - BPF_JMP_IMM(BPF_JA, 0, 0, 1), 64 - /* r0 = *(r7->type); */ 65 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), 66 - /* return 0 */ 67 - BPF_MOV64_IMM(BPF_REG_0, 0), 68 - BPF_EXIT_INSN(), 69 - }, 70 - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 71 - .result = REJECT, 72 - .errstr = "R7 invalid mem access 'sock_or_null'", 73 - .result_unpriv = REJECT, 74 - .errstr_unpriv = "R7 pointer comparison", 75 - }, 76 - { 77 - /* Same as a first test, but not null should be inferred for JEQ branch */ 78 - "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JEQ true branch", 79 - .insns = { 80 - /* r6 = skb->sk; */ 81 - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)), 82 - /* if (r6 == null) return 0; */ 83 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 9), 84 - /* r7 = sk_fullsock(skb); */ 85 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 86 - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), 87 - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 88 - /* r0 = sk_fullsock(skb); */ 89 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 90 - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), 91 - /* if (r0 == null) return 0; */ 92 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 93 - /* if (r0 != r7) return 0; */ 94 - BPF_JMP_REG(BPF_JEQ, BPF_REG_0, 
BPF_REG_7, 1), /* Use ! JEQ ! */ 95 - BPF_JMP_IMM(BPF_JA, 0, 0, 1), 96 - /* r0 = *(r7->type); */ 97 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), 98 - /* return 0; */ 99 - BPF_MOV64_IMM(BPF_REG_0, 0), 100 - BPF_EXIT_INSN(), 101 - }, 102 - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 103 - .result = ACCEPT, 104 - .result_unpriv = REJECT, 105 - .errstr_unpriv = "R7 pointer comparison", 106 - }, 107 - { 108 - /* Same as above, but verify that another branch of JNE still 109 - * prohibits access to PTR_MAYBE_NULL. 110 - */ 111 - "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JEQ false branch", 112 - .insns = { 113 - /* r6 = skb->sk; */ 114 - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)), 115 - /* if (r6 == null) return 0; */ 116 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 8), 117 - /* r7 = sk_fullsock(skb); */ 118 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 119 - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), 120 - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 121 - /* r0 = sk_fullsock(skb); */ 122 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 123 - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), 124 - /* if (r0 == null) return 0; */ 125 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 126 - /* if (r0 != r7) r0 = *(r7->type); */ 127 - BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_7, 1), /* Use ! JEQ ! */ 128 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), 129 - /* return 0; */ 130 - BPF_MOV64_IMM(BPF_REG_0, 0), 131 - BPF_EXIT_INSN(), 132 - }, 133 - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 134 - .result = REJECT, 135 - .errstr = "R7 invalid mem access 'sock_or_null'", 136 - .result_unpriv = REJECT, 137 - .errstr_unpriv = "R7 pointer comparison", 138 - }, 139 - { 140 - /* Maps are treated in a different branch of `mark_ptr_not_null_reg`, 141 - * so separate test for maps case. 
142 - */ 143 - "jne/jeq infer not null, PTR_TO_MAP_VALUE_OR_NULL -> PTR_TO_MAP_VALUE", 144 - .insns = { 145 - /* r9 = &some stack to use as key */ 146 - BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0), 147 - BPF_MOV64_REG(BPF_REG_9, BPF_REG_10), 148 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_9, -8), 149 - /* r8 = process local map */ 150 - BPF_LD_MAP_FD(BPF_REG_8, 0), 151 - /* r6 = map_lookup_elem(r8, r9); */ 152 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), 153 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_9), 154 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 155 - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 156 - /* r7 = map_lookup_elem(r8, r9); */ 157 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), 158 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_9), 159 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 160 - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), 161 - /* if (r6 == 0) return 0; */ 162 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 2), 163 - /* if (r6 != r7) return 0; */ 164 - BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_7, 1), 165 - /* read *r7; */ 166 - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_xdp_sock, queue_id)), 167 - /* return 0; */ 168 - BPF_MOV64_IMM(BPF_REG_0, 0), 169 - BPF_EXIT_INSN(), 170 - }, 171 - .fixup_map_xskmap = { 3 }, 172 - .prog_type = BPF_PROG_TYPE_XDP, 173 - .result = ACCEPT, 174 - },