Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: verifier/search_pruning converted to inline assembly

Test verifier/search_pruning automatically converted to use inline assembly.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230421174234.2391278-19-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Eduard Zingerman and committed by Alexei Starovoitov
(commit 034d9ad2, parent 65222842)

+341 -266
+2
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 49 49 #include "verifier_regalloc.skel.h" 50 50 #include "verifier_ringbuf.skel.h" 51 51 #include "verifier_runtime_jit.skel.h" 52 + #include "verifier_search_pruning.skel.h" 52 53 #include "verifier_spill_fill.skel.h" 53 54 #include "verifier_stack_ptr.skel.h" 54 55 #include "verifier_uninit.skel.h" ··· 140 139 void test_verifier_regalloc(void) { RUN(verifier_regalloc); } 141 140 void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } 142 141 void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); } 142 + void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); } 143 143 void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } 144 144 void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } 145 145 void test_verifier_uninit(void) { RUN(verifier_uninit); }
+339
tools/testing/selftests/bpf/progs/verifier_search_pruning.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Converted from tools/testing/selftests/bpf/verifier/search_pruning.c */ 3 + 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + #include "bpf_misc.h" 7 + 8 + #define MAX_ENTRIES 11 9 + 10 + struct test_val { 11 + unsigned int index; 12 + int foo[MAX_ENTRIES]; 13 + }; 14 + 15 + struct { 16 + __uint(type, BPF_MAP_TYPE_HASH); 17 + __uint(max_entries, 1); 18 + __type(key, long long); 19 + __type(value, struct test_val); 20 + } map_hash_48b SEC(".maps"); 21 + 22 + struct { 23 + __uint(type, BPF_MAP_TYPE_HASH); 24 + __uint(max_entries, 1); 25 + __type(key, long long); 26 + __type(value, long long); 27 + } map_hash_8b SEC(".maps"); 28 + 29 + SEC("socket") 30 + __description("pointer/scalar confusion in state equality check (way 1)") 31 + __success __failure_unpriv __msg_unpriv("R0 leaks addr as return value") 32 + __retval(POINTER_VALUE) 33 + __naked void state_equality_check_way_1(void) 34 + { 35 + asm volatile (" \ 36 + r1 = 0; \ 37 + *(u64*)(r10 - 8) = r1; \ 38 + r2 = r10; \ 39 + r2 += -8; \ 40 + r1 = %[map_hash_8b] ll; \ 41 + call %[bpf_map_lookup_elem]; \ 42 + if r0 == 0 goto l0_%=; \ 43 + r0 = *(u64*)(r0 + 0); \ 44 + goto l1_%=; \ 45 + l0_%=: r0 = r10; \ 46 + l1_%=: goto l2_%=; \ 47 + l2_%=: exit; \ 48 + " : 49 + : __imm(bpf_map_lookup_elem), 50 + __imm_addr(map_hash_8b) 51 + : __clobber_all); 52 + } 53 + 54 + SEC("socket") 55 + __description("pointer/scalar confusion in state equality check (way 2)") 56 + __success __failure_unpriv __msg_unpriv("R0 leaks addr as return value") 57 + __retval(POINTER_VALUE) 58 + __naked void state_equality_check_way_2(void) 59 + { 60 + asm volatile (" \ 61 + r1 = 0; \ 62 + *(u64*)(r10 - 8) = r1; \ 63 + r2 = r10; \ 64 + r2 += -8; \ 65 + r1 = %[map_hash_8b] ll; \ 66 + call %[bpf_map_lookup_elem]; \ 67 + if r0 != 0 goto l0_%=; \ 68 + r0 = r10; \ 69 + goto l1_%=; \ 70 + l0_%=: r0 = *(u64*)(r0 + 0); \ 71 + l1_%=: exit; \ 72 + " : 73 + : __imm(bpf_map_lookup_elem), 74 + 
__imm_addr(map_hash_8b) 75 + : __clobber_all); 76 + } 77 + 78 + SEC("lwt_in") 79 + __description("liveness pruning and write screening") 80 + __failure __msg("R0 !read_ok") 81 + __naked void liveness_pruning_and_write_screening(void) 82 + { 83 + asm volatile (" \ 84 + /* Get an unknown value */ \ 85 + r2 = *(u32*)(r1 + 0); \ 86 + /* branch conditions teach us nothing about R2 */\ 87 + if r2 >= 0 goto l0_%=; \ 88 + r0 = 0; \ 89 + l0_%=: if r2 >= 0 goto l1_%=; \ 90 + r0 = 0; \ 91 + l1_%=: exit; \ 92 + " ::: __clobber_all); 93 + } 94 + 95 + SEC("socket") 96 + __description("varlen_map_value_access pruning") 97 + __failure __msg("R0 unbounded memory access") 98 + __failure_unpriv __msg_unpriv("R0 leaks addr") 99 + __flag(BPF_F_ANY_ALIGNMENT) 100 + __naked void varlen_map_value_access_pruning(void) 101 + { 102 + asm volatile (" \ 103 + r1 = 0; \ 104 + *(u64*)(r10 - 8) = r1; \ 105 + r2 = r10; \ 106 + r2 += -8; \ 107 + r1 = %[map_hash_48b] ll; \ 108 + call %[bpf_map_lookup_elem]; \ 109 + if r0 == 0 goto l0_%=; \ 110 + r1 = *(u64*)(r0 + 0); \ 111 + w2 = %[max_entries]; \ 112 + if r2 s> r1 goto l1_%=; \ 113 + w1 = 0; \ 114 + l1_%=: w1 <<= 2; \ 115 + r0 += r1; \ 116 + goto l2_%=; \ 117 + l2_%=: r1 = %[test_val_foo]; \ 118 + *(u64*)(r0 + 0) = r1; \ 119 + l0_%=: exit; \ 120 + " : 121 + : __imm(bpf_map_lookup_elem), 122 + __imm_addr(map_hash_48b), 123 + __imm_const(max_entries, MAX_ENTRIES), 124 + __imm_const(test_val_foo, offsetof(struct test_val, foo)) 125 + : __clobber_all); 126 + } 127 + 128 + SEC("tracepoint") 129 + __description("search pruning: all branches should be verified (nop operation)") 130 + __failure __msg("R6 invalid mem access 'scalar'") 131 + __naked void should_be_verified_nop_operation(void) 132 + { 133 + asm volatile (" \ 134 + r2 = r10; \ 135 + r2 += -8; \ 136 + r1 = 0; \ 137 + *(u64*)(r2 + 0) = r1; \ 138 + r1 = %[map_hash_8b] ll; \ 139 + call %[bpf_map_lookup_elem]; \ 140 + if r0 == 0 goto l0_%=; \ 141 + r3 = *(u64*)(r0 + 0); \ 142 + if r3 == 0xbeef goto 
l1_%=; \ 143 + r4 = 0; \ 144 + goto l2_%=; \ 145 + l1_%=: r4 = 1; \ 146 + l2_%=: *(u64*)(r10 - 16) = r4; \ 147 + call %[bpf_ktime_get_ns]; \ 148 + r5 = *(u64*)(r10 - 16); \ 149 + if r5 == 0 goto l0_%=; \ 150 + r6 = 0; \ 151 + r1 = 0xdead; \ 152 + *(u64*)(r6 + 0) = r1; \ 153 + l0_%=: exit; \ 154 + " : 155 + : __imm(bpf_ktime_get_ns), 156 + __imm(bpf_map_lookup_elem), 157 + __imm_addr(map_hash_8b) 158 + : __clobber_all); 159 + } 160 + 161 + SEC("socket") 162 + __description("search pruning: all branches should be verified (invalid stack access)") 163 + /* in privileged mode reads from uninitialized stack locations are permitted */ 164 + __success __failure_unpriv 165 + __msg_unpriv("invalid read from stack off -16+0 size 8") 166 + __retval(0) 167 + __naked void be_verified_invalid_stack_access(void) 168 + { 169 + asm volatile (" \ 170 + r2 = r10; \ 171 + r2 += -8; \ 172 + r1 = 0; \ 173 + *(u64*)(r2 + 0) = r1; \ 174 + r1 = %[map_hash_8b] ll; \ 175 + call %[bpf_map_lookup_elem]; \ 176 + if r0 == 0 goto l0_%=; \ 177 + r3 = *(u64*)(r0 + 0); \ 178 + r4 = 0; \ 179 + if r3 == 0xbeef goto l1_%=; \ 180 + *(u64*)(r10 - 16) = r4; \ 181 + goto l2_%=; \ 182 + l1_%=: *(u64*)(r10 - 24) = r4; \ 183 + l2_%=: call %[bpf_ktime_get_ns]; \ 184 + r5 = *(u64*)(r10 - 16); \ 185 + l0_%=: exit; \ 186 + " : 187 + : __imm(bpf_ktime_get_ns), 188 + __imm(bpf_map_lookup_elem), 189 + __imm_addr(map_hash_8b) 190 + : __clobber_all); 191 + } 192 + 193 + SEC("tracepoint") 194 + __description("precision tracking for u32 spill/fill") 195 + __failure __msg("R0 min value is outside of the allowed memory range") 196 + __naked void tracking_for_u32_spill_fill(void) 197 + { 198 + asm volatile (" \ 199 + r7 = r1; \ 200 + call %[bpf_get_prandom_u32]; \ 201 + w6 = 32; \ 202 + if r0 == 0 goto l0_%=; \ 203 + w6 = 4; \ 204 + l0_%=: /* Additional insns to introduce a pruning point. 
*/\ 205 + call %[bpf_get_prandom_u32]; \ 206 + r3 = 0; \ 207 + r3 = 0; \ 208 + if r0 == 0 goto l1_%=; \ 209 + r3 = 0; \ 210 + l1_%=: /* u32 spill/fill */ \ 211 + *(u32*)(r10 - 8) = r6; \ 212 + r8 = *(u32*)(r10 - 8); \ 213 + /* out-of-bound map value access for r6=32 */ \ 214 + r1 = 0; \ 215 + *(u64*)(r10 - 16) = r1; \ 216 + r2 = r10; \ 217 + r2 += -16; \ 218 + r1 = %[map_hash_8b] ll; \ 219 + call %[bpf_map_lookup_elem]; \ 220 + if r0 == 0 goto l2_%=; \ 221 + r0 += r8; \ 222 + r1 = *(u32*)(r0 + 0); \ 223 + l2_%=: r0 = 0; \ 224 + exit; \ 225 + " : 226 + : __imm(bpf_get_prandom_u32), 227 + __imm(bpf_map_lookup_elem), 228 + __imm_addr(map_hash_8b) 229 + : __clobber_all); 230 + } 231 + 232 + SEC("tracepoint") 233 + __description("precision tracking for u32 spills, u64 fill") 234 + __failure __msg("div by zero") 235 + __naked void for_u32_spills_u64_fill(void) 236 + { 237 + asm volatile (" \ 238 + call %[bpf_get_prandom_u32]; \ 239 + r6 = r0; \ 240 + w7 = 0xffffffff; \ 241 + /* Additional insns to introduce a pruning point. */\ 242 + r3 = 1; \ 243 + r3 = 1; \ 244 + r3 = 1; \ 245 + r3 = 1; \ 246 + call %[bpf_get_prandom_u32]; \ 247 + if r0 == 0 goto l0_%=; \ 248 + r3 = 1; \ 249 + l0_%=: w3 /= 0; \ 250 + /* u32 spills, u64 fill */ \ 251 + *(u32*)(r10 - 4) = r6; \ 252 + *(u32*)(r10 - 8) = r7; \ 253 + r8 = *(u64*)(r10 - 8); \ 254 + /* if r8 != X goto pc+1 r8 known in fallthrough branch */\ 255 + if r8 != 0xffffffff goto l1_%=; \ 256 + r3 = 1; \ 257 + l1_%=: /* if r8 == X goto pc+1 condition always true on first\ 258 + * traversal, so starts backtracking to mark r8 as requiring\ 259 + * precision. r7 marked as needing precision. r6 not marked\ 260 + * since it's not tracked. \ 261 + */ \ 262 + if r8 == 0xffffffff goto l2_%=; \ 263 + /* fails if r8 correctly marked unknown after fill. 
*/\ 264 + w3 /= 0; \ 265 + l2_%=: r0 = 0; \ 266 + exit; \ 267 + " : 268 + : __imm(bpf_get_prandom_u32) 269 + : __clobber_all); 270 + } 271 + 272 + SEC("socket") 273 + __description("allocated_stack") 274 + __success __msg("processed 15 insns") 275 + __success_unpriv __msg_unpriv("") __log_level(1) __retval(0) 276 + __naked void allocated_stack(void) 277 + { 278 + asm volatile (" \ 279 + r6 = r1; \ 280 + call %[bpf_get_prandom_u32]; \ 281 + r7 = r0; \ 282 + if r0 == 0 goto l0_%=; \ 283 + r0 = 0; \ 284 + *(u64*)(r10 - 8) = r6; \ 285 + r6 = *(u64*)(r10 - 8); \ 286 + *(u8*)(r10 - 9) = r7; \ 287 + r7 = *(u8*)(r10 - 9); \ 288 + l0_%=: if r0 != 0 goto l1_%=; \ 289 + l1_%=: if r0 != 0 goto l2_%=; \ 290 + l2_%=: if r0 != 0 goto l3_%=; \ 291 + l3_%=: if r0 != 0 goto l4_%=; \ 292 + l4_%=: exit; \ 293 + " : 294 + : __imm(bpf_get_prandom_u32) 295 + : __clobber_all); 296 + } 297 + 298 + /* The test performs a conditional 64-bit write to a stack location 299 + * fp[-8], this is followed by an unconditional 8-bit write to fp[-8], 300 + * then data is read from fp[-8]. This sequence is unsafe. 301 + * 302 + * The test would be mistakenly marked as safe w/o dst register parent 303 + * preservation in verifier.c:copy_register_state() function. 304 + * 305 + * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the 306 + * checkpoint state after conditional 64-bit assignment. 
307 + */ 308 + 309 + SEC("socket") 310 + __description("write tracking and register parent chain bug") 311 + /* in privileged mode reads from uninitialized stack locations are permitted */ 312 + __success __failure_unpriv 313 + __msg_unpriv("invalid read from stack off -8+1 size 8") 314 + __retval(0) __flag(BPF_F_TEST_STATE_FREQ) 315 + __naked void and_register_parent_chain_bug(void) 316 + { 317 + asm volatile (" \ 318 + /* r6 = ktime_get_ns() */ \ 319 + call %[bpf_ktime_get_ns]; \ 320 + r6 = r0; \ 321 + /* r0 = ktime_get_ns() */ \ 322 + call %[bpf_ktime_get_ns]; \ 323 + /* if r0 > r6 goto +1 */ \ 324 + if r0 > r6 goto l0_%=; \ 325 + /* *(u64 *)(r10 - 8) = 0xdeadbeef */ \ 326 + r0 = 0xdeadbeef; \ 327 + *(u64*)(r10 - 8) = r0; \ 328 + l0_%=: r1 = 42; \ 329 + *(u8*)(r10 - 8) = r1; \ 330 + r2 = *(u64*)(r10 - 8); \ 331 + /* exit(0) */ \ 332 + r0 = 0; \ 333 + exit; \ 334 + " : 335 + : __imm(bpf_ktime_get_ns) 336 + : __clobber_all); 337 + } 338 + 339 + char _license[] SEC("license") = "GPL";
-266
tools/testing/selftests/bpf/verifier/search_pruning.c
··· 1 - { 2 - "pointer/scalar confusion in state equality check (way 1)", 3 - .insns = { 4 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 7 - BPF_LD_MAP_FD(BPF_REG_1, 0), 8 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 9 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 10 - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), 11 - BPF_JMP_A(1), 12 - BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), 13 - BPF_JMP_A(0), 14 - BPF_EXIT_INSN(), 15 - }, 16 - .fixup_map_hash_8b = { 3 }, 17 - .result = ACCEPT, 18 - .retval = POINTER_VALUE, 19 - .result_unpriv = REJECT, 20 - .errstr_unpriv = "R0 leaks addr as return value" 21 - }, 22 - { 23 - "pointer/scalar confusion in state equality check (way 2)", 24 - .insns = { 25 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 26 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 27 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 28 - BPF_LD_MAP_FD(BPF_REG_1, 0), 29 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 30 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), 31 - BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), 32 - BPF_JMP_A(1), 33 - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), 34 - BPF_EXIT_INSN(), 35 - }, 36 - .fixup_map_hash_8b = { 3 }, 37 - .result = ACCEPT, 38 - .retval = POINTER_VALUE, 39 - .result_unpriv = REJECT, 40 - .errstr_unpriv = "R0 leaks addr as return value" 41 - }, 42 - { 43 - "liveness pruning and write screening", 44 - .insns = { 45 - /* Get an unknown value */ 46 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 47 - /* branch conditions teach us nothing about R2 */ 48 - BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), 49 - BPF_MOV64_IMM(BPF_REG_0, 0), 50 - BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), 51 - BPF_MOV64_IMM(BPF_REG_0, 0), 52 - BPF_EXIT_INSN(), 53 - }, 54 - .errstr = "R0 !read_ok", 55 - .result = REJECT, 56 - .prog_type = BPF_PROG_TYPE_LWT_IN, 57 - }, 58 - { 59 - "varlen_map_value_access pruning", 60 - .insns = { 61 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 62 
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 63 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 64 - BPF_LD_MAP_FD(BPF_REG_1, 0), 65 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 66 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 67 - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 68 - BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), 69 - BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), 70 - BPF_MOV32_IMM(BPF_REG_1, 0), 71 - BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), 72 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 73 - BPF_JMP_IMM(BPF_JA, 0, 0, 0), 74 - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 75 - BPF_EXIT_INSN(), 76 - }, 77 - .fixup_map_hash_48b = { 3 }, 78 - .errstr_unpriv = "R0 leaks addr", 79 - .errstr = "R0 unbounded memory access", 80 - .result_unpriv = REJECT, 81 - .result = REJECT, 82 - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 83 - }, 84 - { 85 - "search pruning: all branches should be verified (nop operation)", 86 - .insns = { 87 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 88 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 89 - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 90 - BPF_LD_MAP_FD(BPF_REG_1, 0), 91 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 92 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 93 - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), 94 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2), 95 - BPF_MOV64_IMM(BPF_REG_4, 0), 96 - BPF_JMP_A(1), 97 - BPF_MOV64_IMM(BPF_REG_4, 1), 98 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16), 99 - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), 100 - BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16), 101 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2), 102 - BPF_MOV64_IMM(BPF_REG_6, 0), 103 - BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead), 104 - BPF_EXIT_INSN(), 105 - }, 106 - .fixup_map_hash_8b = { 3 }, 107 - .errstr = "R6 invalid mem access 'scalar'", 108 - .result = REJECT, 109 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 110 - }, 111 - { 112 - "search pruning: all branches should be verified (invalid stack access)", 113 - .insns = { 114 - 
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 115 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 116 - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 117 - BPF_LD_MAP_FD(BPF_REG_1, 0), 118 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 119 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 120 - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), 121 - BPF_MOV64_IMM(BPF_REG_4, 0), 122 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2), 123 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16), 124 - BPF_JMP_A(1), 125 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24), 126 - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), 127 - BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16), 128 - BPF_EXIT_INSN(), 129 - }, 130 - .fixup_map_hash_8b = { 3 }, 131 - .errstr_unpriv = "invalid read from stack off -16+0 size 8", 132 - .result_unpriv = REJECT, 133 - /* in privileged mode reads from uninitialized stack locations are permitted */ 134 - .result = ACCEPT, 135 - }, 136 - { 137 - "precision tracking for u32 spill/fill", 138 - .insns = { 139 - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), 140 - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), 141 - BPF_MOV32_IMM(BPF_REG_6, 32), 142 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 143 - BPF_MOV32_IMM(BPF_REG_6, 4), 144 - /* Additional insns to introduce a pruning point. 
*/ 145 - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), 146 - BPF_MOV64_IMM(BPF_REG_3, 0), 147 - BPF_MOV64_IMM(BPF_REG_3, 0), 148 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 149 - BPF_MOV64_IMM(BPF_REG_3, 0), 150 - /* u32 spill/fill */ 151 - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -8), 152 - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_10, -8), 153 - /* out-of-bound map value access for r6=32 */ 154 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), 155 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 156 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 157 - BPF_LD_MAP_FD(BPF_REG_1, 0), 158 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 159 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 160 - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), 161 - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 162 - BPF_MOV64_IMM(BPF_REG_0, 0), 163 - BPF_EXIT_INSN(), 164 - }, 165 - .fixup_map_hash_8b = { 15 }, 166 - .result = REJECT, 167 - .errstr = "R0 min value is outside of the allowed memory range", 168 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 169 - }, 170 - { 171 - "precision tracking for u32 spills, u64 fill", 172 - .insns = { 173 - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), 174 - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 175 - BPF_MOV32_IMM(BPF_REG_7, 0xffffffff), 176 - /* Additional insns to introduce a pruning point. 
*/ 177 - BPF_MOV64_IMM(BPF_REG_3, 1), 178 - BPF_MOV64_IMM(BPF_REG_3, 1), 179 - BPF_MOV64_IMM(BPF_REG_3, 1), 180 - BPF_MOV64_IMM(BPF_REG_3, 1), 181 - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), 182 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 183 - BPF_MOV64_IMM(BPF_REG_3, 1), 184 - BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0), 185 - /* u32 spills, u64 fill */ 186 - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4), 187 - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8), 188 - BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -8), 189 - /* if r8 != X goto pc+1 r8 known in fallthrough branch */ 190 - BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0xffffffff, 1), 191 - BPF_MOV64_IMM(BPF_REG_3, 1), 192 - /* if r8 == X goto pc+1 condition always true on first 193 - * traversal, so starts backtracking to mark r8 as requiring 194 - * precision. r7 marked as needing precision. r6 not marked 195 - * since it's not tracked. 196 - */ 197 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0xffffffff, 1), 198 - /* fails if r8 correctly marked unknown after fill. 
*/ 199 - BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0), 200 - BPF_MOV64_IMM(BPF_REG_0, 0), 201 - BPF_EXIT_INSN(), 202 - }, 203 - .result = REJECT, 204 - .errstr = "div by zero", 205 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 206 - }, 207 - { 208 - "allocated_stack", 209 - .insns = { 210 - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), 211 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), 212 - BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0), 213 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 214 - BPF_MOV64_IMM(BPF_REG_0, 0), 215 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), 216 - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8), 217 - BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9), 218 - BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9), 219 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), 220 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), 221 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), 222 - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), 223 - BPF_EXIT_INSN(), 224 - }, 225 - .result = ACCEPT, 226 - .result_unpriv = ACCEPT, 227 - .insn_processed = 15, 228 - }, 229 - /* The test performs a conditional 64-bit write to a stack location 230 - * fp[-8], this is followed by an unconditional 8-bit write to fp[-8], 231 - * then data is read from fp[-8]. This sequence is unsafe. 232 - * 233 - * The test would be mistakenly marked as safe w/o dst register parent 234 - * preservation in verifier.c:copy_register_state() function. 235 - * 236 - * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the 237 - * checkpoint state after conditional 64-bit assignment. 
238 - */ 239 - { 240 - "write tracking and register parent chain bug", 241 - .insns = { 242 - /* r6 = ktime_get_ns() */ 243 - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), 244 - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), 245 - /* r0 = ktime_get_ns() */ 246 - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), 247 - /* if r0 > r6 goto +1 */ 248 - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1), 249 - /* *(u64 *)(r10 - 8) = 0xdeadbeef */ 250 - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef), 251 - /* r1 = 42 */ 252 - BPF_MOV64_IMM(BPF_REG_1, 42), 253 - /* *(u8 *)(r10 - 8) = r1 */ 254 - BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8), 255 - /* r2 = *(u64 *)(r10 - 8) */ 256 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8), 257 - /* exit(0) */ 258 - BPF_MOV64_IMM(BPF_REG_0, 0), 259 - BPF_EXIT_INSN(), 260 - }, 261 - .flags = BPF_F_TEST_STATE_FREQ, 262 - .errstr_unpriv = "invalid read from stack off -8+1 size 8", 263 - .result_unpriv = REJECT, 264 - /* in privileged mode reads from uninitialized stack locations are permitted */ 265 - .result = ACCEPT, 266 - },