Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

selftests/bpf: verifier/spill_fill.c converted to inline assembly

Test verifier/spill_fill.c is automatically converted to use inline assembly.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230325025524.144043-35-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
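
For context, the old test is an array of BPF_*() instruction macros consumed by test_verifier, while the converted test is a set of __naked functions whose bodies are BPF inline assembly, annotated with expectation macros from bpf_misc.h. A minimal sketch of the converted style (this test name and body are illustrative only, not part of the diff):

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only: a trivial spill/fill in the converted style.
 * The real test cases live in progs/verifier_spill_fill.c below. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__description("sketch: spill and fill a scalar")
__success __retval(0)
__naked void sketch_spill_fill(void)
{
	asm volatile ("					\
	r0 = 0;						\
	/* spill r0 to the stack, then fill it back */	\
	*(u64*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
}

char _license[] SEC("license") = "GPL";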

Authored by Eduard Zingerman and committed by Alexei Starovoitov
f4fe3cfe b7e42030

3 files changed: +376 -345

tools/testing/selftests/bpf/prog_tests/verifier.c (+2)
@@ -31,6 +31,7 @@
 #include "verifier_raw_stack.skel.h"
 #include "verifier_raw_tp_writable.skel.h"
 #include "verifier_ringbuf.skel.h"
+#include "verifier_spill_fill.skel.h"
 
 __maybe_unused
 static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory)
@@ -84,3 +85,4 @@
 void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
 void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); }
 void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); }
+void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
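RUN() is defined earlier in verifier.c and is not part of this diff; based on the run_tests_aux() signature visible in the hunk above, it plausibly expands along these lines (a sketch, not the verbatim macro):

/* Hypothetical expansion, consistent with run_tests_aux()'s parameters: */
#define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes)

/* So the new registration above is roughly equivalent to:
 *   run_tests_aux("verifier_spill_fill", verifier_spill_fill__elf_bytes);
 * where verifier_spill_fill__elf_bytes comes from the generated
 * verifier_spill_fill.skel.h skeleton. Once the selftests are built,
 * the converted tests should be selectable with:
 *   ./test_progs -t verifier_spill_fill
 */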
tools/testing/selftests/bpf/progs/verifier_spill_fill.c (new file, +374)
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/spill_fill.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");

SEC("socket")
__description("check valid spill/fill")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(POINTER_VALUE)
__naked void check_valid_spill_fill(void)
{
	asm volatile ("					\
	/* spill R1(ctx) into stack */			\
	*(u64*)(r10 - 8) = r1;				\
	/* fill it back into R2 */			\
	r2 = *(u64*)(r10 - 8);				\
	/* should be able to access R0 = *(R2 + 8) */	\
	/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */\
	r0 = r2;					\
	exit;						\
"	::: __clobber_all);
}

SEC("socket")
__description("check valid spill/fill, skb mark")
__success __success_unpriv __retval(0)
__naked void valid_spill_fill_skb_mark(void)
{
	asm volatile ("					\
	r6 = r1;					\
	*(u64*)(r10 - 8) = r6;				\
	r0 = *(u64*)(r10 - 8);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	exit;						\
"	:
	: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

SEC("socket")
__description("check valid spill/fill, ptr to mem")
__success __success_unpriv __retval(0)
__naked void spill_fill_ptr_to_mem(void)
{
	asm volatile ("					\
	/* reserve 8 byte ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;					\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;				\
	/* spill R6(mem) into the stack */		\
	*(u64*)(r10 - 8) = r6;				\
	/* fill it back in R7 */			\
	r7 = *(u64*)(r10 - 8);				\
	/* should be able to access *(R7) = 0 */	\
	r1 = 0;						\
	*(u64*)(r7 + 0) = r1;				\
	/* submit the reserved ringbuf memory */	\
	r1 = r7;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

SEC("socket")
__description("check with invalid reg offset 0")
__failure __msg("R0 pointer arithmetic on ringbuf_mem_or_null prohibited")
__failure_unpriv
__naked void with_invalid_reg_offset_0(void)
{
	asm volatile ("					\
	/* reserve 8 byte ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;					\
	/* add invalid offset to memory or NULL */	\
	r0 += 1;					\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;				\
	/* should not be able to access *(R7) = 0 */	\
	r1 = 0;						\
	*(u32*)(r6 + 0) = r1;				\
	/* submit the reserved ringbuf memory */	\
	r1 = r6;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

SEC("socket")
__description("check corrupted spill/fill")
__failure __msg("R0 invalid mem access 'scalar'")
__msg_unpriv("attempt to corrupt spilled")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void check_corrupted_spill_fill(void)
{
	asm volatile ("					\
	/* spill R1(ctx) into stack */			\
	*(u64*)(r10 - 8) = r1;				\
	/* mess up with R1 pointer on stack */		\
	r0 = 0x23;					\
	*(u8*)(r10 - 7) = r0;				\
	/* fill back into R0 is fine for priv.		\
	 * R0 now becomes SCALAR_VALUE.			\
	 */						\
	r0 = *(u64*)(r10 - 8);				\
	/* Load from R0 should fail. */			\
	r0 = *(u64*)(r0 + 8);				\
	exit;						\
"	::: __clobber_all);
}

SEC("socket")
__description("check corrupted spill/fill, LSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
__naked void check_corrupted_spill_fill_lsb(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r1;				\
	r0 = 0xcafe;					\
	*(u16*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
}

SEC("socket")
__description("check corrupted spill/fill, MSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
__naked void check_corrupted_spill_fill_msb(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r1;				\
	r0 = 0x12345678;				\
	*(u32*)(r10 - 4) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
}

SEC("tc")
__description("Spill and refill a u32 const scalar. Offset to skb->data")
__success __retval(0)
__naked void scalar_offset_to_skb_data_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u32*)(r10 - 8);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */	\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("socket")
__description("Spill a u32 const, refill from another half of the uninit u32 from the stack")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -4+0 size 4")
__retval(0)
__naked void uninit_u32_from_the_stack(void)
{
	asm volatile ("					\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/	\
	r4 = *(u32*)(r10 - 4);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

SEC("tc")
__description("Spill a u32 const scalar. Refill as u16. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void u16_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u16*)(r10 - 8);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("Spill u32 const scalars. Refill as u64. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void u64_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w6 = 0;						\
	w7 = 20;					\
	*(u32*)(r10 - 4) = r6;				\
	*(u32*)(r10 - 8) = r7;				\
	r4 = *(u16*)(r10 - 8);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void _6_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u16*)(r10 - 6);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void addr_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	*(u32*)(r10 - 4) = r4;				\
	r4 = *(u32*)(r10 - 4);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("Spill and refill a umax=40 bounded scalar. Offset to skb->data")
__success __retval(0)
__naked void scalar_offset_to_skb_data_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r4 = *(u64*)(r1 + %[__sk_buff_tstamp]);		\
	if r4 <= 40 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	/* *(u32 *)(r10 -8) = r4 R4=umax=40 */		\
	*(u32*)(r10 - 8) = r4;				\
	/* r4 = (*u32 *)(r10 - 8) */			\
	r4 = *(u32*)(r10 - 8);				\
	/* r2 += r4 R2=pkt R4=umax=40 */		\
	r2 += r4;					\
	/* r0 = r2 R2=pkt,umax=40 R4=umax=40 */		\
	r0 = r2;					\
	/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */	\
	r2 += 20;					\
	/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */\
	if r2 > r3 goto l1_%=;				\
	/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */\
	r0 = *(u32*)(r0 + 0);				\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
	: __clobber_all);
}

SEC("tc")
__description("Spill a u32 scalar at fp-4 and then at fp-8")
__success __retval(0)
__naked void and_then_at_fp_8(void)
{
	asm volatile ("					\
	w4 = 4321;					\
	*(u32*)(r10 - 4) = r4;				\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u64*)(r10 - 8);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

char _license[] SEC("license") = "GPL";
tools/testing/selftests/bpf/verifier/spill_fill.c (deleted, -345)
{
	"check valid spill/fill",
	.insns = {
	/* spill R1(ctx) into stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	/* fill it back into R2 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
	/* should be able to access R0 = *(R2 + 8) */
	/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R0 leaks addr",
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.retval = POINTER_VALUE,
},
{
	"check valid spill/fill, skb mark",
	.insns = {
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = ACCEPT,
},
{
	"check valid spill/fill, ptr to mem",
	.insns = {
	/* reserve 8 byte ringbuf memory */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	/* store a pointer to the reserved memory in R6 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* check whether the reservation was successful */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	/* spill R6(mem) into the stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	/* fill it back in R7 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
	/* should be able to access *(R7) = 0 */
	BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
	/* submit the reserved ringbuf memory */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.result = ACCEPT,
	.result_unpriv = ACCEPT,
},
{
	"check with invalid reg offset 0",
	.insns = {
	/* reserve 8 byte ringbuf memory */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	/* store a pointer to the reserved memory in R6 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* add invalid offset to memory or NULL */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
	/* check whether the reservation was successful */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	/* should not be able to access *(R7) = 0 */
	BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0),
	/* submit the reserved ringbuf memory */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.result = REJECT,
	.errstr = "R0 pointer arithmetic on ringbuf_mem_or_null prohibited",
},
{
	"check corrupted spill/fill",
	.insns = {
	/* spill R1(ctx) into stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	/* mess up with R1 pointer on stack */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
	/* fill back into R0 is fine for priv.
	 * R0 now becomes SCALAR_VALUE.
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	/* Load from R0 should fail. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.errstr = "R0 invalid mem access 'scalar'",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"check corrupted spill/fill, LSB",
	.insns = {
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = POINTER_VALUE,
},
{
	"check corrupted spill/fill, MSB",
	.insns = {
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = POINTER_VALUE,
},
{
	"Spill and refill a u32 const scalar. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -8) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill a u32 const, refill from another half of the uninit u32 from the stack",
	.insns = {
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result_unpriv = REJECT,
	.errstr_unpriv = "invalid read from stack off -4+0 size 4",
	/* in privileged mode reads from uninitialized stack locations are permitted */
	.result = ACCEPT,
},
{
	"Spill a u32 const scalar. Refill as u16. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u16 *)(r10 -8) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill u32 const scalars. Refill as u64. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r6 = 0 */
	BPF_MOV32_IMM(BPF_REG_6, 0),
	/* r7 = 20 */
	BPF_MOV32_IMM(BPF_REG_7, 20),
	/* *(u32 *)(r10 -4) = r6 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
	/* *(u32 *)(r10 -8) = r7 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
	/* r4 = *(u64 *)(r10 -8) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u16 *)(r10 -6) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* *(u32 *)(r10 -4) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
	/* r4 = *(u32 *)(r10 -4), */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill and refill a umax=40 bounded scalar. Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* *(u32 *)(r10 -8) = r4 R4=umax=40 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = (*u32 *)(r10 - 8) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
	/* r2 += r4 R2=pkt R4=umax=40 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
	/* r0 = r2 R2=pkt,umax=40 R4=umax=40 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
	/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1),
	/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"Spill a u32 scalar at fp-4 and then at fp-8",
	.insns = {
	/* r4 = 4321 */
	BPF_MOV32_IMM(BPF_REG_4, 4321),
	/* *(u32 *)(r10 -4) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u64 *)(r10 -8) */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},