Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

selftests/bpf: verifier/helper_access_var_len.c converted to inline assembly

Test verifier/helper_access_var_len.c automatically converted to use inline assembly.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230325025524.144043-21-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by: Eduard Zingerman
Committed by: Alexei Starovoitov
b37d776b 9553de70

+827 -650
+2
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 17 17 #include "verifier_direct_stack_access_wraparound.skel.h" 18 18 #include "verifier_div0.skel.h" 19 19 #include "verifier_div_overflow.skel.h" 20 + #include "verifier_helper_access_var_len.skel.h" 20 21 21 22 __maybe_unused 22 23 static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) ··· 57 56 void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); } 58 57 void test_verifier_div0(void) { RUN(verifier_div0); } 59 58 void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } 59 + void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); }
+825
tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Converted from tools/testing/selftests/bpf/verifier/helper_access_var_len.c */ 3 + 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + #include "bpf_misc.h" 7 + 8 + #define MAX_ENTRIES 11 9 + 10 + struct test_val { 11 + unsigned int index; 12 + int foo[MAX_ENTRIES]; 13 + }; 14 + 15 + struct { 16 + __uint(type, BPF_MAP_TYPE_HASH); 17 + __uint(max_entries, 1); 18 + __type(key, long long); 19 + __type(value, struct test_val); 20 + } map_hash_48b SEC(".maps"); 21 + 22 + struct { 23 + __uint(type, BPF_MAP_TYPE_HASH); 24 + __uint(max_entries, 1); 25 + __type(key, long long); 26 + __type(value, long long); 27 + } map_hash_8b SEC(".maps"); 28 + 29 + struct { 30 + __uint(type, BPF_MAP_TYPE_RINGBUF); 31 + __uint(max_entries, 4096); 32 + } map_ringbuf SEC(".maps"); 33 + 34 + SEC("tracepoint") 35 + __description("helper access to variable memory: stack, bitwise AND + JMP, correct bounds") 36 + __success 37 + __naked void bitwise_and_jmp_correct_bounds(void) 38 + { 39 + asm volatile (" \ 40 + r1 = r10; \ 41 + r1 += -64; \ 42 + r0 = 0; \ 43 + *(u64*)(r10 - 64) = r0; \ 44 + *(u64*)(r10 - 56) = r0; \ 45 + *(u64*)(r10 - 48) = r0; \ 46 + *(u64*)(r10 - 40) = r0; \ 47 + *(u64*)(r10 - 32) = r0; \ 48 + *(u64*)(r10 - 24) = r0; \ 49 + *(u64*)(r10 - 16) = r0; \ 50 + *(u64*)(r10 - 8) = r0; \ 51 + r2 = 16; \ 52 + *(u64*)(r1 - 128) = r2; \ 53 + r2 = *(u64*)(r1 - 128); \ 54 + r2 &= 64; \ 55 + r4 = 0; \ 56 + if r4 >= r2 goto l0_%=; \ 57 + r3 = 0; \ 58 + call %[bpf_probe_read_kernel]; \ 59 + l0_%=: r0 = 0; \ 60 + exit; \ 61 + " : 62 + : __imm(bpf_probe_read_kernel) 63 + : __clobber_all); 64 + } 65 + 66 + SEC("socket") 67 + __description("helper access to variable memory: stack, bitwise AND, zero included") 68 + /* in privileged mode reads from uninitialized stack locations are permitted */ 69 + __success __failure_unpriv 70 + __msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64") 71 + __retval(0) 72 + __naked void 
stack_bitwise_and_zero_included(void) 73 + { 74 + asm volatile (" \ 75 + /* set max stack size */ \ 76 + r6 = 0; \ 77 + *(u64*)(r10 - 128) = r6; \ 78 + /* set r3 to a random value */ \ 79 + call %[bpf_get_prandom_u32]; \ 80 + r3 = r0; \ 81 + /* use bitwise AND to limit r3 range to [0, 64] */\ 82 + r3 &= 64; \ 83 + r1 = %[map_ringbuf] ll; \ 84 + r2 = r10; \ 85 + r2 += -64; \ 86 + r4 = 0; \ 87 + /* Call bpf_ringbuf_output(), it is one of a few helper functions with\ 88 + * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\ 89 + * For unpriv this should signal an error, because memory at &fp[-64] is\ 90 + * not initialized. \ 91 + */ \ 92 + call %[bpf_ringbuf_output]; \ 93 + exit; \ 94 + " : 95 + : __imm(bpf_get_prandom_u32), 96 + __imm(bpf_ringbuf_output), 97 + __imm_addr(map_ringbuf) 98 + : __clobber_all); 99 + } 100 + 101 + SEC("tracepoint") 102 + __description("helper access to variable memory: stack, bitwise AND + JMP, wrong max") 103 + __failure __msg("invalid indirect access to stack R1 off=-64 size=65") 104 + __naked void bitwise_and_jmp_wrong_max(void) 105 + { 106 + asm volatile (" \ 107 + r2 = *(u64*)(r1 + 8); \ 108 + r1 = r10; \ 109 + r1 += -64; \ 110 + *(u64*)(r1 - 128) = r2; \ 111 + r2 = *(u64*)(r1 - 128); \ 112 + r2 &= 65; \ 113 + r4 = 0; \ 114 + if r4 >= r2 goto l0_%=; \ 115 + r3 = 0; \ 116 + call %[bpf_probe_read_kernel]; \ 117 + l0_%=: r0 = 0; \ 118 + exit; \ 119 + " : 120 + : __imm(bpf_probe_read_kernel) 121 + : __clobber_all); 122 + } 123 + 124 + SEC("tracepoint") 125 + __description("helper access to variable memory: stack, JMP, correct bounds") 126 + __success 127 + __naked void memory_stack_jmp_correct_bounds(void) 128 + { 129 + asm volatile (" \ 130 + r1 = r10; \ 131 + r1 += -64; \ 132 + r0 = 0; \ 133 + *(u64*)(r10 - 64) = r0; \ 134 + *(u64*)(r10 - 56) = r0; \ 135 + *(u64*)(r10 - 48) = r0; \ 136 + *(u64*)(r10 - 40) = r0; \ 137 + *(u64*)(r10 - 32) = r0; \ 138 + *(u64*)(r10 - 24) = r0; \ 139 + *(u64*)(r10 - 16) = r0; \ 140 + *(u64*)(r10 - 
8) = r0; \ 141 + r2 = 16; \ 142 + *(u64*)(r1 - 128) = r2; \ 143 + r2 = *(u64*)(r1 - 128); \ 144 + if r2 > 64 goto l0_%=; \ 145 + r4 = 0; \ 146 + if r4 >= r2 goto l0_%=; \ 147 + r3 = 0; \ 148 + call %[bpf_probe_read_kernel]; \ 149 + l0_%=: r0 = 0; \ 150 + exit; \ 151 + " : 152 + : __imm(bpf_probe_read_kernel) 153 + : __clobber_all); 154 + } 155 + 156 + SEC("tracepoint") 157 + __description("helper access to variable memory: stack, JMP (signed), correct bounds") 158 + __success 159 + __naked void stack_jmp_signed_correct_bounds(void) 160 + { 161 + asm volatile (" \ 162 + r1 = r10; \ 163 + r1 += -64; \ 164 + r0 = 0; \ 165 + *(u64*)(r10 - 64) = r0; \ 166 + *(u64*)(r10 - 56) = r0; \ 167 + *(u64*)(r10 - 48) = r0; \ 168 + *(u64*)(r10 - 40) = r0; \ 169 + *(u64*)(r10 - 32) = r0; \ 170 + *(u64*)(r10 - 24) = r0; \ 171 + *(u64*)(r10 - 16) = r0; \ 172 + *(u64*)(r10 - 8) = r0; \ 173 + r2 = 16; \ 174 + *(u64*)(r1 - 128) = r2; \ 175 + r2 = *(u64*)(r1 - 128); \ 176 + if r2 s> 64 goto l0_%=; \ 177 + r4 = 0; \ 178 + if r4 s>= r2 goto l0_%=; \ 179 + r3 = 0; \ 180 + call %[bpf_probe_read_kernel]; \ 181 + l0_%=: r0 = 0; \ 182 + exit; \ 183 + " : 184 + : __imm(bpf_probe_read_kernel) 185 + : __clobber_all); 186 + } 187 + 188 + SEC("tracepoint") 189 + __description("helper access to variable memory: stack, JMP, bounds + offset") 190 + __failure __msg("invalid indirect access to stack R1 off=-64 size=65") 191 + __naked void memory_stack_jmp_bounds_offset(void) 192 + { 193 + asm volatile (" \ 194 + r2 = *(u64*)(r1 + 8); \ 195 + r1 = r10; \ 196 + r1 += -64; \ 197 + *(u64*)(r1 - 128) = r2; \ 198 + r2 = *(u64*)(r1 - 128); \ 199 + if r2 > 64 goto l0_%=; \ 200 + r4 = 0; \ 201 + if r4 >= r2 goto l0_%=; \ 202 + r2 += 1; \ 203 + r3 = 0; \ 204 + call %[bpf_probe_read_kernel]; \ 205 + l0_%=: r0 = 0; \ 206 + exit; \ 207 + " : 208 + : __imm(bpf_probe_read_kernel) 209 + : __clobber_all); 210 + } 211 + 212 + SEC("tracepoint") 213 + __description("helper access to variable memory: stack, JMP, wrong max") 
214 + __failure __msg("invalid indirect access to stack R1 off=-64 size=65") 215 + __naked void memory_stack_jmp_wrong_max(void) 216 + { 217 + asm volatile (" \ 218 + r2 = *(u64*)(r1 + 8); \ 219 + r1 = r10; \ 220 + r1 += -64; \ 221 + *(u64*)(r1 - 128) = r2; \ 222 + r2 = *(u64*)(r1 - 128); \ 223 + if r2 > 65 goto l0_%=; \ 224 + r4 = 0; \ 225 + if r4 >= r2 goto l0_%=; \ 226 + r3 = 0; \ 227 + call %[bpf_probe_read_kernel]; \ 228 + l0_%=: r0 = 0; \ 229 + exit; \ 230 + " : 231 + : __imm(bpf_probe_read_kernel) 232 + : __clobber_all); 233 + } 234 + 235 + SEC("tracepoint") 236 + __description("helper access to variable memory: stack, JMP, no max check") 237 + __failure 238 + /* because max wasn't checked, signed min is negative */ 239 + __msg("R2 min value is negative, either use unsigned or 'var &= const'") 240 + __naked void stack_jmp_no_max_check(void) 241 + { 242 + asm volatile (" \ 243 + r2 = *(u64*)(r1 + 8); \ 244 + r1 = r10; \ 245 + r1 += -64; \ 246 + *(u64*)(r1 - 128) = r2; \ 247 + r2 = *(u64*)(r1 - 128); \ 248 + r4 = 0; \ 249 + if r4 >= r2 goto l0_%=; \ 250 + r3 = 0; \ 251 + call %[bpf_probe_read_kernel]; \ 252 + l0_%=: r0 = 0; \ 253 + exit; \ 254 + " : 255 + : __imm(bpf_probe_read_kernel) 256 + : __clobber_all); 257 + } 258 + 259 + SEC("socket") 260 + __description("helper access to variable memory: stack, JMP, no min check") 261 + /* in privileged mode reads from uninitialized stack locations are permitted */ 262 + __success __failure_unpriv 263 + __msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64") 264 + __retval(0) 265 + __naked void stack_jmp_no_min_check(void) 266 + { 267 + asm volatile (" \ 268 + /* set max stack size */ \ 269 + r6 = 0; \ 270 + *(u64*)(r10 - 128) = r6; \ 271 + /* set r3 to a random value */ \ 272 + call %[bpf_get_prandom_u32]; \ 273 + r3 = r0; \ 274 + /* use JMP to limit r3 range to [0, 64] */ \ 275 + if r3 > 64 goto l0_%=; \ 276 + r1 = %[map_ringbuf] ll; \ 277 + r2 = r10; \ 278 + r2 += -64; \ 279 + r4 = 0; \ 280 + /* Call 
bpf_ringbuf_output(), it is one of a few helper functions with\ 281 + * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\ 282 + * For unpriv this should signal an error, because memory at &fp[-64] is\ 283 + * not initialized. \ 284 + */ \ 285 + call %[bpf_ringbuf_output]; \ 286 + l0_%=: r0 = 0; \ 287 + exit; \ 288 + " : 289 + : __imm(bpf_get_prandom_u32), 290 + __imm(bpf_ringbuf_output), 291 + __imm_addr(map_ringbuf) 292 + : __clobber_all); 293 + } 294 + 295 + SEC("tracepoint") 296 + __description("helper access to variable memory: stack, JMP (signed), no min check") 297 + __failure __msg("R2 min value is negative") 298 + __naked void jmp_signed_no_min_check(void) 299 + { 300 + asm volatile (" \ 301 + r2 = *(u64*)(r1 + 8); \ 302 + r1 = r10; \ 303 + r1 += -64; \ 304 + *(u64*)(r1 - 128) = r2; \ 305 + r2 = *(u64*)(r1 - 128); \ 306 + if r2 s> 64 goto l0_%=; \ 307 + r3 = 0; \ 308 + call %[bpf_probe_read_kernel]; \ 309 + r0 = 0; \ 310 + l0_%=: exit; \ 311 + " : 312 + : __imm(bpf_probe_read_kernel) 313 + : __clobber_all); 314 + } 315 + 316 + SEC("tracepoint") 317 + __description("helper access to variable memory: map, JMP, correct bounds") 318 + __success 319 + __naked void memory_map_jmp_correct_bounds(void) 320 + { 321 + asm volatile (" \ 322 + r2 = r10; \ 323 + r2 += -8; \ 324 + r1 = 0; \ 325 + *(u64*)(r2 + 0) = r1; \ 326 + r1 = %[map_hash_48b] ll; \ 327 + call %[bpf_map_lookup_elem]; \ 328 + if r0 == 0 goto l0_%=; \ 329 + r1 = r0; \ 330 + r2 = %[sizeof_test_val]; \ 331 + *(u64*)(r10 - 128) = r2; \ 332 + r2 = *(u64*)(r10 - 128); \ 333 + if r2 s> %[sizeof_test_val] goto l1_%=; \ 334 + r4 = 0; \ 335 + if r4 s>= r2 goto l1_%=; \ 336 + r3 = 0; \ 337 + call %[bpf_probe_read_kernel]; \ 338 + l1_%=: r0 = 0; \ 339 + l0_%=: exit; \ 340 + " : 341 + : __imm(bpf_map_lookup_elem), 342 + __imm(bpf_probe_read_kernel), 343 + __imm_addr(map_hash_48b), 344 + __imm_const(sizeof_test_val, sizeof(struct test_val)) 345 + : __clobber_all); 346 + } 347 + 348 + SEC("tracepoint") 349 + 
__description("helper access to variable memory: map, JMP, wrong max") 350 + __failure __msg("invalid access to map value, value_size=48 off=0 size=49") 351 + __naked void memory_map_jmp_wrong_max(void) 352 + { 353 + asm volatile (" \ 354 + r6 = *(u64*)(r1 + 8); \ 355 + r2 = r10; \ 356 + r2 += -8; \ 357 + r1 = 0; \ 358 + *(u64*)(r2 + 0) = r1; \ 359 + r1 = %[map_hash_48b] ll; \ 360 + call %[bpf_map_lookup_elem]; \ 361 + if r0 == 0 goto l0_%=; \ 362 + r1 = r0; \ 363 + r2 = r6; \ 364 + *(u64*)(r10 - 128) = r2; \ 365 + r2 = *(u64*)(r10 - 128); \ 366 + if r2 s> %[__imm_0] goto l1_%=; \ 367 + r4 = 0; \ 368 + if r4 s>= r2 goto l1_%=; \ 369 + r3 = 0; \ 370 + call %[bpf_probe_read_kernel]; \ 371 + l1_%=: r0 = 0; \ 372 + l0_%=: exit; \ 373 + " : 374 + : __imm(bpf_map_lookup_elem), 375 + __imm(bpf_probe_read_kernel), 376 + __imm_addr(map_hash_48b), 377 + __imm_const(__imm_0, sizeof(struct test_val) + 1) 378 + : __clobber_all); 379 + } 380 + 381 + SEC("tracepoint") 382 + __description("helper access to variable memory: map adjusted, JMP, correct bounds") 383 + __success 384 + __naked void map_adjusted_jmp_correct_bounds(void) 385 + { 386 + asm volatile (" \ 387 + r2 = r10; \ 388 + r2 += -8; \ 389 + r1 = 0; \ 390 + *(u64*)(r2 + 0) = r1; \ 391 + r1 = %[map_hash_48b] ll; \ 392 + call %[bpf_map_lookup_elem]; \ 393 + if r0 == 0 goto l0_%=; \ 394 + r1 = r0; \ 395 + r1 += 20; \ 396 + r2 = %[sizeof_test_val]; \ 397 + *(u64*)(r10 - 128) = r2; \ 398 + r2 = *(u64*)(r10 - 128); \ 399 + if r2 s> %[__imm_0] goto l1_%=; \ 400 + r4 = 0; \ 401 + if r4 s>= r2 goto l1_%=; \ 402 + r3 = 0; \ 403 + call %[bpf_probe_read_kernel]; \ 404 + l1_%=: r0 = 0; \ 405 + l0_%=: exit; \ 406 + " : 407 + : __imm(bpf_map_lookup_elem), 408 + __imm(bpf_probe_read_kernel), 409 + __imm_addr(map_hash_48b), 410 + __imm_const(__imm_0, sizeof(struct test_val) - 20), 411 + __imm_const(sizeof_test_val, sizeof(struct test_val)) 412 + : __clobber_all); 413 + } 414 + 415 + SEC("tracepoint") 416 + __description("helper access 
to variable memory: map adjusted, JMP, wrong max") 417 + __failure __msg("R1 min value is outside of the allowed memory range") 418 + __naked void map_adjusted_jmp_wrong_max(void) 419 + { 420 + asm volatile (" \ 421 + r6 = *(u64*)(r1 + 8); \ 422 + r2 = r10; \ 423 + r2 += -8; \ 424 + r1 = 0; \ 425 + *(u64*)(r2 + 0) = r1; \ 426 + r1 = %[map_hash_48b] ll; \ 427 + call %[bpf_map_lookup_elem]; \ 428 + if r0 == 0 goto l0_%=; \ 429 + r1 = r0; \ 430 + r1 += 20; \ 431 + r2 = r6; \ 432 + *(u64*)(r10 - 128) = r2; \ 433 + r2 = *(u64*)(r10 - 128); \ 434 + if r2 s> %[__imm_0] goto l1_%=; \ 435 + r4 = 0; \ 436 + if r4 s>= r2 goto l1_%=; \ 437 + r3 = 0; \ 438 + call %[bpf_probe_read_kernel]; \ 439 + l1_%=: r0 = 0; \ 440 + l0_%=: exit; \ 441 + " : 442 + : __imm(bpf_map_lookup_elem), 443 + __imm(bpf_probe_read_kernel), 444 + __imm_addr(map_hash_48b), 445 + __imm_const(__imm_0, sizeof(struct test_val) - 19) 446 + : __clobber_all); 447 + } 448 + 449 + SEC("tc") 450 + __description("helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)") 451 + __success __retval(0) 452 + __naked void ptr_to_mem_or_null_1(void) 453 + { 454 + asm volatile (" \ 455 + r1 = 0; \ 456 + r2 = 0; \ 457 + r3 = 0; \ 458 + r4 = 0; \ 459 + r5 = 0; \ 460 + call %[bpf_csum_diff]; \ 461 + exit; \ 462 + " : 463 + : __imm(bpf_csum_diff) 464 + : __clobber_all); 465 + } 466 + 467 + SEC("tc") 468 + __description("helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)") 469 + __failure __msg("R1 type=scalar expected=fp") 470 + __naked void ptr_to_mem_or_null_2(void) 471 + { 472 + asm volatile (" \ 473 + r2 = *(u32*)(r1 + 0); \ 474 + r1 = 0; \ 475 + *(u64*)(r10 - 128) = r2; \ 476 + r2 = *(u64*)(r10 - 128); \ 477 + r2 &= 64; \ 478 + r3 = 0; \ 479 + r4 = 0; \ 480 + r5 = 0; \ 481 + call %[bpf_csum_diff]; \ 482 + exit; \ 483 + " : 484 + : __imm(bpf_csum_diff) 485 + : __clobber_all); 486 + } 487 + 488 + SEC("tc") 489 + __description("helper access to variable memory: 
size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)") 490 + __success __retval(0) 491 + __naked void ptr_to_mem_or_null_3(void) 492 + { 493 + asm volatile (" \ 494 + r1 = r10; \ 495 + r1 += -8; \ 496 + r2 = 0; \ 497 + *(u64*)(r1 + 0) = r2; \ 498 + r2 &= 8; \ 499 + r3 = 0; \ 500 + r4 = 0; \ 501 + r5 = 0; \ 502 + call %[bpf_csum_diff]; \ 503 + exit; \ 504 + " : 505 + : __imm(bpf_csum_diff) 506 + : __clobber_all); 507 + } 508 + 509 + SEC("tc") 510 + __description("helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)") 511 + __success __retval(0) 512 + __naked void ptr_to_mem_or_null_4(void) 513 + { 514 + asm volatile (" \ 515 + r1 = 0; \ 516 + *(u64*)(r10 - 8) = r1; \ 517 + r2 = r10; \ 518 + r2 += -8; \ 519 + r1 = %[map_hash_8b] ll; \ 520 + call %[bpf_map_lookup_elem]; \ 521 + if r0 == 0 goto l0_%=; \ 522 + r1 = r0; \ 523 + r2 = 0; \ 524 + r3 = 0; \ 525 + r4 = 0; \ 526 + r5 = 0; \ 527 + call %[bpf_csum_diff]; \ 528 + l0_%=: exit; \ 529 + " : 530 + : __imm(bpf_csum_diff), 531 + __imm(bpf_map_lookup_elem), 532 + __imm_addr(map_hash_8b) 533 + : __clobber_all); 534 + } 535 + 536 + SEC("tc") 537 + __description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)") 538 + __success __retval(0) 539 + __naked void ptr_to_mem_or_null_5(void) 540 + { 541 + asm volatile (" \ 542 + r1 = 0; \ 543 + *(u64*)(r10 - 8) = r1; \ 544 + r2 = r10; \ 545 + r2 += -8; \ 546 + r1 = %[map_hash_8b] ll; \ 547 + call %[bpf_map_lookup_elem]; \ 548 + if r0 == 0 goto l0_%=; \ 549 + r2 = *(u64*)(r0 + 0); \ 550 + if r2 > 8 goto l0_%=; \ 551 + r1 = r10; \ 552 + r1 += -8; \ 553 + *(u64*)(r1 + 0) = r2; \ 554 + r3 = 0; \ 555 + r4 = 0; \ 556 + r5 = 0; \ 557 + call %[bpf_csum_diff]; \ 558 + l0_%=: exit; \ 559 + " : 560 + : __imm(bpf_csum_diff), 561 + __imm(bpf_map_lookup_elem), 562 + __imm_addr(map_hash_8b) 563 + : __clobber_all); 564 + } 565 + 566 + SEC("tc") 567 + __description("helper 
access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)") 568 + __success __retval(0) 569 + __naked void ptr_to_mem_or_null_6(void) 570 + { 571 + asm volatile (" \ 572 + r1 = 0; \ 573 + *(u64*)(r10 - 8) = r1; \ 574 + r2 = r10; \ 575 + r2 += -8; \ 576 + r1 = %[map_hash_8b] ll; \ 577 + call %[bpf_map_lookup_elem]; \ 578 + if r0 == 0 goto l0_%=; \ 579 + r1 = r0; \ 580 + r2 = *(u64*)(r0 + 0); \ 581 + if r2 > 8 goto l0_%=; \ 582 + r3 = 0; \ 583 + r4 = 0; \ 584 + r5 = 0; \ 585 + call %[bpf_csum_diff]; \ 586 + l0_%=: exit; \ 587 + " : 588 + : __imm(bpf_csum_diff), 589 + __imm(bpf_map_lookup_elem), 590 + __imm_addr(map_hash_8b) 591 + : __clobber_all); 592 + } 593 + 594 + SEC("tc") 595 + __description("helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)") 596 + __success __retval(0) 597 + /* csum_diff of 64-byte packet */ 598 + __flag(BPF_F_ANY_ALIGNMENT) 599 + __naked void ptr_to_mem_or_null_7(void) 600 + { 601 + asm volatile (" \ 602 + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ 603 + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ 604 + r0 = r6; \ 605 + r0 += 8; \ 606 + if r0 > r3 goto l0_%=; \ 607 + r1 = r6; \ 608 + r2 = *(u64*)(r6 + 0); \ 609 + if r2 > 8 goto l0_%=; \ 610 + r3 = 0; \ 611 + r4 = 0; \ 612 + r5 = 0; \ 613 + call %[bpf_csum_diff]; \ 614 + l0_%=: exit; \ 615 + " : 616 + : __imm(bpf_csum_diff), 617 + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), 618 + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) 619 + : __clobber_all); 620 + } 621 + 622 + SEC("tracepoint") 623 + __description("helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)") 624 + __failure __msg("R1 type=scalar expected=fp") 625 + __naked void ptr_to_mem_or_null_8(void) 626 + { 627 + asm volatile (" \ 628 + r1 = 0; \ 629 + r2 = 0; \ 630 + r3 = 0; \ 631 + call %[bpf_probe_read_kernel]; \ 632 + exit; \ 633 + " : 634 + : 
__imm(bpf_probe_read_kernel) 635 + : __clobber_all); 636 + } 637 + 638 + SEC("tracepoint") 639 + __description("helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)") 640 + __failure __msg("R1 type=scalar expected=fp") 641 + __naked void ptr_to_mem_or_null_9(void) 642 + { 643 + asm volatile (" \ 644 + r1 = 0; \ 645 + r2 = 1; \ 646 + r3 = 0; \ 647 + call %[bpf_probe_read_kernel]; \ 648 + exit; \ 649 + " : 650 + : __imm(bpf_probe_read_kernel) 651 + : __clobber_all); 652 + } 653 + 654 + SEC("tracepoint") 655 + __description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)") 656 + __success 657 + __naked void ptr_to_mem_or_null_10(void) 658 + { 659 + asm volatile (" \ 660 + r1 = r10; \ 661 + r1 += -8; \ 662 + r2 = 0; \ 663 + r3 = 0; \ 664 + call %[bpf_probe_read_kernel]; \ 665 + exit; \ 666 + " : 667 + : __imm(bpf_probe_read_kernel) 668 + : __clobber_all); 669 + } 670 + 671 + SEC("tracepoint") 672 + __description("helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)") 673 + __success 674 + __naked void ptr_to_mem_or_null_11(void) 675 + { 676 + asm volatile (" \ 677 + r1 = 0; \ 678 + *(u64*)(r10 - 8) = r1; \ 679 + r2 = r10; \ 680 + r2 += -8; \ 681 + r1 = %[map_hash_8b] ll; \ 682 + call %[bpf_map_lookup_elem]; \ 683 + if r0 == 0 goto l0_%=; \ 684 + r1 = r0; \ 685 + r2 = 0; \ 686 + r3 = 0; \ 687 + call %[bpf_probe_read_kernel]; \ 688 + l0_%=: exit; \ 689 + " : 690 + : __imm(bpf_map_lookup_elem), 691 + __imm(bpf_probe_read_kernel), 692 + __imm_addr(map_hash_8b) 693 + : __clobber_all); 694 + } 695 + 696 + SEC("tracepoint") 697 + __description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)") 698 + __success 699 + __naked void ptr_to_mem_or_null_12(void) 700 + { 701 + asm volatile (" \ 702 + r1 = 0; \ 703 + *(u64*)(r10 - 8) = r1; \ 704 + r2 = r10; \ 705 + r2 += -8; \ 706 + r1 = 
%[map_hash_8b] ll; \ 707 + call %[bpf_map_lookup_elem]; \ 708 + if r0 == 0 goto l0_%=; \ 709 + r2 = *(u64*)(r0 + 0); \ 710 + if r2 > 8 goto l0_%=; \ 711 + r1 = r10; \ 712 + r1 += -8; \ 713 + r3 = 0; \ 714 + call %[bpf_probe_read_kernel]; \ 715 + l0_%=: exit; \ 716 + " : 717 + : __imm(bpf_map_lookup_elem), 718 + __imm(bpf_probe_read_kernel), 719 + __imm_addr(map_hash_8b) 720 + : __clobber_all); 721 + } 722 + 723 + SEC("tracepoint") 724 + __description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)") 725 + __success 726 + __naked void ptr_to_mem_or_null_13(void) 727 + { 728 + asm volatile (" \ 729 + r1 = 0; \ 730 + *(u64*)(r10 - 8) = r1; \ 731 + r2 = r10; \ 732 + r2 += -8; \ 733 + r1 = %[map_hash_8b] ll; \ 734 + call %[bpf_map_lookup_elem]; \ 735 + if r0 == 0 goto l0_%=; \ 736 + r1 = r0; \ 737 + r2 = *(u64*)(r0 + 0); \ 738 + if r2 > 8 goto l0_%=; \ 739 + r3 = 0; \ 740 + call %[bpf_probe_read_kernel]; \ 741 + l0_%=: exit; \ 742 + " : 743 + : __imm(bpf_map_lookup_elem), 744 + __imm(bpf_probe_read_kernel), 745 + __imm_addr(map_hash_8b) 746 + : __clobber_all); 747 + } 748 + 749 + SEC("socket") 750 + __description("helper access to variable memory: 8 bytes leak") 751 + /* in privileged mode reads from uninitialized stack locations are permitted */ 752 + __success __failure_unpriv 753 + __msg_unpriv("invalid indirect read from stack R2 off -64+32 size 64") 754 + __retval(0) 755 + __naked void variable_memory_8_bytes_leak(void) 756 + { 757 + asm volatile (" \ 758 + /* set max stack size */ \ 759 + r6 = 0; \ 760 + *(u64*)(r10 - 128) = r6; \ 761 + /* set r3 to a random value */ \ 762 + call %[bpf_get_prandom_u32]; \ 763 + r3 = r0; \ 764 + r1 = %[map_ringbuf] ll; \ 765 + r2 = r10; \ 766 + r2 += -64; \ 767 + r0 = 0; \ 768 + *(u64*)(r10 - 64) = r0; \ 769 + *(u64*)(r10 - 56) = r0; \ 770 + *(u64*)(r10 - 48) = r0; \ 771 + *(u64*)(r10 - 40) = r0; \ 772 + /* Note: fp[-32] left uninitialized */ \ 773 + *(u64*)(r10 - 24) = 
r0; \ 774 + *(u64*)(r10 - 16) = r0; \ 775 + *(u64*)(r10 - 8) = r0; \ 776 + /* Limit r3 range to [1, 64] */ \ 777 + r3 &= 63; \ 778 + r3 += 1; \ 779 + r4 = 0; \ 780 + /* Call bpf_ringbuf_output(), it is one of a few helper functions with\ 781 + * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\ 782 + * For unpriv this should signal an error, because memory region [1, 64]\ 783 + * at &fp[-64] is not fully initialized. \ 784 + */ \ 785 + call %[bpf_ringbuf_output]; \ 786 + r0 = 0; \ 787 + exit; \ 788 + " : 789 + : __imm(bpf_get_prandom_u32), 790 + __imm(bpf_ringbuf_output), 791 + __imm_addr(map_ringbuf) 792 + : __clobber_all); 793 + } 794 + 795 + SEC("tracepoint") 796 + __description("helper access to variable memory: 8 bytes no leak (init memory)") 797 + __success 798 + __naked void bytes_no_leak_init_memory(void) 799 + { 800 + asm volatile (" \ 801 + r1 = r10; \ 802 + r0 = 0; \ 803 + r0 = 0; \ 804 + *(u64*)(r10 - 64) = r0; \ 805 + *(u64*)(r10 - 56) = r0; \ 806 + *(u64*)(r10 - 48) = r0; \ 807 + *(u64*)(r10 - 40) = r0; \ 808 + *(u64*)(r10 - 32) = r0; \ 809 + *(u64*)(r10 - 24) = r0; \ 810 + *(u64*)(r10 - 16) = r0; \ 811 + *(u64*)(r10 - 8) = r0; \ 812 + r1 += -64; \ 813 + r2 = 0; \ 814 + r2 &= 32; \ 815 + r2 += 32; \ 816 + r3 = 0; \ 817 + call %[bpf_probe_read_kernel]; \ 818 + r1 = *(u64*)(r10 - 16); \ 819 + exit; \ 820 + " : 821 + : __imm(bpf_probe_read_kernel) 822 + : __clobber_all); 823 + } 824 + 825 + char _license[] SEC("license") = "GPL";
-650
tools/testing/selftests/bpf/verifier/helper_access_var_len.c
··· 1 - { 2 - "helper access to variable memory: stack, bitwise AND + JMP, correct bounds", 3 - .insns = { 4 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 6 - BPF_MOV64_IMM(BPF_REG_0, 0), 7 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 8 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 9 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 10 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 11 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), 12 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 13 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 14 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 15 - BPF_MOV64_IMM(BPF_REG_2, 16), 16 - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 17 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 18 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), 19 - BPF_MOV64_IMM(BPF_REG_4, 0), 20 - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 21 - BPF_MOV64_IMM(BPF_REG_3, 0), 22 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 23 - BPF_MOV64_IMM(BPF_REG_0, 0), 24 - BPF_EXIT_INSN(), 25 - }, 26 - .result = ACCEPT, 27 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 28 - }, 29 - { 30 - "helper access to variable memory: stack, bitwise AND, zero included", 31 - .insns = { 32 - /* set max stack size */ 33 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0), 34 - /* set r3 to a random value */ 35 - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), 36 - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), 37 - /* use bitwise AND to limit r3 range to [0, 64] */ 38 - BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 64), 39 - BPF_LD_MAP_FD(BPF_REG_1, 0), 40 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 41 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), 42 - BPF_MOV64_IMM(BPF_REG_4, 0), 43 - /* Call bpf_ringbuf_output(), it is one of a few helper functions with 44 - * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode. 45 - * For unpriv this should signal an error, because memory at &fp[-64] is 46 - * not initialized. 
47 - */ 48 - BPF_EMIT_CALL(BPF_FUNC_ringbuf_output), 49 - BPF_EXIT_INSN(), 50 - }, 51 - .fixup_map_ringbuf = { 4 }, 52 - .errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64", 53 - .result_unpriv = REJECT, 54 - /* in privileged mode reads from uninitialized stack locations are permitted */ 55 - .result = ACCEPT, 56 - }, 57 - { 58 - "helper access to variable memory: stack, bitwise AND + JMP, wrong max", 59 - .insns = { 60 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), 61 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 62 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 63 - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 64 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 65 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65), 66 - BPF_MOV64_IMM(BPF_REG_4, 0), 67 - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 68 - BPF_MOV64_IMM(BPF_REG_3, 0), 69 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 70 - BPF_MOV64_IMM(BPF_REG_0, 0), 71 - BPF_EXIT_INSN(), 72 - }, 73 - .errstr = "invalid indirect access to stack R1 off=-64 size=65", 74 - .result = REJECT, 75 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 76 - }, 77 - { 78 - "helper access to variable memory: stack, JMP, correct bounds", 79 - .insns = { 80 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 81 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 82 - BPF_MOV64_IMM(BPF_REG_0, 0), 83 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 84 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 85 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 86 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 87 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), 88 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 89 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 90 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 91 - BPF_MOV64_IMM(BPF_REG_2, 16), 92 - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 93 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 94 - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4), 95 - BPF_MOV64_IMM(BPF_REG_4, 0), 96 - 
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 97 - BPF_MOV64_IMM(BPF_REG_3, 0), 98 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 99 - BPF_MOV64_IMM(BPF_REG_0, 0), 100 - BPF_EXIT_INSN(), 101 - }, 102 - .result = ACCEPT, 103 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 104 - }, 105 - { 106 - "helper access to variable memory: stack, JMP (signed), correct bounds", 107 - .insns = { 108 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 109 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 110 - BPF_MOV64_IMM(BPF_REG_0, 0), 111 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 112 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 113 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 114 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 115 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), 116 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 117 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 118 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 119 - BPF_MOV64_IMM(BPF_REG_2, 16), 120 - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 121 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 122 - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4), 123 - BPF_MOV64_IMM(BPF_REG_4, 0), 124 - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 125 - BPF_MOV64_IMM(BPF_REG_3, 0), 126 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 127 - BPF_MOV64_IMM(BPF_REG_0, 0), 128 - BPF_EXIT_INSN(), 129 - }, 130 - .result = ACCEPT, 131 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 132 - }, 133 - { 134 - "helper access to variable memory: stack, JMP, bounds + offset", 135 - .insns = { 136 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), 137 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 138 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 139 - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 140 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 141 - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5), 142 - BPF_MOV64_IMM(BPF_REG_4, 0), 143 - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3), 144 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), 145 - 
BPF_MOV64_IMM(BPF_REG_3, 0), 146 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 147 - BPF_MOV64_IMM(BPF_REG_0, 0), 148 - BPF_EXIT_INSN(), 149 - }, 150 - .errstr = "invalid indirect access to stack R1 off=-64 size=65", 151 - .result = REJECT, 152 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 153 - }, 154 - { 155 - "helper access to variable memory: stack, JMP, wrong max", 156 - .insns = { 157 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), 158 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 159 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 160 - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 161 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 162 - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4), 163 - BPF_MOV64_IMM(BPF_REG_4, 0), 164 - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 165 - BPF_MOV64_IMM(BPF_REG_3, 0), 166 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 167 - BPF_MOV64_IMM(BPF_REG_0, 0), 168 - BPF_EXIT_INSN(), 169 - }, 170 - .errstr = "invalid indirect access to stack R1 off=-64 size=65", 171 - .result = REJECT, 172 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 173 - }, 174 - { 175 - "helper access to variable memory: stack, JMP, no max check", 176 - .insns = { 177 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), 178 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 179 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 180 - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 181 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 182 - BPF_MOV64_IMM(BPF_REG_4, 0), 183 - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 184 - BPF_MOV64_IMM(BPF_REG_3, 0), 185 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 186 - BPF_MOV64_IMM(BPF_REG_0, 0), 187 - BPF_EXIT_INSN(), 188 - }, 189 - /* because max wasn't checked, signed min is negative */ 190 - .errstr = "R2 min value is negative, either use unsigned or 'var &= const'", 191 - .result = REJECT, 192 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 193 - }, 194 - { 195 - "helper access to variable memory: stack, JMP, no min check", 196 - .insns = { 197 - /* set max stack size 
*/ 198 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0), 199 - /* set r3 to a random value */ 200 - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), 201 - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), 202 - /* use JMP to limit r3 range to [0, 64] */ 203 - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 64, 6), 204 - BPF_LD_MAP_FD(BPF_REG_1, 0), 205 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 206 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), 207 - BPF_MOV64_IMM(BPF_REG_4, 0), 208 - /* Call bpf_ringbuf_output(), it is one of a few helper functions with 209 - * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode. 210 - * For unpriv this should signal an error, because memory at &fp[-64] is 211 - * not initialized. 212 - */ 213 - BPF_EMIT_CALL(BPF_FUNC_ringbuf_output), 214 - BPF_MOV64_IMM(BPF_REG_0, 0), 215 - BPF_EXIT_INSN(), 216 - }, 217 - .fixup_map_ringbuf = { 4 }, 218 - .errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64", 219 - .result_unpriv = REJECT, 220 - /* in privileged mode reads from uninitialized stack locations are permitted */ 221 - .result = ACCEPT, 222 - }, 223 - { 224 - "helper access to variable memory: stack, JMP (signed), no min check", 225 - .insns = { 226 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), 227 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 228 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 229 - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 230 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 231 - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3), 232 - BPF_MOV64_IMM(BPF_REG_3, 0), 233 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 234 - BPF_MOV64_IMM(BPF_REG_0, 0), 235 - BPF_EXIT_INSN(), 236 - }, 237 - .errstr = "R2 min value is negative", 238 - .result = REJECT, 239 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 240 - }, 241 - { 242 - "helper access to variable memory: map, JMP, correct bounds", 243 - .insns = { 244 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 245 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 246 - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 247 - BPF_LD_MAP_FD(BPF_REG_1, 0), 
248 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 249 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), 250 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 251 - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), 252 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 253 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 254 - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val), 4), 255 - BPF_MOV64_IMM(BPF_REG_4, 0), 256 - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 257 - BPF_MOV64_IMM(BPF_REG_3, 0), 258 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 259 - BPF_MOV64_IMM(BPF_REG_0, 0), 260 - BPF_EXIT_INSN(), 261 - }, 262 - .fixup_map_hash_48b = { 3 }, 263 - .result = ACCEPT, 264 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 265 - }, 266 - { 267 - "helper access to variable memory: map, JMP, wrong max", 268 - .insns = { 269 - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), 270 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 271 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 272 - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 273 - BPF_LD_MAP_FD(BPF_REG_1, 0), 274 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 275 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), 276 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 277 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), 278 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 279 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 280 - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) + 1, 4), 281 - BPF_MOV64_IMM(BPF_REG_4, 0), 282 - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 283 - BPF_MOV64_IMM(BPF_REG_3, 0), 284 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 285 - BPF_MOV64_IMM(BPF_REG_0, 0), 286 - BPF_EXIT_INSN(), 287 - }, 288 - .fixup_map_hash_48b = { 4 }, 289 - .errstr = "invalid access to map value, value_size=48 off=0 size=49", 290 - .result = REJECT, 291 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 292 - }, 293 - { 294 - "helper access to variable memory: map adjusted, JMP, correct bounds", 295 - .insns = { 296 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 297 - BPF_ALU64_IMM(BPF_ADD, 
BPF_REG_2, -8), 298 - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 299 - BPF_LD_MAP_FD(BPF_REG_1, 0), 300 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 301 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 302 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 303 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), 304 - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), 305 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 306 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 307 - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 20, 4), 308 - BPF_MOV64_IMM(BPF_REG_4, 0), 309 - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 310 - BPF_MOV64_IMM(BPF_REG_3, 0), 311 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 312 - BPF_MOV64_IMM(BPF_REG_0, 0), 313 - BPF_EXIT_INSN(), 314 - }, 315 - .fixup_map_hash_48b = { 3 }, 316 - .result = ACCEPT, 317 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 318 - }, 319 - { 320 - "helper access to variable memory: map adjusted, JMP, wrong max", 321 - .insns = { 322 - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), 323 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 324 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 325 - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 326 - BPF_LD_MAP_FD(BPF_REG_1, 0), 327 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 328 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 329 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 330 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), 331 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), 332 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 333 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 334 - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 19, 4), 335 - BPF_MOV64_IMM(BPF_REG_4, 0), 336 - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 337 - BPF_MOV64_IMM(BPF_REG_3, 0), 338 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 339 - BPF_MOV64_IMM(BPF_REG_0, 0), 340 - BPF_EXIT_INSN(), 341 - }, 342 - .fixup_map_hash_48b = { 4 }, 343 - .errstr = "R1 min value is outside of the allowed memory range", 344 - .result = REJECT, 345 - .prog_type = 
BPF_PROG_TYPE_TRACEPOINT, 346 - }, 347 - { 348 - "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", 349 - .insns = { 350 - BPF_MOV64_IMM(BPF_REG_1, 0), 351 - BPF_MOV64_IMM(BPF_REG_2, 0), 352 - BPF_MOV64_IMM(BPF_REG_3, 0), 353 - BPF_MOV64_IMM(BPF_REG_4, 0), 354 - BPF_MOV64_IMM(BPF_REG_5, 0), 355 - BPF_EMIT_CALL(BPF_FUNC_csum_diff), 356 - BPF_EXIT_INSN(), 357 - }, 358 - .result = ACCEPT, 359 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 360 - }, 361 - { 362 - "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", 363 - .insns = { 364 - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 365 - BPF_MOV64_IMM(BPF_REG_1, 0), 366 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 367 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 368 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), 369 - BPF_MOV64_IMM(BPF_REG_3, 0), 370 - BPF_MOV64_IMM(BPF_REG_4, 0), 371 - BPF_MOV64_IMM(BPF_REG_5, 0), 372 - BPF_EMIT_CALL(BPF_FUNC_csum_diff), 373 - BPF_EXIT_INSN(), 374 - }, 375 - .errstr = "R1 type=scalar expected=fp", 376 - .result = REJECT, 377 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 378 - }, 379 - { 380 - "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)", 381 - .insns = { 382 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 383 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 384 - BPF_MOV64_IMM(BPF_REG_2, 0), 385 - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), 386 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8), 387 - BPF_MOV64_IMM(BPF_REG_3, 0), 388 - BPF_MOV64_IMM(BPF_REG_4, 0), 389 - BPF_MOV64_IMM(BPF_REG_5, 0), 390 - BPF_EMIT_CALL(BPF_FUNC_csum_diff), 391 - BPF_EXIT_INSN(), 392 - }, 393 - .result = ACCEPT, 394 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 395 - }, 396 - { 397 - "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)", 398 - .insns = { 399 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 400 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 401 - 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 402 - BPF_LD_MAP_FD(BPF_REG_1, 0), 403 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 404 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 405 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 406 - BPF_MOV64_IMM(BPF_REG_2, 0), 407 - BPF_MOV64_IMM(BPF_REG_3, 0), 408 - BPF_MOV64_IMM(BPF_REG_4, 0), 409 - BPF_MOV64_IMM(BPF_REG_5, 0), 410 - BPF_EMIT_CALL(BPF_FUNC_csum_diff), 411 - BPF_EXIT_INSN(), 412 - }, 413 - .fixup_map_hash_8b = { 3 }, 414 - .result = ACCEPT, 415 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 416 - }, 417 - { 418 - "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)", 419 - .insns = { 420 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 421 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 422 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 423 - BPF_LD_MAP_FD(BPF_REG_1, 0), 424 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 425 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 426 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 427 - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7), 428 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 429 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 430 - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), 431 - BPF_MOV64_IMM(BPF_REG_3, 0), 432 - BPF_MOV64_IMM(BPF_REG_4, 0), 433 - BPF_MOV64_IMM(BPF_REG_5, 0), 434 - BPF_EMIT_CALL(BPF_FUNC_csum_diff), 435 - BPF_EXIT_INSN(), 436 - }, 437 - .fixup_map_hash_8b = { 3 }, 438 - .result = ACCEPT, 439 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 440 - }, 441 - { 442 - "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)", 443 - .insns = { 444 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 445 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 446 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 447 - BPF_LD_MAP_FD(BPF_REG_1, 0), 448 - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 449 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 450 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 451 
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 452 - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), 453 - BPF_MOV64_IMM(BPF_REG_3, 0), 454 - BPF_MOV64_IMM(BPF_REG_4, 0), 455 - BPF_MOV64_IMM(BPF_REG_5, 0), 456 - BPF_EMIT_CALL(BPF_FUNC_csum_diff), 457 - BPF_EXIT_INSN(), 458 - }, 459 - .fixup_map_hash_8b = { 3 }, 460 - .result = ACCEPT, 461 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 462 - }, 463 - { 464 - "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)", 465 - .insns = { 466 - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 467 - offsetof(struct __sk_buff, data)), 468 - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 469 - offsetof(struct __sk_buff, data_end)), 470 - BPF_MOV64_REG(BPF_REG_0, BPF_REG_6), 471 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 472 - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7), 473 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 474 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0), 475 - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), 476 - BPF_MOV64_IMM(BPF_REG_3, 0), 477 - BPF_MOV64_IMM(BPF_REG_4, 0), 478 - BPF_MOV64_IMM(BPF_REG_5, 0), 479 - BPF_EMIT_CALL(BPF_FUNC_csum_diff), 480 - BPF_EXIT_INSN(), 481 - }, 482 - .result = ACCEPT, 483 - .prog_type = BPF_PROG_TYPE_SCHED_CLS, 484 - .retval = 0 /* csum_diff of 64-byte packet */, 485 - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 486 - }, 487 - { 488 - "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)", 489 - .insns = { 490 - BPF_MOV64_IMM(BPF_REG_1, 0), 491 - BPF_MOV64_IMM(BPF_REG_2, 0), 492 - BPF_MOV64_IMM(BPF_REG_3, 0), 493 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 494 - BPF_EXIT_INSN(), 495 - }, 496 - .errstr = "R1 type=scalar expected=fp", 497 - .result = REJECT, 498 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 499 - }, 500 - { 501 - "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)", 502 - .insns = { 503 - BPF_MOV64_IMM(BPF_REG_1, 0), 504 - BPF_MOV64_IMM(BPF_REG_2, 1), 505 - 
BPF_MOV64_IMM(BPF_REG_3, 0), 506 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 507 - BPF_EXIT_INSN(), 508 - }, 509 - .errstr = "R1 type=scalar expected=fp", 510 - .result = REJECT, 511 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 512 - }, 513 - { 514 - "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)", 515 - .insns = { 516 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 517 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 518 - BPF_MOV64_IMM(BPF_REG_2, 0), 519 - BPF_MOV64_IMM(BPF_REG_3, 0), 520 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 521 - BPF_EXIT_INSN(), 522 - }, 523 - .result = ACCEPT, 524 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 525 - }, 526 - { 527 - "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)", 528 - .insns = { 529 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 530 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 531 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 532 - BPF_LD_MAP_FD(BPF_REG_1, 0), 533 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 534 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 535 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 536 - BPF_MOV64_IMM(BPF_REG_2, 0), 537 - BPF_MOV64_IMM(BPF_REG_3, 0), 538 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 539 - BPF_EXIT_INSN(), 540 - }, 541 - .fixup_map_hash_8b = { 3 }, 542 - .result = ACCEPT, 543 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 544 - }, 545 - { 546 - "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)", 547 - .insns = { 548 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 549 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 550 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 551 - BPF_LD_MAP_FD(BPF_REG_1, 0), 552 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 553 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 554 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 555 - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), 556 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 557 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 558 - 
BPF_MOV64_IMM(BPF_REG_3, 0), 559 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 560 - BPF_EXIT_INSN(), 561 - }, 562 - .fixup_map_hash_8b = { 3 }, 563 - .result = ACCEPT, 564 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 565 - }, 566 - { 567 - "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)", 568 - .insns = { 569 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 570 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 571 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 572 - BPF_LD_MAP_FD(BPF_REG_1, 0), 573 - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 574 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 575 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 576 - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 577 - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2), 578 - BPF_MOV64_IMM(BPF_REG_3, 0), 579 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 580 - BPF_EXIT_INSN(), 581 - }, 582 - .fixup_map_hash_8b = { 3 }, 583 - .result = ACCEPT, 584 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 585 - }, 586 - { 587 - "helper access to variable memory: 8 bytes leak", 588 - .insns = { 589 - /* set max stack size */ 590 - BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0), 591 - /* set r3 to a random value */ 592 - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), 593 - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), 594 - BPF_LD_MAP_FD(BPF_REG_1, 0), 595 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 596 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), 597 - BPF_MOV64_IMM(BPF_REG_0, 0), 598 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 599 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 600 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 601 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 602 - /* Note: fp[-32] left uninitialized */ 603 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 604 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 605 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 606 - /* Limit r3 range to [1, 64] */ 607 - BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 63), 608 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 1), 609 
- BPF_MOV64_IMM(BPF_REG_4, 0), 610 - /* Call bpf_ringbuf_output(), it is one of a few helper functions with 611 - * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode. 612 - * For unpriv this should signal an error, because memory region [1, 64] 613 - * at &fp[-64] is not fully initialized. 614 - */ 615 - BPF_EMIT_CALL(BPF_FUNC_ringbuf_output), 616 - BPF_MOV64_IMM(BPF_REG_0, 0), 617 - BPF_EXIT_INSN(), 618 - }, 619 - .fixup_map_ringbuf = { 3 }, 620 - .errstr_unpriv = "invalid indirect read from stack R2 off -64+32 size 64", 621 - .result_unpriv = REJECT, 622 - /* in privileged mode reads from uninitialized stack locations are permitted */ 623 - .result = ACCEPT, 624 - }, 625 - { 626 - "helper access to variable memory: 8 bytes no leak (init memory)", 627 - .insns = { 628 - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 629 - BPF_MOV64_IMM(BPF_REG_0, 0), 630 - BPF_MOV64_IMM(BPF_REG_0, 0), 631 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 632 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 633 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 634 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 635 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), 636 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 637 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 638 - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 639 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 640 - BPF_MOV64_IMM(BPF_REG_2, 0), 641 - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32), 642 - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32), 643 - BPF_MOV64_IMM(BPF_REG_3, 0), 644 - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), 645 - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 646 - BPF_EXIT_INSN(), 647 - }, 648 - .result = ACCEPT, 649 - .prog_type = BPF_PROG_TYPE_TRACEPOINT, 650 - },