Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: add iterators tests

Add various tests for open-coded iterators. Some of them exercise
various possible coding patterns in C, some go down to low-level
assembly for more control over various conditions, especially invalid
ones.

We also make use of bpf_for(), bpf_for_each(), bpf_repeat() macros in
some of these tests.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20230308184121.1165081-7-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Andrii Nakryiko and committed by
Alexei Starovoitov
57400dcc 8c2b5e90

+1325
+15
tools/testing/selftests/bpf/prog_tests/iters.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ 3 + 4 + #include <test_progs.h> 5 + 6 + #include "iters.skel.h" 7 + #include "iters_state_safety.skel.h" 8 + #include "iters_looping.skel.h" 9 + 10 + void test_iters(void) 11 + { 12 + RUN_TESTS(iters_state_safety); 13 + RUN_TESTS(iters_looping); 14 + RUN_TESTS(iters); 15 + }
+1
tools/testing/selftests/bpf/progs/bpf_misc.h
··· 36 36 #define __clobber_common "r0", "r1", "r2", "r3", "r4", "r5", "memory" 37 37 #define __imm(name) [name]"i"(name) 38 38 #define __imm_addr(name) [name]"i"(&name) 39 + #define __imm_ptr(name) [name]"p"(&name) 39 40 40 41 #if defined(__TARGET_ARCH_x86) 41 42 #define SYSCALL_WRAPPER 1
+720
tools/testing/selftests/bpf/progs/iters.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ 3 + 4 + #include <stdbool.h> 5 + #include <linux/bpf.h> 6 + #include <bpf/bpf_helpers.h> 7 + #include "bpf_misc.h" 8 + 9 + #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 10 + 11 + static volatile int zero = 0; 12 + 13 + int my_pid; 14 + int arr[256]; 15 + int small_arr[16] SEC(".data.small_arr"); 16 + 17 + #ifdef REAL_TEST 18 + #define MY_PID_GUARD() if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0 19 + #else 20 + #define MY_PID_GUARD() ({ }) 21 + #endif 22 + 23 + SEC("?raw_tp") 24 + __failure __msg("math between map_value pointer and register with unbounded min value is not allowed") 25 + int iter_err_unsafe_c_loop(const void *ctx) 26 + { 27 + struct bpf_iter_num it; 28 + int *v, i = zero; /* obscure initial value of i */ 29 + 30 + MY_PID_GUARD(); 31 + 32 + bpf_iter_num_new(&it, 0, 1000); 33 + while ((v = bpf_iter_num_next(&it))) { 34 + i++; 35 + } 36 + bpf_iter_num_destroy(&it); 37 + 38 + small_arr[i] = 123; /* invalid */ 39 + 40 + return 0; 41 + } 42 + 43 + SEC("?raw_tp") 44 + __failure __msg("unbounded memory access") 45 + int iter_err_unsafe_asm_loop(const void *ctx) 46 + { 47 + struct bpf_iter_num it; 48 + int *v, i = 0; 49 + 50 + MY_PID_GUARD(); 51 + 52 + asm volatile ( 53 + "r6 = %[zero];" /* iteration counter */ 54 + "r1 = %[it];" /* iterator state */ 55 + "r2 = 0;" 56 + "r3 = 1000;" 57 + "r4 = 1;" 58 + "call %[bpf_iter_num_new];" 59 + "loop:" 60 + "r1 = %[it];" 61 + "call %[bpf_iter_num_next];" 62 + "if r0 == 0 goto out;" 63 + "r6 += 1;" 64 + "goto loop;" 65 + "out:" 66 + "r1 = %[it];" 67 + "call %[bpf_iter_num_destroy];" 68 + "r1 = %[small_arr];" 69 + "r2 = r6;" 70 + "r2 <<= 2;" 71 + "r1 += r2;" 72 + "*(u32 *)(r1 + 0) = r6;" /* invalid */ 73 + : 74 + : [it]"r"(&it), 75 + [small_arr]"p"(small_arr), 76 + [zero]"p"(zero), 77 + __imm(bpf_iter_num_new), 78 + __imm(bpf_iter_num_next), 79 + __imm(bpf_iter_num_destroy) 80 + : 
__clobber_common, "r6" 81 + ); 82 + 83 + return 0; 84 + } 85 + 86 + SEC("raw_tp") 87 + __success 88 + int iter_while_loop(const void *ctx) 89 + { 90 + struct bpf_iter_num it; 91 + int *v, i; 92 + 93 + MY_PID_GUARD(); 94 + 95 + bpf_iter_num_new(&it, 0, 3); 96 + while ((v = bpf_iter_num_next(&it))) { 97 + bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v); 98 + } 99 + bpf_iter_num_destroy(&it); 100 + 101 + return 0; 102 + } 103 + 104 + SEC("raw_tp") 105 + __success 106 + int iter_while_loop_auto_cleanup(const void *ctx) 107 + { 108 + __attribute__((cleanup(bpf_iter_num_destroy))) struct bpf_iter_num it; 109 + int *v, i; 110 + 111 + MY_PID_GUARD(); 112 + 113 + bpf_iter_num_new(&it, 0, 3); 114 + while ((v = bpf_iter_num_next(&it))) { 115 + bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v); 116 + } 117 + /* (!) no explicit bpf_iter_num_destroy() */ 118 + 119 + return 0; 120 + } 121 + 122 + SEC("raw_tp") 123 + __success 124 + int iter_for_loop(const void *ctx) 125 + { 126 + struct bpf_iter_num it; 127 + int *v, i; 128 + 129 + MY_PID_GUARD(); 130 + 131 + bpf_iter_num_new(&it, 5, 10); 132 + for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) { 133 + bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v); 134 + } 135 + bpf_iter_num_destroy(&it); 136 + 137 + return 0; 138 + } 139 + 140 + SEC("raw_tp") 141 + __success 142 + int iter_bpf_for_each_macro(const void *ctx) 143 + { 144 + int *v; 145 + 146 + MY_PID_GUARD(); 147 + 148 + bpf_for_each(num, v, 5, 10) { 149 + bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v); 150 + } 151 + 152 + return 0; 153 + } 154 + 155 + SEC("raw_tp") 156 + __success 157 + int iter_bpf_for_macro(const void *ctx) 158 + { 159 + int i; 160 + 161 + MY_PID_GUARD(); 162 + 163 + bpf_for(i, 5, 10) { 164 + bpf_printk("ITER_BASIC: E2 VAL: v=%d", i); 165 + } 166 + 167 + return 0; 168 + } 169 + 170 + SEC("raw_tp") 171 + __success 172 + int iter_pragma_unroll_loop(const void *ctx) 173 + { 174 + struct bpf_iter_num it; 175 + int *v, i; 176 + 177 + MY_PID_GUARD(); 178 + 179 + 
bpf_iter_num_new(&it, 0, 2); 180 + #pragma nounroll 181 + for (i = 0; i < 3; i++) { 182 + v = bpf_iter_num_next(&it); 183 + bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1); 184 + } 185 + bpf_iter_num_destroy(&it); 186 + 187 + return 0; 188 + } 189 + 190 + SEC("raw_tp") 191 + __success 192 + int iter_manual_unroll_loop(const void *ctx) 193 + { 194 + struct bpf_iter_num it; 195 + int *v, i; 196 + 197 + MY_PID_GUARD(); 198 + 199 + bpf_iter_num_new(&it, 100, 200); 200 + v = bpf_iter_num_next(&it); 201 + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); 202 + v = bpf_iter_num_next(&it); 203 + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); 204 + v = bpf_iter_num_next(&it); 205 + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); 206 + v = bpf_iter_num_next(&it); 207 + bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1); 208 + bpf_iter_num_destroy(&it); 209 + 210 + return 0; 211 + } 212 + 213 + SEC("raw_tp") 214 + __success 215 + int iter_multiple_sequential_loops(const void *ctx) 216 + { 217 + struct bpf_iter_num it; 218 + int *v, i; 219 + 220 + MY_PID_GUARD(); 221 + 222 + bpf_iter_num_new(&it, 0, 3); 223 + while ((v = bpf_iter_num_next(&it))) { 224 + bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v); 225 + } 226 + bpf_iter_num_destroy(&it); 227 + 228 + bpf_iter_num_new(&it, 5, 10); 229 + for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) { 230 + bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v); 231 + } 232 + bpf_iter_num_destroy(&it); 233 + 234 + bpf_iter_num_new(&it, 0, 2); 235 + #pragma nounroll 236 + for (i = 0; i < 3; i++) { 237 + v = bpf_iter_num_next(&it); 238 + bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1); 239 + } 240 + bpf_iter_num_destroy(&it); 241 + 242 + bpf_iter_num_new(&it, 100, 200); 243 + v = bpf_iter_num_next(&it); 244 + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); 245 + v = bpf_iter_num_next(&it); 246 + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? 
*v : -1); 247 + v = bpf_iter_num_next(&it); 248 + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); 249 + v = bpf_iter_num_next(&it); 250 + bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1); 251 + bpf_iter_num_destroy(&it); 252 + 253 + return 0; 254 + } 255 + 256 + SEC("raw_tp") 257 + __success 258 + int iter_limit_cond_break_loop(const void *ctx) 259 + { 260 + struct bpf_iter_num it; 261 + int *v, i = 0, sum = 0; 262 + 263 + MY_PID_GUARD(); 264 + 265 + bpf_iter_num_new(&it, 0, 10); 266 + while ((v = bpf_iter_num_next(&it))) { 267 + bpf_printk("ITER_SIMPLE: i=%d v=%d", i, *v); 268 + sum += *v; 269 + 270 + i++; 271 + if (i > 3) 272 + break; 273 + } 274 + bpf_iter_num_destroy(&it); 275 + 276 + bpf_printk("ITER_SIMPLE: sum=%d\n", sum); 277 + 278 + return 0; 279 + } 280 + 281 + SEC("raw_tp") 282 + __success 283 + int iter_obfuscate_counter(const void *ctx) 284 + { 285 + struct bpf_iter_num it; 286 + int *v, sum = 0; 287 + /* Make i's initial value unknowable for verifier to prevent it from 288 + * pruning if/else branch inside the loop body and marking i as precise. 289 + */ 290 + int i = zero; 291 + 292 + MY_PID_GUARD(); 293 + 294 + bpf_iter_num_new(&it, 0, 10); 295 + while ((v = bpf_iter_num_next(&it))) { 296 + int x; 297 + 298 + i += 1; 299 + 300 + /* If we initialized i as `int i = 0;` above, verifier would 301 + * track that i becomes 1 on first iteration after increment 302 + * above, and here verifier would eagerly prune else branch 303 + * and mark i as precise, ruining open-coded iterator logic 304 + * completely, as each next iteration would have a different 305 + * *precise* value of i, and thus there would be no 306 + * convergence of state. This would result in reaching maximum 307 + * instruction limit, no matter what the limit is. 
308 + */ 309 + if (i == 1) 310 + x = 123; 311 + else 312 + x = i * 3 + 1; 313 + 314 + bpf_printk("ITER_OBFUSCATE_COUNTER: i=%d v=%d x=%d", i, *v, x); 315 + 316 + sum += x; 317 + } 318 + bpf_iter_num_destroy(&it); 319 + 320 + bpf_printk("ITER_OBFUSCATE_COUNTER: sum=%d\n", sum); 321 + 322 + return 0; 323 + } 324 + 325 + SEC("raw_tp") 326 + __success 327 + int iter_search_loop(const void *ctx) 328 + { 329 + struct bpf_iter_num it; 330 + int *v, *elem = NULL; 331 + bool found = false; 332 + 333 + MY_PID_GUARD(); 334 + 335 + bpf_iter_num_new(&it, 0, 10); 336 + 337 + while ((v = bpf_iter_num_next(&it))) { 338 + bpf_printk("ITER_SEARCH_LOOP: v=%d", *v); 339 + 340 + if (*v == 2) { 341 + found = true; 342 + elem = v; 343 + barrier_var(elem); 344 + } 345 + } 346 + 347 + /* should fail to verify if bpf_iter_num_destroy() is here */ 348 + 349 + if (found) 350 + /* here found element will be wrong, we should have copied 351 + * value to a variable, but here we want to make sure we can 352 + * access memory after the loop anyways 353 + */ 354 + bpf_printk("ITER_SEARCH_LOOP: FOUND IT = %d!\n", *elem); 355 + else 356 + bpf_printk("ITER_SEARCH_LOOP: NOT FOUND IT!\n"); 357 + 358 + bpf_iter_num_destroy(&it); 359 + 360 + return 0; 361 + } 362 + 363 + SEC("raw_tp") 364 + __success 365 + int iter_array_fill(const void *ctx) 366 + { 367 + int sum, i; 368 + 369 + MY_PID_GUARD(); 370 + 371 + bpf_for(i, 0, ARRAY_SIZE(arr)) { 372 + arr[i] = i * 2; 373 + } 374 + 375 + sum = 0; 376 + bpf_for(i, 0, ARRAY_SIZE(arr)) { 377 + sum += arr[i]; 378 + } 379 + 380 + bpf_printk("ITER_ARRAY_FILL: sum=%d (should be %d)\n", sum, 255 * 256); 381 + 382 + return 0; 383 + } 384 + 385 + static int arr2d[4][5]; 386 + static int arr2d_row_sums[4]; 387 + static int arr2d_col_sums[5]; 388 + 389 + SEC("raw_tp") 390 + __success 391 + int iter_nested_iters(const void *ctx) 392 + { 393 + int sum, row, col; 394 + 395 + MY_PID_GUARD(); 396 + 397 + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { 398 + bpf_for( col, 0, 
ARRAY_SIZE(arr2d[0])) { 399 + arr2d[row][col] = row * col; 400 + } 401 + } 402 + 403 + /* zero-initialize sums */ 404 + sum = 0; 405 + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { 406 + arr2d_row_sums[row] = 0; 407 + } 408 + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { 409 + arr2d_col_sums[col] = 0; 410 + } 411 + 412 + /* calculate sums */ 413 + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { 414 + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { 415 + sum += arr2d[row][col]; 416 + arr2d_row_sums[row] += arr2d[row][col]; 417 + arr2d_col_sums[col] += arr2d[row][col]; 418 + } 419 + } 420 + 421 + bpf_printk("ITER_NESTED_ITERS: total sum=%d", sum); 422 + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { 423 + bpf_printk("ITER_NESTED_ITERS: row #%d sum=%d", row, arr2d_row_sums[row]); 424 + } 425 + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { 426 + bpf_printk("ITER_NESTED_ITERS: col #%d sum=%d%s", 427 + col, arr2d_col_sums[col], 428 + col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : ""); 429 + } 430 + 431 + return 0; 432 + } 433 + 434 + SEC("raw_tp") 435 + __success 436 + int iter_nested_deeply_iters(const void *ctx) 437 + { 438 + int sum = 0; 439 + 440 + MY_PID_GUARD(); 441 + 442 + bpf_repeat(10) { 443 + bpf_repeat(10) { 444 + bpf_repeat(10) { 445 + bpf_repeat(10) { 446 + bpf_repeat(10) { 447 + sum += 1; 448 + } 449 + } 450 + } 451 + } 452 + /* validate that we can break from inside bpf_repeat() */ 453 + break; 454 + } 455 + 456 + return sum; 457 + } 458 + 459 + static __noinline void fill_inner_dimension(int row) 460 + { 461 + int col; 462 + 463 + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { 464 + arr2d[row][col] = row * col; 465 + } 466 + } 467 + 468 + static __noinline int sum_inner_dimension(int row) 469 + { 470 + int sum = 0, col; 471 + 472 + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { 473 + sum += arr2d[row][col]; 474 + arr2d_row_sums[row] += arr2d[row][col]; 475 + arr2d_col_sums[col] += arr2d[row][col]; 476 + } 477 + 478 + return sum; 479 + } 480 + 481 + SEC("raw_tp") 482 + __success 483 + int iter_subprog_iters(const void *ctx) 
484 + { 485 + int sum, row, col; 486 + 487 + MY_PID_GUARD(); 488 + 489 + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { 490 + fill_inner_dimension(row); 491 + } 492 + 493 + /* zero-initialize sums */ 494 + sum = 0; 495 + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { 496 + arr2d_row_sums[row] = 0; 497 + } 498 + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { 499 + arr2d_col_sums[col] = 0; 500 + } 501 + 502 + /* calculate sums */ 503 + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { 504 + sum += sum_inner_dimension(row); 505 + } 506 + 507 + bpf_printk("ITER_SUBPROG_ITERS: total sum=%d", sum); 508 + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { 509 + bpf_printk("ITER_SUBPROG_ITERS: row #%d sum=%d", 510 + row, arr2d_row_sums[row]); 511 + } 512 + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { 513 + bpf_printk("ITER_SUBPROG_ITERS: col #%d sum=%d%s", 514 + col, arr2d_col_sums[col], 515 + col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : ""); 516 + } 517 + 518 + return 0; 519 + } 520 + 521 + struct { 522 + __uint(type, BPF_MAP_TYPE_ARRAY); 523 + __type(key, int); 524 + __type(value, int); 525 + __uint(max_entries, 1000); 526 + } arr_map SEC(".maps"); 527 + 528 + SEC("?raw_tp") 529 + __failure __msg("invalid mem access 'scalar'") 530 + int iter_err_too_permissive1(const void *ctx) 531 + { 532 + int *map_val = NULL; 533 + int key = 0; 534 + 535 + MY_PID_GUARD(); 536 + 537 + map_val = bpf_map_lookup_elem(&arr_map, &key); 538 + if (!map_val) 539 + return 0; 540 + 541 + bpf_repeat(1000000) { 542 + map_val = NULL; 543 + } 544 + 545 + *map_val = 123; 546 + 547 + return 0; 548 + } 549 + 550 + SEC("?raw_tp") 551 + __failure __msg("invalid mem access 'map_value_or_null'") 552 + int iter_err_too_permissive2(const void *ctx) 553 + { 554 + int *map_val = NULL; 555 + int key = 0; 556 + 557 + MY_PID_GUARD(); 558 + 559 + map_val = bpf_map_lookup_elem(&arr_map, &key); 560 + if (!map_val) 561 + return 0; 562 + 563 + bpf_repeat(1000000) { 564 + map_val = bpf_map_lookup_elem(&arr_map, &key); 565 + } 566 + 567 + *map_val = 123; 568 + 569 + return 0; 570 + 
} 571 + 572 + SEC("?raw_tp") 573 + __failure __msg("invalid mem access 'map_value_or_null'") 574 + int iter_err_too_permissive3(const void *ctx) 575 + { 576 + int *map_val = NULL; 577 + int key = 0; 578 + bool found = false; 579 + 580 + MY_PID_GUARD(); 581 + 582 + bpf_repeat(1000000) { 583 + map_val = bpf_map_lookup_elem(&arr_map, &key); 584 + found = true; 585 + } 586 + 587 + if (found) 588 + *map_val = 123; 589 + 590 + return 0; 591 + } 592 + 593 + SEC("raw_tp") 594 + __success 595 + int iter_tricky_but_fine(const void *ctx) 596 + { 597 + int *map_val = NULL; 598 + int key = 0; 599 + bool found = false; 600 + 601 + MY_PID_GUARD(); 602 + 603 + bpf_repeat(1000000) { 604 + map_val = bpf_map_lookup_elem(&arr_map, &key); 605 + if (map_val) { 606 + found = true; 607 + break; 608 + } 609 + } 610 + 611 + if (found) 612 + *map_val = 123; 613 + 614 + return 0; 615 + } 616 + 617 + #define __bpf_memzero(p, sz) bpf_probe_read_kernel((p), (sz), 0) 618 + 619 + SEC("raw_tp") 620 + __success 621 + int iter_stack_array_loop(const void *ctx) 622 + { 623 + long arr1[16], arr2[16], sum = 0; 624 + int *v, i; 625 + 626 + MY_PID_GUARD(); 627 + 628 + /* zero-init arr1 and arr2 in such a way that verifier doesn't know 629 + * it's all zeros; if we don't do that, we'll make BPF verifier track 630 + * all combination of zero/non-zero stack slots for arr1/arr2, which 631 + * will lead to O(2^(ARRAY_SIZE(arr1)+ARRAY_SIZE(arr2))) different 632 + * states 633 + */ 634 + __bpf_memzero(arr1, sizeof(arr1)); 635 + __bpf_memzero(arr2, sizeof(arr1)); 636 + 637 + /* validate that we can break and continue when using bpf_for() */ 638 + bpf_for(i, 0, ARRAY_SIZE(arr1)) { 639 + if (i & 1) { 640 + arr1[i] = i; 641 + continue; 642 + } else { 643 + arr2[i] = i; 644 + break; 645 + } 646 + } 647 + 648 + bpf_for(i, 0, ARRAY_SIZE(arr1)) { 649 + sum += arr1[i] + arr2[i]; 650 + } 651 + 652 + return sum; 653 + } 654 + 655 + static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul) 656 + { 657 
+ int *t, i; 658 + 659 + while ((t = bpf_iter_num_next(it))) { 660 + i = *t; 661 + if (i >= n) 662 + break; 663 + arr[i] = i * mul; 664 + } 665 + } 666 + 667 + static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n) 668 + { 669 + int *t, i, sum = 0;; 670 + 671 + while ((t = bpf_iter_num_next(it))) { 672 + i = *t; 673 + if (i >= n) 674 + break; 675 + sum += arr[i]; 676 + } 677 + 678 + return sum; 679 + } 680 + 681 + SEC("raw_tp") 682 + __success 683 + int iter_pass_iter_ptr_to_subprog(const void *ctx) 684 + { 685 + int arr1[16], arr2[32]; 686 + struct bpf_iter_num it; 687 + int n, sum1, sum2; 688 + 689 + MY_PID_GUARD(); 690 + 691 + /* fill arr1 */ 692 + n = ARRAY_SIZE(arr1); 693 + bpf_iter_num_new(&it, 0, n); 694 + fill(&it, arr1, n, 2); 695 + bpf_iter_num_destroy(&it); 696 + 697 + /* fill arr2 */ 698 + n = ARRAY_SIZE(arr2); 699 + bpf_iter_num_new(&it, 0, n); 700 + fill(&it, arr2, n, 10); 701 + bpf_iter_num_destroy(&it); 702 + 703 + /* sum arr1 */ 704 + n = ARRAY_SIZE(arr1); 705 + bpf_iter_num_new(&it, 0, n); 706 + sum1 = sum(&it, arr1, n); 707 + bpf_iter_num_destroy(&it); 708 + 709 + /* sum arr2 */ 710 + n = ARRAY_SIZE(arr2); 711 + bpf_iter_num_new(&it, 0, n); 712 + sum2 = sum(&it, arr2, n); 713 + bpf_iter_num_destroy(&it); 714 + 715 + bpf_printk("sum1=%d, sum2=%d", sum1, sum2); 716 + 717 + return 0; 718 + } 719 + 720 + char _license[] SEC("license") = "GPL";
+163
tools/testing/selftests/bpf/progs/iters_looping.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ 3 + 4 + #include <errno.h> 5 + #include <string.h> 6 + #include <linux/bpf.h> 7 + #include <bpf/bpf_helpers.h> 8 + #include "bpf_misc.h" 9 + 10 + char _license[] SEC("license") = "GPL"; 11 + 12 + #define ITER_HELPERS \ 13 + __imm(bpf_iter_num_new), \ 14 + __imm(bpf_iter_num_next), \ 15 + __imm(bpf_iter_num_destroy) 16 + 17 + SEC("?raw_tp") 18 + __success 19 + int force_clang_to_emit_btf_for_externs(void *ctx) 20 + { 21 + /* we need this as a workaround to enforce compiler emitting BTF 22 + * information for bpf_iter_num_{new,next,destroy}() kfuncs, 23 + * as, apparently, it doesn't emit it for symbols only referenced from 24 + * assembly (or cleanup attribute, for that matter, as well) 25 + */ 26 + bpf_repeat(0); 27 + 28 + return 0; 29 + } 30 + 31 + SEC("?raw_tp") 32 + __success 33 + int consume_first_item_only(void *ctx) 34 + { 35 + struct bpf_iter_num iter; 36 + 37 + asm volatile ( 38 + /* create iterator */ 39 + "r1 = %[iter];" 40 + "r2 = 0;" 41 + "r3 = 1000;" 42 + "call %[bpf_iter_num_new];" 43 + 44 + /* consume first item */ 45 + "r1 = %[iter];" 46 + "call %[bpf_iter_num_next];" 47 + 48 + "if r0 == 0 goto +1;" 49 + "r0 = *(u32 *)(r0 + 0);" 50 + 51 + /* destroy iterator */ 52 + "r1 = %[iter];" 53 + "call %[bpf_iter_num_destroy];" 54 + : 55 + : __imm_ptr(iter), ITER_HELPERS 56 + : __clobber_common 57 + ); 58 + 59 + return 0; 60 + } 61 + 62 + SEC("?raw_tp") 63 + __failure __msg("R0 invalid mem access 'scalar'") 64 + int missing_null_check_fail(void *ctx) 65 + { 66 + struct bpf_iter_num iter; 67 + 68 + asm volatile ( 69 + /* create iterator */ 70 + "r1 = %[iter];" 71 + "r2 = 0;" 72 + "r3 = 1000;" 73 + "call %[bpf_iter_num_new];" 74 + 75 + /* consume first element */ 76 + "r1 = %[iter];" 77 + "call %[bpf_iter_num_next];" 78 + 79 + /* FAIL: deref with no NULL check */ 80 + "r1 = *(u32 *)(r0 + 0);" 81 + 82 + /* destroy iterator */ 83 + "r1 = %[iter];" 84 + 
"call %[bpf_iter_num_destroy];" 85 + : 86 + : __imm_ptr(iter), ITER_HELPERS 87 + : __clobber_common 88 + ); 89 + 90 + return 0; 91 + } 92 + 93 + SEC("?raw_tp") 94 + __failure 95 + __msg("invalid access to memory, mem_size=4 off=0 size=8") 96 + __msg("R0 min value is outside of the allowed memory range") 97 + int wrong_sized_read_fail(void *ctx) 98 + { 99 + struct bpf_iter_num iter; 100 + 101 + asm volatile ( 102 + /* create iterator */ 103 + "r1 = %[iter];" 104 + "r2 = 0;" 105 + "r3 = 1000;" 106 + "call %[bpf_iter_num_new];" 107 + 108 + /* consume first element */ 109 + "r1 = %[iter];" 110 + "call %[bpf_iter_num_next];" 111 + 112 + "if r0 == 0 goto +1;" 113 + /* FAIL: deref more than available 4 bytes */ 114 + "r0 = *(u64 *)(r0 + 0);" 115 + 116 + /* destroy iterator */ 117 + "r1 = %[iter];" 118 + "call %[bpf_iter_num_destroy];" 119 + : 120 + : __imm_ptr(iter), ITER_HELPERS 121 + : __clobber_common 122 + ); 123 + 124 + return 0; 125 + } 126 + 127 + SEC("?raw_tp") 128 + __success __log_level(2) 129 + __flag(BPF_F_TEST_STATE_FREQ) 130 + int simplest_loop(void *ctx) 131 + { 132 + struct bpf_iter_num iter; 133 + 134 + asm volatile ( 135 + "r6 = 0;" /* init sum */ 136 + 137 + /* create iterator */ 138 + "r1 = %[iter];" 139 + "r2 = 0;" 140 + "r3 = 10;" 141 + "call %[bpf_iter_num_new];" 142 + 143 + "1:" 144 + /* consume next item */ 145 + "r1 = %[iter];" 146 + "call %[bpf_iter_num_next];" 147 + 148 + "if r0 == 0 goto 2f;" 149 + "r0 = *(u32 *)(r0 + 0);" 150 + "r6 += r0;" /* accumulate sum */ 151 + "goto 1b;" 152 + 153 + "2:" 154 + /* destroy iterator */ 155 + "r1 = %[iter];" 156 + "call %[bpf_iter_num_destroy];" 157 + : 158 + : __imm_ptr(iter), ITER_HELPERS 159 + : __clobber_common, "r6" 160 + ); 161 + 162 + return 0; 163 + }
+426
tools/testing/selftests/bpf/progs/iters_state_safety.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2022 Facebook */ 3 + 4 + #include <errno.h> 5 + #include <string.h> 6 + #include <linux/bpf.h> 7 + #include <bpf/bpf_helpers.h> 8 + #include "bpf_misc.h" 9 + 10 + char _license[] SEC("license") = "GPL"; 11 + 12 + #define ITER_HELPERS \ 13 + __imm(bpf_iter_num_new), \ 14 + __imm(bpf_iter_num_next), \ 15 + __imm(bpf_iter_num_destroy) 16 + 17 + SEC("?raw_tp") 18 + __success 19 + int force_clang_to_emit_btf_for_externs(void *ctx) 20 + { 21 + /* we need this as a workaround to enforce compiler emitting BTF 22 + * information for bpf_iter_num_{new,next,destroy}() kfuncs, 23 + * as, apparently, it doesn't emit it for symbols only referenced from 24 + * assembly (or cleanup attribute, for that matter, as well) 25 + */ 26 + bpf_repeat(0); 27 + 28 + return 0; 29 + } 30 + 31 + SEC("?raw_tp") 32 + __success __log_level(2) 33 + __msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)") 34 + int create_and_destroy(void *ctx) 35 + { 36 + struct bpf_iter_num iter; 37 + 38 + asm volatile ( 39 + /* create iterator */ 40 + "r1 = %[iter];" 41 + "r2 = 0;" 42 + "r3 = 1000;" 43 + "call %[bpf_iter_num_new];" 44 + /* destroy iterator */ 45 + "r1 = %[iter];" 46 + "call %[bpf_iter_num_destroy];" 47 + : 48 + : __imm_ptr(iter), ITER_HELPERS 49 + : __clobber_common 50 + ); 51 + 52 + return 0; 53 + } 54 + 55 + SEC("?raw_tp") 56 + __failure __msg("Unreleased reference id=1") 57 + int create_and_forget_to_destroy_fail(void *ctx) 58 + { 59 + struct bpf_iter_num iter; 60 + 61 + asm volatile ( 62 + /* create iterator */ 63 + "r1 = %[iter];" 64 + "r2 = 0;" 65 + "r3 = 1000;" 66 + "call %[bpf_iter_num_new];" 67 + : 68 + : __imm_ptr(iter), ITER_HELPERS 69 + : __clobber_common 70 + ); 71 + 72 + return 0; 73 + } 74 + 75 + SEC("?raw_tp") 76 + __failure __msg("expected an initialized iter_num as arg #1") 77 + int destroy_without_creating_fail(void *ctx) 78 + { 79 + /* init with zeros to stop verifier complaining about uninit stack */ 80 + struct 
bpf_iter_num iter; 81 + 82 + asm volatile ( 83 + "r1 = %[iter];" 84 + "call %[bpf_iter_num_destroy];" 85 + : 86 + : __imm_ptr(iter), ITER_HELPERS 87 + : __clobber_common 88 + ); 89 + 90 + return 0; 91 + } 92 + 93 + SEC("?raw_tp") 94 + __failure __msg("expected an initialized iter_num as arg #1") 95 + int compromise_iter_w_direct_write_fail(void *ctx) 96 + { 97 + struct bpf_iter_num iter; 98 + 99 + asm volatile ( 100 + /* create iterator */ 101 + "r1 = %[iter];" 102 + "r2 = 0;" 103 + "r3 = 1000;" 104 + "call %[bpf_iter_num_new];" 105 + 106 + /* directly write over first half of iter state */ 107 + "*(u64 *)(%[iter] + 0) = r0;" 108 + 109 + /* (attempt to) destroy iterator */ 110 + "r1 = %[iter];" 111 + "call %[bpf_iter_num_destroy];" 112 + : 113 + : __imm_ptr(iter), ITER_HELPERS 114 + : __clobber_common 115 + ); 116 + 117 + return 0; 118 + } 119 + 120 + SEC("?raw_tp") 121 + __failure __msg("Unreleased reference id=1") 122 + int compromise_iter_w_direct_write_and_skip_destroy_fail(void *ctx) 123 + { 124 + struct bpf_iter_num iter; 125 + 126 + asm volatile ( 127 + /* create iterator */ 128 + "r1 = %[iter];" 129 + "r2 = 0;" 130 + "r3 = 1000;" 131 + "call %[bpf_iter_num_new];" 132 + 133 + /* directly write over first half of iter state */ 134 + "*(u64 *)(%[iter] + 0) = r0;" 135 + 136 + /* don't destroy iter, leaking ref, which should fail */ 137 + : 138 + : __imm_ptr(iter), ITER_HELPERS 139 + : __clobber_common 140 + ); 141 + 142 + return 0; 143 + } 144 + 145 + SEC("?raw_tp") 146 + __failure __msg("expected an initialized iter_num as arg #1") 147 + int compromise_iter_w_helper_write_fail(void *ctx) 148 + { 149 + struct bpf_iter_num iter; 150 + 151 + asm volatile ( 152 + /* create iterator */ 153 + "r1 = %[iter];" 154 + "r2 = 0;" 155 + "r3 = 1000;" 156 + "call %[bpf_iter_num_new];" 157 + 158 + /* overwrite 8th byte with bpf_probe_read_kernel() */ 159 + "r1 = %[iter];" 160 + "r1 += 7;" 161 + "r2 = 1;" 162 + "r3 = 0;" /* NULL */ 163 + "call %[bpf_probe_read_kernel];" 164 + 
165 + /* (attempt to) destroy iterator */ 166 + "r1 = %[iter];" 167 + "call %[bpf_iter_num_destroy];" 168 + : 169 + : __imm_ptr(iter), ITER_HELPERS, __imm(bpf_probe_read_kernel) 170 + : __clobber_common 171 + ); 172 + 173 + return 0; 174 + } 175 + 176 + static __noinline void subprog_with_iter(void) 177 + { 178 + struct bpf_iter_num iter; 179 + 180 + bpf_iter_num_new(&iter, 0, 1); 181 + 182 + return; 183 + } 184 + 185 + SEC("?raw_tp") 186 + __failure 187 + /* ensure there was a call to subprog, which might happen without __noinline */ 188 + __msg("returning from callee:") 189 + __msg("Unreleased reference id=1") 190 + int leak_iter_from_subprog_fail(void *ctx) 191 + { 192 + subprog_with_iter(); 193 + 194 + return 0; 195 + } 196 + 197 + SEC("?raw_tp") 198 + __success __log_level(2) 199 + __msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)") 200 + int valid_stack_reuse(void *ctx) 201 + { 202 + struct bpf_iter_num iter; 203 + 204 + asm volatile ( 205 + /* create iterator */ 206 + "r1 = %[iter];" 207 + "r2 = 0;" 208 + "r3 = 1000;" 209 + "call %[bpf_iter_num_new];" 210 + /* destroy iterator */ 211 + "r1 = %[iter];" 212 + "call %[bpf_iter_num_destroy];" 213 + 214 + /* now reuse same stack slots */ 215 + 216 + /* create iterator */ 217 + "r1 = %[iter];" 218 + "r2 = 0;" 219 + "r3 = 1000;" 220 + "call %[bpf_iter_num_new];" 221 + /* destroy iterator */ 222 + "r1 = %[iter];" 223 + "call %[bpf_iter_num_destroy];" 224 + : 225 + : __imm_ptr(iter), ITER_HELPERS 226 + : __clobber_common 227 + ); 228 + 229 + return 0; 230 + } 231 + 232 + SEC("?raw_tp") 233 + __failure __msg("expected uninitialized iter_num as arg #1") 234 + int double_create_fail(void *ctx) 235 + { 236 + struct bpf_iter_num iter; 237 + 238 + asm volatile ( 239 + /* create iterator */ 240 + "r1 = %[iter];" 241 + "r2 = 0;" 242 + "r3 = 1000;" 243 + "call %[bpf_iter_num_new];" 244 + /* (attempt to) create iterator again */ 245 + "r1 = %[iter];" 246 + "r2 = 0;" 247 + "r3 = 1000;" 248 + "call %[bpf_iter_num_new];" 249 
+ /* destroy iterator */ 250 + "r1 = %[iter];" 251 + "call %[bpf_iter_num_destroy];" 252 + : 253 + : __imm_ptr(iter), ITER_HELPERS 254 + : __clobber_common 255 + ); 256 + 257 + return 0; 258 + } 259 + 260 + SEC("?raw_tp") 261 + __failure __msg("expected an initialized iter_num as arg #1") 262 + int double_destroy_fail(void *ctx) 263 + { 264 + struct bpf_iter_num iter; 265 + 266 + asm volatile ( 267 + /* create iterator */ 268 + "r1 = %[iter];" 269 + "r2 = 0;" 270 + "r3 = 1000;" 271 + "call %[bpf_iter_num_new];" 272 + /* destroy iterator */ 273 + "r1 = %[iter];" 274 + "call %[bpf_iter_num_destroy];" 275 + /* (attempt to) destroy iterator again */ 276 + "r1 = %[iter];" 277 + "call %[bpf_iter_num_destroy];" 278 + : 279 + : __imm_ptr(iter), ITER_HELPERS 280 + : __clobber_common 281 + ); 282 + 283 + return 0; 284 + } 285 + 286 + SEC("?raw_tp") 287 + __failure __msg("expected an initialized iter_num as arg #1") 288 + int next_without_new_fail(void *ctx) 289 + { 290 + struct bpf_iter_num iter; 291 + 292 + asm volatile ( 293 + /* don't create iterator and try to iterate*/ 294 + "r1 = %[iter];" 295 + "call %[bpf_iter_num_next];" 296 + /* destroy iterator */ 297 + "r1 = %[iter];" 298 + "call %[bpf_iter_num_destroy];" 299 + : 300 + : __imm_ptr(iter), ITER_HELPERS 301 + : __clobber_common 302 + ); 303 + 304 + return 0; 305 + } 306 + 307 + SEC("?raw_tp") 308 + __failure __msg("expected an initialized iter_num as arg #1") 309 + int next_after_destroy_fail(void *ctx) 310 + { 311 + struct bpf_iter_num iter; 312 + 313 + asm volatile ( 314 + /* create iterator */ 315 + "r1 = %[iter];" 316 + "r2 = 0;" 317 + "r3 = 1000;" 318 + "call %[bpf_iter_num_new];" 319 + /* destroy iterator */ 320 + "r1 = %[iter];" 321 + "call %[bpf_iter_num_destroy];" 322 + /* don't create iterator and try to iterate*/ 323 + "r1 = %[iter];" 324 + "call %[bpf_iter_num_next];" 325 + : 326 + : __imm_ptr(iter), ITER_HELPERS 327 + : __clobber_common 328 + ); 329 + 330 + return 0; 331 + } 332 + 333 + SEC("?raw_tp") 
334 + __failure __msg("invalid read from stack") 335 + int __naked read_from_iter_slot_fail(void) 336 + { 337 + asm volatile ( 338 + /* r6 points to struct bpf_iter_num on the stack */ 339 + "r6 = r10;" 340 + "r6 += -24;" 341 + 342 + /* create iterator */ 343 + "r1 = r6;" 344 + "r2 = 0;" 345 + "r3 = 1000;" 346 + "call %[bpf_iter_num_new];" 347 + 348 + /* attemp to leak bpf_iter_num state */ 349 + "r7 = *(u64 *)(r6 + 0);" 350 + "r8 = *(u64 *)(r6 + 8);" 351 + 352 + /* destroy iterator */ 353 + "r1 = r6;" 354 + "call %[bpf_iter_num_destroy];" 355 + 356 + /* leak bpf_iter_num state */ 357 + "r0 = r7;" 358 + "if r7 > r8 goto +1;" 359 + "r0 = r8;" 360 + "exit;" 361 + : 362 + : ITER_HELPERS 363 + : __clobber_common, "r6", "r7", "r8" 364 + ); 365 + } 366 + 367 + int zero; 368 + 369 + SEC("?raw_tp") 370 + __failure 371 + __flag(BPF_F_TEST_STATE_FREQ) 372 + __msg("Unreleased reference") 373 + int stacksafe_should_not_conflate_stack_spill_and_iter(void *ctx) 374 + { 375 + struct bpf_iter_num iter; 376 + 377 + asm volatile ( 378 + /* Create a fork in logic, with general setup as follows: 379 + * - fallthrough (first) path is valid; 380 + * - branch (second) path is invalid. 381 + * Then depending on what we do in fallthrough vs branch path, 382 + * we try to detect bugs in func_states_equal(), regsafe(), 383 + * refsafe(), stack_safe(), and similar by tricking verifier 384 + * into believing that branch state is a valid subset of 385 + * a fallthrough state. Verifier should reject overall 386 + * validation, unless there is a bug somewhere in verifier 387 + * logic. 
388 + */ 389 + "call %[bpf_get_prandom_u32];" 390 + "r6 = r0;" 391 + "call %[bpf_get_prandom_u32];" 392 + "r7 = r0;" 393 + 394 + "if r6 > r7 goto bad;" /* fork */ 395 + 396 + /* spill r6 into stack slot of bpf_iter_num var */ 397 + "*(u64 *)(%[iter] + 0) = r6;" 398 + 399 + "goto skip_bad;" 400 + 401 + "bad:" 402 + /* create iterator in the same stack slot */ 403 + "r1 = %[iter];" 404 + "r2 = 0;" 405 + "r3 = 1000;" 406 + "call %[bpf_iter_num_new];" 407 + 408 + /* but then forget about it and overwrite it back to r6 spill */ 409 + "*(u64 *)(%[iter] + 0) = r6;" 410 + 411 + "skip_bad:" 412 + "goto +0;" /* force checkpoint */ 413 + 414 + /* corrupt stack slots, if they are really dynptr */ 415 + "*(u64 *)(%[iter] + 0) = r6;" 416 + : 417 + : __imm_ptr(iter), 418 + __imm_addr(zero), 419 + __imm(bpf_get_prandom_u32), 420 + __imm(bpf_dynptr_from_mem), 421 + ITER_HELPERS 422 + : __clobber_common, "r6", "r7" 423 + ); 424 + 425 + return 0; 426 + }