Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kcsan: test: Add test cases for memory barrier instrumentation

Adds test cases to check that memory barriers are instrumented
correctly, and detection of missing memory barriers is working as
intended if CONFIG_KCSAN_STRICT=y.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>

Authored by Marco Elver and committed by Paul E. McKenney.

Commit: 8bc32b34 (parent: 7310bd1f)

+319
+319
kernel/kcsan/kcsan_test.c
··· 16 16 #define pr_fmt(fmt) "kcsan_test: " fmt 17 17 18 18 #include <kunit/test.h> 19 + #include <linux/atomic.h> 20 + #include <linux/bitops.h> 19 21 #include <linux/jiffies.h> 20 22 #include <linux/kcsan-checks.h> 21 23 #include <linux/kernel.h> 24 + #include <linux/mutex.h> 22 25 #include <linux/sched.h> 23 26 #include <linux/seqlock.h> 24 27 #include <linux/spinlock.h> ··· 308 305 __no_kcsan 309 306 static noinline void sink_value(long v) { WRITE_ONCE(test_sink, v); } 310 307 308 + /* 309 + * Generates a delay and some accesses that enter the runtime but do not produce 310 + * data races. 311 + */ 312 + static noinline void test_delay(int iter) 313 + { 314 + while (iter--) 315 + sink_value(READ_ONCE(test_sink)); 316 + } 317 + 311 318 static noinline void test_kernel_read(void) { sink_value(test_var); } 312 319 313 320 static noinline void test_kernel_write(void) ··· 479 466 kcsan_nestable_atomic_end(); 480 467 } 481 468 469 + #define TEST_KERNEL_LOCKED(name, acquire, release) \ 470 + static noinline void test_kernel_##name(void) \ 471 + { \ 472 + long *flag = &test_struct.val[0]; \ 473 + long v = 0; \ 474 + if (!(acquire)) \ 475 + return; \ 476 + while (v++ < 100) { \ 477 + test_var++; \ 478 + barrier(); \ 479 + } \ 480 + release; \ 481 + test_delay(10); \ 482 + } 483 + 484 + TEST_KERNEL_LOCKED(with_memorder, 485 + cmpxchg_acquire(flag, 0, 1) == 0, 486 + smp_store_release(flag, 0)); 487 + TEST_KERNEL_LOCKED(wrong_memorder, 488 + cmpxchg_relaxed(flag, 0, 1) == 0, 489 + WRITE_ONCE(*flag, 0)); 490 + TEST_KERNEL_LOCKED(atomic_builtin_with_memorder, 491 + __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED), 492 + __atomic_store_n(flag, 0, __ATOMIC_RELEASE)); 493 + TEST_KERNEL_LOCKED(atomic_builtin_wrong_memorder, 494 + __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED), 495 + __atomic_store_n(flag, 0, __ATOMIC_RELAXED)); 496 + 482 497 /* ===== Test cases ===== */ 498 + 499 + /* 500 + * Tests that various 
barriers have the expected effect on internal state. Not 501 + * exhaustive on atomic_t operations. Unlike the selftest, also checks for 502 + * too-strict barrier instrumentation; these can be tolerated, because it does 503 + * not cause false positives, but at least we should be aware of such cases. 504 + */ 505 + static void test_barrier_nothreads(struct kunit *test) 506 + { 507 + #ifdef CONFIG_KCSAN_WEAK_MEMORY 508 + struct kcsan_scoped_access *reorder_access = &current->kcsan_ctx.reorder_access; 509 + #else 510 + struct kcsan_scoped_access *reorder_access = NULL; 511 + #endif 512 + arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED; 513 + DEFINE_SPINLOCK(spinlock); 514 + DEFINE_MUTEX(mutex); 515 + atomic_t dummy; 516 + 517 + KCSAN_TEST_REQUIRES(test, reorder_access != NULL); 518 + KCSAN_TEST_REQUIRES(test, IS_ENABLED(CONFIG_SMP)); 519 + 520 + #define __KCSAN_EXPECT_BARRIER(access_type, barrier, order_before, name) \ 521 + do { \ 522 + reorder_access->type = (access_type) | KCSAN_ACCESS_SCOPED; \ 523 + reorder_access->size = sizeof(test_var); \ 524 + barrier; \ 525 + KUNIT_EXPECT_EQ_MSG(test, reorder_access->size, \ 526 + order_before ? 0 : sizeof(test_var), \ 527 + "improperly instrumented type=(" #access_type "): " name); \ 528 + } while (0) 529 + #define KCSAN_EXPECT_READ_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(0, b, o, #b) 530 + #define KCSAN_EXPECT_WRITE_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_WRITE, b, o, #b) 531 + #define KCSAN_EXPECT_RW_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, b, o, #b) 532 + 533 + /* Force creating a valid entry in reorder_access first. */ 534 + test_var = 0; 535 + while (test_var++ < 1000000 && reorder_access->size != sizeof(test_var)) 536 + __kcsan_check_read(&test_var, sizeof(test_var)); 537 + KUNIT_ASSERT_EQ(test, reorder_access->size, sizeof(test_var)); 538 + 539 + kcsan_nestable_atomic_begin(); /* No watchpoints in called functions. 
*/ 540 + 541 + KCSAN_EXPECT_READ_BARRIER(mb(), true); 542 + KCSAN_EXPECT_READ_BARRIER(wmb(), false); 543 + KCSAN_EXPECT_READ_BARRIER(rmb(), true); 544 + KCSAN_EXPECT_READ_BARRIER(smp_mb(), true); 545 + KCSAN_EXPECT_READ_BARRIER(smp_wmb(), false); 546 + KCSAN_EXPECT_READ_BARRIER(smp_rmb(), true); 547 + KCSAN_EXPECT_READ_BARRIER(dma_wmb(), false); 548 + KCSAN_EXPECT_READ_BARRIER(dma_rmb(), true); 549 + KCSAN_EXPECT_READ_BARRIER(smp_mb__before_atomic(), true); 550 + KCSAN_EXPECT_READ_BARRIER(smp_mb__after_atomic(), true); 551 + KCSAN_EXPECT_READ_BARRIER(smp_mb__after_spinlock(), true); 552 + KCSAN_EXPECT_READ_BARRIER(smp_store_mb(test_var, 0), true); 553 + KCSAN_EXPECT_READ_BARRIER(smp_load_acquire(&test_var), false); 554 + KCSAN_EXPECT_READ_BARRIER(smp_store_release(&test_var, 0), true); 555 + KCSAN_EXPECT_READ_BARRIER(xchg(&test_var, 0), true); 556 + KCSAN_EXPECT_READ_BARRIER(xchg_release(&test_var, 0), true); 557 + KCSAN_EXPECT_READ_BARRIER(xchg_relaxed(&test_var, 0), false); 558 + KCSAN_EXPECT_READ_BARRIER(cmpxchg(&test_var, 0, 0), true); 559 + KCSAN_EXPECT_READ_BARRIER(cmpxchg_release(&test_var, 0, 0), true); 560 + KCSAN_EXPECT_READ_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false); 561 + KCSAN_EXPECT_READ_BARRIER(atomic_read(&dummy), false); 562 + KCSAN_EXPECT_READ_BARRIER(atomic_read_acquire(&dummy), false); 563 + KCSAN_EXPECT_READ_BARRIER(atomic_set(&dummy, 0), false); 564 + KCSAN_EXPECT_READ_BARRIER(atomic_set_release(&dummy, 0), true); 565 + KCSAN_EXPECT_READ_BARRIER(atomic_add(1, &dummy), false); 566 + KCSAN_EXPECT_READ_BARRIER(atomic_add_return(1, &dummy), true); 567 + KCSAN_EXPECT_READ_BARRIER(atomic_add_return_acquire(1, &dummy), false); 568 + KCSAN_EXPECT_READ_BARRIER(atomic_add_return_release(1, &dummy), true); 569 + KCSAN_EXPECT_READ_BARRIER(atomic_add_return_relaxed(1, &dummy), false); 570 + KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add(1, &dummy), true); 571 + KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_acquire(1, &dummy), false); 572 + 
KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_release(1, &dummy), true); 573 + KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false); 574 + KCSAN_EXPECT_READ_BARRIER(test_and_set_bit(0, &test_var), true); 575 + KCSAN_EXPECT_READ_BARRIER(test_and_clear_bit(0, &test_var), true); 576 + KCSAN_EXPECT_READ_BARRIER(test_and_change_bit(0, &test_var), true); 577 + KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock(0, &test_var), true); 578 + KCSAN_EXPECT_READ_BARRIER(__clear_bit_unlock(0, &test_var), true); 579 + KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); 580 + KCSAN_EXPECT_READ_BARRIER(arch_spin_lock(&arch_spinlock), false); 581 + KCSAN_EXPECT_READ_BARRIER(arch_spin_unlock(&arch_spinlock), true); 582 + KCSAN_EXPECT_READ_BARRIER(spin_lock(&spinlock), false); 583 + KCSAN_EXPECT_READ_BARRIER(spin_unlock(&spinlock), true); 584 + KCSAN_EXPECT_READ_BARRIER(mutex_lock(&mutex), false); 585 + KCSAN_EXPECT_READ_BARRIER(mutex_unlock(&mutex), true); 586 + 587 + KCSAN_EXPECT_WRITE_BARRIER(mb(), true); 588 + KCSAN_EXPECT_WRITE_BARRIER(wmb(), true); 589 + KCSAN_EXPECT_WRITE_BARRIER(rmb(), false); 590 + KCSAN_EXPECT_WRITE_BARRIER(smp_mb(), true); 591 + KCSAN_EXPECT_WRITE_BARRIER(smp_wmb(), true); 592 + KCSAN_EXPECT_WRITE_BARRIER(smp_rmb(), false); 593 + KCSAN_EXPECT_WRITE_BARRIER(dma_wmb(), true); 594 + KCSAN_EXPECT_WRITE_BARRIER(dma_rmb(), false); 595 + KCSAN_EXPECT_WRITE_BARRIER(smp_mb__before_atomic(), true); 596 + KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_atomic(), true); 597 + KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_spinlock(), true); 598 + KCSAN_EXPECT_WRITE_BARRIER(smp_store_mb(test_var, 0), true); 599 + KCSAN_EXPECT_WRITE_BARRIER(smp_load_acquire(&test_var), false); 600 + KCSAN_EXPECT_WRITE_BARRIER(smp_store_release(&test_var, 0), true); 601 + KCSAN_EXPECT_WRITE_BARRIER(xchg(&test_var, 0), true); 602 + KCSAN_EXPECT_WRITE_BARRIER(xchg_release(&test_var, 0), true); 603 + KCSAN_EXPECT_WRITE_BARRIER(xchg_relaxed(&test_var, 0), false); 604 + 
KCSAN_EXPECT_WRITE_BARRIER(cmpxchg(&test_var, 0, 0), true); 605 + KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_release(&test_var, 0, 0), true); 606 + KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false); 607 + KCSAN_EXPECT_WRITE_BARRIER(atomic_read(&dummy), false); 608 + KCSAN_EXPECT_WRITE_BARRIER(atomic_read_acquire(&dummy), false); 609 + KCSAN_EXPECT_WRITE_BARRIER(atomic_set(&dummy, 0), false); 610 + KCSAN_EXPECT_WRITE_BARRIER(atomic_set_release(&dummy, 0), true); 611 + KCSAN_EXPECT_WRITE_BARRIER(atomic_add(1, &dummy), false); 612 + KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return(1, &dummy), true); 613 + KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_acquire(1, &dummy), false); 614 + KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_release(1, &dummy), true); 615 + KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_relaxed(1, &dummy), false); 616 + KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add(1, &dummy), true); 617 + KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_acquire(1, &dummy), false); 618 + KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_release(1, &dummy), true); 619 + KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false); 620 + KCSAN_EXPECT_WRITE_BARRIER(test_and_set_bit(0, &test_var), true); 621 + KCSAN_EXPECT_WRITE_BARRIER(test_and_clear_bit(0, &test_var), true); 622 + KCSAN_EXPECT_WRITE_BARRIER(test_and_change_bit(0, &test_var), true); 623 + KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock(0, &test_var), true); 624 + KCSAN_EXPECT_WRITE_BARRIER(__clear_bit_unlock(0, &test_var), true); 625 + KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); 626 + KCSAN_EXPECT_WRITE_BARRIER(arch_spin_lock(&arch_spinlock), false); 627 + KCSAN_EXPECT_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock), true); 628 + KCSAN_EXPECT_WRITE_BARRIER(spin_lock(&spinlock), false); 629 + KCSAN_EXPECT_WRITE_BARRIER(spin_unlock(&spinlock), true); 630 + KCSAN_EXPECT_WRITE_BARRIER(mutex_lock(&mutex), false); 631 + KCSAN_EXPECT_WRITE_BARRIER(mutex_unlock(&mutex), 
true); 632 + 633 + KCSAN_EXPECT_RW_BARRIER(mb(), true); 634 + KCSAN_EXPECT_RW_BARRIER(wmb(), true); 635 + KCSAN_EXPECT_RW_BARRIER(rmb(), true); 636 + KCSAN_EXPECT_RW_BARRIER(smp_mb(), true); 637 + KCSAN_EXPECT_RW_BARRIER(smp_wmb(), true); 638 + KCSAN_EXPECT_RW_BARRIER(smp_rmb(), true); 639 + KCSAN_EXPECT_RW_BARRIER(dma_wmb(), true); 640 + KCSAN_EXPECT_RW_BARRIER(dma_rmb(), true); 641 + KCSAN_EXPECT_RW_BARRIER(smp_mb__before_atomic(), true); 642 + KCSAN_EXPECT_RW_BARRIER(smp_mb__after_atomic(), true); 643 + KCSAN_EXPECT_RW_BARRIER(smp_mb__after_spinlock(), true); 644 + KCSAN_EXPECT_RW_BARRIER(smp_store_mb(test_var, 0), true); 645 + KCSAN_EXPECT_RW_BARRIER(smp_load_acquire(&test_var), false); 646 + KCSAN_EXPECT_RW_BARRIER(smp_store_release(&test_var, 0), true); 647 + KCSAN_EXPECT_RW_BARRIER(xchg(&test_var, 0), true); 648 + KCSAN_EXPECT_RW_BARRIER(xchg_release(&test_var, 0), true); 649 + KCSAN_EXPECT_RW_BARRIER(xchg_relaxed(&test_var, 0), false); 650 + KCSAN_EXPECT_RW_BARRIER(cmpxchg(&test_var, 0, 0), true); 651 + KCSAN_EXPECT_RW_BARRIER(cmpxchg_release(&test_var, 0, 0), true); 652 + KCSAN_EXPECT_RW_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false); 653 + KCSAN_EXPECT_RW_BARRIER(atomic_read(&dummy), false); 654 + KCSAN_EXPECT_RW_BARRIER(atomic_read_acquire(&dummy), false); 655 + KCSAN_EXPECT_RW_BARRIER(atomic_set(&dummy, 0), false); 656 + KCSAN_EXPECT_RW_BARRIER(atomic_set_release(&dummy, 0), true); 657 + KCSAN_EXPECT_RW_BARRIER(atomic_add(1, &dummy), false); 658 + KCSAN_EXPECT_RW_BARRIER(atomic_add_return(1, &dummy), true); 659 + KCSAN_EXPECT_RW_BARRIER(atomic_add_return_acquire(1, &dummy), false); 660 + KCSAN_EXPECT_RW_BARRIER(atomic_add_return_release(1, &dummy), true); 661 + KCSAN_EXPECT_RW_BARRIER(atomic_add_return_relaxed(1, &dummy), false); 662 + KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add(1, &dummy), true); 663 + KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_acquire(1, &dummy), false); 664 + KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_release(1, &dummy), true); 665 + 
KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false); 666 + KCSAN_EXPECT_RW_BARRIER(test_and_set_bit(0, &test_var), true); 667 + KCSAN_EXPECT_RW_BARRIER(test_and_clear_bit(0, &test_var), true); 668 + KCSAN_EXPECT_RW_BARRIER(test_and_change_bit(0, &test_var), true); 669 + KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock(0, &test_var), true); 670 + KCSAN_EXPECT_RW_BARRIER(__clear_bit_unlock(0, &test_var), true); 671 + KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); 672 + KCSAN_EXPECT_RW_BARRIER(arch_spin_lock(&arch_spinlock), false); 673 + KCSAN_EXPECT_RW_BARRIER(arch_spin_unlock(&arch_spinlock), true); 674 + KCSAN_EXPECT_RW_BARRIER(spin_lock(&spinlock), false); 675 + KCSAN_EXPECT_RW_BARRIER(spin_unlock(&spinlock), true); 676 + KCSAN_EXPECT_RW_BARRIER(mutex_lock(&mutex), false); 677 + KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&mutex), true); 678 + 679 + kcsan_nestable_atomic_end(); 680 + } 483 681 484 682 /* Simple test with normal data race. */ 485 683 __no_kcsan ··· 1263 1039 KUNIT_EXPECT_TRUE(test, match); 1264 1040 } 1265 1041 1042 + __no_kcsan 1043 + static void test_correct_barrier(struct kunit *test) 1044 + { 1045 + struct expect_report expect = { 1046 + .access = { 1047 + { test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) }, 1048 + { test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) }, 1049 + }, 1050 + }; 1051 + bool match_expect = false; 1052 + 1053 + test_struct.val[0] = 0; /* init unlocked */ 1054 + begin_test_checks(test_kernel_with_memorder, test_kernel_with_memorder); 1055 + do { 1056 + match_expect = report_matches_any_reordered(&expect); 1057 + } while (!end_test_checks(match_expect)); 1058 + KUNIT_EXPECT_FALSE(test, match_expect); 1059 + } 1060 + 1061 + __no_kcsan 1062 + static void test_missing_barrier(struct kunit *test) 1063 + { 1064 + struct expect_report expect = { 1065 + .access = { 1066 + { test_kernel_wrong_memorder, &test_var, 
sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) }, 1067 + { test_kernel_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) }, 1068 + }, 1069 + }; 1070 + bool match_expect = false; 1071 + 1072 + test_struct.val[0] = 0; /* init unlocked */ 1073 + begin_test_checks(test_kernel_wrong_memorder, test_kernel_wrong_memorder); 1074 + do { 1075 + match_expect = report_matches_any_reordered(&expect); 1076 + } while (!end_test_checks(match_expect)); 1077 + if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY)) 1078 + KUNIT_EXPECT_TRUE(test, match_expect); 1079 + else 1080 + KUNIT_EXPECT_FALSE(test, match_expect); 1081 + } 1082 + 1083 + __no_kcsan 1084 + static void test_atomic_builtins_correct_barrier(struct kunit *test) 1085 + { 1086 + struct expect_report expect = { 1087 + .access = { 1088 + { test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) }, 1089 + { test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) }, 1090 + }, 1091 + }; 1092 + bool match_expect = false; 1093 + 1094 + test_struct.val[0] = 0; /* init unlocked */ 1095 + begin_test_checks(test_kernel_atomic_builtin_with_memorder, 1096 + test_kernel_atomic_builtin_with_memorder); 1097 + do { 1098 + match_expect = report_matches_any_reordered(&expect); 1099 + } while (!end_test_checks(match_expect)); 1100 + KUNIT_EXPECT_FALSE(test, match_expect); 1101 + } 1102 + 1103 + __no_kcsan 1104 + static void test_atomic_builtins_missing_barrier(struct kunit *test) 1105 + { 1106 + struct expect_report expect = { 1107 + .access = { 1108 + { test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) }, 1109 + { test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) }, 1110 + }, 1111 + }; 1112 + bool match_expect = false; 1113 + 1114 + test_struct.val[0] = 0; /* init unlocked */ 1115 + 
begin_test_checks(test_kernel_atomic_builtin_wrong_memorder, 1116 + test_kernel_atomic_builtin_wrong_memorder); 1117 + do { 1118 + match_expect = report_matches_any_reordered(&expect); 1119 + } while (!end_test_checks(match_expect)); 1120 + if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY)) 1121 + KUNIT_EXPECT_TRUE(test, match_expect); 1122 + else 1123 + KUNIT_EXPECT_FALSE(test, match_expect); 1124 + } 1125 + 1266 1126 /* 1267 1127 * Generate thread counts for all test cases. Values generated are in interval 1268 1128 * [2, 5] followed by exponentially increasing thread counts from 8 to 32. ··· 1396 1088 1397 1089 #define KCSAN_KUNIT_CASE(test_name) KUNIT_CASE_PARAM(test_name, nthreads_gen_params) 1398 1090 static struct kunit_case kcsan_test_cases[] = { 1091 + KUNIT_CASE(test_barrier_nothreads), 1399 1092 KCSAN_KUNIT_CASE(test_basic), 1400 1093 KCSAN_KUNIT_CASE(test_concurrent_races), 1401 1094 KCSAN_KUNIT_CASE(test_novalue_change), ··· 1421 1112 KCSAN_KUNIT_CASE(test_seqlock_noreport), 1422 1113 KCSAN_KUNIT_CASE(test_atomic_builtins), 1423 1114 KCSAN_KUNIT_CASE(test_1bit_value_change), 1115 + KCSAN_KUNIT_CASE(test_correct_barrier), 1116 + KCSAN_KUNIT_CASE(test_missing_barrier), 1117 + KCSAN_KUNIT_CASE(test_atomic_builtins_correct_barrier), 1118 + KCSAN_KUNIT_CASE(test_atomic_builtins_missing_barrier), 1424 1119 {}, 1425 1120 }; 1426 1121 ··· 1489 1176 observed.nlines = 0; 1490 1177 spin_unlock_irqrestore(&observed.lock, flags); 1491 1178 1179 + if (strstr(test->name, "nothreads")) 1180 + return 0; 1181 + 1492 1182 if (!torture_init_begin((char *)test->name, 1)) 1493 1183 return -EBUSY; 1494 1184 ··· 1533 1217 { 1534 1218 struct task_struct **stop_thread; 1535 1219 int i; 1220 + 1221 + if (strstr(test->name, "nothreads")) 1222 + return; 1536 1223 1537 1224 if (torture_cleanup_begin()) 1538 1225 return;