Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

percpu: add test module for various percpu operations

Tests various percpu operations.

Enable with CONFIG_PERCPU_TEST=m.

Signed-off-by: Greg Thelen <gthelen@google.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Greg Thelen; committed by Linus Torvalds.
623fd807 3d035f58

+149
+9
lib/Kconfig.debug
@@ -1481,6 +1481,15 @@
 	help
 	  A benchmark measuring the performance of the interval tree library

+config PERCPU_TEST
+	tristate "Per cpu operations test"
+	depends on m && DEBUG_KERNEL
+	help
+	  Enable this option to build test module which validates per-cpu
+	  operations.
+
+	  If unsure, say N.
+
 config ATOMIC64_SELFTEST
 	bool "Perform an atomic64_t self-test at boot"
 	help
+2
lib/Makefile
@@ -157,6 +157,8 @@

 interval_tree_test-objs := interval_tree_test_main.o interval_tree.o

+obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
+
 obj-$(CONFIG_ASN1) += asn1_decoder.o

 obj-$(CONFIG_FONT_SUPPORT) += fonts/
+138
lib/percpu_test.c
··· 1 + #include <linux/module.h> 2 + 3 + /* validate @native and @pcp counter values match @expected */ 4 + #define CHECK(native, pcp, expected) \ 5 + do { \ 6 + WARN((native) != (expected), \ 7 + "raw %ld (0x%lx) != expected %lld (0x%llx)", \ 8 + (native), (native), \ 9 + (long long)(expected), (long long)(expected)); \ 10 + WARN(__this_cpu_read(pcp) != (expected), \ 11 + "pcp %ld (0x%lx) != expected %lld (0x%llx)", \ 12 + __this_cpu_read(pcp), __this_cpu_read(pcp), \ 13 + (long long)(expected), (long long)(expected)); \ 14 + } while (0) 15 + 16 + static DEFINE_PER_CPU(long, long_counter); 17 + static DEFINE_PER_CPU(unsigned long, ulong_counter); 18 + 19 + static int __init percpu_test_init(void) 20 + { 21 + /* 22 + * volatile prevents compiler from optimizing it uses, otherwise the 23 + * +ul_one/-ul_one below would replace with inc/dec instructions. 24 + */ 25 + volatile unsigned int ui_one = 1; 26 + long l = 0; 27 + unsigned long ul = 0; 28 + 29 + pr_info("percpu test start\n"); 30 + 31 + preempt_disable(); 32 + 33 + l += -1; 34 + __this_cpu_add(long_counter, -1); 35 + CHECK(l, long_counter, -1); 36 + 37 + l += 1; 38 + __this_cpu_add(long_counter, 1); 39 + CHECK(l, long_counter, 0); 40 + 41 + ul = 0; 42 + __this_cpu_write(ulong_counter, 0); 43 + 44 + ul += 1UL; 45 + __this_cpu_add(ulong_counter, 1UL); 46 + CHECK(ul, ulong_counter, 1); 47 + 48 + ul += -1UL; 49 + __this_cpu_add(ulong_counter, -1UL); 50 + CHECK(ul, ulong_counter, 0); 51 + 52 + ul += -(unsigned long)1; 53 + __this_cpu_add(ulong_counter, -(unsigned long)1); 54 + CHECK(ul, ulong_counter, -1); 55 + 56 + ul = 0; 57 + __this_cpu_write(ulong_counter, 0); 58 + 59 + ul -= 1; 60 + __this_cpu_dec(ulong_counter); 61 + CHECK(ul, ulong_counter, -1); 62 + CHECK(ul, ulong_counter, ULONG_MAX); 63 + 64 + l += -ui_one; 65 + __this_cpu_add(long_counter, -ui_one); 66 + CHECK(l, long_counter, 0xffffffff); 67 + 68 + l += ui_one; 69 + __this_cpu_add(long_counter, ui_one); 70 + CHECK(l, long_counter, 
(long)0x100000000LL); 71 + 72 + 73 + l = 0; 74 + __this_cpu_write(long_counter, 0); 75 + 76 + l -= ui_one; 77 + __this_cpu_sub(long_counter, ui_one); 78 + CHECK(l, long_counter, -1); 79 + 80 + l = 0; 81 + __this_cpu_write(long_counter, 0); 82 + 83 + l += ui_one; 84 + __this_cpu_add(long_counter, ui_one); 85 + CHECK(l, long_counter, 1); 86 + 87 + l += -ui_one; 88 + __this_cpu_add(long_counter, -ui_one); 89 + CHECK(l, long_counter, (long)0x100000000LL); 90 + 91 + l = 0; 92 + __this_cpu_write(long_counter, 0); 93 + 94 + l -= ui_one; 95 + this_cpu_sub(long_counter, ui_one); 96 + CHECK(l, long_counter, -1); 97 + CHECK(l, long_counter, ULONG_MAX); 98 + 99 + ul = 0; 100 + __this_cpu_write(ulong_counter, 0); 101 + 102 + ul += ui_one; 103 + __this_cpu_add(ulong_counter, ui_one); 104 + CHECK(ul, ulong_counter, 1); 105 + 106 + ul = 0; 107 + __this_cpu_write(ulong_counter, 0); 108 + 109 + ul -= ui_one; 110 + __this_cpu_sub(ulong_counter, ui_one); 111 + CHECK(ul, ulong_counter, -1); 112 + CHECK(ul, ulong_counter, ULONG_MAX); 113 + 114 + ul = 3; 115 + __this_cpu_write(ulong_counter, 3); 116 + 117 + ul = this_cpu_sub_return(ulong_counter, ui_one); 118 + CHECK(ul, ulong_counter, 2); 119 + 120 + ul = __this_cpu_sub_return(ulong_counter, ui_one); 121 + CHECK(ul, ulong_counter, 1); 122 + 123 + preempt_enable(); 124 + 125 + pr_info("percpu test done\n"); 126 + return -EAGAIN; /* Fail will directly unload the module */ 127 + } 128 + 129 + static void __exit percpu_test_exit(void) 130 + { 131 + } 132 + 133 + module_init(percpu_test_init) 134 + module_exit(percpu_test_exit) 135 + 136 + MODULE_LICENSE("GPL"); 137 + MODULE_AUTHOR("Greg Thelen"); 138 + MODULE_DESCRIPTION("percpu operations test");