Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/damon: add kunit tests

This commit adds kunit based unit tests for the core and the virtual
address spaces monitoring primitives of DAMON.

Link: https://lkml.kernel.org/r/20210716081449.22187-12-sj38.park@gmail.com
Signed-off-by: SeongJae Park <sjpark@amazon.de>
Reviewed-by: Brendan Higgins <brendanhiggins@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Amit Shah <amit@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: David Woodhouse <dwmw@amazon.com>
Cc: Fan Du <fan.du@intel.com>
Cc: Fernand Sieber <sieberf@amazon.com>
Cc: Greg Kroah-Hartman <greg@kroah.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Leonard Foerster <foersleo@amazon.de>
Cc: Marco Elver <elver@google.com>
Cc: Markus Boehme <markubo@amazon.de>
Cc: Maximilian Heyne <mheyne@amazon.de>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by SeongJae Park; committed by Linus Torvalds.
17ccae8b c4ba6014

+760
+36
mm/damon/Kconfig
··· 12 12 See https://damonitor.github.io/doc/html/latest-damon/index.html for 13 13 more information. 14 14 15 + config DAMON_KUNIT_TEST 16 + bool "Test for damon" if !KUNIT_ALL_TESTS 17 + depends on DAMON && KUNIT=y 18 + default KUNIT_ALL_TESTS 19 + help 20 + This builds the DAMON Kunit test suite. 21 + 22 + For more information on KUnit and unit tests in general, please refer 23 + to the KUnit documentation. 24 + 25 + If unsure, say N. 26 + 15 27 config DAMON_VADDR 16 28 bool "Data access monitoring primitives for virtual address spaces" 17 29 depends on DAMON && MMU ··· 32 20 This builds the default data access monitoring primitives for DAMON 33 21 that works for virtual address spaces. 34 22 23 + config DAMON_VADDR_KUNIT_TEST 24 + bool "Test for DAMON primitives" if !KUNIT_ALL_TESTS 25 + depends on DAMON_VADDR && KUNIT=y 26 + default KUNIT_ALL_TESTS 27 + help 28 + This builds the DAMON virtual addresses primitives Kunit test suite. 29 + 30 + For more information on KUnit and unit tests in general, please refer 31 + to the KUnit documentation. 32 + 33 + If unsure, say N. 34 + 35 35 config DAMON_DBGFS 36 36 bool "DAMON debugfs interface" 37 37 depends on DAMON_VADDR && DEBUG_FS 38 38 help 39 39 This builds the debugfs interface for DAMON. The user space admins 40 40 can use the interface for arbitrary data access monitoring. 41 + 42 + If unsure, say N. 43 + 44 + config DAMON_DBGFS_KUNIT_TEST 45 + bool "Test for damon debugfs interface" if !KUNIT_ALL_TESTS 46 + depends on DAMON_DBGFS && KUNIT=y 47 + default KUNIT_ALL_TESTS 48 + help 49 + This builds the DAMON debugfs interface Kunit test suite. 50 + 51 + For more information on KUnit and unit tests in general, please refer 52 + to the KUnit documentation. 41 53 42 54 If unsure, say N. 43 55
+253
mm/damon/core-test.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Data Access Monitor Unit Tests 4 + * 5 + * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved. 6 + * 7 + * Author: SeongJae Park <sjpark@amazon.de> 8 + */ 9 + 10 + #ifdef CONFIG_DAMON_KUNIT_TEST 11 + 12 + #ifndef _DAMON_CORE_TEST_H 13 + #define _DAMON_CORE_TEST_H 14 + 15 + #include <kunit/test.h> 16 + 17 + static void damon_test_regions(struct kunit *test) 18 + { 19 + struct damon_region *r; 20 + struct damon_target *t; 21 + 22 + r = damon_new_region(1, 2); 23 + KUNIT_EXPECT_EQ(test, 1ul, r->ar.start); 24 + KUNIT_EXPECT_EQ(test, 2ul, r->ar.end); 25 + KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses); 26 + 27 + t = damon_new_target(42); 28 + KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t)); 29 + 30 + damon_add_region(r, t); 31 + KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t)); 32 + 33 + damon_del_region(r, t); 34 + KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t)); 35 + 36 + damon_free_target(t); 37 + } 38 + 39 + static unsigned int nr_damon_targets(struct damon_ctx *ctx) 40 + { 41 + struct damon_target *t; 42 + unsigned int nr_targets = 0; 43 + 44 + damon_for_each_target(t, ctx) 45 + nr_targets++; 46 + 47 + return nr_targets; 48 + } 49 + 50 + static void damon_test_target(struct kunit *test) 51 + { 52 + struct damon_ctx *c = damon_new_ctx(); 53 + struct damon_target *t; 54 + 55 + t = damon_new_target(42); 56 + KUNIT_EXPECT_EQ(test, 42ul, t->id); 57 + KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c)); 58 + 59 + damon_add_target(c, t); 60 + KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c)); 61 + 62 + damon_destroy_target(t); 63 + KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c)); 64 + 65 + damon_destroy_ctx(c); 66 + } 67 + 68 + /* 69 + * Test kdamond_reset_aggregated() 70 + * 71 + * DAMON checks access to each region and aggregates this information as the 72 + * access frequency of each region. In detail, it increases '->nr_accesses' of 73 + * regions that an access has confirmed. 
'kdamond_reset_aggregated()' flushes 74 + * the aggregated information ('->nr_accesses' of each regions) to the result 75 + * buffer. As a result of the flushing, the '->nr_accesses' of regions are 76 + * initialized to zero. 77 + */ 78 + static void damon_test_aggregate(struct kunit *test) 79 + { 80 + struct damon_ctx *ctx = damon_new_ctx(); 81 + unsigned long target_ids[] = {1, 2, 3}; 82 + unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} }; 83 + unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} }; 84 + unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} }; 85 + struct damon_target *t; 86 + struct damon_region *r; 87 + int it, ir; 88 + 89 + damon_set_targets(ctx, target_ids, 3); 90 + 91 + it = 0; 92 + damon_for_each_target(t, ctx) { 93 + for (ir = 0; ir < 3; ir++) { 94 + r = damon_new_region(saddr[it][ir], eaddr[it][ir]); 95 + r->nr_accesses = accesses[it][ir]; 96 + damon_add_region(r, t); 97 + } 98 + it++; 99 + } 100 + kdamond_reset_aggregated(ctx); 101 + it = 0; 102 + damon_for_each_target(t, ctx) { 103 + ir = 0; 104 + /* '->nr_accesses' should be zeroed */ 105 + damon_for_each_region(r, t) { 106 + KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses); 107 + ir++; 108 + } 109 + /* regions should be preserved */ 110 + KUNIT_EXPECT_EQ(test, 3, ir); 111 + it++; 112 + } 113 + /* targets also should be preserved */ 114 + KUNIT_EXPECT_EQ(test, 3, it); 115 + 116 + damon_destroy_ctx(ctx); 117 + } 118 + 119 + static void damon_test_split_at(struct kunit *test) 120 + { 121 + struct damon_ctx *c = damon_new_ctx(); 122 + struct damon_target *t; 123 + struct damon_region *r; 124 + 125 + t = damon_new_target(42); 126 + r = damon_new_region(0, 100); 127 + damon_add_region(r, t); 128 + damon_split_region_at(c, t, r, 25); 129 + KUNIT_EXPECT_EQ(test, r->ar.start, 0ul); 130 + KUNIT_EXPECT_EQ(test, r->ar.end, 25ul); 131 + 132 + r = damon_next_region(r); 133 + KUNIT_EXPECT_EQ(test, r->ar.start, 25ul); 134 + KUNIT_EXPECT_EQ(test, r->ar.end, 
100ul); 135 + 136 + damon_free_target(t); 137 + damon_destroy_ctx(c); 138 + } 139 + 140 + static void damon_test_merge_two(struct kunit *test) 141 + { 142 + struct damon_target *t; 143 + struct damon_region *r, *r2, *r3; 144 + int i; 145 + 146 + t = damon_new_target(42); 147 + r = damon_new_region(0, 100); 148 + r->nr_accesses = 10; 149 + damon_add_region(r, t); 150 + r2 = damon_new_region(100, 300); 151 + r2->nr_accesses = 20; 152 + damon_add_region(r2, t); 153 + 154 + damon_merge_two_regions(t, r, r2); 155 + KUNIT_EXPECT_EQ(test, r->ar.start, 0ul); 156 + KUNIT_EXPECT_EQ(test, r->ar.end, 300ul); 157 + KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u); 158 + 159 + i = 0; 160 + damon_for_each_region(r3, t) { 161 + KUNIT_EXPECT_PTR_EQ(test, r, r3); 162 + i++; 163 + } 164 + KUNIT_EXPECT_EQ(test, i, 1); 165 + 166 + damon_free_target(t); 167 + } 168 + 169 + static struct damon_region *__nth_region_of(struct damon_target *t, int idx) 170 + { 171 + struct damon_region *r; 172 + unsigned int i = 0; 173 + 174 + damon_for_each_region(r, t) { 175 + if (i++ == idx) 176 + return r; 177 + } 178 + 179 + return NULL; 180 + } 181 + 182 + static void damon_test_merge_regions_of(struct kunit *test) 183 + { 184 + struct damon_target *t; 185 + struct damon_region *r; 186 + unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184}; 187 + unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230}; 188 + unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2}; 189 + 190 + unsigned long saddrs[] = {0, 114, 130, 156, 170}; 191 + unsigned long eaddrs[] = {112, 130, 156, 170, 230}; 192 + int i; 193 + 194 + t = damon_new_target(42); 195 + for (i = 0; i < ARRAY_SIZE(sa); i++) { 196 + r = damon_new_region(sa[i], ea[i]); 197 + r->nr_accesses = nrs[i]; 198 + damon_add_region(r, t); 199 + } 200 + 201 + damon_merge_regions_of(t, 9, 9999); 202 + /* 0-112, 114-130, 130-156, 156-170 */ 203 + KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u); 204 + for (i = 0; i < 5; i++) { 205 + r = __nth_region_of(t, i); 206 + 
KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]); 207 + KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]); 208 + } 209 + damon_free_target(t); 210 + } 211 + 212 + static void damon_test_split_regions_of(struct kunit *test) 213 + { 214 + struct damon_ctx *c = damon_new_ctx(); 215 + struct damon_target *t; 216 + struct damon_region *r; 217 + 218 + t = damon_new_target(42); 219 + r = damon_new_region(0, 22); 220 + damon_add_region(r, t); 221 + damon_split_regions_of(c, t, 2); 222 + KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2u); 223 + damon_free_target(t); 224 + 225 + t = damon_new_target(42); 226 + r = damon_new_region(0, 220); 227 + damon_add_region(r, t); 228 + damon_split_regions_of(c, t, 4); 229 + KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 4u); 230 + damon_free_target(t); 231 + damon_destroy_ctx(c); 232 + } 233 + 234 + static struct kunit_case damon_test_cases[] = { 235 + KUNIT_CASE(damon_test_target), 236 + KUNIT_CASE(damon_test_regions), 237 + KUNIT_CASE(damon_test_aggregate), 238 + KUNIT_CASE(damon_test_split_at), 239 + KUNIT_CASE(damon_test_merge_two), 240 + KUNIT_CASE(damon_test_merge_regions_of), 241 + KUNIT_CASE(damon_test_split_regions_of), 242 + {}, 243 + }; 244 + 245 + static struct kunit_suite damon_test_suite = { 246 + .name = "damon", 247 + .test_cases = damon_test_cases, 248 + }; 249 + kunit_test_suite(damon_test_suite); 250 + 251 + #endif /* _DAMON_CORE_TEST_H */ 252 + 253 + #endif /* CONFIG_DAMON_KUNIT_TEST */
+7
mm/damon/core.c
··· 16 16 #define CREATE_TRACE_POINTS 17 17 #include <trace/events/damon.h> 18 18 19 + #ifdef CONFIG_DAMON_KUNIT_TEST 20 + #undef DAMON_MIN_REGION 21 + #define DAMON_MIN_REGION 1 22 + #endif 23 + 19 24 /* Get a random number in [l, r) */ 20 25 #define damon_rand(l, r) (l + prandom_u32_max(r - l)) 21 26 ··· 716 711 717 712 do_exit(0); 718 713 } 714 + 715 + #include "core-test.h"
+126
mm/damon/dbgfs-test.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * DAMON Debugfs Interface Unit Tests 4 + * 5 + * Author: SeongJae Park <sjpark@amazon.de> 6 + */ 7 + 8 + #ifdef CONFIG_DAMON_DBGFS_KUNIT_TEST 9 + 10 + #ifndef _DAMON_DBGFS_TEST_H 11 + #define _DAMON_DBGFS_TEST_H 12 + 13 + #include <kunit/test.h> 14 + 15 + static void damon_dbgfs_test_str_to_target_ids(struct kunit *test) 16 + { 17 + char *question; 18 + unsigned long *answers; 19 + unsigned long expected[] = {12, 35, 46}; 20 + ssize_t nr_integers = 0, i; 21 + 22 + question = "123"; 23 + answers = str_to_target_ids(question, strnlen(question, 128), 24 + &nr_integers); 25 + KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers); 26 + KUNIT_EXPECT_EQ(test, 123ul, answers[0]); 27 + kfree(answers); 28 + 29 + question = "123abc"; 30 + answers = str_to_target_ids(question, strnlen(question, 128), 31 + &nr_integers); 32 + KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers); 33 + KUNIT_EXPECT_EQ(test, 123ul, answers[0]); 34 + kfree(answers); 35 + 36 + question = "a123"; 37 + answers = str_to_target_ids(question, strnlen(question, 128), 38 + &nr_integers); 39 + KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); 40 + kfree(answers); 41 + 42 + question = "12 35"; 43 + answers = str_to_target_ids(question, strnlen(question, 128), 44 + &nr_integers); 45 + KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers); 46 + for (i = 0; i < nr_integers; i++) 47 + KUNIT_EXPECT_EQ(test, expected[i], answers[i]); 48 + kfree(answers); 49 + 50 + question = "12 35 46"; 51 + answers = str_to_target_ids(question, strnlen(question, 128), 52 + &nr_integers); 53 + KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers); 54 + for (i = 0; i < nr_integers; i++) 55 + KUNIT_EXPECT_EQ(test, expected[i], answers[i]); 56 + kfree(answers); 57 + 58 + question = "12 35 abc 46"; 59 + answers = str_to_target_ids(question, strnlen(question, 128), 60 + &nr_integers); 61 + KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers); 62 + for (i = 0; i < 2; i++) 63 + KUNIT_EXPECT_EQ(test, expected[i], 
answers[i]); 64 + kfree(answers); 65 + 66 + question = ""; 67 + answers = str_to_target_ids(question, strnlen(question, 128), 68 + &nr_integers); 69 + KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); 70 + kfree(answers); 71 + 72 + question = "\n"; 73 + answers = str_to_target_ids(question, strnlen(question, 128), 74 + &nr_integers); 75 + KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); 76 + kfree(answers); 77 + } 78 + 79 + static void damon_dbgfs_test_set_targets(struct kunit *test) 80 + { 81 + struct damon_ctx *ctx = dbgfs_new_ctx(); 82 + unsigned long ids[] = {1, 2, 3}; 83 + char buf[64]; 84 + 85 + /* Make DAMON consider target id as plain number */ 86 + ctx->primitive.target_valid = NULL; 87 + ctx->primitive.cleanup = NULL; 88 + 89 + damon_set_targets(ctx, ids, 3); 90 + sprint_target_ids(ctx, buf, 64); 91 + KUNIT_EXPECT_STREQ(test, (char *)buf, "1 2 3\n"); 92 + 93 + damon_set_targets(ctx, NULL, 0); 94 + sprint_target_ids(ctx, buf, 64); 95 + KUNIT_EXPECT_STREQ(test, (char *)buf, "\n"); 96 + 97 + damon_set_targets(ctx, (unsigned long []){1, 2}, 2); 98 + sprint_target_ids(ctx, buf, 64); 99 + KUNIT_EXPECT_STREQ(test, (char *)buf, "1 2\n"); 100 + 101 + damon_set_targets(ctx, (unsigned long []){2}, 1); 102 + sprint_target_ids(ctx, buf, 64); 103 + KUNIT_EXPECT_STREQ(test, (char *)buf, "2\n"); 104 + 105 + damon_set_targets(ctx, NULL, 0); 106 + sprint_target_ids(ctx, buf, 64); 107 + KUNIT_EXPECT_STREQ(test, (char *)buf, "\n"); 108 + 109 + dbgfs_destroy_ctx(ctx); 110 + } 111 + 112 + static struct kunit_case damon_test_cases[] = { 113 + KUNIT_CASE(damon_dbgfs_test_str_to_target_ids), 114 + KUNIT_CASE(damon_dbgfs_test_set_targets), 115 + {}, 116 + }; 117 + 118 + static struct kunit_suite damon_test_suite = { 119 + .name = "damon-dbgfs", 120 + .test_cases = damon_test_cases, 121 + }; 122 + kunit_test_suite(damon_test_suite); 123 + 124 + #endif /* _DAMON_DBGFS_TEST_H */ 125 + 126 + #endif /* CONFIG_DAMON_DBGFS_KUNIT_TEST */
+2
mm/damon/dbgfs.c
··· 619 619 } 620 620 621 621 module_init(damon_dbgfs_init); 622 + 623 + #include "dbgfs-test.h"
+329
mm/damon/vaddr-test.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Data Access Monitor Unit Tests 4 + * 5 + * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved. 6 + * 7 + * Author: SeongJae Park <sjpark@amazon.de> 8 + */ 9 + 10 + #ifdef CONFIG_DAMON_VADDR_KUNIT_TEST 11 + 12 + #ifndef _DAMON_VADDR_TEST_H 13 + #define _DAMON_VADDR_TEST_H 14 + 15 + #include <kunit/test.h> 16 + 17 + static void __link_vmas(struct vm_area_struct *vmas, ssize_t nr_vmas) 18 + { 19 + int i, j; 20 + unsigned long largest_gap, gap; 21 + 22 + if (!nr_vmas) 23 + return; 24 + 25 + for (i = 0; i < nr_vmas - 1; i++) { 26 + vmas[i].vm_next = &vmas[i + 1]; 27 + 28 + vmas[i].vm_rb.rb_left = NULL; 29 + vmas[i].vm_rb.rb_right = &vmas[i + 1].vm_rb; 30 + 31 + largest_gap = 0; 32 + for (j = i; j < nr_vmas; j++) { 33 + if (j == 0) 34 + continue; 35 + gap = vmas[j].vm_start - vmas[j - 1].vm_end; 36 + if (gap > largest_gap) 37 + largest_gap = gap; 38 + } 39 + vmas[i].rb_subtree_gap = largest_gap; 40 + } 41 + vmas[i].vm_next = NULL; 42 + vmas[i].vm_rb.rb_right = NULL; 43 + vmas[i].rb_subtree_gap = 0; 44 + } 45 + 46 + /* 47 + * Test __damon_va_three_regions() function 48 + * 49 + * In case of virtual memory address spaces monitoring, DAMON converts the 50 + * complex and dynamic memory mappings of each target task to three 51 + * discontiguous regions which cover every mapped areas. However, the three 52 + * regions should not include the two biggest unmapped areas in the original 53 + * mapping, because the two biggest areas are normally the areas between 1) 54 + * heap and the mmap()-ed regions, and 2) the mmap()-ed regions and stack. 55 + * Because these two unmapped areas are very huge but obviously never accessed, 56 + * covering the region is just a waste. 57 + * 58 + * '__damon_va_three_regions() receives an address space of a process. It 59 + * first identifies the start of mappings, end of mappings, and the two biggest 60 + * unmapped areas. 
After that, based on the information, it constructs the 61 + * three regions and returns. For more detail, refer to the comment of 62 + * 'damon_init_regions_of()' function definition in 'mm/damon.c' file. 63 + * 64 + * For example, suppose virtual address ranges of 10-20, 20-25, 200-210, 65 + * 210-220, 300-305, and 307-330 (Other comments represent this mappings in 66 + * more short form: 10-20-25, 200-210-220, 300-305, 307-330) of a process are 67 + * mapped. To cover every mappings, the three regions should start with 10, 68 + * and end with 305. The process also has three unmapped areas, 25-200, 69 + * 220-300, and 305-307. Among those, 25-200 and 220-300 are the biggest two 70 + * unmapped areas, and thus it should be converted to three regions of 10-25, 71 + * 200-220, and 300-330. 72 + */ 73 + static void damon_test_three_regions_in_vmas(struct kunit *test) 74 + { 75 + struct damon_addr_range regions[3] = {0,}; 76 + /* 10-20-25, 200-210-220, 300-305, 307-330 */ 77 + struct vm_area_struct vmas[] = { 78 + (struct vm_area_struct) {.vm_start = 10, .vm_end = 20}, 79 + (struct vm_area_struct) {.vm_start = 20, .vm_end = 25}, 80 + (struct vm_area_struct) {.vm_start = 200, .vm_end = 210}, 81 + (struct vm_area_struct) {.vm_start = 210, .vm_end = 220}, 82 + (struct vm_area_struct) {.vm_start = 300, .vm_end = 305}, 83 + (struct vm_area_struct) {.vm_start = 307, .vm_end = 330}, 84 + }; 85 + 86 + __link_vmas(vmas, 6); 87 + 88 + __damon_va_three_regions(&vmas[0], regions); 89 + 90 + KUNIT_EXPECT_EQ(test, 10ul, regions[0].start); 91 + KUNIT_EXPECT_EQ(test, 25ul, regions[0].end); 92 + KUNIT_EXPECT_EQ(test, 200ul, regions[1].start); 93 + KUNIT_EXPECT_EQ(test, 220ul, regions[1].end); 94 + KUNIT_EXPECT_EQ(test, 300ul, regions[2].start); 95 + KUNIT_EXPECT_EQ(test, 330ul, regions[2].end); 96 + } 97 + 98 + static struct damon_region *__nth_region_of(struct damon_target *t, int idx) 99 + { 100 + struct damon_region *r; 101 + unsigned int i = 0; 102 + 103 + damon_for_each_region(r, 
t) { 104 + if (i++ == idx) 105 + return r; 106 + } 107 + 108 + return NULL; 109 + } 110 + 111 + /* 112 + * Test 'damon_va_apply_three_regions()' 113 + * 114 + * test kunit object 115 + * regions an array containing start/end addresses of current 116 + * monitoring target regions 117 + * nr_regions the number of the addresses in 'regions' 118 + * three_regions The three regions that need to be applied now 119 + * expected start/end addresses of monitoring target regions that 120 + * 'three_regions' are applied 121 + * nr_expected the number of addresses in 'expected' 122 + * 123 + * The memory mapping of the target processes changes dynamically. To follow 124 + * the change, DAMON periodically reads the mappings, simplifies it to the 125 + * three regions, and updates the monitoring target regions to fit in the three 126 + * regions. The update of current target regions is the role of 127 + * 'damon_va_apply_three_regions()'. 128 + * 129 + * This test passes the given target regions and the new three regions that 130 + * need to be applied to the function and check whether it updates the regions 131 + * as expected. 
132 + */ 133 + static void damon_do_test_apply_three_regions(struct kunit *test, 134 + unsigned long *regions, int nr_regions, 135 + struct damon_addr_range *three_regions, 136 + unsigned long *expected, int nr_expected) 137 + { 138 + struct damon_ctx *ctx = damon_new_ctx(); 139 + struct damon_target *t; 140 + struct damon_region *r; 141 + int i; 142 + 143 + t = damon_new_target(42); 144 + for (i = 0; i < nr_regions / 2; i++) { 145 + r = damon_new_region(regions[i * 2], regions[i * 2 + 1]); 146 + damon_add_region(r, t); 147 + } 148 + damon_add_target(ctx, t); 149 + 150 + damon_va_apply_three_regions(t, three_regions); 151 + 152 + for (i = 0; i < nr_expected / 2; i++) { 153 + r = __nth_region_of(t, i); 154 + KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]); 155 + KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]); 156 + } 157 + 158 + damon_destroy_ctx(ctx); 159 + } 160 + 161 + /* 162 + * This function test most common case where the three big regions are only 163 + * slightly changed. Target regions should adjust their boundary (10-20-30, 164 + * 50-55, 70-80, 90-100) to fit with the new big regions or remove target 165 + * regions (57-79) that now out of the three regions. 
166 + */ 167 + static void damon_test_apply_three_regions1(struct kunit *test) 168 + { 169 + /* 10-20-30, 50-55-57-59, 70-80-90-100 */ 170 + unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, 171 + 70, 80, 80, 90, 90, 100}; 172 + /* 5-27, 45-55, 73-104 */ 173 + struct damon_addr_range new_three_regions[3] = { 174 + (struct damon_addr_range){.start = 5, .end = 27}, 175 + (struct damon_addr_range){.start = 45, .end = 55}, 176 + (struct damon_addr_range){.start = 73, .end = 104} }; 177 + /* 5-20-27, 45-55, 73-80-90-104 */ 178 + unsigned long expected[] = {5, 20, 20, 27, 45, 55, 179 + 73, 80, 80, 90, 90, 104}; 180 + 181 + damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions), 182 + new_three_regions, expected, ARRAY_SIZE(expected)); 183 + } 184 + 185 + /* 186 + * Test slightly bigger change. Similar to above, but the second big region 187 + * now require two target regions (50-55, 57-59) to be removed. 188 + */ 189 + static void damon_test_apply_three_regions2(struct kunit *test) 190 + { 191 + /* 10-20-30, 50-55-57-59, 70-80-90-100 */ 192 + unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, 193 + 70, 80, 80, 90, 90, 100}; 194 + /* 5-27, 56-57, 65-104 */ 195 + struct damon_addr_range new_three_regions[3] = { 196 + (struct damon_addr_range){.start = 5, .end = 27}, 197 + (struct damon_addr_range){.start = 56, .end = 57}, 198 + (struct damon_addr_range){.start = 65, .end = 104} }; 199 + /* 5-20-27, 56-57, 65-80-90-104 */ 200 + unsigned long expected[] = {5, 20, 20, 27, 56, 57, 201 + 65, 80, 80, 90, 90, 104}; 202 + 203 + damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions), 204 + new_three_regions, expected, ARRAY_SIZE(expected)); 205 + } 206 + 207 + /* 208 + * Test a big change. The second big region has totally freed and mapped to 209 + * different area (50-59 -> 61-63). 
The target regions which were in the old 210 + second big region (50-55-57-59) should be removed and new target region 211 + covering the second big region (61-63) should be created. 212 + */ 213 + static void damon_test_apply_three_regions3(struct kunit *test) 214 + { 215 + /* 10-20-30, 50-55-57-59, 70-80-90-100 */ 216 + unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, 217 + 70, 80, 80, 90, 90, 100}; 218 + /* 5-27, 61-63, 65-104 */ 219 + struct damon_addr_range new_three_regions[3] = { 220 + (struct damon_addr_range){.start = 5, .end = 27}, 221 + (struct damon_addr_range){.start = 61, .end = 63}, 222 + (struct damon_addr_range){.start = 65, .end = 104} }; 223 + /* 5-20-27, 61-63, 65-80-90-104 */ 224 + unsigned long expected[] = {5, 20, 20, 27, 61, 63, 225 + 65, 80, 80, 90, 90, 104}; 226 + 227 + damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions), 228 + new_three_regions, expected, ARRAY_SIZE(expected)); 229 + } 230 + 231 + /* 232 + * Test another big change. Both of the second and third big regions (50-59 233 + * and 70-100) have been totally freed and mapped to a different area (30-32 and 234 + * 65-68). The target regions which were in the old second and third big 235 + * regions should now be removed and new target regions covering the new second 236 + * and third big regions should be created. 
237 + */ 238 + static void damon_test_apply_three_regions4(struct kunit *test) 239 + { 240 + /* 10-20-30, 50-55-57-59, 70-80-90-100 */ 241 + unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, 242 + 70, 80, 80, 90, 90, 100}; 243 + /* 5-7, 30-32, 65-68 */ 244 + struct damon_addr_range new_three_regions[3] = { 245 + (struct damon_addr_range){.start = 5, .end = 7}, 246 + (struct damon_addr_range){.start = 30, .end = 32}, 247 + (struct damon_addr_range){.start = 65, .end = 68} }; 248 + /* expect 5-7, 30-32, 65-68 */ 249 + unsigned long expected[] = {5, 7, 30, 32, 65, 68}; 250 + 251 + damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions), 252 + new_three_regions, expected, ARRAY_SIZE(expected)); 253 + } 254 + 255 + static void damon_test_split_evenly(struct kunit *test) 256 + { 257 + struct damon_ctx *c = damon_new_ctx(); 258 + struct damon_target *t; 259 + struct damon_region *r; 260 + unsigned long i; 261 + 262 + KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5), 263 + -EINVAL); 264 + 265 + t = damon_new_target(42); 266 + r = damon_new_region(0, 100); 267 + KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL); 268 + 269 + damon_add_region(r, t); 270 + KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0); 271 + KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u); 272 + 273 + i = 0; 274 + damon_for_each_region(r, t) { 275 + KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10); 276 + KUNIT_EXPECT_EQ(test, r->ar.end, i * 10); 277 + } 278 + damon_free_target(t); 279 + 280 + t = damon_new_target(42); 281 + r = damon_new_region(5, 59); 282 + damon_add_region(r, t); 283 + KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0); 284 + KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u); 285 + 286 + i = 0; 287 + damon_for_each_region(r, t) { 288 + if (i == 4) 289 + break; 290 + KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++); 291 + KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i); 292 + } 293 + KUNIT_EXPECT_EQ(test, 
r->ar.start, 5 + 10 * i); 294 + KUNIT_EXPECT_EQ(test, r->ar.end, 59ul); 295 + damon_free_target(t); 296 + 297 + t = damon_new_target(42); 298 + r = damon_new_region(5, 6); 299 + damon_add_region(r, t); 300 + KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL); 301 + KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u); 302 + 303 + damon_for_each_region(r, t) { 304 + KUNIT_EXPECT_EQ(test, r->ar.start, 5ul); 305 + KUNIT_EXPECT_EQ(test, r->ar.end, 6ul); 306 + } 307 + damon_free_target(t); 308 + damon_destroy_ctx(c); 309 + } 310 + 311 + static struct kunit_case damon_test_cases[] = { 312 + KUNIT_CASE(damon_test_three_regions_in_vmas), 313 + KUNIT_CASE(damon_test_apply_three_regions1), 314 + KUNIT_CASE(damon_test_apply_three_regions2), 315 + KUNIT_CASE(damon_test_apply_three_regions3), 316 + KUNIT_CASE(damon_test_apply_three_regions4), 317 + KUNIT_CASE(damon_test_split_evenly), 318 + {}, 319 + }; 320 + 321 + static struct kunit_suite damon_test_suite = { 322 + .name = "damon-primitives", 323 + .test_cases = damon_test_cases, 324 + }; 325 + kunit_test_suite(damon_test_suite); 326 + 327 + #endif /* _DAMON_VADDR_TEST_H */ 328 + 329 + #endif /* CONFIG_DAMON_VADDR_KUNIT_TEST */
+7
mm/damon/vaddr.c
··· 18 18 #include <linux/sched/mm.h> 19 19 #include <linux/slab.h> 20 20 21 + #ifdef CONFIG_DAMON_VADDR_KUNIT_TEST 22 + #undef DAMON_MIN_REGION 23 + #define DAMON_MIN_REGION 1 24 + #endif 25 + 21 26 /* Get a random number in [l, r) */ 22 27 #define damon_rand(l, r) (l + prandom_u32_max(r - l)) 23 28 ··· 668 663 ctx->primitive.target_valid = damon_va_target_valid; 669 664 ctx->primitive.cleanup = NULL; 670 665 } 666 + 667 + #include "vaddr-test.h"