Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: access_tracking_perf_test: Use MGLRU for access tracking

Use MGLRU's debugfs interface to do access tracking instead of
page_idle. The logic to use the page_idle bitmap is left in, as it is
useful for kernels that do not have MGLRU built in.

When MGLRU is enabled, page_idle will report pages as still idle even
after they have been accessed, because MGLRU does not necessarily clear
the Idle folio flag when accessing an idle page. The test therefore
does not fall back to page_idle when MGLRU is enabled, even if MGLRU is
otherwise not usable.

Aging pages with MGLRU is much faster than marking pages as idle with
page_idle.

Co-developed-by: Axel Rasmussen <axelrasmussen@google.com>
Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
Signed-off-by: James Houghton <jthoughton@google.com>
Link: https://lore.kernel.org/r/20250508184649.2576210-8-jthoughton@google.com
[sean: print parsed features, not raw string]
Signed-off-by: Sean Christopherson <seanjc@google.com>

authored by

James Houghton and committed by
Sean Christopherson
d166453e b11fcb51

+638 -26
+1
tools/testing/selftests/kvm/Makefile.kvm
··· 8 8 LIBKVM += lib/guest_modes.c 9 9 LIBKVM += lib/io.c 10 10 LIBKVM += lib/kvm_util.c 11 + LIBKVM += lib/lru_gen_util.c 11 12 LIBKVM += lib/memstress.c 12 13 LIBKVM += lib/guest_sprintf.c 13 14 LIBKVM += lib/rbtree.c
+199 -26
tools/testing/selftests/kvm/access_tracking_perf_test.c
··· 7 7 * This test measures the performance effects of KVM's access tracking. 8 8 * Access tracking is driven by the MMU notifiers test_young, clear_young, and 9 9 * clear_flush_young. These notifiers do not have a direct userspace API, 10 - * however the clear_young notifier can be triggered by marking a pages as idle 11 - * in /sys/kernel/mm/page_idle/bitmap. This test leverages that mechanism to 12 - * enable access tracking on guest memory. 10 + * however the clear_young notifier can be triggered either by 11 + * 1. marking a pages as idle in /sys/kernel/mm/page_idle/bitmap OR 12 + * 2. adding a new MGLRU generation using the lru_gen debugfs file. 13 + * This test leverages page_idle to enable access tracking on guest memory 14 + * unless MGLRU is enabled, in which case MGLRU is used. 13 15 * 14 16 * To measure performance this test runs a VM with a configurable number of 15 17 * vCPUs that each touch every page in disjoint regions of memory. Performance ··· 19 17 * predefined region. 20 18 * 21 19 * Note that a deterministic correctness test of access tracking is not possible 22 - * by using page_idle as it exists today. This is for a few reasons: 20 + * by using page_idle or MGLRU aging as it exists today. This is for a few 21 + * reasons: 23 22 * 24 - * 1. page_idle only issues clear_young notifiers, which lack a TLB flush. This 25 - * means subsequent guest accesses are not guaranteed to see page table 23 + * 1. page_idle and MGLRU only issue clear_young notifiers, which lack a TLB flush. 24 + * This means subsequent guest accesses are not guaranteed to see page table 26 25 * updates made by KVM until some time in the future. 27 26 * 28 27 * 2. page_idle only operates on LRU pages. 
Newly allocated pages are not ··· 51 48 #include "guest_modes.h" 52 49 #include "processor.h" 53 50 51 + #include "cgroup_util.h" 52 + #include "lru_gen_util.h" 53 + 54 + static const char *TEST_MEMCG_NAME = "access_tracking_perf_test"; 55 + 54 56 /* Global variable used to synchronize all of the vCPU threads. */ 55 57 static int iteration; 58 + 59 + /* The cgroup memory controller root. Needed for lru_gen-based aging. */ 60 + char cgroup_root[PATH_MAX]; 56 61 57 62 /* Defines what vCPU threads should do during a given iteration. */ 58 63 static enum { ··· 85 74 * too many idle pages are found. 86 75 */ 87 76 static int idle_pages_warn_only = -1; 77 + 78 + /* Whether or not to use MGLRU instead of page_idle for access tracking */ 79 + static bool use_lru_gen; 80 + 81 + /* Total number of pages to expect in the memcg after touching everything */ 82 + static long test_pages; 83 + 84 + /* Last generation we found the pages in */ 85 + static int lru_gen_last_gen = -1; 88 86 89 87 struct test_params { 90 88 /* The backing source for the region of memory. 
*/ ··· 153 133 "Set page_idle bits for PFN 0x%" PRIx64, pfn); 154 134 } 155 135 156 - static void mark_vcpu_memory_idle(struct kvm_vm *vm, 157 - struct memstress_vcpu_args *vcpu_args) 136 + static void too_many_idle_pages(long idle_pages, long total_pages, int vcpu_idx) 137 + { 138 + char prefix[18] = {}; 139 + 140 + if (vcpu_idx >= 0) 141 + snprintf(prefix, 18, "vCPU%d: ", vcpu_idx); 142 + 143 + TEST_ASSERT(idle_pages_warn_only, 144 + "%sToo many pages still idle (%lu out of %lu)", 145 + prefix, idle_pages, total_pages); 146 + 147 + printf("WARNING: %sToo many pages still idle (%lu out of %lu), " 148 + "this will affect performance results.\n", 149 + prefix, idle_pages, total_pages); 150 + } 151 + 152 + static void pageidle_mark_vcpu_memory_idle(struct kvm_vm *vm, 153 + struct memstress_vcpu_args *vcpu_args) 158 154 { 159 155 int vcpu_idx = vcpu_args->vcpu_idx; 160 156 uint64_t base_gva = vcpu_args->gva; ··· 224 188 * access tracking but low enough as to not make the test too brittle 225 189 * over time and across architectures. 226 190 */ 227 - if (still_idle >= pages / 10) { 228 - TEST_ASSERT(idle_pages_warn_only, 229 - "vCPU%d: Too many pages still idle (%lu out of %lu)", 230 - vcpu_idx, still_idle, pages); 231 - 232 - printf("WARNING: vCPU%d: Too many pages still idle (%lu out of %lu), " 233 - "this will affect performance results.\n", 234 - vcpu_idx, still_idle, pages); 235 - } 191 + if (still_idle >= pages / 10) 192 + too_many_idle_pages(still_idle, pages, 193 + overlap_memory_access ? -1 : vcpu_idx); 236 194 237 195 close(page_idle_fd); 238 196 close(pagemap_fd); 197 + } 198 + 199 + int find_generation(struct memcg_stats *stats, long total_pages) 200 + { 201 + /* 202 + * For finding the generation that contains our pages, use the same 203 + * 90% threshold that page_idle uses. 
204 + */ 205 + int gen = lru_gen_find_generation(stats, total_pages * 9 / 10); 206 + 207 + if (gen >= 0) 208 + return gen; 209 + 210 + if (!idle_pages_warn_only) { 211 + TEST_FAIL("Could not find a generation with 90%% of guest memory (%ld pages).", 212 + total_pages * 9 / 10); 213 + return gen; 214 + } 215 + 216 + /* 217 + * We couldn't find a generation with 90% of guest memory, which can 218 + * happen if access tracking is unreliable. Simply look for a majority 219 + * of pages. 220 + */ 221 + puts("WARNING: Couldn't find a generation with 90% of guest memory. " 222 + "Performance results may not be accurate."); 223 + gen = lru_gen_find_generation(stats, total_pages / 2); 224 + TEST_ASSERT(gen >= 0, 225 + "Could not find a generation with 50%% of guest memory (%ld pages).", 226 + total_pages / 2); 227 + return gen; 228 + } 229 + 230 + static void lru_gen_mark_memory_idle(struct kvm_vm *vm) 231 + { 232 + struct timespec ts_start; 233 + struct timespec ts_elapsed; 234 + struct memcg_stats stats; 235 + int new_gen; 236 + 237 + /* Make a new generation */ 238 + clock_gettime(CLOCK_MONOTONIC, &ts_start); 239 + lru_gen_do_aging(&stats, TEST_MEMCG_NAME); 240 + ts_elapsed = timespec_elapsed(ts_start); 241 + 242 + /* Check the generation again */ 243 + new_gen = find_generation(&stats, test_pages); 244 + 245 + /* 246 + * This function should only be invoked with newly-accessed pages, 247 + * so pages should always move to a newer generation. 248 + */ 249 + if (new_gen <= lru_gen_last_gen) { 250 + /* We did not move to a newer generation. 
*/ 251 + long idle_pages = lru_gen_sum_memcg_stats_for_gen(lru_gen_last_gen, 252 + &stats); 253 + 254 + too_many_idle_pages(min_t(long, idle_pages, test_pages), 255 + test_pages, -1); 256 + } 257 + pr_info("%-30s: %ld.%09lds\n", 258 + "Mark memory idle (lru_gen)", ts_elapsed.tv_sec, 259 + ts_elapsed.tv_nsec); 260 + lru_gen_last_gen = new_gen; 239 261 } 240 262 241 263 static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall) ··· 335 241 assert_ucall(vcpu, UCALL_SYNC); 336 242 break; 337 243 case ITERATION_MARK_IDLE: 338 - mark_vcpu_memory_idle(vm, vcpu_args); 244 + pageidle_mark_vcpu_memory_idle(vm, vcpu_args); 339 245 break; 340 246 } 341 247 ··· 387 293 388 294 static void mark_memory_idle(struct kvm_vm *vm, int nr_vcpus) 389 295 { 296 + if (use_lru_gen) 297 + return lru_gen_mark_memory_idle(vm); 298 + 390 299 /* 391 300 * Even though this parallelizes the work across vCPUs, this is still a 392 301 * very slow operation because page_idle forces the test to mark one pfn 393 - * at a time and the clear_young notifier serializes on the KVM MMU 302 + * at a time and the clear_young notifier may serialize on the KVM MMU 394 303 * lock. 395 304 */ 396 305 pr_debug("Marking VM memory idle (slow)...\n"); 397 306 iteration_work = ITERATION_MARK_IDLE; 398 - run_iteration(vm, nr_vcpus, "Mark memory idle"); 307 + run_iteration(vm, nr_vcpus, "Mark memory idle (page_idle)"); 399 308 } 400 309 401 310 static void run_test(enum vm_guest_mode mode, void *arg) ··· 410 313 vm = memstress_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1, 411 314 params->backing_src, !overlap_memory_access); 412 315 316 + /* 317 + * If guest_page_size is larger than the host's page size, the 318 + * guest (memstress) will only fault in a subset of the host's pages. 
319 + */ 320 + test_pages = params->nr_vcpus * params->vcpu_memory_bytes / 321 + max(memstress_args.guest_page_size, 322 + (uint64_t)getpagesize()); 323 + 413 324 memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main); 414 325 415 326 pr_info("\n"); 416 327 access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory"); 328 + 329 + if (use_lru_gen) { 330 + struct memcg_stats stats; 331 + 332 + /* 333 + * Do a page table scan now. Following initial population, aging 334 + * may not cause the pages to move to a newer generation. Do 335 + * an aging pass now so that future aging passes always move 336 + * pages to a newer generation. 337 + */ 338 + printf("Initial aging pass (lru_gen)\n"); 339 + lru_gen_do_aging(&stats, TEST_MEMCG_NAME); 340 + TEST_ASSERT(lru_gen_sum_memcg_stats(&stats) >= test_pages, 341 + "Not all pages accounted for (looking for %ld). " 342 + "Was the memcg set up correctly?", test_pages); 343 + access_memory(vm, nr_vcpus, ACCESS_WRITE, "Re-populating memory"); 344 + lru_gen_read_memcg_stats(&stats, TEST_MEMCG_NAME); 345 + lru_gen_last_gen = find_generation(&stats, test_pages); 346 + } 417 347 418 348 /* As a control, read and write to the populated memory first. 
*/ 419 349 access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to populated memory"); ··· 478 354 puts("Skipping idle page count sanity check, because NUMA balancing is enabled"); 479 355 return 1; 480 356 } 357 + return 0; 358 + } 481 359 360 + static int run_test_for_each_guest_mode(const char *cgroup, void *arg) 361 + { 362 + for_each_guest_mode(run_test, arg); 482 363 return 0; 483 364 } 484 365 ··· 512 383 exit(0); 513 384 } 514 385 386 + void destroy_cgroup(char *cg) 387 + { 388 + printf("Destroying cgroup: %s\n", cg); 389 + } 390 + 515 391 int main(int argc, char *argv[]) 516 392 { 517 393 struct test_params params = { ··· 524 390 .vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE, 525 391 .nr_vcpus = 1, 526 392 }; 393 + char *new_cg = NULL; 527 394 int page_idle_fd; 528 395 int opt; 529 396 ··· 559 424 } 560 425 } 561 426 562 - page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR); 563 - __TEST_REQUIRE(page_idle_fd >= 0, 564 - "CONFIG_IDLE_PAGE_TRACKING is not enabled"); 565 - close(page_idle_fd); 566 - 567 427 if (idle_pages_warn_only == -1) 568 428 idle_pages_warn_only = access_tracking_unreliable(); 569 429 570 - for_each_guest_mode(run_test, &params); 430 + if (lru_gen_usable()) { 431 + bool cg_created = true; 432 + int ret; 433 + 434 + puts("Using lru_gen for aging"); 435 + use_lru_gen = true; 436 + 437 + if (cg_find_controller_root(cgroup_root, sizeof(cgroup_root), "memory")) 438 + ksft_exit_skip("Cannot find memory cgroup controller\n"); 439 + 440 + new_cg = cg_name(cgroup_root, TEST_MEMCG_NAME); 441 + printf("Creating cgroup: %s\n", new_cg); 442 + if (cg_create(new_cg)) { 443 + if (errno == EEXIST) { 444 + printf("Found existing cgroup"); 445 + cg_created = false; 446 + } else { 447 + ksft_exit_skip("could not create new cgroup: %s\n", new_cg); 448 + } 449 + } 450 + 451 + /* 452 + * This will fork off a new process to run the test within 453 + * a new memcg, so we need to properly propagate the return 454 + * value up. 
455 + */ 456 + ret = cg_run(new_cg, &run_test_for_each_guest_mode, &params); 457 + if (cg_created) 458 + cg_destroy(new_cg); 459 + if (ret < 0) 460 + TEST_FAIL("child did not spawn or was abnormally killed"); 461 + if (ret) 462 + return ret; 463 + } else { 464 + page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR); 465 + __TEST_REQUIRE(page_idle_fd >= 0, 466 + "Couldn't open /sys/kernel/mm/page_idle/bitmap. " 467 + "Is CONFIG_IDLE_PAGE_TRACKING enabled?"); 468 + 469 + close(page_idle_fd); 470 + 471 + puts("Using page_idle for aging"); 472 + run_test_for_each_guest_mode(NULL, &params); 473 + } 571 474 572 475 return 0; 573 476 }
+51
tools/testing/selftests/kvm/include/lru_gen_util.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Tools for integrating with lru_gen, like parsing the lru_gen debugfs output. 4 + * 5 + * Copyright (C) 2025, Google LLC. 6 + */ 7 + #ifndef SELFTEST_KVM_LRU_GEN_UTIL_H 8 + #define SELFTEST_KVM_LRU_GEN_UTIL_H 9 + 10 + #include <inttypes.h> 11 + #include <limits.h> 12 + #include <stdlib.h> 13 + 14 + #include "test_util.h" 15 + 16 + #define MAX_NR_GENS 16 /* MAX_NR_GENS in include/linux/mmzone.h */ 17 + #define MAX_NR_NODES 4 /* Maximum number of nodes supported by the test */ 18 + 19 + #define LRU_GEN_DEBUGFS "/sys/kernel/debug/lru_gen" 20 + #define LRU_GEN_ENABLED_PATH "/sys/kernel/mm/lru_gen/enabled" 21 + #define LRU_GEN_ENABLED 1 22 + #define LRU_GEN_MM_WALK 2 23 + 24 + struct generation_stats { 25 + int gen; 26 + long age_ms; 27 + long nr_anon; 28 + long nr_file; 29 + }; 30 + 31 + struct node_stats { 32 + int node; 33 + int nr_gens; /* Number of populated gens entries. */ 34 + struct generation_stats gens[MAX_NR_GENS]; 35 + }; 36 + 37 + struct memcg_stats { 38 + unsigned long memcg_id; 39 + int nr_nodes; /* Number of populated nodes entries. */ 40 + struct node_stats nodes[MAX_NR_NODES]; 41 + }; 42 + 43 + void lru_gen_read_memcg_stats(struct memcg_stats *stats, const char *memcg); 44 + long lru_gen_sum_memcg_stats(const struct memcg_stats *stats); 45 + long lru_gen_sum_memcg_stats_for_gen(int gen, const struct memcg_stats *stats); 46 + void lru_gen_do_aging(struct memcg_stats *stats, const char *memcg); 47 + int lru_gen_find_generation(const struct memcg_stats *stats, 48 + unsigned long total_pages); 49 + bool lru_gen_usable(void); 50 + 51 + #endif /* SELFTEST_KVM_LRU_GEN_UTIL_H */
+387
tools/testing/selftests/kvm/lib/lru_gen_util.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (C) 2025, Google LLC. 4 + */ 5 + 6 + #include <time.h> 7 + 8 + #include "lru_gen_util.h" 9 + 10 + /* 11 + * Tracks state while we parse memcg lru_gen stats. The file we're parsing is 12 + * structured like this (some extra whitespace elided): 13 + * 14 + * memcg (id) (path) 15 + * node (id) 16 + * (gen_nr) (age_in_ms) (nr_anon_pages) (nr_file_pages) 17 + */ 18 + struct memcg_stats_parse_context { 19 + bool consumed; /* Whether or not this line was consumed */ 20 + /* Next parse handler to invoke */ 21 + void (*next_handler)(struct memcg_stats *stats, 22 + struct memcg_stats_parse_context *ctx, 23 + char *line); 24 + int current_node_idx; /* Current index in nodes array */ 25 + const char *name; /* The name of the memcg we're looking for */ 26 + }; 27 + 28 + static void memcg_stats_handle_searching(struct memcg_stats *stats, 29 + struct memcg_stats_parse_context *ctx, 30 + char *line); 31 + static void memcg_stats_handle_in_memcg(struct memcg_stats *stats, 32 + struct memcg_stats_parse_context *ctx, 33 + char *line); 34 + static void memcg_stats_handle_in_node(struct memcg_stats *stats, 35 + struct memcg_stats_parse_context *ctx, 36 + char *line); 37 + 38 + struct split_iterator { 39 + char *str; 40 + char *save; 41 + }; 42 + 43 + static char *split_next(struct split_iterator *it) 44 + { 45 + char *ret = strtok_r(it->str, " \t\n\r", &it->save); 46 + 47 + it->str = NULL; 48 + return ret; 49 + } 50 + 51 + static void memcg_stats_handle_searching(struct memcg_stats *stats, 52 + struct memcg_stats_parse_context *ctx, 53 + char *line) 54 + { 55 + struct split_iterator it = { .str = line }; 56 + char *prefix = split_next(&it); 57 + char *memcg_id = split_next(&it); 58 + char *memcg_name = split_next(&it); 59 + char *end; 60 + 61 + ctx->consumed = true; 62 + 63 + if (!prefix || strcmp("memcg", prefix)) 64 + return; /* Not a memcg line (maybe empty), skip */ 65 + 66 + TEST_ASSERT(memcg_id && memcg_name, 
67 + "malformed memcg line; no memcg id or memcg_name"); 68 + 69 + if (strcmp(memcg_name + 1, ctx->name)) 70 + return; /* Wrong memcg, skip */ 71 + 72 + /* Found it! */ 73 + 74 + stats->memcg_id = strtoul(memcg_id, &end, 10); 75 + TEST_ASSERT(*end == '\0', "malformed memcg id '%s'", memcg_id); 76 + if (!stats->memcg_id) 77 + return; /* Removed memcg? */ 78 + 79 + ctx->next_handler = memcg_stats_handle_in_memcg; 80 + } 81 + 82 + static void memcg_stats_handle_in_memcg(struct memcg_stats *stats, 83 + struct memcg_stats_parse_context *ctx, 84 + char *line) 85 + { 86 + struct split_iterator it = { .str = line }; 87 + char *prefix = split_next(&it); 88 + char *id = split_next(&it); 89 + long found_node_id; 90 + char *end; 91 + 92 + ctx->consumed = true; 93 + ctx->current_node_idx = -1; 94 + 95 + if (!prefix) 96 + return; /* Skip empty lines */ 97 + 98 + if (!strcmp("memcg", prefix)) { 99 + /* Memcg done, found next one; stop. */ 100 + ctx->next_handler = NULL; 101 + return; 102 + } else if (strcmp("node", prefix)) 103 + TEST_ASSERT(false, "found malformed line after 'memcg ...'," 104 + "token: '%s'", prefix); 105 + 106 + /* At this point we know we have a node line. Parse the ID. 
*/ 107 + 108 + TEST_ASSERT(id, "malformed node line; no node id"); 109 + 110 + found_node_id = strtol(id, &end, 10); 111 + TEST_ASSERT(*end == '\0', "malformed node id '%s'", id); 112 + 113 + ctx->current_node_idx = stats->nr_nodes++; 114 + TEST_ASSERT(ctx->current_node_idx < MAX_NR_NODES, 115 + "memcg has stats for too many nodes, max is %d", 116 + MAX_NR_NODES); 117 + stats->nodes[ctx->current_node_idx].node = found_node_id; 118 + 119 + ctx->next_handler = memcg_stats_handle_in_node; 120 + } 121 + 122 + static void memcg_stats_handle_in_node(struct memcg_stats *stats, 123 + struct memcg_stats_parse_context *ctx, 124 + char *line) 125 + { 126 + char *my_line = strdup(line); 127 + struct split_iterator it = { .str = my_line }; 128 + char *gen, *age, *nr_anon, *nr_file; 129 + struct node_stats *node_stats; 130 + struct generation_stats *gen_stats; 131 + char *end; 132 + 133 + TEST_ASSERT(it.str, "failed to copy input line"); 134 + 135 + gen = split_next(&it); 136 + 137 + if (!gen) 138 + goto out_consume; /* Skip empty lines */ 139 + 140 + if (!strcmp("memcg", gen) || !strcmp("node", gen)) { 141 + /* 142 + * Reached next memcg or node section. Don't consume, let the 143 + * other handler deal with this. 
144 + */ 145 + ctx->next_handler = memcg_stats_handle_in_memcg; 146 + goto out; 147 + } 148 + 149 + node_stats = &stats->nodes[ctx->current_node_idx]; 150 + TEST_ASSERT(node_stats->nr_gens < MAX_NR_GENS, 151 + "found too many generation lines; max is %d", 152 + MAX_NR_GENS); 153 + gen_stats = &node_stats->gens[node_stats->nr_gens++]; 154 + 155 + age = split_next(&it); 156 + nr_anon = split_next(&it); 157 + nr_file = split_next(&it); 158 + 159 + TEST_ASSERT(age && nr_anon && nr_file, 160 + "malformed generation line; not enough tokens"); 161 + 162 + gen_stats->gen = (int)strtol(gen, &end, 10); 163 + TEST_ASSERT(*end == '\0', "malformed generation number '%s'", gen); 164 + 165 + gen_stats->age_ms = strtol(age, &end, 10); 166 + TEST_ASSERT(*end == '\0', "malformed generation age '%s'", age); 167 + 168 + gen_stats->nr_anon = strtol(nr_anon, &end, 10); 169 + TEST_ASSERT(*end == '\0', "malformed anonymous page count '%s'", 170 + nr_anon); 171 + 172 + gen_stats->nr_file = strtol(nr_file, &end, 10); 173 + TEST_ASSERT(*end == '\0', "malformed file page count '%s'", nr_file); 174 + 175 + out_consume: 176 + ctx->consumed = true; 177 + out: 178 + free(my_line); 179 + } 180 + 181 + static void print_memcg_stats(const struct memcg_stats *stats, const char *name) 182 + { 183 + int node, gen; 184 + 185 + pr_debug("stats for memcg %s (id %lu):\n", name, stats->memcg_id); 186 + for (node = 0; node < stats->nr_nodes; ++node) { 187 + pr_debug("\tnode %d\n", stats->nodes[node].node); 188 + for (gen = 0; gen < stats->nodes[node].nr_gens; ++gen) { 189 + const struct generation_stats *gstats = 190 + &stats->nodes[node].gens[gen]; 191 + 192 + pr_debug("\t\tgen %d\tage_ms %ld" 193 + "\tnr_anon %ld\tnr_file %ld\n", 194 + gstats->gen, gstats->age_ms, gstats->nr_anon, 195 + gstats->nr_file); 196 + } 197 + } 198 + } 199 + 200 + /* Re-read lru_gen debugfs information for @memcg into @stats. 
*/ 201 + void lru_gen_read_memcg_stats(struct memcg_stats *stats, const char *memcg) 202 + { 203 + FILE *f; 204 + ssize_t read = 0; 205 + char *line = NULL; 206 + size_t bufsz; 207 + struct memcg_stats_parse_context ctx = { 208 + .next_handler = memcg_stats_handle_searching, 209 + .name = memcg, 210 + }; 211 + 212 + memset(stats, 0, sizeof(struct memcg_stats)); 213 + 214 + f = fopen(LRU_GEN_DEBUGFS, "r"); 215 + TEST_ASSERT(f, "fopen(%s) failed", LRU_GEN_DEBUGFS); 216 + 217 + while (ctx.next_handler && (read = getline(&line, &bufsz, f)) > 0) { 218 + ctx.consumed = false; 219 + 220 + do { 221 + ctx.next_handler(stats, &ctx, line); 222 + if (!ctx.next_handler) 223 + break; 224 + } while (!ctx.consumed); 225 + } 226 + 227 + if (read < 0 && !feof(f)) 228 + TEST_ASSERT(false, "getline(%s) failed", LRU_GEN_DEBUGFS); 229 + 230 + TEST_ASSERT(stats->memcg_id > 0, "Couldn't find memcg: %s\n" 231 + "Did the memcg get created in the proper mount?", 232 + memcg); 233 + if (line) 234 + free(line); 235 + TEST_ASSERT(!fclose(f), "fclose(%s) failed", LRU_GEN_DEBUGFS); 236 + 237 + print_memcg_stats(stats, memcg); 238 + } 239 + 240 + /* 241 + * Find all pages tracked by lru_gen for this memcg in generation @target_gen. 242 + * 243 + * If @target_gen is negative, look for all generations. 244 + */ 245 + long lru_gen_sum_memcg_stats_for_gen(int target_gen, 246 + const struct memcg_stats *stats) 247 + { 248 + int node, gen; 249 + long total_nr = 0; 250 + 251 + for (node = 0; node < stats->nr_nodes; ++node) { 252 + const struct node_stats *node_stats = &stats->nodes[node]; 253 + 254 + for (gen = 0; gen < node_stats->nr_gens; ++gen) { 255 + const struct generation_stats *gen_stats = 256 + &node_stats->gens[gen]; 257 + 258 + if (target_gen >= 0 && gen_stats->gen != target_gen) 259 + continue; 260 + 261 + total_nr += gen_stats->nr_anon + gen_stats->nr_file; 262 + } 263 + } 264 + 265 + return total_nr; 266 + } 267 + 268 + /* Find all pages tracked by lru_gen for this memcg. 
*/ 269 + long lru_gen_sum_memcg_stats(const struct memcg_stats *stats) 270 + { 271 + return lru_gen_sum_memcg_stats_for_gen(-1, stats); 272 + } 273 + 274 + /* 275 + * If lru_gen aging should force page table scanning. 276 + * 277 + * If you want to set this to false, you will need to do eviction 278 + * before doing extra aging passes. 279 + */ 280 + static const bool force_scan = true; 281 + 282 + static void run_aging_impl(unsigned long memcg_id, int node_id, int max_gen) 283 + { 284 + FILE *f = fopen(LRU_GEN_DEBUGFS, "w"); 285 + char *command; 286 + size_t sz; 287 + 288 + TEST_ASSERT(f, "fopen(%s) failed", LRU_GEN_DEBUGFS); 289 + sz = asprintf(&command, "+ %lu %d %d 1 %d\n", 290 + memcg_id, node_id, max_gen, force_scan); 291 + TEST_ASSERT(sz > 0, "creating aging command failed"); 292 + 293 + pr_debug("Running aging command: %s", command); 294 + if (fwrite(command, sizeof(char), sz, f) < sz) { 295 + TEST_ASSERT(false, "writing aging command %s to %s failed", 296 + command, LRU_GEN_DEBUGFS); 297 + } 298 + 299 + TEST_ASSERT(!fclose(f), "fclose(%s) failed", LRU_GEN_DEBUGFS); 300 + } 301 + 302 + void lru_gen_do_aging(struct memcg_stats *stats, const char *memcg) 303 + { 304 + int node, gen; 305 + 306 + pr_debug("lru_gen: invoking aging...\n"); 307 + 308 + /* Must read memcg stats to construct the proper aging command. */ 309 + lru_gen_read_memcg_stats(stats, memcg); 310 + 311 + for (node = 0; node < stats->nr_nodes; ++node) { 312 + int max_gen = 0; 313 + 314 + for (gen = 0; gen < stats->nodes[node].nr_gens; ++gen) { 315 + int this_gen = stats->nodes[node].gens[gen].gen; 316 + 317 + max_gen = max_gen > this_gen ? max_gen : this_gen; 318 + } 319 + 320 + run_aging_impl(stats->memcg_id, stats->nodes[node].node, 321 + max_gen); 322 + } 323 + 324 + /* Re-read so callers get updated information */ 325 + lru_gen_read_memcg_stats(stats, memcg); 326 + } 327 + 328 + /* 329 + * Find which generation contains at least @pages pages, assuming that 330 + * such a generation exists. 
331 + */ 332 + int lru_gen_find_generation(const struct memcg_stats *stats, 333 + unsigned long pages) 334 + { 335 + int node, gen, gen_idx, min_gen = INT_MAX, max_gen = -1; 336 + 337 + for (node = 0; node < stats->nr_nodes; ++node) 338 + for (gen_idx = 0; gen_idx < stats->nodes[node].nr_gens; 339 + ++gen_idx) { 340 + gen = stats->nodes[node].gens[gen_idx].gen; 341 + max_gen = gen > max_gen ? gen : max_gen; 342 + min_gen = gen < min_gen ? gen : min_gen; 343 + } 344 + 345 + for (gen = min_gen; gen <= max_gen; ++gen) 346 + /* See if this generation has enough pages. */ 347 + if (lru_gen_sum_memcg_stats_for_gen(gen, stats) > pages) 348 + return gen; 349 + 350 + return -1; 351 + } 352 + 353 + bool lru_gen_usable(void) 354 + { 355 + long required_features = LRU_GEN_ENABLED | LRU_GEN_MM_WALK; 356 + int lru_gen_fd, lru_gen_debug_fd; 357 + char mglru_feature_str[8] = {}; 358 + long mglru_features; 359 + 360 + lru_gen_fd = open(LRU_GEN_ENABLED_PATH, O_RDONLY); 361 + if (lru_gen_fd < 0) { 362 + puts("lru_gen: Could not open " LRU_GEN_ENABLED_PATH); 363 + return false; 364 + } 365 + if (read(lru_gen_fd, &mglru_feature_str, 7) < 7) { 366 + puts("lru_gen: Could not read from " LRU_GEN_ENABLED_PATH); 367 + close(lru_gen_fd); 368 + return false; 369 + } 370 + close(lru_gen_fd); 371 + 372 + mglru_features = strtol(mglru_feature_str, NULL, 16); 373 + if ((mglru_features & required_features) != required_features) { 374 + printf("lru_gen: missing features, got: 0x%lx, expected: 0x%lx\n", 375 + mglru_features, required_features); 376 + printf("lru_gen: Try 'echo 0x%lx > /sys/kernel/mm/lru_gen/enabled'\n", 377 + required_features); 378 + return false; 379 + } 380 + 381 + lru_gen_debug_fd = open(LRU_GEN_DEBUGFS, O_RDWR); 382 + __TEST_REQUIRE(lru_gen_debug_fd >= 0, 383 + "lru_gen: Could not open " LRU_GEN_DEBUGFS ", " 384 + "but lru_gen is enabled, so cannot use page_idle."); 385 + close(lru_gen_debug_fd); 386 + return true; 387 + }