Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'kvm-x86-selftests-6.16' of https://github.com/kvm-x86/linux into HEAD

KVM selftests changes for 6.16:

- Add support for SNP to the various SEV selftests.

- Add a selftest to verify fastops instructions via forced emulation.

- Add MGLRU support to the access tracking perf test.

+1275 -211
arch/x86/include/uapi/asm/kvm.h (+1)
···
 };
 
 /* Kept in sync with firmware values for simplicity. */
+#define KVM_SEV_PAGE_TYPE_INVALID		0x0
 #define KVM_SEV_SNP_PAGE_TYPE_NORMAL		0x1
 #define KVM_SEV_SNP_PAGE_TYPE_ZERO		0x3
 #define KVM_SEV_SNP_PAGE_TYPE_UNMEASURED	0x4

tools/arch/x86/include/uapi/asm/kvm.h (+1)
···
 };
 
 /* Kept in sync with firmware values for simplicity. */
+#define KVM_SEV_PAGE_TYPE_INVALID		0x0
 #define KVM_SEV_SNP_PAGE_TYPE_NORMAL		0x1
 #define KVM_SEV_SNP_PAGE_TYPE_ZERO		0x3
 #define KVM_SEV_SNP_PAGE_TYPE_UNMEASURED	0x4

tools/testing/selftests/cgroup/Makefile (+11 -10)
···
 LOCAL_HDRS += $(selfdir)/clone3/clone3_selftests.h $(selfdir)/pidfd/pidfd.h
 
 include ../lib.mk
+include lib/libcgroup.mk
 
-$(OUTPUT)/test_core: cgroup_util.c
-$(OUTPUT)/test_cpu: cgroup_util.c
-$(OUTPUT)/test_cpuset: cgroup_util.c
-$(OUTPUT)/test_freezer: cgroup_util.c
-$(OUTPUT)/test_hugetlb_memcg: cgroup_util.c
-$(OUTPUT)/test_kill: cgroup_util.c
-$(OUTPUT)/test_kmem: cgroup_util.c
-$(OUTPUT)/test_memcontrol: cgroup_util.c
-$(OUTPUT)/test_pids: cgroup_util.c
-$(OUTPUT)/test_zswap: cgroup_util.c
+$(OUTPUT)/test_core: $(LIBCGROUP_O)
+$(OUTPUT)/test_cpu: $(LIBCGROUP_O)
+$(OUTPUT)/test_cpuset: $(LIBCGROUP_O)
+$(OUTPUT)/test_freezer: $(LIBCGROUP_O)
+$(OUTPUT)/test_hugetlb_memcg: $(LIBCGROUP_O)
+$(OUTPUT)/test_kill: $(LIBCGROUP_O)
+$(OUTPUT)/test_kmem: $(LIBCGROUP_O)
+$(OUTPUT)/test_memcontrol: $(LIBCGROUP_O)
+$(OUTPUT)/test_pids: $(LIBCGROUP_O)
+$(OUTPUT)/test_zswap: $(LIBCGROUP_O)

tools/testing/selftests/cgroup/cgroup_util.c → tools/testing/selftests/cgroup/lib/cgroup_util.c (+30 -88)
···
 #include <unistd.h>
 
 #include "cgroup_util.h"
-#include "../clone3/clone3_selftests.h"
+#include "../../clone3/clone3_selftests.h"
 
 /* Returns read len on success, or -errno on failure. */
-static ssize_t read_text(const char *path, char *buf, size_t max_len)
+ssize_t read_text(const char *path, char *buf, size_t max_len)
 {
 	ssize_t len;
 	int fd;
···
 }
 
 /* Returns written len on success, or -errno on failure. */
-static ssize_t write_text(const char *path, char *buf, ssize_t len)
+ssize_t write_text(const char *path, char *buf, ssize_t len)
 {
 	int fd;
 
···
 	return cg_write(cgroup, control, buf);
 }
 
-int cg_find_unified_root(char *root, size_t len, bool *nsdelegate)
+static int cg_find_root(char *root, size_t len, const char *controller,
+			bool *nsdelegate)
 {
 	char buf[10 * PAGE_SIZE];
 	char *fs, *mount, *type, *options;
···
 		options = strtok(NULL, delim);
 		strtok(NULL, delim);
 		strtok(NULL, delim);
-
-		if (strcmp(type, "cgroup2") == 0) {
-			strncpy(root, mount, len);
-			if (nsdelegate)
-				*nsdelegate = !!strstr(options, "nsdelegate");
-			return 0;
+		if (strcmp(type, "cgroup") == 0) {
+			if (!controller || !strstr(options, controller))
+				continue;
+		} else if (strcmp(type, "cgroup2") == 0) {
+			if (controller &&
+			    cg_read_strstr(mount, "cgroup.controllers", controller))
+				continue;
+		} else {
+			continue;
 		}
+		strncpy(root, mount, len);
+
+		if (nsdelegate)
+			*nsdelegate = !!strstr(options, "nsdelegate");
+		return 0;
+
 	}
 
 	return -1;
+}
+
+int cg_find_controller_root(char *root, size_t len, const char *controller)
+{
+	return cg_find_root(root, len, controller, NULL);
+}
+
+int cg_find_unified_root(char *root, size_t len, bool *nsdelegate)
+{
+	return cg_find_root(root, len, NULL, nsdelegate);
 }
 
 int cg_create(const char *cgroup)
···
 	}
 
 	return pid;
-}
-
-int get_temp_fd(void)
-{
-	return open(".", O_TMPFILE | O_RDWR | O_EXCL);
-}
-
-int alloc_pagecache(int fd, size_t size)
-{
-	char buf[PAGE_SIZE];
-	struct stat st;
-	int i;
-
-	if (fstat(fd, &st))
-		goto cleanup;
-
-	size += st.st_size;
-
-	if (ftruncate(fd, size))
-		goto cleanup;
-
-	for (i = 0; i < size; i += sizeof(buf))
-		read(fd, buf, sizeof(buf));
-
-	return 0;
-
-cleanup:
-	return -1;
-}
-
-int alloc_anon(const char *cgroup, void *arg)
-{
-	size_t size = (unsigned long)arg;
-	char *buf, *ptr;
-
-	buf = malloc(size);
-	for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
-		*ptr = 0;
-
-	free(buf);
-	return 0;
-}
-
-int is_swap_enabled(void)
-{
-	char buf[PAGE_SIZE];
-	const char delim[] = "\n";
-	int cnt = 0;
-	char *line;
-
-	if (read_text("/proc/swaps", buf, sizeof(buf)) <= 0)
-		return -1;
-
-	for (line = strtok(buf, delim); line; line = strtok(NULL, delim))
-		cnt++;
-
-	return cnt > 1;
-}
-
-int set_oom_adj_score(int pid, int score)
-{
-	char path[PATH_MAX];
-	int fd, len;
-
-	sprintf(path, "/proc/%d/oom_score_adj", pid);
-
-	fd = open(path, O_WRONLY | O_APPEND);
-	if (fd < 0)
-		return fd;
-
-	len = dprintf(fd, "%d", score);
-	if (len < 0) {
-		close(fd);
-		return len;
-	}
-
-	close(fd);
-	return 0;
 }
 
 int proc_mount_contains(const char *option)

tools/testing/selftests/cgroup/cgroup_util.h → tools/testing/selftests/cgroup/lib/include/cgroup_util.h (+6 -7)
···
 #include <stdbool.h>
 #include <stdlib.h>
 
-#include "../kselftest.h"
-
+#ifndef PAGE_SIZE
 #define PAGE_SIZE 4096
+#endif
 
 #define MB(x) (x << 20)
···
 	return labs(a - b) <= (a + b) / 100 * err;
 }
 
+extern ssize_t read_text(const char *path, char *buf, size_t max_len);
+extern ssize_t write_text(const char *path, char *buf, ssize_t len);
+
+extern int cg_find_controller_root(char *root, size_t len, const char *controller);
 extern int cg_find_unified_root(char *root, size_t len, bool *nsdelegate);
 extern char *cg_name(const char *root, const char *name);
 extern char *cg_name_indexed(const char *root, const char *name, int index);
···
 extern int cg_run_nowait(const char *cgroup,
 			 int (*fn)(const char *cgroup, void *arg),
 			 void *arg);
-extern int get_temp_fd(void);
-extern int alloc_pagecache(int fd, size_t size);
-extern int alloc_anon(const char *cgroup, void *arg);
-extern int is_swap_enabled(void);
-extern int set_oom_adj_score(int pid, int score);
 extern int cg_wait_for_proc_count(const char *cgroup, int count);
 extern int cg_killall(const char *cgroup);
 int proc_mount_contains(const char *option);

tools/testing/selftests/cgroup/lib/libcgroup.mk (new file, +19)
+CGROUP_DIR := $(selfdir)/cgroup
+
+LIBCGROUP_C := lib/cgroup_util.c
+
+LIBCGROUP_O := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBCGROUP_C))
+
+LIBCGROUP_O_DIRS := $(shell dirname $(LIBCGROUP_O) | uniq)
+
+CFLAGS += -I$(CGROUP_DIR)/lib/include
+
+EXTRA_HDRS := $(selfdir)/clone3/clone3_selftests.h
+
+$(LIBCGROUP_O_DIRS):
+	mkdir -p $@
+
+$(LIBCGROUP_O): $(OUTPUT)/%.o : $(CGROUP_DIR)/%.c $(EXTRA_HDRS) $(LIBCGROUP_O_DIRS)
+	$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
+
+EXTRA_CLEAN += $(LIBCGROUP_O)

tools/testing/selftests/cgroup/test_memcontrol.c (+78)
···
 static bool has_localevents;
 static bool has_recursiveprot;
 
+int get_temp_fd(void)
+{
+	return open(".", O_TMPFILE | O_RDWR | O_EXCL);
+}
+
+int alloc_pagecache(int fd, size_t size)
+{
+	char buf[PAGE_SIZE];
+	struct stat st;
+	int i;
+
+	if (fstat(fd, &st))
+		goto cleanup;
+
+	size += st.st_size;
+
+	if (ftruncate(fd, size))
+		goto cleanup;
+
+	for (i = 0; i < size; i += sizeof(buf))
+		read(fd, buf, sizeof(buf));
+
+	return 0;
+
+cleanup:
+	return -1;
+}
+
+int alloc_anon(const char *cgroup, void *arg)
+{
+	size_t size = (unsigned long)arg;
+	char *buf, *ptr;
+
+	buf = malloc(size);
+	for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
+		*ptr = 0;
+
+	free(buf);
+	return 0;
+}
+
+int is_swap_enabled(void)
+{
+	char buf[PAGE_SIZE];
+	const char delim[] = "\n";
+	int cnt = 0;
+	char *line;
+
+	if (read_text("/proc/swaps", buf, sizeof(buf)) <= 0)
+		return -1;
+
+	for (line = strtok(buf, delim); line; line = strtok(NULL, delim))
+		cnt++;
+
+	return cnt > 1;
+}
+
+int set_oom_adj_score(int pid, int score)
+{
+	char path[PATH_MAX];
+	int fd, len;
+
+	sprintf(path, "/proc/%d/oom_score_adj", pid);
+
+	fd = open(path, O_WRONLY | O_APPEND);
+	if (fd < 0)
+		return fd;
+
+	len = dprintf(fd, "%d", score);
+	if (len < 0) {
+		close(fd);
+		return len;
+	}
+
+	close(fd);
+	return 0;
+}
+
 /*
  * This test creates two nested cgroups with and without enabling
  * the memory controller.

tools/testing/selftests/kvm/Makefile.kvm (+4 -1)
···
 LIBKVM += lib/guest_modes.c
 LIBKVM += lib/io.c
 LIBKVM += lib/kvm_util.c
+LIBKVM += lib/lru_gen_util.c
 LIBKVM += lib/memstress.c
 LIBKVM += lib/guest_sprintf.c
 LIBKVM += lib/rbtree.c
···
 TEST_GEN_PROGS_x86 += x86/dirty_log_page_splitting_test
 TEST_GEN_PROGS_x86 += x86/feature_msrs_test
 TEST_GEN_PROGS_x86 += x86/exit_on_emulation_failure_test
+TEST_GEN_PROGS_x86 += x86/fastops_test
 TEST_GEN_PROGS_x86 += x86/fix_hypercall_test
 TEST_GEN_PROGS_x86 += x86/hwcr_msr_test
 TEST_GEN_PROGS_x86 += x86/hyperv_clock
···
 # importantly defines, i.e. overwrites, $(CC) (unless `make -e` or `make CC=`,
 # which causes the environment variable to override the makefile).
 include ../lib.mk
+include ../cgroup/lib/libcgroup.mk
 
 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
···
 LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
 LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
 LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
-LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
+LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ) $(LIBCGROUP_O)
 SPLIT_TEST_GEN_PROGS := $(patsubst %, $(OUTPUT)/%, $(SPLIT_TESTS))
 SPLIT_TEST_GEN_OBJ := $(patsubst %, $(OUTPUT)/$(ARCH)/%.o, $(SPLIT_TESTS))

tools/testing/selftests/kvm/access_tracking_perf_test.c (+249 -32)
···
  * This test measures the performance effects of KVM's access tracking.
  * Access tracking is driven by the MMU notifiers test_young, clear_young, and
  * clear_flush_young. These notifiers do not have a direct userspace API,
- * however the clear_young notifier can be triggered by marking a pages as idle
- * in /sys/kernel/mm/page_idle/bitmap. This test leverages that mechanism to
- * enable access tracking on guest memory.
+ * however the clear_young notifier can be triggered either by
+ * 1. marking a pages as idle in /sys/kernel/mm/page_idle/bitmap OR
+ * 2. adding a new MGLRU generation using the lru_gen debugfs file.
+ * This test leverages page_idle to enable access tracking on guest memory
+ * unless MGLRU is enabled, in which case MGLRU is used.
  *
  * To measure performance this test runs a VM with a configurable number of
  * vCPUs that each touch every page in disjoint regions of memory. Performance
···
  * predefined region.
  *
  * Note that a deterministic correctness test of access tracking is not possible
- * by using page_idle as it exists today. This is for a few reasons:
+ * by using page_idle or MGLRU aging as it exists today. This is for a few
+ * reasons:
  *
- * 1. page_idle only issues clear_young notifiers, which lack a TLB flush. This
- *    means subsequent guest accesses are not guaranteed to see page table
+ * 1. page_idle and MGLRU only issue clear_young notifiers, which lack a TLB flush.
+ *    This means subsequent guest accesses are not guaranteed to see page table
  *    updates made by KVM until some time in the future.
  *
  * 2. page_idle only operates on LRU pages. Newly allocated pages are not
···
 #include "guest_modes.h"
 #include "processor.h"
 
+#include "cgroup_util.h"
+#include "lru_gen_util.h"
+
+static const char *TEST_MEMCG_NAME = "access_tracking_perf_test";
+
 /* Global variable used to synchronize all of the vCPU threads. */
 static int iteration;
+
+/* The cgroup memory controller root. Needed for lru_gen-based aging. */
+char cgroup_root[PATH_MAX];
 
 /* Defines what vCPU threads should do during a given iteration. */
 static enum {
···
 
 /* Whether to overlap the regions of memory vCPUs access. */
 static bool overlap_memory_access;
+
+/*
+ * If the test should only warn if there are too many idle pages (i.e., it is
+ * expected).
+ * -1: Not yet set.
+ *  0: We do not expect too many idle pages, so FAIL if too many idle pages.
+ *  1: Having too many idle pages is expected, so merely print a warning if
+ *     too many idle pages are found.
+ */
+static int idle_pages_warn_only = -1;
+
+/* Whether or not to use MGLRU instead of page_idle for access tracking */
+static bool use_lru_gen;
+
+/* Total number of pages to expect in the memcg after touching everything */
+static long test_pages;
+
+/* Last generation we found the pages in */
+static int lru_gen_last_gen = -1;
 
 struct test_params {
 	/* The backing source for the region of memory. */
···
 		    "Set page_idle bits for PFN 0x%" PRIx64, pfn);
 }
 
-static void mark_vcpu_memory_idle(struct kvm_vm *vm,
-				  struct memstress_vcpu_args *vcpu_args)
+static void too_many_idle_pages(long idle_pages, long total_pages, int vcpu_idx)
+{
+	char prefix[18] = {};
+
+	if (vcpu_idx >= 0)
+		snprintf(prefix, 18, "vCPU%d: ", vcpu_idx);
+
+	TEST_ASSERT(idle_pages_warn_only,
+		    "%sToo many pages still idle (%lu out of %lu)",
+		    prefix, idle_pages, total_pages);
+
+	printf("WARNING: %sToo many pages still idle (%lu out of %lu), "
+	       "this will affect performance results.\n",
+	       prefix, idle_pages, total_pages);
+}
+
+static void pageidle_mark_vcpu_memory_idle(struct kvm_vm *vm,
+					   struct memstress_vcpu_args *vcpu_args)
 {
 	int vcpu_idx = vcpu_args->vcpu_idx;
 	uint64_t base_gva = vcpu_args->gva;
···
 	 * arbitrary; high enough that we ensure most memory access went through
 	 * access tracking but low enough as to not make the test too brittle
 	 * over time and across architectures.
-	 *
-	 * When running the guest as a nested VM, "warn" instead of asserting
-	 * as the TLB size is effectively unlimited and the KVM doesn't
-	 * explicitly flush the TLB when aging SPTEs.  As a result, more pages
-	 * are cached and the guest won't see the "idle" bit cleared.
 	 */
-	if (still_idle >= pages / 10) {
-#ifdef __x86_64__
-		TEST_ASSERT(this_cpu_has(X86_FEATURE_HYPERVISOR),
-			    "vCPU%d: Too many pages still idle (%lu out of %lu)",
-			    vcpu_idx, still_idle, pages);
-#endif
-		printf("WARNING: vCPU%d: Too many pages still idle (%lu out of %lu), "
-		       "this will affect performance results.\n",
-		       vcpu_idx, still_idle, pages);
-	}
+	if (still_idle >= pages / 10)
+		too_many_idle_pages(still_idle, pages,
+				    overlap_memory_access ? -1 : vcpu_idx);
 
 	close(page_idle_fd);
 	close(pagemap_fd);
+}
+
+int find_generation(struct memcg_stats *stats, long total_pages)
+{
+	/*
+	 * For finding the generation that contains our pages, use the same
+	 * 90% threshold that page_idle uses.
+	 */
+	int gen = lru_gen_find_generation(stats, total_pages * 9 / 10);
+
+	if (gen >= 0)
+		return gen;
+
+	if (!idle_pages_warn_only) {
+		TEST_FAIL("Could not find a generation with 90%% of guest memory (%ld pages).",
+			  total_pages * 9 / 10);
+		return gen;
+	}
+
+	/*
+	 * We couldn't find a generation with 90% of guest memory, which can
+	 * happen if access tracking is unreliable. Simply look for a majority
+	 * of pages.
+	 */
+	puts("WARNING: Couldn't find a generation with 90% of guest memory. "
+	     "Performance results may not be accurate.");
+	gen = lru_gen_find_generation(stats, total_pages / 2);
+	TEST_ASSERT(gen >= 0,
+		    "Could not find a generation with 50%% of guest memory (%ld pages).",
+		    total_pages / 2);
+	return gen;
+}
+
+static void lru_gen_mark_memory_idle(struct kvm_vm *vm)
+{
+	struct timespec ts_start;
+	struct timespec ts_elapsed;
+	struct memcg_stats stats;
+	int new_gen;
+
+	/* Make a new generation */
+	clock_gettime(CLOCK_MONOTONIC, &ts_start);
+	lru_gen_do_aging(&stats, TEST_MEMCG_NAME);
+	ts_elapsed = timespec_elapsed(ts_start);
+
+	/* Check the generation again */
+	new_gen = find_generation(&stats, test_pages);
+
+	/*
+	 * This function should only be invoked with newly-accessed pages,
+	 * so pages should always move to a newer generation.
+	 */
+	if (new_gen <= lru_gen_last_gen) {
+		/* We did not move to a newer generation. */
+		long idle_pages = lru_gen_sum_memcg_stats_for_gen(lru_gen_last_gen,
+								  &stats);
+
+		too_many_idle_pages(min_t(long, idle_pages, test_pages),
+				    test_pages, -1);
+	}
+	pr_info("%-30s: %ld.%09lds\n",
+		"Mark memory idle (lru_gen)", ts_elapsed.tv_sec,
+		ts_elapsed.tv_nsec);
+	lru_gen_last_gen = new_gen;
 }
 
 static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall)
···
 		assert_ucall(vcpu, UCALL_SYNC);
 		break;
 	case ITERATION_MARK_IDLE:
-		mark_vcpu_memory_idle(vm, vcpu_args);
+		pageidle_mark_vcpu_memory_idle(vm, vcpu_args);
 		break;
 	}
 
···
 
 static void mark_memory_idle(struct kvm_vm *vm, int nr_vcpus)
 {
+	if (use_lru_gen)
+		return lru_gen_mark_memory_idle(vm);
+
 	/*
 	 * Even though this parallelizes the work across vCPUs, this is still a
 	 * very slow operation because page_idle forces the test to mark one pfn
-	 * at a time and the clear_young notifier serializes on the KVM MMU
+	 * at a time and the clear_young notifier may serialize on the KVM MMU
 	 * lock.
 	 */
 	pr_debug("Marking VM memory idle (slow)...\n");
 	iteration_work = ITERATION_MARK_IDLE;
-	run_iteration(vm, nr_vcpus, "Mark memory idle");
+	run_iteration(vm, nr_vcpus, "Mark memory idle (page_idle)");
 }
 
 static void run_test(enum vm_guest_mode mode, void *arg)
···
 	vm = memstress_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
 				 params->backing_src, !overlap_memory_access);
 
+	/*
+	 * If guest_page_size is larger than the host's page size, the
+	 * guest (memstress) will only fault in a subset of the host's pages.
+	 */
+	test_pages = params->nr_vcpus * params->vcpu_memory_bytes /
+		     max(memstress_args.guest_page_size,
+			 (uint64_t)getpagesize());
+
 	memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
 
 	pr_info("\n");
 	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");
+
+	if (use_lru_gen) {
+		struct memcg_stats stats;
+
+		/*
+		 * Do a page table scan now. Following initial population, aging
+		 * may not cause the pages to move to a newer generation. Do
+		 * an aging pass now so that future aging passes always move
+		 * pages to a newer generation.
+		 */
+		printf("Initial aging pass (lru_gen)\n");
+		lru_gen_do_aging(&stats, TEST_MEMCG_NAME);
+		TEST_ASSERT(lru_gen_sum_memcg_stats(&stats) >= test_pages,
+			    "Not all pages accounted for (looking for %ld). "
+			    "Was the memcg set up correctly?", test_pages);
+		access_memory(vm, nr_vcpus, ACCESS_WRITE, "Re-populating memory");
+		lru_gen_read_memcg_stats(&stats, TEST_MEMCG_NAME);
+		lru_gen_last_gen = find_generation(&stats, test_pages);
+	}
 
 	/* As a control, read and write to the populated memory first. */
 	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to populated memory");
···
 
 	memstress_join_vcpu_threads(nr_vcpus);
 	memstress_destroy_vm(vm);
+}
+
+static int access_tracking_unreliable(void)
+{
+#ifdef __x86_64__
+	/*
+	 * When running nested, the TLB size may be effectively unlimited (for
+	 * example, this is the case when running on KVM L0), and KVM doesn't
+	 * explicitly flush the TLB when aging SPTEs. As a result, more pages
+	 * are cached and the guest won't see the "idle" bit cleared.
+	 */
+	if (this_cpu_has(X86_FEATURE_HYPERVISOR)) {
+		puts("Skipping idle page count sanity check, because the test is run nested");
+		return 1;
+	}
+#endif
+	/*
+	 * When NUMA balancing is enabled, guest memory will be unmapped to get
+	 * NUMA faults, dropping the Accessed bits.
+	 */
+	if (is_numa_balancing_enabled()) {
+		puts("Skipping idle page count sanity check, because NUMA balancing is enabled");
+		return 1;
+	}
+	return 0;
+}
+
+static int run_test_for_each_guest_mode(const char *cgroup, void *arg)
+{
+	for_each_guest_mode(run_test, arg);
+	return 0;
 }
 
 static void help(char *name)
···
 	printf(" -v: specify the number of vCPUs to run.\n");
 	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
 	       "     them into a separate region of memory for each vCPU.\n");
+	printf(" -w: Control whether the test warns or fails if more than 10%%\n"
+	       "     of pages are still seen as idle/old after accessing guest\n"
+	       "     memory.  >0 == warn only, 0 == fail, <0 == auto.  For auto\n"
+	       "     mode, the test fails by default, but switches to warn only\n"
+	       "     if NUMA balancing is enabled or the test detects it's running\n"
+	       "     in a VM.\n");
 	backing_src_help("-s");
 	puts("");
 	exit(0);
+}
+
+void destroy_cgroup(char *cg)
+{
+	printf("Destroying cgroup: %s\n", cg);
 }
 
 int main(int argc, char *argv[])
···
 		.vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
 		.nr_vcpus = 1,
 	};
+	char *new_cg = NULL;
 	int page_idle_fd;
 	int opt;
 
 	guest_modes_append_default();
 
-	while ((opt = getopt(argc, argv, "hm:b:v:os:")) != -1) {
+	while ((opt = getopt(argc, argv, "hm:b:v:os:w:")) != -1) {
 		switch (opt) {
 		case 'm':
 			guest_modes_cmdline(optarg);
···
 		case 's':
 			params.backing_src = parse_backing_src_type(optarg);
 			break;
+		case 'w':
+			idle_pages_warn_only =
+				atoi_non_negative("Idle pages warning",
+						  optarg);
+			break;
 		case 'h':
 		default:
 			help(argv[0]);
···
 	}
 
-	page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
-	__TEST_REQUIRE(page_idle_fd >= 0,
-		       "CONFIG_IDLE_PAGE_TRACKING is not enabled");
-	close(page_idle_fd);
+	if (idle_pages_warn_only == -1)
+		idle_pages_warn_only = access_tracking_unreliable();
 
-	for_each_guest_mode(run_test, &params);
+	if (lru_gen_usable()) {
+		bool cg_created = true;
+		int ret;
+
+		puts("Using lru_gen for aging");
+		use_lru_gen = true;
+
+		if (cg_find_controller_root(cgroup_root, sizeof(cgroup_root), "memory"))
+			ksft_exit_skip("Cannot find memory cgroup controller\n");
+
+		new_cg = cg_name(cgroup_root, TEST_MEMCG_NAME);
+		printf("Creating cgroup: %s\n", new_cg);
+		if (cg_create(new_cg)) {
+			if (errno == EEXIST) {
+				printf("Found existing cgroup");
+				cg_created = false;
+			} else {
+				ksft_exit_skip("could not create new cgroup: %s\n", new_cg);
+			}
+		}
+
+		/*
+		 * This will fork off a new process to run the test within
+		 * a new memcg, so we need to properly propagate the return
+		 * value up.
+		 */
+		ret = cg_run(new_cg, &run_test_for_each_guest_mode, &params);
+		if (cg_created)
+			cg_destroy(new_cg);
+		if (ret < 0)
+			TEST_FAIL("child did not spawn or was abnormally killed");
+		if (ret)
+			return ret;
+	} else {
+		page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
+		__TEST_REQUIRE(page_idle_fd >= 0,
+			       "Couldn't open /sys/kernel/mm/page_idle/bitmap. "
+			       "Is CONFIG_IDLE_PAGE_TRACKING enabled?");
+
+		close(page_idle_fd);
+
+		puts("Using page_idle for aging");
+		run_test_for_each_guest_mode(NULL, &params);
+	}
 
 	return 0;
 }

tools/testing/selftests/kvm/include/kvm_util.h (+35)
···
 #define vm_get_stat(vm, stat)		__get_stat(&(vm)->stats, stat)
 #define vcpu_get_stat(vcpu, stat)	__get_stat(&(vcpu)->stats, stat)
 
+static inline bool read_smt_control(char *buf, size_t buf_size)
+{
+	FILE *f = fopen("/sys/devices/system/cpu/smt/control", "r");
+	bool ret;
+
+	if (!f)
+		return false;
+
+	ret = fread(buf, sizeof(*buf), buf_size, f) > 0;
+	fclose(f);
+
+	return ret;
+}
+
+static inline bool is_smt_possible(void)
+{
+	char buf[16];
+
+	if (read_smt_control(buf, sizeof(buf)) &&
+	    (!strncmp(buf, "forceoff", 8) || !strncmp(buf, "notsupported", 12)))
+		return false;
+
+	return true;
+}
+
+static inline bool is_smt_on(void)
+{
+	char buf[16];
+
+	if (read_smt_control(buf, sizeof(buf)) && !strncmp(buf, "on", 2))
+		return true;
+
+	return false;
+}
+
 void vm_create_irqchip(struct kvm_vm *vm);
 
 static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,

tools/testing/selftests/kvm/include/lru_gen_util.h (new file, +51)
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Tools for integrating with lru_gen, like parsing the lru_gen debugfs output.
+ *
+ * Copyright (C) 2025, Google LLC.
+ */
+#ifndef SELFTEST_KVM_LRU_GEN_UTIL_H
+#define SELFTEST_KVM_LRU_GEN_UTIL_H
+
+#include <inttypes.h>
+#include <limits.h>
+#include <stdlib.h>
+
+#include "test_util.h"
+
+#define MAX_NR_GENS 16 /* MAX_NR_GENS in include/linux/mmzone.h */
+#define MAX_NR_NODES 4 /* Maximum number of nodes supported by the test */
+
+#define LRU_GEN_DEBUGFS "/sys/kernel/debug/lru_gen"
+#define LRU_GEN_ENABLED_PATH "/sys/kernel/mm/lru_gen/enabled"
+#define LRU_GEN_ENABLED 1
+#define LRU_GEN_MM_WALK 2
+
+struct generation_stats {
+	int gen;
+	long age_ms;
+	long nr_anon;
+	long nr_file;
+};
+
+struct node_stats {
+	int node;
+	int nr_gens; /* Number of populated gens entries. */
+	struct generation_stats gens[MAX_NR_GENS];
+};
+
+struct memcg_stats {
+	unsigned long memcg_id;
+	int nr_nodes; /* Number of populated nodes entries. */
+	struct node_stats nodes[MAX_NR_NODES];
+};
+
+void lru_gen_read_memcg_stats(struct memcg_stats *stats, const char *memcg);
+long lru_gen_sum_memcg_stats(const struct memcg_stats *stats);
+long lru_gen_sum_memcg_stats_for_gen(int gen, const struct memcg_stats *stats);
+void lru_gen_do_aging(struct memcg_stats *stats, const char *memcg);
+int lru_gen_find_generation(const struct memcg_stats *stats,
+			    unsigned long total_pages);
+bool lru_gen_usable(void);
+
+#endif /* SELFTEST_KVM_LRU_GEN_UTIL_H */

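Editorial note: a minimal sketch (not part of the commit) of how a caller is expected to drive this API; the function name, memcg name, and page count below are hypothetical, and the real consumer is access_tracking_perf_test.c further down this page.

	#include "lru_gen_util.h"

	/* Hypothetical driver for the lru_gen_util API declared above. */
	static void age_guest_memory(const char *memcg, long total_pages)
	{
		struct memcg_stats stats;
		int gen;

		/* Requires LRU_GEN_ENABLED and LRU_GEN_MM_WALK in lru_gen/enabled. */
		if (!lru_gen_usable())
			return;	/* Caller would fall back to page_idle. */

		/* Write an aging command per node, then re-read the debugfs stats. */
		lru_gen_do_aging(&stats, memcg);

		/* Locate the generation holding ~90% of the test's pages. */
		gen = lru_gen_find_generation(&stats, total_pages * 9 / 10);
		if (gen >= 0)
			pr_info("pages aged into generation %d\n", gen);
	}
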
tools/testing/selftests/kvm/include/test_util.h (+1)
···
 void backing_src_help(const char *flag);
 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
 long get_run_delay(void);
+bool is_numa_balancing_enabled(void);
 
 /*
  * Whether or not the given source type is shared memory (as opposed to

tools/testing/selftests/kvm/include/x86/processor.h (+1)
···
 #define X86_FEATURE_IDLE_HLT		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 30)
 #define X86_FEATURE_SEV		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
 #define X86_FEATURE_SEV_ES		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)
+#define X86_FEATURE_SEV_SNP		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 4)
 #define X86_FEATURE_PERFMON_V2		KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 0)
 #define X86_FEATURE_LBR_PMC_FREEZE	KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 2)

tools/testing/selftests/kvm/include/x86/sev.h (+52 -1)
···
 #define SEV_POLICY_NO_DBG	(1UL << 0)
 #define SEV_POLICY_ES		(1UL << 2)
 
+#define SNP_POLICY_SMT		(1ULL << 16)
+#define SNP_POLICY_RSVD_MBO	(1ULL << 17)
+#define SNP_POLICY_DBG		(1ULL << 19)
+
 #define GHCB_MSR_TERM_REQ	0x100
+
+static inline bool is_sev_snp_vm(struct kvm_vm *vm)
+{
+	return vm->type == KVM_X86_SNP_VM;
+}
+
+static inline bool is_sev_es_vm(struct kvm_vm *vm)
+{
+	return is_sev_snp_vm(vm) || vm->type == KVM_X86_SEV_ES_VM;
+}
+
+static inline bool is_sev_vm(struct kvm_vm *vm)
+{
+	return is_sev_es_vm(vm) || vm->type == KVM_X86_SEV_VM;
+}
 
 void sev_vm_launch(struct kvm_vm *vm, uint32_t policy);
 void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement);
 void sev_vm_launch_finish(struct kvm_vm *vm);
+void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy);
+void snp_vm_launch_update(struct kvm_vm *vm);
+void snp_vm_launch_finish(struct kvm_vm *vm);
 
 struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
 					   struct kvm_vcpu **cpu);
-void vm_sev_launch(struct kvm_vm *vm, uint32_t policy, uint8_t *measurement);
+void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement);
 
 kvm_static_assert(SEV_RET_SUCCESS == 0);
+
+/*
+ * A SEV-SNP VM requires the policy reserved bit to always be set.
+ * The SMT policy bit is also required to be set based on SMT being
+ * available and active on the system.
+ */
+static inline u64 snp_default_policy(void)
+{
+	return SNP_POLICY_RSVD_MBO | (is_smt_on() ? SNP_POLICY_SMT : 0);
+}
 
 /*
  * The KVM_MEMORY_ENCRYPT_OP uAPI is utter garbage and takes an "unsigned long"
···
 
 void sev_vm_init(struct kvm_vm *vm);
 void sev_es_vm_init(struct kvm_vm *vm);
+void snp_vm_init(struct kvm_vm *vm);
+
+static inline void vmgexit(void)
+{
+	__asm__ __volatile__("rep; vmmcall");
+}
 
 static inline void sev_register_encrypted_memory(struct kvm_vm *vm,
 						 struct userspace_mem_region *region)
···
 	};
 
 	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data);
+}
+
+static inline void snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
+					  uint64_t hva, uint64_t size, uint8_t type)
+{
+	struct kvm_sev_snp_launch_update update_data = {
+		.uaddr = hva,
+		.gfn_start = gpa >> PAGE_SHIFT,
+		.len = size,
+		.type = type,
+	};
+
+	vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_UPDATE, &update_data);
 }
 
 #endif /* SELFTEST_KVM_SEV_H */

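Editorial note: for orientation, a hedged sketch of how these helpers compose into a full SNP bring-up; this mirrors what vm_sev_launch() does internally for SNP (see lib/x86/sev.c below) rather than adding anything new, and the wrapper function name is hypothetical.

	#include "kvm_util.h"
	#include "sev.h"

	/* Sketch: create and launch an SNP guest using the helpers above. */
	static void launch_snp_guest(void *guest_code)
	{
		struct kvm_vcpu *vcpu;
		struct kvm_vm *vm;

		vm = vm_sev_create_with_one_vcpu(KVM_X86_SNP_VM, guest_code, &vcpu);

		/* RSVD_MBO must always be set; SMT must match the host's SMT state. */
		vm_sev_launch(vm, snp_default_policy(), NULL);

		vcpu_run(vcpu);
		kvm_vm_free(vm);
	}
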
tools/testing/selftests/kvm/lib/kvm_util.c (+19 -2)
···
 
 }
 
+static bool is_guest_memfd_required(struct vm_shape shape)
+{
+#ifdef __x86_64__
+	return shape.type == KVM_X86_SNP_VM;
+#else
+	return false;
+#endif
+}
+
 struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
 			   uint64_t nr_extra_pages)
 {
···
 					  nr_extra_pages);
 	struct userspace_mem_region *slot0;
 	struct kvm_vm *vm;
-	int i;
+	int i, flags;
 
 	kvm_set_files_rlimit(nr_runnable_vcpus);
 
···
 
 	vm = ____vm_create(shape);
 
-	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0);
+	/*
+	 * Force GUEST_MEMFD for the primary memory region if necessary, e.g.
+	 * for CoCo VMs that require GUEST_MEMFD backed private memory.
+	 */
+	flags = 0;
+	if (is_guest_memfd_required(shape))
+		flags |= KVM_MEM_GUEST_MEMFD;
+
+	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, flags);
 	for (i = 0; i < NR_MEM_REGIONS; i++)
 		vm->memslots[i] = 0;

tools/testing/selftests/kvm/lib/lru_gen_util.c (new file, +387)
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025, Google LLC.
+ */
+
+#include <time.h>
+
+#include "lru_gen_util.h"
+
+/*
+ * Tracks state while we parse memcg lru_gen stats. The file we're parsing is
+ * structured like this (some extra whitespace elided):
+ *
+ * memcg (id) (path)
+ *	node (id)
+ *		(gen_nr) (age_in_ms) (nr_anon_pages) (nr_file_pages)
+ */
+struct memcg_stats_parse_context {
+	bool consumed;	/* Whether or not this line was consumed */
+	/* Next parse handler to invoke */
+	void (*next_handler)(struct memcg_stats *stats,
+			     struct memcg_stats_parse_context *ctx,
+			     char *line);
+	int current_node_idx;	/* Current index in nodes array */
+	const char *name;	/* The name of the memcg we're looking for */
+};
+
+static void memcg_stats_handle_searching(struct memcg_stats *stats,
+					 struct memcg_stats_parse_context *ctx,
+					 char *line);
+static void memcg_stats_handle_in_memcg(struct memcg_stats *stats,
+					struct memcg_stats_parse_context *ctx,
+					char *line);
+static void memcg_stats_handle_in_node(struct memcg_stats *stats,
+				       struct memcg_stats_parse_context *ctx,
+				       char *line);
+
+struct split_iterator {
+	char *str;
+	char *save;
+};
+
+static char *split_next(struct split_iterator *it)
+{
+	char *ret = strtok_r(it->str, " \t\n\r", &it->save);
+
+	it->str = NULL;
+	return ret;
+}
+
+static void memcg_stats_handle_searching(struct memcg_stats *stats,
+					 struct memcg_stats_parse_context *ctx,
+					 char *line)
+{
+	struct split_iterator it = { .str = line };
+	char *prefix = split_next(&it);
+	char *memcg_id = split_next(&it);
+	char *memcg_name = split_next(&it);
+	char *end;
+
+	ctx->consumed = true;
+
+	if (!prefix || strcmp("memcg", prefix))
+		return; /* Not a memcg line (maybe empty), skip */
+
+	TEST_ASSERT(memcg_id && memcg_name,
+		    "malformed memcg line; no memcg id or memcg_name");
+
+	if (strcmp(memcg_name + 1, ctx->name))
+		return; /* Wrong memcg, skip */
+
+	/* Found it! */
+
+	stats->memcg_id = strtoul(memcg_id, &end, 10);
+	TEST_ASSERT(*end == '\0', "malformed memcg id '%s'", memcg_id);
+	if (!stats->memcg_id)
+		return; /* Removed memcg? */
+
+	ctx->next_handler = memcg_stats_handle_in_memcg;
+}
+
+static void memcg_stats_handle_in_memcg(struct memcg_stats *stats,
+					struct memcg_stats_parse_context *ctx,
+					char *line)
+{
+	struct split_iterator it = { .str = line };
+	char *prefix = split_next(&it);
+	char *id = split_next(&it);
+	long found_node_id;
+	char *end;
+
+	ctx->consumed = true;
+	ctx->current_node_idx = -1;
+
+	if (!prefix)
+		return; /* Skip empty lines */
+
+	if (!strcmp("memcg", prefix)) {
+		/* Memcg done, found next one; stop. */
+		ctx->next_handler = NULL;
+		return;
+	} else if (strcmp("node", prefix))
+		TEST_ASSERT(false, "found malformed line after 'memcg ...',"
+			    "token: '%s'", prefix);
+
+	/* At this point we know we have a node line. Parse the ID. */
+
+	TEST_ASSERT(id, "malformed node line; no node id");
+
+	found_node_id = strtol(id, &end, 10);
+	TEST_ASSERT(*end == '\0', "malformed node id '%s'", id);
+
+	ctx->current_node_idx = stats->nr_nodes++;
+	TEST_ASSERT(ctx->current_node_idx < MAX_NR_NODES,
+		    "memcg has stats for too many nodes, max is %d",
+		    MAX_NR_NODES);
+	stats->nodes[ctx->current_node_idx].node = found_node_id;
+
+	ctx->next_handler = memcg_stats_handle_in_node;
+}
+
+static void memcg_stats_handle_in_node(struct memcg_stats *stats,
+				       struct memcg_stats_parse_context *ctx,
+				       char *line)
+{
+	char *my_line = strdup(line);
+	struct split_iterator it = { .str = my_line };
+	char *gen, *age, *nr_anon, *nr_file;
+	struct node_stats *node_stats;
+	struct generation_stats *gen_stats;
+	char *end;
+
+	TEST_ASSERT(it.str, "failed to copy input line");
+
+	gen = split_next(&it);
+
+	if (!gen)
+		goto out_consume; /* Skip empty lines */
+
+	if (!strcmp("memcg", gen) || !strcmp("node", gen)) {
+		/*
+		 * Reached next memcg or node section. Don't consume, let the
+		 * other handler deal with this.
+		 */
+		ctx->next_handler = memcg_stats_handle_in_memcg;
+		goto out;
+	}
+
+	node_stats = &stats->nodes[ctx->current_node_idx];
+	TEST_ASSERT(node_stats->nr_gens < MAX_NR_GENS,
+		    "found too many generation lines; max is %d",
+		    MAX_NR_GENS);
+	gen_stats = &node_stats->gens[node_stats->nr_gens++];
+
+	age = split_next(&it);
+	nr_anon = split_next(&it);
+	nr_file = split_next(&it);
+
+	TEST_ASSERT(age && nr_anon && nr_file,
+		    "malformed generation line; not enough tokens");
+
+	gen_stats->gen = (int)strtol(gen, &end, 10);
+	TEST_ASSERT(*end == '\0', "malformed generation number '%s'", gen);
+
+	gen_stats->age_ms = strtol(age, &end, 10);
+	TEST_ASSERT(*end == '\0', "malformed generation age '%s'", age);
+
+	gen_stats->nr_anon = strtol(nr_anon, &end, 10);
+	TEST_ASSERT(*end == '\0', "malformed anonymous page count '%s'",
+		    nr_anon);
+
+	gen_stats->nr_file = strtol(nr_file, &end, 10);
+	TEST_ASSERT(*end == '\0', "malformed file page count '%s'", nr_file);
+
+out_consume:
+	ctx->consumed = true;
+out:
+	free(my_line);
+}
+
+static void print_memcg_stats(const struct memcg_stats *stats, const char *name)
+{
+	int node, gen;
+
+	pr_debug("stats for memcg %s (id %lu):\n", name, stats->memcg_id);
+	for (node = 0; node < stats->nr_nodes; ++node) {
+		pr_debug("\tnode %d\n", stats->nodes[node].node);
+		for (gen = 0; gen < stats->nodes[node].nr_gens; ++gen) {
+			const struct generation_stats *gstats =
+				&stats->nodes[node].gens[gen];
+
+			pr_debug("\t\tgen %d\tage_ms %ld"
+				 "\tnr_anon %ld\tnr_file %ld\n",
+				 gstats->gen, gstats->age_ms, gstats->nr_anon,
+				 gstats->nr_file);
+		}
+	}
+}
+
+/* Re-read lru_gen debugfs information for @memcg into @stats. */
+void lru_gen_read_memcg_stats(struct memcg_stats *stats, const char *memcg)
+{
+	FILE *f;
+	ssize_t read = 0;
+	char *line = NULL;
+	size_t bufsz;
+	struct memcg_stats_parse_context ctx = {
+		.next_handler = memcg_stats_handle_searching,
+		.name = memcg,
+	};
+
+	memset(stats, 0, sizeof(struct memcg_stats));
+
+	f = fopen(LRU_GEN_DEBUGFS, "r");
+	TEST_ASSERT(f, "fopen(%s) failed", LRU_GEN_DEBUGFS);
+
+	while (ctx.next_handler && (read = getline(&line, &bufsz, f)) > 0) {
+		ctx.consumed = false;
+
+		do {
+			ctx.next_handler(stats, &ctx, line);
+			if (!ctx.next_handler)
+				break;
+		} while (!ctx.consumed);
+	}
+
+	if (read < 0 && !feof(f))
+		TEST_ASSERT(false, "getline(%s) failed", LRU_GEN_DEBUGFS);
+
+	TEST_ASSERT(stats->memcg_id > 0, "Couldn't find memcg: %s\n"
+		    "Did the memcg get created in the proper mount?",
+		    memcg);
+	if (line)
+		free(line);
+	TEST_ASSERT(!fclose(f), "fclose(%s) failed", LRU_GEN_DEBUGFS);
+
+	print_memcg_stats(stats, memcg);
+}
+
+/*
+ * Find all pages tracked by lru_gen for this memcg in generation @target_gen.
+ *
+ * If @target_gen is negative, look for all generations.
+ */
+long lru_gen_sum_memcg_stats_for_gen(int target_gen,
+				     const struct memcg_stats *stats)
+{
+	int node, gen;
+	long total_nr = 0;
+
+	for (node = 0; node < stats->nr_nodes; ++node) {
+		const struct node_stats *node_stats = &stats->nodes[node];
+
+		for (gen = 0; gen < node_stats->nr_gens; ++gen) {
+			const struct generation_stats *gen_stats =
+				&node_stats->gens[gen];
+
+			if (target_gen >= 0 && gen_stats->gen != target_gen)
+				continue;
+
+			total_nr += gen_stats->nr_anon + gen_stats->nr_file;
+		}
+	}
+
+	return total_nr;
+}
+
+/* Find all pages tracked by lru_gen for this memcg. */
+long lru_gen_sum_memcg_stats(const struct memcg_stats *stats)
+{
+	return lru_gen_sum_memcg_stats_for_gen(-1, stats);
+}
+
+/*
+ * If lru_gen aging should force page table scanning.
+ *
+ * If you want to set this to false, you will need to do eviction
+ * before doing extra aging passes.
+ */
+static const bool force_scan = true;
+
+static void run_aging_impl(unsigned long memcg_id, int node_id, int max_gen)
+{
+	FILE *f = fopen(LRU_GEN_DEBUGFS, "w");
+	char *command;
+	size_t sz;
+
+	TEST_ASSERT(f, "fopen(%s) failed", LRU_GEN_DEBUGFS);
+	sz = asprintf(&command, "+ %lu %d %d 1 %d\n",
+		      memcg_id, node_id, max_gen, force_scan);
+	TEST_ASSERT(sz > 0, "creating aging command failed");
+
+	pr_debug("Running aging command: %s", command);
+	if (fwrite(command, sizeof(char), sz, f) < sz) {
+		TEST_ASSERT(false, "writing aging command %s to %s failed",
+			    command, LRU_GEN_DEBUGFS);
+	}
+
+	TEST_ASSERT(!fclose(f), "fclose(%s) failed", LRU_GEN_DEBUGFS);
+}
+
+void lru_gen_do_aging(struct memcg_stats *stats, const char *memcg)
+{
+	int node, gen;
+
+	pr_debug("lru_gen: invoking aging...\n");
+
+	/* Must read memcg stats to construct the proper aging command. */
+	lru_gen_read_memcg_stats(stats, memcg);
+
+	for (node = 0; node < stats->nr_nodes; ++node) {
+		int max_gen = 0;
+
+		for (gen = 0; gen < stats->nodes[node].nr_gens; ++gen) {
+			int this_gen = stats->nodes[node].gens[gen].gen;
+
+			max_gen = max_gen > this_gen ? max_gen : this_gen;
+		}
+
+		run_aging_impl(stats->memcg_id, stats->nodes[node].node,
+			       max_gen);
+	}
+
+	/* Re-read so callers get updated information */
+	lru_gen_read_memcg_stats(stats, memcg);
+}
+
+/*
+ * Find which generation contains at least @pages pages, assuming that
+ * such a generation exists.
+ */
+int lru_gen_find_generation(const struct memcg_stats *stats,
+			    unsigned long pages)
+{
+	int node, gen, gen_idx, min_gen = INT_MAX, max_gen = -1;
+
+	for (node = 0; node < stats->nr_nodes; ++node)
+		for (gen_idx = 0; gen_idx < stats->nodes[node].nr_gens;
+		     ++gen_idx) {
+			gen = stats->nodes[node].gens[gen_idx].gen;
+			max_gen = gen > max_gen ? gen : max_gen;
+			min_gen = gen < min_gen ? gen : min_gen;
+		}
+
+	for (gen = min_gen; gen <= max_gen; ++gen)
+		/* See if this generation has enough pages. */
+		if (lru_gen_sum_memcg_stats_for_gen(gen, stats) > pages)
+			return gen;
+
+	return -1;
+}
+
+bool lru_gen_usable(void)
+{
+	long required_features = LRU_GEN_ENABLED | LRU_GEN_MM_WALK;
+	int lru_gen_fd, lru_gen_debug_fd;
+	char mglru_feature_str[8] = {};
+	long mglru_features;
+
+	lru_gen_fd = open(LRU_GEN_ENABLED_PATH, O_RDONLY);
+	if (lru_gen_fd < 0) {
+		puts("lru_gen: Could not open " LRU_GEN_ENABLED_PATH);
+		return false;
+	}
+	if (read(lru_gen_fd, &mglru_feature_str, 7) < 7) {
+		puts("lru_gen: Could not read from " LRU_GEN_ENABLED_PATH);
+		close(lru_gen_fd);
+		return false;
+	}
+	close(lru_gen_fd);
+
+	mglru_features = strtol(mglru_feature_str, NULL, 16);
+	if ((mglru_features & required_features) != required_features) {
+		printf("lru_gen: missing features, got: 0x%lx, expected: 0x%lx\n",
+		       mglru_features, required_features);
+		printf("lru_gen: Try 'echo 0x%lx > /sys/kernel/mm/lru_gen/enabled'\n",
+		       required_features);
+		return false;
+	}
+
+	lru_gen_debug_fd = open(LRU_GEN_DEBUGFS, O_RDWR);
+	__TEST_REQUIRE(lru_gen_debug_fd >= 0,
+		       "lru_gen: Could not open " LRU_GEN_DEBUGFS ", "
+		       "but lru_gen is enabled, so cannot use page_idle.");
+	close(lru_gen_debug_fd);
+	return true;
+}

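Editorial note: for reference, the debugfs layout the parser above walks looks roughly like the following; the IDs and counts are made up for illustration, but the columns match the format documented in the parser's header comment (gen_nr, age_in_ms, nr_anon_pages, nr_file_pages).

	memcg    21 /access_tracking_perf_test
	 node     0
	          96     12345        0      6789
	          97       500        0         0
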
tools/testing/selftests/kvm/lib/test_util.c (+33 -13)
···
 	puts(", skipping test");
 }
 
-bool thp_configured(void)
+static bool test_sysfs_path(const char *path)
 {
-	int ret;
 	struct stat statbuf;
+	int ret;
 
-	ret = stat("/sys/kernel/mm/transparent_hugepage", &statbuf);
+	ret = stat(path, &statbuf);
 	TEST_ASSERT(ret == 0 || (ret == -1 && errno == ENOENT),
-		    "Error in stating /sys/kernel/mm/transparent_hugepage");
+		    "Error in stat()ing '%s'", path);
 
 	return ret == 0;
 }
 
-size_t get_trans_hugepagesz(void)
+bool thp_configured(void)
+{
+	return test_sysfs_path("/sys/kernel/mm/transparent_hugepage");
+}
+
+static size_t get_sysfs_val(const char *path)
 {
 	size_t size;
 	FILE *f;
 	int ret;
 
+	f = fopen(path, "r");
+	TEST_ASSERT(f, "Error opening '%s'", path);
+
+	ret = fscanf(f, "%ld", &size);
+	TEST_ASSERT(ret > 0, "Error reading '%s'", path);
+
+	/* Re-scan the input stream to verify the entire file was read. */
+	ret = fscanf(f, "%ld", &size);
+	TEST_ASSERT(ret < 1, "Error reading '%s'", path);
+
+	fclose(f);
+	return size;
+}
+
+size_t get_trans_hugepagesz(void)
+{
 	TEST_ASSERT(thp_configured(), "THP is not configured in host kernel");
 
-	f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r");
-	TEST_ASSERT(f != NULL, "Error in opening transparent_hugepage/hpage_pmd_size");
+	return get_sysfs_val("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size");
+}
 
-	ret = fscanf(f, "%ld", &size);
-	ret = fscanf(f, "%ld", &size);
-	TEST_ASSERT(ret < 1, "Error reading transparent_hugepage/hpage_pmd_size");
-	fclose(f);
-
-	return size;
+bool is_numa_balancing_enabled(void)
+{
+	if (!test_sysfs_path("/proc/sys/kernel/numa_balancing"))
+		return false;
+	return get_sysfs_val("/proc/sys/kernel/numa_balancing") == 1;
 }
 
 size_t get_def_hugetlb_pagesz(void)

tools/testing/selftests/kvm/lib/x86/processor.c (+2 -2)
···
 	sync_global_to_guest(vm, host_cpu_is_amd);
 	sync_global_to_guest(vm, is_forced_emulation_enabled);
 
-	if (vm->type == KVM_X86_SEV_VM || vm->type == KVM_X86_SEV_ES_VM) {
+	if (is_sev_vm(vm)) {
 		struct kvm_sev_init init = { 0 };
 
 		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
···
 
 void kvm_init_vm_address_properties(struct kvm_vm *vm)
 {
-	if (vm->type == KVM_X86_SEV_VM || vm->type == KVM_X86_SEV_ES_VM) {
+	if (is_sev_vm(vm)) {
 		vm->arch.sev_fd = open_sev_dev_path_or_exit();
 		vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT));
 		vm->gpa_tag_mask = vm->arch.c_bit;

tools/testing/selftests/kvm/lib/x86/sev.c (+67 -9)
···
  * and find the first range, but that's correct because the condition
  * expression would cause us to quit the loop.
  */
-static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region)
+static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region,
+			   uint8_t page_type, bool private)
 {
 	const struct sparsebit *protected_phy_pages = region->protected_phy_pages;
 	const vm_paddr_t gpa_base = region->region.guest_phys_addr;
···
 	if (!sparsebit_any_set(protected_phy_pages))
 		return;
 
-	sev_register_encrypted_memory(vm, region);
+	if (!is_sev_snp_vm(vm))
+		sev_register_encrypted_memory(vm, region);
 
 	sparsebit_for_each_set_range(protected_phy_pages, i, j) {
 		const uint64_t size = (j - i + 1) * vm->page_size;
 		const uint64_t offset = (i - lowest_page_in_region) * vm->page_size;
 
-		sev_launch_update_data(vm, gpa_base + offset, size);
+		if (private)
+			vm_mem_set_private(vm, gpa_base + offset, size);
+
+		if (is_sev_snp_vm(vm))
+			snp_launch_update_data(vm, gpa_base + offset,
+					       (uint64_t)addr_gpa2hva(vm, gpa_base + offset),
+					       size, page_type);
+		else
+			sev_launch_update_data(vm, gpa_base + offset, size);
+
 	}
 }
 
 void sev_vm_init(struct kvm_vm *vm)
 {
 	if (vm->type == KVM_X86_DEFAULT_VM) {
-		assert(vm->arch.sev_fd == -1);
+		TEST_ASSERT_EQ(vm->arch.sev_fd, -1);
 		vm->arch.sev_fd = open_sev_dev_path_or_exit();
 		vm_sev_ioctl(vm, KVM_SEV_INIT, NULL);
 	} else {
 		struct kvm_sev_init init = { 0 };
-		assert(vm->type == KVM_X86_SEV_VM);
+		TEST_ASSERT_EQ(vm->type, KVM_X86_SEV_VM);
 		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
 	}
 }
 
 void sev_es_vm_init(struct kvm_vm *vm)
 {
 	if (vm->type == KVM_X86_DEFAULT_VM) {
-		assert(vm->arch.sev_fd == -1);
+		TEST_ASSERT_EQ(vm->arch.sev_fd, -1);
 		vm->arch.sev_fd = open_sev_dev_path_or_exit();
 		vm_sev_ioctl(vm, KVM_SEV_ES_INIT, NULL);
 	} else {
 		struct kvm_sev_init init = { 0 };
-		assert(vm->type == KVM_X86_SEV_ES_VM);
+		TEST_ASSERT_EQ(vm->type, KVM_X86_SEV_ES_VM);
 		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
 	}
+}
+
+void snp_vm_init(struct kvm_vm *vm)
+{
+	struct kvm_sev_init init = { 0 };
+
+	TEST_ASSERT_EQ(vm->type, KVM_X86_SNP_VM);
+	vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
 }
 
 void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
···
 	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_LAUNCH_UPDATE);
 
 	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node)
-		encrypt_region(vm, region);
+		encrypt_region(vm, region, KVM_SEV_PAGE_TYPE_INVALID, false);
 
 	if (policy & SEV_POLICY_ES)
 		vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
···
 	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING);
 }
 
+void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy)
+{
+	struct kvm_sev_snp_launch_start launch_start = {
+		.policy = policy,
+	};
+
+	vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_START, &launch_start);
+}
+
+void snp_vm_launch_update(struct kvm_vm *vm)
+{
+	struct userspace_mem_region *region;
+	int ctr;
+
+	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node)
+		encrypt_region(vm, region, KVM_SEV_SNP_PAGE_TYPE_NORMAL, true);
+
+	vm->arch.is_pt_protected = true;
+}
+
+void snp_vm_launch_finish(struct kvm_vm *vm)
+{
+	struct kvm_sev_snp_launch_finish launch_finish = { 0 };
+
+	vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish);
+}
+
 struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
 					   struct kvm_vcpu **cpu)
 {
···
 	return vm;
 }
 
-void vm_sev_launch(struct kvm_vm *vm, uint32_t policy, uint8_t *measurement)
+void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement)
 {
+	if (is_sev_snp_vm(vm)) {
+		vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, BIT(KVM_HC_MAP_GPA_RANGE));
+
+		snp_vm_launch_start(vm, policy);
+
+		snp_vm_launch_update(vm);
+
+		snp_vm_launch_finish(vm);
+
+		return;
+	}
+
 	sev_vm_launch(vm, policy);
 
 	if (!measurement)

tools/testing/selftests/kvm/x86/fastops_test.c (new file, +165)
+// SPDX-License-Identifier: GPL-2.0-only
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+/*
+ * Execute a fastop() instruction, with or without forced emulation.  BT bit 0
+ * to set RFLAGS.CF based on whether or not the input is even or odd, so that
+ * instructions like ADC and SBB are deterministic.
+ */
+#define guest_execute_fastop_1(FEP, insn, __val, __flags)			\
+({										\
+	__asm__ __volatile__("bt $0, %[val]\n\t"				\
+			     FEP insn " %[val]\n\t"				\
+			     "pushfq\n\t"					\
+			     "pop %[flags]\n\t"					\
+			     : [val]"+r"(__val), [flags]"=r"(__flags)		\
+			     : : "cc", "memory");				\
+})
+
+#define guest_test_fastop_1(insn, type_t, __val)				\
+({										\
+	type_t val = __val, ex_val = __val, input = __val;			\
+	uint64_t flags, ex_flags;						\
+										\
+	guest_execute_fastop_1("", insn, ex_val, ex_flags);			\
+	guest_execute_fastop_1(KVM_FEP, insn, val, flags);			\
+										\
+	__GUEST_ASSERT(val == ex_val,						\
+		       "Wanted 0x%lx for '%s 0x%lx', got 0x%lx",		\
+		       (uint64_t)ex_val, insn, (uint64_t)input, (uint64_t)val);	\
+	__GUEST_ASSERT(flags == ex_flags,					\
+		       "Wanted flags 0x%lx for '%s 0x%lx', got 0x%lx",		\
+		       ex_flags, insn, (uint64_t)input, flags);			\
+})
+
+#define guest_execute_fastop_2(FEP, insn, __input, __output, __flags)		\
+({										\
+	__asm__ __volatile__("bt $0, %[output]\n\t"				\
+			     FEP insn " %[input], %[output]\n\t"		\
+			     "pushfq\n\t"					\
+			     "pop %[flags]\n\t"					\
+			     : [output]"+r"(__output), [flags]"=r"(__flags)	\
+			     : [input]"r"(__input) : "cc", "memory");		\
+})
+
+#define guest_test_fastop_2(insn, type_t, __val1, __val2)			\
+({										\
+	type_t input = __val1, input2 = __val2, output = __val2, ex_output = __val2;	\
+	uint64_t flags, ex_flags;						\
+										\
+	guest_execute_fastop_2("", insn, input, ex_output, ex_flags);		\
+	guest_execute_fastop_2(KVM_FEP, insn, input, output, flags);		\
+										\
+	__GUEST_ASSERT(output == ex_output,					\
+		       "Wanted 0x%lx for '%s 0x%lx 0x%lx', got 0x%lx",		\
+		       (uint64_t)ex_output, insn, (uint64_t)input,		\
+		       (uint64_t)input2, (uint64_t)output);			\
+	__GUEST_ASSERT(flags == ex_flags,					\
+		       "Wanted flags 0x%lx for '%s 0x%lx, 0x%lx', got 0x%lx",	\
+		       ex_flags, insn, (uint64_t)input, (uint64_t)input2, flags);	\
+})
+
+#define guest_execute_fastop_cl(FEP, insn, __shift, __output, __flags)		\
+({										\
+	__asm__ __volatile__("bt $0, %[output]\n\t"				\
+			     FEP insn " %%cl, %[output]\n\t"			\
+			     "pushfq\n\t"					\
+			     "pop %[flags]\n\t"					\
+			     : [output]"+r"(__output), [flags]"=r"(__flags)	\
+			     : "c"(__shift) : "cc", "memory");			\
+})
+
+#define guest_test_fastop_cl(insn, type_t, __val1, __val2)			\
+({										\
+	type_t output = __val2, ex_output = __val2, input = __val2;		\
+	uint8_t shift = __val1;							\
+	uint64_t flags, ex_flags;						\
+										\
+	guest_execute_fastop_cl("", insn, shift, ex_output, ex_flags);		\
+	guest_execute_fastop_cl(KVM_FEP, insn, shift, output, flags);		\
+										\
+	__GUEST_ASSERT(output == ex_output,					\
+		       "Wanted 0x%lx for '%s 0x%x, 0x%lx', got 0x%lx",		\
+		       (uint64_t)ex_output, insn, shift, (uint64_t)input,	\
+		       (uint64_t)output);					\
+	__GUEST_ASSERT(flags == ex_flags,					\
+		       "Wanted flags 0x%lx for '%s 0x%x, 0x%lx', got 0x%lx",	\
+		       ex_flags, insn, shift, (uint64_t)input, flags);		\
+})
+
+static const uint64_t vals[] = {
+	0,
+	1,
+	2,
+	4,
+	7,
+	0x5555555555555555,
+	0xaaaaaaaaaaaaaaaa,
+	0xfefefefefefefefe,
+	0xffffffffffffffff,
+};
+
+#define guest_test_fastops(type_t, suffix)					\
+do {										\
+	int i, j;								\
+										\
+	for (i = 0; i < ARRAY_SIZE(vals); i++) {				\
+		guest_test_fastop_1("dec" suffix, type_t, vals[i]);		\
+		guest_test_fastop_1("inc" suffix, type_t, vals[i]);		\
+		guest_test_fastop_1("neg" suffix, type_t, vals[i]);		\
+		guest_test_fastop_1("not" suffix, type_t, vals[i]);		\
+										\
+		for (j = 0; j < ARRAY_SIZE(vals); j++) {			\
+			guest_test_fastop_2("add" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("adc" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("and" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("bsf" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("bsr" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("bt" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("btc" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("btr" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("bts" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("cmp" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("imul" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("or" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("sbb" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("sub" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("test" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_2("xor" suffix, type_t, vals[i], vals[j]);	\
+										\
+			guest_test_fastop_cl("rol" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_cl("ror" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_cl("rcl" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_cl("rcr" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_cl("sar" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_cl("shl" suffix, type_t, vals[i], vals[j]);	\
+			guest_test_fastop_cl("shr" suffix, type_t, vals[i], vals[j]);	\
+		}								\
+	}									\
+} while (0)
+
+static void guest_code(void)
+{
+	guest_test_fastops(uint16_t, "w");
+	guest_test_fastops(uint32_t, "l");
+	guest_test_fastops(uint64_t, "q");
+
+	GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+
+	TEST_REQUIRE(is_forced_emulation_enabled);
+
+	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+	vcpu_run(vcpu);
+	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+
+	kvm_vm_free(vm);
+}

+1 -20
tools/testing/selftests/kvm/x86/hyperv_cpuid.c
···
  {
  }

- static bool smt_possible(void)
- {
- 	char buf[16];
- 	FILE *f;
- 	bool res = true;
-
- 	f = fopen("/sys/devices/system/cpu/smt/control", "r");
- 	if (f) {
- 		if (fread(buf, sizeof(*buf), sizeof(buf), f) > 0) {
- 			if (!strncmp(buf, "forceoff", 8) ||
- 			    !strncmp(buf, "notsupported", 12))
- 				res = false;
- 		}
- 		fclose(f);
- 	}
-
- 	return res;
- }
-
  static void test_hv_cpuid(struct kvm_vcpu *vcpu, bool evmcs_expected)
  {
  	const bool has_irqchip = !vcpu || vcpu->vm->has_irqchip;
···
  	case 0x40000004:
  		test_val = entry->eax & (1UL << 18);

- 		TEST_ASSERT(!!test_val == !smt_possible(),
+ 		TEST_ASSERT(!!test_val == !is_smt_possible(),
  			    "NoNonArchitecturalCoreSharing bit"
  			    " doesn't reflect SMT setting");

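The deleted helper was not dropped outright; the test now calls a shared is_smt_possible(), presumably hoisted into the selftests' common library so other tests can reuse it. Judging by the code removed here, the shared helper looks roughly like this (same logic, just no longer file-local):

	/* Sketch based on the deleted static copy above: SMT is considered
	 * possible unless sysfs reports it forced off or unsupported. */
	bool is_smt_possible(void)
	{
		char buf[16];
		FILE *f;
		bool res = true;

		f = fopen("/sys/devices/system/cpu/smt/control", "r");
		if (f) {
			if (fread(buf, sizeof(*buf), sizeof(buf), f) > 0) {
				if (!strncmp(buf, "forceoff", 8) ||
				    !strncmp(buf, "notsupported", 12))
					res = false;
			}
			fclose(f);
		}

		return res;
	}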
+13
tools/testing/selftests/kvm/x86/sev_init2_tests.c
···
  int kvm_fd;
  u64 supported_vmsa_features;
  bool have_sev_es;
+ bool have_snp;

  static int __sev_ioctl(int vm_fd, int cmd_id, void *data)
  {
···
  	 */
  	if (have_sev_es)
  		test_init2(KVM_X86_SEV_ES_VM, &(struct kvm_sev_init){});
+
+ 	if (have_snp)
+ 		test_init2(KVM_X86_SNP_VM, &(struct kvm_sev_init){});

  	test_init2_invalid(0, &(struct kvm_sev_init){},
  			   "VM type is KVM_X86_DEFAULT_VM");
···
  		    "sev-es: KVM_CAP_VM_TYPES (%x) does not match cpuid (checking %x)",
  		    kvm_check_cap(KVM_CAP_VM_TYPES), 1 << KVM_X86_SEV_ES_VM);

+ 	have_snp = kvm_cpu_has(X86_FEATURE_SEV_SNP);
+ 	TEST_ASSERT(have_snp == !!(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SNP_VM)),
+ 		    "sev-snp: KVM_CAP_VM_TYPES (%x) indicates SNP support (bit %d), but CPUID does not",
+ 		    kvm_check_cap(KVM_CAP_VM_TYPES), KVM_X86_SNP_VM);
+
  	test_vm_types();

  	test_flags(KVM_X86_SEV_VM);
  	if (have_sev_es)
  		test_flags(KVM_X86_SEV_ES_VM);
+ 	if (have_snp)
+ 		test_flags(KVM_X86_SNP_VM);

  	test_features(KVM_X86_SEV_VM, 0);
  	if (have_sev_es)
  		test_features(KVM_X86_SEV_ES_VM, supported_vmsa_features);
+ 	if (have_snp)
+ 		test_features(KVM_X86_SNP_VM, supported_vmsa_features);

  	return 0;
  }
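A note on the capability check above: KVM_CAP_VM_TYPES reports the supported VM types as a bitmask indexed by the KVM_X86_*_VM constants, which is why the new assert masks the capability value with BIT(KVM_X86_SNP_VM). A minimal userspace probe along the same lines (a sketch reusing the selftests helpers already seen in this diff):

	/* True iff KVM advertises the SEV-SNP VM type for KVM_CREATE_VM. */
	static bool snp_vm_type_supported(void)
	{
		return kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SNP_VM);
	}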
+49 -26
tools/testing/selftests/kvm/x86/sev_smoke_test.c
···

  #define XFEATURE_MASK_X87_AVX (XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)

+ static void guest_snp_code(void)
+ {
+ 	uint64_t sev_msr = rdmsr(MSR_AMD64_SEV);
+
+ 	GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ENABLED);
+ 	GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED);
+ 	GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_SNP_ENABLED);
+
+ 	wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
+ 	vmgexit();
+ }
+
  static void guest_sev_es_code(void)
  {
  	/* TODO: Check CPUID after GHCB-based hypercall support is added. */
···
  	 * force "termination" to signal "done" via the GHCB MSR protocol.
  	 */
  	wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
- 	__asm__ __volatile__("rep; vmmcall");
+ 	vmgexit();
  }

  static void guest_sev_code(void)
···
  	abort();
  }

- static void test_sync_vmsa(uint32_t policy)
+ static void test_sync_vmsa(uint32_t type, uint64_t policy)
  {
  	struct kvm_vcpu *vcpu;
  	struct kvm_vm *vm;
···
  	double x87val = M_PI;
  	struct kvm_xsave __attribute__((aligned(64))) xsave = { 0 };

- 	vm = vm_sev_create_with_one_vcpu(KVM_X86_SEV_ES_VM, guest_code_xsave, &vcpu);
+ 	vm = vm_sev_create_with_one_vcpu(type, guest_code_xsave, &vcpu);
  	gva = vm_vaddr_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR,
  				    MEM_REGION_TEST_DATA);
  	hva = addr_gva2hva(vm, gva);
···
  		: "ymm4", "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)");
  	vcpu_xsave_set(vcpu, &xsave);

- 	vm_sev_launch(vm, SEV_POLICY_ES | policy, NULL);
+ 	vm_sev_launch(vm, policy, NULL);

  	/* This page is shared, so make it decrypted. */
  	memset(hva, 0, 4096);
···
  	kvm_vm_free(vm);
  }

- static void test_sev(void *guest_code, uint64_t policy)
+ static void test_sev(void *guest_code, uint32_t type, uint64_t policy)
  {
  	struct kvm_vcpu *vcpu;
  	struct kvm_vm *vm;
  	struct ucall uc;
-
- 	uint32_t type = policy & SEV_POLICY_ES ? KVM_X86_SEV_ES_VM : KVM_X86_SEV_VM;

  	vm = vm_sev_create_with_one_vcpu(type, guest_code, &vcpu);
···
  	for (;;) {
  		vcpu_run(vcpu);

- 		if (policy & SEV_POLICY_ES) {
+ 		if (is_sev_es_vm(vm)) {
  			TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
  				    "Wanted SYSTEM_EVENT, got %s",
  				    exit_reason_str(vcpu->run->exit_reason));
···
  	__asm__ __volatile__("ud2");
  }

- static void test_sev_es_shutdown(void)
+ static void test_sev_shutdown(uint32_t type, uint64_t policy)
  {
  	struct kvm_vcpu *vcpu;
  	struct kvm_vm *vm;
-
- 	uint32_t type = KVM_X86_SEV_ES_VM;

  	vm = vm_sev_create_with_one_vcpu(type, guest_shutdown_code, &vcpu);

- 	vm_sev_launch(vm, SEV_POLICY_ES, NULL);
+ 	vm_sev_launch(vm, policy, NULL);

  	vcpu_run(vcpu);
  	TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SHUTDOWN,
···
  	kvm_vm_free(vm);
  }

- int main(int argc, char *argv[])
+ static void test_sev_smoke(void *guest, uint32_t type, uint64_t policy)
  {
  	const u64 xf_mask = XFEATURE_MASK_X87_AVX;

+ 	if (type == KVM_X86_SNP_VM)
+ 		test_sev(guest, type, policy | SNP_POLICY_DBG);
+ 	else
+ 		test_sev(guest, type, policy | SEV_POLICY_NO_DBG);
+ 	test_sev(guest, type, policy);
+
+ 	if (type == KVM_X86_SEV_VM)
+ 		return;
+
+ 	test_sev_shutdown(type, policy);
+
+ 	if (kvm_has_cap(KVM_CAP_XCRS) &&
+ 	    (xgetbv(0) & kvm_cpu_supported_xcr0() & xf_mask) == xf_mask) {
+ 		test_sync_vmsa(type, policy);
+ 		if (type == KVM_X86_SNP_VM)
+ 			test_sync_vmsa(type, policy | SNP_POLICY_DBG);
+ 		else
+ 			test_sync_vmsa(type, policy | SEV_POLICY_NO_DBG);
+ 	}
+ }
+
+ int main(int argc, char *argv[])
+ {
  	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));

- 	test_sev(guest_sev_code, SEV_POLICY_NO_DBG);
- 	test_sev(guest_sev_code, 0);
+ 	test_sev_smoke(guest_sev_code, KVM_X86_SEV_VM, 0);

- 	if (kvm_cpu_has(X86_FEATURE_SEV_ES)) {
- 		test_sev(guest_sev_es_code, SEV_POLICY_ES | SEV_POLICY_NO_DBG);
- 		test_sev(guest_sev_es_code, SEV_POLICY_ES);
+ 	if (kvm_cpu_has(X86_FEATURE_SEV_ES))
+ 		test_sev_smoke(guest_sev_es_code, KVM_X86_SEV_ES_VM, SEV_POLICY_ES);

- 	test_sev_es_shutdown();
-
- 	if (kvm_has_cap(KVM_CAP_XCRS) &&
- 	    (xgetbv(0) & kvm_cpu_supported_xcr0() & xf_mask) == xf_mask) {
- 		test_sync_vmsa(0);
- 		test_sync_vmsa(SEV_POLICY_NO_DBG);
- 	}
- 	}
+ 	if (kvm_cpu_has(X86_FEATURE_SEV_SNP))
+ 		test_sev_smoke(guest_snp_code, KVM_X86_SNP_VM, snp_default_policy());

  	return 0;
  }
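Both the SEV-ES and SNP guest paths above now terminate through a vmgexit() helper instead of open-coded inline asm. Judging by the "rep; vmmcall" line it replaces, the helper is a thin wrapper around the VMGEXIT instruction (which shares the VMMCALL encoding plus a REP prefix), roughly:

	/* Sketch inferred from the inline asm it replaces: VMGEXIT requests
	 * a world switch to the hypervisor, here used to deliver the GHCB
	 * MSR-protocol termination request that the guest programmed into
	 * MSR_AMD64_SEV_ES_GHCB just before calling this. */
	static inline void vmgexit(void)
	{
		__asm__ __volatile__("rep; vmmcall");
	}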