Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kvm-x86-selftests_utils-6.10' of https://github.com/kvm-x86/linux into HEAD

KVM selftests treewide updates for 6.10:

- Define _GNU_SOURCE for all selftests to fix a warning that was introduced by
a change to kselftest_harness.h late in the 6.9 cycle, and because forcing
every test to #define _GNU_SOURCE is painful.

- Provide a global pseudo-RNG instance for all tests, so that library code can
generate random but deterministic numbers.

- Use the global pRNG to randomly force emulation of select writes from guest
code on x86, e.g. to help validate KVM's emulation of locked accesses.

- Rename kvm_util_base.h back to kvm_util.h, as the weird layer of indirection
was added purely to avoid manually #including ucall_common.h in a handful of
locations.

- Allocate and initialize x86's GDT, IDT, TSS, segments, and default exception
handlers at VM creation, instead of forcing tests to manually trigger the
related setup.

+1421 -448
+2 -2
tools/testing/selftests/kvm/Makefile
··· 230 230 endif 231 231 CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ 232 232 -Wno-gnu-variable-sized-type-not-at-end -MD -MP -DCONFIG_64BIT \ 233 - -fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \ 234 - -fno-builtin-strnlen \ 233 + -D_GNU_SOURCE -fno-builtin-memcmp -fno-builtin-memcpy \ 234 + -fno-builtin-memset -fno-builtin-strnlen \ 235 235 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \ 236 236 -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \ 237 237 -I$(<D) -Iinclude/$(ARCH_DIR) -I ../rseq -I.. $(EXTRA_CFLAGS) \
+1 -2
tools/testing/selftests/kvm/aarch64/arch_timer.c
··· 5 5 * 6 6 * Copyright (c) 2021, Google LLC. 7 7 */ 8 - #define _GNU_SOURCE 9 - 10 8 #include "arch_timer.h" 11 9 #include "delay.h" 12 10 #include "gic.h" 13 11 #include "processor.h" 14 12 #include "timer_test.h" 13 + #include "ucall_common.h" 15 14 #include "vgic.h" 16 15 17 16 enum guest_stage {
-1
tools/testing/selftests/kvm/aarch64/page_fault_test.c
··· 7 7 * hugetlbfs with a hole). It checks that the expected handling method is 8 8 * called (e.g., uffd faults with the right address and write/read flag). 9 9 */ 10 - #define _GNU_SOURCE 11 10 #include <linux/bitmap.h> 12 11 #include <fcntl.h> 13 12 #include <test_util.h>
-2
tools/testing/selftests/kvm/aarch64/psci_test.c
··· 11 11 * KVM_SYSTEM_EVENT_SUSPEND UAPI. 12 12 */ 13 13 14 - #define _GNU_SOURCE 15 - 16 14 #include <linux/kernel.h> 17 15 #include <linux/psci.h> 18 16 #include <asm/cputype.h>
-1
tools/testing/selftests/kvm/aarch64/vgic_init.c
··· 4 4 * 5 5 * Copyright (C) 2020, Red Hat, Inc. 6 6 */ 7 - #define _GNU_SOURCE 8 7 #include <linux/kernel.h> 9 8 #include <sys/syscall.h> 10 9 #include <asm/kvm.h>
+1 -3
tools/testing/selftests/kvm/arch_timer.c
··· 19 19 * 20 20 * Copyright (c) 2021, Google LLC. 21 21 */ 22 - 23 - #define _GNU_SOURCE 24 - 25 22 #include <stdlib.h> 26 23 #include <pthread.h> 27 24 #include <linux/sizes.h> ··· 26 29 #include <sys/sysinfo.h> 27 30 28 31 #include "timer_test.h" 32 + #include "ucall_common.h" 29 33 30 34 struct test_args test_args = { 31 35 .nr_vcpus = NR_VCPUS_DEF,
+1 -3
tools/testing/selftests/kvm/demand_paging_test.c
··· 6 6 * Copyright (C) 2018, Red Hat, Inc. 7 7 * Copyright (C) 2019, Google, Inc. 8 8 */ 9 - 10 - #define _GNU_SOURCE /* for pipe2 */ 11 - 12 9 #include <inttypes.h> 13 10 #include <stdio.h> 14 11 #include <stdlib.h> ··· 18 21 #include "test_util.h" 19 22 #include "memstress.h" 20 23 #include "guest_modes.h" 24 + #include "ucall_common.h" 21 25 #include "userfaultfd_util.h" 22 26 23 27 #ifdef __NR_userfaultfd
+5 -5
tools/testing/selftests/kvm/dirty_log_perf_test.c
··· 18 18 #include "test_util.h" 19 19 #include "memstress.h" 20 20 #include "guest_modes.h" 21 + #include "ucall_common.h" 21 22 22 23 #ifdef __aarch64__ 23 24 #include "aarch64/vgic.h" ··· 130 129 enum vm_mem_backing_src_type backing_src; 131 130 int slots; 132 131 uint32_t write_percent; 133 - uint32_t random_seed; 134 132 bool random_access; 135 133 }; 136 134 ··· 153 153 p->slots, p->backing_src, 154 154 p->partition_vcpu_memory_access); 155 155 156 - pr_info("Random seed: %u\n", p->random_seed); 157 - memstress_set_random_seed(vm, p->random_seed); 158 156 memstress_set_write_percent(vm, p->write_percent); 159 157 160 158 guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift; ··· 341 343 .partition_vcpu_memory_access = true, 342 344 .backing_src = DEFAULT_VM_MEM_SRC, 343 345 .slots = 1, 344 - .random_seed = 1, 345 346 .write_percent = 100, 346 347 }; 347 348 int opt; 349 + 350 + /* Override the seed to be deterministic by default. */ 351 + guest_random_seed = 1; 348 352 349 353 dirty_log_manual_caps = 350 354 kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2); ··· 392 392 p.phys_offset = strtoull(optarg, NULL, 0); 393 393 break; 394 394 case 'r': 395 - p.random_seed = atoi_positive("Random seed", optarg); 395 + guest_random_seed = atoi_positive("Random seed", optarg); 396 396 break; 397 397 case 's': 398 398 p.backing_src = parse_backing_src_type(optarg);
+5 -21
tools/testing/selftests/kvm/dirty_log_test.c
··· 4 4 * 5 5 * Copyright (C) 2018, Red Hat, Inc. 6 6 */ 7 - 8 - #define _GNU_SOURCE /* for program_invocation_name */ 9 - 10 7 #include <stdio.h> 11 8 #include <stdlib.h> 12 9 #include <pthread.h> ··· 20 23 #include "test_util.h" 21 24 #include "guest_modes.h" 22 25 #include "processor.h" 26 + #include "ucall_common.h" 23 27 24 28 #define DIRTY_MEM_BITS 30 /* 1G */ 25 29 #define PAGE_SHIFT_4K 12 ··· 74 76 static uint64_t host_page_size; 75 77 static uint64_t guest_page_size; 76 78 static uint64_t guest_num_pages; 77 - static uint64_t random_array[TEST_PAGES_PER_LOOP]; 78 79 static uint64_t iteration; 79 80 80 81 /* ··· 106 109 */ 107 110 for (i = 0; i < guest_num_pages; i++) { 108 111 addr = guest_test_virt_mem + i * guest_page_size; 109 - *(uint64_t *)addr = READ_ONCE(iteration); 112 + vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration)); 110 113 } 111 114 112 115 while (true) { 113 116 for (i = 0; i < TEST_PAGES_PER_LOOP; i++) { 114 117 addr = guest_test_virt_mem; 115 - addr += (READ_ONCE(random_array[i]) % guest_num_pages) 118 + addr += (guest_random_u64(&guest_rng) % guest_num_pages) 116 119 * guest_page_size; 117 120 addr = align_down(addr, host_page_size); 118 - *(uint64_t *)addr = READ_ONCE(iteration); 121 + 122 + vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration)); 119 123 } 120 124 121 - /* Tell the host that we need more random numbers */ 122 125 GUEST_SYNC(1); 123 126 } 124 127 } ··· 505 508 mode->after_vcpu_run(vcpu, ret, err); 506 509 } 507 510 508 - static void generate_random_array(uint64_t *guest_array, uint64_t size) 509 - { 510 - uint64_t i; 511 - 512 - for (i = 0; i < size; i++) 513 - guest_array[i] = random(); 514 - } 515 - 516 511 static void *vcpu_worker(void *data) 517 512 { 518 513 int ret; 519 514 struct kvm_vcpu *vcpu = data; 520 - struct kvm_vm *vm = vcpu->vm; 521 - uint64_t *guest_array; 522 515 uint64_t pages_count = 0; 523 516 struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset) 524 517 + 
sizeof(sigset_t)); ··· 527 540 sigemptyset(sigset); 528 541 sigaddset(sigset, SIG_IPI); 529 542 530 - guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array); 531 - 532 543 while (!READ_ONCE(host_quit)) { 533 544 /* Clear any existing kick signals */ 534 - generate_random_array(guest_array, TEST_PAGES_PER_LOOP); 535 545 pages_count += TEST_PAGES_PER_LOOP; 536 546 /* Let the guest dirty the random pages */ 537 547 ret = __vcpu_run(vcpu);
+1 -3
tools/testing/selftests/kvm/guest_memfd_test.c
··· 4 4 * 5 5 * Author: Chao Peng <chao.p.peng@linux.intel.com> 6 6 */ 7 - 8 - #define _GNU_SOURCE 9 7 #include <stdlib.h> 10 8 #include <string.h> 11 9 #include <unistd.h> ··· 17 19 #include <sys/types.h> 18 20 #include <sys/stat.h> 19 21 22 + #include "kvm_util.h" 20 23 #include "test_util.h" 21 - #include "kvm_util_base.h" 22 24 23 25 static void test_file_read_write(int fd) 24 26 {
+1
tools/testing/selftests/kvm/guest_print_test.c
··· 13 13 #include "test_util.h" 14 14 #include "kvm_util.h" 15 15 #include "processor.h" 16 + #include "ucall_common.h" 16 17 17 18 struct guest_vals { 18 19 uint64_t a;
-3
tools/testing/selftests/kvm/hardware_disable_test.c
··· 4 4 * kvm_arch_hardware_disable is called and it attempts to unregister the user 5 5 * return notifiers. 6 6 */ 7 - 8 - #define _GNU_SOURCE 9 - 10 7 #include <fcntl.h> 11 8 #include <pthread.h> 12 9 #include <semaphore.h>
+2
tools/testing/selftests/kvm/include/aarch64/processor.h
··· 8 8 #define SELFTEST_KVM_PROCESSOR_H 9 9 10 10 #include "kvm_util.h" 11 + #include "ucall_common.h" 12 + 11 13 #include <linux/stringify.h> 12 14 #include <linux/types.h> 13 15 #include <asm/sysreg.h>
+1 -1
tools/testing/selftests/kvm/include/aarch64/ucall.h
··· 2 2 #ifndef SELFTEST_KVM_UCALL_H 3 3 #define SELFTEST_KVM_UCALL_H 4 4 5 - #include "kvm_util_base.h" 5 + #include "kvm_util.h" 6 6 7 7 #define UCALL_EXIT_REASON KVM_EXIT_MMIO 8 8
+1107 -4
tools/testing/selftests/kvm/include/kvm_util.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 - * tools/testing/selftests/kvm/include/kvm_util.h 4 - * 5 3 * Copyright (C) 2018, Google LLC. 6 4 */ 7 5 #ifndef SELFTEST_KVM_UTIL_H 8 6 #define SELFTEST_KVM_UTIL_H 9 7 10 - #include "kvm_util_base.h" 11 - #include "ucall_common.h" 8 + #include "test_util.h" 9 + 10 + #include <linux/compiler.h> 11 + #include "linux/hashtable.h" 12 + #include "linux/list.h" 13 + #include <linux/kernel.h> 14 + #include <linux/kvm.h> 15 + #include "linux/rbtree.h" 16 + #include <linux/types.h> 17 + 18 + #include <asm/atomic.h> 19 + #include <asm/kvm.h> 20 + 21 + #include <sys/ioctl.h> 22 + 23 + #include "kvm_util_arch.h" 24 + #include "kvm_util_types.h" 25 + #include "sparsebit.h" 26 + 27 + #define KVM_DEV_PATH "/dev/kvm" 28 + #define KVM_MAX_VCPUS 512 29 + 30 + #define NSEC_PER_SEC 1000000000L 31 + 32 + struct userspace_mem_region { 33 + struct kvm_userspace_memory_region2 region; 34 + struct sparsebit *unused_phy_pages; 35 + struct sparsebit *protected_phy_pages; 36 + int fd; 37 + off_t offset; 38 + enum vm_mem_backing_src_type backing_src_type; 39 + void *host_mem; 40 + void *host_alias; 41 + void *mmap_start; 42 + void *mmap_alias; 43 + size_t mmap_size; 44 + struct rb_node gpa_node; 45 + struct rb_node hva_node; 46 + struct hlist_node slot_node; 47 + }; 48 + 49 + struct kvm_vcpu { 50 + struct list_head list; 51 + uint32_t id; 52 + int fd; 53 + struct kvm_vm *vm; 54 + struct kvm_run *run; 55 + #ifdef __x86_64__ 56 + struct kvm_cpuid2 *cpuid; 57 + #endif 58 + struct kvm_dirty_gfn *dirty_gfns; 59 + uint32_t fetch_index; 60 + uint32_t dirty_gfns_count; 61 + }; 62 + 63 + struct userspace_mem_regions { 64 + struct rb_root gpa_tree; 65 + struct rb_root hva_tree; 66 + DECLARE_HASHTABLE(slot_hash, 9); 67 + }; 68 + 69 + enum kvm_mem_region_type { 70 + MEM_REGION_CODE, 71 + MEM_REGION_DATA, 72 + MEM_REGION_PT, 73 + MEM_REGION_TEST_DATA, 74 + NR_MEM_REGIONS, 75 + }; 76 + 77 + struct kvm_vm { 78 + int mode; 79 + unsigned long type; 
80 + int kvm_fd; 81 + int fd; 82 + unsigned int pgtable_levels; 83 + unsigned int page_size; 84 + unsigned int page_shift; 85 + unsigned int pa_bits; 86 + unsigned int va_bits; 87 + uint64_t max_gfn; 88 + struct list_head vcpus; 89 + struct userspace_mem_regions regions; 90 + struct sparsebit *vpages_valid; 91 + struct sparsebit *vpages_mapped; 92 + bool has_irqchip; 93 + bool pgd_created; 94 + vm_paddr_t ucall_mmio_addr; 95 + vm_paddr_t pgd; 96 + vm_vaddr_t handlers; 97 + uint32_t dirty_ring_size; 98 + uint64_t gpa_tag_mask; 99 + 100 + struct kvm_vm_arch arch; 101 + 102 + /* Cache of information for binary stats interface */ 103 + int stats_fd; 104 + struct kvm_stats_header stats_header; 105 + struct kvm_stats_desc *stats_desc; 106 + 107 + /* 108 + * KVM region slots. These are the default memslots used by page 109 + * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE] 110 + * memslot. 111 + */ 112 + uint32_t memslots[NR_MEM_REGIONS]; 113 + }; 114 + 115 + struct vcpu_reg_sublist { 116 + const char *name; 117 + long capability; 118 + int feature; 119 + int feature_type; 120 + bool finalize; 121 + __u64 *regs; 122 + __u64 regs_n; 123 + __u64 *rejects_set; 124 + __u64 rejects_set_n; 125 + __u64 *skips_set; 126 + __u64 skips_set_n; 127 + }; 128 + 129 + struct vcpu_reg_list { 130 + char *name; 131 + struct vcpu_reg_sublist sublists[]; 132 + }; 133 + 134 + #define for_each_sublist(c, s) \ 135 + for ((s) = &(c)->sublists[0]; (s)->regs; ++(s)) 136 + 137 + #define kvm_for_each_vcpu(vm, i, vcpu) \ 138 + for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \ 139 + if (!((vcpu) = vm->vcpus[i])) \ 140 + continue; \ 141 + else 142 + 143 + struct userspace_mem_region * 144 + memslot2region(struct kvm_vm *vm, uint32_t memslot); 145 + 146 + static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm, 147 + enum kvm_mem_region_type type) 148 + { 149 + assert(type < NR_MEM_REGIONS); 150 + return memslot2region(vm, vm->memslots[type]); 151 + } 152 + 153 + /* 
Minimum allocated guest virtual and physical addresses */ 154 + #define KVM_UTIL_MIN_VADDR 0x2000 155 + #define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000 156 + 157 + #define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000 158 + #define DEFAULT_STACK_PGS 5 159 + 160 + enum vm_guest_mode { 161 + VM_MODE_P52V48_4K, 162 + VM_MODE_P52V48_16K, 163 + VM_MODE_P52V48_64K, 164 + VM_MODE_P48V48_4K, 165 + VM_MODE_P48V48_16K, 166 + VM_MODE_P48V48_64K, 167 + VM_MODE_P40V48_4K, 168 + VM_MODE_P40V48_16K, 169 + VM_MODE_P40V48_64K, 170 + VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */ 171 + VM_MODE_P47V64_4K, 172 + VM_MODE_P44V64_4K, 173 + VM_MODE_P36V48_4K, 174 + VM_MODE_P36V48_16K, 175 + VM_MODE_P36V48_64K, 176 + VM_MODE_P36V47_16K, 177 + NUM_VM_MODES, 178 + }; 179 + 180 + struct vm_shape { 181 + uint32_t type; 182 + uint8_t mode; 183 + uint8_t pad0; 184 + uint16_t pad1; 185 + }; 186 + 187 + kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t)); 188 + 189 + #define VM_TYPE_DEFAULT 0 190 + 191 + #define VM_SHAPE(__mode) \ 192 + ({ \ 193 + struct vm_shape shape = { \ 194 + .mode = (__mode), \ 195 + .type = VM_TYPE_DEFAULT \ 196 + }; \ 197 + \ 198 + shape; \ 199 + }) 200 + 201 + #if defined(__aarch64__) 202 + 203 + extern enum vm_guest_mode vm_mode_default; 204 + 205 + #define VM_MODE_DEFAULT vm_mode_default 206 + #define MIN_PAGE_SHIFT 12U 207 + #define ptes_per_page(page_size) ((page_size) / 8) 208 + 209 + #elif defined(__x86_64__) 210 + 211 + #define VM_MODE_DEFAULT VM_MODE_PXXV48_4K 212 + #define MIN_PAGE_SHIFT 12U 213 + #define ptes_per_page(page_size) ((page_size) / 8) 214 + 215 + #elif defined(__s390x__) 216 + 217 + #define VM_MODE_DEFAULT VM_MODE_P44V64_4K 218 + #define MIN_PAGE_SHIFT 12U 219 + #define ptes_per_page(page_size) ((page_size) / 16) 220 + 221 + #elif defined(__riscv) 222 + 223 + #if __riscv_xlen == 32 224 + #error "RISC-V 32-bit kvm selftests not supported" 225 + #endif 226 + 227 + #define VM_MODE_DEFAULT VM_MODE_P40V48_4K 228 + #define MIN_PAGE_SHIFT 12U 
229 + #define ptes_per_page(page_size) ((page_size) / 8) 230 + 231 + #endif 232 + 233 + #define VM_SHAPE_DEFAULT VM_SHAPE(VM_MODE_DEFAULT) 234 + 235 + #define MIN_PAGE_SIZE (1U << MIN_PAGE_SHIFT) 236 + #define PTES_PER_MIN_PAGE ptes_per_page(MIN_PAGE_SIZE) 237 + 238 + struct vm_guest_mode_params { 239 + unsigned int pa_bits; 240 + unsigned int va_bits; 241 + unsigned int page_size; 242 + unsigned int page_shift; 243 + }; 244 + extern const struct vm_guest_mode_params vm_guest_mode_params[]; 245 + 246 + int open_path_or_exit(const char *path, int flags); 247 + int open_kvm_dev_path_or_exit(void); 248 + 249 + bool get_kvm_param_bool(const char *param); 250 + bool get_kvm_intel_param_bool(const char *param); 251 + bool get_kvm_amd_param_bool(const char *param); 252 + 253 + int get_kvm_param_integer(const char *param); 254 + int get_kvm_intel_param_integer(const char *param); 255 + int get_kvm_amd_param_integer(const char *param); 256 + 257 + unsigned int kvm_check_cap(long cap); 258 + 259 + static inline bool kvm_has_cap(long cap) 260 + { 261 + return kvm_check_cap(cap); 262 + } 263 + 264 + #define __KVM_SYSCALL_ERROR(_name, _ret) \ 265 + "%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno) 266 + 267 + /* 268 + * Use the "inner", double-underscore macro when reporting errors from within 269 + * other macros so that the name of ioctl() and not its literal numeric value 270 + * is printed on error. The "outer" macro is strongly preferred when reporting 271 + * errors "directly", i.e. without an additional layer of macros, as it reduces 272 + * the probability of passing in the wrong string. 
273 + */ 274 + #define __KVM_IOCTL_ERROR(_name, _ret) __KVM_SYSCALL_ERROR(_name, _ret) 275 + #define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret) 276 + 277 + #define kvm_do_ioctl(fd, cmd, arg) \ 278 + ({ \ 279 + kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd)); \ 280 + ioctl(fd, cmd, arg); \ 281 + }) 282 + 283 + #define __kvm_ioctl(kvm_fd, cmd, arg) \ 284 + kvm_do_ioctl(kvm_fd, cmd, arg) 285 + 286 + #define kvm_ioctl(kvm_fd, cmd, arg) \ 287 + ({ \ 288 + int ret = __kvm_ioctl(kvm_fd, cmd, arg); \ 289 + \ 290 + TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret)); \ 291 + }) 292 + 293 + static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { } 294 + 295 + #define __vm_ioctl(vm, cmd, arg) \ 296 + ({ \ 297 + static_assert_is_vm(vm); \ 298 + kvm_do_ioctl((vm)->fd, cmd, arg); \ 299 + }) 300 + 301 + /* 302 + * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if 303 + * the ioctl() failed because KVM killed/bugged the VM. To detect a dead VM, 304 + * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before 305 + * selftests existed and (b) should never outright fail, i.e. is supposed to 306 + * return 0 or 1. If KVM kills a VM, KVM returns -EIO for all ioctl()s for the 307 + * VM and its vCPUs, including KVM_CHECK_EXTENSION. 
308 + */ 309 + #define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm) \ 310 + do { \ 311 + int __errno = errno; \ 312 + \ 313 + static_assert_is_vm(vm); \ 314 + \ 315 + if (cond) \ 316 + break; \ 317 + \ 318 + if (errno == EIO && \ 319 + __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) { \ 320 + TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO"); \ 321 + TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues"); \ 322 + } \ 323 + errno = __errno; \ 324 + TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret)); \ 325 + } while (0) 326 + 327 + #define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm) \ 328 + __TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm) 329 + 330 + #define vm_ioctl(vm, cmd, arg) \ 331 + ({ \ 332 + int ret = __vm_ioctl(vm, cmd, arg); \ 333 + \ 334 + __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm); \ 335 + }) 336 + 337 + static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { } 338 + 339 + #define __vcpu_ioctl(vcpu, cmd, arg) \ 340 + ({ \ 341 + static_assert_is_vcpu(vcpu); \ 342 + kvm_do_ioctl((vcpu)->fd, cmd, arg); \ 343 + }) 344 + 345 + #define vcpu_ioctl(vcpu, cmd, arg) \ 346 + ({ \ 347 + int ret = __vcpu_ioctl(vcpu, cmd, arg); \ 348 + \ 349 + __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm); \ 350 + }) 351 + 352 + /* 353 + * Looks up and returns the value corresponding to the capability 354 + * (KVM_CAP_*) given by cap. 
355 + */ 356 + static inline int vm_check_cap(struct kvm_vm *vm, long cap) 357 + { 358 + int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap); 359 + 360 + TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm); 361 + return ret; 362 + } 363 + 364 + static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) 365 + { 366 + struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } }; 367 + 368 + return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); 369 + } 370 + static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) 371 + { 372 + struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } }; 373 + 374 + vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); 375 + } 376 + 377 + static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa, 378 + uint64_t size, uint64_t attributes) 379 + { 380 + struct kvm_memory_attributes attr = { 381 + .attributes = attributes, 382 + .address = gpa, 383 + .size = size, 384 + .flags = 0, 385 + }; 386 + 387 + /* 388 + * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes. These flows 389 + * need significant enhancements to support multiple attributes. 
390 + */ 391 + TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE, 392 + "Update me to support multiple attributes!"); 393 + 394 + vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr); 395 + } 396 + 397 + 398 + static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa, 399 + uint64_t size) 400 + { 401 + vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE); 402 + } 403 + 404 + static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa, 405 + uint64_t size) 406 + { 407 + vm_set_memory_attributes(vm, gpa, size, 0); 408 + } 409 + 410 + void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size, 411 + bool punch_hole); 412 + 413 + static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa, 414 + uint64_t size) 415 + { 416 + vm_guest_mem_fallocate(vm, gpa, size, true); 417 + } 418 + 419 + static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa, 420 + uint64_t size) 421 + { 422 + vm_guest_mem_fallocate(vm, gpa, size, false); 423 + } 424 + 425 + void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size); 426 + const char *vm_guest_mode_string(uint32_t i); 427 + 428 + void kvm_vm_free(struct kvm_vm *vmp); 429 + void kvm_vm_restart(struct kvm_vm *vmp); 430 + void kvm_vm_release(struct kvm_vm *vmp); 431 + int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva, 432 + size_t len); 433 + void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename); 434 + int kvm_memfd_alloc(size_t size, bool hugepages); 435 + 436 + void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); 437 + 438 + static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) 439 + { 440 + struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot }; 441 + 442 + vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args); 443 + } 444 + 445 + static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, 446 + uint64_t first_page, uint32_t num_pages) 447 + { 
448 + struct kvm_clear_dirty_log args = { 449 + .dirty_bitmap = log, 450 + .slot = slot, 451 + .first_page = first_page, 452 + .num_pages = num_pages 453 + }; 454 + 455 + vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args); 456 + } 457 + 458 + static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm) 459 + { 460 + return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL); 461 + } 462 + 463 + static inline int vm_get_stats_fd(struct kvm_vm *vm) 464 + { 465 + int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL); 466 + 467 + TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm); 468 + return fd; 469 + } 470 + 471 + static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header) 472 + { 473 + ssize_t ret; 474 + 475 + ret = pread(stats_fd, header, sizeof(*header), 0); 476 + TEST_ASSERT(ret == sizeof(*header), 477 + "Failed to read '%lu' header bytes, ret = '%ld'", 478 + sizeof(*header), ret); 479 + } 480 + 481 + struct kvm_stats_desc *read_stats_descriptors(int stats_fd, 482 + struct kvm_stats_header *header); 483 + 484 + static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header) 485 + { 486 + /* 487 + * The base size of the descriptor is defined by KVM's ABI, but the 488 + * size of the name field is variable, as far as KVM's ABI is 489 + * concerned. For a given instance of KVM, the name field is the same 490 + * size for all stats and is provided in the overall stats header. 491 + */ 492 + return sizeof(struct kvm_stats_desc) + header->name_size; 493 + } 494 + 495 + static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats, 496 + int index, 497 + struct kvm_stats_header *header) 498 + { 499 + /* 500 + * Note, size_desc includes the size of the name field, which is 501 + * variable. i.e. this is NOT equivalent to &stats_desc[i]. 
502 + */ 503 + return (void *)stats + index * get_stats_descriptor_size(header); 504 + } 505 + 506 + void read_stat_data(int stats_fd, struct kvm_stats_header *header, 507 + struct kvm_stats_desc *desc, uint64_t *data, 508 + size_t max_elements); 509 + 510 + void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data, 511 + size_t max_elements); 512 + 513 + static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name) 514 + { 515 + uint64_t data; 516 + 517 + __vm_get_stat(vm, stat_name, &data, 1); 518 + return data; 519 + } 520 + 521 + void vm_create_irqchip(struct kvm_vm *vm); 522 + 523 + static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, 524 + uint64_t flags) 525 + { 526 + struct kvm_create_guest_memfd guest_memfd = { 527 + .size = size, 528 + .flags = flags, 529 + }; 530 + 531 + return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd); 532 + } 533 + 534 + static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, 535 + uint64_t flags) 536 + { 537 + int fd = __vm_create_guest_memfd(vm, size, flags); 538 + 539 + TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd)); 540 + return fd; 541 + } 542 + 543 + void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 544 + uint64_t gpa, uint64_t size, void *hva); 545 + int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 546 + uint64_t gpa, uint64_t size, void *hva); 547 + void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 548 + uint64_t gpa, uint64_t size, void *hva, 549 + uint32_t guest_memfd, uint64_t guest_memfd_offset); 550 + int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, 551 + uint64_t gpa, uint64_t size, void *hva, 552 + uint32_t guest_memfd, uint64_t guest_memfd_offset); 553 + 554 + void vm_userspace_mem_region_add(struct kvm_vm *vm, 555 + enum vm_mem_backing_src_type src_type, 556 + uint64_t guest_paddr, 
uint32_t slot, uint64_t npages, 557 + uint32_t flags); 558 + void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, 559 + uint64_t guest_paddr, uint32_t slot, uint64_t npages, 560 + uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset); 561 + 562 + #ifndef vm_arch_has_protected_memory 563 + static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm) 564 + { 565 + return false; 566 + } 567 + #endif 568 + 569 + void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags); 570 + void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa); 571 + void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot); 572 + struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id); 573 + void vm_populate_vaddr_bitmap(struct kvm_vm *vm); 574 + vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); 575 + vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); 576 + vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, 577 + enum kvm_mem_region_type type); 578 + vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, 579 + vm_vaddr_t vaddr_min, 580 + enum kvm_mem_region_type type); 581 + vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages); 582 + vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, 583 + enum kvm_mem_region_type type); 584 + vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm); 585 + 586 + void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, 587 + unsigned int npages); 588 + void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa); 589 + void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva); 590 + vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva); 591 + void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa); 592 + 593 + #ifndef vcpu_arch_put_guest 594 + #define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0) 595 + #endif 596 + 597 + static inline vm_paddr_t 
vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa) 598 + { 599 + return gpa & ~vm->gpa_tag_mask; 600 + } 601 + 602 + void vcpu_run(struct kvm_vcpu *vcpu); 603 + int _vcpu_run(struct kvm_vcpu *vcpu); 604 + 605 + static inline int __vcpu_run(struct kvm_vcpu *vcpu) 606 + { 607 + return __vcpu_ioctl(vcpu, KVM_RUN, NULL); 608 + } 609 + 610 + void vcpu_run_complete_io(struct kvm_vcpu *vcpu); 611 + struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu); 612 + 613 + static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap, 614 + uint64_t arg0) 615 + { 616 + struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } }; 617 + 618 + vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap); 619 + } 620 + 621 + static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu, 622 + struct kvm_guest_debug *debug) 623 + { 624 + vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug); 625 + } 626 + 627 + static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu, 628 + struct kvm_mp_state *mp_state) 629 + { 630 + vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state); 631 + } 632 + static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu, 633 + struct kvm_mp_state *mp_state) 634 + { 635 + vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state); 636 + } 637 + 638 + static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 639 + { 640 + vcpu_ioctl(vcpu, KVM_GET_REGS, regs); 641 + } 642 + 643 + static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 644 + { 645 + vcpu_ioctl(vcpu, KVM_SET_REGS, regs); 646 + } 647 + static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 648 + { 649 + vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs); 650 + 651 + } 652 + static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 653 + { 654 + vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs); 655 + } 656 + static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 657 + { 658 + return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs); 
659 + } 660 + static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 661 + { 662 + vcpu_ioctl(vcpu, KVM_GET_FPU, fpu); 663 + } 664 + static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 665 + { 666 + vcpu_ioctl(vcpu, KVM_SET_FPU, fpu); 667 + } 668 + 669 + static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr) 670 + { 671 + struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr }; 672 + 673 + return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg); 674 + } 675 + static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) 676 + { 677 + struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; 678 + 679 + return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg); 680 + } 681 + static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr) 682 + { 683 + struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr }; 684 + 685 + vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg); 686 + } 687 + static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) 688 + { 689 + struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; 690 + 691 + vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg); 692 + } 693 + 694 + #ifdef __KVM_HAVE_VCPU_EVENTS 695 + static inline void vcpu_events_get(struct kvm_vcpu *vcpu, 696 + struct kvm_vcpu_events *events) 697 + { 698 + vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events); 699 + } 700 + static inline void vcpu_events_set(struct kvm_vcpu *vcpu, 701 + struct kvm_vcpu_events *events) 702 + { 703 + vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events); 704 + } 705 + #endif 706 + #ifdef __x86_64__ 707 + static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu, 708 + struct kvm_nested_state *state) 709 + { 710 + vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state); 711 + } 712 + static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu, 713 + struct kvm_nested_state *state) 714 + { 715 + return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state); 716 + } 
717 + 718 + static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu, 719 + struct kvm_nested_state *state) 720 + { 721 + vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state); 722 + } 723 + #endif 724 + static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu) 725 + { 726 + int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL); 727 + 728 + TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_CHECK_EXTENSION, fd, vcpu->vm); 729 + return fd; 730 + } 731 + 732 + int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr); 733 + 734 + static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr) 735 + { 736 + int ret = __kvm_has_device_attr(dev_fd, group, attr); 737 + 738 + TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno); 739 + } 740 + 741 + int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val); 742 + 743 + static inline void kvm_device_attr_get(int dev_fd, uint32_t group, 744 + uint64_t attr, void *val) 745 + { 746 + int ret = __kvm_device_attr_get(dev_fd, group, attr, val); 747 + 748 + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret)); 749 + } 750 + 751 + int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val); 752 + 753 + static inline void kvm_device_attr_set(int dev_fd, uint32_t group, 754 + uint64_t attr, void *val) 755 + { 756 + int ret = __kvm_device_attr_set(dev_fd, group, attr, val); 757 + 758 + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret)); 759 + } 760 + 761 + static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group, 762 + uint64_t attr) 763 + { 764 + return __kvm_has_device_attr(vcpu->fd, group, attr); 765 + } 766 + 767 + static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group, 768 + uint64_t attr) 769 + { 770 + kvm_has_device_attr(vcpu->fd, group, attr); 771 + } 772 + 773 + static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group, 774 + uint64_t attr, void *val) 775 + { 776 
+ return __kvm_device_attr_get(vcpu->fd, group, attr, val); 777 + } 778 + 779 + static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group, 780 + uint64_t attr, void *val) 781 + { 782 + kvm_device_attr_get(vcpu->fd, group, attr, val); 783 + } 784 + 785 + static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group, 786 + uint64_t attr, void *val) 787 + { 788 + return __kvm_device_attr_set(vcpu->fd, group, attr, val); 789 + } 790 + 791 + static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group, 792 + uint64_t attr, void *val) 793 + { 794 + kvm_device_attr_set(vcpu->fd, group, attr, val); 795 + } 796 + 797 + int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type); 798 + int __kvm_create_device(struct kvm_vm *vm, uint64_t type); 799 + 800 + static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type) 801 + { 802 + int fd = __kvm_create_device(vm, type); 803 + 804 + TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd)); 805 + return fd; 806 + } 807 + 808 + void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu); 809 + 810 + /* 811 + * VM VCPU Args Set 812 + * 813 + * Input Args: 814 + * vm - Virtual Machine 815 + * num - number of arguments 816 + * ... - arguments, each of type uint64_t 817 + * 818 + * Output Args: None 819 + * 820 + * Return: None 821 + * 822 + * Sets the first @num input parameters for the function at @vcpu's entry point, 823 + * per the C calling convention of the architecture, to the values given as 824 + * variable args. Each of the variable args is expected to be of type uint64_t. 825 + * The maximum @num can be is specific to the architecture. 
826 + */ 827 + void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...); 828 + 829 + void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); 830 + int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); 831 + 832 + #define KVM_MAX_IRQ_ROUTES 4096 833 + 834 + struct kvm_irq_routing *kvm_gsi_routing_create(void); 835 + void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, 836 + uint32_t gsi, uint32_t pin); 837 + int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); 838 + void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); 839 + 840 + const char *exit_reason_str(unsigned int exit_reason); 841 + 842 + vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, 843 + uint32_t memslot); 844 + vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 845 + vm_paddr_t paddr_min, uint32_t memslot, 846 + bool protected); 847 + vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm); 848 + 849 + static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, 850 + vm_paddr_t paddr_min, uint32_t memslot) 851 + { 852 + /* 853 + * By default, allocate memory as protected for VMs that support 854 + * protected memory, as the majority of memory for such VMs is 855 + * protected, i.e. using shared memory is effectively opt-in. 856 + */ 857 + return __vm_phy_pages_alloc(vm, num, paddr_min, memslot, 858 + vm_arch_has_protected_memory(vm)); 859 + } 860 + 861 + /* 862 + * ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also 863 + * loads the test binary into guest memory and creates an IRQ chip (x86 only). 864 + * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to 865 + * calculate the amount of memory needed for per-vCPU data, e.g. stacks. 
866 + */ 867 + struct kvm_vm *____vm_create(struct vm_shape shape); 868 + struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, 869 + uint64_t nr_extra_pages); 870 + 871 + static inline struct kvm_vm *vm_create_barebones(void) 872 + { 873 + return ____vm_create(VM_SHAPE_DEFAULT); 874 + } 875 + 876 + static inline struct kvm_vm *vm_create_barebones_type(unsigned long type) 877 + { 878 + const struct vm_shape shape = { 879 + .mode = VM_MODE_DEFAULT, 880 + .type = type, 881 + }; 882 + 883 + return ____vm_create(shape); 884 + } 885 + 886 + static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus) 887 + { 888 + return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0); 889 + } 890 + 891 + struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus, 892 + uint64_t extra_mem_pages, 893 + void *guest_code, struct kvm_vcpu *vcpus[]); 894 + 895 + static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus, 896 + void *guest_code, 897 + struct kvm_vcpu *vcpus[]) 898 + { 899 + return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0, 900 + guest_code, vcpus); 901 + } 902 + 903 + 904 + struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape, 905 + struct kvm_vcpu **vcpu, 906 + uint64_t extra_mem_pages, 907 + void *guest_code); 908 + 909 + /* 910 + * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages 911 + * additional pages of guest memory. Returns the VM and vCPU (via out param). 
912 + */ 913 + static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, 914 + uint64_t extra_mem_pages, 915 + void *guest_code) 916 + { 917 + return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu, 918 + extra_mem_pages, guest_code); 919 + } 920 + 921 + static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, 922 + void *guest_code) 923 + { 924 + return __vm_create_with_one_vcpu(vcpu, 0, guest_code); 925 + } 926 + 927 + static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape, 928 + struct kvm_vcpu **vcpu, 929 + void *guest_code) 930 + { 931 + return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code); 932 + } 933 + 934 + struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm); 935 + 936 + void kvm_pin_this_task_to_pcpu(uint32_t pcpu); 937 + void kvm_print_vcpu_pinning_help(void); 938 + void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], 939 + int nr_vcpus); 940 + 941 + unsigned long vm_compute_max_gfn(struct kvm_vm *vm); 942 + unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size); 943 + unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages); 944 + unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages); 945 + static inline unsigned int 946 + vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) 947 + { 948 + unsigned int n; 949 + n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages)); 950 + #ifdef __s390x__ 951 + /* s390 requires 1M aligned guest sizes */ 952 + n = (n + 255) & ~255; 953 + #endif 954 + return n; 955 + } 956 + 957 + #define sync_global_to_guest(vm, g) ({ \ 958 + typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ 959 + memcpy(_p, &(g), sizeof(g)); \ 960 + }) 961 + 962 + #define sync_global_from_guest(vm, g) ({ \ 963 + typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ 964 + memcpy(&(g), _p, sizeof(g)); \ 965 + 
}) 966 + 967 + /* 968 + * Write a global value, but only in the VM's (guest's) domain. Primarily used 969 + * for "globals" that hold per-VM values (VMs always duplicate code and global 970 + * data into their own region of physical memory), but can be used anytime it's 971 + * undesirable to change the host's copy of the global. 972 + */ 973 + #define write_guest_global(vm, g, val) ({ \ 974 + typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ 975 + typeof(g) _val = val; \ 976 + \ 977 + memcpy(_p, &(_val), sizeof(g)); \ 978 + }) 979 + 980 + void assert_on_unhandled_exception(struct kvm_vcpu *vcpu); 981 + 982 + void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, 983 + uint8_t indent); 984 + 985 + static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu, 986 + uint8_t indent) 987 + { 988 + vcpu_arch_dump(stream, vcpu, indent); 989 + } 990 + 991 + /* 992 + * Adds a vCPU with reasonable defaults (e.g. a stack) 993 + * 994 + * Input Args: 995 + * vm - Virtual Machine 996 + * vcpu_id - The id of the VCPU to add to the VM. 997 + */ 998 + struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id); 999 + void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code); 1000 + 1001 + static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, 1002 + void *guest_code) 1003 + { 1004 + struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id); 1005 + 1006 + vcpu_arch_set_entry_point(vcpu, guest_code); 1007 + 1008 + return vcpu; 1009 + } 1010 + 1011 + /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. 
*/ 1012 + struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id); 1013 + 1014 + static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm, 1015 + uint32_t vcpu_id) 1016 + { 1017 + return vm_arch_vcpu_recreate(vm, vcpu_id); 1018 + } 1019 + 1020 + void vcpu_arch_free(struct kvm_vcpu *vcpu); 1021 + 1022 + void virt_arch_pgd_alloc(struct kvm_vm *vm); 1023 + 1024 + static inline void virt_pgd_alloc(struct kvm_vm *vm) 1025 + { 1026 + virt_arch_pgd_alloc(vm); 1027 + } 1028 + 1029 + /* 1030 + * VM Virtual Page Map 1031 + * 1032 + * Input Args: 1033 + * vm - Virtual Machine 1034 + * vaddr - VM Virtual Address 1035 + * paddr - VM Physical Address 1036 + * memslot - Memory region slot for new virtual translation tables 1037 + * 1038 + * Output Args: None 1039 + * 1040 + * Return: None 1041 + * 1042 + * Within @vm, creates a virtual translation for the page starting 1043 + * at @vaddr to the page starting at @paddr. 1044 + */ 1045 + void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr); 1046 + 1047 + static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) 1048 + { 1049 + virt_arch_pg_map(vm, vaddr, paddr); 1050 + } 1051 + 1052 + 1053 + /* 1054 + * Address Guest Virtual to Guest Physical 1055 + * 1056 + * Input Args: 1057 + * vm - Virtual Machine 1058 + * gva - VM virtual address 1059 + * 1060 + * Output Args: None 1061 + * 1062 + * Return: 1063 + * Equivalent VM physical address 1064 + * 1065 + * Returns the VM physical address of the translated VM virtual 1066 + * address given by @gva. 
1067 + */ 1068 + vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva); 1069 + 1070 + static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 1071 + { 1072 + return addr_arch_gva2gpa(vm, gva); 1073 + } 1074 + 1075 + /* 1076 + * Virtual Translation Tables Dump 1077 + * 1078 + * Input Args: 1079 + * stream - Output FILE stream 1080 + * vm - Virtual Machine 1081 + * indent - Left margin indent amount 1082 + * 1083 + * Output Args: None 1084 + * 1085 + * Return: None 1086 + * 1087 + * Dumps to the FILE stream given by @stream, the contents of all the 1088 + * virtual translation tables for the VM given by @vm. 1089 + */ 1090 + void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); 1091 + 1092 + static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) 1093 + { 1094 + virt_arch_dump(stream, vm, indent); 1095 + } 1096 + 1097 + 1098 + static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm) 1099 + { 1100 + return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0); 1101 + } 1102 + 1103 + /* 1104 + * Arch hook that is invoked via a constructor, i.e. before executing main(), 1105 + * to allow for arch-specific setup that is common to all tests, e.g. computing 1106 + * the default guest "mode". 1107 + */ 1108 + void kvm_selftest_arch_init(void); 1109 + 1110 + void kvm_arch_vm_post_create(struct kvm_vm *vm); 1111 + 1112 + bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr); 1113 + 1114 + uint32_t guest_get_vcpuid(void); 12 1115 13 1116 #endif /* SELFTEST_KVM_UTIL_H */
+9 -6
tools/testing/selftests/kvm/include/kvm_util_base.h
··· 27 27 28 28 /* 29 29 * Provide a version of static_assert() that is guaranteed to have an optional 30 - * message param. If _ISOC11_SOURCE is defined, glibc (/usr/include/assert.h) 31 - * #undefs and #defines static_assert() as a direct alias to _Static_assert(), 32 - * i.e. effectively makes the message mandatory. Many KVM selftests #define 33 - * _GNU_SOURCE for various reasons, and _GNU_SOURCE implies _ISOC11_SOURCE. As 34 - * a result, static_assert() behavior is non-deterministic and may or may not 35 - * require a message depending on #include order. 30 + * message param. _GNU_SOURCE is defined for all KVM selftests, _GNU_SOURCE 31 + * implies _ISOC11_SOURCE, and if _ISOC11_SOURCE is defined, glibc #undefs and 32 + * #defines static_assert() as a direct alias to _Static_assert() (see 33 + * usr/include/assert.h). Define a custom macro instead of redefining 34 + * static_assert() to avoid creating non-deterministic behavior that is 35 + * dependent on include order. 36 36 */ 37 37 #define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg) 38 38 #define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr) ··· 609 609 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva); 610 610 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa); 611 611 612 + #ifndef vcpu_arch_put_guest 613 + #define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0) 614 + #endif 612 615 613 616 static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa) 614 617 {
+20
tools/testing/selftests/kvm/include/kvm_util_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + #ifndef SELFTEST_KVM_UTIL_TYPES_H 3 + #define SELFTEST_KVM_UTIL_TYPES_H 4 + 5 + /* 6 + * Provide a version of static_assert() that is guaranteed to have an optional 7 + * message param. _GNU_SOURCE is defined for all KVM selftests, _GNU_SOURCE 8 + * implies _ISOC11_SOURCE, and if _ISOC11_SOURCE is defined, glibc #undefs and 9 + * #defines static_assert() as a direct alias to _Static_assert() (see 10 + * usr/include/assert.h). Define a custom macro instead of redefining 11 + * static_assert() to avoid creating non-deterministic behavior that is 12 + * dependent on include order. 13 + */ 14 + #define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg) 15 + #define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr) 16 + 17 + typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */ 18 + typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */ 19 + 20 + #endif /* SELFTEST_KVM_UTIL_TYPES_H */
-1
tools/testing/selftests/kvm/include/memstress.h
··· 62 62 void memstress_destroy_vm(struct kvm_vm *vm); 63 63 64 64 void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent); 65 - void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed); 66 65 void memstress_set_random_access(struct kvm_vm *vm, bool random_access); 67 66 68 67 void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *));
+1 -1
tools/testing/selftests/kvm/include/s390x/ucall.h
··· 2 2 #ifndef SELFTEST_KVM_UCALL_H 3 3 #define SELFTEST_KVM_UCALL_H 4 4 5 - #include "kvm_util_base.h" 5 + #include "kvm_util.h" 6 6 7 7 #define UCALL_EXIT_REASON KVM_EXIT_S390_SIEIC 8 8
+19
tools/testing/selftests/kvm/include/test_util.h
··· 91 91 uint32_t seed; 92 92 }; 93 93 94 + extern uint32_t guest_random_seed; 95 + extern struct guest_random_state guest_rng; 96 + 94 97 struct guest_random_state new_guest_random_state(uint32_t seed); 95 98 uint32_t guest_random_u32(struct guest_random_state *state); 99 + 100 + static inline bool __guest_random_bool(struct guest_random_state *state, 101 + uint8_t percent) 102 + { 103 + return (guest_random_u32(state) % 100) < percent; 104 + } 105 + 106 + static inline bool guest_random_bool(struct guest_random_state *state) 107 + { 108 + return __guest_random_bool(state, 50); 109 + } 110 + 111 + static inline uint64_t guest_random_u64(struct guest_random_state *state) 112 + { 113 + return ((uint64_t)guest_random_u32(state) << 32) | guest_random_u32(state); 114 + } 96 115 97 116 enum vm_mem_backing_src_type { 98 117 VM_MEM_SRC_ANONYMOUS,
-3
tools/testing/selftests/kvm/include/userfaultfd_util.h
··· 5 5 * Copyright (C) 2018, Red Hat, Inc. 6 6 * Copyright (C) 2019-2022 Google LLC 7 7 */ 8 - 9 - #define _GNU_SOURCE /* for pipe2 */ 10 - 11 8 #include <inttypes.h> 12 9 #include <time.h> 13 10 #include <pthread.h>
+28
tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h
··· 5 5 #include <stdbool.h> 6 6 #include <stdint.h> 7 7 8 + #include "kvm_util_types.h" 9 + #include "test_util.h" 10 + 11 + extern bool is_forced_emulation_enabled; 12 + 8 13 struct kvm_vm_arch { 14 + vm_vaddr_t gdt; 15 + vm_vaddr_t tss; 16 + vm_vaddr_t idt; 17 + 9 18 uint64_t c_bit; 10 19 uint64_t s_bit; 11 20 int sev_fd; ··· 28 19 29 20 #define vm_arch_has_protected_memory(vm) \ 30 21 __vm_arch_has_protected_memory(&(vm)->arch) 22 + 23 + #define vcpu_arch_put_guest(mem, __val) \ 24 + do { \ 25 + const typeof(mem) val = (__val); \ 26 + \ 27 + if (!is_forced_emulation_enabled || guest_random_bool(&guest_rng)) { \ 28 + (mem) = val; \ 29 + } else if (guest_random_bool(&guest_rng)) { \ 30 + __asm__ __volatile__(KVM_FEP "mov %1, %0" \ 31 + : "+m" (mem) \ 32 + : "r" (val) : "memory"); \ 33 + } else { \ 34 + uint64_t __old = READ_ONCE(mem); \ 35 + \ 36 + __asm__ __volatile__(KVM_FEP LOCK_PREFIX "cmpxchg %[new], %[ptr]" \ 37 + : [ptr] "+m" (mem), [old] "+a" (__old) \ 38 + : [new]"r" (val) : "memory", "cc"); \ 39 + } \ 40 + } while (0) 31 41 32 42 #endif // SELFTEST_KVM_UTIL_ARCH_H
+2 -3
tools/testing/selftests/kvm/include/x86_64/processor.h
··· 18 18 #include <linux/kvm_para.h> 19 19 #include <linux/stringify.h> 20 20 21 - #include "../kvm_util.h" 21 + #include "kvm_util.h" 22 + #include "ucall_common.h" 22 23 23 24 extern bool host_cpu_is_intel; 24 25 extern bool host_cpu_is_amd; ··· 1134 1133 uint32_t offset2; uint32_t reserved; 1135 1134 }; 1136 1135 1137 - void vm_init_descriptor_tables(struct kvm_vm *vm); 1138 - void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu); 1139 1136 void vm_install_exception_handler(struct kvm_vm *vm, int vector, 1140 1137 void (*handler)(struct ex_regs *)); 1141 1138
+1 -1
tools/testing/selftests/kvm/include/x86_64/ucall.h
··· 2 2 #ifndef SELFTEST_KVM_UCALL_H 3 3 #define SELFTEST_KVM_UCALL_H 4 4 5 - #include "kvm_util_base.h" 5 + #include "kvm_util.h" 6 6 7 7 #define UCALL_EXIT_REASON KVM_EXIT_IO 8 8
-2
tools/testing/selftests/kvm/kvm_binary_stats_test.c
··· 6 6 * 7 7 * Test the fd-based interface for KVM statistics. 8 8 */ 9 - 10 - #define _GNU_SOURCE /* for program_invocation_short_name */ 11 9 #include <fcntl.h> 12 10 #include <stdio.h> 13 11 #include <stdlib.h>
-2
tools/testing/selftests/kvm/kvm_create_max_vcpus.c
··· 6 6 * 7 7 * Test for KVM_CAP_MAX_VCPUS and KVM_CAP_MAX_VCPU_ID. 8 8 */ 9 - 10 - #define _GNU_SOURCE /* for program_invocation_short_name */ 11 9 #include <fcntl.h> 12 10 #include <stdio.h> 13 11 #include <stdlib.h>
+1 -3
tools/testing/selftests/kvm/kvm_page_table_test.c
··· 8 8 * page size have been pre-allocated on your system, if you are planning to 9 9 * use hugepages to back the guest memory for testing. 10 10 */ 11 - 12 - #define _GNU_SOURCE /* for program_invocation_name */ 13 - 14 11 #include <stdio.h> 15 12 #include <stdlib.h> 16 13 #include <time.h> ··· 18 21 #include "kvm_util.h" 19 22 #include "processor.h" 20 23 #include "guest_modes.h" 24 + #include "ucall_common.h" 21 25 22 26 #define TEST_MEM_SLOT_INDEX 1 23 27
+2
tools/testing/selftests/kvm/lib/aarch64/processor.c
··· 11 11 #include "guest_modes.h" 12 12 #include "kvm_util.h" 13 13 #include "processor.h" 14 + #include "ucall_common.h" 15 + 14 16 #include <linux/bitfield.h> 15 17 #include <linux/sizes.h> 16 18
-3
tools/testing/selftests/kvm/lib/assert.c
··· 4 4 * 5 5 * Copyright (C) 2018, Google LLC. 6 6 */ 7 - 8 - #define _GNU_SOURCE /* for getline(3) and strchrnul(3)*/ 9 - 10 7 #include "test_util.h" 11 8 12 9 #include <execinfo.h>
+10 -2
tools/testing/selftests/kvm/lib/kvm_util.c
··· 4 4 * 5 5 * Copyright (C) 2018, Google LLC. 6 6 */ 7 - 8 - #define _GNU_SOURCE /* for program_invocation_name */ 9 7 #include "test_util.h" 10 8 #include "kvm_util.h" 11 9 #include "processor.h" 10 + #include "ucall_common.h" 12 11 13 12 #include <assert.h> 14 13 #include <sched.h> ··· 18 19 #include <linux/kernel.h> 19 20 20 21 #define KVM_UTIL_MIN_PFN 2 22 + 23 + uint32_t guest_random_seed; 24 + struct guest_random_state guest_rng; 21 25 22 26 static int vcpu_mmap_sz(void); 23 27 ··· 433 431 */ 434 432 slot0 = memslot2region(vm, 0); 435 433 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); 434 + 435 + pr_info("Random seed: 0x%x\n", guest_random_seed); 436 + guest_rng = new_guest_random_state(guest_random_seed); 437 + sync_global_to_guest(vm, guest_rng); 436 438 437 439 kvm_arch_vm_post_create(vm); 438 440 ··· 2318 2312 { 2319 2313 /* Tell stdout not to buffer its content. */ 2320 2314 setbuf(stdout, NULL); 2315 + 2316 + guest_random_seed = random(); 2321 2317 2322 2318 kvm_selftest_arch_init(); 2323 2319 }
+3 -10
tools/testing/selftests/kvm/lib/memstress.c
··· 2 2 /* 3 3 * Copyright (C) 2020, Google LLC. 4 4 */ 5 - #define _GNU_SOURCE 6 - 7 5 #include <inttypes.h> 8 6 #include <linux/bitmap.h> 9 7 10 8 #include "kvm_util.h" 11 9 #include "memstress.h" 12 10 #include "processor.h" 11 + #include "ucall_common.h" 13 12 14 13 struct memstress_args memstress_args; 15 14 ··· 55 56 uint64_t page; 56 57 int i; 57 58 58 - rand_state = new_guest_random_state(args->random_seed + vcpu_idx); 59 + rand_state = new_guest_random_state(guest_random_seed + vcpu_idx); 59 60 60 61 gva = vcpu_args->gva; 61 62 pages = vcpu_args->pages; ··· 75 76 76 77 addr = gva + (page * args->guest_page_size); 77 78 78 - if (guest_random_u32(&rand_state) % 100 < args->write_percent) 79 + if (__guest_random_bool(&rand_state, args->write_percent)) 79 80 *(uint64_t *)addr = 0x0123456789ABCDEF; 80 81 else 81 82 READ_ONCE(*(uint64_t *)addr); ··· 240 241 { 241 242 memstress_args.write_percent = write_percent; 242 243 sync_global_to_guest(vm, memstress_args.write_percent); 243 - } 244 - 245 - void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed) 246 - { 247 - memstress_args.random_seed = random_seed; 248 - sync_global_to_guest(vm, memstress_args.random_seed); 249 244 } 250 245 251 246 void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
+1
tools/testing/selftests/kvm/lib/riscv/processor.c
··· 10 10 11 11 #include "kvm_util.h" 12 12 #include "processor.h" 13 + #include "ucall_common.h" 13 14 14 15 #define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000 15 16
-2
tools/testing/selftests/kvm/lib/test_util.c
··· 4 4 * 5 5 * Copyright (C) 2020, Google LLC. 6 6 */ 7 - 8 - #define _GNU_SOURCE 9 7 #include <stdio.h> 10 8 #include <stdarg.h> 11 9 #include <assert.h>
+4 -1
tools/testing/selftests/kvm/lib/ucall_common.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - #include "kvm_util.h" 3 2 #include "linux/types.h" 4 3 #include "linux/bitmap.h" 5 4 #include "linux/atomic.h" 5 + 6 + #include "kvm_util.h" 7 + #include "ucall_common.h" 8 + 6 9 7 10 #define GUEST_UCALL_FAILED -1 8 11
-3
tools/testing/selftests/kvm/lib/userfaultfd_util.c
··· 6 6 * Copyright (C) 2018, Red Hat, Inc. 7 7 * Copyright (C) 2019-2022 Google LLC 8 8 */ 9 - 10 - #define _GNU_SOURCE /* for pipe2 */ 11 - 12 9 #include <inttypes.h> 13 10 #include <stdio.h> 14 11 #include <stdlib.h>
+127 -177
tools/testing/selftests/kvm/lib/x86_64/processor.c
··· 15 15 #define NUM_INTERRUPTS 256 16 16 #endif 17 17 18 - #define DEFAULT_CODE_SELECTOR 0x8 19 - #define DEFAULT_DATA_SELECTOR 0x10 18 + #define KERNEL_CS 0x8 19 + #define KERNEL_DS 0x10 20 + #define KERNEL_TSS 0x18 20 21 21 22 #define MAX_NR_CPUID_ENTRIES 100 22 23 23 24 vm_vaddr_t exception_handlers; 24 25 bool host_cpu_is_amd; 25 26 bool host_cpu_is_intel; 27 + bool is_forced_emulation_enabled; 26 28 27 29 static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent) 28 30 { ··· 419 417 420 418 static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp) 421 419 { 422 - void *gdt = addr_gva2hva(vm, vm->gdt); 420 + void *gdt = addr_gva2hva(vm, vm->arch.gdt); 423 421 struct desc64 *desc = gdt + (segp->selector >> 3) * 8; 424 422 425 423 desc->limit0 = segp->limit & 0xFFFF; ··· 439 437 desc->base3 = segp->base >> 32; 440 438 } 441 439 442 - 443 - /* 444 - * Set Long Mode Flat Kernel Code Segment 445 - * 446 - * Input Args: 447 - * vm - VM whose GDT is being filled, or NULL to only write segp 448 - * selector - selector value 449 - * 450 - * Output Args: 451 - * segp - Pointer to KVM segment 452 - * 453 - * Return: None 454 - * 455 - * Sets up the KVM segment pointed to by @segp, to be a code segment 456 - * with the selector value given by @selector. 
457 - */ 458 - static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector, 459 - struct kvm_segment *segp) 440 + static void kvm_seg_set_kernel_code_64bit(struct kvm_segment *segp) 460 441 { 461 442 memset(segp, 0, sizeof(*segp)); 462 - segp->selector = selector; 443 + segp->selector = KERNEL_CS; 463 444 segp->limit = 0xFFFFFFFFu; 464 445 segp->s = 0x1; /* kTypeCodeData */ 465 446 segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed ··· 451 466 segp->g = true; 452 467 segp->l = true; 453 468 segp->present = 1; 454 - if (vm) 455 - kvm_seg_fill_gdt_64bit(vm, segp); 456 469 } 457 470 458 - /* 459 - * Set Long Mode Flat Kernel Data Segment 460 - * 461 - * Input Args: 462 - * vm - VM whose GDT is being filled, or NULL to only write segp 463 - * selector - selector value 464 - * 465 - * Output Args: 466 - * segp - Pointer to KVM segment 467 - * 468 - * Return: None 469 - * 470 - * Sets up the KVM segment pointed to by @segp, to be a data segment 471 - * with the selector value given by @selector. 
472 - */ 473 - static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector, 474 - struct kvm_segment *segp) 471 + static void kvm_seg_set_kernel_data_64bit(struct kvm_segment *segp) 475 472 { 476 473 memset(segp, 0, sizeof(*segp)); 477 - segp->selector = selector; 474 + segp->selector = KERNEL_DS; 478 475 segp->limit = 0xFFFFFFFFu; 479 476 segp->s = 0x1; /* kTypeCodeData */ 480 477 segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed ··· 464 497 */ 465 498 segp->g = true; 466 499 segp->present = true; 467 - if (vm) 468 - kvm_seg_fill_gdt_64bit(vm, segp); 469 500 } 470 501 471 502 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) ··· 481 516 return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level)); 482 517 } 483 518 484 - static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt) 519 + static void kvm_seg_set_tss_64bit(vm_vaddr_t base, struct kvm_segment *segp) 485 520 { 486 - if (!vm->gdt) 487 - vm->gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); 488 - 489 - dt->base = vm->gdt; 490 - dt->limit = getpagesize(); 491 - } 492 - 493 - static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp, 494 - int selector) 495 - { 496 - if (!vm->tss) 497 - vm->tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); 498 - 499 521 memset(segp, 0, sizeof(*segp)); 500 - segp->base = vm->tss; 522 + segp->base = base; 501 523 segp->limit = 0x67; 502 - segp->selector = selector; 524 + segp->selector = KERNEL_TSS; 503 525 segp->type = 0xb; 504 526 segp->present = 1; 505 - kvm_seg_fill_gdt_64bit(vm, segp); 506 527 } 507 528 508 - static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu) 529 + static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu) 509 530 { 510 531 struct kvm_sregs sregs; 532 + 533 + TEST_ASSERT_EQ(vm->mode, VM_MODE_PXXV48_4K); 511 534 512 535 /* Set mode specific system register values. 
*/ 513 536 vcpu_sregs_get(vcpu, &sregs); 514 537 515 - sregs.idt.limit = 0; 538 + sregs.idt.base = vm->arch.idt; 539 + sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1; 540 + sregs.gdt.base = vm->arch.gdt; 541 + sregs.gdt.limit = getpagesize() - 1; 516 542 517 - kvm_setup_gdt(vm, &sregs.gdt); 543 + sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG; 544 + sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR; 545 + sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX); 518 546 519 - switch (vm->mode) { 520 - case VM_MODE_PXXV48_4K: 521 - sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG; 522 - sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR; 523 - sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX); 524 - 525 - kvm_seg_set_unusable(&sregs.ldt); 526 - kvm_seg_set_kernel_code_64bit(vm, DEFAULT_CODE_SELECTOR, &sregs.cs); 527 - kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.ds); 528 - kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.es); 529 - kvm_setup_tss_64bit(vm, &sregs.tr, 0x18); 530 - break; 531 - 532 - default: 533 - TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); 534 - } 547 + kvm_seg_set_unusable(&sregs.ldt); 548 + kvm_seg_set_kernel_code_64bit(&sregs.cs); 549 + kvm_seg_set_kernel_data_64bit(&sregs.ds); 550 + kvm_seg_set_kernel_data_64bit(&sregs.es); 551 + kvm_seg_set_kernel_data_64bit(&sregs.gs); 552 + kvm_seg_set_tss_64bit(vm->arch.tss, &sregs.tr); 535 553 536 554 sregs.cr3 = vm->pgd; 537 555 vcpu_sregs_set(vcpu, &sregs); 538 556 } 539 557 558 + static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr, 559 + int dpl, unsigned short selector) 560 + { 561 + struct idt_entry *base = 562 + (struct idt_entry *)addr_gva2hva(vm, vm->arch.idt); 563 + struct idt_entry *e = &base[vector]; 564 + 565 + memset(e, 0, sizeof(*e)); 566 + e->offset0 = addr; 567 + e->selector = selector; 568 + e->ist = 0; 569 + e->type = 14; 570 + e->dpl = dpl; 571 + e->p = 1; 572 + e->offset1 = addr >> 16; 573 + e->offset2 = addr >> 32; 574 + } 575 + 576 
+ static bool kvm_fixup_exception(struct ex_regs *regs) 577 + { 578 + if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10) 579 + return false; 580 + 581 + if (regs->vector == DE_VECTOR) 582 + return false; 583 + 584 + regs->rip = regs->r11; 585 + regs->r9 = regs->vector; 586 + regs->r10 = regs->error_code; 587 + return true; 588 + } 589 + 590 + void route_exception(struct ex_regs *regs) 591 + { 592 + typedef void(*handler)(struct ex_regs *); 593 + handler *handlers = (handler *)exception_handlers; 594 + 595 + if (handlers && handlers[regs->vector]) { 596 + handlers[regs->vector](regs); 597 + return; 598 + } 599 + 600 + if (kvm_fixup_exception(regs)) 601 + return; 602 + 603 + ucall_assert(UCALL_UNHANDLED, 604 + "Unhandled exception in guest", __FILE__, __LINE__, 605 + "Unhandled exception '0x%lx' at guest RIP '0x%lx'", 606 + regs->vector, regs->rip); 607 + } 608 + 609 + static void vm_init_descriptor_tables(struct kvm_vm *vm) 610 + { 611 + extern void *idt_handlers; 612 + struct kvm_segment seg; 613 + int i; 614 + 615 + vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); 616 + vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); 617 + vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); 618 + vm->arch.tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); 619 + 620 + /* Handlers have the same address in both address spaces.*/ 621 + for (i = 0; i < NUM_INTERRUPTS; i++) 622 + set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS); 623 + 624 + *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; 625 + 626 + kvm_seg_set_kernel_code_64bit(&seg); 627 + kvm_seg_fill_gdt_64bit(vm, &seg); 628 + 629 + kvm_seg_set_kernel_data_64bit(&seg); 630 + kvm_seg_fill_gdt_64bit(vm, &seg); 631 + 632 + kvm_seg_set_tss_64bit(vm->arch.tss, &seg); 633 + kvm_seg_fill_gdt_64bit(vm, &seg); 634 + } 635 + 636 + void vm_install_exception_handler(struct kvm_vm *vm, int vector, 637 + void (*handler)(struct ex_regs *)) 638 + { 639 + 
vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers); 640 + 641 + handlers[vector] = (vm_vaddr_t)handler; 642 + } 643 + 644 + void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) 645 + { 646 + struct ucall uc; 647 + 648 + if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) 649 + REPORT_GUEST_ASSERT(uc); 650 + } 651 + 540 652 void kvm_arch_vm_post_create(struct kvm_vm *vm) 541 653 { 542 654 vm_create_irqchip(vm); 655 + vm_init_descriptor_tables(vm); 656 + 543 657 sync_global_to_guest(vm, host_cpu_is_intel); 544 658 sync_global_to_guest(vm, host_cpu_is_amd); 659 + sync_global_to_guest(vm, is_forced_emulation_enabled); 545 660 546 661 if (vm->type == KVM_X86_SEV_VM || vm->type == KVM_X86_SEV_ES_VM) { 547 662 struct kvm_sev_init init = { 0 }; ··· 667 622 668 623 vcpu = __vm_vcpu_add(vm, vcpu_id); 669 624 vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid()); 670 - vcpu_setup(vm, vcpu); 625 + vcpu_init_sregs(vm, vcpu); 671 626 672 627 /* Setup guest general purpose registers */ 673 628 vcpu_regs_get(vcpu, &regs); ··· 1136 1091 } 1137 1092 } 1138 1093 1139 - static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr, 1140 - int dpl, unsigned short selector) 1141 - { 1142 - struct idt_entry *base = 1143 - (struct idt_entry *)addr_gva2hva(vm, vm->idt); 1144 - struct idt_entry *e = &base[vector]; 1145 - 1146 - memset(e, 0, sizeof(*e)); 1147 - e->offset0 = addr; 1148 - e->selector = selector; 1149 - e->ist = 0; 1150 - e->type = 14; 1151 - e->dpl = dpl; 1152 - e->p = 1; 1153 - e->offset1 = addr >> 16; 1154 - e->offset2 = addr >> 32; 1155 - } 1156 - 1157 - 1158 - static bool kvm_fixup_exception(struct ex_regs *regs) 1159 - { 1160 - if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10) 1161 - return false; 1162 - 1163 - if (regs->vector == DE_VECTOR) 1164 - return false; 1165 - 1166 - regs->rip = regs->r11; 1167 - regs->r9 = regs->vector; 1168 - regs->r10 = regs->error_code; 1169 - return true; 1170 - } 1171 - 1172 - void route_exception(struct 
ex_regs *regs) 1173 - { 1174 - typedef void(*handler)(struct ex_regs *); 1175 - handler *handlers = (handler *)exception_handlers; 1176 - 1177 - if (handlers && handlers[regs->vector]) { 1178 - handlers[regs->vector](regs); 1179 - return; 1180 - } 1181 - 1182 - if (kvm_fixup_exception(regs)) 1183 - return; 1184 - 1185 - ucall_assert(UCALL_UNHANDLED, 1186 - "Unhandled exception in guest", __FILE__, __LINE__, 1187 - "Unhandled exception '0x%lx' at guest RIP '0x%lx'", 1188 - regs->vector, regs->rip); 1189 - } 1190 - 1191 - void vm_init_descriptor_tables(struct kvm_vm *vm) 1192 - { 1193 - extern void *idt_handlers; 1194 - int i; 1195 - 1196 - vm->idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); 1197 - vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); 1198 - /* Handlers have the same address in both address spaces.*/ 1199 - for (i = 0; i < NUM_INTERRUPTS; i++) 1200 - set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, 1201 - DEFAULT_CODE_SELECTOR); 1202 - } 1203 - 1204 - void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu) 1205 - { 1206 - struct kvm_vm *vm = vcpu->vm; 1207 - struct kvm_sregs sregs; 1208 - 1209 - vcpu_sregs_get(vcpu, &sregs); 1210 - sregs.idt.base = vm->idt; 1211 - sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1; 1212 - sregs.gdt.base = vm->gdt; 1213 - sregs.gdt.limit = getpagesize() - 1; 1214 - kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs); 1215 - vcpu_sregs_set(vcpu, &sregs); 1216 - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; 1217 - } 1218 - 1219 - void vm_install_exception_handler(struct kvm_vm *vm, int vector, 1220 - void (*handler)(struct ex_regs *)) 1221 - { 1222 - vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers); 1223 - 1224 - handlers[vector] = (vm_vaddr_t)handler; 1225 - } 1226 - 1227 - void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) 1228 - { 1229 - struct ucall uc; 1230 - 1231 - if (get_ucall(vcpu, &uc) == 
UCALL_UNHANDLED) 1232 - REPORT_GUEST_ASSERT(uc); 1233 - } 1234 - 1235 1094 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, 1236 1095 uint32_t function, uint32_t index) 1237 1096 { ··· 1297 1348 { 1298 1349 host_cpu_is_intel = this_cpu_is_intel(); 1299 1350 host_cpu_is_amd = this_cpu_is_amd(); 1351 + is_forced_emulation_enabled = kvm_is_forced_emulation_enabled(); 1300 1352 } 1301 1353 1302 1354 bool sys_clocksource_is_based_on_tsc(void)
-1
tools/testing/selftests/kvm/lib/x86_64/sev.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - #define _GNU_SOURCE /* for program_invocation_short_name */ 3 2 #include <stdint.h> 4 3 #include <stdbool.h> 5 4
-2
tools/testing/selftests/kvm/max_guest_memory_test.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - #define _GNU_SOURCE 3 - 4 2 #include <stdio.h> 5 3 #include <stdlib.h> 6 4 #include <pthread.h>
-3
tools/testing/selftests/kvm/memslot_modification_stress_test.c
··· 6 6 * Copyright (C) 2018, Red Hat, Inc. 7 7 * Copyright (C) 2020, Google, Inc. 8 8 */ 9 - 10 - #define _GNU_SOURCE /* for program_invocation_name */ 11 - 12 9 #include <stdio.h> 13 10 #include <stdlib.h> 14 11 #include <sys/syscall.h>
+1 -3
tools/testing/selftests/kvm/riscv/arch_timer.c
··· 7 7 * 8 8 * Copyright (c) 2024, Intel Corporation. 9 9 */ 10 - 11 - #define _GNU_SOURCE 12 - 13 10 #include "arch_timer.h" 14 11 #include "kvm_util.h" 15 12 #include "processor.h" 16 13 #include "timer_test.h" 14 + #include "ucall_common.h" 17 15 18 16 static int timer_irq = IRQ_S_TIMER; 19 17
+10 -3
tools/testing/selftests/kvm/rseq_test.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - #define _GNU_SOURCE /* for program_invocation_short_name */ 2 + 3 + /* 4 + * Include rseq.c without _GNU_SOURCE defined, before including any headers, so 5 + * that rseq.c is compiled with its configuration, not KVM selftests' config. 6 + */ 7 + #undef _GNU_SOURCE 8 + #include "../rseq/rseq.c" 9 + #define _GNU_SOURCE 10 + 3 11 #include <errno.h> 4 12 #include <fcntl.h> 5 13 #include <pthread.h> ··· 27 19 #include "kvm_util.h" 28 20 #include "processor.h" 29 21 #include "test_util.h" 30 - 31 - #include "../rseq/rseq.c" 22 + #include "ucall_common.h" 32 23 33 24 /* 34 25 * Any bug related to task migration is likely to be timing-dependent; perform
+1 -2
tools/testing/selftests/kvm/s390x/cmma_test.c
··· 7 7 * Authors: 8 8 * Nico Boehr <nrb@linux.ibm.com> 9 9 */ 10 - 11 - #define _GNU_SOURCE /* for program_invocation_short_name */ 12 10 #include <fcntl.h> 13 11 #include <stdio.h> 14 12 #include <stdlib.h> ··· 16 18 #include "test_util.h" 17 19 #include "kvm_util.h" 18 20 #include "kselftest.h" 21 + #include "ucall_common.h" 19 22 20 23 #define MAIN_PAGE_COUNT 512 21 24
+1
tools/testing/selftests/kvm/s390x/memop.c
··· 15 15 #include "test_util.h" 16 16 #include "kvm_util.h" 17 17 #include "kselftest.h" 18 + #include "ucall_common.h" 18 19 19 20 enum mop_target { 20 21 LOGICAL,
-2
tools/testing/selftests/kvm/s390x/sync_regs_test.c
··· 10 10 * 11 11 * Test expected behavior of the KVM_CAP_SYNC_REGS functionality. 12 12 */ 13 - 14 - #define _GNU_SOURCE /* for program_invocation_short_name */ 15 13 #include <fcntl.h> 16 14 #include <stdio.h> 17 15 #include <stdlib.h>
+1
tools/testing/selftests/kvm/s390x/tprot.c
··· 8 8 #include "test_util.h" 9 9 #include "kvm_util.h" 10 10 #include "kselftest.h" 11 + #include "ucall_common.h" 11 12 12 13 #define PAGE_SHIFT 12 13 14 #define PAGE_SIZE (1 << PAGE_SHIFT)
+12 -1
tools/testing/selftests/kvm/set_memory_region_test.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - #define _GNU_SOURCE /* for program_invocation_short_name */ 3 2 #include <fcntl.h> 4 3 #include <pthread.h> 5 4 #include <sched.h> ··· 220 221 221 222 static void guest_code_delete_memory_region(void) 222 223 { 224 + struct desc_ptr idt; 223 225 uint64_t val; 226 + 227 + /* 228 + * Clobber the IDT so that a #PF due to the memory region being deleted 229 + * escalates to triple-fault shutdown. Because the memory region is 230 + * deleted, there will be no valid mappings. As a result, KVM will 231 + * repeatedly intercept the stage-2 page fault that occurs when trying 232 + * to vector the guest's #PF. I.e. trying to actually handle the #PF 233 + * in the guest will never succeed, and so isn't an option. 234 + */ 235 + memset(&idt, 0, sizeof(idt)); 236 + __asm__ __volatile__("lidt %0" :: "m"(idt)); 224 237 225 238 GUEST_SYNC(0); 226 239
+1 -1
tools/testing/selftests/kvm/steal_time.c
··· 4 4 * 5 5 * Copyright (C) 2020, Red Hat, Inc. 6 6 */ 7 - #define _GNU_SOURCE 8 7 #include <stdio.h> 9 8 #include <time.h> 10 9 #include <sched.h> ··· 19 20 #include "test_util.h" 20 21 #include "kvm_util.h" 21 22 #include "processor.h" 23 + #include "ucall_common.h" 22 24 23 25 #define NR_VCPUS 4 24 26 #define ST_GPA_BASE (1 << 30)
-4
tools/testing/selftests/kvm/x86_64/amx_test.c
··· 6 6 * 7 7 * Tests for amx #NM exception and save/restore. 8 8 */ 9 - 10 - #define _GNU_SOURCE /* for program_invocation_short_name */ 11 9 #include <fcntl.h> 12 10 #include <stdio.h> 13 11 #include <stdlib.h> ··· 244 246 vcpu_regs_get(vcpu, &regs1); 245 247 246 248 /* Register #NM handler */ 247 - vm_init_descriptor_tables(vm); 248 - vcpu_init_descriptor_tables(vcpu); 249 249 vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler); 250 250 251 251 /* amx cfg for guest_code */
+1
tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
··· 17 17 #include "test_util.h" 18 18 #include "memstress.h" 19 19 #include "guest_modes.h" 20 + #include "ucall_common.h" 20 21 21 22 #define VCPUS 2 22 23 #define SLOTS 2
+1 -4
tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c
··· 4 4 * 5 5 * Test for KVM_CAP_EXIT_ON_EMULATION_FAILURE. 6 6 */ 7 - 8 - #define _GNU_SOURCE /* for program_invocation_short_name */ 9 - 10 7 #include "flds_emulation.h" 11 - 12 8 #include "test_util.h" 9 + #include "ucall_common.h" 13 10 14 11 #define MMIO_GPA 0x700000000 15 12 #define MMIO_GVA MMIO_GPA
-2
tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
··· 110 110 { 111 111 struct kvm_vm *vm = vcpu->vm; 112 112 113 - vm_init_descriptor_tables(vm); 114 - vcpu_init_descriptor_tables(vcpu); 115 113 vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler); 116 114 117 115 if (disable_quirk)
-2
tools/testing/selftests/kvm/x86_64/hwcr_msr_test.c
··· 2 2 /* 3 3 * Copyright (C) 2023, Google LLC. 4 4 */ 5 - 6 - #define _GNU_SOURCE /* for program_invocation_short_name */ 7 5 #include <sys/ioctl.h> 8 6 9 7 #include "test_util.h"
-2
tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
··· 7 7 * This work is licensed under the terms of the GNU GPL, version 2. 8 8 * 9 9 */ 10 - 11 - #define _GNU_SOURCE /* for program_invocation_short_name */ 12 10 #include <fcntl.h> 13 11 #include <stdio.h> 14 12 #include <stdlib.h>
-3
tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c
··· 4 4 * 5 5 * Tests for Enlightened VMCS, including nested guest state. 6 6 */ 7 - #define _GNU_SOURCE /* for program_invocation_short_name */ 8 7 #include <fcntl.h> 9 8 #include <stdio.h> 10 9 #include <stdlib.h> ··· 257 258 vcpu_args_set(vcpu, 3, vmx_pages_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page)); 258 259 vcpu_set_msr(vcpu, HV_X64_MSR_VP_INDEX, vcpu->id); 259 260 260 - vm_init_descriptor_tables(vm); 261 - vcpu_init_descriptor_tables(vcpu); 262 261 vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); 263 262 vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler); 264 263
-6
tools/testing/selftests/kvm/x86_64/hyperv_features.c
··· 156 156 vcpu_init_cpuid(vcpu, prev_cpuid); 157 157 } 158 158 159 - vm_init_descriptor_tables(vm); 160 - vcpu_init_descriptor_tables(vcpu); 161 - 162 159 /* TODO: Make this entire test easier to maintain. */ 163 160 if (stage >= 21) 164 161 vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0); ··· 528 531 529 532 while (true) { 530 533 vm = vm_create_with_one_vcpu(&vcpu, guest_hcall); 531 - 532 - vm_init_descriptor_tables(vm); 533 - vcpu_init_descriptor_tables(vcpu); 534 534 535 535 /* Hypercall input/output */ 536 536 hcall_page = vm_vaddr_alloc_pages(vm, 2);
-5
tools/testing/selftests/kvm/x86_64/hyperv_ipi.c
··· 5 5 * Copyright (C) 2022, Red Hat, Inc. 6 6 * 7 7 */ 8 - 9 - #define _GNU_SOURCE /* for program_invocation_short_name */ 10 8 #include <pthread.h> 11 9 #include <inttypes.h> 12 10 ··· 254 256 hcall_page = vm_vaddr_alloc_pages(vm, 2); 255 257 memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize()); 256 258 257 - vm_init_descriptor_tables(vm); 258 259 259 260 vcpu[1] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_1, receiver_code); 260 - vcpu_init_descriptor_tables(vcpu[1]); 261 261 vcpu_args_set(vcpu[1], 2, hcall_page, addr_gva2gpa(vm, hcall_page)); 262 262 vcpu_set_msr(vcpu[1], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_1); 263 263 vcpu_set_hv_cpuid(vcpu[1]); 264 264 265 265 vcpu[2] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_2, receiver_code); 266 - vcpu_init_descriptor_tables(vcpu[2]); 267 266 vcpu_args_set(vcpu[2], 2, hcall_page, addr_gva2gpa(vm, hcall_page)); 268 267 vcpu_set_msr(vcpu[2], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_2); 269 268 vcpu_set_hv_cpuid(vcpu[2]);
-1
tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
··· 4 4 * 5 5 * Tests for Hyper-V extensions to SVM. 6 6 */ 7 - #define _GNU_SOURCE /* for program_invocation_short_name */ 8 7 #include <fcntl.h> 9 8 #include <stdio.h> 10 9 #include <stdlib.h>
-2
tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c
··· 5 5 * Copyright (C) 2022, Red Hat, Inc. 6 6 * 7 7 */ 8 - 9 - #define _GNU_SOURCE /* for program_invocation_short_name */ 10 8 #include <asm/barrier.h> 11 9 #include <pthread.h> 12 10 #include <inttypes.h>
-3
tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
··· 183 183 184 184 vcpu_clear_cpuid_entry(vcpu, KVM_CPUID_FEATURES); 185 185 186 - vm_init_descriptor_tables(vm); 187 - vcpu_init_descriptor_tables(vcpu); 188 - 189 186 enter_guest(vcpu); 190 187 kvm_vm_free(vm); 191 188
-3
tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c
··· 81 81 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 82 82 vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_MWAIT); 83 83 84 - vm_init_descriptor_tables(vm); 85 - vcpu_init_descriptor_tables(vcpu); 86 - 87 84 while (1) { 88 85 vcpu_run(vcpu); 89 86 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
-2
tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - #define _GNU_SOURCE /* for program_invocation_short_name */ 3 - 4 2 #include "test_util.h" 5 3 #include "kvm_util.h" 6 4 #include "processor.h"
-3
tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
··· 5 5 * 6 6 * Copyright (C) 2022, Google LLC. 7 7 */ 8 - 9 - #define _GNU_SOURCE 10 - 11 8 #include <fcntl.h> 12 9 #include <stdint.h> 13 10 #include <time.h>
+30 -31
tools/testing/selftests/kvm/x86_64/platform_info_test.c
··· 9 9 * Verifies expected behavior of controlling guest access to 10 10 * MSR_PLATFORM_INFO. 11 11 */ 12 - 13 - #define _GNU_SOURCE /* for program_invocation_short_name */ 14 12 #include <fcntl.h> 15 13 #include <stdio.h> 16 14 #include <stdlib.h> ··· 24 26 static void guest_code(void) 25 27 { 26 28 uint64_t msr_platform_info; 29 + uint8_t vector; 27 30 28 - for (;;) { 29 - msr_platform_info = rdmsr(MSR_PLATFORM_INFO); 30 - GUEST_SYNC(msr_platform_info); 31 - asm volatile ("inc %r11"); 32 - } 33 - } 31 + GUEST_SYNC(true); 32 + msr_platform_info = rdmsr(MSR_PLATFORM_INFO); 33 + GUEST_ASSERT_EQ(msr_platform_info & MSR_PLATFORM_INFO_MAX_TURBO_RATIO, 34 + MSR_PLATFORM_INFO_MAX_TURBO_RATIO); 34 35 35 - static void test_msr_platform_info_enabled(struct kvm_vcpu *vcpu) 36 - { 37 - struct ucall uc; 36 + GUEST_SYNC(false); 37 + vector = rdmsr_safe(MSR_PLATFORM_INFO, &msr_platform_info); 38 + GUEST_ASSERT_EQ(vector, GP_VECTOR); 38 39 39 - vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, true); 40 - vcpu_run(vcpu); 41 - TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); 42 - 43 - get_ucall(vcpu, &uc); 44 - TEST_ASSERT(uc.cmd == UCALL_SYNC, 45 - "Received ucall other than UCALL_SYNC: %lu", uc.cmd); 46 - TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) == 47 - MSR_PLATFORM_INFO_MAX_TURBO_RATIO, 48 - "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.", 49 - MSR_PLATFORM_INFO_MAX_TURBO_RATIO); 50 - } 51 - 52 - static void test_msr_platform_info_disabled(struct kvm_vcpu *vcpu) 53 - { 54 - vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, false); 55 - vcpu_run(vcpu); 56 - TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN); 40 + GUEST_DONE(); 57 41 } 58 42 59 43 int main(int argc, char *argv[]) ··· 43 63 struct kvm_vcpu *vcpu; 44 64 struct kvm_vm *vm; 45 65 uint64_t msr_platform_info; 66 + struct ucall uc; 46 67 47 68 TEST_REQUIRE(kvm_has_cap(KVM_CAP_MSR_PLATFORM_INFO)); 48 69 ··· 52 71 msr_platform_info = vcpu_get_msr(vcpu, MSR_PLATFORM_INFO); 53 72 
vcpu_set_msr(vcpu, MSR_PLATFORM_INFO, 54 73 msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO); 55 - test_msr_platform_info_enabled(vcpu); 56 - test_msr_platform_info_disabled(vcpu); 74 + 75 + for (;;) { 76 + vcpu_run(vcpu); 77 + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); 78 + 79 + switch (get_ucall(vcpu, &uc)) { 80 + case UCALL_SYNC: 81 + vm_enable_cap(vm, KVM_CAP_MSR_PLATFORM_INFO, uc.args[1]); 82 + break; 83 + case UCALL_DONE: 84 + goto done; 85 + case UCALL_ABORT: 86 + REPORT_GUEST_ASSERT(uc); 87 + default: 88 + TEST_FAIL("Unexpected ucall %lu", uc.cmd); 89 + break; 90 + } 91 + } 92 + 93 + done: 57 94 vcpu_set_msr(vcpu, MSR_PLATFORM_INFO, msr_platform_info); 58 95 59 96 kvm_vm_free(vm);
-8
tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
··· 2 2 /* 3 3 * Copyright (C) 2023, Tencent, Inc. 4 4 */ 5 - 6 - #define _GNU_SOURCE /* for program_invocation_short_name */ 7 5 #include <x86intrin.h> 8 6 9 7 #include "pmu.h" ··· 19 21 20 22 static uint8_t kvm_pmu_version; 21 23 static bool kvm_has_perf_caps; 22 - static bool is_forced_emulation_enabled; 23 24 24 25 static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, 25 26 void *guest_code, ··· 28 31 struct kvm_vm *vm; 29 32 30 33 vm = vm_create_with_one_vcpu(vcpu, guest_code); 31 - vm_init_descriptor_tables(vm); 32 - vcpu_init_descriptor_tables(*vcpu); 33 - 34 34 sync_global_to_guest(vm, kvm_pmu_version); 35 - sync_global_to_guest(vm, is_forced_emulation_enabled); 36 35 37 36 /* 38 37 * Set PERF_CAPABILITIES before PMU version as KVM disallows enabling ··· 623 630 624 631 kvm_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION); 625 632 kvm_has_perf_caps = kvm_cpu_has(X86_FEATURE_PDCM); 626 - is_forced_emulation_enabled = kvm_is_forced_emulation_enabled(); 627 633 628 634 test_intel_counters(); 629 635
-9
tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
··· 9 9 * Verifies the expected behavior of allow lists and deny lists for 10 10 * virtual PMU events. 11 11 */ 12 - 13 - #define _GNU_SOURCE /* for program_invocation_short_name */ 14 - 15 12 #include "kvm_util.h" 16 13 #include "pmu.h" 17 14 #include "processor.h" ··· 334 337 vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE); 335 338 336 339 vcpu = vm_vcpu_add(vm, 0, guest_code); 337 - vm_init_descriptor_tables(vm); 338 - vcpu_init_descriptor_tables(vcpu); 339 - 340 340 TEST_ASSERT(!sanity_check_pmu(vcpu), 341 341 "Guest should not be able to use disabled PMU."); 342 342 ··· 869 875 guest_code = use_intel_pmu() ? intel_guest_code : amd_guest_code; 870 876 871 877 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 872 - 873 - vm_init_descriptor_tables(vm); 874 - vcpu_init_descriptor_tables(vcpu); 875 878 876 879 TEST_REQUIRE(sanity_check_pmu(vcpu)); 877 880
-1
tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c
··· 2 2 /* 3 3 * Copyright (C) 2022, Google LLC. 4 4 */ 5 - #define _GNU_SOURCE /* for program_invocation_short_name */ 6 5 #include <fcntl.h> 7 6 #include <limits.h> 8 7 #include <pthread.h>
-1
tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
··· 4 4 * 5 5 * Copyright (C) 2020, Red Hat, Inc. 6 6 */ 7 - #define _GNU_SOURCE /* for program_invocation_name */ 8 7 #include <fcntl.h> 9 8 #include <stdio.h> 10 9 #include <stdlib.h>
-1
tools/testing/selftests/kvm/x86_64/set_sregs_test.c
··· 10 10 * That bug allowed a user-mode program that called the KVM_SET_SREGS 11 11 * ioctl to put a VCPU's local APIC into an invalid state. 12 12 */ 13 - #define _GNU_SOURCE /* for program_invocation_short_name */ 14 13 #include <fcntl.h> 15 14 #include <stdio.h> 16 15 #include <stdlib.h>
-6
tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c
··· 5 5 * Test that KVM emulates instructions in response to EPT violations when 6 6 * allow_smaller_maxphyaddr is enabled and guest.MAXPHYADDR < host.MAXPHYADDR. 7 7 */ 8 - 9 - #define _GNU_SOURCE /* for program_invocation_short_name */ 10 - 11 8 #include "flds_emulation.h" 12 9 13 10 #include "test_util.h" ··· 56 59 57 60 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 58 61 vcpu_args_set(vcpu, 1, kvm_is_tdp_enabled()); 59 - 60 - vm_init_descriptor_tables(vm); 61 - vcpu_init_descriptor_tables(vcpu); 62 62 63 63 vcpu_set_cpuid_property(vcpu, X86_PROPERTY_MAX_PHY_ADDR, MAXPHYADDR); 64 64
-1
tools/testing/selftests/kvm/x86_64/smm_test.c
··· 4 4 * 5 5 * Tests for SMM. 6 6 */ 7 - #define _GNU_SOURCE /* for program_invocation_short_name */ 8 7 #include <fcntl.h> 9 8 #include <stdio.h> 10 9 #include <stdlib.h>
-1
tools/testing/selftests/kvm/x86_64/state_test.c
··· 6 6 * 7 7 * Tests for vCPU state save/restore, including nested guest state. 8 8 */ 9 - #define _GNU_SOURCE /* for program_invocation_short_name */ 10 9 #include <fcntl.h> 11 10 #include <stdio.h> 12 11 #include <stdlib.h>
-3
tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
··· 93 93 94 94 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); 95 95 96 - vm_init_descriptor_tables(vm); 97 - vcpu_init_descriptor_tables(vcpu); 98 - 99 96 vm_install_exception_handler(vm, VINTR_IRQ_NUMBER, vintr_irq_handler); 100 97 vm_install_exception_handler(vm, INTR_IRQ_NUMBER, intr_irq_handler); 101 98
+1 -4
tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c
··· 48 48 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM)); 49 49 50 50 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); 51 - vm_init_descriptor_tables(vm); 52 - vcpu_init_descriptor_tables(vcpu); 53 - 54 51 vcpu_alloc_svm(vm, &svm_gva); 55 52 56 - vcpu_args_set(vcpu, 2, svm_gva, vm->idt); 53 + vcpu_args_set(vcpu, 2, svm_gva, vm->arch.idt); 57 54 58 55 vcpu_run(vcpu); 59 56 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
+1 -4
tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
··· 152 152 153 153 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); 154 154 155 - vm_init_descriptor_tables(vm); 156 - vcpu_init_descriptor_tables(vcpu); 157 - 158 155 vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler); 159 156 vm_install_exception_handler(vm, BP_VECTOR, guest_bp_handler); 160 157 vm_install_exception_handler(vm, INT_NR, guest_int_handler); ··· 163 166 164 167 idt_alt_vm = vm_vaddr_alloc_page(vm); 165 168 idt_alt = addr_gva2hva(vm, idt_alt_vm); 166 - idt = addr_gva2hva(vm, vm->idt); 169 + idt = addr_gva2hva(vm, vm->arch.idt); 167 170 memcpy(idt_alt, idt, getpagesize()); 168 171 } else { 169 172 idt_alt_vm = 0;
-2
tools/testing/selftests/kvm/x86_64/sync_regs_test.c
··· 8 8 * including requesting an invalid register set, updates to/from values 9 9 * in kvm_run.s.regs when kvm_valid_regs and kvm_dirty_regs are toggled. 10 10 */ 11 - 12 - #define _GNU_SOURCE /* for program_invocation_short_name */ 13 11 #include <fcntl.h> 14 12 #include <stdio.h> 15 13 #include <stdlib.h>
-7
tools/testing/selftests/kvm/x86_64/ucna_injection_test.c
··· 17 17 * delivered into the guest or not. 18 18 * 19 19 */ 20 - 21 - #define _GNU_SOURCE /* for program_invocation_short_name */ 22 20 #include <pthread.h> 23 21 #include <inttypes.h> 24 22 #include <string.h> 25 23 #include <time.h> 26 24 27 - #include "kvm_util_base.h" 28 25 #include "kvm_util.h" 29 26 #include "mce.h" 30 27 #include "processor.h" ··· 282 285 cmcidis_vcpu = create_vcpu_with_mce_cap(vm, 1, false, cmci_disabled_guest_code); 283 286 cmci_vcpu = create_vcpu_with_mce_cap(vm, 2, true, cmci_enabled_guest_code); 284 287 285 - vm_init_descriptor_tables(vm); 286 - vcpu_init_descriptor_tables(ucna_vcpu); 287 - vcpu_init_descriptor_tables(cmcidis_vcpu); 288 - vcpu_init_descriptor_tables(cmci_vcpu); 289 288 vm_install_exception_handler(vm, CMCI_VECTOR, guest_cmci_handler); 290 289 vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler); 291 290
+2 -13
tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
··· 4 4 * 5 5 * Tests for exiting into userspace on registered MSRs 6 6 */ 7 - 8 - #define _GNU_SOURCE /* for program_invocation_short_name */ 9 7 #include <sys/ioctl.h> 10 8 11 9 #include "kvm_test_harness.h" 12 10 #include "test_util.h" 13 11 #include "kvm_util.h" 14 12 #include "vmx.h" 15 - 16 - static bool fep_available; 17 13 18 14 #define MSR_NON_EXISTENT 0x474f4f00 19 15 ··· 254 258 GUEST_ASSERT(data == 2); 255 259 GUEST_ASSERT(guest_exception_count == 0); 256 260 257 - if (fep_available) { 261 + if (is_forced_emulation_enabled) { 258 262 /* Let userspace know we aren't done. */ 259 263 GUEST_SYNC(0); 260 264 ··· 516 520 uint64_t cmd; 517 521 int rc; 518 522 519 - sync_global_to_guest(vm, fep_available); 520 - 521 523 rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR); 522 524 TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available"); 523 525 vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER); ··· 524 530 TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available"); 525 531 526 532 vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow); 527 - 528 - vm_init_descriptor_tables(vm); 529 - vcpu_init_descriptor_tables(vcpu); 530 533 531 534 vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler); 532 535 ··· 542 551 vcpu_run(vcpu); 543 552 cmd = process_ucall(vcpu); 544 553 545 - if (fep_available) { 554 + if (is_forced_emulation_enabled) { 546 555 TEST_ASSERT_EQ(cmd, UCALL_SYNC); 547 556 vm_install_exception_handler(vm, GP_VECTOR, guest_fep_gp_handler); 548 557 ··· 765 774 766 775 int main(int argc, char *argv[]) 767 776 { 768 - fep_available = kvm_is_forced_emulation_enabled(); 769 - 770 777 return test_harness_run(argc, argv); 771 778 }
-3
tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
··· 4 4 * 5 5 * Copyright (C) 2018, Red Hat, Inc. 6 6 */ 7 - 8 - #define _GNU_SOURCE /* for program_invocation_name */ 9 - 10 7 #include <stdio.h> 11 8 #include <stdlib.h> 12 9 #include <linux/bitmap.h>
-3
tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c
··· 115 115 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 116 116 get_set_sigalrm_vcpu(vcpu); 117 117 118 - vm_init_descriptor_tables(vm); 119 - vcpu_init_descriptor_tables(vcpu); 120 - 121 118 vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); 122 119 123 120 /*
-4
tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
··· 10 10 * and check it can be retrieved with KVM_GET_MSR, also test 11 11 * the invalid LBR formats are rejected. 12 12 */ 13 - #define _GNU_SOURCE /* for program_invocation_short_name */ 14 13 #include <sys/ioctl.h> 15 14 16 15 #include <linux/bitmap.h> ··· 84 85 { 85 86 struct ucall uc; 86 87 int r, i; 87 - 88 - vm_init_descriptor_tables(vcpu->vm); 89 - vcpu_init_descriptor_tables(vcpu); 90 88 91 89 vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); 92 90
-1
tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
··· 9 9 * value instead of partially decayed timer value 10 10 * 11 11 */ 12 - #define _GNU_SOURCE /* for program_invocation_short_name */ 13 12 #include <fcntl.h> 14 13 #include <stdio.h> 15 14 #include <stdlib.h>
-4
tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
··· 19 19 * Migration is a command line option. When used on non-numa machines will 20 20 * exit with error. Test is still usefull on non-numa for testing IPIs. 21 21 */ 22 - 23 - #define _GNU_SOURCE /* for program_invocation_short_name */ 24 22 #include <getopt.h> 25 23 #include <pthread.h> 26 24 #include <inttypes.h> ··· 408 410 409 411 vm = vm_create_with_one_vcpu(&params[0].vcpu, halter_guest_code); 410 412 411 - vm_init_descriptor_tables(vm); 412 - vcpu_init_descriptor_tables(params[0].vcpu); 413 413 vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler); 414 414 415 415 virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
-1
tools/testing/selftests/kvm/x86_64/xapic_state_test.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - #define _GNU_SOURCE /* for program_invocation_short_name */ 3 2 #include <fcntl.h> 4 3 #include <stdio.h> 5 4 #include <stdlib.h>
-3
tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
··· 109 109 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 110 110 run = vcpu->run; 111 111 112 - vm_init_descriptor_tables(vm); 113 - vcpu_init_descriptor_tables(vcpu); 114 - 115 112 while (1) { 116 113 vcpu_run(vcpu); 117 114
+3 -4
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
··· 171 171 static void evtchn_handler(struct ex_regs *regs) 172 172 { 173 173 struct vcpu_info *vi = (void *)VCPU_INFO_VADDR; 174 - vi->evtchn_upcall_pending = 0; 175 - vi->evtchn_pending_sel = 0; 174 + 175 + vcpu_arch_put_guest(vi->evtchn_upcall_pending, 0); 176 + vcpu_arch_put_guest(vi->evtchn_pending_sel, 0); 176 177 guest_saw_irq = true; 177 178 178 179 GUEST_SYNC(TEST_GUEST_SAW_IRQ); ··· 537 536 }; 538 537 vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec); 539 538 540 - vm_init_descriptor_tables(vm); 541 - vcpu_init_descriptor_tables(vcpu); 542 539 vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler); 543 540 544 541 if (do_runstate_tests) {
-2
tools/testing/selftests/kvm/x86_64/xss_msr_test.c
··· 4 4 * 5 5 * Tests for the IA32_XSS MSR. 6 6 */ 7 - 8 - #define _GNU_SOURCE /* for program_invocation_short_name */ 9 7 #include <sys/ioctl.h> 10 8 11 9 #include "test_util.h"