/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_H
#define SELFTEST_KVM_UTIL_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/kvm.h>

#include <sys/eventfd.h>
#include <sys/ioctl.h>

#include <pthread.h>

#include "kvm_util_arch.h"
#include "kvm_util_types.h"
#include "sparsebit.h"

#define KVM_DEV_PATH		"/dev/kvm"
#define KVM_MAX_VCPUS		512

#define NSEC_PER_SEC		1000000000L

struct userspace_mem_region {
	struct kvm_userspace_memory_region2 region;
	struct sparsebit *unused_phy_pages;
	struct sparsebit *protected_phy_pages;
	int fd;
	off_t offset;
	enum vm_mem_backing_src_type backing_src_type;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_binary_stats {
	int fd;
	struct kvm_stats_header header;
	struct kvm_stats_desc *desc;
};

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
	struct kvm_binary_stats stats;
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

enum kvm_mem_region_type {
	MEM_REGION_CODE,
	MEM_REGION_DATA,
	MEM_REGION_PT,
	MEM_REGION_TEST_DATA,
	NR_MEM_REGIONS,
};

struct kvm_vm {
	int mode;
	unsigned long type;
	int kvm_fd;
	int fd;
	unsigned int pgtable_levels;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	uint64_t max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	bool pgd_created;
	vm_paddr_t ucall_mmio_addr;
	vm_paddr_t pgd;
	vm_vaddr_t handlers;
	uint32_t dirty_ring_size;
	uint64_t gpa_tag_mask;

	struct kvm_vm_arch arch;

	struct kvm_binary_stats stats;

	/*
	 * KVM region slots. These are the default memslots used by page
	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
	 * memslot.
	 */
	uint32_t memslots[NR_MEM_REGIONS];
};

struct vcpu_reg_sublist {
	const char *name;
	long capability;
	int feature;
	int feature_type;
	bool finalize;
	__u64 *regs;
	__u64 regs_n;
	__u64 *rejects_set;
	__u64 rejects_set_n;
	__u64 *skips_set;
	__u64 skips_set_n;
};

struct vcpu_reg_list {
	char *name;
	struct vcpu_reg_sublist sublists[];
};

#define for_each_sublist(c, s) \
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

#define kvm_for_each_vcpu(vm, i, vcpu) \
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
		if (!((vcpu) = vm->vcpus[i])) \
			continue; \
		else

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
							     enum kvm_mem_region_type type)
{
	assert(type < NR_MEM_REGIONS);
	return memslot2region(vm, vm->memslots[type]);
}

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_16K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* For 48-bit VA with ANY supported PA size */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P47V47_16K,
	VM_MODE_P36V47_16K,
	NUM_VM_MODES,
};

struct vm_shape {
	uint32_t type;
	uint8_t mode;
	uint8_t pad0;
	uint16_t pad1;
};

kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));

#define VM_TYPE_DEFAULT		0

#define VM_SHAPE(__mode) \
({ \
	struct vm_shape shape = { \
		.mode = (__mode), \
		.type = VM_TYPE_DEFAULT \
	}; \
 \
	shape; \
})
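
/*
 * Illustrative usage (a sketch, not new API): VM_SHAPE() packs a guest mode
 * into a default-type shape, e.g. to request 48-bit VA with 4K pages:
 *
 *	struct vm_shape shape = VM_SHAPE(VM_MODE_P48V48_4K);
 *	struct kvm_vm *vm = ____vm_create(shape);
 */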

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__loongarch__)

#define VM_MODE_DEFAULT			VM_MODE_P47V47_16K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define VM_SHAPE_DEFAULT	VM_SHAPE(VM_MODE_DEFAULT)

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int __open_path_or_exit(const char *path, int flags, const char *enoent_help);
int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

bool get_kvm_param_bool(const char *param);
bool get_kvm_intel_param_bool(const char *param);
bool get_kvm_amd_param_bool(const char *param);

int get_kvm_param_integer(const char *param);
int get_kvm_intel_param_integer(const char *param);
int get_kvm_amd_param_integer(const char *param);

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}

#define __KVM_SYSCALL_ERROR(_name, _ret) \
	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

/*
 * Use the "inner", double-underscore macro when reporting errors from within
 * other macros so that the name of ioctl() and not its literal numeric value
 * is printed on error. The "outer" macro is strongly preferred when reporting
 * errors "directly", i.e. without an additional layer of macros, as it reduces
 * the probability of passing in the wrong string.
 */
#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret)	__KVM_IOCTL_ERROR(#_ioctl, _ret)
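
/*
 * Illustrative call sites (mirroring patterns used later in this header):
 * the outer macro stringifies the ioctl name itself, while the inner/syscall
 * variants take an already-built string:
 *
 *	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
 *	TEST_ASSERT(fd >= 0, __KVM_SYSCALL_ERROR("eventfd()", fd));
 */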

#define kvm_do_ioctl(fd, cmd, arg) \
({ \
	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd)); \
	ioctl(fd, cmd, arg); \
})

#define __kvm_ioctl(kvm_fd, cmd, arg) \
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define kvm_ioctl(kvm_fd, cmd, arg) \
({ \
	int ret = __kvm_ioctl(kvm_fd, cmd, arg); \
 \
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret)); \
})

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg) \
({ \
	static_assert_is_vm(vm); \
	kvm_do_ioctl((vm)->fd, cmd, arg); \
})

/*
 * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
 * the ioctl() failed because KVM killed/bugged the VM. To detect a dead VM,
 * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
 * selftests existed and (b) should never outright fail, i.e. is supposed to
 * return 0 or 1. If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
 * VM and its vCPUs, including KVM_CHECK_EXTENSION.
 */
#define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm) \
do { \
	int __errno = errno; \
 \
	static_assert_is_vm(vm); \
 \
	if (cond) \
		break; \
 \
	if (errno == EIO && \
	    __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) { \
		TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO"); \
		TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues"); \
	} \
	errno = __errno; \
	TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret)); \
} while (0)

#define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm) \
	__TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)

#define vm_ioctl(vm, cmd, arg) \
({ \
	int ret = __vm_ioctl(vm, cmd, arg); \
 \
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm); \
})

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg) \
({ \
	static_assert_is_vcpu(vcpu); \
	kvm_do_ioctl((vcpu)->fd, cmd, arg); \
})

#define vcpu_ioctl(vcpu, cmd, arg) \
({ \
	int ret = __vcpu_ioctl(vcpu, cmd, arg); \
 \
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm); \
})
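
/*
 * Illustrative convention (a sketch, not new API): the double-underscore
 * wrappers return the raw ioctl() result so the caller can handle failure,
 * while the plain wrappers assert success:
 *
 *	int r = __vcpu_ioctl(vcpu, KVM_RUN, NULL);	// caller checks r
 *	vcpu_ioctl(vcpu, KVM_RUN, NULL);		// asserts on failure
 */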

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
					    uint64_t size, uint64_t attributes)
{
	struct kvm_memory_attributes attr = {
		.attributes = attributes,
		.address = gpa,
		.size = size,
		.flags = 0,
	};

	/*
	 * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes. These flows
	 * need significant enhancements to support multiple attributes.
	 */
	TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE,
		    "Update me to support multiple attributes!");

	vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
}

static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
				      uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
				     uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, 0);
}
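
/*
 * Illustrative flow (a sketch; assumes a VM type with guest_memfd-backed
 * private memory): flip a GPA range to private before the guest uses it,
 * then back to shared when the guest converts it:
 *
 *	vm_mem_set_private(vm, gpa, size);
 *	...
 *	vm_mem_set_shared(vm, gpa, size);
 */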

void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
			    bool punch_hole);

static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
					   uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, true);
}

static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
					 uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, false);
}

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
						uint64_t address,
						uint64_t size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio = pio,
	};

	vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);
}

static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
						  uint64_t address,
						  uint64_t size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio = pio,
	};

	vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
	return fd;
}

static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
			      uint32_t flags)
{
	struct kvm_irqfd irqfd = {
		.fd = eventfd,
		.gsi = gsi,
		.flags = flags,
		.resamplefd = -1,
	};

	return __vm_ioctl(vm, KVM_IRQFD, &irqfd);
}

static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
			     uint32_t flags)
{
	int ret = __kvm_irqfd(vm, gsi, eventfd, flags);

	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm);
}

static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
{
	kvm_irqfd(vm, gsi, eventfd, 0);
}

static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
{
	kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
}

static inline int kvm_new_eventfd(void)
{
	int fd = eventfd(0, 0);

	TEST_ASSERT(fd >= 0, __KVM_SYSCALL_ERROR("eventfd()", fd));
	return fd;
}
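
/*
 * Illustrative irqfd flow (a sketch; the GSI value is arbitrary): route an
 * eventfd to a guest IRQ line, signal it from the host, then tear it down:
 *
 *	int efd = kvm_new_eventfd();
 *
 *	kvm_assign_irqfd(vm, 0, efd);
 *	eventfd_write(efd, 1);		// inject the interrupt
 *	kvm_deassign_irqfd(vm, 0, efd);
 *	close(efd);
 */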

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = pread(stats_fd, header, sizeof(*header), 0);
	TEST_ASSERT(ret == sizeof(*header),
		    "Failed to read '%lu' header bytes, ret = '%ld'",
		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned. For a given instance of KVM, the name field is the same
	 * size for all stats and is provided in the overall stats header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							  int index,
							  struct kvm_stats_header *header)
{
	/*
	 * Note, size_desc includes the size of the name field, which is
	 * variable. i.e. this is NOT equivalent to &stats_desc[i].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements);

void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
		  uint64_t *data, size_t max_elements);

#define __get_stat(stats, stat) \
({ \
	uint64_t data; \
 \
	kvm_get_stat(stats, #stat, &data, 1); \
	data; \
})

#define vm_get_stat(vm, stat)		__get_stat(&(vm)->stats, stat)
#define vcpu_get_stat(vcpu, stat)	__get_stat(&(vcpu)->stats, stat)
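
/*
 * Illustrative usage (a sketch; stat names vary by architecture and kernel
 * version, the names below are examples only):
 *
 *	uint64_t flushes = vm_get_stat(vm, remote_tlb_flush);
 *	uint64_t halts = vcpu_get_stat(vcpu, halt_exits);
 */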

static inline bool read_smt_control(char *buf, size_t buf_size)
{
	FILE *f = fopen("/sys/devices/system/cpu/smt/control", "r");
	bool ret;

	if (!f)
		return false;

	ret = fread(buf, sizeof(*buf), buf_size, f) > 0;
	fclose(f);

	return ret;
}

static inline bool is_smt_possible(void)
{
	char buf[16];

	if (read_smt_control(buf, sizeof(buf)) &&
	    (!strncmp(buf, "forceoff", 8) || !strncmp(buf, "notsupported", 12)))
		return false;

	return true;
}

static inline bool is_smt_on(void)
{
	char buf[16];

	if (read_smt_control(buf, sizeof(buf)) && !strncmp(buf, "on", 2))
		return true;

	return false;
}

void vm_create_irqchip(struct kvm_vm *vm);

static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					  uint64_t flags)
{
	struct kvm_create_guest_memfd guest_memfd = {
		.size = size,
		.flags = flags,
	};

	return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
}

static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					uint64_t flags)
{
	int fd = __vm_create_guest_memfd(vm, size, flags);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
	return fd;
}

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva);
void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva,
				uint32_t guest_memfd, uint64_t guest_memfd_offset);
int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				 uint64_t gpa, uint64_t size, void *hva,
				 uint32_t guest_memfd, uint64_t guest_memfd_offset);

void vm_userspace_mem_region_add(struct kvm_vm *vm,
				 enum vm_mem_backing_src_type src_type,
				 uint64_t guest_paddr, uint32_t slot, uint64_t npages,
				 uint32_t flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
		uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);

#ifndef vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
{
	return false;
}
#endif

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			    enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
				 vm_vaddr_t vaddr_min,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);

#ifndef vcpu_arch_put_guest
#define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
#endif

static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
{
	return gpa & ~vm->gpa_tag_mask;
}

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id)
{
	uint64_t val;
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
	return val;
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
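
/*
 * Illustrative round-trip (a sketch; the register ID is arch specific and
 * "reg_id" is only a placeholder):
 *
 *	uint64_t val = vcpu_get_reg(vcpu, reg_id);
 *	vcpu_set_reg(vcpu, reg_id, val + 1);
 */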

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vcpu->vm);
	return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry point,
 * per the C calling convention of the architecture, to the values given as
 * variable args. Each of the variable args is expected to be of type uint64_t.
 * The maximum @num can be is specific to the architecture.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
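
/*
 * Illustrative usage (a sketch; guest_code and the argument values are
 * hypothetical):
 *
 *	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *	vcpu_args_set(vcpu, 2, (uint64_t)gva, nr_pages);
 */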

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES		4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
				 uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
				vm_paddr_t paddr_min, uint32_t memslot,
				bool protected);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
					    vm_paddr_t paddr_min, uint32_t memslot)
{
	/*
	 * By default, allocate memory as protected for VMs that support
	 * protected memory, as the majority of memory for such VMs is
	 * protected, i.e. using shared memory is effectively opt-in.
	 */
	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
				    vm_arch_has_protected_memory(vm));
}

/*
 * ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(struct vm_shape shape);
struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_SHAPE_DEFAULT);
}

static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
{
	const struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = type,
	};

	return ____vm_create(shape);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}

struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
					       uint64_t extra_mem_pages,
					       void *guest_code);

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory. Returns the VM and vCPU (via out param).
 */
static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						       uint64_t extra_mem_pages,
						       void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
					       extra_mem_pages, guest_code);
}

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}
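
/*
 * Illustrative test skeleton (a sketch only; guest_code is whatever the test
 * supplies):
 *
 *	static void guest_code(void) { ... }
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *
 *	vcpu_run(vcpu);
 *	kvm_vm_free(vm);
 */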

static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
							   struct kvm_vcpu **vcpu,
							   void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_set_files_rlimit(uint32_t nr_vcpus);

int __pin_task_to_cpu(pthread_t task, int cpu);

static inline void pin_task_to_cpu(pthread_t task, int cpu)
{
	int r;

	r = __pin_task_to_cpu(task, cpu);
	TEST_ASSERT(!r, "Failed to set thread affinity to pCPU '%u'", cpu);
}

static inline int pin_task_to_any_cpu(pthread_t task)
{
	int cpu = sched_getcpu();

	pin_task_to_cpu(task, cpu);
	return cpu;
}

static inline void pin_self_to_cpu(int cpu)
{
	pin_task_to_cpu(pthread_self(), cpu);
}

static inline int pin_self_to_any_cpu(void)
{
	return pin_task_to_any_cpu(pthread_self());
}

void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;
	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
	/* s390 requires 1M aligned guest sizes, i.e. 256 * 4KiB pages */
	n = (n + 255) & ~255;
#endif
	return n;
}

#define sync_global_to_guest(vm, g) ({ \
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
	memcpy(_p, &(g), sizeof(g)); \
})

#define sync_global_from_guest(vm, g) ({ \
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
	memcpy(&(g), _p, sizeof(g)); \
})
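
/*
 * Illustrative usage (a sketch; the global is hypothetical): push the host's
 * copy of a global into the guest before running it, and pull it back after:
 *
 *	static uint64_t iterations;
 *
 *	sync_global_to_guest(vm, iterations);
 *	vcpu_run(vcpu);
 *	sync_global_from_guest(vm, iterations);
 */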

/*
 * Write a global value, but only in the VM's (guest's) domain. Primarily used
 * for "globals" that hold per-VM values (VMs always duplicate code and global
 * data into their own region of physical memory), but can be used anytime it's
 * undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({ \
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
	typeof(g) _val = val; \
 \
	memcpy(_p, &(_val), sizeof(g)); \
})
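
/*
 * Illustrative usage (a sketch; the global is hypothetical): give each of
 * two VMs its own value without disturbing the host's copy:
 *
 *	write_guest_global(vm1, vm_id, 1);
 *	write_guest_global(vm2, vm_id, 2);
 */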

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *   memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
}

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm);

bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);

uint32_t guest_get_vcpuid(void);

#endif /* SELFTEST_KVM_UTIL_H */