/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_H
#define SELFTEST_KVM_UTIL_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/kvm.h>

#include <sys/eventfd.h>
#include <sys/ioctl.h>

#include <pthread.h>

#include "kvm_util_arch.h"
#include "kvm_util_types.h"
#include "sparsebit.h"

#define KVM_DEV_PATH		"/dev/kvm"
#define KVM_MAX_VCPUS		512

#define NSEC_PER_SEC		1000000000L

struct userspace_mem_region {
	struct kvm_userspace_memory_region2 region;
	struct sparsebit *unused_phy_pages;
	struct sparsebit *protected_phy_pages;
	int fd;
	off_t offset;
	enum vm_mem_backing_src_type backing_src_type;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_binary_stats {
	int fd;
	struct kvm_stats_header header;
	struct kvm_stats_desc *desc;
};

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
#ifdef __aarch64__
	struct kvm_vcpu_init init;
#endif
	struct kvm_binary_stats stats;
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

enum kvm_mem_region_type {
	MEM_REGION_CODE,
	MEM_REGION_DATA,
	MEM_REGION_PT,
	MEM_REGION_TEST_DATA,
	NR_MEM_REGIONS,
};

struct kvm_vm {
	int mode;
	unsigned long type;
	int kvm_fd;
	int fd;
	unsigned int pgtable_levels;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	uint64_t max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	bool pgd_created;
	vm_paddr_t ucall_mmio_addr;
	vm_paddr_t pgd;
	vm_vaddr_t handlers;
	uint32_t dirty_ring_size;
	uint64_t gpa_tag_mask;

	struct kvm_vm_arch arch;

	struct kvm_binary_stats stats;

	/*
	 * KVM region slots. These are the default memslots used by page
	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
	 * memslot.
	 */
	uint32_t memslots[NR_MEM_REGIONS];
};

struct vcpu_reg_sublist {
	const char *name;
	long capability;
	int feature;
	int feature_type;
	bool finalize;
	__u64 *regs;
	__u64 regs_n;
	__u64 *rejects_set;
	__u64 rejects_set_n;
	__u64 *skips_set;
	__u64 skips_set_n;
};

struct vcpu_reg_list {
	char *name;
	struct vcpu_reg_sublist sublists[];
};

#define for_each_sublist(c, s)						\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

#define kvm_for_each_vcpu(vm, i, vcpu)					\
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)			\
		if (!((vcpu) = vm->vcpus[i]))				\
			continue;					\
		else

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
							     enum kvm_mem_region_type type)
{
	assert(type < NR_MEM_REGIONS);
	return memslot2region(vm, vm->memslots[type]);
}
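
/*
 * Example (illustrative, not part of the upstream API): the region types
 * above steer the page allocators, e.g. guest virtual memory can be forced
 * into the test-data memslot via __vm_vaddr_alloc() (declared below):
 *
 *	vm_vaddr_t gva = __vm_vaddr_alloc(vm, vm->page_size,
 *					  KVM_UTIL_MIN_VADDR,
 *					  MEM_REGION_TEST_DATA);
 */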

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_16K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* For 48-bit VA and any number of PA bits */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P47V47_16K,
	VM_MODE_P36V47_16K,
	NUM_VM_MODES,
};

struct vm_shape {
	uint32_t type;
	uint8_t  mode;
	uint8_t  pad0;
	uint16_t pad1;
};

kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));

#define VM_TYPE_DEFAULT			0

#define VM_SHAPE(__mode)			\
({						\
	struct vm_shape shape = {		\
		.mode = (__mode),		\
		.type = VM_TYPE_DEFAULT		\
	};					\
						\
	shape;					\
})

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__loongarch__)
#define VM_MODE_DEFAULT			VM_MODE_P47V47_16K
#define MIN_PAGE_SHIFT			14U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define VM_SHAPE_DEFAULT	VM_SHAPE(VM_MODE_DEFAULT)

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int __open_path_or_exit(const char *path, int flags, const char *enoent_help);
int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

int kvm_get_module_param_integer(const char *module_name, const char *param);
bool kvm_get_module_param_bool(const char *module_name, const char *param);

static inline bool get_kvm_param_bool(const char *param)
{
	return kvm_get_module_param_bool("kvm", param);
}

static inline int get_kvm_param_integer(const char *param)
{
	return kvm_get_module_param_integer("kvm", param);
}

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}

#define __KVM_SYSCALL_ERROR(_name, _ret) \
	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

static inline void *__kvm_mmap(size_t size, int prot, int flags, int fd,
			       off_t offset)
{
	void *mem;

	mem = mmap(NULL, size, prot, flags, fd, offset);
	TEST_ASSERT(mem != MAP_FAILED, __KVM_SYSCALL_ERROR("mmap()",
							   (int)(unsigned long)MAP_FAILED));

	return mem;
}

static inline void *kvm_mmap(size_t size, int prot, int flags, int fd)
{
	return __kvm_mmap(size, prot, flags, fd, 0);
}

static inline void kvm_munmap(void *mem, size_t size)
{
	int ret;

	ret = munmap(mem, size);
	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
}
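
/*
 * Example (illustrative sketch): mapping a vCPU's shared kvm_run structure;
 * the size comes from the KVM_GET_VCPU_MMAP_SIZE ioctl on /dev/kvm:
 *
 *	int sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
 *	struct kvm_run *run = kvm_mmap(sz, PROT_READ | PROT_WRITE,
 *				       MAP_SHARED, vcpu->fd);
 *	...
 *	kvm_munmap(run, sz);
 */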

/*
 * Use the "inner", double-underscore macro when reporting errors from within
 * other macros so that the name of ioctl() and not its literal numeric value
 * is printed on error. The "outer" macro is strongly preferred when reporting
 * errors "directly", i.e. without an additional layer of macros, as it reduces
 * the probability of passing in the wrong string.
 */
#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret)	__KVM_IOCTL_ERROR(#_ioctl, _ret)
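
/*
 * Example (illustrative): the outer macro stringifies the ioctl name itself,
 * so a failure prints "KVM_RUN failed ..." instead of a raw request number:
 *
 *	int ret = ioctl(vcpu->fd, KVM_RUN, NULL);
 *
 *	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
 */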

#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define kvm_ioctl(kvm_fd, cmd, arg)				\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret));	\
})

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)				\
({								\
	static_assert_is_vm(vm);				\
	kvm_do_ioctl((vm)->fd, cmd, arg);			\
})

/*
 * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
 * the ioctl() failed because KVM killed/bugged the VM. To detect a dead VM,
 * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
 * selftests existed and (b) should never outright fail, i.e. is supposed to
 * return 0 or 1. If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
 * VM and its vCPUs, including KVM_CHECK_EXTENSION.
 */
#define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm)				\
do {											\
	int __errno = errno;								\
											\
	static_assert_is_vm(vm);							\
											\
	if (cond)									\
		break;									\
											\
	if (errno == EIO &&								\
	    __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) {	\
		TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO");	\
		TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues");	\
	}										\
	errno = __errno;								\
	TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret));				\
} while (0)

#define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm)		\
	__TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)

#define vm_ioctl(vm, cmd, arg)					\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);	\
})

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)				\
({								\
	static_assert_is_vcpu(vcpu);				\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);			\
})

#define vcpu_ioctl(vcpu, cmd, arg)					\
({									\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);				\
									\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm);	\
})

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
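
/*
 * Example (illustrative): gate a test on a capability and then enable it,
 * e.g. KVM_CAP_EXIT_ON_EMULATION_FAILURE on x86:
 *
 *	TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE));
 *	vm_enable_cap(vm, KVM_CAP_EXIT_ON_EMULATION_FAILURE, 1);
 */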

static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
					    uint64_t size, uint64_t attributes)
{
	struct kvm_memory_attributes attr = {
		.attributes = attributes,
		.address = gpa,
		.size = size,
		.flags = 0,
	};

	/*
	 * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes. These flows
	 * need significant enhancements to support multiple attributes.
	 */
	TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE,
		    "Update me to support multiple attributes!");

	vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
}

static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
				      uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
				     uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, 0);
}
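
/*
 * Example (illustrative sketch): converting a page of guest memory from
 * shared to private and back, as a private<->shared conversion test might:
 *
 *	vm_mem_set_private(vm, gpa, vm->page_size);
 *	... run guest code that touches the now-private page ...
 *	vm_mem_set_shared(vm, gpa, vm->page_size);
 */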

void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
			    bool punch_hole);

static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
					   uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, true);
}

static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
					 uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, false);
}

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}
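
/*
 * Example (illustrative sketch, assuming a "bitmap" sized for the memslot
 * and KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 enabled): harvest and then re-arm
 * dirty logging for the first @num_pages pages of @slot:
 *
 *	kvm_vm_get_dirty_log(vm, slot, bitmap);
 *	... process the dirty pages ...
 *	kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
 */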

static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
						uint64_t address,
						uint64_t size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio = pio,
	};

	vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);
}

static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
						  uint64_t address,
						  uint64_t size, bool pio)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
		.size = size,
		.pio = pio,
	};

	vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
	return fd;
}

static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
			      uint32_t flags)
{
	struct kvm_irqfd irqfd = {
		.fd = eventfd,
		.gsi = gsi,
		.flags = flags,
		.resamplefd = -1,
	};

	return __vm_ioctl(vm, KVM_IRQFD, &irqfd);
}

static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
			     uint32_t flags)
{
	int ret = __kvm_irqfd(vm, gsi, eventfd, flags);

	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm);
}

static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
{
	kvm_irqfd(vm, gsi, eventfd, 0);
}

static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
{
	kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
}

static inline int kvm_new_eventfd(void)
{
	int fd = eventfd(0, 0);

	TEST_ASSERT(fd >= 0, __KVM_SYSCALL_ERROR("eventfd()", fd));
	return fd;
}
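
/*
 * Example (illustrative, requires an in-kernel irqchip): route an eventfd to
 * a GSI so that signaling the eventfd injects the interrupt, then tear the
 * routing down:
 *
 *	int fd = kvm_new_eventfd();
 *
 *	kvm_assign_irqfd(vm, gsi, fd);
 *	... eventfd_write(fd, 1), guest observes the interrupt ...
 *	kvm_deassign_irqfd(vm, gsi, fd);
 *	close(fd);
 */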

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = pread(stats_fd, header, sizeof(*header), 0);
	TEST_ASSERT(ret == sizeof(*header),
		    "Failed to read '%lu' header bytes, ret = '%ld'",
		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned. For a given instance of KVM, the name field is the same
	 * size for all stats and is provided in the overall stats header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							   int index,
							   struct kvm_stats_header *header)
{
	/*
	 * Note, size_desc includes the size of the name field, which is
	 * variable. i.e. this is NOT equivalent to &stats_desc[i].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}
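
/*
 * Example (illustrative sketch): walking all stats descriptors of a VM; the
 * pointer arithmetic must go through get_stats_descriptor() because each
 * descriptor carries a variable-sized name:
 *
 *	struct kvm_stats_header header;
 *	struct kvm_stats_desc *descs, *desc;
 *	int i, stats_fd = vm_get_stats_fd(vm);
 *
 *	read_stats_header(stats_fd, &header);
 *	descs = read_stats_descriptors(stats_fd, &header);
 *	for (i = 0; i < header.num_desc; i++) {
 *		desc = get_stats_descriptor(descs, i, &header);
 *		... inspect desc->name, desc->flags, etc. ...
 *	}
 */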

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements);

void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
		  uint64_t *data, size_t max_elements);

#define __get_stat(stats, stat)			\
({						\
	uint64_t data;				\
						\
	kvm_get_stat(stats, #stat, &data, 1);	\
	data;					\
})

#define vm_get_stat(vm, stat)		__get_stat(&(vm)->stats, stat)
#define vcpu_get_stat(vcpu, stat)	__get_stat(&(vcpu)->stats, stat)
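
/*
 * Example (illustrative): the stat is passed as a bare token and stringified
 * by __get_stat(), e.g. to snapshot a generic vCPU halt-polling stat:
 *
 *	uint64_t wakeups = vcpu_get_stat(vcpu, halt_wakeup);
 */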

static inline bool read_smt_control(char *buf, size_t buf_size)
{
	FILE *f = fopen("/sys/devices/system/cpu/smt/control", "r");
	bool ret;

	if (!f)
		return false;

	ret = fread(buf, sizeof(*buf), buf_size, f) > 0;
	fclose(f);

	return ret;
}

static inline bool is_smt_possible(void)
{
	char buf[16];

	if (read_smt_control(buf, sizeof(buf)) &&
	    (!strncmp(buf, "forceoff", 8) || !strncmp(buf, "notsupported", 12)))
		return false;

	return true;
}

static inline bool is_smt_on(void)
{
	char buf[16];

	if (read_smt_control(buf, sizeof(buf)) && !strncmp(buf, "on", 2))
		return true;

	return false;
}

void vm_create_irqchip(struct kvm_vm *vm);

static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					  uint64_t flags)
{
	struct kvm_create_guest_memfd guest_memfd = {
		.size = size,
		.flags = flags,
	};

	return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
}

static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					uint64_t flags)
{
	int fd = __vm_create_guest_memfd(vm, size, flags);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
	return fd;
}
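
/*
 * Example (illustrative sketch): backing a memslot with guest_memfd, e.g.
 * for private memory; the fd and offset are handed to the memslot via
 * vm_set_user_memory_region2() (declared below):
 *
 *	int memfd = vm_create_guest_memfd(vm, size, 0);
 *
 *	vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD,
 *				   gpa, size, hva, memfd, 0);
 */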

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva);
void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva,
				uint32_t guest_memfd, uint64_t guest_memfd_offset);
int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				 uint64_t gpa, uint64_t size, void *hva,
				 uint32_t guest_memfd, uint64_t guest_memfd_offset);

void vm_userspace_mem_region_add(struct kvm_vm *vm,
				 enum vm_mem_backing_src_type src_type,
				 uint64_t guest_paddr, uint32_t slot, uint64_t npages,
				 uint32_t flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
		uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);

#ifndef vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
{
	return false;
}
#endif

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			    enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
				 vm_vaddr_t vaddr_min,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);

#ifndef vcpu_arch_put_guest
#define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
#endif

static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
{
	return gpa & ~vm->gpa_tag_mask;
}

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id)
{
	uint64_t val;
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
	return val;
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vcpu->vm);
	return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vcpu - vCPU whose entry point arguments are to be set
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry point,
 * per the C calling convention of the architecture, to the values given as
 * variable args. Each of the variable args is expected to be of type uint64_t.
 * The maximum @num can be is specific to the architecture.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
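
/*
 * Example (illustrative sketch): passing two parameters to a guest entry
 * point, assuming a hypothetical guest_code() taking two uint64_t args:
 *
 *	static void guest_code(uint64_t gva, uint64_t nr_pages) { ... }
 *	...
 *	vcpu_args_set(vcpu, 2, gva, nr_pages);
 */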

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES		4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
				 uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
				vm_paddr_t paddr_min, uint32_t memslot,
				bool protected);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
					    vm_paddr_t paddr_min, uint32_t memslot)
{
	/*
	 * By default, allocate memory as protected for VMs that support
	 * protected memory, as the majority of memory for such VMs is
	 * protected, i.e. using shared memory is effectively opt-in.
	 */
	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
				    vm_arch_has_protected_memory(vm));
}

/*
 * ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(struct vm_shape shape);
struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_SHAPE_DEFAULT);
}

static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
{
	const struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = type,
	};

	return ____vm_create(shape);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}

struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
					       uint64_t extra_mem_pages,
					       void *guest_code);

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory. Returns the VM and vCPU (via out param).
 */
static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						       uint64_t extra_mem_pages,
						       void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
					       extra_mem_pages, guest_code);
}

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}

static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
							   struct kvm_vcpu **vcpu,
							   void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
}
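
/*
 * Example (illustrative sketch): the canonical shape of a simple selftest,
 * assuming a hypothetical guest_main() that finishes with GUEST_DONE():
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm;
 *
 *	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
 *	vcpu_run(vcpu);
 *	... check ucalls and/or the exit reason ...
 *	kvm_vm_free(vm);
 */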

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_set_files_rlimit(uint32_t nr_vcpus);

int __pin_task_to_cpu(pthread_t task, int cpu);

static inline void pin_task_to_cpu(pthread_t task, int cpu)
{
	int r;

	r = __pin_task_to_cpu(task, cpu);
	TEST_ASSERT(!r, "Failed to set thread affinity to pCPU '%u'", cpu);
}

static inline int pin_task_to_any_cpu(pthread_t task)
{
	int cpu = sched_getcpu();

	pin_task_to_cpu(task, cpu);
	return cpu;
}

static inline void pin_self_to_cpu(int cpu)
{
	pin_task_to_cpu(pthread_self(), cpu);
}

static inline int pin_self_to_any_cpu(void)
{
	return pin_task_to_any_cpu(pthread_self());
}

void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;
	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
	/* s390 requires 1M aligned guest sizes */
	n = (n + 255) & ~255;
#endif
	return n;
}

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})

/*
 * Write a global value, but only in the VM's (guest's) domain. Primarily used
 * for "globals" that hold per-VM values (VMs always duplicate code and global
 * data into their own region of physical memory), but can be used anytime it's
 * undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	typeof(g) _val = val;					\
								\
	memcpy(_p, &(_val), sizeof(g));				\
})
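
/*
 * Example (illustrative): publishing a host-side value to the guest's copy
 * of a global, assuming a hypothetical "static uint64_t test_iterations"
 * shared between host and guest code:
 *
 *	test_iterations = 100;
 *	sync_global_to_guest(vm, test_iterations);
 *
 * or, without touching the host's copy:
 *
 *	write_guest_global(vm, test_iterations, 100);
 */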

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
}
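
/*
 * Example (illustrative sketch): identity-mapping a freshly allocated
 * physical page into the guest's page tables so guest code can reach it
 * through the same address:
 *
 *	vm_paddr_t gpa = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
 *					   vm->memslots[MEM_REGION_TEST_DATA]);
 *
 *	virt_pg_map(vm, gpa, gpa);
 */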

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}
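
/*
 * Example (illustrative): the usual two-step translation tests use to
 * inspect guest data from the host; a GVA is walked down to a GPA, which in
 * turn maps to a host virtual address within the backing memslot:
 *
 *	vm_paddr_t gpa = addr_gva2gpa(vm, gva);
 *	uint64_t *hva = addr_gpa2hva(vm, gpa);
 *
 * or, equivalently, in one shot:
 *
 *	uint64_t *hva = addr_gva2hva(vm, gva);
 */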

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus);
void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm);
void kvm_arch_vm_release(struct kvm_vm *vm);

bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);

uint32_t guest_get_vcpuid(void);

bool kvm_arch_has_default_irqchip(void);

#endif /* SELFTEST_KVM_UTIL_H */