Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * tools/testing/selftests/kvm/lib/kvm_util.c
4 *
5 * Copyright (C) 2018, Google LLC.
6 */
7
8#include "test_util.h"
9#include "kvm_util.h"
10#include "kvm_util_internal.h"
11
12#include <assert.h>
13#include <sys/mman.h>
14#include <sys/types.h>
15#include <sys/stat.h>
16#include <linux/kernel.h>
17
18#define KVM_UTIL_PGS_PER_HUGEPG 512
19#define KVM_UTIL_MIN_PFN 2
20
/*
 * Round the pointer x up to the next multiple of size.
 * size must be a non-zero power of 2; asserts otherwise.
 */
static void *align(void *x, size_t size)
{
	size_t addr = (size_t)x;

	TEST_ASSERT(size != 0 && !(size & (size - 1)),
		"size not a power of 2: %lu", size);

	addr = (addr + size - 1) & ~(size - 1);
	return (void *)addr;
}
29
30/*
31 * Capability
32 *
33 * Input Args:
34 * cap - Capability
35 *
36 * Output Args: None
37 *
38 * Return:
39 * On success, the Value corresponding to the capability (KVM_CAP_*)
40 * specified by the value of cap. On failure a TEST_ASSERT failure
41 * is produced.
42 *
43 * Looks up and returns the value corresponding to the capability
44 * (KVM_CAP_*) given by cap.
45 */
46int kvm_check_cap(long cap)
47{
48 int ret;
49 int kvm_fd;
50
51 kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
52 if (kvm_fd < 0)
53 exit(KSFT_SKIP);
54
55 ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
56 TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
57 " rc: %i errno: %i", ret, errno);
58
59 close(kvm_fd);
60
61 return ret;
62}
63
64/* VM Enable Capability
65 *
66 * Input Args:
67 * vm - Virtual Machine
68 * cap - Capability
69 *
70 * Output Args: None
71 *
72 * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
73 *
74 * Enables a capability (KVM_CAP_*) on the VM.
75 */
76int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
77{
78 int ret;
79
80 ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
81 TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
82 " rc: %i errno: %i", ret, errno);
83
84 return ret;
85}
86
/*
 * Open /dev/kvm with permissions perm and create a VM of the given type,
 * storing both fds in *vm. Skips the whole test (KSFT_SKIP) when KVM is
 * unavailable or lacks KVM_CAP_IMMEDIATE_EXIT, which this library relies
 * on (see vcpu_run_complete_io).
 */
static void vm_open(struct kvm_vm *vm, int perm, unsigned long type)
{
	vm->kvm_fd = open(KVM_DEV_PATH, perm);
	if (vm->kvm_fd < 0)
		exit(KSFT_SKIP);

	if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
		fprintf(stderr, "immediate_exit not available, skipping test\n");
		exit(KSFT_SKIP);
	}

	/* type selects the machine type (0 = default) per KVM_CREATE_VM. */
	vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type);
	TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
		"rc: %i errno: %i", vm->fd, errno);
}
102
103const char * const vm_guest_mode_string[] = {
104 "PA-bits:52, VA-bits:48, 4K pages",
105 "PA-bits:52, VA-bits:48, 64K pages",
106 "PA-bits:48, VA-bits:48, 4K pages",
107 "PA-bits:48, VA-bits:48, 64K pages",
108 "PA-bits:40, VA-bits:48, 4K pages",
109 "PA-bits:40, VA-bits:48, 64K pages",
110};
111_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
112 "Missing new mode strings?");
113
114/*
115 * VM Create
116 *
117 * Input Args:
118 * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
119 * phy_pages - Physical memory pages
120 * perm - permission
121 *
122 * Output Args: None
123 *
124 * Return:
125 * Pointer to opaque structure that describes the created VM.
126 *
127 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
128 * When phy_pages is non-zero, a memory region of phy_pages physical pages
129 * is created and mapped starting at guest physical address 0. The file
130 * descriptor to control the created VM is created with the permissions
131 * given by perm (e.g. O_RDWR).
132 */
133struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages,
134 int perm, unsigned long type)
135{
136 struct kvm_vm *vm;
137
138 vm = calloc(1, sizeof(*vm));
139 TEST_ASSERT(vm != NULL, "Insufficient Memory");
140
141 vm->mode = mode;
142 vm->type = type;
143 vm_open(vm, perm, type);
144
145 /* Setup mode specific traits. */
146 switch (vm->mode) {
147 case VM_MODE_P52V48_4K:
148 vm->pgtable_levels = 4;
149 vm->pa_bits = 52;
150 vm->va_bits = 48;
151 vm->page_size = 0x1000;
152 vm->page_shift = 12;
153 break;
154 case VM_MODE_P52V48_64K:
155 vm->pgtable_levels = 3;
156 vm->pa_bits = 52;
157 vm->va_bits = 48;
158 vm->page_size = 0x10000;
159 vm->page_shift = 16;
160 break;
161 case VM_MODE_P48V48_4K:
162 vm->pgtable_levels = 4;
163 vm->pa_bits = 48;
164 vm->va_bits = 48;
165 vm->page_size = 0x1000;
166 vm->page_shift = 12;
167 break;
168 case VM_MODE_P48V48_64K:
169 vm->pgtable_levels = 3;
170 vm->pa_bits = 48;
171 vm->va_bits = 48;
172 vm->page_size = 0x10000;
173 vm->page_shift = 16;
174 break;
175 case VM_MODE_P40V48_4K:
176 vm->pgtable_levels = 4;
177 vm->pa_bits = 40;
178 vm->va_bits = 48;
179 vm->page_size = 0x1000;
180 vm->page_shift = 12;
181 break;
182 case VM_MODE_P40V48_64K:
183 vm->pgtable_levels = 3;
184 vm->pa_bits = 40;
185 vm->va_bits = 48;
186 vm->page_size = 0x10000;
187 vm->page_shift = 16;
188 break;
189 default:
190 TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
191 }
192
193 /* Limit to VA-bit canonical virtual addresses. */
194 vm->vpages_valid = sparsebit_alloc();
195 sparsebit_set_num(vm->vpages_valid,
196 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
197 sparsebit_set_num(vm->vpages_valid,
198 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
199 (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
200
201 /* Limit physical addresses to PA-bits. */
202 vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
203
204 /* Allocate and setup memory for guest. */
205 vm->vpages_mapped = sparsebit_alloc();
206 if (phy_pages != 0)
207 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
208 0, 0, phy_pages, 0);
209
210 return vm;
211}
212
/* Convenience wrapper for _vm_create() with the default VM type (0). */
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
{
	return _vm_create(mode, phy_pages, perm, 0);
}
217
218/*
219 * VM Restart
220 *
221 * Input Args:
222 * vm - VM that has been released before
223 * perm - permission
224 *
225 * Output Args: None
226 *
227 * Reopens the file descriptors associated to the VM and reinstates the
228 * global state, such as the irqchip and the memory regions that are mapped
229 * into the guest.
230 */
231void kvm_vm_restart(struct kvm_vm *vmp, int perm)
232{
233 struct userspace_mem_region *region;
234
235 vm_open(vmp, perm, vmp->type);
236 if (vmp->has_irqchip)
237 vm_create_irqchip(vmp);
238
239 for (region = vmp->userspace_mem_region_head; region;
240 region = region->next) {
241 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
242 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
243 " rc: %i errno: %i\n"
244 " slot: %u flags: 0x%x\n"
245 " guest_phys_addr: 0x%lx size: 0x%lx",
246 ret, errno, region->region.slot,
247 region->region.flags,
248 region->region.guest_phys_addr,
249 region->region.memory_size);
250 }
251}
252
253void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
254{
255 struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
256 int ret;
257
258 ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
259 TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
260 strerror(-ret));
261}
262
263void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
264 uint64_t first_page, uint32_t num_pages)
265{
266 struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot,
267 .first_page = first_page,
268 .num_pages = num_pages };
269 int ret;
270
271 ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
272 TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
273 strerror(-ret));
274}
275
276/*
277 * Userspace Memory Region Find
278 *
279 * Input Args:
280 * vm - Virtual Machine
281 * start - Starting VM physical address
282 * end - Ending VM physical address, inclusive.
283 *
284 * Output Args: None
285 *
286 * Return:
287 * Pointer to overlapping region, NULL if no such region.
288 *
289 * Searches for a region with any physical memory that overlaps with
290 * any portion of the guest physical addresses from start to end
291 * inclusive. If multiple overlapping regions exist, a pointer to any
292 * of the regions is returned. Null is returned only when no overlapping
293 * region exists.
294 */
295static struct userspace_mem_region *
296userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
297{
298 struct userspace_mem_region *region;
299
300 for (region = vm->userspace_mem_region_head; region;
301 region = region->next) {
302 uint64_t existing_start = region->region.guest_phys_addr;
303 uint64_t existing_end = region->region.guest_phys_addr
304 + region->region.memory_size - 1;
305 if (start <= existing_end && end >= existing_start)
306 return region;
307 }
308
309 return NULL;
310}
311
312/*
313 * KVM Userspace Memory Region Find
314 *
315 * Input Args:
316 * vm - Virtual Machine
317 * start - Starting VM physical address
318 * end - Ending VM physical address, inclusive.
319 *
320 * Output Args: None
321 *
322 * Return:
323 * Pointer to overlapping region, NULL if no such region.
324 *
325 * Public interface to userspace_mem_region_find. Allows tests to look up
326 * the memslot datastructure for a given range of guest physical memory.
327 */
328struct kvm_userspace_memory_region *
329kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
330 uint64_t end)
331{
332 struct userspace_mem_region *region;
333
334 region = userspace_mem_region_find(vm, start, end);
335 if (!region)
336 return NULL;
337
338 return ®ion->region;
339}
340
341/*
342 * VCPU Find
343 *
344 * Input Args:
345 * vm - Virtual Machine
346 * vcpuid - VCPU ID
347 *
348 * Output Args: None
349 *
350 * Return:
351 * Pointer to VCPU structure
352 *
353 * Locates a vcpu structure that describes the VCPU specified by vcpuid and
354 * returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU
355 * for the specified vcpuid.
356 */
357struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
358{
359 struct vcpu *vcpup;
360
361 for (vcpup = vm->vcpu_head; vcpup; vcpup = vcpup->next) {
362 if (vcpup->id == vcpuid)
363 return vcpup;
364 }
365
366 return NULL;
367}
368
369/*
370 * VM VCPU Remove
371 *
372 * Input Args:
373 * vm - Virtual Machine
374 * vcpuid - VCPU ID
375 *
376 * Output Args: None
377 *
378 * Return: None, TEST_ASSERT failures for all error conditions
379 *
380 * Within the VM specified by vm, removes the VCPU given by vcpuid.
381 */
382static void vm_vcpu_rm(struct kvm_vm *vm, uint32_t vcpuid)
383{
384 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
385 int ret;
386
387 ret = munmap(vcpu->state, sizeof(*vcpu->state));
388 TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
389 "errno: %i", ret, errno);
390 close(vcpu->fd);
391 TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
392 "errno: %i", ret, errno);
393
394 if (vcpu->next)
395 vcpu->next->prev = vcpu->prev;
396 if (vcpu->prev)
397 vcpu->prev->next = vcpu->next;
398 else
399 vm->vcpu_head = vcpu->next;
400 free(vcpu);
401}
402
403void kvm_vm_release(struct kvm_vm *vmp)
404{
405 int ret;
406
407 while (vmp->vcpu_head)
408 vm_vcpu_rm(vmp, vmp->vcpu_head->id);
409
410 ret = close(vmp->fd);
411 TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
412 " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);
413
414 close(vmp->kvm_fd);
415 TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n"
416 " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
417}
418
419/*
420 * Destroys and frees the VM pointed to by vmp.
421 */
422void kvm_vm_free(struct kvm_vm *vmp)
423{
424 int ret;
425
426 if (vmp == NULL)
427 return;
428
429 /* Free userspace_mem_regions. */
430 while (vmp->userspace_mem_region_head) {
431 struct userspace_mem_region *region
432 = vmp->userspace_mem_region_head;
433
434 region->region.memory_size = 0;
435 ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION,
436 ®ion->region);
437 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
438 "rc: %i errno: %i", ret, errno);
439
440 vmp->userspace_mem_region_head = region->next;
441 sparsebit_free(®ion->unused_phy_pages);
442 ret = munmap(region->mmap_start, region->mmap_size);
443 TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i",
444 ret, errno);
445
446 free(region);
447 }
448
449 /* Free sparsebit arrays. */
450 sparsebit_free(&vmp->vpages_valid);
451 sparsebit_free(&vmp->vpages_mapped);
452
453 kvm_vm_release(vmp);
454
455 /* Free the structure describing the VM. */
456 free(vmp);
457}
458
459/*
460 * Memory Compare, host virtual to guest virtual
461 *
462 * Input Args:
463 * hva - Starting host virtual address
464 * vm - Virtual Machine
465 * gva - Starting guest virtual address
466 * len - number of bytes to compare
467 *
468 * Output Args: None
469 *
470 * Input/Output Args: None
471 *
472 * Return:
473 * Returns 0 if the bytes starting at hva for a length of len
474 * are equal the guest virtual bytes starting at gva. Returns
475 * a value < 0, if bytes at hva are less than those at gva.
476 * Otherwise a value > 0 is returned.
477 *
478 * Compares the bytes starting at the host virtual address hva, for
479 * a length of len, to the guest bytes starting at the guest virtual
480 * address given by gva.
481 */
482int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
483{
484 size_t amt;
485
486 /*
487 * Compare a batch of bytes until either a match is found
488 * or all the bytes have been compared.
489 */
490 for (uintptr_t offset = 0; offset < len; offset += amt) {
491 uintptr_t ptr1 = (uintptr_t)hva + offset;
492
493 /*
494 * Determine host address for guest virtual address
495 * at offset.
496 */
497 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);
498
499 /*
500 * Determine amount to compare on this pass.
501 * Don't allow the comparsion to cross a page boundary.
502 */
503 amt = len - offset;
504 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
505 amt = vm->page_size - (ptr1 % vm->page_size);
506 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
507 amt = vm->page_size - (ptr2 % vm->page_size);
508
509 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
510 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));
511
512 /*
513 * Perform the comparison. If there is a difference
514 * return that result to the caller, otherwise need
515 * to continue on looking for a mismatch.
516 */
517 int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
518 if (ret != 0)
519 return ret;
520 }
521
522 /*
523 * No mismatch found. Let the caller know the two memory
524 * areas are equal.
525 */
526 return 0;
527}
528
529/*
530 * VM Userspace Memory Region Add
531 *
532 * Input Args:
533 * vm - Virtual Machine
534 * backing_src - Storage source for this region.
535 * NULL to use anonymous memory.
536 * guest_paddr - Starting guest physical address
537 * slot - KVM region slot
538 * npages - Number of physical pages
539 * flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
540 *
541 * Output Args: None
542 *
543 * Return: None
544 *
545 * Allocates a memory area of the number of pages specified by npages
546 * and maps it to the VM specified by vm, at a starting physical address
547 * given by guest_paddr. The region is created with a KVM region slot
548 * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM. The
549 * region is created with the flags given by flags.
550 */
551void vm_userspace_mem_region_add(struct kvm_vm *vm,
552 enum vm_mem_backing_src_type src_type,
553 uint64_t guest_paddr, uint32_t slot, uint64_t npages,
554 uint32_t flags)
555{
556 int ret;
557 struct userspace_mem_region *region;
558 size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
559 size_t alignment;
560
561 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
562 "address not on a page boundary.\n"
563 " guest_paddr: 0x%lx vm->page_size: 0x%x",
564 guest_paddr, vm->page_size);
565 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
566 <= vm->max_gfn, "Physical range beyond maximum "
567 "supported physical address,\n"
568 " guest_paddr: 0x%lx npages: 0x%lx\n"
569 " vm->max_gfn: 0x%lx vm->page_size: 0x%x",
570 guest_paddr, npages, vm->max_gfn, vm->page_size);
571
572 /*
573 * Confirm a mem region with an overlapping address doesn't
574 * already exist.
575 */
576 region = (struct userspace_mem_region *) userspace_mem_region_find(
577 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
578 if (region != NULL)
579 TEST_ASSERT(false, "overlapping userspace_mem_region already "
580 "exists\n"
581 " requested guest_paddr: 0x%lx npages: 0x%lx "
582 "page_size: 0x%x\n"
583 " existing guest_paddr: 0x%lx size: 0x%lx",
584 guest_paddr, npages, vm->page_size,
585 (uint64_t) region->region.guest_phys_addr,
586 (uint64_t) region->region.memory_size);
587
588 /* Confirm no region with the requested slot already exists. */
589 for (region = vm->userspace_mem_region_head; region;
590 region = region->next) {
591 if (region->region.slot == slot)
592 break;
593 }
594 if (region != NULL)
595 TEST_ASSERT(false, "A mem region with the requested slot "
596 "already exists.\n"
597 " requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
598 " existing slot: %u paddr: 0x%lx size: 0x%lx",
599 slot, guest_paddr, npages,
600 region->region.slot,
601 (uint64_t) region->region.guest_phys_addr,
602 (uint64_t) region->region.memory_size);
603
604 /* Allocate and initialize new mem region structure. */
605 region = calloc(1, sizeof(*region));
606 TEST_ASSERT(region != NULL, "Insufficient Memory");
607 region->mmap_size = npages * vm->page_size;
608
609#ifdef __s390x__
610 /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
611 alignment = 0x100000;
612#else
613 alignment = 1;
614#endif
615
616 if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
617 alignment = max(huge_page_size, alignment);
618
619 /* Add enough memory to align up if necessary */
620 if (alignment > 1)
621 region->mmap_size += alignment;
622
623 region->mmap_start = mmap(NULL, region->mmap_size,
624 PROT_READ | PROT_WRITE,
625 MAP_PRIVATE | MAP_ANONYMOUS
626 | (src_type == VM_MEM_SRC_ANONYMOUS_HUGETLB ? MAP_HUGETLB : 0),
627 -1, 0);
628 TEST_ASSERT(region->mmap_start != MAP_FAILED,
629 "test_malloc failed, mmap_start: %p errno: %i",
630 region->mmap_start, errno);
631
632 /* Align host address */
633 region->host_mem = align(region->mmap_start, alignment);
634
635 /* As needed perform madvise */
636 if (src_type == VM_MEM_SRC_ANONYMOUS || src_type == VM_MEM_SRC_ANONYMOUS_THP) {
637 ret = madvise(region->host_mem, npages * vm->page_size,
638 src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
639 TEST_ASSERT(ret == 0, "madvise failed,\n"
640 " addr: %p\n"
641 " length: 0x%lx\n"
642 " src_type: %x",
643 region->host_mem, npages * vm->page_size, src_type);
644 }
645
646 region->unused_phy_pages = sparsebit_alloc();
647 sparsebit_set_num(region->unused_phy_pages,
648 guest_paddr >> vm->page_shift, npages);
649 region->region.slot = slot;
650 region->region.flags = flags;
651 region->region.guest_phys_addr = guest_paddr;
652 region->region.memory_size = npages * vm->page_size;
653 region->region.userspace_addr = (uintptr_t) region->host_mem;
654 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
655 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
656 " rc: %i errno: %i\n"
657 " slot: %u flags: 0x%x\n"
658 " guest_phys_addr: 0x%lx size: 0x%lx",
659 ret, errno, slot, flags,
660 guest_paddr, (uint64_t) region->region.memory_size);
661
662 /* Add to linked-list of memory regions. */
663 if (vm->userspace_mem_region_head)
664 vm->userspace_mem_region_head->prev = region;
665 region->next = vm->userspace_mem_region_head;
666 vm->userspace_mem_region_head = region;
667}
668
669/*
670 * Memslot to region
671 *
672 * Input Args:
673 * vm - Virtual Machine
674 * memslot - KVM memory slot ID
675 *
676 * Output Args: None
677 *
678 * Return:
679 * Pointer to memory region structure that describe memory region
680 * using kvm memory slot ID given by memslot. TEST_ASSERT failure
681 * on error (e.g. currently no memory region using memslot as a KVM
682 * memory slot ID).
683 */
684static struct userspace_mem_region *
685memslot2region(struct kvm_vm *vm, uint32_t memslot)
686{
687 struct userspace_mem_region *region;
688
689 for (region = vm->userspace_mem_region_head; region;
690 region = region->next) {
691 if (region->region.slot == memslot)
692 break;
693 }
694 if (region == NULL) {
695 fprintf(stderr, "No mem region with the requested slot found,\n"
696 " requested slot: %u\n", memslot);
697 fputs("---- vm dump ----\n", stderr);
698 vm_dump(stderr, vm, 2);
699 TEST_ASSERT(false, "Mem region not found");
700 }
701
702 return region;
703}
704
705/*
706 * VM Memory Region Flags Set
707 *
708 * Input Args:
709 * vm - Virtual Machine
710 * flags - Starting guest physical address
711 *
712 * Output Args: None
713 *
714 * Return: None
715 *
716 * Sets the flags of the memory region specified by the value of slot,
717 * to the values given by flags.
718 */
719void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
720{
721 int ret;
722 struct userspace_mem_region *region;
723
724 region = memslot2region(vm, slot);
725
726 region->region.flags = flags;
727
728 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
729
730 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
731 " rc: %i errno: %i slot: %u flags: 0x%x",
732 ret, errno, slot, flags);
733}
734
735/*
736 * VCPU mmap Size
737 *
738 * Input Args: None
739 *
740 * Output Args: None
741 *
742 * Return:
743 * Size of VCPU state
744 *
745 * Returns the size of the structure pointed to by the return value
746 * of vcpu_state().
747 */
748static int vcpu_mmap_sz(void)
749{
750 int dev_fd, ret;
751
752 dev_fd = open(KVM_DEV_PATH, O_RDONLY);
753 if (dev_fd < 0)
754 exit(KSFT_SKIP);
755
756 ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
757 TEST_ASSERT(ret >= sizeof(struct kvm_run),
758 "%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i",
759 __func__, ret, errno);
760
761 close(dev_fd);
762
763 return ret;
764}
765
766/*
767 * VM VCPU Add
768 *
769 * Input Args:
770 * vm - Virtual Machine
771 * vcpuid - VCPU ID
772 *
773 * Output Args: None
774 *
775 * Return: None
776 *
777 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpuid.
778 * No additional VCPU setup is done.
779 */
780void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
781{
782 struct vcpu *vcpu;
783
784 /* Confirm a vcpu with the specified id doesn't already exist. */
785 vcpu = vcpu_find(vm, vcpuid);
786 if (vcpu != NULL)
787 TEST_ASSERT(false, "vcpu with the specified id "
788 "already exists,\n"
789 " requested vcpuid: %u\n"
790 " existing vcpuid: %u state: %p",
791 vcpuid, vcpu->id, vcpu->state);
792
793 /* Allocate and initialize new vcpu structure. */
794 vcpu = calloc(1, sizeof(*vcpu));
795 TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
796 vcpu->id = vcpuid;
797 vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
798 TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
799 vcpu->fd, errno);
800
801 TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
802 "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
803 vcpu_mmap_sz(), sizeof(*vcpu->state));
804 vcpu->state = (struct kvm_run *) mmap(NULL, sizeof(*vcpu->state),
805 PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
806 TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
807 "vcpu id: %u errno: %i", vcpuid, errno);
808
809 /* Add to linked-list of VCPUs. */
810 if (vm->vcpu_head)
811 vm->vcpu_head->prev = vcpu;
812 vcpu->next = vm->vcpu_head;
813 vm->vcpu_head = vcpu;
814}
815
816/*
817 * VM Virtual Address Unused Gap
818 *
819 * Input Args:
820 * vm - Virtual Machine
821 * sz - Size (bytes)
822 * vaddr_min - Minimum Virtual Address
823 *
824 * Output Args: None
825 *
826 * Return:
827 * Lowest virtual address at or below vaddr_min, with at least
828 * sz unused bytes. TEST_ASSERT failure if no area of at least
829 * size sz is available.
830 *
831 * Within the VM specified by vm, locates the lowest starting virtual
832 * address >= vaddr_min, that has at least sz unallocated bytes. A
833 * TEST_ASSERT failure occurs for invalid input or no area of at least
834 * sz unallocated bytes >= vaddr_min is available.
835 */
836static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
837 vm_vaddr_t vaddr_min)
838{
839 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
840
841 /* Determine lowest permitted virtual page index. */
842 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
843 if ((pgidx_start * vm->page_size) < vaddr_min)
844 goto no_va_found;
845
846 /* Loop over section with enough valid virtual page indexes. */
847 if (!sparsebit_is_set_num(vm->vpages_valid,
848 pgidx_start, pages))
849 pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
850 pgidx_start, pages);
851 do {
852 /*
853 * Are there enough unused virtual pages available at
854 * the currently proposed starting virtual page index.
855 * If not, adjust proposed starting index to next
856 * possible.
857 */
858 if (sparsebit_is_clear_num(vm->vpages_mapped,
859 pgidx_start, pages))
860 goto va_found;
861 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
862 pgidx_start, pages);
863 if (pgidx_start == 0)
864 goto no_va_found;
865
866 /*
867 * If needed, adjust proposed starting virtual address,
868 * to next range of valid virtual addresses.
869 */
870 if (!sparsebit_is_set_num(vm->vpages_valid,
871 pgidx_start, pages)) {
872 pgidx_start = sparsebit_next_set_num(
873 vm->vpages_valid, pgidx_start, pages);
874 if (pgidx_start == 0)
875 goto no_va_found;
876 }
877 } while (pgidx_start != 0);
878
879no_va_found:
880 TEST_ASSERT(false, "No vaddr of specified pages available, "
881 "pages: 0x%lx", pages);
882
883 /* NOT REACHED */
884 return -1;
885
886va_found:
887 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
888 pgidx_start, pages),
889 "Unexpected, invalid virtual page index range,\n"
890 " pgidx_start: 0x%lx\n"
891 " pages: 0x%lx",
892 pgidx_start, pages);
893 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
894 pgidx_start, pages),
895 "Unexpected, pages already mapped,\n"
896 " pgidx_start: 0x%lx\n"
897 " pages: 0x%lx",
898 pgidx_start, pages);
899
900 return pgidx_start * vm->page_size;
901}
902
903/*
904 * VM Virtual Address Allocate
905 *
906 * Input Args:
907 * vm - Virtual Machine
908 * sz - Size in bytes
909 * vaddr_min - Minimum starting virtual address
910 * data_memslot - Memory region slot for data pages
911 * pgd_memslot - Memory region slot for new virtual translation tables
912 *
913 * Output Args: None
914 *
915 * Return:
916 * Starting guest virtual address
917 *
918 * Allocates at least sz bytes within the virtual address space of the vm
919 * given by vm. The allocated bytes are mapped to a virtual address >=
920 * the address given by vaddr_min. Note that each allocation uses a
921 * a unique set of pages, with the minimum real allocation being at least
922 * a page.
923 */
924vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
925 uint32_t data_memslot, uint32_t pgd_memslot)
926{
927 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
928
929 virt_pgd_alloc(vm, pgd_memslot);
930
931 /*
932 * Find an unused range of virtual page addresses of at least
933 * pages in length.
934 */
935 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
936
937 /* Map the virtual pages. */
938 for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
939 pages--, vaddr += vm->page_size) {
940 vm_paddr_t paddr;
941
942 paddr = vm_phy_page_alloc(vm,
943 KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);
944
945 virt_pg_map(vm, vaddr, paddr, pgd_memslot);
946
947 sparsebit_set(vm->vpages_mapped,
948 vaddr >> vm->page_shift);
949 }
950
951 return vaddr_start;
952}
953
954/*
955 * Map a range of VM virtual address to the VM's physical address
956 *
957 * Input Args:
958 * vm - Virtual Machine
959 * vaddr - Virtuall address to map
960 * paddr - VM Physical Address
961 * size - The size of the range to map
962 * pgd_memslot - Memory region slot for new virtual translation tables
963 *
964 * Output Args: None
965 *
966 * Return: None
967 *
968 * Within the VM given by vm, creates a virtual translation for the
969 * page range starting at vaddr to the page range starting at paddr.
970 */
971void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
972 size_t size, uint32_t pgd_memslot)
973{
974 size_t page_size = vm->page_size;
975 size_t npages = size / page_size;
976
977 TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
978 TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
979
980 while (npages--) {
981 virt_pg_map(vm, vaddr, paddr, pgd_memslot);
982 vaddr += page_size;
983 paddr += page_size;
984 }
985}
986
987/*
988 * Address VM Physical to Host Virtual
989 *
990 * Input Args:
991 * vm - Virtual Machine
992 * gpa - VM physical address
993 *
994 * Output Args: None
995 *
996 * Return:
997 * Equivalent host virtual address
998 *
999 * Locates the memory region containing the VM physical address given
1000 * by gpa, within the VM given by vm. When found, the host virtual
1001 * address providing the memory to the vm physical address is returned.
1002 * A TEST_ASSERT failure occurs if no region containing gpa exists.
1003 */
1004void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
1005{
1006 struct userspace_mem_region *region;
1007 for (region = vm->userspace_mem_region_head; region;
1008 region = region->next) {
1009 if ((gpa >= region->region.guest_phys_addr)
1010 && (gpa <= (region->region.guest_phys_addr
1011 + region->region.memory_size - 1)))
1012 return (void *) ((uintptr_t) region->host_mem
1013 + (gpa - region->region.guest_phys_addr));
1014 }
1015
1016 TEST_ASSERT(false, "No vm physical memory at 0x%lx", gpa);
1017 return NULL;
1018}
1019
1020/*
1021 * Address Host Virtual to VM Physical
1022 *
1023 * Input Args:
1024 * vm - Virtual Machine
1025 * hva - Host virtual address
1026 *
1027 * Output Args: None
1028 *
1029 * Return:
1030 * Equivalent VM physical address
1031 *
1032 * Locates the memory region containing the host virtual address given
1033 * by hva, within the VM given by vm. When found, the equivalent
1034 * VM physical address is returned. A TEST_ASSERT failure occurs if no
1035 * region containing hva exists.
1036 */
1037vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
1038{
1039 struct userspace_mem_region *region;
1040 for (region = vm->userspace_mem_region_head; region;
1041 region = region->next) {
1042 if ((hva >= region->host_mem)
1043 && (hva <= (region->host_mem
1044 + region->region.memory_size - 1)))
1045 return (vm_paddr_t) ((uintptr_t)
1046 region->region.guest_phys_addr
1047 + (hva - (uintptr_t) region->host_mem));
1048 }
1049
1050 TEST_ASSERT(false, "No mapping to a guest physical address, "
1051 "hva: %p", hva);
1052 return -1;
1053}
1054
1055/*
1056 * VM Create IRQ Chip
1057 *
1058 * Input Args:
1059 * vm - Virtual Machine
1060 *
1061 * Output Args: None
1062 *
1063 * Return: None
1064 *
1065 * Creates an interrupt controller chip for the VM specified by vm.
1066 */
1067void vm_create_irqchip(struct kvm_vm *vm)
1068{
1069 int ret;
1070
1071 ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0);
1072 TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, "
1073 "rc: %i errno: %i", ret, errno);
1074
1075 vm->has_irqchip = true;
1076}
1077
1078/*
1079 * VM VCPU State
1080 *
1081 * Input Args:
1082 * vm - Virtual Machine
1083 * vcpuid - VCPU ID
1084 *
1085 * Output Args: None
1086 *
1087 * Return:
1088 * Pointer to structure that describes the state of the VCPU.
1089 *
1090 * Locates and returns a pointer to a structure that describes the
1091 * state of the VCPU with the given vcpuid.
1092 */
1093struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
1094{
1095 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1096 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1097
1098 return vcpu->state;
1099}
1100
1101/*
1102 * VM VCPU Run
1103 *
1104 * Input Args:
1105 * vm - Virtual Machine
1106 * vcpuid - VCPU ID
1107 *
1108 * Output Args: None
1109 *
1110 * Return: None
1111 *
1112 * Switch to executing the code for the VCPU given by vcpuid, within the VM
1113 * given by vm.
1114 */
1115void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
1116{
1117 int ret = _vcpu_run(vm, vcpuid);
1118 TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, "
1119 "rc: %i errno: %i", ret, errno);
1120}
1121
1122int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
1123{
1124 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1125 int rc;
1126
1127 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1128 do {
1129 rc = ioctl(vcpu->fd, KVM_RUN, NULL);
1130 } while (rc == -1 && errno == EINTR);
1131 return rc;
1132}
1133
1134void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
1135{
1136 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1137 int ret;
1138
1139 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1140
1141 vcpu->state->immediate_exit = 1;
1142 ret = ioctl(vcpu->fd, KVM_RUN, NULL);
1143 vcpu->state->immediate_exit = 0;
1144
1145 TEST_ASSERT(ret == -1 && errno == EINTR,
1146 "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
1147 ret, errno);
1148}
1149
1150/*
1151 * VM VCPU Set MP State
1152 *
1153 * Input Args:
1154 * vm - Virtual Machine
1155 * vcpuid - VCPU ID
1156 * mp_state - mp_state to be set
1157 *
1158 * Output Args: None
1159 *
1160 * Return: None
1161 *
1162 * Sets the MP state of the VCPU given by vcpuid, to the state given
1163 * by mp_state.
1164 */
1165void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
1166 struct kvm_mp_state *mp_state)
1167{
1168 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1169 int ret;
1170
1171 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1172
1173 ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
1174 TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, "
1175 "rc: %i errno: %i", ret, errno);
1176}
1177
1178/*
1179 * VM VCPU Regs Get
1180 *
1181 * Input Args:
1182 * vm - Virtual Machine
1183 * vcpuid - VCPU ID
1184 *
1185 * Output Args:
1186 * regs - current state of VCPU regs
1187 *
1188 * Return: None
1189 *
1190 * Obtains the current register state for the VCPU specified by vcpuid
1191 * and stores it at the location given by regs.
1192 */
1193void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
1194{
1195 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1196 int ret;
1197
1198 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1199
1200 ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
1201 TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
1202 ret, errno);
1203}
1204
1205/*
1206 * VM VCPU Regs Set
1207 *
1208 * Input Args:
1209 * vm - Virtual Machine
1210 * vcpuid - VCPU ID
1211 * regs - Values to set VCPU regs to
1212 *
1213 * Output Args: None
1214 *
1215 * Return: None
1216 *
1217 * Sets the regs of the VCPU specified by vcpuid to the values
1218 * given by regs.
1219 */
1220void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
1221{
1222 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1223 int ret;
1224
1225 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1226
1227 ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
1228 TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
1229 ret, errno);
1230}
1231
#ifdef __KVM_HAVE_VCPU_EVENTS
/*
 * Reads the pending/injected event state of the VCPU given by vcpuid
 * into the caller-supplied events structure.
 */
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int rc;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	rc = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
	TEST_ASSERT(rc == 0, "KVM_GET_VCPU_EVENTS, failed, rc: %i errno: %i",
		rc, errno);
}

/*
 * Writes the caller-supplied event state into the VCPU given by vcpuid.
 */
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int rc;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	rc = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
	TEST_ASSERT(rc == 0, "KVM_SET_VCPU_EVENTS, failed, rc: %i errno: %i",
		rc, errno);
}
#endif
1259
1260#ifdef __x86_64__
1261void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
1262 struct kvm_nested_state *state)
1263{
1264 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1265 int ret;
1266
1267 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1268
1269 ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
1270 TEST_ASSERT(ret == 0,
1271 "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
1272 ret, errno);
1273}
1274
1275int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
1276 struct kvm_nested_state *state, bool ignore_error)
1277{
1278 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1279 int ret;
1280
1281 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1282
1283 ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
1284 if (!ignore_error) {
1285 TEST_ASSERT(ret == 0,
1286 "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
1287 ret, errno);
1288 }
1289
1290 return ret;
1291}
1292#endif
1293
1294/*
1295 * VM VCPU System Regs Get
1296 *
1297 * Input Args:
1298 * vm - Virtual Machine
1299 * vcpuid - VCPU ID
1300 *
1301 * Output Args:
1302 * sregs - current state of VCPU system regs
1303 *
1304 * Return: None
1305 *
1306 * Obtains the current system register state for the VCPU specified by
1307 * vcpuid and stores it at the location given by sregs.
1308 */
1309void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
1310{
1311 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1312 int ret;
1313
1314 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1315
1316 ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
1317 TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
1318 ret, errno);
1319}
1320
1321/*
1322 * VM VCPU System Regs Set
1323 *
1324 * Input Args:
1325 * vm - Virtual Machine
1326 * vcpuid - VCPU ID
1327 * sregs - Values to set VCPU system regs to
1328 *
1329 * Output Args: None
1330 *
1331 * Return: None
1332 *
1333 * Sets the system regs of the VCPU specified by vcpuid to the values
1334 * given by sregs.
1335 */
1336void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
1337{
1338 int ret = _vcpu_sregs_set(vm, vcpuid, sregs);
1339 TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, "
1340 "rc: %i errno: %i", ret, errno);
1341}
1342
1343int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
1344{
1345 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1346
1347 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1348
1349 return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
1350}
1351
1352/*
1353 * VCPU Ioctl
1354 *
1355 * Input Args:
1356 * vm - Virtual Machine
1357 * vcpuid - VCPU ID
1358 * cmd - Ioctl number
1359 * arg - Argument to pass to the ioctl
1360 *
1361 * Return: None
1362 *
1363 * Issues an arbitrary ioctl on a VCPU fd.
1364 */
1365void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
1366 unsigned long cmd, void *arg)
1367{
1368 int ret;
1369
1370 ret = _vcpu_ioctl(vm, vcpuid, cmd, arg);
1371 TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
1372 cmd, ret, errno, strerror(errno));
1373}
1374
1375int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
1376 unsigned long cmd, void *arg)
1377{
1378 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1379 int ret;
1380
1381 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1382
1383 ret = ioctl(vcpu->fd, cmd, arg);
1384
1385 return ret;
1386}
1387
1388/*
1389 * VM Ioctl
1390 *
1391 * Input Args:
1392 * vm - Virtual Machine
1393 * cmd - Ioctl number
1394 * arg - Argument to pass to the ioctl
1395 *
1396 * Return: None
1397 *
1398 * Issues an arbitrary ioctl on a VM fd.
1399 */
1400void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
1401{
1402 int ret;
1403
1404 ret = ioctl(vm->fd, cmd, arg);
1405 TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)",
1406 cmd, ret, errno, strerror(errno));
1407}
1408
1409/*
1410 * VM Dump
1411 *
1412 * Input Args:
1413 * vm - Virtual Machine
1414 * indent - Left margin indent amount
1415 *
1416 * Output Args:
1417 * stream - Output FILE stream
1418 *
1419 * Return: None
1420 *
1421 * Dumps the current state of the VM given by vm, to the FILE stream
1422 * given by stream.
1423 */
1424void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
1425{
1426 struct userspace_mem_region *region;
1427 struct vcpu *vcpu;
1428
1429 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
1430 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
1431 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
1432 fprintf(stream, "%*sMem Regions:\n", indent, "");
1433 for (region = vm->userspace_mem_region_head; region;
1434 region = region->next) {
1435 fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
1436 "host_virt: %p\n", indent + 2, "",
1437 (uint64_t) region->region.guest_phys_addr,
1438 (uint64_t) region->region.memory_size,
1439 region->host_mem);
1440 fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
1441 sparsebit_dump(stream, region->unused_phy_pages, 0);
1442 }
1443 fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
1444 sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
1445 fprintf(stream, "%*spgd_created: %u\n", indent, "",
1446 vm->pgd_created);
1447 if (vm->pgd_created) {
1448 fprintf(stream, "%*sVirtual Translation Tables:\n",
1449 indent + 2, "");
1450 virt_dump(stream, vm, indent + 4);
1451 }
1452 fprintf(stream, "%*sVCPUs:\n", indent, "");
1453 for (vcpu = vm->vcpu_head; vcpu; vcpu = vcpu->next)
1454 vcpu_dump(stream, vm, vcpu->id, indent + 2);
1455}
1456
/*
 * Known KVM exit reasons.
 *
 * Maps KVM_EXIT_* values from the KVM UAPI to human-readable names;
 * consumed by exit_reason_str() below.
 */
static struct exit_reason {
	unsigned int reason;	/* KVM_EXIT_* constant */
	const char *name;	/* name without the KVM_EXIT_ prefix */
} exit_reasons_known[] = {
	{KVM_EXIT_UNKNOWN, "UNKNOWN"},
	{KVM_EXIT_EXCEPTION, "EXCEPTION"},
	{KVM_EXIT_IO, "IO"},
	{KVM_EXIT_HYPERCALL, "HYPERCALL"},
	{KVM_EXIT_DEBUG, "DEBUG"},
	{KVM_EXIT_HLT, "HLT"},
	{KVM_EXIT_MMIO, "MMIO"},
	{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
	{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
	{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
	{KVM_EXIT_INTR, "INTR"},
	{KVM_EXIT_SET_TPR, "SET_TPR"},
	{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
	{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
	{KVM_EXIT_S390_RESET, "S390_RESET"},
	{KVM_EXIT_DCR, "DCR"},
	{KVM_EXIT_NMI, "NMI"},
	{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
	{KVM_EXIT_OSI, "OSI"},
	{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	/* Only present on kernels whose UAPI defines this exit reason. */
	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
};
1486
1487/*
1488 * Exit Reason String
1489 *
1490 * Input Args:
1491 * exit_reason - Exit reason
1492 *
1493 * Output Args: None
1494 *
1495 * Return:
1496 * Constant string pointer describing the exit reason.
1497 *
1498 * Locates and returns a constant string that describes the KVM exit
1499 * reason given by exit_reason. If no such string is found, a constant
1500 * string of "Unknown" is returned.
1501 */
1502const char *exit_reason_str(unsigned int exit_reason)
1503{
1504 unsigned int n1;
1505
1506 for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
1507 if (exit_reason == exit_reasons_known[n1].reason)
1508 return exit_reasons_known[n1].name;
1509 }
1510
1511 return "Unknown";
1512}
1513
1514/*
1515 * Physical Contiguous Page Allocator
1516 *
1517 * Input Args:
1518 * vm - Virtual Machine
1519 * num - number of pages
1520 * paddr_min - Physical address minimum
1521 * memslot - Memory region to allocate page from
1522 *
1523 * Output Args: None
1524 *
1525 * Return:
1526 * Starting physical address
1527 *
1528 * Within the VM specified by vm, locates a range of available physical
1529 * pages at or above paddr_min. If found, the pages are marked as in use
1530 * and their base address is returned. A TEST_ASSERT failure occurs if
1531 * not enough pages are available at or above paddr_min.
1532 */
1533vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
1534 vm_paddr_t paddr_min, uint32_t memslot)
1535{
1536 struct userspace_mem_region *region;
1537 sparsebit_idx_t pg, base;
1538
1539 TEST_ASSERT(num > 0, "Must allocate at least one page");
1540
1541 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
1542 "not divisible by page size.\n"
1543 " paddr_min: 0x%lx page_size: 0x%x",
1544 paddr_min, vm->page_size);
1545
1546 region = memslot2region(vm, memslot);
1547 base = pg = paddr_min >> vm->page_shift;
1548
1549 do {
1550 for (; pg < base + num; ++pg) {
1551 if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
1552 base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
1553 break;
1554 }
1555 }
1556 } while (pg && pg != base + num);
1557
1558 if (pg == 0) {
1559 fprintf(stderr, "No guest physical page available, "
1560 "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
1561 paddr_min, vm->page_size, memslot);
1562 fputs("---- vm dump ----\n", stderr);
1563 vm_dump(stderr, vm, 2);
1564 abort();
1565 }
1566
1567 for (pg = base; pg < base + num; ++pg)
1568 sparsebit_clear(region->unused_phy_pages, pg);
1569
1570 return base * vm->page_size;
1571}
1572
1573vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
1574 uint32_t memslot)
1575{
1576 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
1577}
1578
1579/*
1580 * Address Guest Virtual to Host Virtual
1581 *
1582 * Input Args:
1583 * vm - Virtual Machine
1584 * gva - VM virtual address
1585 *
1586 * Output Args: None
1587 *
1588 * Return:
1589 * Equivalent host virtual address
1590 */
1591void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
1592{
1593 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
1594}
1595
1596/*
1597 * Is Unrestricted Guest
1598 *
1599 * Input Args:
1600 * vm - Virtual Machine
1601 *
1602 * Output Args: None
1603 *
1604 * Return: True if the unrestricted guest is set to 'Y', otherwise return false.
1605 *
1606 * Check if the unrestricted guest flag is enabled.
1607 */
1608bool vm_is_unrestricted_guest(struct kvm_vm *vm)
1609{
1610 char val = 'N';
1611 size_t count;
1612 FILE *f;
1613
1614 if (vm == NULL) {
1615 /* Ensure that the KVM vendor-specific module is loaded. */
1616 f = fopen(KVM_DEV_PATH, "r");
1617 TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
1618 errno);
1619 fclose(f);
1620 }
1621
1622 f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
1623 if (f) {
1624 count = fread(&val, sizeof(char), 1, f);
1625 TEST_ASSERT(count == 1, "Unable to read from param file.");
1626 fclose(f);
1627 }
1628
1629 return val == 'Y';
1630}