Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: Split ucall.c into architecture specific files

The way we exit from a guest to userspace is very specific to the
architecture: on x86 we use PIO, on aarch64 we use MMIO, and on
s390x we are going to use an instruction instead. The possibility of
selecting a type via the ucall_type_t enum is currently also completely
unused, so the code in ucall.c looks more complex than
required. Let's split this up into architecture-specific ucall.c
files instead, so we can get rid of the #ifdefs and the unnecessary
ucall_type_t handling.

Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Thomas Huth <thuth@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Link: https://lore.kernel.org/r/20190731151525.17156-2-thuth@redhat.com
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>

Authored by Thomas Huth and committed by Christian Borntraeger
2040f414 609488bc

+173 -168
+3 -3
tools/testing/selftests/kvm/Makefile
··· 7 7 KSFT_KHDR_INSTALL := 1 8 8 UNAME_M := $(shell uname -m) 9 9 10 - LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c 11 - LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c 12 - LIBKVM_aarch64 = lib/aarch64/processor.c 10 + LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c 11 + LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/ucall.c 12 + LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c 13 13 LIBKVM_s390x = lib/s390x/processor.c 14 14 15 15 TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
+1 -1
tools/testing/selftests/kvm/dirty_log_test.c
··· 337 337 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); 338 338 #endif 339 339 #ifdef __aarch64__ 340 - ucall_init(vm, UCALL_MMIO, NULL); 340 + ucall_init(vm, NULL); 341 341 #endif 342 342 343 343 /* Export the shared variables to the guest */
+1 -7
tools/testing/selftests/kvm/include/kvm_util.h
··· 165 165 memcpy(&(g), _p, sizeof(g)); \ 166 166 }) 167 167 168 - /* ucall implementation types */ 169 - typedef enum { 170 - UCALL_PIO, 171 - UCALL_MMIO, 172 - } ucall_type_t; 173 - 174 168 /* Common ucalls */ 175 169 enum { 176 170 UCALL_NONE, ··· 180 186 uint64_t args[UCALL_MAX_ARGS]; 181 187 }; 182 188 183 - void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg); 189 + void ucall_init(struct kvm_vm *vm, void *arg); 184 190 void ucall_uninit(struct kvm_vm *vm); 185 191 void ucall(uint64_t cmd, int nargs, ...); 186 192 uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
+112
tools/testing/selftests/kvm/lib/aarch64/ucall.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * ucall support. A ucall is a "hypercall to userspace". 4 + * 5 + * Copyright (C) 2018, Red Hat, Inc. 6 + */ 7 + #include "kvm_util.h" 8 + #include "../kvm_util_internal.h" 9 + 10 + static vm_vaddr_t *ucall_exit_mmio_addr; 11 + 12 + static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa) 13 + { 14 + if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1)) 15 + return false; 16 + 17 + virt_pg_map(vm, gpa, gpa, 0); 18 + 19 + ucall_exit_mmio_addr = (vm_vaddr_t *)gpa; 20 + sync_global_to_guest(vm, ucall_exit_mmio_addr); 21 + 22 + return true; 23 + } 24 + 25 + void ucall_init(struct kvm_vm *vm, void *arg) 26 + { 27 + vm_paddr_t gpa, start, end, step, offset; 28 + unsigned int bits; 29 + bool ret; 30 + 31 + if (arg) { 32 + gpa = (vm_paddr_t)arg; 33 + ret = ucall_mmio_init(vm, gpa); 34 + TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa); 35 + return; 36 + } 37 + 38 + /* 39 + * Find an address within the allowed physical and virtual address 40 + * spaces, that does _not_ have a KVM memory region associated with 41 + * it. Identity mapping an address like this allows the guest to 42 + * access it, but as KVM doesn't know what to do with it, it 43 + * will assume it's something userspace handles and exit with 44 + * KVM_EXIT_MMIO. Well, at least that's how it works for AArch64. 45 + * Here we start with a guess that the addresses around 5/8th 46 + * of the allowed space are unmapped and then work both down and 47 + * up from there in 1/16th allowed space sized steps. 48 + * 49 + * Note, we need to use VA-bits - 1 when calculating the allowed 50 + * virtual address space for an identity mapping because the upper 51 + * half of the virtual address space is the two's complement of the 52 + * lower and won't match physical addresses. 53 + */ 54 + bits = vm->va_bits - 1; 55 + bits = vm->pa_bits < bits ? 
vm->pa_bits : bits; 56 + end = 1ul << bits; 57 + start = end * 5 / 8; 58 + step = end / 16; 59 + for (offset = 0; offset < end - start; offset += step) { 60 + if (ucall_mmio_init(vm, start - offset)) 61 + return; 62 + if (ucall_mmio_init(vm, start + offset)) 63 + return; 64 + } 65 + TEST_ASSERT(false, "Can't find a ucall mmio address"); 66 + } 67 + 68 + void ucall_uninit(struct kvm_vm *vm) 69 + { 70 + ucall_exit_mmio_addr = 0; 71 + sync_global_to_guest(vm, ucall_exit_mmio_addr); 72 + } 73 + 74 + void ucall(uint64_t cmd, int nargs, ...) 75 + { 76 + struct ucall uc = { 77 + .cmd = cmd, 78 + }; 79 + va_list va; 80 + int i; 81 + 82 + nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS; 83 + 84 + va_start(va, nargs); 85 + for (i = 0; i < nargs; ++i) 86 + uc.args[i] = va_arg(va, uint64_t); 87 + va_end(va); 88 + 89 + *ucall_exit_mmio_addr = (vm_vaddr_t)&uc; 90 + } 91 + 92 + uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) 93 + { 94 + struct kvm_run *run = vcpu_state(vm, vcpu_id); 95 + struct ucall ucall = {}; 96 + 97 + if (run->exit_reason == KVM_EXIT_MMIO && 98 + run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) { 99 + vm_vaddr_t gva; 100 + 101 + TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8, 102 + "Unexpected ucall exit mmio address access"); 103 + memcpy(&gva, run->mmio.data, sizeof(gva)); 104 + memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall)); 105 + 106 + vcpu_run_complete_io(vm, vcpu_id); 107 + if (uc) 108 + memcpy(uc, &ucall, sizeof(ucall)); 109 + } 110 + 111 + return ucall.cmd; 112 + }
-157
tools/testing/selftests/kvm/lib/ucall.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * ucall support. A ucall is a "hypercall to userspace". 4 - * 5 - * Copyright (C) 2018, Red Hat, Inc. 6 - */ 7 - #include "kvm_util.h" 8 - #include "kvm_util_internal.h" 9 - 10 - #define UCALL_PIO_PORT ((uint16_t)0x1000) 11 - 12 - static ucall_type_t ucall_type; 13 - static vm_vaddr_t *ucall_exit_mmio_addr; 14 - 15 - static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa) 16 - { 17 - if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1)) 18 - return false; 19 - 20 - virt_pg_map(vm, gpa, gpa, 0); 21 - 22 - ucall_exit_mmio_addr = (vm_vaddr_t *)gpa; 23 - sync_global_to_guest(vm, ucall_exit_mmio_addr); 24 - 25 - return true; 26 - } 27 - 28 - void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg) 29 - { 30 - ucall_type = type; 31 - sync_global_to_guest(vm, ucall_type); 32 - 33 - if (type == UCALL_PIO) 34 - return; 35 - 36 - if (type == UCALL_MMIO) { 37 - vm_paddr_t gpa, start, end, step, offset; 38 - unsigned bits; 39 - bool ret; 40 - 41 - if (arg) { 42 - gpa = (vm_paddr_t)arg; 43 - ret = ucall_mmio_init(vm, gpa); 44 - TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa); 45 - return; 46 - } 47 - 48 - /* 49 - * Find an address within the allowed physical and virtual address 50 - * spaces, that does _not_ have a KVM memory region associated with 51 - * it. Identity mapping an address like this allows the guest to 52 - * access it, but as KVM doesn't know what to do with it, it 53 - * will assume it's something userspace handles and exit with 54 - * KVM_EXIT_MMIO. Well, at least that's how it works for AArch64. 55 - * Here we start with a guess that the addresses around 5/8th 56 - * of the allowed space are unmapped and then work both down and 57 - * up from there in 1/16th allowed space sized steps. 
58 - * 59 - * Note, we need to use VA-bits - 1 when calculating the allowed 60 - * virtual address space for an identity mapping because the upper 61 - * half of the virtual address space is the two's complement of the 62 - * lower and won't match physical addresses. 63 - */ 64 - bits = vm->va_bits - 1; 65 - bits = vm->pa_bits < bits ? vm->pa_bits : bits; 66 - end = 1ul << bits; 67 - start = end * 5 / 8; 68 - step = end / 16; 69 - for (offset = 0; offset < end - start; offset += step) { 70 - if (ucall_mmio_init(vm, start - offset)) 71 - return; 72 - if (ucall_mmio_init(vm, start + offset)) 73 - return; 74 - } 75 - TEST_ASSERT(false, "Can't find a ucall mmio address"); 76 - } 77 - } 78 - 79 - void ucall_uninit(struct kvm_vm *vm) 80 - { 81 - ucall_type = 0; 82 - sync_global_to_guest(vm, ucall_type); 83 - ucall_exit_mmio_addr = 0; 84 - sync_global_to_guest(vm, ucall_exit_mmio_addr); 85 - } 86 - 87 - static void ucall_pio_exit(struct ucall *uc) 88 - { 89 - #ifdef __x86_64__ 90 - asm volatile("in %[port], %%al" 91 - : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax"); 92 - #endif 93 - } 94 - 95 - static void ucall_mmio_exit(struct ucall *uc) 96 - { 97 - *ucall_exit_mmio_addr = (vm_vaddr_t)uc; 98 - } 99 - 100 - void ucall(uint64_t cmd, int nargs, ...) 101 - { 102 - struct ucall uc = { 103 - .cmd = cmd, 104 - }; 105 - va_list va; 106 - int i; 107 - 108 - nargs = nargs <= UCALL_MAX_ARGS ? 
nargs : UCALL_MAX_ARGS; 109 - 110 - va_start(va, nargs); 111 - for (i = 0; i < nargs; ++i) 112 - uc.args[i] = va_arg(va, uint64_t); 113 - va_end(va); 114 - 115 - switch (ucall_type) { 116 - case UCALL_PIO: 117 - ucall_pio_exit(&uc); 118 - break; 119 - case UCALL_MMIO: 120 - ucall_mmio_exit(&uc); 121 - break; 122 - }; 123 - } 124 - 125 - uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) 126 - { 127 - struct kvm_run *run = vcpu_state(vm, vcpu_id); 128 - struct ucall ucall = {}; 129 - bool got_ucall = false; 130 - 131 - #ifdef __x86_64__ 132 - if (ucall_type == UCALL_PIO && run->exit_reason == KVM_EXIT_IO && 133 - run->io.port == UCALL_PIO_PORT) { 134 - struct kvm_regs regs; 135 - vcpu_regs_get(vm, vcpu_id, &regs); 136 - memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), sizeof(ucall)); 137 - got_ucall = true; 138 - } 139 - #endif 140 - if (ucall_type == UCALL_MMIO && run->exit_reason == KVM_EXIT_MMIO && 141 - run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) { 142 - vm_vaddr_t gva; 143 - TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8, 144 - "Unexpected ucall exit mmio address access"); 145 - memcpy(&gva, run->mmio.data, sizeof(gva)); 146 - memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall)); 147 - got_ucall = true; 148 - } 149 - 150 - if (got_ucall) { 151 - vcpu_run_complete_io(vm, vcpu_id); 152 - if (uc) 153 - memcpy(uc, &ucall, sizeof(ucall)); 154 - } 155 - 156 - return ucall.cmd; 157 - }
+56
tools/testing/selftests/kvm/lib/x86_64/ucall.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * ucall support. A ucall is a "hypercall to userspace". 4 + * 5 + * Copyright (C) 2018, Red Hat, Inc. 6 + */ 7 + #include "kvm_util.h" 8 + 9 + #define UCALL_PIO_PORT ((uint16_t)0x1000) 10 + 11 + void ucall_init(struct kvm_vm *vm, void *arg) 12 + { 13 + } 14 + 15 + void ucall_uninit(struct kvm_vm *vm) 16 + { 17 + } 18 + 19 + void ucall(uint64_t cmd, int nargs, ...) 20 + { 21 + struct ucall uc = { 22 + .cmd = cmd, 23 + }; 24 + va_list va; 25 + int i; 26 + 27 + nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS; 28 + 29 + va_start(va, nargs); 30 + for (i = 0; i < nargs; ++i) 31 + uc.args[i] = va_arg(va, uint64_t); 32 + va_end(va); 33 + 34 + asm volatile("in %[port], %%al" 35 + : : [port] "d" (UCALL_PIO_PORT), "D" (&uc) : "rax"); 36 + } 37 + 38 + uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) 39 + { 40 + struct kvm_run *run = vcpu_state(vm, vcpu_id); 41 + struct ucall ucall = {}; 42 + 43 + if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) { 44 + struct kvm_regs regs; 45 + 46 + vcpu_regs_get(vm, vcpu_id, &regs); 47 + memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), 48 + sizeof(ucall)); 49 + 50 + vcpu_run_complete_io(vm, vcpu_id); 51 + if (uc) 52 + memcpy(uc, &ucall, sizeof(ucall)); 53 + } 54 + 55 + return ucall.cmd; 56 + }