Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: Automatically do init_ucall() for non-barebones VMs

Do init_ucall() automatically during VM creation to kill two (three?)
birds with one stone.

First, initializing ucall immediately after VM creation allows forcing
aarch64's MMIO ucall address to immediately follow memslot0. This is
still somewhat fragile as tests could clobber the MMIO address with a
new memslot, but it's safe-ish since tests have to be conservative when
accounting for memslot0. And this can be hardened in the future by
creating a read-only memslot for the MMIO page (KVM ARM exits with MMIO
if the guest writes to a read-only memslot). Add a TODO to document that
selftests can and should use a memslot for the ucall MMIO (doing so
requires yet more rework because tests assume they can use all memslots
except memslot0).

Second, initializing ucall for all VMs prepares for making ucall
initialization meaningful on all architectures. aarch64 is currently the
only arch that needs to do any setup, but that will change in the future
by switching to a pool-based implementation (instead of the current
stack-based approach).

Lastly, defining the ucall MMIO address from common code will simplify
switching all architectures (except s390) to a common MMIO-based ucall
implementation (if there's ever sufficient motivation to do so).

Cc: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Andrew Jones <andrew.jones@linux.dev>
Tested-by: Peter Gonda <pgonda@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221006003409.649993-4-seanjc@google.com

+20 -76
-2
tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
··· 158 158 159 159 TEST_REQUIRE(vcpu_aarch64_only(vcpu)); 160 160 161 - ucall_init(vm, NULL); 162 - 163 161 test_user_raz_wi(vcpu); 164 162 test_user_raz_invariant(vcpu); 165 163 test_guest_raz(vcpu);
-1
tools/testing/selftests/kvm/aarch64/arch_timer.c
··· 375 375 for (i = 0; i < nr_vcpus; i++) 376 376 vcpu_init_descriptor_tables(vcpus[i]); 377 377 378 - ucall_init(vm, NULL); 379 378 test_init_timer_irq(vm); 380 379 gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA); 381 380 __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3");
-2
tools/testing/selftests/kvm/aarch64/debug-exceptions.c
··· 292 292 int stage; 293 293 294 294 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 295 - ucall_init(vm, NULL); 296 295 297 296 vm_init_descriptor_tables(vm); 298 297 vcpu_init_descriptor_tables(vcpu); ··· 342 343 struct kvm_guest_debug debug = {}; 343 344 344 345 vm = vm_create_with_one_vcpu(&vcpu, guest_code_ss); 345 - ucall_init(vm, NULL); 346 346 run = vcpu->run; 347 347 vcpu_args_set(vcpu, 1, test_cnt); 348 348
-1
tools/testing/selftests/kvm/aarch64/hypercalls.c
··· 236 236 237 237 vm = vm_create_with_one_vcpu(vcpu, guest_code); 238 238 239 - ucall_init(vm, NULL); 240 239 steal_time_init(*vcpu); 241 240 242 241 return vm;
-1
tools/testing/selftests/kvm/aarch64/psci_test.c
··· 79 79 struct kvm_vm *vm; 80 80 81 81 vm = vm_create(2); 82 - ucall_init(vm, NULL); 83 82 84 83 vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init); 85 84 init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
-2
tools/testing/selftests/kvm/aarch64/vgic_init.c
··· 68 68 /* we don't want to assert on run execution, hence that helper */ 69 69 static int run_vcpu(struct kvm_vcpu *vcpu) 70 70 { 71 - ucall_init(vcpu->vm, NULL); 72 - 73 71 return __vcpu_run(vcpu) ? -errno : 0; 74 72 } 75 73
-1
tools/testing/selftests/kvm/aarch64/vgic_irq.c
··· 756 756 print_args(&args); 757 757 758 758 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 759 - ucall_init(vm, NULL); 760 759 761 760 vm_init_descriptor_tables(vm); 762 761 vcpu_init_descriptor_tables(vcpu);
-2
tools/testing/selftests/kvm/dirty_log_test.c
··· 756 756 /* Cache the HVA pointer of the region */ 757 757 host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); 758 758 759 - ucall_init(vm, NULL); 760 - 761 759 /* Export the shared variables to the guest */ 762 760 sync_global_to_guest(vm, host_page_size); 763 761 sync_global_to_guest(vm, guest_page_size);
+3 -3
tools/testing/selftests/kvm/include/ucall_common.h
··· 24 24 uint64_t args[UCALL_MAX_ARGS]; 25 25 }; 26 26 27 - void ucall_arch_init(struct kvm_vm *vm, void *arg); 27 + void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); 28 28 void ucall_arch_uninit(struct kvm_vm *vm); 29 29 void ucall_arch_do_ucall(vm_vaddr_t uc); 30 30 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu); ··· 32 32 void ucall(uint64_t cmd, int nargs, ...); 33 33 uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); 34 34 35 - static inline void ucall_init(struct kvm_vm *vm, void *arg) 35 + static inline void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 36 36 { 37 - ucall_arch_init(vm, arg); 37 + ucall_arch_init(vm, mmio_gpa); 38 38 } 39 39 40 40 static inline void ucall_uninit(struct kvm_vm *vm)
-1
tools/testing/selftests/kvm/kvm_page_table_test.c
··· 289 289 host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); 290 290 291 291 /* Export shared structure test_args to guest */ 292 - ucall_init(vm, NULL); 293 292 sync_global_to_guest(vm, test_args); 294 293 295 294 ret = sem_init(&test_stage_updated, 0, 0);
+3 -51
tools/testing/selftests/kvm/lib/aarch64/ucall.c
··· 8 8 9 9 static vm_vaddr_t *ucall_exit_mmio_addr; 10 10 11 - static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa) 11 + void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 12 12 { 13 - if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1)) 14 - return false; 13 + virt_pg_map(vm, mmio_gpa, mmio_gpa); 15 14 16 - virt_pg_map(vm, gpa, gpa); 17 - 18 - ucall_exit_mmio_addr = (vm_vaddr_t *)gpa; 15 + ucall_exit_mmio_addr = (vm_vaddr_t *)mmio_gpa; 19 16 sync_global_to_guest(vm, ucall_exit_mmio_addr); 20 - 21 - return true; 22 - } 23 - 24 - void ucall_arch_init(struct kvm_vm *vm, void *arg) 25 - { 26 - vm_paddr_t gpa, start, end, step, offset; 27 - unsigned int bits; 28 - bool ret; 29 - 30 - if (arg) { 31 - gpa = (vm_paddr_t)arg; 32 - ret = ucall_mmio_init(vm, gpa); 33 - TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa); 34 - return; 35 - } 36 - 37 - /* 38 - * Find an address within the allowed physical and virtual address 39 - * spaces, that does _not_ have a KVM memory region associated with 40 - * it. Identity mapping an address like this allows the guest to 41 - * access it, but as KVM doesn't know what to do with it, it 42 - * will assume it's something userspace handles and exit with 43 - * KVM_EXIT_MMIO. Well, at least that's how it works for AArch64. 44 - * Here we start with a guess that the addresses around 5/8th 45 - * of the allowed space are unmapped and then work both down and 46 - * up from there in 1/16th allowed space sized steps. 47 - * 48 - * Note, we need to use VA-bits - 1 when calculating the allowed 49 - * virtual address space for an identity mapping because the upper 50 - * half of the virtual address space is the two's complement of the 51 - * lower and won't match physical addresses. 
52 - */ 53 - bits = vm->va_bits - 1; 54 - bits = min(vm->pa_bits, bits); 55 - end = 1ul << bits; 56 - start = end * 5 / 8; 57 - step = end / 16; 58 - for (offset = 0; offset < end - start; offset += step) { 59 - if (ucall_mmio_init(vm, start - offset)) 60 - return; 61 - if (ucall_mmio_init(vm, start + offset)) 62 - return; 63 - } 64 - TEST_FAIL("Can't find a ucall mmio address"); 65 17 } 66 18 67 19 void ucall_arch_uninit(struct kvm_vm *vm)
+11
tools/testing/selftests/kvm/lib/kvm_util.c
··· 335 335 { 336 336 uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus, 337 337 nr_extra_pages); 338 + struct userspace_mem_region *slot0; 338 339 struct kvm_vm *vm; 339 340 340 341 vm = ____vm_create(mode, nr_pages); 341 342 342 343 kvm_vm_elf_load(vm, program_invocation_name); 343 344 345 + /* 346 + * TODO: Add proper defines to protect the library's memslots, and then 347 + * carve out memslot1 for the ucall MMIO address. KVM treats writes to 348 + * read-only memslots as MMIO, and creating a read-only memslot for the 349 + * MMIO region would prevent silently clobbering the MMIO region. 350 + */ 351 + slot0 = memslot2region(vm, 0); 352 + ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); 353 + 344 354 #ifdef __x86_64__ 345 355 vm_create_irqchip(vm); 346 356 #endif 357 + 347 358 return vm; 348 359 } 349 360
-2
tools/testing/selftests/kvm/lib/memstress.c
··· 221 221 memstress_setup_nested(vm, nr_vcpus, vcpus); 222 222 } 223 223 224 - ucall_init(vm, NULL); 225 - 226 224 /* Export the shared variables to the guest. */ 227 225 sync_global_to_guest(vm, memstress_args); 228 226
+1 -1
tools/testing/selftests/kvm/lib/riscv/ucall.c
··· 10 10 #include "kvm_util.h" 11 11 #include "processor.h" 12 12 13 - void ucall_arch_init(struct kvm_vm *vm, void *arg) 13 + void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 14 14 { 15 15 } 16 16
+1 -1
tools/testing/selftests/kvm/lib/s390x/ucall.c
··· 6 6 */ 7 7 #include "kvm_util.h" 8 8 9 - void ucall_arch_init(struct kvm_vm *vm, void *arg) 9 + void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 10 10 { 11 11 } 12 12
+1 -1
tools/testing/selftests/kvm/lib/x86_64/ucall.c
··· 8 8 9 9 #define UCALL_PIO_PORT ((uint16_t)0x1000) 10 10 11 - void ucall_arch_init(struct kvm_vm *vm, void *arg) 11 + void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 12 12 { 13 13 } 14 14
-1
tools/testing/selftests/kvm/memslot_perf_test.c
··· 277 277 TEST_ASSERT(data->hva_slots, "malloc() fail"); 278 278 279 279 data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code); 280 - ucall_init(data->vm, NULL); 281 280 282 281 pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n", 283 282 max_mem_slots - 1, data->pages_per_slot, rempages);
-1
tools/testing/selftests/kvm/rseq_test.c
··· 224 224 * CPU affinity. 225 225 */ 226 226 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 227 - ucall_init(vm, NULL); 228 227 229 228 pthread_create(&migration_thread, NULL, migration_worker, 230 229 (void *)(unsigned long)syscall(SYS_gettid));
-1
tools/testing/selftests/kvm/steal_time.c
··· 266 266 gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS); 267 267 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0); 268 268 virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages); 269 - ucall_init(vm, NULL); 270 269 271 270 TEST_REQUIRE(is_steal_time_supported(vcpus[0])); 272 271
-1
tools/testing/selftests/kvm/system_counter_offset_test.c
··· 121 121 122 122 vm = vm_create_with_one_vcpu(&vcpu, guest_main); 123 123 check_preconditions(vcpu); 124 - ucall_init(vm, NULL); 125 124 126 125 enter_guest(vcpu); 127 126 kvm_vm_free(vm);