Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: Purge vm+vcpu_id == vcpu silliness

Take a vCPU directly instead of a VM+vcpu pair in all vCPU-scoped helpers
and ioctls.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Sean Christopherson; committed by Paolo Bonzini.
768e9a61 5260db3e

+793 -911
+5 -5
tools/testing/selftests/kvm/aarch64/arch_timer.c
··· 218 218 struct kvm_vm *vm = vcpu->vm; 219 219 struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx]; 220 220 221 - vcpu_run(vm, vcpu->id); 221 + vcpu_run(vcpu); 222 222 223 223 /* Currently, any exit from guest is an indication of completion */ 224 224 pthread_mutex_lock(&vcpu_done_map_lock); 225 225 set_bit(vcpu_idx, vcpu_done_map); 226 226 pthread_mutex_unlock(&vcpu_done_map_lock); 227 227 228 - switch (get_ucall(vm, vcpu->id, &uc)) { 228 + switch (get_ucall(vcpu, &uc)) { 229 229 case UCALL_SYNC: 230 230 case UCALL_DONE: 231 231 break; ··· 345 345 static void test_init_timer_irq(struct kvm_vm *vm) 346 346 { 347 347 /* Timer initid should be same for all the vCPUs, so query only vCPU-0 */ 348 - vcpu_device_attr_get(vm, vcpus[0]->id, KVM_ARM_VCPU_TIMER_CTRL, 348 + vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL, 349 349 KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq); 350 - vcpu_device_attr_get(vm, vcpus[0]->id, KVM_ARM_VCPU_TIMER_CTRL, 350 + vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL, 351 351 KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq); 352 352 353 353 sync_global_to_guest(vm, ptimer_irq); ··· 370 370 vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler); 371 371 372 372 for (i = 0; i < nr_vcpus; i++) 373 - vcpu_init_descriptor_tables(vm, vcpus[i]->id); 373 + vcpu_init_descriptor_tables(vcpus[i]); 374 374 375 375 ucall_init(vm, NULL); 376 376 test_init_timer_irq(vm);
+4 -4
tools/testing/selftests/kvm/aarch64/debug-exceptions.c
··· 242 242 { 243 243 uint64_t id_aa64dfr0; 244 244 245 - vcpu_get_reg(vcpu->vm, vcpu->id, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0); 245 + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0); 246 246 return id_aa64dfr0 & 0xf; 247 247 } 248 248 ··· 257 257 ucall_init(vm, NULL); 258 258 259 259 vm_init_descriptor_tables(vm); 260 - vcpu_init_descriptor_tables(vm, vcpu->id); 260 + vcpu_init_descriptor_tables(vcpu); 261 261 262 262 if (debug_version(vcpu) < 6) { 263 263 print_skip("Armv8 debug architecture not supported."); ··· 277 277 ESR_EC_SVC64, guest_svc_handler); 278 278 279 279 for (stage = 0; stage < 11; stage++) { 280 - vcpu_run(vm, vcpu->id); 280 + vcpu_run(vcpu); 281 281 282 - switch (get_ucall(vm, vcpu->id, &uc)) { 282 + switch (get_ucall(vcpu, &uc)) { 283 283 case UCALL_SYNC: 284 284 TEST_ASSERT(uc.args[1] == stage, 285 285 "Stage %d: Unexpected sync ucall, got %lx",
+8 -8
tools/testing/selftests/kvm/aarch64/get-reg-list.c
··· 377 377 init->features[s->feature / 32] |= 1 << (s->feature % 32); 378 378 } 379 379 380 - static void finalize_vcpu(struct kvm_vm *vm, uint32_t vcpuid, struct vcpu_config *c) 380 + static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_config *c) 381 381 { 382 382 struct reg_sublist *s; 383 383 int feature; ··· 385 385 for_each_sublist(c, s) { 386 386 if (s->finalize) { 387 387 feature = s->feature; 388 - vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_FINALIZE, &feature); 388 + vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature); 389 389 } 390 390 } 391 391 } ··· 420 420 vm = vm_create_barebones(); 421 421 prepare_vcpu_init(c, &init); 422 422 vcpu = __vm_vcpu_add(vm, 0); 423 - aarch64_vcpu_setup(vm, vcpu->id, &init); 424 - finalize_vcpu(vm, vcpu->id, c); 423 + aarch64_vcpu_setup(vcpu, &init); 424 + finalize_vcpu(vcpu, c); 425 425 426 - reg_list = vcpu_get_reg_list(vm, vcpu->id); 426 + reg_list = vcpu_get_reg_list(vcpu); 427 427 428 428 if (fixup_core_regs) 429 429 core_reg_fixup(); ··· 459 459 bool reject_reg = false; 460 460 int ret; 461 461 462 - ret = __vcpu_get_reg(vm, vcpu->id, reg_list->reg[i], &addr); 462 + ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr); 463 463 if (ret) { 464 464 printf("%s: Failed to get ", config_name(c)); 465 465 print_reg(c, reg.id); ··· 471 471 for_each_sublist(c, s) { 472 472 if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) { 473 473 reject_reg = true; 474 - ret = __vcpu_ioctl(vm, vcpu->id, KVM_SET_ONE_REG, &reg); 474 + ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg); 475 475 if (ret != -1 || errno != EPERM) { 476 476 printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno); 477 477 print_reg(c, reg.id); ··· 483 483 } 484 484 485 485 if (!reject_reg) { 486 - ret = __vcpu_ioctl(vm, vcpu->id, KVM_SET_ONE_REG, &reg); 486 + ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg); 487 487 if (ret) { 488 488 printf("%s: Failed to set ", config_name(c)); 489 489 print_reg(c, reg.id);
+9 -9
tools/testing/selftests/kvm/aarch64/hypercalls.c
··· 158 158 gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE); 159 159 vm_userspace_mem_region_add(vcpu->vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0); 160 160 161 - vcpu_device_attr_set(vcpu->vm, vcpu->id, KVM_ARM_VCPU_PVTIME_CTRL, 161 + vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PVTIME_CTRL, 162 162 KVM_ARM_VCPU_PVTIME_IPA, &st_ipa); 163 163 } 164 164 ··· 172 172 const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i]; 173 173 174 174 /* First 'read' should be an upper limit of the features supported */ 175 - vcpu_get_reg(vcpu->vm, vcpu->id, reg_info->reg, &val); 175 + vcpu_get_reg(vcpu, reg_info->reg, &val); 176 176 TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), 177 177 "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n", 178 178 reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val); 179 179 180 180 /* Test a 'write' by disabling all the features of the register map */ 181 - ret = __vcpu_set_reg(vcpu->vm, vcpu->id, reg_info->reg, 0); 181 + ret = __vcpu_set_reg(vcpu, reg_info->reg, 0); 182 182 TEST_ASSERT(ret == 0, 183 183 "Failed to clear all the features of reg: 0x%lx; ret: %d\n", 184 184 reg_info->reg, errno); 185 185 186 - vcpu_get_reg(vcpu->vm, vcpu->id, reg_info->reg, &val); 186 + vcpu_get_reg(vcpu, reg_info->reg, &val); 187 187 TEST_ASSERT(val == 0, 188 188 "Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg); 189 189 ··· 192 192 * Avoid this check if all the bits are occupied. 193 193 */ 194 194 if (reg_info->max_feat_bit < 63) { 195 - ret = __vcpu_set_reg(vcpu->vm, vcpu->id, reg_info->reg, BIT(reg_info->max_feat_bit + 1)); 195 + ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1)); 196 196 TEST_ASSERT(ret != 0 && errno == EINVAL, 197 197 "Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n", 198 198 errno, reg_info->reg); ··· 213 213 * Before starting the VM, the test clears all the bits. 
214 214 * Check if that's still the case. 215 215 */ 216 - vcpu_get_reg(vcpu->vm, vcpu->id, reg_info->reg, &val); 216 + vcpu_get_reg(vcpu, reg_info->reg, &val); 217 217 TEST_ASSERT(val == 0, 218 218 "Expected all the features to be cleared for reg: 0x%lx\n", 219 219 reg_info->reg); ··· 223 223 * the registers and should return EBUSY. Set the registers and check for 224 224 * the expected errno. 225 225 */ 226 - ret = __vcpu_set_reg(vcpu->vm, vcpu->id, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit)); 226 + ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit)); 227 227 TEST_ASSERT(ret != 0 && errno == EBUSY, 228 228 "Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n", 229 229 errno, reg_info->reg); ··· 281 281 test_fw_regs_before_vm_start(vcpu); 282 282 283 283 while (!guest_done) { 284 - vcpu_run(vcpu->vm, vcpu->id); 284 + vcpu_run(vcpu); 285 285 286 - switch (get_ucall(vcpu->vm, vcpu->id, &uc)) { 286 + switch (get_ucall(vcpu, &uc)) { 287 287 case UCALL_SYNC: 288 288 test_guest_stage(&vm, &vcpu); 289 289 break;
+8 -8
tools/testing/selftests/kvm/aarch64/psci_test.c
··· 67 67 .mp_state = KVM_MP_STATE_STOPPED, 68 68 }; 69 69 70 - vcpu_mp_state_set(vcpu->vm, vcpu->id, &mp_state); 70 + vcpu_mp_state_set(vcpu, &mp_state); 71 71 } 72 72 73 73 static struct kvm_vm *setup_vm(void *guest_code, struct kvm_vcpu **source, ··· 92 92 { 93 93 struct ucall uc; 94 94 95 - vcpu_run(vcpu->vm, vcpu->id); 96 - if (get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_ABORT) 95 + vcpu_run(vcpu); 96 + if (get_ucall(vcpu, &uc) == UCALL_ABORT) 97 97 TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], __FILE__, 98 98 uc.args[1]); 99 99 } ··· 102 102 { 103 103 uint64_t obs_pc, obs_x0; 104 104 105 - vcpu_get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.pc), &obs_pc); 106 - vcpu_get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.regs[0]), &obs_x0); 105 + vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &obs_pc); 106 + vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]), &obs_x0); 107 107 108 108 TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR, 109 109 "unexpected target cpu pc: %lx (expected: %lx)", ··· 143 143 */ 144 144 vcpu_power_off(target); 145 145 146 - vcpu_get_reg(vm, target->id, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr); 147 - vcpu_args_set(vm, source->id, 1, target_mpidr & MPIDR_HWID_BITMASK); 146 + vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr); 147 + vcpu_args_set(source, 1, target_mpidr & MPIDR_HWID_BITMASK); 148 148 enter_guest(source); 149 149 150 - if (get_ucall(vm, source->id, &uc) != UCALL_DONE) 150 + if (get_ucall(source, &uc) != UCALL_DONE) 151 151 TEST_FAIL("Unhandled ucall: %lu", uc.cmd); 152 152 153 153 assert_vcpu_reset(target);
+4 -4
tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
··· 28 28 vm = vm_create_barebones(); 29 29 30 30 vcpu0 = __vm_vcpu_add(vm, 0); 31 - ret = __vcpu_ioctl(vm, vcpu0->id, KVM_ARM_VCPU_INIT, init0); 31 + ret = __vcpu_ioctl(vcpu0, KVM_ARM_VCPU_INIT, init0); 32 32 if (ret) 33 33 goto free_exit; 34 34 35 35 vcpu1 = __vm_vcpu_add(vm, 1); 36 - ret = __vcpu_ioctl(vm, vcpu1->id, KVM_ARM_VCPU_INIT, init1); 36 + ret = __vcpu_ioctl(vcpu1, KVM_ARM_VCPU_INIT, init1); 37 37 38 38 free_exit: 39 39 kvm_vm_free(vm); ··· 56 56 vcpu0 = __vm_vcpu_add(vm, 0); 57 57 vcpu1 = __vm_vcpu_add(vm, 1); 58 58 59 - ret = __vcpu_ioctl(vm, vcpu0->id, KVM_ARM_VCPU_INIT, init0); 59 + ret = __vcpu_ioctl(vcpu0, KVM_ARM_VCPU_INIT, init0); 60 60 if (ret) 61 61 goto free_exit; 62 62 63 - ret = __vcpu_ioctl(vm, vcpu1->id, KVM_ARM_VCPU_INIT, init1); 63 + ret = __vcpu_ioctl(vcpu1, KVM_ARM_VCPU_INIT, init1); 64 64 65 65 free_exit: 66 66 kvm_vm_free(vm);
+1 -1
tools/testing/selftests/kvm/aarch64/vgic_init.c
··· 70 70 { 71 71 ucall_init(vcpu->vm, NULL); 72 72 73 - return __vcpu_run(vcpu->vm, vcpu->id) ? -errno : 0; 73 + return __vcpu_run(vcpu) ? -errno : 0; 74 74 } 75 75 76 76 static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
+4 -4
tools/testing/selftests/kvm/aarch64/vgic_irq.c
··· 759 759 ucall_init(vm, NULL); 760 760 761 761 vm_init_descriptor_tables(vm); 762 - vcpu_init_descriptor_tables(vm, vcpu->id); 762 + vcpu_init_descriptor_tables(vcpu); 763 763 764 764 /* Setup the guest args page (so it gets the args). */ 765 765 args_gva = vm_vaddr_alloc_page(vm); 766 766 memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args)); 767 - vcpu_args_set(vm, vcpu->id, 1, args_gva); 767 + vcpu_args_set(vcpu, 1, args_gva); 768 768 769 769 gic_fd = vgic_v3_setup(vm, 1, nr_irqs, 770 770 GICD_BASE_GPA, GICR_BASE_GPA); ··· 777 777 guest_irq_handlers[args.eoi_split][args.level_sensitive]); 778 778 779 779 while (1) { 780 - vcpu_run(vm, vcpu->id); 780 + vcpu_run(vcpu); 781 781 782 - switch (get_ucall(vm, vcpu->id, &uc)) { 782 + switch (get_ucall(vcpu, &uc)) { 783 783 case UCALL_SYNC: 784 784 kvm_inject_get_call(vm, &uc, &inject_args); 785 785 run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
+2 -2
tools/testing/selftests/kvm/access_tracking_perf_test.c
··· 194 194 static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall) 195 195 { 196 196 struct ucall uc; 197 - uint64_t actual_ucall = get_ucall(vcpu->vm, vcpu->id, &uc); 197 + uint64_t actual_ucall = get_ucall(vcpu, &uc); 198 198 199 199 TEST_ASSERT(expected_ucall == actual_ucall, 200 200 "Guest exited unexpectedly (expected ucall %" PRIu64 ··· 226 226 while (spin_wait_for_next_iteration(&current_iteration)) { 227 227 switch (READ_ONCE(iteration_work)) { 228 228 case ITERATION_ACCESS_MEMORY: 229 - vcpu_run(vm, vcpu->id); 229 + vcpu_run(vcpu); 230 230 assert_ucall(vcpu, UCALL_SYNC); 231 231 break; 232 232 case ITERATION_MARK_IDLE:
+2 -3
tools/testing/selftests/kvm/demand_paging_test.c
··· 45 45 static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args) 46 46 { 47 47 struct kvm_vcpu *vcpu = vcpu_args->vcpu; 48 - struct kvm_vm *vm = perf_test_args.vm; 49 48 int vcpu_idx = vcpu_args->vcpu_idx; 50 49 struct kvm_run *run = vcpu->run; 51 50 struct timespec start; ··· 54 55 clock_gettime(CLOCK_MONOTONIC, &start); 55 56 56 57 /* Let the guest access its memory */ 57 - ret = _vcpu_run(vm, vcpu->id); 58 + ret = _vcpu_run(vcpu); 58 59 TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret); 59 - if (get_ucall(vm, vcpu->id, NULL) != UCALL_SYNC) { 60 + if (get_ucall(vcpu, NULL) != UCALL_SYNC) { 60 61 TEST_ASSERT(false, 61 62 "Invalid guest sync status: exit_reason=%s\n", 62 63 exit_reason_str(run->exit_reason));
+3 -4
tools/testing/selftests/kvm/dirty_log_perf_test.c
··· 69 69 static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args) 70 70 { 71 71 struct kvm_vcpu *vcpu = vcpu_args->vcpu; 72 - struct kvm_vm *vm = perf_test_args.vm; 73 72 int vcpu_idx = vcpu_args->vcpu_idx; 74 73 uint64_t pages_count = 0; 75 74 struct kvm_run *run; ··· 84 85 int current_iteration = READ_ONCE(iteration); 85 86 86 87 clock_gettime(CLOCK_MONOTONIC, &start); 87 - ret = _vcpu_run(vm, vcpu->id); 88 + ret = _vcpu_run(vcpu); 88 89 ts_diff = timespec_elapsed(start); 89 90 90 91 TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret); 91 - TEST_ASSERT(get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC, 92 + TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC, 92 93 "Invalid guest sync status: exit_reason=%s\n", 93 94 exit_reason_str(run->exit_reason)); 94 95 95 96 pr_debug("Got sync event from vCPU %d\n", vcpu_idx); 96 97 vcpu_last_completed_iteration[vcpu_idx] = current_iteration; 97 98 pr_debug("vCPU %d updated last completed iteration to %d\n", 98 - vcpu->id, vcpu_last_completed_iteration[vcpu_idx]); 99 + vcpu_idx, vcpu_last_completed_iteration[vcpu_idx]); 99 100 100 101 if (current_iteration) { 101 102 pages_count += vcpu_args->pages;
+5 -5
tools/testing/selftests/kvm/dirty_log_test.c
··· 255 255 TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR), 256 256 "vcpu run failed: errno=%d", err); 257 257 258 - TEST_ASSERT(get_ucall(vcpu->vm, vcpu->id, NULL) == UCALL_SYNC, 258 + TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC, 259 259 "Invalid guest sync status: exit_reason=%s\n", 260 260 exit_reason_str(run->exit_reason)); 261 261 ··· 346 346 } 347 347 348 348 /* Only have one vcpu */ 349 - count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu->vm, vcpu->id), 349 + count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu), 350 350 slot, bitmap, num_pages, &fetch_index); 351 351 352 352 cleared = kvm_vm_reset_dirty_ring(vcpu->vm); ··· 369 369 struct kvm_run *run = vcpu->run; 370 370 371 371 /* A ucall-sync or ring-full event is allowed */ 372 - if (get_ucall(vcpu->vm, vcpu->id, NULL) == UCALL_SYNC) { 372 + if (get_ucall(vcpu, NULL) == UCALL_SYNC) { 373 373 /* We should allow this to continue */ 374 374 ; 375 375 } else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL || ··· 521 521 sigmask->len = 8; 522 522 pthread_sigmask(0, NULL, sigset); 523 523 sigdelset(sigset, SIG_IPI); 524 - vcpu_ioctl(vm, vcpu->id, KVM_SET_SIGNAL_MASK, sigmask); 524 + vcpu_ioctl(vcpu, KVM_SET_SIGNAL_MASK, sigmask); 525 525 526 526 sigemptyset(sigset); 527 527 sigaddset(sigset, SIG_IPI); ··· 533 533 generate_random_array(guest_array, TEST_PAGES_PER_LOOP); 534 534 pages_count += TEST_PAGES_PER_LOOP; 535 535 /* Let the guest dirty the random pages */ 536 - ret = __vcpu_run(vm, vcpu->id); 536 + ret = __vcpu_run(vcpu); 537 537 if (ret == -1 && errno == EINTR) { 538 538 int sig = -1; 539 539 sigwait(sigset, &sig);
+1 -1
tools/testing/selftests/kvm/hardware_disable_test.c
··· 39 39 struct kvm_vcpu *vcpu = arg; 40 40 struct kvm_run *run = vcpu->run; 41 41 42 - vcpu_run(vcpu->vm, vcpu->id); 42 + vcpu_run(vcpu); 43 43 44 44 TEST_ASSERT(false, "%s: exited with reason %d: %s\n", 45 45 __func__, run->exit_reason,
+2 -2
tools/testing/selftests/kvm/include/aarch64/processor.h
··· 47 47 48 48 #define MPIDR_HWID_BITMASK (0xff00fffffful) 49 49 50 - void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init); 50 + void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init); 51 51 struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, 52 52 struct kvm_vcpu_init *init, void *guest_code); 53 53 ··· 101 101 bool *ps4k, bool *ps16k, bool *ps64k); 102 102 103 103 void vm_init_descriptor_tables(struct kvm_vm *vm); 104 - void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid); 104 + void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu); 105 105 106 106 typedef void(*handler_fn)(struct ex_regs *); 107 107 void vm_install_exception_handler(struct kvm_vm *vm,
+104 -116
tools/testing/selftests/kvm/include/kvm_util_base.h
··· 93 93 continue; \ 94 94 else 95 95 96 - struct kvm_vcpu *vcpu_get(struct kvm_vm *vm, uint32_t vcpuid); 96 + struct kvm_vcpu *vcpu_get(struct kvm_vm *vm, uint32_t vcpu_id); 97 97 98 98 struct userspace_mem_region * 99 99 memslot2region(struct kvm_vm *vm, uint32_t memslot); ··· 196 196 void _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, const char *name, void *arg); 197 197 #define vm_ioctl(vm, cmd, arg) _vm_ioctl(vm, cmd, #cmd, arg) 198 198 199 - int __vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long cmd, 199 + int __vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd, 200 200 void *arg); 201 - void _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long cmd, 201 + void _vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd, 202 202 const char *name, void *arg); 203 - #define vcpu_ioctl(vm, vcpuid, cmd, arg) \ 204 - _vcpu_ioctl(vm, vcpuid, cmd, #cmd, arg) 203 + #define vcpu_ioctl(vcpu, cmd, arg) \ 204 + _vcpu_ioctl(vcpu, cmd, #cmd, arg) 205 205 206 206 /* 207 207 * Looks up and returns the value corresponding to the capability ··· 288 288 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags); 289 289 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa); 290 290 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot); 291 - struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid); 291 + struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id); 292 292 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); 293 293 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages); 294 294 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm); ··· 300 300 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva); 301 301 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa); 302 302 303 - struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid); 304 - void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); 305 - int _vcpu_run(struct kvm_vm *vm, uint32_t 
vcpuid); 303 + struct kvm_run *vcpu_state(struct kvm_vcpu *vcpu); 304 + void vcpu_run(struct kvm_vcpu *vcpu); 305 + int _vcpu_run(struct kvm_vcpu *vcpu); 306 306 307 - static inline int __vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) 307 + static inline int __vcpu_run(struct kvm_vcpu *vcpu) 308 308 { 309 - return __vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL); 309 + return __vcpu_ioctl(vcpu, KVM_RUN, NULL); 310 310 } 311 311 312 - void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid); 313 - struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid); 312 + void vcpu_run_complete_io(struct kvm_vcpu *vcpu); 313 + struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu); 314 314 315 - static inline void vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id, 316 - uint32_t cap, uint64_t arg0) 315 + static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap, 316 + uint64_t arg0) 317 317 { 318 318 struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } }; 319 319 320 - vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_cap); 320 + vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap); 321 321 } 322 322 323 - static inline void vcpu_guest_debug_set(struct kvm_vm *vm, uint32_t vcpuid, 323 + static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu, 324 324 struct kvm_guest_debug *debug) 325 325 { 326 - vcpu_ioctl(vm, vcpuid, KVM_SET_GUEST_DEBUG, debug); 326 + vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug); 327 327 } 328 328 329 - static inline void vcpu_mp_state_get(struct kvm_vm *vm, uint32_t vcpuid, 329 + static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu, 330 330 struct kvm_mp_state *mp_state) 331 331 { 332 - vcpu_ioctl(vm, vcpuid, KVM_GET_MP_STATE, mp_state); 332 + vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state); 333 333 } 334 - static inline void vcpu_mp_state_set(struct kvm_vm *vm, uint32_t vcpuid, 334 + static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu, 335 335 struct kvm_mp_state *mp_state) 336 336 { 337 - vcpu_ioctl(vm, 
vcpuid, KVM_SET_MP_STATE, mp_state); 337 + vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state); 338 338 } 339 339 340 - static inline void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, 341 - struct kvm_regs *regs) 340 + static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 342 341 { 343 - vcpu_ioctl(vm, vcpuid, KVM_GET_REGS, regs); 342 + vcpu_ioctl(vcpu, KVM_GET_REGS, regs); 344 343 } 345 344 346 - static inline void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, 347 - struct kvm_regs *regs) 345 + static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 348 346 { 349 - vcpu_ioctl(vm, vcpuid, KVM_SET_REGS, regs); 347 + vcpu_ioctl(vcpu, KVM_SET_REGS, regs); 350 348 } 351 - static inline void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, 352 - struct kvm_sregs *sregs) 349 + static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 353 350 { 354 - vcpu_ioctl(vm, vcpuid, KVM_GET_SREGS, sregs); 351 + vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs); 355 352 356 353 } 357 - static inline void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, 358 - struct kvm_sregs *sregs) 354 + static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 359 355 { 360 - vcpu_ioctl(vm, vcpuid, KVM_SET_SREGS, sregs); 356 + vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs); 361 357 } 362 - static inline int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, 363 - struct kvm_sregs *sregs) 358 + static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 364 359 { 365 - return __vcpu_ioctl(vm, vcpuid, KVM_SET_SREGS, sregs); 360 + return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs); 366 361 } 367 - static inline void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, 368 - struct kvm_fpu *fpu) 362 + static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 369 363 { 370 - vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu); 364 + vcpu_ioctl(vcpu, KVM_GET_FPU, fpu); 371 365 } 372 - static inline 
void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, 373 - struct kvm_fpu *fpu) 366 + static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 374 367 { 375 - vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu); 368 + vcpu_ioctl(vcpu, KVM_SET_FPU, fpu); 376 369 } 377 370 378 - static inline int __vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, 379 - uint64_t reg_id, void *addr) 371 + static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr) 380 372 { 381 - struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)addr }; 373 + struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr }; 382 374 383 - return __vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg); 375 + return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg); 384 376 } 385 - static inline int __vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, 386 - uint64_t reg_id, uint64_t val) 377 + static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) 387 378 { 388 - struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)&val }; 379 + struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; 389 380 390 - return __vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg); 381 + return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg); 391 382 } 392 - static inline void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, 393 - uint64_t reg_id, void *addr) 383 + static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr) 394 384 { 395 - struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)addr }; 385 + struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr }; 396 386 397 - vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg); 387 + vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg); 398 388 } 399 - static inline void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, 400 - uint64_t reg_id, uint64_t val) 389 + static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) 401 390 { 402 - struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)&val 
}; 391 + struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; 403 392 404 - vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg); 393 + vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg); 405 394 } 406 395 407 396 #ifdef __KVM_HAVE_VCPU_EVENTS 408 - static inline void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, 397 + static inline void vcpu_events_get(struct kvm_vcpu *vcpu, 409 398 struct kvm_vcpu_events *events) 410 399 { 411 - vcpu_ioctl(vm, vcpuid, KVM_GET_VCPU_EVENTS, events); 400 + vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events); 412 401 } 413 - static inline void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, 402 + static inline void vcpu_events_set(struct kvm_vcpu *vcpu, 414 403 struct kvm_vcpu_events *events) 415 404 { 416 - vcpu_ioctl(vm, vcpuid, KVM_SET_VCPU_EVENTS, events); 405 + vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events); 417 406 } 418 407 #endif 419 408 #ifdef __x86_64__ 420 - static inline void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid, 409 + static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu, 421 410 struct kvm_nested_state *state) 422 411 { 423 - vcpu_ioctl(vm, vcpuid, KVM_GET_NESTED_STATE, state); 412 + vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state); 424 413 } 425 - static inline int __vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid, 414 + static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu, 426 415 struct kvm_nested_state *state) 427 416 { 428 - return __vcpu_ioctl(vm, vcpuid, KVM_SET_NESTED_STATE, state); 417 + return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state); 429 418 } 430 419 431 - static inline void vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid, 420 + static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu, 432 421 struct kvm_nested_state *state) 433 422 { 434 - vcpu_ioctl(vm, vcpuid, KVM_SET_NESTED_STATE, state); 423 + vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state); 435 424 } 436 425 #endif 437 - static inline int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid) 
426 + static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu) 438 427 { 439 - int fd = __vcpu_ioctl(vm, vcpuid, KVM_GET_STATS_FD, NULL); 428 + int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL); 440 429 441 430 TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd)); 442 431 return fd; ··· 460 471 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret)); 461 472 } 462 473 463 - int __vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, 464 - uint64_t attr); 465 - 466 - static inline void vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, 467 - uint32_t group, uint64_t attr) 474 + static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group, 475 + uint64_t attr) 468 476 { 469 - int ret = __vcpu_has_device_attr(vm, vcpuid, group, attr); 470 - 471 - TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_HAS_DEVICE_ATTR, ret)); 477 + return __kvm_has_device_attr(vcpu->fd, group, attr); 472 478 } 473 479 474 - int __vcpu_device_attr_get(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, 475 - uint64_t attr, void *val); 476 - void vcpu_device_attr_get(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, 477 - uint64_t attr, void *val); 478 - int __vcpu_device_attr_set(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, 479 - uint64_t attr, void *val); 480 - void vcpu_device_attr_set(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, 481 - uint64_t attr, void *val); 480 + static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group, 481 + uint64_t attr) 482 + { 483 + kvm_has_device_attr(vcpu->fd, group, attr); 484 + } 485 + 486 + static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group, 487 + uint64_t attr, void *val) 488 + { 489 + return __kvm_device_attr_get(vcpu->fd, group, attr, val); 490 + } 491 + 492 + static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group, 493 + uint64_t attr, void *val) 494 + { 495 + kvm_device_attr_get(vcpu->fd, group, attr, val); 496 + } 497 + 
498 + static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group, 499 + uint64_t attr, void *val) 500 + { 501 + return __kvm_device_attr_set(vcpu->fd, group, attr, val); 502 + } 503 + 504 + static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group, 505 + uint64_t attr, void *val) 506 + { 507 + kvm_device_attr_set(vcpu->fd, group, attr, val); 508 + } 509 + 482 510 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type); 483 511 int __kvm_create_device(struct kvm_vm *vm, uint64_t type); 484 512 ··· 507 501 return fd; 508 502 } 509 503 510 - void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid); 504 + void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu); 511 505 512 506 /* 513 507 * VM VCPU Args Set 514 508 * 515 509 * Input Args: 516 510 * vm - Virtual Machine 517 - * vcpuid - VCPU ID 518 511 * num - number of arguments 519 512 * ... - arguments, each of type uint64_t 520 513 * ··· 521 516 * 522 517 * Return: None 523 518 * 524 - * Sets the first @num function input registers of the VCPU with @vcpuid, 525 - * per the C calling convention of the architecture, to the values given 526 - * as variable args. Each of the variable args is expected to be of type 527 - * uint64_t. The maximum @num can be is specific to the architecture. 519 + * Sets the first @num input parameters for the function at @vcpu's entry point, 520 + * per the C calling convention of the architecture, to the values given as 521 + * variable args. Each of the variable args is expected to be of type uint64_t. 522 + * The maximum @num can be is specific to the architecture. 
528 523 */ 529 - void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...); 524 + void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...); 530 525 531 526 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); 532 527 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); ··· 631 626 memcpy(&(g), _p, sizeof(g)); \ 632 627 }) 633 628 634 - void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid); 629 + void assert_on_unhandled_exception(struct kvm_vcpu *vcpu); 635 630 636 - /* 637 - * VM VCPU Dump 638 - * 639 - * Input Args: 640 - * stream - Output FILE stream 641 - * vm - Virtual Machine 642 - * vcpuid - VCPU ID 643 - * indent - Left margin indent amount 644 - * 645 - * Output Args: None 646 - * 647 - * Return: None 648 - * 649 - * Dumps the current state of the VCPU specified by @vcpuid, within the VM 650 - * given by @vm, to the FILE stream given by @stream. 651 - */ 652 - 653 - void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, 631 + void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, 654 632 uint8_t indent); 655 633 656 - static inline void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, 634 + static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu, 657 635 uint8_t indent) 658 636 { 659 - vcpu_arch_dump(stream, vm, vcpuid, indent); 637 + vcpu_arch_dump(stream, vcpu, indent); 660 638 } 661 639 662 640 /* ··· 647 659 * 648 660 * Input Args: 649 661 * vm - Virtual Machine 650 - * vcpuid - The id of the VCPU to add to the VM. 662 + * vcpu_id - The id of the VCPU to add to the VM. 651 663 * guest_code - The vCPU's entry point 652 664 */ 653 665 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+1 -1
tools/testing/selftests/kvm/include/ucall_common.h
··· 26 26 void ucall_init(struct kvm_vm *vm, void *arg); 27 27 void ucall_uninit(struct kvm_vm *vm); 28 28 void ucall(uint64_t cmd, int nargs, ...); 29 - uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc); 29 + uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); 30 30 31 31 #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \ 32 32 ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
+1 -1
tools/testing/selftests/kvm/include/x86_64/evmcs.h
··· 241 241 extern struct hv_enlightened_vmcs *current_evmcs; 242 242 extern struct hv_vp_assist_page *current_vp_assist; 243 243 244 - int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id); 244 + int vcpu_enable_evmcs(struct kvm_vcpu *vcpu); 245 245 246 246 static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) 247 247 {
+37 -40
tools/testing/selftests/kvm/include/x86_64/processor.h
··· 422 422 return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f); 423 423 } 424 424 425 - struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid); 426 - void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, 427 - struct kvm_x86_state *state); 425 + struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu); 426 + void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state); 428 427 void kvm_x86_state_cleanup(struct kvm_x86_state *state); 429 428 430 429 const struct kvm_msr_list *kvm_get_msr_index_list(void); ··· 431 432 bool kvm_msr_is_in_save_restore_list(uint32_t msr_index); 432 433 uint64_t kvm_get_feature_msr(uint64_t msr_index); 433 434 434 - static inline void vcpu_msrs_get(struct kvm_vm *vm, uint32_t vcpuid, 435 + static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu, 435 436 struct kvm_msrs *msrs) 436 437 { 437 - int r = __vcpu_ioctl(vm, vcpuid, KVM_GET_MSRS, msrs); 438 + int r = __vcpu_ioctl(vcpu, KVM_GET_MSRS, msrs); 438 439 439 440 TEST_ASSERT(r == msrs->nmsrs, 440 441 "KVM_GET_MSRS failed, r: %i (failed on MSR %x)", 441 442 r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index); 442 443 } 443 - static inline void vcpu_msrs_set(struct kvm_vm *vm, uint32_t vcpuid, 444 - struct kvm_msrs *msrs) 444 + static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs) 445 445 { 446 - int r = __vcpu_ioctl(vm, vcpuid, KVM_SET_MSRS, msrs); 446 + int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs); 447 447 448 448 TEST_ASSERT(r == msrs->nmsrs, 449 449 "KVM_GET_MSRS failed, r: %i (failed on MSR %x)", 450 450 r, r < 0 || r >= msrs->nmsrs ? 
-1 : msrs->entries[r].index); 451 451 } 452 - static inline void vcpu_debugregs_get(struct kvm_vm *vm, uint32_t vcpuid, 452 + static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu, 453 453 struct kvm_debugregs *debugregs) 454 454 { 455 - vcpu_ioctl(vm, vcpuid, KVM_GET_DEBUGREGS, debugregs); 455 + vcpu_ioctl(vcpu, KVM_GET_DEBUGREGS, debugregs); 456 456 } 457 - static inline void vcpu_debugregs_set(struct kvm_vm *vm, uint32_t vcpuid, 457 + static inline void vcpu_debugregs_set(struct kvm_vcpu *vcpu, 458 458 struct kvm_debugregs *debugregs) 459 459 { 460 - vcpu_ioctl(vm, vcpuid, KVM_SET_DEBUGREGS, debugregs); 460 + vcpu_ioctl(vcpu, KVM_SET_DEBUGREGS, debugregs); 461 461 } 462 - static inline void vcpu_xsave_get(struct kvm_vm *vm, uint32_t vcpuid, 462 + static inline void vcpu_xsave_get(struct kvm_vcpu *vcpu, 463 463 struct kvm_xsave *xsave) 464 464 { 465 - vcpu_ioctl(vm, vcpuid, KVM_GET_XSAVE, xsave); 465 + vcpu_ioctl(vcpu, KVM_GET_XSAVE, xsave); 466 466 } 467 - static inline void vcpu_xsave2_get(struct kvm_vm *vm, uint32_t vcpuid, 467 + static inline void vcpu_xsave2_get(struct kvm_vcpu *vcpu, 468 468 struct kvm_xsave *xsave) 469 469 { 470 - vcpu_ioctl(vm, vcpuid, KVM_GET_XSAVE2, xsave); 470 + vcpu_ioctl(vcpu, KVM_GET_XSAVE2, xsave); 471 471 } 472 - static inline void vcpu_xsave_set(struct kvm_vm *vm, uint32_t vcpuid, 472 + static inline void vcpu_xsave_set(struct kvm_vcpu *vcpu, 473 473 struct kvm_xsave *xsave) 474 474 { 475 - vcpu_ioctl(vm, vcpuid, KVM_SET_XSAVE, xsave); 475 + vcpu_ioctl(vcpu, KVM_SET_XSAVE, xsave); 476 476 } 477 - static inline void vcpu_xcrs_get(struct kvm_vm *vm, uint32_t vcpuid, 477 + static inline void vcpu_xcrs_get(struct kvm_vcpu *vcpu, 478 478 struct kvm_xcrs *xcrs) 479 479 { 480 - vcpu_ioctl(vm, vcpuid, KVM_GET_XCRS, xcrs); 480 + vcpu_ioctl(vcpu, KVM_GET_XCRS, xcrs); 481 481 } 482 - static inline void vcpu_xcrs_set(struct kvm_vm *vm, uint32_t vcpuid, 483 - struct kvm_xcrs *xcrs) 482 + static inline void vcpu_xcrs_set(struct kvm_vcpu 
*vcpu, struct kvm_xcrs *xcrs) 484 483 { 485 - vcpu_ioctl(vm, vcpuid, KVM_SET_XCRS, xcrs); 484 + vcpu_ioctl(vcpu, KVM_SET_XCRS, xcrs); 486 485 } 487 486 488 487 struct kvm_cpuid2 *kvm_get_supported_cpuid(void); 489 - struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid); 488 + struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vcpu *vcpu); 490 489 491 - static inline int __vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid, 490 + static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu, 492 491 struct kvm_cpuid2 *cpuid) 493 492 { 494 - return __vcpu_ioctl(vm, vcpuid, KVM_SET_CPUID2, cpuid); 493 + return __vcpu_ioctl(vcpu, KVM_SET_CPUID2, cpuid); 495 494 } 496 495 497 - static inline void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid, 496 + static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu, 498 497 struct kvm_cpuid2 *cpuid) 499 498 { 500 - vcpu_ioctl(vm, vcpuid, KVM_SET_CPUID2, cpuid); 499 + vcpu_ioctl(vcpu, KVM_SET_CPUID2, cpuid); 501 500 } 502 501 503 502 struct kvm_cpuid_entry2 * ··· 507 510 return kvm_get_supported_cpuid_index(function, 0); 508 511 } 509 512 510 - uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index); 511 - int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, 512 - uint64_t msr_value); 513 + uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index); 514 + int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value); 513 515 514 - static inline void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, 515 - uint64_t msr_index, uint64_t msr_value) 516 + static inline void vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, 517 + uint64_t msr_value) 516 518 { 517 - int r = _vcpu_set_msr(vm, vcpuid, msr_index, msr_value); 519 + int r = _vcpu_set_msr(vcpu, msr_index, msr_value); 518 520 519 521 TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_SET_MSRS, r)); 520 522 } ··· 537 541 }; 538 542 539 543 void vm_init_descriptor_tables(struct kvm_vm *vm); 540 - void 
vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid); 544 + void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu); 541 545 void vm_install_exception_handler(struct kvm_vm *vm, int vector, 542 546 void (*handler)(struct ex_regs *)); 543 547 544 - uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr); 545 - void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr, 546 - uint64_t pte); 548 + uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu, 549 + uint64_t vaddr); 550 + void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu, 551 + uint64_t vaddr, uint64_t pte); 547 552 548 553 /* 549 554 * get_cpuid() - find matching CPUID entry and return pointer to it. ··· 564 567 uint64_t a3); 565 568 566 569 struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void); 567 - void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid); 568 - struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid); 570 + void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu); 571 + struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu); 569 572 void vm_xsave_req_perm(int bit); 570 573 571 574 enum pg_level {
+1 -1
tools/testing/selftests/kvm/kvm_binary_stats_test.c
··· 174 174 175 175 static void vcpu_stats_test(struct kvm_vcpu *vcpu) 176 176 { 177 - int stats_fd = vcpu_get_stats_fd(vcpu->vm, vcpu->id); 177 + int stats_fd = vcpu_get_stats_fd(vcpu); 178 178 179 179 stats_test(stats_fd); 180 180 close(stats_fd);
+3 -4
tools/testing/selftests/kvm/kvm_page_table_test.c
··· 184 184 185 185 static void *vcpu_worker(void *data) 186 186 { 187 - struct kvm_vm *vm = test_args.vm; 188 187 struct kvm_vcpu *vcpu = data; 189 188 bool do_write = !(vcpu->id % 2); 190 189 struct timespec start; ··· 191 192 enum test_stage stage; 192 193 int ret; 193 194 194 - vcpu_args_set(vm, vcpu->id, 1, do_write); 195 + vcpu_args_set(vcpu, 1, do_write); 195 196 196 197 while (!READ_ONCE(host_quit)) { 197 198 ret = sem_wait(&test_stage_updated); ··· 201 202 return NULL; 202 203 203 204 clock_gettime(CLOCK_MONOTONIC_RAW, &start); 204 - ret = _vcpu_run(vm, vcpu->id); 205 + ret = _vcpu_run(vcpu); 205 206 ts_diff = timespec_elapsed(start); 206 207 207 208 TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret); 208 - TEST_ASSERT(get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC, 209 + TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC, 209 210 "Invalid guest sync status: exit_reason=%s\n", 210 211 exit_reason_str(vcpu->run->exit_reason)); 211 212
+24 -23
tools/testing/selftests/kvm/lib/aarch64/processor.c
··· 212 212 } 213 213 } 214 214 215 - void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init) 215 + void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init) 216 216 { 217 217 struct kvm_vcpu_init default_init = { .target = -1, }; 218 + struct kvm_vm *vm = vcpu->vm; 218 219 uint64_t sctlr_el1, tcr_el1; 219 220 220 221 if (!init) ··· 227 226 init->target = preferred.target; 228 227 } 229 228 230 - vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, init); 229 + vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init); 231 230 232 231 /* 233 232 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15 234 233 * registers, which the variable argument list macros do. 235 234 */ 236 - vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20); 235 + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20); 237 236 238 - vcpu_get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1); 239 - vcpu_get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1); 237 + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1); 238 + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1); 240 239 241 240 /* Configure base granule size */ 242 241 switch (vm->mode) { ··· 297 296 tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12); 298 297 tcr_el1 |= (64 - vm->va_bits) /* T0SZ */; 299 298 300 - vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1); 301 - vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1); 302 - vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1); 303 - vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd); 304 - vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpuid); 299 + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1); 300 + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1); 301 + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1); 302 + vcpu_set_reg(vcpu, 
KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd); 303 + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id); 305 304 } 306 305 307 - void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) 306 + void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) 308 307 { 309 308 uint64_t pstate, pc; 310 309 311 - vcpu_get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate); 312 - vcpu_get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc); 310 + vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate); 311 + vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc); 313 312 314 313 fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n", 315 314 indent, "", pstate, pc); ··· 325 324 DEFAULT_ARM64_GUEST_STACK_VADDR_MIN); 326 325 struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id); 327 326 328 - aarch64_vcpu_setup(vm, vcpu_id, init); 327 + aarch64_vcpu_setup(vcpu, init); 329 328 330 - vcpu_set_reg(vm, vcpu_id, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size); 331 - vcpu_set_reg(vm, vcpu_id, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); 329 + vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size); 330 + vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); 332 331 333 332 return vcpu; 334 333 } ··· 339 338 return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code); 340 339 } 341 340 342 - void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...) 341 + void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
343 342 { 344 343 va_list ap; 345 344 int i; ··· 350 349 va_start(ap, num); 351 350 352 351 for (i = 0; i < num; i++) { 353 - vcpu_set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]), 354 - va_arg(ap, uint64_t)); 352 + vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]), 353 + va_arg(ap, uint64_t)); 355 354 } 356 355 357 356 va_end(ap); ··· 364 363 ; 365 364 } 366 365 367 - void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid) 366 + void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) 368 367 { 369 368 struct ucall uc; 370 369 371 - if (get_ucall(vm, vcpuid, &uc) != UCALL_UNHANDLED) 370 + if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED) 372 371 return; 373 372 374 373 if (uc.args[2]) /* valid_ec */ { ··· 386 385 handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM]; 387 386 }; 388 387 389 - void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid) 388 + void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu) 390 389 { 391 390 extern char vectors; 392 391 393 - vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors); 392 + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors); 394 393 } 395 394 396 395 void route_exception(struct ex_regs *regs, int vector)
+4 -4
tools/testing/selftests/kvm/lib/aarch64/ucall.c
··· 88 88 *ucall_exit_mmio_addr = (vm_vaddr_t)&uc; 89 89 } 90 90 91 - uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) 91 + uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) 92 92 { 93 - struct kvm_run *run = vcpu_state(vm, vcpu_id); 93 + struct kvm_run *run = vcpu->run; 94 94 struct ucall ucall = {}; 95 95 96 96 if (uc) ··· 103 103 TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8, 104 104 "Unexpected ucall exit mmio address access"); 105 105 memcpy(&gva, run->mmio.data, sizeof(gva)); 106 - memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall)); 106 + memcpy(&ucall, addr_gva2hva(vcpu->vm, gva), sizeof(ucall)); 107 107 108 - vcpu_run_complete_io(vm, vcpu_id); 108 + vcpu_run_complete_io(vcpu); 109 109 if (uc) 110 110 memcpy(uc, &ucall, sizeof(ucall)); 111 111 }
+39 -123
tools/testing/selftests/kvm/lib/kvm_util.c
··· 1395 1395 return (void *) ((uintptr_t) region->host_alias + offset); 1396 1396 } 1397 1397 1398 - /* 1399 - * VM Create IRQ Chip 1400 - * 1401 - * Input Args: 1402 - * vm - Virtual Machine 1403 - * 1404 - * Output Args: None 1405 - * 1406 - * Return: None 1407 - * 1408 - * Creates an interrupt controller chip for the VM specified by vm. 1409 - */ 1398 + /* Create an interrupt controller chip for the specified VM. */ 1410 1399 void vm_create_irqchip(struct kvm_vm *vm) 1411 1400 { 1412 1401 vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL); 1413 1402 1414 1403 vm->has_irqchip = true; 1415 1404 } 1416 - 1417 - /* 1418 - * VM VCPU State 1419 - * 1420 - * Input Args: 1421 - * vm - Virtual Machine 1422 - * vcpuid - VCPU ID 1423 - * 1424 - * Output Args: None 1425 - * 1426 - * Return: 1427 - * Pointer to structure that describes the state of the VCPU. 1428 - * 1429 - * Locates and returns a pointer to a structure that describes the 1430 - * state of the VCPU with the given vcpuid. 1431 - */ 1432 - struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid) 1405 + struct kvm_run *vcpu_state(struct kvm_vcpu *vcpu) 1433 1406 { 1434 - struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid); 1435 - 1436 1407 return vcpu->run; 1437 1408 } 1438 1409 1439 - /* 1440 - * VM VCPU Run 1441 - * 1442 - * Input Args: 1443 - * vm - Virtual Machine 1444 - * vcpuid - VCPU ID 1445 - * 1446 - * Output Args: None 1447 - * 1448 - * Return: None 1449 - * 1450 - * Switch to executing the code for the VCPU given by vcpuid, within the VM 1451 - * given by vm. 
1452 - */ 1453 - void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) 1454 - { 1455 - int ret = _vcpu_run(vm, vcpuid); 1456 1410 1457 - TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret)); 1458 - } 1459 - 1460 - int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) 1411 + int _vcpu_run(struct kvm_vcpu *vcpu) 1461 1412 { 1462 1413 int rc; 1463 1414 1464 1415 do { 1465 - rc = __vcpu_run(vm, vcpuid); 1416 + rc = __vcpu_run(vcpu); 1466 1417 } while (rc == -1 && errno == EINTR); 1467 1418 1468 - assert_on_unhandled_exception(vm, vcpuid); 1419 + assert_on_unhandled_exception(vcpu); 1469 1420 1470 1421 return rc; 1471 1422 } 1472 1423 1473 - void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid) 1424 + /* 1425 + * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR. 1426 + * Assert if the KVM returns an error (other than -EINTR). 1427 + */ 1428 + void vcpu_run(struct kvm_vcpu *vcpu) 1474 1429 { 1475 - struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid); 1430 + int ret = _vcpu_run(vcpu); 1431 + 1432 + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret)); 1433 + } 1434 + 1435 + void vcpu_run_complete_io(struct kvm_vcpu *vcpu) 1436 + { 1476 1437 int ret; 1477 1438 1478 1439 vcpu->run->immediate_exit = 1; 1479 - ret = __vcpu_run(vm, vcpuid); 1440 + ret = __vcpu_run(vcpu); 1480 1441 vcpu->run->immediate_exit = 0; 1481 1442 1482 1443 TEST_ASSERT(ret == -1 && errno == EINTR, ··· 1446 1485 } 1447 1486 1448 1487 /* 1449 - * VM VCPU Get Reg List 1450 - * 1451 - * Input Args: 1452 - * vm - Virtual Machine 1453 - * vcpuid - VCPU ID 1454 - * 1455 - * Output Args: 1456 - * None 1457 - * 1458 - * Return: 1459 - * A pointer to an allocated struct kvm_reg_list 1460 - * 1461 1488 * Get the list of guest registers which are supported for 1462 - * KVM_GET_ONE_REG/KVM_SET_ONE_REG calls 1489 + * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Returns a kvm_reg_list pointer, 1490 + * it is the callers responsibility to free the list. 
1463 1491 */ 1464 - struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid) 1492 + struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu) 1465 1493 { 1466 1494 struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list; 1467 1495 int ret; 1468 1496 1469 - ret = __vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n); 1497 + ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &reg_list_n); 1470 1498 TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0"); 1499 + 1471 1500 reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64)); 1472 1501 reg_list->n = reg_list_n.n; 1473 - vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list); 1502 + vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list); 1474 1503 return reg_list; 1475 1504 } 1476 1505 1477 - int __vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, 1478 - unsigned long cmd, void *arg) 1506 + int __vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd, void *arg) 1479 1507 { 1480 - struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid); 1481 - 1482 1508 return ioctl(vcpu->fd, cmd, arg); 1483 1509 } 1484 1510 1485 - void _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long cmd, 1486 - const char *name, void *arg) 1511 + void _vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd, const char *name, 1512 + void *arg) 1487 1513 { 1488 - int ret = __vcpu_ioctl(vm, vcpuid, cmd, arg); 1514 + int ret = __vcpu_ioctl(vcpu, cmd, arg); 1489 1515 1490 1516 TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret)); 1491 1517 } 1492 1518 1493 - void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid) 1519 + void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu) 1494 1520 { 1495 - struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid); 1496 - uint32_t size = vm->dirty_ring_size; 1521 + uint32_t page_size = vcpu->vm->page_size; 1522 + uint32_t size = vcpu->vm->dirty_ring_size; 1497 1523 1498 1524 TEST_ASSERT(size > 0, "Should enable dirty ring first"); 1499 1525 1500 1526 if (!vcpu->dirty_gfns) { 1501 1527 void *addr; 1502 1528 1503 - addr 
= mmap(NULL, size, PROT_READ, 1504 - MAP_PRIVATE, vcpu->fd, 1505 - vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET); 1529 + addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd, 1530 + page_size * KVM_DIRTY_LOG_PAGE_OFFSET); 1506 1531 TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private"); 1507 1532 1508 - addr = mmap(NULL, size, PROT_READ | PROT_EXEC, 1509 - MAP_PRIVATE, vcpu->fd, 1510 - vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET); 1533 + addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd, 1534 + page_size * KVM_DIRTY_LOG_PAGE_OFFSET); 1511 1535 TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec"); 1512 1536 1513 - addr = mmap(NULL, size, PROT_READ | PROT_WRITE, 1514 - MAP_SHARED, vcpu->fd, 1515 - vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET); 1537 + addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 1538 + page_size * KVM_DIRTY_LOG_PAGE_OFFSET); 1516 1539 TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed"); 1517 1540 1518 1541 vcpu->dirty_gfns = addr; ··· 1579 1634 }; 1580 1635 1581 1636 return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr); 1582 - } 1583 - 1584 - int __vcpu_device_attr_get(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, 1585 - uint64_t attr, void *val) 1586 - { 1587 - return __kvm_device_attr_get(vcpu_get(vm, vcpuid)->fd, group, attr, val); 1588 - } 1589 - 1590 - void vcpu_device_attr_get(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, 1591 - uint64_t attr, void *val) 1592 - { 1593 - kvm_device_attr_get(vcpu_get(vm, vcpuid)->fd, group, attr, val); 1594 - } 1595 - 1596 - int __vcpu_device_attr_set(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, 1597 - uint64_t attr, void *val) 1598 - { 1599 - return __kvm_device_attr_set(vcpu_get(vm, vcpuid)->fd, group, attr, val); 1600 - } 1601 - 1602 - void vcpu_device_attr_set(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, 1603 - uint64_t attr, void *val) 1604 - { 1605 - kvm_device_attr_set(vcpu_get(vm, vcpuid)->fd, group, attr, val); 1606 - } 
1607 - 1608 - int __vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, 1609 - uint64_t attr) 1610 - { 1611 - return __kvm_has_device_attr(vcpu_get(vm, vcpuid)->fd, group, attr); 1612 1637 } 1613 1638 1614 1639 /* ··· 1696 1781 virt_dump(stream, vm, indent + 4); 1697 1782 } 1698 1783 fprintf(stream, "%*sVCPUs:\n", indent, ""); 1784 + 1699 1785 list_for_each_entry(vcpu, &vm->vcpus, list) 1700 - vcpu_dump(stream, vm, vcpu->id, indent + 2); 1786 + vcpu_dump(stream, vcpu, indent + 2); 1701 1787 } 1702 1788 1703 1789 /* Known KVM exit reasons */
+1 -1
tools/testing/selftests/kvm/lib/perf_test_util.c
··· 98 98 vcpu_args->gpa = pta->gpa; 99 99 } 100 100 101 - vcpu_args_set(vm, vcpus[i]->id, 1, i); 101 + vcpu_args_set(vcpus[i], 1, i); 102 102 103 103 pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n", 104 104 i, vcpu_args->gpa, vcpu_args->gpa +
+46 -48
tools/testing/selftests/kvm/lib/riscv/processor.c
··· 178 178 } 179 179 } 180 180 181 - void riscv_vcpu_mmu_setup(struct kvm_vm *vm, int vcpuid) 181 + void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu) 182 182 { 183 + struct kvm_vm *vm = vcpu->vm; 183 184 unsigned long satp; 184 185 185 186 /* ··· 199 198 satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN; 200 199 satp |= SATP_MODE_48; 201 200 202 - vcpu_set_reg(vm, vcpuid, RISCV_CSR_REG(satp), satp); 201 + vcpu_set_reg(vcpu, RISCV_CSR_REG(satp), satp); 203 202 } 204 203 205 - void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) 204 + void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) 206 205 { 207 206 struct kvm_riscv_core core; 208 207 209 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(mode), &core.mode); 210 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc), &core.regs.pc); 211 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.ra), &core.regs.ra); 212 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp), &core.regs.sp); 213 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), &core.regs.gp); 214 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.tp), &core.regs.tp); 215 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t0), &core.regs.t0); 216 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t1), &core.regs.t1); 217 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t2), &core.regs.t2); 218 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s0), &core.regs.s0); 219 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s1), &core.regs.s1); 220 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a0), &core.regs.a0); 221 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a1), &core.regs.a1); 222 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a2), &core.regs.a2); 223 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a3), &core.regs.a3); 224 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a4), &core.regs.a4); 225 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a5), &core.regs.a5); 226 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a6), &core.regs.a6); 227 - 
vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a7), &core.regs.a7); 228 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s2), &core.regs.s2); 229 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s3), &core.regs.s3); 230 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s4), &core.regs.s4); 231 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s5), &core.regs.s5); 232 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s6), &core.regs.s6); 233 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s7), &core.regs.s7); 234 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s8), &core.regs.s8); 235 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s9), &core.regs.s9); 236 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s10), &core.regs.s10); 237 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s11), &core.regs.s11); 238 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t3), &core.regs.t3); 239 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t4), &core.regs.t4); 240 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t5), &core.regs.t5); 241 - vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t6), &core.regs.t6); 208 + vcpu_get_reg(vcpu, RISCV_CORE_REG(mode), &core.mode); 209 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &core.regs.pc); 210 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra), &core.regs.ra); 211 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp), &core.regs.sp); 212 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp), &core.regs.gp); 213 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp), &core.regs.tp); 214 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0), &core.regs.t0); 215 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1), &core.regs.t1); 216 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2), &core.regs.t2); 217 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s0), &core.regs.s0); 218 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s1), &core.regs.s1); 219 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a0), &core.regs.a0); 220 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a1), &core.regs.a1); 221 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a2), &core.regs.a2); 222 
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a3), &core.regs.a3); 223 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a4), &core.regs.a4); 224 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a5), &core.regs.a5); 225 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a6), &core.regs.a6); 226 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a7), &core.regs.a7); 227 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s2), &core.regs.s2); 228 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s3), &core.regs.s3); 229 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s4), &core.regs.s4); 230 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s5), &core.regs.s5); 231 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s6), &core.regs.s6); 232 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s7), &core.regs.s7); 233 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s8), &core.regs.s8); 234 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s9), &core.regs.s9); 235 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s10), &core.regs.s10); 236 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s11), &core.regs.s11); 237 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t3), &core.regs.t3); 238 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4), &core.regs.t4); 239 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t5), &core.regs.t5); 240 + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6), &core.regs.t6); 242 241 243 242 fprintf(stream, 244 243 " MODE: 0x%lx\n", core.mode); ··· 289 288 struct kvm_vcpu *vcpu; 290 289 291 290 vcpu = __vm_vcpu_add(vm, vcpu_id); 292 - riscv_vcpu_mmu_setup(vm, vcpu_id); 291 + riscv_vcpu_mmu_setup(vcpu); 293 292 294 293 /* 295 294 * With SBI HSM support in KVM RISC-V, all secondary VCPUs are ··· 297 296 * are powered-on using KVM_SET_MP_STATE ioctl(). 
298 297 */ 299 298 mps.mp_state = KVM_MP_STATE_RUNNABLE; 300 - r = __vcpu_ioctl(vm, vcpu_id, KVM_SET_MP_STATE, &mps); 299 + r = __vcpu_ioctl(vcpu, KVM_SET_MP_STATE, &mps); 301 300 TEST_ASSERT(!r, "IOCTL KVM_SET_MP_STATE failed (error %d)", r); 302 301 303 302 /* Setup global pointer of guest to be same as the host */ 304 303 asm volatile ( 305 304 "add %0, gp, zero" : "=r" (current_gp) : : "memory"); 306 - vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.gp), current_gp); 305 + vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp); 307 306 308 307 /* Setup stack pointer and program counter of guest */ 309 - vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.sp), 310 - stack_vaddr + stack_size); 311 - vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.pc), 312 - (unsigned long)guest_code); 308 + vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size); 309 + vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code); 313 310 314 311 /* Setup default exception vector of guest */ 315 - vcpu_set_reg(vm, vcpu_id, RISCV_CSR_REG(stvec), 316 - (unsigned long)guest_unexp_trap); 312 + vcpu_set_reg(vcpu, RISCV_CSR_REG(stvec), (unsigned long)guest_unexp_trap); 317 313 318 314 return vcpu; 319 315 } 320 316 321 - void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...) 317 + void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 322 318 { 323 319 va_list ap; 324 320 uint64_t id = RISCV_CORE_REG(regs.a0); ··· 353 355 id = RISCV_CORE_REG(regs.a7); 354 356 break; 355 357 } 356 - vcpu_set_reg(vm, vcpuid, id, va_arg(ap, uint64_t)); 358 + vcpu_set_reg(vcpu, id, va_arg(ap, uint64_t)); 357 359 } 358 360 359 361 va_end(ap); 360 362 } 361 363 362 - void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid) 364 + void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) 363 365 { 364 366 }
+7 -6
tools/testing/selftests/kvm/lib/riscv/ucall.c
··· 64 64 (vm_vaddr_t)&uc, 0, 0, 0, 0, 0); 65 65 } 66 66 67 - uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) 67 + uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) 68 68 { 69 - struct kvm_run *run = vcpu_state(vm, vcpu_id); 69 + struct kvm_run *run = vcpu->run; 70 70 struct ucall ucall = {}; 71 71 72 72 if (uc) ··· 76 76 run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT) { 77 77 switch (run->riscv_sbi.function_id) { 78 78 case KVM_RISCV_SELFTESTS_SBI_UCALL: 79 - memcpy(&ucall, addr_gva2hva(vm, 80 - run->riscv_sbi.args[0]), sizeof(ucall)); 79 + memcpy(&ucall, 80 + addr_gva2hva(vcpu->vm, run->riscv_sbi.args[0]), 81 + sizeof(ucall)); 81 82 82 - vcpu_run_complete_io(vm, vcpu_id); 83 + vcpu_run_complete_io(vcpu); 83 84 if (uc) 84 85 memcpy(uc, &ucall, sizeof(ucall)); 85 86 86 87 break; 87 88 case KVM_RISCV_SELFTESTS_SBI_UNEXP: 88 - vcpu_dump(stderr, vm, vcpu_id, 2); 89 + vcpu_dump(stderr, vcpu, 2); 89 90 TEST_ASSERT(0, "Unexpected trap taken by guest"); 90 91 break; 91 92 default:
+1 -1
tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c
··· 32 32 uint64_t diag318_info; 33 33 34 34 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 35 - vcpu_run(vm, vcpu->id); 35 + vcpu_run(vcpu); 36 36 run = vcpu->run; 37 37 38 38 TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
+10 -12
tools/testing/selftests/kvm/lib/s390x/processor.c
··· 173 173 vcpu = __vm_vcpu_add(vm, vcpu_id); 174 174 175 175 /* Setup guest registers */ 176 - vcpu_regs_get(vm, vcpu_id, &regs); 176 + vcpu_regs_get(vcpu, &regs); 177 177 regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160; 178 - vcpu_regs_set(vm, vcpu_id, &regs); 178 + vcpu_regs_set(vcpu, &regs); 179 179 180 - vcpu_sregs_get(vm, vcpu_id, &sregs); 180 + vcpu_sregs_get(vcpu, &sregs); 181 181 sregs.crs[0] |= 0x00040000; /* Enable floating point regs */ 182 182 sregs.crs[1] = vm->pgd | 0xf; /* Primary region table */ 183 - vcpu_sregs_set(vm, vcpu_id, &sregs); 183 + vcpu_sregs_set(vcpu, &sregs); 184 184 185 - run = vcpu_state(vm, vcpu_id); 185 + run = vcpu->run; 186 186 run->psw_mask = 0x0400000180000000ULL; /* DAT enabled + 64 bit mode */ 187 187 run->psw_addr = (uintptr_t)guest_code; 188 188 189 189 return vcpu; 190 190 } 191 191 192 - void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...) 192 + void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 193 193 { 194 194 va_list ap; 195 195 struct kvm_regs regs; ··· 200 200 num); 201 201 202 202 va_start(ap, num); 203 - vcpu_regs_get(vm, vcpuid, &regs); 203 + vcpu_regs_get(vcpu, &regs); 204 204 205 205 for (i = 0; i < num; i++) 206 206 regs.gprs[i + 2] = va_arg(ap, uint64_t); 207 207 208 - vcpu_regs_set(vm, vcpuid, &regs); 208 + vcpu_regs_set(vcpu, &regs); 209 209 va_end(ap); 210 210 } 211 211 212 - void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) 212 + void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) 213 213 { 214 - struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid); 215 - 216 214 fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n", 217 215 indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr); 218 216 } 219 217 220 - void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid) 218 + void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) 221 219 { 222 220 }
+4 -4
tools/testing/selftests/kvm/lib/s390x/ucall.c
··· 33 33 asm volatile ("diag 0,%0,0x501" : : "a"(&uc) : "memory"); 34 34 } 35 35 36 - uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) 36 + uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) 37 37 { 38 - struct kvm_run *run = vcpu_state(vm, vcpu_id); 38 + struct kvm_run *run = vcpu->run; 39 39 struct ucall ucall = {}; 40 40 41 41 if (uc) ··· 47 47 (run->s390_sieic.ipb >> 16) == 0x501) { 48 48 int reg = run->s390_sieic.ipa & 0xf; 49 49 50 - memcpy(&ucall, addr_gva2hva(vm, run->s.regs.gprs[reg]), 50 + memcpy(&ucall, addr_gva2hva(vcpu->vm, run->s.regs.gprs[reg]), 51 51 sizeof(ucall)); 52 52 53 - vcpu_run_complete_io(vm, vcpu_id); 53 + vcpu_run_complete_io(vcpu); 54 54 if (uc) 55 55 memcpy(uc, &ucall, sizeof(ucall)); 56 56 }
+66 -77
tools/testing/selftests/kvm/lib/x86_64/processor.c
··· 212 212 __virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K); 213 213 } 214 214 215 - static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, 216 - uint64_t vaddr) 215 + static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, 216 + struct kvm_vcpu *vcpu, 217 + uint64_t vaddr) 217 218 { 218 219 uint16_t index[4]; 219 220 uint64_t *pml4e, *pdpe, *pde; ··· 236 235 * If IA32_EFER.NXE = 0 and the P flag of a paging-structure entry is 1, 237 236 * the XD flag (bit 63) is reserved. 238 237 */ 239 - vcpu_sregs_get(vm, vcpuid, &sregs); 238 + vcpu_sregs_get(vcpu, &sregs); 240 239 if ((sregs.efer & EFER_NX) == 0) { 241 240 rsvd_mask |= PTE_NX_MASK; 242 241 } ··· 288 287 return &pte[index[0]]; 289 288 } 290 289 291 - uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr) 290 + uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu, 291 + uint64_t vaddr) 292 292 { 293 - uint64_t *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr); 293 + uint64_t *pte = _vm_get_page_table_entry(vm, vcpu, vaddr); 294 294 295 295 return *(uint64_t *)pte; 296 296 } 297 297 298 - void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr, 299 - uint64_t pte) 298 + void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu, 299 + uint64_t vaddr, uint64_t pte) 300 300 { 301 - uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpuid, vaddr); 301 + uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpu, vaddr); 302 302 303 303 *(uint64_t *)new_pte = pte; 304 304 } ··· 548 546 kvm_seg_fill_gdt_64bit(vm, segp); 549 547 } 550 548 551 - static void vcpu_setup(struct kvm_vm *vm, int vcpuid) 549 + static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu) 552 550 { 553 551 struct kvm_sregs sregs; 554 552 555 553 /* Set mode specific system register values. 
*/ 556 - vcpu_sregs_get(vm, vcpuid, &sregs); 554 + vcpu_sregs_get(vcpu, &sregs); 557 555 558 556 sregs.idt.limit = 0; 559 557 ··· 577 575 } 578 576 579 577 sregs.cr3 = vm->pgd; 580 - vcpu_sregs_set(vm, vcpuid, &sregs); 578 + vcpu_sregs_set(vcpu, &sregs); 581 579 } 582 580 583 581 #define CPUID_XFD_BIT (1 << 4) ··· 646 644 DEFAULT_GUEST_STACK_VADDR_MIN); 647 645 648 646 vcpu = __vm_vcpu_add(vm, vcpu_id); 649 - vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid()); 650 - vcpu_setup(vm, vcpu_id); 647 + vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid()); 648 + vcpu_setup(vm, vcpu); 651 649 652 650 /* Setup guest general purpose registers */ 653 - vcpu_regs_get(vm, vcpu_id, &regs); 651 + vcpu_regs_get(vcpu, &regs); 654 652 regs.rflags = regs.rflags | 0x2; 655 653 regs.rsp = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()); 656 654 regs.rip = (unsigned long) guest_code; 657 - vcpu_regs_set(vm, vcpu_id, &regs); 655 + vcpu_regs_set(vcpu, &regs); 658 656 659 657 /* Setup the MP state */ 660 658 mp_state.mp_state = 0; 661 - vcpu_mp_state_set(vm, vcpu_id, &mp_state); 659 + vcpu_mp_state_set(vcpu, &mp_state); 662 660 663 661 return vcpu; 664 662 } ··· 744 742 return buffer.entry.data; 745 743 } 746 744 747 - /* 748 - * VM VCPU CPUID Set 749 - * 750 - * Input Args: 751 - * vm - Virtual Machine 752 - * vcpuid - VCPU id 753 - * 754 - * Output Args: None 755 - * 756 - * Return: KVM CPUID (KVM_GET_CPUID2) 757 - * 758 - * Set the VCPU's CPUID. 
759 - */ 760 - struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid) 745 + struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vcpu *vcpu) 761 746 { 762 747 struct kvm_cpuid2 *cpuid; 763 748 int max_ent; ··· 754 765 max_ent = cpuid->nent; 755 766 756 767 for (cpuid->nent = 1; cpuid->nent <= max_ent; cpuid->nent++) { 757 - rc = __vcpu_ioctl(vm, vcpuid, KVM_GET_CPUID2, cpuid); 768 + rc = __vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid); 758 769 if (!rc) 759 770 break; 760 771 ··· 801 812 return entry; 802 813 } 803 814 804 - uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index) 815 + uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index) 805 816 { 806 817 struct { 807 818 struct kvm_msrs header; ··· 811 822 buffer.header.nmsrs = 1; 812 823 buffer.entry.index = msr_index; 813 824 814 - vcpu_msrs_get(vm, vcpuid, &buffer.header); 825 + vcpu_msrs_get(vcpu, &buffer.header); 815 826 816 827 return buffer.entry.data; 817 828 } 818 829 819 - int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, 820 - uint64_t msr_value) 830 + int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value) 821 831 { 822 832 struct { 823 833 struct kvm_msrs header; ··· 828 840 buffer.entry.index = msr_index; 829 841 buffer.entry.data = msr_value; 830 842 831 - return __vcpu_ioctl(vm, vcpuid, KVM_SET_MSRS, &buffer.header); 843 + return __vcpu_ioctl(vcpu, KVM_SET_MSRS, &buffer.header); 832 844 } 833 845 834 - void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...) 846 + void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
835 847 { 836 848 va_list ap; 837 849 struct kvm_regs regs; ··· 841 853 num); 842 854 843 855 va_start(ap, num); 844 - vcpu_regs_get(vm, vcpuid, &regs); 856 + vcpu_regs_get(vcpu, &regs); 845 857 846 858 if (num >= 1) 847 859 regs.rdi = va_arg(ap, uint64_t); ··· 861 873 if (num >= 6) 862 874 regs.r9 = va_arg(ap, uint64_t); 863 875 864 - vcpu_regs_set(vm, vcpuid, &regs); 876 + vcpu_regs_set(vcpu, &regs); 865 877 va_end(ap); 866 878 } 867 879 868 - void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) 880 + void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) 869 881 { 870 882 struct kvm_regs regs; 871 883 struct kvm_sregs sregs; 872 884 873 - fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid); 885 + fprintf(stream, "%*svCPU ID: %u\n", indent, "", vcpu->id); 874 886 875 887 fprintf(stream, "%*sregs:\n", indent + 2, ""); 876 - vcpu_regs_get(vm, vcpuid, &regs); 888 + vcpu_regs_get(vcpu, &regs); 877 889 regs_dump(stream, &regs, indent + 4); 878 890 879 891 fprintf(stream, "%*ssregs:\n", indent + 2, ""); 880 - vcpu_sregs_get(vm, vcpuid, &sregs); 892 + vcpu_sregs_get(vcpu, &sregs); 881 893 sregs_dump(stream, &sregs, indent + 4); 882 894 } 883 895 ··· 947 959 return false; 948 960 } 949 961 950 - static void vcpu_save_xsave_state(struct kvm_vm *vm, uint32_t vcpuid, 962 + static void vcpu_save_xsave_state(struct kvm_vcpu *vcpu, 951 963 struct kvm_x86_state *state) 952 964 { 953 - int size = vm_check_cap(vm, KVM_CAP_XSAVE2); 965 + int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2); 954 966 955 967 if (size) { 956 968 state->xsave = malloc(size); 957 - vcpu_xsave2_get(vm, vcpuid, state->xsave); 969 + vcpu_xsave2_get(vcpu, state->xsave); 958 970 } else { 959 971 state->xsave = malloc(sizeof(struct kvm_xsave)); 960 - vcpu_xsave_get(vm, vcpuid, state->xsave); 972 + vcpu_xsave_get(vcpu, state->xsave); 961 973 } 962 974 } 963 975 964 - struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid) 976 + struct 
kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu) 965 977 { 966 978 const struct kvm_msr_list *msr_list = kvm_get_msr_index_list(); 967 979 struct kvm_x86_state *state; ··· 982 994 * kernel with KVM_RUN. Complete IO prior to migrating state 983 995 * to a new VM. 984 996 */ 985 - vcpu_run_complete_io(vm, vcpuid); 997 + vcpu_run_complete_io(vcpu); 986 998 987 999 state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0])); 988 1000 989 - vcpu_events_get(vm, vcpuid, &state->events); 990 - vcpu_mp_state_get(vm, vcpuid, &state->mp_state); 991 - vcpu_regs_get(vm, vcpuid, &state->regs); 992 - vcpu_save_xsave_state(vm, vcpuid, state); 1001 + vcpu_events_get(vcpu, &state->events); 1002 + vcpu_mp_state_get(vcpu, &state->mp_state); 1003 + vcpu_regs_get(vcpu, &state->regs); 1004 + vcpu_save_xsave_state(vcpu, state); 993 1005 994 1006 if (kvm_check_cap(KVM_CAP_XCRS)) 995 - vcpu_xcrs_get(vm, vcpuid, &state->xcrs); 1007 + vcpu_xcrs_get(vcpu, &state->xcrs); 996 1008 997 - vcpu_sregs_get(vm, vcpuid, &state->sregs); 1009 + vcpu_sregs_get(vcpu, &state->sregs); 998 1010 999 1011 if (nested_size) { 1000 1012 state->nested.size = sizeof(state->nested_); 1001 1013 1002 - vcpu_nested_state_get(vm, vcpuid, &state->nested); 1014 + vcpu_nested_state_get(vcpu, &state->nested); 1003 1015 TEST_ASSERT(state->nested.size <= nested_size, 1004 1016 "Nested state size too big, %i (KVM_CHECK_CAP gave %i)", 1005 1017 state->nested.size, nested_size); ··· 1010 1022 state->msrs.nmsrs = msr_list->nmsrs; 1011 1023 for (i = 0; i < msr_list->nmsrs; i++) 1012 1024 state->msrs.entries[i].index = msr_list->indices[i]; 1013 - vcpu_msrs_get(vm, vcpuid, &state->msrs); 1025 + vcpu_msrs_get(vcpu, &state->msrs); 1014 1026 1015 - vcpu_debugregs_get(vm, vcpuid, &state->debugregs); 1027 + vcpu_debugregs_get(vcpu, &state->debugregs); 1016 1028 1017 1029 return state; 1018 1030 } 1019 1031 1020 - void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state) 1032 + void 
vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state) 1021 1033 { 1022 - vcpu_sregs_set(vm, vcpuid, &state->sregs); 1023 - vcpu_msrs_set(vm, vcpuid, &state->msrs); 1034 + vcpu_sregs_set(vcpu, &state->sregs); 1035 + vcpu_msrs_set(vcpu, &state->msrs); 1024 1036 1025 1037 if (kvm_check_cap(KVM_CAP_XCRS)) 1026 - vcpu_xcrs_set(vm, vcpuid, &state->xcrs); 1038 + vcpu_xcrs_set(vcpu, &state->xcrs); 1027 1039 1028 - vcpu_xsave_set(vm, vcpuid, state->xsave); 1029 - vcpu_events_set(vm, vcpuid, &state->events); 1030 - vcpu_mp_state_set(vm, vcpuid, &state->mp_state); 1031 - vcpu_debugregs_set(vm, vcpuid, &state->debugregs); 1032 - vcpu_regs_set(vm, vcpuid, &state->regs); 1040 + vcpu_xsave_set(vcpu, state->xsave); 1041 + vcpu_events_set(vcpu, &state->events); 1042 + vcpu_mp_state_set(vcpu, &state->mp_state); 1043 + vcpu_debugregs_set(vcpu, &state->debugregs); 1044 + vcpu_regs_set(vcpu, &state->regs); 1033 1045 1034 1046 if (state->nested.size) 1035 - vcpu_nested_state_set(vm, vcpuid, &state->nested); 1047 + vcpu_nested_state_set(vcpu, &state->nested); 1036 1048 } 1037 1049 1038 1050 void kvm_x86_state_cleanup(struct kvm_x86_state *state) ··· 1158 1170 DEFAULT_CODE_SELECTOR); 1159 1171 } 1160 1172 1161 - void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid) 1173 + void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu) 1162 1174 { 1175 + struct kvm_vm *vm = vcpu->vm; 1163 1176 struct kvm_sregs sregs; 1164 1177 1165 - vcpu_sregs_get(vm, vcpuid, &sregs); 1178 + vcpu_sregs_get(vcpu, &sregs); 1166 1179 sregs.idt.base = vm->idt; 1167 1180 sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1; 1168 1181 sregs.gdt.base = vm->gdt; 1169 1182 sregs.gdt.limit = getpagesize() - 1; 1170 1183 kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs); 1171 - vcpu_sregs_set(vm, vcpuid, &sregs); 1184 + vcpu_sregs_set(vcpu, &sregs); 1172 1185 *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; 1173 1186 } 1174 1187 
··· 1181 1192 handlers[vector] = (vm_vaddr_t)handler; 1182 1193 } 1183 1194 1184 - void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid) 1195 + void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) 1185 1196 { 1186 1197 struct ucall uc; 1187 1198 1188 - if (get_ucall(vm, vcpuid, &uc) == UCALL_UNHANDLED) { 1199 + if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) { 1189 1200 uint64_t vector = uc.args[0]; 1190 1201 1191 1202 TEST_FAIL("Unexpected vectored event in guest (vector:0x%lx)", ··· 1256 1267 return cpuid; 1257 1268 } 1258 1269 1259 - void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid) 1270 + void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu) 1260 1271 { 1261 1272 static struct kvm_cpuid2 *cpuid_full; 1262 1273 struct kvm_cpuid2 *cpuid_sys, *cpuid_hv; ··· 1288 1299 cpuid_full->nent = nent + cpuid_hv->nent; 1289 1300 } 1290 1301 1291 - vcpu_set_cpuid(vm, vcpuid, cpuid_full); 1302 + vcpu_set_cpuid(vcpu, cpuid_full); 1292 1303 } 1293 1304 1294 - struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid) 1305 + struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu) 1295 1306 { 1296 1307 static struct kvm_cpuid2 *cpuid; 1297 1308 1298 1309 cpuid = allocate_kvm_cpuid2(); 1299 1310 1300 - vcpu_ioctl(vm, vcpuid, KVM_GET_SUPPORTED_HV_CPUID, cpuid); 1311 + vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid); 1301 1312 1302 1313 return cpuid; 1303 1314 }
+5 -5
tools/testing/selftests/kvm/lib/x86_64/ucall.c
··· 35 35 : : [port] "d" (UCALL_PIO_PORT), "D" (&uc) : "rax", "memory"); 36 36 } 37 37 38 - uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) 38 + uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) 39 39 { 40 - struct kvm_run *run = vcpu_state(vm, vcpu_id); 40 + struct kvm_run *run = vcpu->run; 41 41 struct ucall ucall = {}; 42 42 43 43 if (uc) ··· 46 46 if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) { 47 47 struct kvm_regs regs; 48 48 49 - vcpu_regs_get(vm, vcpu_id, &regs); 50 - memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), 49 + vcpu_regs_get(vcpu, &regs); 50 + memcpy(&ucall, addr_gva2hva(vcpu->vm, (vm_vaddr_t)regs.rdi), 51 51 sizeof(ucall)); 52 52 53 - vcpu_run_complete_io(vm, vcpu_id); 53 + vcpu_run_complete_io(vcpu); 54 54 if (uc) 55 55 memcpy(uc, &ucall, sizeof(ucall)); 56 56 }
+2 -2
tools/testing/selftests/kvm/lib/x86_64/vmx.c
··· 42 42 uint64_t address:40; 43 43 uint64_t reserved_63_52:12; 44 44 }; 45 - int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id) 45 + int vcpu_enable_evmcs(struct kvm_vcpu *vcpu) 46 46 { 47 47 uint16_t evmcs_ver; 48 48 49 - vcpu_enable_cap(vm, vcpu_id, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 49 + vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 50 50 (unsigned long)&evmcs_ver); 51 51 52 52 /* KVM should return supported EVMCS version range */
+10 -10
tools/testing/selftests/kvm/max_guest_memory_test.c
··· 51 51 } 52 52 } 53 53 54 - static void run_vcpu(struct kvm_vm *vm, uint32_t vcpu_id) 54 + static void run_vcpu(struct kvm_vcpu *vcpu) 55 55 { 56 - vcpu_run(vm, vcpu_id); 57 - ASSERT_EQ(get_ucall(vm, vcpu_id, NULL), UCALL_DONE); 56 + vcpu_run(vcpu); 57 + ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE); 58 58 } 59 59 60 60 static void *vcpu_worker(void *data) ··· 65 65 struct kvm_sregs sregs; 66 66 struct kvm_regs regs; 67 67 68 - vcpu_args_set(vm, vcpu->id, 3, info->start_gpa, info->end_gpa, 68 + vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, 69 69 vm_get_page_size(vm)); 70 70 71 71 /* Snapshot regs before the first run. */ 72 - vcpu_regs_get(vm, vcpu->id, &regs); 72 + vcpu_regs_get(vcpu, &regs); 73 73 rendezvous_with_boss(); 74 74 75 - run_vcpu(vm, vcpu->id); 75 + run_vcpu(vcpu); 76 76 rendezvous_with_boss(); 77 - vcpu_regs_set(vm, vcpu->id, &regs); 78 - vcpu_sregs_get(vm, vcpu->id, &sregs); 77 + vcpu_regs_set(vcpu, &regs); 78 + vcpu_sregs_get(vcpu, &sregs); 79 79 #ifdef __x86_64__ 80 80 /* Toggle CR0.WP to trigger a MMU context reset. */ 81 81 sregs.cr0 ^= X86_CR0_WP; 82 82 #endif 83 - vcpu_sregs_set(vm, vcpu->id, &sregs); 83 + vcpu_sregs_set(vcpu, &sregs); 84 84 rendezvous_with_boss(); 85 85 86 - run_vcpu(vm, vcpu->id); 86 + run_vcpu(vcpu); 87 87 rendezvous_with_boss(); 88 88 89 89 return NULL;
+2 -3
tools/testing/selftests/kvm/memslot_modification_stress_test.c
··· 39 39 static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args) 40 40 { 41 41 struct kvm_vcpu *vcpu = vcpu_args->vcpu; 42 - struct kvm_vm *vm = perf_test_args.vm; 43 42 struct kvm_run *run; 44 43 int ret; 45 44 ··· 46 47 47 48 /* Let the guest access its memory until a stop signal is received */ 48 49 while (READ_ONCE(run_vcpus)) { 49 - ret = _vcpu_run(vm, vcpu->id); 50 + ret = _vcpu_run(vcpu); 50 51 TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret); 51 52 52 - if (get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC) 53 + if (get_ucall(vcpu, NULL) == UCALL_SYNC) 53 54 continue; 54 55 55 56 TEST_ASSERT(false,
+2 -2
tools/testing/selftests/kvm/memslot_perf_test.c
··· 146 146 struct ucall uc; 147 147 148 148 while (1) { 149 - vcpu_run(data->vm, vcpu->id); 149 + vcpu_run(vcpu); 150 150 151 - switch (get_ucall(data->vm, vcpu->id, &uc)) { 151 + switch (get_ucall(vcpu, &uc)) { 152 152 case UCALL_SYNC: 153 153 TEST_ASSERT(uc.args[1] == 0, 154 154 "Unexpected sync ucall, got %lx",
+2 -2
tools/testing/selftests/kvm/rseq_test.c
··· 233 233 pthread_create(&migration_thread, NULL, migration_worker, 0); 234 234 235 235 for (i = 0; !done; i++) { 236 - vcpu_run(vm, vcpu->id); 237 - TEST_ASSERT(get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC, 236 + vcpu_run(vcpu); 237 + TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC, 238 238 "Guest failed?"); 239 239 240 240 /*
+4 -4
tools/testing/selftests/kvm/s390x/memop.c
··· 152 152 if (!vcpu) 153 153 vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo); 154 154 else 155 - vcpu_ioctl(vcpu->vm, vcpu->id, KVM_S390_MEM_OP, ksmo); 155 + vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo); 156 156 } 157 157 158 158 static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo) ··· 162 162 if (!vcpu) 163 163 return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo); 164 164 else 165 - return __vcpu_ioctl(vcpu->vm, vcpu->id, KVM_S390_MEM_OP, ksmo); 165 + return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo); 166 166 } 167 167 168 168 #define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...) \ ··· 250 250 struct ucall uc; \ 251 251 int __stage = (stage); \ 252 252 \ 253 - vcpu_run(__vcpu->vm, __vcpu->id); \ 254 - get_ucall(__vcpu->vm, __vcpu->id, &uc); \ 253 + vcpu_run(__vcpu); \ 254 + get_ucall(__vcpu, &uc); \ 255 255 ASSERT_EQ(uc.cmd, UCALL_SYNC); \ 256 256 ASSERT_EQ(uc.args[1], __stage); \ 257 257 }) \
+14 -14
tools/testing/selftests/kvm/s390x/resets.c
··· 61 61 { 62 62 uint64_t eval_reg; 63 63 64 - vcpu_get_reg(vcpu->vm, vcpu->id, id, &eval_reg); 64 + vcpu_get_reg(vcpu, id, &eval_reg); 65 65 TEST_ASSERT(eval_reg == value, "value == 0x%lx", value); 66 66 } 67 67 ··· 72 72 73 73 irq_state.len = sizeof(buf); 74 74 irq_state.buf = (unsigned long)buf; 75 - irqs = __vcpu_ioctl(vcpu->vm, vcpu->id, KVM_S390_GET_IRQ_STATE, &irq_state); 75 + irqs = __vcpu_ioctl(vcpu, KVM_S390_GET_IRQ_STATE, &irq_state); 76 76 /* 77 77 * irqs contains the number of retrieved interrupts. Any interrupt 78 78 * (notably, the emergency call interrupt we have injected) should ··· 89 89 struct kvm_regs regs; 90 90 struct kvm_fpu fpu; 91 91 92 - vcpu_regs_get(vcpu->vm, vcpu->id, &regs); 92 + vcpu_regs_get(vcpu, &regs); 93 93 TEST_ASSERT(!memcmp(&regs.gprs, regs_null, sizeof(regs.gprs)), "grs == 0"); 94 94 95 - vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs); 95 + vcpu_sregs_get(vcpu, &sregs); 96 96 TEST_ASSERT(!memcmp(&sregs.acrs, regs_null, sizeof(sregs.acrs)), "acrs == 0"); 97 97 98 - vcpu_fpu_get(vcpu->vm, vcpu->id, &fpu); 98 + vcpu_fpu_get(vcpu, &fpu); 99 99 TEST_ASSERT(!memcmp(&fpu.fprs, regs_null, sizeof(fpu.fprs)), "fprs == 0"); 100 100 101 101 /* sync regs */ ··· 133 133 struct kvm_fpu fpu; 134 134 135 135 /* KVM_GET_SREGS */ 136 - vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs); 136 + vcpu_sregs_get(vcpu, &sregs); 137 137 TEST_ASSERT(sregs.crs[0] == 0xE0UL, "cr0 == 0xE0 (KVM_GET_SREGS)"); 138 138 TEST_ASSERT(sregs.crs[14] == 0xC2000000UL, 139 139 "cr14 == 0xC2000000 (KVM_GET_SREGS)"); ··· 159 159 TEST_ASSERT(vcpu->run->psw_addr == 0, "psw_addr == 0 (kvm_run)"); 160 160 TEST_ASSERT(vcpu->run->psw_mask == 0, "psw_mask == 0 (kvm_run)"); 161 161 162 - vcpu_fpu_get(vcpu->vm, vcpu->id, &fpu); 162 + vcpu_fpu_get(vcpu, &fpu); 163 163 TEST_ASSERT(!fpu.fpc, "fpc == 0"); 164 164 165 165 test_one_reg(vcpu, KVM_REG_S390_GBEA, 1); ··· 198 198 irq_state.buf = (unsigned long)buf; 199 199 irq->type = KVM_S390_INT_EMERGENCY; 200 200 irq->u.emerg.code = vcpu->id; 
201 - irqs = __vcpu_ioctl(vcpu->vm, vcpu->id, KVM_S390_SET_IRQ_STATE, &irq_state); 201 + irqs = __vcpu_ioctl(vcpu, KVM_S390_SET_IRQ_STATE, &irq_state); 202 202 TEST_ASSERT(irqs >= 0, "Error injecting EMERGENCY IRQ errno %d\n", errno); 203 203 } 204 204 ··· 221 221 ksft_print_msg("Testing normal reset\n"); 222 222 vm = create_vm(&vcpu); 223 223 224 - vcpu_run(vm, vcpu->id); 224 + vcpu_run(vcpu); 225 225 226 226 inject_irq(vcpu); 227 227 228 - vcpu_ioctl(vm, vcpu->id, KVM_S390_NORMAL_RESET, 0); 228 + vcpu_ioctl(vcpu, KVM_S390_NORMAL_RESET, 0); 229 229 230 230 /* must clears */ 231 231 assert_normal(vcpu); ··· 244 244 ksft_print_msg("Testing initial reset\n"); 245 245 vm = create_vm(&vcpu); 246 246 247 - vcpu_run(vm, vcpu->id); 247 + vcpu_run(vcpu); 248 248 249 249 inject_irq(vcpu); 250 250 251 - vcpu_ioctl(vm, vcpu->id, KVM_S390_INITIAL_RESET, 0); 251 + vcpu_ioctl(vcpu, KVM_S390_INITIAL_RESET, 0); 252 252 253 253 /* must clears */ 254 254 assert_normal(vcpu); ··· 267 267 ksft_print_msg("Testing clear reset\n"); 268 268 vm = create_vm(&vcpu); 269 269 270 - vcpu_run(vm, vcpu->id); 270 + vcpu_run(vcpu); 271 271 272 272 inject_irq(vcpu); 273 273 274 - vcpu_ioctl(vm, vcpu->id, KVM_S390_CLEAR_RESET, 0); 274 + vcpu_ioctl(vcpu, KVM_S390_CLEAR_RESET, 0); 275 275 276 276 /* must clears */ 277 277 assert_normal(vcpu);
+11 -11
tools/testing/selftests/kvm/s390x/sync_regs_test.c
··· 80 80 81 81 /* Request reading invalid register set from VCPU. */ 82 82 run->kvm_valid_regs = INVALID_SYNC_FIELD; 83 - rv = _vcpu_run(vcpu->vm, vcpu->id); 83 + rv = _vcpu_run(vcpu); 84 84 TEST_ASSERT(rv < 0 && errno == EINVAL, 85 85 "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n", 86 86 rv); 87 87 run->kvm_valid_regs = 0; 88 88 89 89 run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS; 90 - rv = _vcpu_run(vcpu->vm, vcpu->id); 90 + rv = _vcpu_run(vcpu); 91 91 TEST_ASSERT(rv < 0 && errno == EINVAL, 92 92 "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n", 93 93 rv); ··· 101 101 102 102 /* Request setting invalid register set into VCPU. */ 103 103 run->kvm_dirty_regs = INVALID_SYNC_FIELD; 104 - rv = _vcpu_run(vcpu->vm, vcpu->id); 104 + rv = _vcpu_run(vcpu); 105 105 TEST_ASSERT(rv < 0 && errno == EINVAL, 106 106 "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n", 107 107 rv); 108 108 run->kvm_dirty_regs = 0; 109 109 110 110 run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS; 111 - rv = _vcpu_run(vcpu->vm, vcpu->id); 111 + rv = _vcpu_run(vcpu); 112 112 TEST_ASSERT(rv < 0 && errno == EINVAL, 113 113 "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n", 114 114 rv); ··· 124 124 125 125 /* Request and verify all valid register sets. 
*/ 126 126 run->kvm_valid_regs = TEST_SYNC_FIELDS; 127 - rv = _vcpu_run(vcpu->vm, vcpu->id); 127 + rv = _vcpu_run(vcpu); 128 128 TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv); 129 129 TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC, 130 130 "Unexpected exit reason: %u (%s)\n", ··· 137 137 run->s390_sieic.icptcode, run->s390_sieic.ipa, 138 138 run->s390_sieic.ipb); 139 139 140 - vcpu_regs_get(vcpu->vm, vcpu->id, &regs); 140 + vcpu_regs_get(vcpu, &regs); 141 141 compare_regs(&regs, &run->s.regs); 142 142 143 - vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs); 143 + vcpu_sregs_get(vcpu, &sregs); 144 144 compare_sregs(&sregs, &run->s.regs); 145 145 } 146 146 ··· 163 163 run->kvm_dirty_regs |= KVM_SYNC_DIAG318; 164 164 } 165 165 166 - rv = _vcpu_run(vcpu->vm, vcpu->id); 166 + rv = _vcpu_run(vcpu); 167 167 TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv); 168 168 TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC, 169 169 "Unexpected exit reason: %u (%s)\n", ··· 179 179 "diag318 sync regs value incorrect 0x%llx.", 180 180 run->s.regs.diag318); 181 181 182 - vcpu_regs_get(vcpu->vm, vcpu->id, &regs); 182 + vcpu_regs_get(vcpu, &regs); 183 183 compare_regs(&regs, &run->s.regs); 184 184 185 - vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs); 185 + vcpu_sregs_get(vcpu, &sregs); 186 186 compare_sregs(&sregs, &run->s.regs); 187 187 } 188 188 ··· 198 198 run->kvm_dirty_regs = 0; 199 199 run->s.regs.gprs[11] = 0xDEADBEEF; 200 200 run->s.regs.diag318 = 0x4B1D; 201 - rv = _vcpu_run(vcpu->vm, vcpu->id); 201 + rv = _vcpu_run(vcpu); 202 202 TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv); 203 203 TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC, 204 204 "Unexpected exit reason: %u (%s)\n",
+2 -2
tools/testing/selftests/kvm/s390x/tprot.c
··· 187 187 struct ucall uc; \ 188 188 int __stage = (stage); \ 189 189 \ 190 - vcpu_run(__vcpu->vm, __vcpu->id); \ 191 - get_ucall(__vcpu->vm, __vcpu->id, &uc); \ 190 + vcpu_run(__vcpu); \ 191 + get_ucall(__vcpu, &uc); \ 192 192 if (uc.cmd == UCALL_ABORT) { \ 193 193 TEST_FAIL("line %lu: %s, hints: %lu, %lu", uc.args[1], \ 194 194 (const char *)uc.args[0], uc.args[2], uc.args[3]); \
+4 -4
tools/testing/selftests/kvm/set_memory_region_test.c
··· 63 63 * has been deleted or while it is being moved . 64 64 */ 65 65 while (1) { 66 - vcpu_run(vcpu->vm, vcpu->id); 66 + vcpu_run(vcpu); 67 67 68 68 if (run->exit_reason == KVM_EXIT_IO) { 69 - cmd = get_ucall(vcpu->vm, vcpu->id, &uc); 69 + cmd = get_ucall(vcpu, &uc); 70 70 if (cmd != UCALL_SYNC) 71 71 break; 72 72 ··· 291 291 run->exit_reason == KVM_EXIT_INTERNAL_ERROR, 292 292 "Unexpected exit reason = %d", run->exit_reason); 293 293 294 - vcpu_regs_get(vm, vcpu->id, &regs); 294 + vcpu_regs_get(vcpu, &regs); 295 295 296 296 /* 297 297 * On AMD, after KVM_EXIT_SHUTDOWN the VMCB has been reinitialized already, ··· 318 318 vcpu = __vm_vcpu_add(vm, 0); 319 319 320 320 vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul); 321 - vcpu_run(vm, vcpu->id); 321 + vcpu_run(vcpu); 322 322 323 323 run = vcpu->run; 324 324 TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
+10 -10
tools/testing/selftests/kvm/steal_time.c
··· 73 73 st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); 74 74 sync_global_to_guest(vcpu->vm, st_gva[i]); 75 75 76 - ret = _vcpu_set_msr(vcpu->vm, vcpu->id, MSR_KVM_STEAL_TIME, 76 + ret = _vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, 77 77 (ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK); 78 78 TEST_ASSERT(ret == 0, "Bad GPA didn't fail"); 79 79 80 - vcpu_set_msr(vcpu->vm, vcpu->id, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED); 80 + vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED); 81 81 } 82 82 83 83 static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) ··· 163 163 .attr = KVM_ARM_VCPU_PVTIME_IPA, 164 164 }; 165 165 166 - return !__vcpu_ioctl(vcpu->vm, vcpu->id, KVM_HAS_DEVICE_ATTR, &dev); 166 + return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev); 167 167 } 168 168 169 169 static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) ··· 178 178 .addr = (uint64_t)&st_ipa, 179 179 }; 180 180 181 - vcpu_ioctl(vm, vcpu->id, KVM_HAS_DEVICE_ATTR, &dev); 181 + vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev); 182 182 183 183 /* ST_GPA_BASE is identity mapped */ 184 184 st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); 185 185 sync_global_to_guest(vm, st_gva[i]); 186 186 187 187 st_ipa = (ulong)st_gva[i] | 1; 188 - ret = __vcpu_ioctl(vm, vcpu->id, KVM_SET_DEVICE_ATTR, &dev); 188 + ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev); 189 189 TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL"); 190 190 191 191 st_ipa = (ulong)st_gva[i]; 192 - vcpu_ioctl(vm, vcpu->id, KVM_SET_DEVICE_ATTR, &dev); 192 + vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev); 193 193 194 - ret = __vcpu_ioctl(vm, vcpu->id, KVM_SET_DEVICE_ATTR, &dev); 194 + ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev); 195 195 TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST"); 196 196 } 197 197 ··· 227 227 { 228 228 struct ucall uc; 229 229 230 - vcpu_run(vcpu->vm, vcpu->id); 230 + vcpu_run(vcpu); 231 231 232 - switch 
(get_ucall(vcpu->vm, vcpu->id, &uc)) { 232 + switch (get_ucall(vcpu, &uc)) { 233 233 case UCALL_SYNC: 234 234 case UCALL_DONE: 235 235 break; ··· 280 280 for (i = 0; i < NR_VCPUS; ++i) { 281 281 steal_time_init(vcpus[i], i); 282 282 283 - vcpu_args_set(vm, vcpus[i]->id, 1, i); 283 + vcpu_args_set(vcpus[i], 1, i); 284 284 285 285 /* First VCPU run initializes steal-time */ 286 286 run_vcpu(vcpus[i]);
+6 -7
tools/testing/selftests/kvm/system_counter_offset_test.c
··· 28 28 29 29 static void check_preconditions(struct kvm_vcpu *vcpu) 30 30 { 31 - if (!__vcpu_has_device_attr(vcpu->vm, vcpu->id, KVM_VCPU_TSC_CTRL, 32 - KVM_VCPU_TSC_OFFSET)) 31 + if (!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET)) 33 32 return; 34 33 35 34 print_skip("KVM_VCPU_TSC_OFFSET not supported; skipping test"); ··· 37 38 38 39 static void setup_system_counter(struct kvm_vcpu *vcpu, struct test_case *test) 39 40 { 40 - vcpu_device_attr_set(vcpu->vm, vcpu->id, KVM_VCPU_TSC_CTRL, 41 - KVM_VCPU_TSC_OFFSET, &test->tsc_offset); 41 + vcpu_device_attr_set(vcpu, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET, 42 + &test->tsc_offset); 42 43 } 43 44 44 45 static uint64_t guest_read_system_counter(struct test_case *test) ··· 100 101 101 102 setup_system_counter(vcpu, test); 102 103 start = host_read_guest_system_counter(test); 103 - vcpu_run(vcpu->vm, vcpu->id); 104 + vcpu_run(vcpu); 104 105 end = host_read_guest_system_counter(test); 105 106 106 - switch (get_ucall(vcpu->vm, vcpu->id, &uc)) { 107 + switch (get_ucall(vcpu, &uc)) { 107 108 case UCALL_SYNC: 108 109 handle_sync(&uc, start, end); 109 110 break; ··· 112 113 return; 113 114 default: 114 115 TEST_ASSERT(0, "unhandled ucall %ld\n", 115 - get_ucall(vcpu->vm, vcpu->id, &uc)); 116 + get_ucall(vcpu, &uc)); 116 117 } 117 118 } 118 119 }
+11 -11
tools/testing/selftests/kvm/x86_64/amx_test.c
··· 351 351 } 352 352 353 353 run = vcpu->run; 354 - vcpu_regs_get(vm, vcpu->id, &regs1); 354 + vcpu_regs_get(vcpu, &regs1); 355 355 356 356 /* Register #NM handler */ 357 357 vm_init_descriptor_tables(vm); 358 - vcpu_init_descriptor_tables(vm, vcpu->id); 358 + vcpu_init_descriptor_tables(vcpu); 359 359 vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler); 360 360 361 361 /* amx cfg for guest_code */ ··· 369 369 /* xsave data for guest_code */ 370 370 xsavedata = vm_vaddr_alloc_pages(vm, 3); 371 371 memset(addr_gva2hva(vm, xsavedata), 0, 3 * getpagesize()); 372 - vcpu_args_set(vm, vcpu->id, 3, amx_cfg, tiledata, xsavedata); 372 + vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xsavedata); 373 373 374 374 for (stage = 1; ; stage++) { 375 - vcpu_run(vm, vcpu->id); 375 + vcpu_run(vcpu); 376 376 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 377 377 "Stage %d: unexpected exit reason: %u (%s),\n", 378 378 stage, run->exit_reason, 379 379 exit_reason_str(run->exit_reason)); 380 380 381 - switch (get_ucall(vm, vcpu->id, &uc)) { 381 + switch (get_ucall(vcpu, &uc)) { 382 382 case UCALL_ABORT: 383 383 TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], 384 384 __FILE__, uc.args[1]); ··· 403 403 * size subtract 8K amx size. 404 404 */ 405 405 amx_offset = xsave_restore_size - NUM_TILES*TILE_SIZE; 406 - state = vcpu_save_state(vm, vcpu->id); 406 + state = vcpu_save_state(vcpu); 407 407 void *amx_start = (void *)state->xsave + amx_offset; 408 408 void *tiles_data = (void *)addr_gva2hva(vm, tiledata); 409 409 /* Only check TMM0 register, 1 tile */ ··· 424 424 TEST_FAIL("Unknown ucall %lu", uc.cmd); 425 425 } 426 426 427 - state = vcpu_save_state(vm, vcpu->id); 427 + state = vcpu_save_state(vcpu); 428 428 memset(&regs1, 0, sizeof(regs1)); 429 - vcpu_regs_get(vm, vcpu->id, &regs1); 429 + vcpu_regs_get(vcpu, &regs1); 430 430 431 431 kvm_vm_release(vm); 432 432 433 433 /* Restore state in a new VM. 
*/ 434 434 vcpu = vm_recreate_with_one_vcpu(vm); 435 - vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid()); 436 - vcpu_load_state(vm, vcpu->id, state); 435 + vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid()); 436 + vcpu_load_state(vcpu, state); 437 437 run = vcpu->run; 438 438 kvm_x86_state_cleanup(state); 439 439 440 440 memset(&regs2, 0, sizeof(regs2)); 441 - vcpu_regs_get(vm, vcpu->id, &regs2); 441 + vcpu_regs_get(vcpu, &regs2); 442 442 TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)), 443 443 "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx", 444 444 (ulong) regs2.rdi, (ulong) regs2.rsi);
+7 -7
tools/testing/selftests/kvm/x86_64/cpuid_test.c
··· 120 120 { 121 121 struct ucall uc; 122 122 123 - vcpu_run(vcpu->vm, vcpu->id); 123 + vcpu_run(vcpu); 124 124 125 - switch (get_ucall(vcpu->vm, vcpu->id, &uc)) { 125 + switch (get_ucall(vcpu, &uc)) { 126 126 case UCALL_SYNC: 127 127 TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") && 128 128 uc.args[1] == stage + 1, ··· 159 159 u32 eax, ebx, x; 160 160 161 161 /* Setting unmodified CPUID is allowed */ 162 - rc = __vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid); 162 + rc = __vcpu_set_cpuid(vcpu, cpuid); 163 163 TEST_ASSERT(!rc, "Setting unmodified CPUID after KVM_RUN failed: %d", rc); 164 164 165 165 /* Changing CPU features is forbidden */ 166 166 ent = get_cpuid(cpuid, 0x7, 0); 167 167 ebx = ent->ebx; 168 168 ent->ebx--; 169 - rc = __vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid); 169 + rc = __vcpu_set_cpuid(vcpu, cpuid); 170 170 TEST_ASSERT(rc, "Changing CPU features should fail"); 171 171 ent->ebx = ebx; 172 172 ··· 175 175 eax = ent->eax; 176 176 x = eax & 0xff; 177 177 ent->eax = (eax & ~0xffu) | (x - 1); 178 - rc = __vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid); 178 + rc = __vcpu_set_cpuid(vcpu, cpuid); 179 179 TEST_ASSERT(rc, "Changing MAXPHYADDR should fail"); 180 180 ent->eax = eax; 181 181 } ··· 191 191 vm = vm_create_with_one_vcpu(&vcpu, guest_main); 192 192 193 193 supp_cpuid = kvm_get_supported_cpuid(); 194 - cpuid2 = vcpu_get_cpuid(vm, vcpu->id); 194 + cpuid2 = vcpu_get_cpuid(vcpu); 195 195 196 196 compare_cpuids(supp_cpuid, cpuid2); 197 197 198 198 vcpu_alloc_cpuid(vm, &cpuid_gva, cpuid2); 199 199 200 - vcpu_args_set(vm, vcpu->id, 1, cpuid_gva); 200 + vcpu_args_set(vcpu, 1, cpuid_gva); 201 201 202 202 for (stage = 0; stage < 3; stage++) 203 203 run_vcpu(vcpu, stage);
+4 -4
tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
··· 82 82 run = vcpu->run; 83 83 84 84 while (1) { 85 - vcpu_run(vm, vcpu->id); 85 + vcpu_run(vcpu); 86 86 87 87 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 88 88 "Unexpected exit reason: %u (%s),\n", 89 89 run->exit_reason, 90 90 exit_reason_str(run->exit_reason)); 91 91 92 - switch (get_ucall(vm, vcpu->id, &uc)) { 92 + switch (get_ucall(vcpu, &uc)) { 93 93 case UCALL_SYNC: 94 94 /* emulate hypervisor clearing CR4.OSXSAVE */ 95 - vcpu_sregs_get(vm, vcpu->id, &sregs); 95 + vcpu_sregs_get(vcpu, &sregs); 96 96 sregs.cr4 &= ~X86_CR4_OSXSAVE; 97 - vcpu_sregs_set(vm, vcpu->id, &sregs); 97 + vcpu_sregs_set(vcpu, &sregs); 98 98 break; 99 99 case UCALL_ABORT: 100 100 TEST_FAIL("Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
+15 -15
tools/testing/selftests/kvm/x86_64/debug_regs.c
··· 70 70 { 71 71 struct kvm_regs regs; 72 72 73 - vcpu_regs_get(vcpu->vm, vcpu->id, &regs); 73 + vcpu_regs_get(vcpu, &regs); 74 74 regs.rip += insn_len; 75 - vcpu_regs_set(vcpu->vm, vcpu->id, &regs); 75 + vcpu_regs_set(vcpu, &regs); 76 76 } 77 77 78 78 int main(void) ··· 106 106 /* Test software BPs - int3 */ 107 107 memset(&debug, 0, sizeof(debug)); 108 108 debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; 109 - vcpu_guest_debug_set(vm, vcpu->id, &debug); 110 - vcpu_run(vm, vcpu->id); 109 + vcpu_guest_debug_set(vcpu, &debug); 110 + vcpu_run(vcpu); 111 111 TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG && 112 112 run->debug.arch.exception == BP_VECTOR && 113 113 run->debug.arch.pc == CAST_TO_RIP(sw_bp), ··· 122 122 debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; 123 123 debug.arch.debugreg[i] = CAST_TO_RIP(hw_bp); 124 124 debug.arch.debugreg[7] = 0x400 | (1UL << (2*i+1)); 125 - vcpu_guest_debug_set(vm, vcpu->id, &debug); 126 - vcpu_run(vm, vcpu->id); 125 + vcpu_guest_debug_set(vcpu, &debug); 126 + vcpu_run(vcpu); 127 127 target_dr6 = 0xffff0ff0 | (1UL << i); 128 128 TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG && 129 129 run->debug.arch.exception == DB_VECTOR && ··· 145 145 debug.arch.debugreg[i] = CAST_TO_RIP(guest_value); 146 146 debug.arch.debugreg[7] = 0x00000400 | (1UL << (2*i+1)) | 147 147 (0x000d0000UL << (4*i)); 148 - vcpu_guest_debug_set(vm, vcpu->id, &debug); 149 - vcpu_run(vm, vcpu->id); 148 + vcpu_guest_debug_set(vcpu, &debug); 149 + vcpu_run(vcpu); 150 150 target_dr6 = 0xffff0ff0 | (1UL << i); 151 151 TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG && 152 152 run->debug.arch.exception == DB_VECTOR && ··· 172 172 debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP | 173 173 KVM_GUESTDBG_BLOCKIRQ; 174 174 debug.arch.debugreg[7] = 0x00000400; 175 - vcpu_guest_debug_set(vm, vcpu->id, &debug); 176 - vcpu_run(vm, vcpu->id); 175 + vcpu_guest_debug_set(vcpu, &debug); 176 + vcpu_run(vcpu); 177 177 
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG && 178 178 run->debug.arch.exception == DB_VECTOR && 179 179 run->debug.arch.pc == target_rip && ··· 189 189 memset(&debug, 0, sizeof(debug)); 190 190 debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; 191 191 debug.arch.debugreg[7] = 0x400 | DR7_GD; 192 - vcpu_guest_debug_set(vm, vcpu->id, &debug); 193 - vcpu_run(vm, vcpu->id); 192 + vcpu_guest_debug_set(vcpu, &debug); 193 + vcpu_run(vcpu); 194 194 target_dr6 = 0xffff0ff0 | DR6_BD; 195 195 TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG && 196 196 run->debug.arch.exception == DB_VECTOR && ··· 204 204 205 205 /* Disable all debug controls, run to the end */ 206 206 memset(&debug, 0, sizeof(debug)); 207 - vcpu_guest_debug_set(vm, vcpu->id, &debug); 207 + vcpu_guest_debug_set(vcpu, &debug); 208 208 209 - vcpu_run(vm, vcpu->id); 209 + vcpu_run(vcpu); 210 210 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, "KVM_EXIT_IO"); 211 - cmd = get_ucall(vm, vcpu->id, &uc); 211 + cmd = get_ucall(vcpu, &uc); 212 212 TEST_ASSERT(cmd == UCALL_DONE, "UCALL_DONE"); 213 213 214 214 kvm_vm_free(vm);
+10 -10
tools/testing/selftests/kvm/x86_64/emulator_error_test.c
··· 83 83 * contained an flds instruction that is 2-bytes in 84 84 * length (ie: no prefix, no SIB, no displacement). 85 85 */ 86 - vcpu_regs_get(vcpu->vm, vcpu->id, &regs); 86 + vcpu_regs_get(vcpu, &regs); 87 87 regs.rip += 2; 88 - vcpu_regs_set(vcpu->vm, vcpu->id, &regs); 88 + vcpu_regs_set(vcpu, &regs); 89 89 } 90 90 } 91 91 } ··· 101 101 struct ucall uc; 102 102 103 103 if (vcpu->run->exit_reason == KVM_EXIT_IO && 104 - get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_ABORT) { 104 + get_ucall(vcpu, &uc) == UCALL_ABORT) { 105 105 do_guest_assert(&uc); 106 106 } 107 107 } ··· 118 118 run->exit_reason, 119 119 exit_reason_str(run->exit_reason)); 120 120 121 - TEST_ASSERT(get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_DONE, 121 + TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE, 122 122 "Unexpected ucall command: %lu, expected UCALL_DONE (%d)", 123 123 uc.cmd, UCALL_DONE); 124 124 } ··· 133 133 run->exit_reason, 134 134 exit_reason_str(run->exit_reason)); 135 135 136 - switch (get_ucall(vcpu->vm, vcpu->id, &uc)) { 136 + switch (get_ucall(vcpu, &uc)) { 137 137 case UCALL_SYNC: 138 138 break; 139 139 case UCALL_ABORT: ··· 175 175 entry->eax = (entry->eax & 0xffffff00) | MAXPHYADDR; 176 176 set_cpuid(cpuid, entry); 177 177 178 - vcpu_set_cpuid(vm, vcpu->id, cpuid); 178 + vcpu_set_cpuid(vcpu, cpuid); 179 179 180 180 rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE); 181 181 TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable"); ··· 190 190 virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1); 191 191 hva = addr_gpa2hva(vm, MEM_REGION_GPA); 192 192 memset(hva, 0, PAGE_SIZE); 193 - pte = vm_get_page_table_entry(vm, vcpu->id, MEM_REGION_GVA); 194 - vm_set_page_table_entry(vm, vcpu->id, MEM_REGION_GVA, pte | (1ull << 36)); 193 + pte = vm_get_page_table_entry(vm, vcpu, MEM_REGION_GVA); 194 + vm_set_page_table_entry(vm, vcpu, MEM_REGION_GVA, pte | (1ull << 36)); 195 195 196 - vcpu_run(vm, vcpu->id); 196 + vcpu_run(vcpu); 197 197 process_exit_on_emulation_error(vcpu); 198 
- vcpu_run(vm, vcpu->id); 198 + vcpu_run(vcpu); 199 199 200 200 TEST_ASSERT(process_ucall(vcpu) == UCALL_DONE, "Expected UCALL_DONE"); 201 201
+14 -14
tools/testing/selftests/kvm/x86_64/evmcs_test.c
··· 161 161 { 162 162 struct kvm_vcpu_events events; 163 163 164 - vcpu_events_get(vcpu->vm, vcpu->id, &events); 164 + vcpu_events_get(vcpu, &events); 165 165 166 166 events.nmi.pending = 1; 167 167 events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING; 168 168 169 - vcpu_events_set(vcpu->vm, vcpu->id, &events); 169 + vcpu_events_set(vcpu, &events); 170 170 } 171 171 172 172 static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm, ··· 175 175 struct kvm_regs regs1, regs2; 176 176 struct kvm_x86_state *state; 177 177 178 - state = vcpu_save_state(vm, vcpu->id); 178 + state = vcpu_save_state(vcpu); 179 179 memset(&regs1, 0, sizeof(regs1)); 180 - vcpu_regs_get(vm, vcpu->id, &regs1); 180 + vcpu_regs_get(vcpu, &regs1); 181 181 182 182 kvm_vm_release(vm); 183 183 184 184 /* Restore state in a new VM. */ 185 185 vcpu = vm_recreate_with_one_vcpu(vm); 186 - vcpu_set_hv_cpuid(vm, vcpu->id); 187 - vcpu_enable_evmcs(vm, vcpu->id); 188 - vcpu_load_state(vm, vcpu->id, state); 186 + vcpu_set_hv_cpuid(vcpu); 187 + vcpu_enable_evmcs(vcpu); 188 + vcpu_load_state(vcpu, state); 189 189 kvm_x86_state_cleanup(state); 190 190 191 191 memset(&regs2, 0, sizeof(regs2)); 192 - vcpu_regs_get(vm, vcpu->id, &regs2); 192 + vcpu_regs_get(vcpu, &regs2); 193 193 TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)), 194 194 "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx", 195 195 (ulong) regs2.rdi, (ulong) regs2.rsi); ··· 215 215 exit(KSFT_SKIP); 216 216 } 217 217 218 - vcpu_set_hv_cpuid(vm, vcpu->id); 219 - vcpu_enable_evmcs(vm, vcpu->id); 218 + vcpu_set_hv_cpuid(vcpu); 219 + vcpu_enable_evmcs(vcpu); 220 220 221 221 vcpu_alloc_vmx(vm, &vmx_pages_gva); 222 - vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva); 222 + vcpu_args_set(vcpu, 1, vmx_pages_gva); 223 223 224 224 vm_init_descriptor_tables(vm); 225 - vcpu_init_descriptor_tables(vm, vcpu->id); 225 + vcpu_init_descriptor_tables(vcpu); 226 226 vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); 227 227 
vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler); 228 228 ··· 231 231 for (stage = 1;; stage++) { 232 232 run = vcpu->run; 233 233 234 - vcpu_run(vm, vcpu->id); 234 + vcpu_run(vcpu); 235 235 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 236 236 "Stage %d: unexpected exit reason: %u (%s),\n", 237 237 stage, run->exit_reason, 238 238 exit_reason_str(run->exit_reason)); 239 239 240 - switch (get_ucall(vm, vcpu->id, &uc)) { 240 + switch (get_ucall(vcpu, &uc)) { 241 241 case UCALL_ABORT: 242 242 TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], 243 243 __FILE__, uc.args[1]);
+3 -3
tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
··· 95 95 static void setup_ud_vector(struct kvm_vcpu *vcpu) 96 96 { 97 97 vm_init_descriptor_tables(vcpu->vm); 98 - vcpu_init_descriptor_tables(vcpu->vm, vcpu->id); 98 + vcpu_init_descriptor_tables(vcpu); 99 99 vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler); 100 100 } 101 101 ··· 104 104 struct kvm_run *run = vcpu->run; 105 105 struct ucall uc; 106 106 107 - vcpu_run(vcpu->vm, vcpu->id); 108 - switch (get_ucall(vcpu->vm, vcpu->id, &uc)) { 107 + vcpu_run(vcpu); 108 + switch (get_ucall(vcpu, &uc)) { 109 109 case UCALL_SYNC: 110 110 pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]); 111 111 break;
+7 -7
tools/testing/selftests/kvm/x86_64/hyperv_clock.c
··· 178 178 u64 tsc_freq, r1, r2, t1, t2; 179 179 s64 delta_ns; 180 180 181 - tsc_freq = vcpu_get_msr(vcpu->vm, vcpu->id, HV_X64_MSR_TSC_FREQUENCY); 181 + tsc_freq = vcpu_get_msr(vcpu, HV_X64_MSR_TSC_FREQUENCY); 182 182 TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero"); 183 183 184 184 /* For increased accuracy, take mean rdtsc() before and afrer ioctl */ 185 185 r1 = rdtsc(); 186 - t1 = vcpu_get_msr(vcpu->vm, vcpu->id, HV_X64_MSR_TIME_REF_COUNT); 186 + t1 = vcpu_get_msr(vcpu, HV_X64_MSR_TIME_REF_COUNT); 187 187 r1 = (r1 + rdtsc()) / 2; 188 188 nop_loop(); 189 189 r2 = rdtsc(); 190 - t2 = vcpu_get_msr(vcpu->vm, vcpu->id, HV_X64_MSR_TIME_REF_COUNT); 190 + t2 = vcpu_get_msr(vcpu, HV_X64_MSR_TIME_REF_COUNT); 191 191 r2 = (r2 + rdtsc()) / 2; 192 192 193 193 TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2); ··· 215 215 vm = vm_create_with_one_vcpu(&vcpu, guest_main); 216 216 run = vcpu->run; 217 217 218 - vcpu_set_hv_cpuid(vm, vcpu->id); 218 + vcpu_set_hv_cpuid(vcpu); 219 219 220 220 tsc_page_gva = vm_vaddr_alloc_page(vm); 221 221 memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize()); 222 222 TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0, 223 223 "TSC page has to be page aligned\n"); 224 - vcpu_args_set(vm, vcpu->id, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva)); 224 + vcpu_args_set(vcpu, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva)); 225 225 226 226 host_check_tsc_msr_rdtsc(vcpu); 227 227 228 228 for (stage = 1;; stage++) { 229 - vcpu_run(vm, vcpu->id); 229 + vcpu_run(vcpu); 230 230 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 231 231 "Stage %d: unexpected exit reason: %u (%s),\n", 232 232 stage, run->exit_reason, 233 233 exit_reason_str(run->exit_reason)); 234 234 235 - switch (get_ucall(vm, vcpu->id, &uc)) { 235 + switch (get_ucall(vcpu, &uc)) { 236 236 case UCALL_ABORT: 237 237 TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], 238 238 __FILE__, uc.args[1]);
+4 -4
tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
··· 119 119 int ret; 120 120 121 121 if (vcpu) 122 - ret = __vcpu_ioctl(vm, vcpu->id, KVM_GET_SUPPORTED_HV_CPUID, &cpuid); 122 + ret = __vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, &cpuid); 123 123 else 124 124 ret = __kvm_ioctl(vm_get_kvm_fd(vm), KVM_GET_SUPPORTED_HV_CPUID, &cpuid); 125 125 ··· 147 147 /* Test vCPU ioctl version */ 148 148 test_hv_cpuid_e2big(vm, vcpu); 149 149 150 - hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vm, vcpu->id); 150 + hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vcpu); 151 151 test_hv_cpuid(hv_cpuid_entries, false); 152 152 free(hv_cpuid_entries); 153 153 ··· 156 156 print_skip("Enlightened VMCS is unsupported"); 157 157 goto do_sys; 158 158 } 159 - vcpu_enable_evmcs(vm, vcpu->id); 160 - hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vm, vcpu->id); 159 + vcpu_enable_evmcs(vcpu); 160 + hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vcpu); 161 161 test_hv_cpuid(hv_cpuid_entries, true); 162 162 free(hv_cpuid_entries); 163 163
+14 -14
tools/testing/selftests/kvm/x86_64/hyperv_features.c
··· 161 161 "failed to set HYPERV_CPUID_ENLIGHTMENT_INFO leaf"); 162 162 TEST_ASSERT(set_cpuid(cpuid, dbg), 163 163 "failed to set HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES leaf"); 164 - vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid); 164 + vcpu_set_cpuid(vcpu, cpuid); 165 165 } 166 166 167 167 static void guest_test_msrs_access(void) ··· 191 191 memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize()); 192 192 msr = addr_gva2hva(vm, msr_gva); 193 193 194 - vcpu_args_set(vm, vcpu->id, 1, msr_gva); 195 - vcpu_enable_cap(vm, vcpu->id, KVM_CAP_HYPERV_ENFORCE_CPUID, 1); 194 + vcpu_args_set(vcpu, 1, msr_gva); 195 + vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1); 196 196 197 - vcpu_set_hv_cpuid(vm, vcpu->id); 197 + vcpu_set_hv_cpuid(vcpu); 198 198 199 199 best = kvm_get_supported_hv_cpuid(); 200 200 201 201 vm_init_descriptor_tables(vm); 202 - vcpu_init_descriptor_tables(vm, vcpu->id); 202 + vcpu_init_descriptor_tables(vcpu); 203 203 vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler); 204 204 205 205 run = vcpu->run; ··· 333 333 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2 334 334 * capability enabled and guest visible CPUID bit unset. 
335 335 */ 336 - vcpu_enable_cap(vm, vcpu->id, KVM_CAP_HYPERV_SYNIC2, 0); 336 + vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0); 337 337 break; 338 338 case 22: 339 339 feat.eax |= HV_MSR_SYNIC_AVAILABLE; ··· 471 471 else 472 472 pr_debug("Stage %d: finish\n", stage); 473 473 474 - vcpu_run(vm, vcpu->id); 474 + vcpu_run(vcpu); 475 475 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 476 476 "unexpected exit reason: %u (%s)", 477 477 run->exit_reason, exit_reason_str(run->exit_reason)); 478 478 479 - switch (get_ucall(vm, vcpu->id, &uc)) { 479 + switch (get_ucall(vcpu, &uc)) { 480 480 case UCALL_SYNC: 481 481 TEST_ASSERT(uc.args[1] == 0, 482 482 "Unexpected stage: %ld (0 expected)\n", ··· 520 520 vm = vm_create_with_one_vcpu(&vcpu, guest_hcall); 521 521 522 522 vm_init_descriptor_tables(vm); 523 - vcpu_init_descriptor_tables(vm, vcpu->id); 523 + vcpu_init_descriptor_tables(vcpu); 524 524 vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); 525 525 526 526 /* Hypercall input/output */ ··· 531 531 hcall_params = vm_vaddr_alloc_page(vm); 532 532 memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize()); 533 533 534 - vcpu_args_set(vm, vcpu->id, 2, addr_gva2gpa(vm, hcall_page), hcall_params); 535 - vcpu_enable_cap(vm, vcpu->id, KVM_CAP_HYPERV_ENFORCE_CPUID, 1); 534 + vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params); 535 + vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1); 536 536 537 - vcpu_set_hv_cpuid(vm, vcpu->id); 537 + vcpu_set_hv_cpuid(vcpu); 538 538 539 539 best = kvm_get_supported_hv_cpuid(); 540 540 ··· 641 641 else 642 642 pr_debug("Stage %d: finish\n", stage); 643 643 644 - vcpu_run(vm, vcpu->id); 644 + vcpu_run(vcpu); 645 645 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 646 646 "unexpected exit reason: %u (%s)", 647 647 run->exit_reason, exit_reason_str(run->exit_reason)); 648 648 649 - switch (get_ucall(vm, vcpu->id, &uc)) { 649 + switch (get_ucall(vcpu, &uc)) { 650 650 case UCALL_SYNC: 651 651 TEST_ASSERT(uc.args[1] == 0, 652 
652 "Unexpected stage: %ld (0 expected)\n",
+4 -4
tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
··· 133 133 } 134 134 /* Create VM */ 135 135 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 136 - vcpu_set_hv_cpuid(vm, vcpu->id); 136 + vcpu_set_hv_cpuid(vcpu); 137 137 run = vcpu->run; 138 138 vcpu_alloc_svm(vm, &nested_gva); 139 - vcpu_args_set(vm, vcpu->id, 1, nested_gva); 139 + vcpu_args_set(vcpu, 1, nested_gva); 140 140 141 141 for (stage = 1;; stage++) { 142 - vcpu_run(vm, vcpu->id); 142 + vcpu_run(vcpu); 143 143 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 144 144 "Stage %d: unexpected exit reason: %u (%s),\n", 145 145 stage, run->exit_reason, 146 146 exit_reason_str(run->exit_reason)); 147 147 148 - switch (get_ucall(vm, vcpu->id, &uc)) { 148 + switch (get_ucall(vcpu, &uc)) { 149 149 case UCALL_ABORT: 150 150 TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], 151 151 __FILE__, uc.args[1]);
+3 -3
tools/testing/selftests/kvm/x86_64/kvm_clock_test.c
··· 116 116 117 117 vm_ioctl(vm, KVM_GET_CLOCK, &start); 118 118 119 - vcpu_run(vcpu->vm, vcpu->id); 119 + vcpu_run(vcpu); 120 120 vm_ioctl(vm, KVM_GET_CLOCK, &end); 121 121 122 122 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 123 123 "unexpected exit reason: %u (%s)", 124 124 run->exit_reason, exit_reason_str(run->exit_reason)); 125 125 126 - switch (get_ucall(vcpu->vm, vcpu->id, &uc)) { 126 + switch (get_ucall(vcpu, &uc)) { 127 127 case UCALL_SYNC: 128 128 handle_sync(&uc, &start, &end); 129 129 break; ··· 193 193 194 194 pvti_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000); 195 195 pvti_gpa = addr_gva2gpa(vm, pvti_gva); 196 - vcpu_args_set(vm, vcpu->id, 2, pvti_gpa, pvti_gva); 196 + vcpu_args_set(vcpu, 2, pvti_gpa, pvti_gva); 197 197 198 198 enter_guest(vcpu); 199 199 kvm_vm_free(vm);
+5 -5
tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
··· 177 177 struct ucall uc; 178 178 179 179 while (true) { 180 - vcpu_run(vcpu->vm, vcpu->id); 180 + vcpu_run(vcpu); 181 181 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 182 182 "unexpected exit reason: %u (%s)", 183 183 run->exit_reason, exit_reason_str(run->exit_reason)); 184 184 185 - switch (get_ucall(vcpu->vm, vcpu->id, &uc)) { 185 + switch (get_ucall(vcpu, &uc)) { 186 186 case UCALL_PR_MSR: 187 187 pr_msr(&uc); 188 188 break; ··· 211 211 212 212 vm = vm_create_with_one_vcpu(&vcpu, guest_main); 213 213 214 - vcpu_enable_cap(vm, vcpu->id, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1); 214 + vcpu_enable_cap(vcpu, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1); 215 215 216 216 best = kvm_get_supported_cpuid(); 217 217 clear_kvm_cpuid_features(best); 218 - vcpu_set_cpuid(vm, vcpu->id, best); 218 + vcpu_set_cpuid(vcpu, best); 219 219 220 220 vm_init_descriptor_tables(vm); 221 - vcpu_init_descriptor_tables(vm, vcpu->id); 221 + vcpu_init_descriptor_tables(vcpu); 222 222 vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler); 223 223 224 224 enter_guest(vcpu);
+5 -5
tools/testing/selftests/kvm/x86_64/mmu_role_test.c
··· 35 35 /* Map 1gb page without a backing memlot. */ 36 36 __virt_pg_map(vm, MMIO_GPA, MMIO_GPA, PG_LEVEL_1G); 37 37 38 - vcpu_run(vm, vcpu->id); 38 + vcpu_run(vcpu); 39 39 40 40 /* Guest access to the 1gb page should trigger MMIO. */ 41 41 TEST_ASSERT(run->exit_reason == KVM_EXIT_MMIO, ··· 54 54 * returns the struct that contains the entry being modified. Eww. 55 55 */ 56 56 *cpuid_reg = evil_cpuid_val; 57 - vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid()); 57 + vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid()); 58 58 59 59 /* 60 60 * Add a dummy memslot to coerce KVM into bumping the MMIO generation. ··· 67 67 68 68 /* Set up a #PF handler to eat the RSVD #PF and signal all done! */ 69 69 vm_init_descriptor_tables(vm); 70 - vcpu_init_descriptor_tables(vm, vcpu->id); 70 + vcpu_init_descriptor_tables(vcpu); 71 71 vm_install_exception_handler(vm, PF_VECTOR, guest_pf_handler); 72 72 73 - vcpu_run(vm, vcpu->id); 73 + vcpu_run(vcpu); 74 74 75 - cmd = get_ucall(vm, vcpu->id, NULL); 75 + cmd = get_ucall(vcpu, NULL); 76 76 TEST_ASSERT(cmd == UCALL_DONE, 77 77 "Unexpected guest exit, exit_reason=%s, ucall.cmd = %lu\n", 78 78 exit_reason_str(run->exit_reason), cmd);
+7 -7
tools/testing/selftests/kvm/x86_64/platform_info_test.c
··· 40 40 struct ucall uc; 41 41 42 42 vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, true); 43 - vcpu_run(vcpu->vm, vcpu->id); 43 + vcpu_run(vcpu); 44 44 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 45 45 "Exit_reason other than KVM_EXIT_IO: %u (%s),\n", 46 46 run->exit_reason, 47 47 exit_reason_str(run->exit_reason)); 48 - get_ucall(vcpu->vm, vcpu->id, &uc); 48 + get_ucall(vcpu, &uc); 49 49 TEST_ASSERT(uc.cmd == UCALL_SYNC, 50 50 "Received ucall other than UCALL_SYNC: %lu\n", uc.cmd); 51 51 TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) == ··· 59 59 struct kvm_run *run = vcpu->run; 60 60 61 61 vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, false); 62 - vcpu_run(vcpu->vm, vcpu->id); 62 + vcpu_run(vcpu); 63 63 TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, 64 64 "Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n", 65 65 run->exit_reason, ··· 84 84 85 85 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 86 86 87 - msr_platform_info = vcpu_get_msr(vm, vcpu->id, MSR_PLATFORM_INFO); 88 - vcpu_set_msr(vm, vcpu->id, MSR_PLATFORM_INFO, 89 - msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO); 87 + msr_platform_info = vcpu_get_msr(vcpu, MSR_PLATFORM_INFO); 88 + vcpu_set_msr(vcpu, MSR_PLATFORM_INFO, 89 + msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO); 90 90 test_msr_platform_info_enabled(vcpu); 91 91 test_msr_platform_info_disabled(vcpu); 92 - vcpu_set_msr(vm, vcpu->id, MSR_PLATFORM_INFO, msr_platform_info); 92 + vcpu_set_msr(vcpu, MSR_PLATFORM_INFO, msr_platform_info); 93 93 94 94 kvm_vm_free(vm); 95 95
+4 -4
tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
··· 177 177 struct kvm_run *run = vcpu->run; 178 178 struct ucall uc; 179 179 180 - vcpu_run(vcpu->vm, vcpu->id); 180 + vcpu_run(vcpu); 181 181 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 182 182 "Exit_reason other than KVM_EXIT_IO: %u (%s)\n", 183 183 run->exit_reason, 184 184 exit_reason_str(run->exit_reason)); 185 - get_ucall(vcpu->vm, vcpu->id, &uc); 185 + get_ucall(vcpu, &uc); 186 186 TEST_ASSERT(uc.cmd == UCALL_SYNC, 187 187 "Received ucall other than UCALL_SYNC: %lu", uc.cmd); 188 188 return uc.args[1]; ··· 371 371 372 372 vcpu = vm_vcpu_add(vm, 0, guest_code); 373 373 vm_init_descriptor_tables(vm); 374 - vcpu_init_descriptor_tables(vm, vcpu->id); 374 + vcpu_init_descriptor_tables(vcpu); 375 375 376 376 TEST_ASSERT(!sanity_check_pmu(vcpu), 377 377 "Guest should not be able to use disabled PMU."); ··· 470 470 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 471 471 472 472 vm_init_descriptor_tables(vm); 473 - vcpu_init_descriptor_tables(vm, vcpu->id); 473 + vcpu_init_descriptor_tables(vcpu); 474 474 475 475 if (!sanity_check_pmu(vcpu)) { 476 476 print_skip("Guest PMU is not functional");
+2 -2
tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
··· 49 49 50 50 for (stage = 0; stage < 2; stage++) { 51 51 52 - vcpu_run(vcpu->vm, vcpu->id); 52 + vcpu_run(vcpu); 53 53 54 - switch (get_ucall(vcpu->vm, vcpu->id, &uc)) { 54 + switch (get_ucall(vcpu, &uc)) { 55 55 case UCALL_SYNC: 56 56 TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") && 57 57 uc.args[1] == stage + 1,
+8 -8
tools/testing/selftests/kvm/x86_64/set_sregs_test.c
··· 35 35 memcpy(&sregs, orig, sizeof(sregs)); 36 36 sregs.cr4 |= feature_bit; 37 37 38 - rc = _vcpu_sregs_set(vcpu->vm, vcpu->id, &sregs); 38 + rc = _vcpu_sregs_set(vcpu, &sregs); 39 39 TEST_ASSERT(rc, "KVM allowed unsupported CR4 bit (0x%lx)", feature_bit); 40 40 41 41 /* Sanity check that KVM didn't change anything. */ 42 - vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs); 42 + vcpu_sregs_get(vcpu, &sregs); 43 43 TEST_ASSERT(!memcmp(&sregs, orig, sizeof(sregs)), "KVM modified sregs"); 44 44 } 45 45 ··· 97 97 vm = vm_create_barebones(); 98 98 vcpu = __vm_vcpu_add(vm, 0); 99 99 100 - vcpu_sregs_get(vm, vcpu->id, &sregs); 100 + vcpu_sregs_get(vcpu, &sregs); 101 101 102 102 sregs.cr4 |= calc_cr4_feature_bits(vm); 103 103 cr4 = sregs.cr4; 104 104 105 - rc = _vcpu_sregs_set(vm, vcpu->id, &sregs); 105 + rc = _vcpu_sregs_set(vcpu, &sregs); 106 106 TEST_ASSERT(!rc, "Failed to set supported CR4 bits (0x%lx)", cr4); 107 107 108 - vcpu_sregs_get(vm, vcpu->id, &sregs); 108 + vcpu_sregs_get(vcpu, &sregs); 109 109 TEST_ASSERT(sregs.cr4 == cr4, "sregs.CR4 (0x%llx) != CR4 (0x%lx)", 110 110 sregs.cr4, cr4); 111 111 ··· 125 125 /* Create a "real" VM and verify APIC_BASE can be set. */ 126 126 vm = vm_create_with_one_vcpu(&vcpu, NULL); 127 127 128 - vcpu_sregs_get(vm, vcpu->id, &sregs); 128 + vcpu_sregs_get(vcpu, &sregs); 129 129 sregs.apic_base = 1 << 10; 130 - rc = _vcpu_sregs_set(vm, vcpu->id, &sregs); 130 + rc = _vcpu_sregs_set(vcpu, &sregs); 131 131 TEST_ASSERT(rc, "Set IA32_APIC_BASE to %llx (invalid)", 132 132 sregs.apic_base); 133 133 sregs.apic_base = 1 << 11; 134 - rc = _vcpu_sregs_set(vm, vcpu->id, &sregs); 134 + rc = _vcpu_sregs_set(vcpu, &sregs); 135 135 TEST_ASSERT(!rc, "Couldn't set IA32_APIC_BASE to %llx (valid)", 136 136 sregs.apic_base); 137 137
+9 -9
tools/testing/selftests/kvm/x86_64/smm_test.c
··· 118 118 { 119 119 struct kvm_vcpu_events events; 120 120 121 - vcpu_events_get(vcpu->vm, vcpu->id, &events); 121 + vcpu_events_get(vcpu, &events); 122 122 123 123 events.smi.pending = 1; 124 124 events.flags |= KVM_VCPUEVENT_VALID_SMM; 125 125 126 - vcpu_events_set(vcpu->vm, vcpu->id, &events); 126 + vcpu_events_set(vcpu, &events); 127 127 } 128 128 129 129 int main(int argc, char *argv[]) ··· 151 151 memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler, 152 152 sizeof(smi_handler)); 153 153 154 - vcpu_set_msr(vm, vcpu->id, MSR_IA32_SMBASE, SMRAM_GPA); 154 + vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA); 155 155 156 156 if (kvm_check_cap(KVM_CAP_NESTED_STATE)) { 157 157 if (nested_svm_supported()) ··· 163 163 if (!nested_gva) 164 164 pr_info("will skip SMM test with VMX enabled\n"); 165 165 166 - vcpu_args_set(vm, vcpu->id, 1, nested_gva); 166 + vcpu_args_set(vcpu, 1, nested_gva); 167 167 168 168 for (stage = 1;; stage++) { 169 - vcpu_run(vm, vcpu->id); 169 + vcpu_run(vcpu); 170 170 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 171 171 "Stage %d: unexpected exit reason: %u (%s),\n", 172 172 stage, run->exit_reason, 173 173 exit_reason_str(run->exit_reason)); 174 174 175 175 memset(&regs, 0, sizeof(regs)); 176 - vcpu_regs_get(vm, vcpu->id, &regs); 176 + vcpu_regs_get(vcpu, &regs); 177 177 178 178 stage_reported = regs.rax & 0xff; 179 179 ··· 201 201 if (stage == 10) 202 202 inject_smi(vcpu); 203 203 204 - state = vcpu_save_state(vm, vcpu->id); 204 + state = vcpu_save_state(vcpu); 205 205 kvm_vm_release(vm); 206 206 207 207 vcpu = vm_recreate_with_one_vcpu(vm); 208 - vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid()); 209 - vcpu_load_state(vm, vcpu->id, state); 208 + vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid()); 209 + vcpu_load_state(vcpu, state); 210 210 run = vcpu->run; 211 211 kvm_x86_state_cleanup(state); 212 212 }
+9 -9
tools/testing/selftests/kvm/x86_64/state_test.c
··· 167 167 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 168 168 run = vcpu->run; 169 169 170 - vcpu_regs_get(vm, vcpu->id, &regs1); 170 + vcpu_regs_get(vcpu, &regs1); 171 171 172 172 if (kvm_check_cap(KVM_CAP_NESTED_STATE)) { 173 173 if (nested_svm_supported()) ··· 179 179 if (!nested_gva) 180 180 pr_info("will skip nested state checks\n"); 181 181 182 - vcpu_args_set(vm, vcpu->id, 1, nested_gva); 182 + vcpu_args_set(vcpu, 1, nested_gva); 183 183 184 184 for (stage = 1;; stage++) { 185 - vcpu_run(vm, vcpu->id); 185 + vcpu_run(vcpu); 186 186 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 187 187 "Stage %d: unexpected exit reason: %u (%s),\n", 188 188 stage, run->exit_reason, 189 189 exit_reason_str(run->exit_reason)); 190 190 191 - switch (get_ucall(vm, vcpu->id, &uc)) { 191 + switch (get_ucall(vcpu, &uc)) { 192 192 case UCALL_ABORT: 193 193 TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], 194 194 __FILE__, uc.args[1]); ··· 206 206 uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx", 207 207 stage, (ulong)uc.args[1]); 208 208 209 - state = vcpu_save_state(vm, vcpu->id); 209 + state = vcpu_save_state(vcpu); 210 210 memset(&regs1, 0, sizeof(regs1)); 211 - vcpu_regs_get(vm, vcpu->id, &regs1); 211 + vcpu_regs_get(vcpu, &regs1); 212 212 213 213 kvm_vm_release(vm); 214 214 215 215 /* Restore state in a new VM. */ 216 216 vcpu = vm_recreate_with_one_vcpu(vm); 217 - vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid()); 218 - vcpu_load_state(vm, vcpu->id, state); 217 + vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid()); 218 + vcpu_load_state(vcpu, state); 219 219 run = vcpu->run; 220 220 kvm_x86_state_cleanup(state); 221 221 222 222 memset(&regs2, 0, sizeof(regs2)); 223 - vcpu_regs_get(vm, vcpu->id, &regs2); 223 + vcpu_regs_get(vcpu, &regs2); 224 224 TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)), 225 225 "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx", 226 226 (ulong) regs2.rdi, (ulong) regs2.rsi);
+4 -4
tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
··· 95 95 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); 96 96 97 97 vm_init_descriptor_tables(vm); 98 - vcpu_init_descriptor_tables(vm, vcpu->id); 98 + vcpu_init_descriptor_tables(vcpu); 99 99 100 100 vm_install_exception_handler(vm, VINTR_IRQ_NUMBER, vintr_irq_handler); 101 101 vm_install_exception_handler(vm, INTR_IRQ_NUMBER, intr_irq_handler); 102 102 103 103 vcpu_alloc_svm(vm, &svm_gva); 104 - vcpu_args_set(vm, vcpu->id, 1, svm_gva); 104 + vcpu_args_set(vcpu, 1, svm_gva); 105 105 106 106 run = vcpu->run; 107 107 108 - vcpu_run(vm, vcpu->id); 108 + vcpu_run(vcpu); 109 109 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 110 110 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", 111 111 run->exit_reason, 112 112 exit_reason_str(run->exit_reason)); 113 113 114 - switch (get_ucall(vm, vcpu->id, &uc)) { 114 + switch (get_ucall(vcpu, &uc)) { 115 115 case UCALL_ABORT: 116 116 TEST_FAIL("%s", (const char *)uc.args[0]); 117 117 break;
+5 -5
tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
··· 145 145 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); 146 146 147 147 vm_init_descriptor_tables(vm); 148 - vcpu_init_descriptor_tables(vm, vcpu->id); 148 + vcpu_init_descriptor_tables(vcpu); 149 149 150 150 vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler); 151 151 vm_install_exception_handler(vm, BP_VECTOR, guest_bp_handler); ··· 163 163 } else { 164 164 idt_alt_vm = 0; 165 165 } 166 - vcpu_args_set(vm, vcpu->id, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm); 166 + vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm); 167 167 168 168 memset(&debug, 0, sizeof(debug)); 169 - vcpu_guest_debug_set(vm, vcpu->id, &debug); 169 + vcpu_guest_debug_set(vcpu, &debug); 170 170 171 171 struct kvm_run *run = vcpu->run; 172 172 struct ucall uc; 173 173 174 174 alarm(2); 175 - vcpu_run(vm, vcpu->id); 175 + vcpu_run(vcpu); 176 176 alarm(0); 177 177 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 178 178 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", 179 179 run->exit_reason, 180 180 exit_reason_str(run->exit_reason)); 181 181 182 - switch (get_ucall(vm, vcpu->id, &uc)) { 182 + switch (get_ucall(vcpu, &uc)) { 183 183 case UCALL_ABORT: 184 184 TEST_FAIL("%s at %s:%ld, vals = 0x%lx 0x%lx 0x%lx", (const char *)uc.args[0], 185 185 __FILE__, uc.args[1], uc.args[2], uc.args[3], uc.args[4]);
+3 -3
tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
··· 44 44 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); 45 45 46 46 vcpu_alloc_svm(vm, &svm_gva); 47 - vcpu_args_set(vm, vcpu->id, 1, svm_gva); 47 + vcpu_args_set(vcpu, 1, svm_gva); 48 48 49 49 for (;;) { 50 50 volatile struct kvm_run *run = vcpu->run; 51 51 struct ucall uc; 52 52 53 - vcpu_run(vm, vcpu->id); 53 + vcpu_run(vcpu); 54 54 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 55 55 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", 56 56 run->exit_reason, 57 57 exit_reason_str(run->exit_reason)); 58 58 59 - switch (get_ucall(vm, vcpu->id, &uc)) { 59 + switch (get_ucall(vcpu, &uc)) { 60 60 case UCALL_ABORT: 61 61 TEST_FAIL("%s", (const char *)uc.args[0]); 62 62 /* NOT REACHED */
+18 -18
tools/testing/selftests/kvm/x86_64/sync_regs_test.c
··· 109 109 110 110 /* Request reading invalid register set from VCPU. */ 111 111 run->kvm_valid_regs = INVALID_SYNC_FIELD; 112 - rv = _vcpu_run(vm, vcpu->id); 112 + rv = _vcpu_run(vcpu); 113 113 TEST_ASSERT(rv < 0 && errno == EINVAL, 114 114 "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n", 115 115 rv); 116 116 run->kvm_valid_regs = 0; 117 117 118 118 run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS; 119 - rv = _vcpu_run(vm, vcpu->id); 119 + rv = _vcpu_run(vcpu); 120 120 TEST_ASSERT(rv < 0 && errno == EINVAL, 121 121 "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n", 122 122 rv); ··· 124 124 125 125 /* Request setting invalid register set into VCPU. */ 126 126 run->kvm_dirty_regs = INVALID_SYNC_FIELD; 127 - rv = _vcpu_run(vm, vcpu->id); 127 + rv = _vcpu_run(vcpu); 128 128 TEST_ASSERT(rv < 0 && errno == EINVAL, 129 129 "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n", 130 130 rv); 131 131 run->kvm_dirty_regs = 0; 132 132 133 133 run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS; 134 - rv = _vcpu_run(vm, vcpu->id); 134 + rv = _vcpu_run(vcpu); 135 135 TEST_ASSERT(rv < 0 && errno == EINVAL, 136 136 "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n", 137 137 rv); ··· 140 140 /* Request and verify all valid register sets. 
*/ 141 141 /* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */ 142 142 run->kvm_valid_regs = TEST_SYNC_FIELDS; 143 - rv = _vcpu_run(vm, vcpu->id); 143 + rv = _vcpu_run(vcpu); 144 144 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 145 145 "Unexpected exit reason: %u (%s),\n", 146 146 run->exit_reason, 147 147 exit_reason_str(run->exit_reason)); 148 148 149 - vcpu_regs_get(vm, vcpu->id, &regs); 149 + vcpu_regs_get(vcpu, &regs); 150 150 compare_regs(&regs, &run->s.regs.regs); 151 151 152 - vcpu_sregs_get(vm, vcpu->id, &sregs); 152 + vcpu_sregs_get(vcpu, &sregs); 153 153 compare_sregs(&sregs, &run->s.regs.sregs); 154 154 155 - vcpu_events_get(vm, vcpu->id, &events); 155 + vcpu_events_get(vcpu, &events); 156 156 compare_vcpu_events(&events, &run->s.regs.events); 157 157 158 158 /* Set and verify various register values. */ ··· 162 162 163 163 run->kvm_valid_regs = TEST_SYNC_FIELDS; 164 164 run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS; 165 - rv = _vcpu_run(vm, vcpu->id); 165 + rv = _vcpu_run(vcpu); 166 166 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 167 167 "Unexpected exit reason: %u (%s),\n", 168 168 run->exit_reason, ··· 174 174 "apic_base sync regs value incorrect 0x%llx.", 175 175 run->s.regs.sregs.apic_base); 176 176 177 - vcpu_regs_get(vm, vcpu->id, &regs); 177 + vcpu_regs_get(vcpu, &regs); 178 178 compare_regs(&regs, &run->s.regs.regs); 179 179 180 - vcpu_sregs_get(vm, vcpu->id, &sregs); 180 + vcpu_sregs_get(vcpu, &sregs); 181 181 compare_sregs(&sregs, &run->s.regs.sregs); 182 182 183 - vcpu_events_get(vm, vcpu->id, &events); 183 + vcpu_events_get(vcpu, &events); 184 184 compare_vcpu_events(&events, &run->s.regs.events); 185 185 186 186 /* Clear kvm_dirty_regs bits, verify new s.regs values are ··· 189 189 run->kvm_valid_regs = TEST_SYNC_FIELDS; 190 190 run->kvm_dirty_regs = 0; 191 191 run->s.regs.regs.rbx = 0xDEADBEEF; 192 - rv = _vcpu_run(vm, vcpu->id); 192 + rv = _vcpu_run(vcpu); 193 193 TEST_ASSERT(run->exit_reason == 
KVM_EXIT_IO, 194 194 "Unexpected exit reason: %u (%s),\n", 195 195 run->exit_reason, ··· 206 206 run->kvm_dirty_regs = 0; 207 207 run->s.regs.regs.rbx = 0xAAAA; 208 208 regs.rbx = 0xBAC0; 209 - vcpu_regs_set(vm, vcpu->id, &regs); 210 - rv = _vcpu_run(vm, vcpu->id); 209 + vcpu_regs_set(vcpu, &regs); 210 + rv = _vcpu_run(vcpu); 211 211 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 212 212 "Unexpected exit reason: %u (%s),\n", 213 213 run->exit_reason, ··· 215 215 TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA, 216 216 "rbx sync regs value incorrect 0x%llx.", 217 217 run->s.regs.regs.rbx); 218 - vcpu_regs_get(vm, vcpu->id, &regs); 218 + vcpu_regs_get(vcpu, &regs); 219 219 TEST_ASSERT(regs.rbx == 0xBAC0 + 1, 220 220 "rbx guest value incorrect 0x%llx.", 221 221 regs.rbx); ··· 227 227 run->kvm_valid_regs = 0; 228 228 run->kvm_dirty_regs = TEST_SYNC_FIELDS; 229 229 run->s.regs.regs.rbx = 0xBBBB; 230 - rv = _vcpu_run(vm, vcpu->id); 230 + rv = _vcpu_run(vcpu); 231 231 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 232 232 "Unexpected exit reason: %u (%s),\n", 233 233 run->exit_reason, ··· 235 235 TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB, 236 236 "rbx sync regs value incorrect 0x%llx.", 237 237 run->s.regs.regs.rbx); 238 - vcpu_regs_get(vm, vcpu->id, &regs); 238 + vcpu_regs_get(vcpu, &regs); 239 239 TEST_ASSERT(regs.rbx == 0xBBBB + 1, 240 240 "rbx guest value incorrect 0x%llx.", 241 241 regs.rbx);
+8 -8
tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c
··· 61 61 62 62 run = vcpu->run; 63 63 vcpu_alloc_vmx(vm, &vmx_pages_gva); 64 - vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva); 65 - vcpu_run(vm, vcpu->id); 64 + vcpu_args_set(vcpu, 1, vmx_pages_gva); 65 + vcpu_run(vcpu); 66 66 67 67 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 68 68 "Expected KVM_EXIT_IO, got: %u (%s)\n", ··· 70 70 TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT, 71 71 "Expected IN from port %d from L2, got port %d", 72 72 ARBITRARY_IO_PORT, run->io.port); 73 - vcpu_events_get(vm, vcpu->id, &events); 73 + vcpu_events_get(vcpu, &events); 74 74 events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; 75 75 events.triple_fault.pending = true; 76 - vcpu_events_set(vm, vcpu->id, &events); 76 + vcpu_events_set(vcpu, &events); 77 77 run->immediate_exit = true; 78 - vcpu_run_complete_io(vm, vcpu->id); 78 + vcpu_run_complete_io(vcpu); 79 79 80 - vcpu_events_get(vm, vcpu->id, &events); 80 + vcpu_events_get(vcpu, &events); 81 81 TEST_ASSERT(events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT, 82 82 "Triple fault event invalid"); 83 83 TEST_ASSERT(events.triple_fault.pending, 84 84 "No triple fault pending"); 85 - vcpu_run(vm, vcpu->id); 85 + vcpu_run(vcpu); 86 86 87 - switch (get_ucall(vm, vcpu->id, &uc)) { 87 + switch (get_ucall(vcpu, &uc)) { 88 88 case UCALL_DONE: 89 89 break; 90 90 case UCALL_ABORT:
+7 -7
tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
··· 14 14 #define GUEST_STEP (UNITY * 4) 15 15 #define ROUND(x) ((x + UNITY / 2) & -UNITY) 16 16 #define rounded_rdmsr(x) ROUND(rdmsr(x)) 17 - #define rounded_host_rdmsr(x) ROUND(vcpu_get_msr(vm, vcpu->id, x)) 17 + #define rounded_host_rdmsr(x) ROUND(vcpu_get_msr(vcpu, x)) 18 18 19 19 static void guest_code(void) 20 20 { ··· 68 68 { 69 69 struct ucall uc; 70 70 71 - vcpu_run(vcpu->vm, vcpu->id); 71 + vcpu_run(vcpu); 72 72 73 - switch (get_ucall(vcpu->vm, vcpu->id, &uc)) { 73 + switch (get_ucall(vcpu, &uc)) { 74 74 case UCALL_SYNC: 75 75 TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") && 76 76 uc.args[1] == stage + 1, "Stage %d: Unexpected register values vmexit, got %lx", ··· 116 116 * Host: writes to MSR_IA32_TSC set the host-side offset 117 117 * and therefore do not change MSR_IA32_TSC_ADJUST. 118 118 */ 119 - vcpu_set_msr(vm, vcpu->id, MSR_IA32_TSC, HOST_ADJUST + val); 119 + vcpu_set_msr(vcpu, MSR_IA32_TSC, HOST_ADJUST + val); 120 120 ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val); 121 121 ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); 122 122 run_vcpu(vcpu, 3); 123 123 124 124 /* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC. */ 125 - vcpu_set_msr(vm, vcpu->id, MSR_IA32_TSC_ADJUST, UNITY * 123456); 125 + vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, UNITY * 123456); 126 126 ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val); 127 - ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_TSC_ADJUST), UNITY * 123456); 127 + ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456); 128 128 129 129 /* Restore previous value. */ 130 - vcpu_set_msr(vm, vcpu->id, MSR_IA32_TSC_ADJUST, val); 130 + vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val); 131 131 ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val); 132 132 ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val); 133 133
+3 -3
tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
··· 58 58 59 59 if (!first_cpu_done) { 60 60 first_cpu_done = true; 61 - vcpu_set_msr(vm, vcpu->id, MSR_IA32_TSC, TEST_TSC_OFFSET); 61 + vcpu_set_msr(vcpu, MSR_IA32_TSC, TEST_TSC_OFFSET); 62 62 } 63 63 64 64 pthread_spin_unlock(&create_lock); ··· 67 67 volatile struct kvm_run *run = vcpu->run; 68 68 struct ucall uc; 69 69 70 - vcpu_run(vm, vcpu->id); 70 + vcpu_run(vcpu); 71 71 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 72 72 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", 73 73 run->exit_reason, 74 74 exit_reason_str(run->exit_reason)); 75 75 76 - switch (get_ucall(vm, vcpu->id, &uc)) { 76 + switch (get_ucall(vcpu, &uc)) { 77 77 case UCALL_DONE: 78 78 goto out; 79 79
+4 -4
tools/testing/selftests/kvm/x86_64/userspace_io_test.c
··· 65 65 memset(&regs, 0, sizeof(regs)); 66 66 67 67 while (1) { 68 - vcpu_run(vm, vcpu->id); 68 + vcpu_run(vcpu); 69 69 70 70 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 71 71 "Unexpected exit reason: %u (%s),\n", 72 72 run->exit_reason, 73 73 exit_reason_str(run->exit_reason)); 74 74 75 - if (get_ucall(vm, vcpu->id, &uc)) 75 + if (get_ucall(vcpu, &uc)) 76 76 break; 77 77 78 78 TEST_ASSERT(run->io.port == 0x80, ··· 85 85 * scope from a testing perspective as it's not ABI in any way, 86 86 * i.e. it really is abusing internal KVM knowledge. 87 87 */ 88 - vcpu_regs_get(vm, vcpu->id, &regs); 88 + vcpu_regs_get(vcpu, &regs); 89 89 if (regs.rcx == 2) 90 90 regs.rcx = 1; 91 91 if (regs.rcx == 3) 92 92 regs.rcx = 8192; 93 93 memset((void *)run + run->io.data_offset, 0xaa, 4096); 94 - vcpu_regs_set(vm, vcpu->id, &regs); 94 + vcpu_regs_set(vcpu, &regs); 95 95 } 96 96 97 97 switch (uc.cmd) {
+11 -11
tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
··· 399 399 struct ucall uc; 400 400 401 401 if (vcpu->run->exit_reason == KVM_EXIT_IO && 402 - get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_ABORT) { 402 + get_ucall(vcpu, &uc) == UCALL_ABORT) { 403 403 TEST_FAIL("%s at %s:%ld", 404 404 (const char *)uc.args[0], __FILE__, uc.args[1]); 405 405 } ··· 483 483 run->exit_reason, 484 484 exit_reason_str(run->exit_reason)); 485 485 486 - TEST_ASSERT(get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_DONE, 486 + TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE, 487 487 "Unexpected ucall command: %lu, expected UCALL_DONE (%d)", 488 488 uc.cmd, UCALL_DONE); 489 489 } ··· 500 500 run->exit_reason, 501 501 exit_reason_str(run->exit_reason)); 502 502 503 - switch (get_ucall(vcpu->vm, vcpu->id, &uc)) { 503 + switch (get_ucall(vcpu, &uc)) { 504 504 case UCALL_SYNC: 505 505 break; 506 506 case UCALL_ABORT: ··· 519 519 static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu, 520 520 uint32_t msr_index) 521 521 { 522 - vcpu_run(vcpu->vm, vcpu->id); 522 + vcpu_run(vcpu); 523 523 process_rdmsr(vcpu, msr_index); 524 524 } 525 525 526 526 static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu, 527 527 uint32_t msr_index) 528 528 { 529 - vcpu_run(vcpu->vm, vcpu->id); 529 + vcpu_run(vcpu); 530 530 process_wrmsr(vcpu, msr_index); 531 531 } 532 532 533 533 static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu) 534 534 { 535 - vcpu_run(vcpu->vm, vcpu->id); 535 + vcpu_run(vcpu); 536 536 return process_ucall(vcpu); 537 537 } 538 538 539 539 static void run_guest_then_process_ucall_done(struct kvm_vcpu *vcpu) 540 540 { 541 - vcpu_run(vcpu->vm, vcpu->id); 541 + vcpu_run(vcpu); 542 542 process_ucall_done(vcpu); 543 543 } 544 544 ··· 560 560 vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow); 561 561 562 562 vm_init_descriptor_tables(vm); 563 - vcpu_init_descriptor_tables(vm, vcpu->id); 563 + vcpu_init_descriptor_tables(vcpu); 564 564 565 565 vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler); 566 566 ··· 577 577 
run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT); 578 578 579 579 vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); 580 - vcpu_run(vm, vcpu->id); 580 + vcpu_run(vcpu); 581 581 vm_install_exception_handler(vm, UD_VECTOR, NULL); 582 582 583 583 if (process_ucall(vcpu) != UCALL_DONE) { ··· 608 608 { 609 609 struct ucall uc; 610 610 611 - switch (get_ucall(vcpu->vm, vcpu->id, &uc)) { 611 + switch (get_ucall(vcpu, &uc)) { 612 612 case UCALL_ABORT: 613 613 TEST_FAIL("Guest assertion not met"); 614 614 break; ··· 684 684 vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny); 685 685 686 686 while (1) { 687 - vcpu_run(vm, vcpu->id); 687 + vcpu_run(vcpu); 688 688 689 689 switch (run->exit_reason) { 690 690 case KVM_EXIT_X86_RDMSR:
+3 -3
tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
··· 95 95 96 96 vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva); 97 97 prepare_virtualize_apic_accesses(vmx, vm); 98 - vcpu_args_set(vm, vcpu->id, 2, vmx_pages_gva, high_gpa); 98 + vcpu_args_set(vcpu, 2, vmx_pages_gva, high_gpa); 99 99 100 100 while (!done) { 101 101 volatile struct kvm_run *run = vcpu->run; 102 102 struct ucall uc; 103 103 104 - vcpu_run(vm, vcpu->id); 104 + vcpu_run(vcpu); 105 105 if (apic_access_addr == high_gpa) { 106 106 TEST_ASSERT(run->exit_reason == 107 107 KVM_EXIT_INTERNAL_ERROR, ··· 119 119 run->exit_reason, 120 120 exit_reason_str(run->exit_reason)); 121 121 122 - switch (get_ucall(vm, vcpu->id, &uc)) { 122 + switch (get_ucall(vcpu, &uc)) { 123 123 case UCALL_ABORT: 124 124 TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], 125 125 __FILE__, uc.args[1]);
+3 -3
tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
··· 57 57 58 58 /* Allocate VMX pages and shared descriptors (vmx_pages). */ 59 59 vcpu_alloc_vmx(vm, &vmx_pages_gva); 60 - vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva); 60 + vcpu_args_set(vcpu, 1, vmx_pages_gva); 61 61 62 62 for (;;) { 63 63 volatile struct kvm_run *run = vcpu->run; 64 64 struct ucall uc; 65 65 66 - vcpu_run(vm, vcpu->id); 66 + vcpu_run(vcpu); 67 67 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 68 68 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", 69 69 run->exit_reason, ··· 72 72 if (run->io.port == PORT_L0_EXIT) 73 73 break; 74 74 75 - switch (get_ucall(vm, vcpu->id, &uc)) { 75 + switch (get_ucall(vcpu, &uc)) { 76 76 case UCALL_ABORT: 77 77 TEST_FAIL("%s", (const char *)uc.args[0]); 78 78 /* NOT REACHED */
+3 -3
tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
··· 82 82 /* Create VM */ 83 83 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); 84 84 vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva); 85 - vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva); 85 + vcpu_args_set(vcpu, 1, vmx_pages_gva); 86 86 run = vcpu->run; 87 87 88 88 /* Add an extra memory slot for testing dirty logging */ ··· 115 115 116 116 while (!done) { 117 117 memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096); 118 - vcpu_run(vm, vcpu->id); 118 + vcpu_run(vcpu); 119 119 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 120 120 "Unexpected exit reason: %u (%s),\n", 121 121 run->exit_reason, 122 122 exit_reason_str(run->exit_reason)); 123 123 124 - switch (get_ucall(vm, vcpu->id, &uc)) { 124 + switch (get_ucall(vcpu, &uc)) { 125 125 case UCALL_ABORT: 126 126 TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], 127 127 __FILE__, uc.args[1]);
+5 -5
tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c
··· 24 24 { 25 25 struct kvm_run *run = vcpu->run; 26 26 27 - vcpu_run(vcpu->vm, vcpu->id); 27 + vcpu_run(vcpu); 28 28 29 29 TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR, 30 30 "Expected KVM_EXIT_INTERNAL_ERROR, got %d (%s)\n", ··· 60 60 static struct kvm_sregs sregs; 61 61 62 62 if (!sregs.cr0) 63 - vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs); 63 + vcpu_sregs_get(vcpu, &sregs); 64 64 sregs.tr.unusable = !!set; 65 - vcpu_sregs_set(vcpu->vm, vcpu->id, &sregs); 65 + vcpu_sregs_set(vcpu, &sregs); 66 66 } 67 67 68 68 static void set_invalid_guest_state(struct kvm_vcpu *vcpu) ··· 91 91 92 92 TEST_ASSERT(sig == SIGALRM, "Unexpected signal = %d", sig); 93 93 94 - vcpu_events_get(vcpu->vm, vcpu->id, &events); 94 + vcpu_events_get(vcpu, &events); 95 95 96 96 /* 97 97 * If an exception is pending, attempt KVM_RUN with invalid guest, ··· 120 120 get_set_sigalrm_vcpu(vcpu); 121 121 122 122 vm_init_descriptor_tables(vm); 123 - vcpu_init_descriptor_tables(vm, vcpu->id); 123 + vcpu_init_descriptor_tables(vcpu); 124 124 125 125 vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); 126 126
+6 -6
tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c
··· 64 64 65 65 /* Allocate VMX pages and shared descriptors (vmx_pages). */ 66 66 vcpu_alloc_vmx(vm, &vmx_pages_gva); 67 - vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva); 67 + vcpu_args_set(vcpu, 1, vmx_pages_gva); 68 68 69 - vcpu_run(vm, vcpu->id); 69 + vcpu_run(vcpu); 70 70 71 71 run = vcpu->run; 72 72 ··· 88 88 * emulating invalid guest state for L2. 89 89 */ 90 90 memset(&sregs, 0, sizeof(sregs)); 91 - vcpu_sregs_get(vm, vcpu->id, &sregs); 91 + vcpu_sregs_get(vcpu, &sregs); 92 92 sregs.tr.unusable = 1; 93 - vcpu_sregs_set(vm, vcpu->id, &sregs); 93 + vcpu_sregs_set(vcpu, &sregs); 94 94 95 - vcpu_run(vm, vcpu->id); 95 + vcpu_run(vcpu); 96 96 97 - switch (get_ucall(vm, vcpu->id, &uc)) { 97 + switch (get_ucall(vcpu, &uc)) { 98 98 case UCALL_DONE: 99 99 break; 100 100 case UCALL_ABORT:
+5 -6
tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
··· 182 182 183 183 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); 184 184 vcpu_alloc_vmx(vm, &vmx_pages_gva); 185 - vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva); 185 + vcpu_args_set(vcpu, 1, vmx_pages_gva); 186 186 187 - tsc_khz = __vcpu_ioctl(vm, vcpu->id, KVM_GET_TSC_KHZ, NULL); 187 + tsc_khz = __vcpu_ioctl(vcpu, KVM_GET_TSC_KHZ, NULL); 188 188 TEST_ASSERT(tsc_khz != -1, "vcpu ioctl KVM_GET_TSC_KHZ failed"); 189 189 190 190 /* scale down L1's TSC frequency */ 191 - vcpu_ioctl(vm, vcpu->id, KVM_SET_TSC_KHZ, 192 - (void *) (tsc_khz / l1_scale_factor)); 191 + vcpu_ioctl(vcpu, KVM_SET_TSC_KHZ, (void *) (tsc_khz / l1_scale_factor)); 193 192 194 193 for (;;) { 195 194 volatile struct kvm_run *run = vcpu->run; 196 195 struct ucall uc; 197 196 198 - vcpu_run(vm, vcpu->id); 197 + vcpu_run(vcpu); 199 198 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 200 199 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", 201 200 run->exit_reason, 202 201 exit_reason_str(run->exit_reason)); 203 202 204 - switch (get_ucall(vm, vcpu->id, &uc)) { 203 + switch (get_ucall(vcpu, &uc)) { 205 204 case UCALL_ABORT: 206 205 TEST_FAIL("%s", (const char *) uc.args[0]); 207 206 case UCALL_SYNC:
+10 -10
tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
··· 87 87 } 88 88 89 89 /* testcase 1, set capabilities when we have PDCM bit */ 90 - vcpu_set_cpuid(vm, vcpu->id, cpuid); 91 - vcpu_set_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES); 90 + vcpu_set_cpuid(vcpu, cpuid); 91 + vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES); 92 92 93 93 /* check capabilities can be retrieved with KVM_GET_MSR */ 94 - ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES); 94 + ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES); 95 95 96 96 /* check whatever we write with KVM_SET_MSR is _not_ modified */ 97 - vcpu_run(vm, vcpu->id); 98 - ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES); 97 + vcpu_run(vcpu); 98 + ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES); 99 99 100 100 /* testcase 2, check valid LBR formats are accepted */ 101 - vcpu_set_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES, 0); 102 - ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES), 0); 101 + vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0); 102 + ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), 0); 103 103 104 - vcpu_set_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format); 105 - ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format); 104 + vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format); 105 + ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format); 106 106 107 107 /* testcase 3, check invalid LBR format is rejected */ 108 108 /* Note, on Arch LBR capable platforms, LBR_FMT in perf capability msr is 0x3f, 109 109 * to avoid the failure, use a true invalid format 0x30 for the test. 
*/ 110 - ret = _vcpu_set_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES, 0x30); 110 + ret = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0x30); 111 111 TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail."); 112 112 113 113 printf("Completed perf capability tests.\n");
+9 -9
tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
··· 178 178 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 179 179 run = vcpu->run; 180 180 181 - vcpu_regs_get(vm, vcpu->id, &regs1); 181 + vcpu_regs_get(vcpu, &regs1); 182 182 183 183 vcpu_alloc_vmx(vm, &vmx_pages_gva); 184 - vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva); 184 + vcpu_args_set(vcpu, 1, vmx_pages_gva); 185 185 186 186 for (stage = 1;; stage++) { 187 - vcpu_run(vm, vcpu->id); 187 + vcpu_run(vcpu); 188 188 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 189 189 "Stage %d: unexpected exit reason: %u (%s),\n", 190 190 stage, run->exit_reason, 191 191 exit_reason_str(run->exit_reason)); 192 192 193 - switch (get_ucall(vm, vcpu->id, &uc)) { 193 + switch (get_ucall(vcpu, &uc)) { 194 194 case UCALL_ABORT: 195 195 TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], 196 196 __FILE__, uc.args[1]); ··· 232 232 stage, uc.args[4], uc.args[5]); 233 233 } 234 234 235 - state = vcpu_save_state(vm, vcpu->id); 235 + state = vcpu_save_state(vcpu); 236 236 memset(&regs1, 0, sizeof(regs1)); 237 - vcpu_regs_get(vm, vcpu->id, &regs1); 237 + vcpu_regs_get(vcpu, &regs1); 238 238 239 239 kvm_vm_release(vm); 240 240 241 241 /* Restore state in a new VM. */ 242 242 vcpu = vm_recreate_with_one_vcpu(vm); 243 243 244 - vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid()); 245 - vcpu_load_state(vm, vcpu->id, state); 244 + vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid()); 245 + vcpu_load_state(vcpu, state); 246 246 run = vcpu->run; 247 247 kvm_x86_state_cleanup(state); 248 248 249 249 memset(&regs2, 0, sizeof(regs2)); 250 - vcpu_regs_get(vm, vcpu->id, &regs2); 250 + vcpu_regs_get(vcpu, &regs2); 251 251 TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)), 252 252 "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx", 253 253 (ulong) regs2.rdi, (ulong) regs2.rsi);
+6 -6
tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
··· 28 28 29 29 void test_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state *state) 30 30 { 31 - vcpu_nested_state_set(vcpu->vm, vcpu->id, state); 31 + vcpu_nested_state_set(vcpu, state); 32 32 } 33 33 34 34 void test_nested_state_expect_errno(struct kvm_vcpu *vcpu, ··· 37 37 { 38 38 int rv; 39 39 40 - rv = __vcpu_nested_state_set(vcpu->vm, vcpu->id, state); 40 + rv = __vcpu_nested_state_set(vcpu, state); 41 41 TEST_ASSERT(rv == -1 && errno == expected_errno, 42 42 "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)", 43 43 strerror(expected_errno), expected_errno, rv, strerror(errno), ··· 121 121 test_nested_state(vcpu, state); 122 122 123 123 /* Enable VMX in the guest CPUID. */ 124 - vcpu_set_cpuid(vcpu->vm, vcpu->id, kvm_get_supported_cpuid()); 124 + vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid()); 125 125 126 126 /* 127 127 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without ··· 137 137 state->flags &= KVM_STATE_NESTED_EVMCS; 138 138 if (have_evmcs) { 139 139 test_nested_state_expect_einval(vcpu, state); 140 - vcpu_enable_evmcs(vcpu->vm, vcpu->id); 140 + vcpu_enable_evmcs(vcpu); 141 141 } 142 142 test_nested_state(vcpu, state); 143 143 ··· 233 233 state->hdr.vmx.vmcs12_pa = -1ull; 234 234 state->flags = 0; 235 235 test_nested_state(vcpu, state); 236 - vcpu_nested_state_get(vcpu->vm, vcpu->id, state); 236 + vcpu_nested_state_get(vcpu, state); 237 237 TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz, 238 238 "Size must be between %ld and %d. The size returned was %d.", 239 239 sizeof(*state), state_sz, state->size); ··· 255 255 TEST_ASSERT(i != cpuid->nent, "CPUID function 1 not found"); 256 256 257 257 cpuid->entries[i].ecx &= ~CPUID_VMX; 258 - vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid); 258 + vcpu_set_cpuid(vcpu, cpuid); 259 259 cpuid->entries[i].ecx |= CPUID_VMX; 260 260 } 261 261
+3 -3
tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
··· 133 133 134 134 /* Allocate VMX pages and shared descriptors (vmx_pages). */ 135 135 vcpu_alloc_vmx(vm, &vmx_pages_gva); 136 - vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva); 136 + vcpu_args_set(vcpu, 1, vmx_pages_gva); 137 137 138 138 for (;;) { 139 139 volatile struct kvm_run *run = vcpu->run; 140 140 struct ucall uc; 141 141 142 - vcpu_run(vm, vcpu->id); 142 + vcpu_run(vcpu); 143 143 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 144 144 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", 145 145 run->exit_reason, 146 146 exit_reason_str(run->exit_reason)); 147 147 148 - switch (get_ucall(vm, vcpu->id, &uc)) { 148 + switch (get_ucall(vcpu, &uc)) { 149 149 case UCALL_ABORT: 150 150 TEST_FAIL("%s", (const char *)uc.args[0]); 151 151 /* NOT REACHED */
+5 -5
tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
··· 206 206 vcpu->id, r); 207 207 208 208 fprintf(stderr, "vCPU thread running vCPU %u\n", vcpu->id); 209 - vcpu_run(vcpu->vm, vcpu->id); 209 + vcpu_run(vcpu); 210 210 exit_reason = vcpu->run->exit_reason; 211 211 212 212 TEST_ASSERT(exit_reason == KVM_EXIT_IO, 213 213 "vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO", 214 214 vcpu->id, exit_reason, exit_reason_str(exit_reason)); 215 215 216 - if (get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_ABORT) { 216 + if (get_ucall(vcpu, &uc) == UCALL_ABORT) { 217 217 TEST_ASSERT(false, 218 218 "vCPU %u exited with error: %s.\n" 219 219 "Sending vCPU sent %lu IPIs to halting vCPU\n" ··· 415 415 vm = vm_create_with_one_vcpu(&params[0].vcpu, halter_guest_code); 416 416 417 417 vm_init_descriptor_tables(vm); 418 - vcpu_init_descriptor_tables(vm, params[0].vcpu->id); 418 + vcpu_init_descriptor_tables(params[0].vcpu); 419 419 vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler); 420 420 421 421 virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA); ··· 428 428 params[0].data = data; 429 429 params[1].data = data; 430 430 431 - vcpu_args_set(vm, params[0].vcpu->id, 1, test_data_page_vaddr); 432 - vcpu_args_set(vm, params[1].vcpu->id, 1, test_data_page_vaddr); 431 + vcpu_args_set(params[0].vcpu, 1, test_data_page_vaddr); 432 + vcpu_args_set(params[1].vcpu, 1, test_data_page_vaddr); 433 433 434 434 pipis_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ipis_rcvd); 435 435 params[0].pipis_rcvd = pipis_rcvd;
+21 -21
tools/testing/selftests/kvm/x86_64/xapic_state_test.c
··· 47 47 } while (1); 48 48 } 49 49 50 - static void ____test_icr(struct kvm_vm *vm, struct xapic_vcpu *x, uint64_t val) 50 + static void ____test_icr(struct xapic_vcpu *x, uint64_t val) 51 51 { 52 52 struct kvm_vcpu *vcpu = x->vcpu; 53 53 struct kvm_lapic_state xapic; ··· 59 59 * all bits are valid and should not be modified by KVM (ignoring the 60 60 * fact that vectors 0-15 are technically illegal). 61 61 */ 62 - vcpu_ioctl(vm, vcpu->id, KVM_GET_LAPIC, &xapic); 62 + vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic); 63 63 *((u32 *)&xapic.regs[APIC_IRR]) = val; 64 64 *((u32 *)&xapic.regs[APIC_IRR + 0x10]) = val >> 32; 65 - vcpu_ioctl(vm, vcpu->id, KVM_SET_LAPIC, &xapic); 65 + vcpu_ioctl(vcpu, KVM_SET_LAPIC, &xapic); 66 66 67 - vcpu_run(vm, vcpu->id); 68 - ASSERT_EQ(get_ucall(vm, vcpu->id, &uc), UCALL_SYNC); 67 + vcpu_run(vcpu); 68 + ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC); 69 69 ASSERT_EQ(uc.args[1], val); 70 70 71 - vcpu_ioctl(vm, vcpu->id, KVM_GET_LAPIC, &xapic); 71 + vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic); 72 72 icr = (u64)(*((u32 *)&xapic.regs[APIC_ICR])) | 73 73 (u64)(*((u32 *)&xapic.regs[APIC_ICR2])) << 32; 74 74 if (!x->is_x2apic) ··· 76 76 ASSERT_EQ(icr, val & ~APIC_ICR_BUSY); 77 77 } 78 78 79 - static void __test_icr(struct kvm_vm *vm, struct xapic_vcpu *x, uint64_t val) 79 + static void __test_icr(struct xapic_vcpu *x, uint64_t val) 80 80 { 81 - ____test_icr(vm, x, val | APIC_ICR_BUSY); 82 - ____test_icr(vm, x, val & ~(u64)APIC_ICR_BUSY); 81 + ____test_icr(x, val | APIC_ICR_BUSY); 82 + ____test_icr(x, val & ~(u64)APIC_ICR_BUSY); 83 83 } 84 84 85 - static void test_icr(struct kvm_vm *vm, struct xapic_vcpu *x) 85 + static void test_icr(struct xapic_vcpu *x) 86 86 { 87 87 struct kvm_vcpu *vcpu = x->vcpu; 88 88 uint64_t icr, i, j; 89 89 90 90 icr = APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_FIXED; 91 91 for (i = 0; i <= 0xff; i++) 92 - __test_icr(vm, x, icr | i); 92 + __test_icr(x, icr | i); 93 93 94 94 icr = APIC_INT_ASSERT | APIC_DM_FIXED; 95 95 for (i = 0; i 
<= 0xff; i++) 96 - __test_icr(vm, x, icr | i); 96 + __test_icr(x, icr | i); 97 97 98 98 /* 99 99 * Send all flavors of IPIs to non-existent vCPUs. TODO: use number of ··· 102 102 icr = APIC_INT_ASSERT | 0xff; 103 103 for (i = vcpu->id + 1; i < 0xff; i++) { 104 104 for (j = 0; j < 8; j++) 105 - __test_icr(vm, x, i << (32 + 24) | APIC_INT_ASSERT | (j << 8)); 105 + __test_icr(x, i << (32 + 24) | APIC_INT_ASSERT | (j << 8)); 106 106 } 107 107 108 108 /* And again with a shorthand destination for all types of IPIs. */ 109 109 icr = APIC_DEST_ALLBUT | APIC_INT_ASSERT; 110 110 for (i = 0; i < 8; i++) 111 - __test_icr(vm, x, icr | (i << 8)); 111 + __test_icr(x, icr | (i << 8)); 112 112 113 113 /* And a few garbage value, just make sure it's an IRQ (blocked). */ 114 - __test_icr(vm, x, 0xa5a5a5a5a5a5a5a5 & ~APIC_DM_FIXED_MASK); 115 - __test_icr(vm, x, 0x5a5a5a5a5a5a5a5a & ~APIC_DM_FIXED_MASK); 116 - __test_icr(vm, x, -1ull & ~APIC_DM_FIXED_MASK); 114 + __test_icr(x, 0xa5a5a5a5a5a5a5a5 & ~APIC_DM_FIXED_MASK); 115 + __test_icr(x, 0x5a5a5a5a5a5a5a5a & ~APIC_DM_FIXED_MASK); 116 + __test_icr(x, -1ull & ~APIC_DM_FIXED_MASK); 117 117 } 118 118 119 119 int main(int argc, char *argv[]) ··· 127 127 int i; 128 128 129 129 vm = vm_create_with_one_vcpu(&x.vcpu, x2apic_guest_code); 130 - test_icr(vm, &x); 130 + test_icr(&x); 131 131 kvm_vm_free(vm); 132 132 133 133 /* ··· 138 138 vm = vm_create_with_one_vcpu(&x.vcpu, xapic_guest_code); 139 139 x.is_x2apic = false; 140 140 141 - cpuid = vcpu_get_cpuid(vm, x.vcpu->id); 141 + cpuid = vcpu_get_cpuid(x.vcpu); 142 142 for (i = 0; i < cpuid->nent; i++) { 143 143 if (cpuid->entries[i].function == 1) 144 144 break; 145 145 } 146 146 cpuid->entries[i].ecx &= ~BIT(21); 147 - vcpu_set_cpuid(vm, x.vcpu->id, cpuid); 147 + vcpu_set_cpuid(x.vcpu, cpuid); 148 148 149 149 virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA); 150 - test_icr(vm, &x); 150 + test_icr(&x); 151 151 kvm_vm_free(vm); 152 152 }
+19 -19
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
··· 348 348 { 349 349 if (vinfo) 350 350 printf("evtchn_upcall_pending 0x%x\n", vinfo->evtchn_upcall_pending); 351 - vcpu_dump(stdout, vcpu->vm, vcpu->id, 0); 351 + vcpu_dump(stdout, vcpu, 0); 352 352 TEST_FAIL("IRQ delivery timed out"); 353 353 } 354 354 ··· 423 423 .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, 424 424 .u.gpa = VCPU_INFO_ADDR, 425 425 }; 426 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &vi); 426 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vi); 427 427 428 428 struct kvm_xen_vcpu_attr pvclock = { 429 429 .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO, 430 430 .u.gpa = PVTIME_ADDR, 431 431 }; 432 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &pvclock); 432 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &pvclock); 433 433 434 434 struct kvm_xen_hvm_attr vec = { 435 435 .type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR, ··· 438 438 vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec); 439 439 440 440 vm_init_descriptor_tables(vm); 441 - vcpu_init_descriptor_tables(vm, vcpu->id); 441 + vcpu_init_descriptor_tables(vcpu); 442 442 vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler); 443 443 444 444 if (do_runstate_tests) { ··· 446 446 .type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR, 447 447 .u.gpa = RUNSTATE_ADDR, 448 448 }; 449 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &st); 449 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &st); 450 450 } 451 451 452 452 int irq_fd[2] = { -1, -1 }; ··· 522 522 inj.u.evtchn.flags = 0; 523 523 vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj); 524 524 525 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr); 525 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr); 526 526 } 527 527 vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR); 528 528 vinfo->evtchn_upcall_pending = 0; ··· 536 536 volatile struct kvm_run *run = vcpu->run; 537 537 struct ucall uc; 538 538 539 - vcpu_run(vm, vcpu->id); 539 + vcpu_run(vcpu); 540 540 541 541 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 542 542 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", 543 543 
run->exit_reason, 544 544 exit_reason_str(run->exit_reason)); 545 545 546 - switch (get_ucall(vm, vcpu->id, &uc)) { 546 + switch (get_ucall(vcpu, &uc)) { 547 547 case UCALL_ABORT: 548 548 TEST_FAIL("%s", (const char *)uc.args[0]); 549 549 /* NOT REACHED */ ··· 572 572 printf("Testing runstate %s\n", runstate_names[uc.args[1]]); 573 573 rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT; 574 574 rst.u.runstate.state = uc.args[1]; 575 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &rst); 575 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst); 576 576 break; 577 577 578 578 case 4: ··· 587 587 0x6b6b - rs->time[RUNSTATE_offline]; 588 588 rst.u.runstate.time_runnable = -rst.u.runstate.time_blocked - 589 589 rst.u.runstate.time_offline; 590 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &rst); 590 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst); 591 591 break; 592 592 593 593 case 5: ··· 599 599 rst.u.runstate.state_entry_time = 0x6b6b + 0x5a; 600 600 rst.u.runstate.time_blocked = 0x6b6b; 601 601 rst.u.runstate.time_offline = 0x5a; 602 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &rst); 602 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst); 603 603 break; 604 604 605 605 case 6: ··· 700 700 case 14: 701 701 memset(&tmr, 0, sizeof(tmr)); 702 702 tmr.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER; 703 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_GET_ATTR, &tmr); 703 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr); 704 704 TEST_ASSERT(tmr.u.timer.port == EVTCHN_TIMER, 705 705 "Timer port not returned"); 706 706 TEST_ASSERT(tmr.u.timer.priority == KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL, ··· 720 720 printf("Testing restored oneshot timer\n"); 721 721 722 722 tmr.u.timer.expires_ns = rs->state_entry_time + 100000000, 723 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr); 723 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr); 724 724 evtchn_irq_expected = true; 725 725 alarm(1); 726 726 break; ··· 747 747 printf("Testing SCHEDOP_poll wake on masked event\n"); 748 748 749 749 
tmr.u.timer.expires_ns = rs->state_entry_time + 100000000, 750 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr); 750 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr); 751 751 alarm(1); 752 752 break; 753 753 ··· 758 758 759 759 evtchn_irq_expected = true; 760 760 tmr.u.timer.expires_ns = rs->state_entry_time + 100000000; 761 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr); 761 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr); 762 762 763 763 /* Read it back and check the pending time is reported correctly */ 764 764 tmr.u.timer.expires_ns = 0; 765 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_GET_ATTR, &tmr); 765 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr); 766 766 TEST_ASSERT(tmr.u.timer.expires_ns == rs->state_entry_time + 100000000, 767 767 "Timer not reported pending"); 768 768 alarm(1); ··· 772 772 TEST_ASSERT(!evtchn_irq_expected, 773 773 "Expected event channel IRQ but it didn't happen"); 774 774 /* Read timer and check it is no longer pending */ 775 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_GET_ATTR, &tmr); 775 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr); 776 776 TEST_ASSERT(!tmr.u.timer.expires_ns, "Timer still reported pending"); 777 777 778 778 shinfo->evtchn_pending[0] = 0; ··· 781 781 782 782 evtchn_irq_expected = true; 783 783 tmr.u.timer.expires_ns = rs->state_entry_time - 100000000ULL; 784 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr); 784 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr); 785 785 alarm(1); 786 786 break; 787 787 ··· 851 851 struct kvm_xen_vcpu_attr rst = { 852 852 .type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA, 853 853 }; 854 - vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_GET_ATTR, &rst); 854 + vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &rst); 855 855 856 856 if (verbose) { 857 857 printf("Runstate: %s(%d), entry %" PRIu64 " ns\n",
+3 -3
tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
··· 90 90 } 91 91 92 92 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 93 - vcpu_set_hv_cpuid(vm, vcpu->id); 93 + vcpu_set_hv_cpuid(vcpu); 94 94 95 95 struct kvm_xen_hvm_config hvmc = { 96 96 .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL, ··· 107 107 volatile struct kvm_run *run = vcpu->run; 108 108 struct ucall uc; 109 109 110 - vcpu_run(vm, vcpu->id); 110 + vcpu_run(vcpu); 111 111 112 112 if (run->exit_reason == KVM_EXIT_XEN) { 113 113 ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL); ··· 129 129 run->exit_reason, 130 130 exit_reason_str(run->exit_reason)); 131 131 132 - switch (get_ucall(vm, vcpu->id, &uc)) { 132 + switch (get_ucall(vcpu, &uc)) { 133 133 case UCALL_ABORT: 134 134 TEST_FAIL("%s", (const char *)uc.args[0]); 135 135 /* NOT REACHED */
+3 -3
tools/testing/selftests/kvm/x86_64/xss_msr_test.c
··· 38 38 exit(KSFT_SKIP); 39 39 } 40 40 41 - xss_val = vcpu_get_msr(vm, vcpu->id, MSR_IA32_XSS); 41 + xss_val = vcpu_get_msr(vcpu, MSR_IA32_XSS); 42 42 TEST_ASSERT(xss_val == 0, 43 43 "MSR_IA32_XSS should be initialized to zero\n"); 44 44 45 - vcpu_set_msr(vm, vcpu->id, MSR_IA32_XSS, xss_val); 45 + vcpu_set_msr(vcpu, MSR_IA32_XSS, xss_val); 46 46 47 47 /* 48 48 * At present, KVM only supports a guest IA32_XSS value of 0. Verify ··· 52 52 */ 53 53 xss_in_msr_list = kvm_msr_is_in_save_restore_list(MSR_IA32_XSS); 54 54 for (i = 0; i < MSR_BITS; ++i) { 55 - r = _vcpu_set_msr(vm, vcpu->id, MSR_IA32_XSS, 1ull << i); 55 + r = _vcpu_set_msr(vcpu, MSR_IA32_XSS, 1ull << i); 56 56 57 57 /* 58 58 * Setting a list of MSRs returns the entry that "faulted", or