Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: Return a value from vcpu_get_reg() instead of using an out-param

Return a uint64_t from vcpu_get_reg() instead of having the caller provide
a pointer to storage, as none of the vcpu_get_reg() usage in KVM selftests
accesses a register larger than 64 bits, and vcpu_set_reg() only accepts a
64-bit value. If a use case comes along that needs to get a register that
is larger than 64 bits, then a utility can be added to assert success and
take a void pointer, but until then, forcing an out param yields ugly code
and prevents feeding the output of vcpu_get_reg() into vcpu_set_reg().

Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Acked-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Link: https://lore.kernel.org/r/20241128005547.4077116-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>

+81 -81
+5 -5
tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
··· 97 97 uint64_t reg_id = raz_wi_reg_ids[i]; 98 98 uint64_t val; 99 99 100 - vcpu_get_reg(vcpu, reg_id, &val); 100 + val = vcpu_get_reg(vcpu, reg_id); 101 101 TEST_ASSERT_EQ(val, 0); 102 102 103 103 /* ··· 106 106 */ 107 107 vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL); 108 108 109 - vcpu_get_reg(vcpu, reg_id, &val); 109 + val = vcpu_get_reg(vcpu, reg_id); 110 110 TEST_ASSERT_EQ(val, 0); 111 111 } 112 112 } ··· 126 126 uint64_t reg_id = raz_invariant_reg_ids[i]; 127 127 uint64_t val; 128 128 129 - vcpu_get_reg(vcpu, reg_id, &val); 129 + val = vcpu_get_reg(vcpu, reg_id); 130 130 TEST_ASSERT_EQ(val, 0); 131 131 132 132 r = __vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL); 133 133 TEST_ASSERT(r < 0 && errno == EINVAL, 134 134 "unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno); 135 135 136 - vcpu_get_reg(vcpu, reg_id, &val); 136 + val = vcpu_get_reg(vcpu, reg_id); 137 137 TEST_ASSERT_EQ(val, 0); 138 138 } 139 139 } ··· 144 144 { 145 145 uint64_t val, el0; 146 146 147 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val); 147 + val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); 148 148 149 149 el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val); 150 150 return el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY;
+2 -2
tools/testing/selftests/kvm/aarch64/debug-exceptions.c
··· 501 501 TEST_ASSERT(ss_enable, "Unexpected KVM_EXIT_DEBUG"); 502 502 503 503 /* Check if the current pc is expected. */ 504 - vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc); 504 + pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc)); 505 505 TEST_ASSERT(!test_pc || pc == test_pc, 506 506 "Unexpected pc 0x%lx (expected 0x%lx)", 507 507 pc, test_pc); ··· 583 583 uint64_t aa64dfr0; 584 584 585 585 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 586 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &aa64dfr0); 586 + aa64dfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1)); 587 587 __TEST_REQUIRE(debug_version(aa64dfr0) >= 6, 588 588 "Armv8 debug architecture not supported."); 589 589 kvm_vm_free(vm);
+3 -3
tools/testing/selftests/kvm/aarch64/hypercalls.c
··· 173 173 const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i]; 174 174 175 175 /* First 'read' should be an upper limit of the features supported */ 176 - vcpu_get_reg(vcpu, reg_info->reg, &val); 176 + val = vcpu_get_reg(vcpu, reg_info->reg); 177 177 TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), 178 178 "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx", 179 179 reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val); ··· 184 184 "Failed to clear all the features of reg: 0x%lx; ret: %d", 185 185 reg_info->reg, errno); 186 186 187 - vcpu_get_reg(vcpu, reg_info->reg, &val); 187 + val = vcpu_get_reg(vcpu, reg_info->reg); 188 188 TEST_ASSERT(val == 0, 189 189 "Expected all the features to be cleared for reg: 0x%lx", reg_info->reg); 190 190 ··· 214 214 * Before starting the VM, the test clears all the bits. 215 215 * Check if that's still the case. 216 216 */ 217 - vcpu_get_reg(vcpu, reg_info->reg, &val); 217 + val = vcpu_get_reg(vcpu, reg_info->reg); 218 218 TEST_ASSERT(val == 0, 219 219 "Expected all the features to be cleared for reg: 0x%lx", 220 220 reg_info->reg);
+1 -1
tools/testing/selftests/kvm/aarch64/no-vgic-v3.c
··· 164 164 uint64_t pfr0; 165 165 166 166 vm = vm_create_with_one_vcpu(&vcpu, NULL); 167 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &pfr0); 167 + pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); 168 168 __TEST_REQUIRE(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), pfr0), 169 169 "GICv3 not supported."); 170 170 kvm_vm_free(vm);
+4 -4
tools/testing/selftests/kvm/aarch64/psci_test.c
··· 111 111 { 112 112 uint64_t obs_pc, obs_x0; 113 113 114 - vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &obs_pc); 115 - vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]), &obs_x0); 114 + obs_pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc)); 115 + obs_x0 = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0])); 116 116 117 117 TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR, 118 118 "unexpected target cpu pc: %lx (expected: %lx)", ··· 152 152 */ 153 153 vcpu_power_off(target); 154 154 155 - vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr); 155 + target_mpidr = vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1)); 156 156 vcpu_args_set(source, 1, target_mpidr & MPIDR_HWID_BITMASK); 157 157 enter_guest(source); 158 158 ··· 244 244 245 245 setup_vm(guest_test_system_off2, &source, &target); 246 246 247 - vcpu_get_reg(target, KVM_REG_ARM_PSCI_VERSION, &psci_version); 247 + psci_version = vcpu_get_reg(target, KVM_REG_ARM_PSCI_VERSION); 248 248 249 249 TEST_ASSERT(psci_version >= PSCI_VERSION(1, 3), 250 250 "Unexpected PSCI version %lu.%lu",
+11 -11
tools/testing/selftests/kvm/aarch64/set_id_regs.c
··· 346 346 uint64_t mask = ftr_bits->mask; 347 347 uint64_t val, new_val, ftr; 348 348 349 - vcpu_get_reg(vcpu, reg, &val); 349 + val = vcpu_get_reg(vcpu, reg); 350 350 ftr = (val & mask) >> shift; 351 351 352 352 ftr = get_safe_value(ftr_bits, ftr); ··· 356 356 val |= ftr; 357 357 358 358 vcpu_set_reg(vcpu, reg, val); 359 - vcpu_get_reg(vcpu, reg, &new_val); 359 + new_val = vcpu_get_reg(vcpu, reg); 360 360 TEST_ASSERT_EQ(new_val, val); 361 361 362 362 return new_val; ··· 370 370 uint64_t val, old_val, ftr; 371 371 int r; 372 372 373 - vcpu_get_reg(vcpu, reg, &val); 373 + val = vcpu_get_reg(vcpu, reg); 374 374 ftr = (val & mask) >> shift; 375 375 376 376 ftr = get_invalid_value(ftr_bits, ftr); ··· 384 384 TEST_ASSERT(r < 0 && errno == EINVAL, 385 385 "Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno); 386 386 387 - vcpu_get_reg(vcpu, reg, &val); 387 + val = vcpu_get_reg(vcpu, reg); 388 388 TEST_ASSERT_EQ(val, old_val); 389 389 } 390 390 ··· 471 471 } 472 472 473 473 /* Get the id register value */ 474 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val); 474 + val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); 475 475 476 476 /* Try to set MPAM=0. This should always be possible. */ 477 477 val &= ~ID_AA64PFR0_EL1_MPAM_MASK; ··· 508 508 } 509 509 510 510 /* Get the id register value */ 511 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), &val); 511 + val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1)); 512 512 513 513 /* Try to set MPAM_frac=0. This should always be possible. */ 514 514 val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK; ··· 576 576 uint64_t clidr; 577 577 int level; 578 578 579 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1), &clidr); 579 + clidr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1)); 580 580 581 581 /* find the first empty level in the cache hierarchy */ 582 582 for (level = 1; level < 7; level++) { ··· 601 601 { 602 602 u64 ctr; 603 603 604 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0), &ctr); 604 + ctr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0)); 605 605 ctr &= ~CTR_EL0_DIC_MASK; 606 606 if (ctr & CTR_EL0_IminLine_MASK) 607 607 ctr--; ··· 617 617 test_clidr(vcpu); 618 618 test_ctr(vcpu); 619 619 620 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &val); 620 + val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1)); 621 621 val++; 622 622 vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), val); 623 623 ··· 630 630 size_t idx = encoding_to_range_idx(encoding); 631 631 uint64_t observed; 632 632 633 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding), &observed); 633 + observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding)); 634 634 TEST_ASSERT_EQ(test_reg_vals[idx], observed); 635 635 } 636 636 ··· 665 665 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 666 666 667 667 /* Check for AARCH64 only system */ 668 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val); 668 + val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); 669 669 el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val); 670 670 aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+9 -10
tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
··· 440 440 "Failed to create vgic-v3, skipping"); 441 441 442 442 /* Make sure that PMUv3 support is indicated in the ID register */ 443 - vcpu_get_reg(vpmu_vm.vcpu, 444 - KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0); 443 + dfr0 = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1)); 445 444 pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0); 446 445 TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && 447 446 pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP, ··· 483 484 create_vpmu_vm(guest_code); 484 485 vcpu = vpmu_vm.vcpu; 485 486 486 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig); 487 + pmcr_orig = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0)); 487 488 pmcr = pmcr_orig; 488 489 489 490 /* ··· 492 493 */ 493 494 set_pmcr_n(&pmcr, pmcr_n); 494 495 vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr); 495 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr); 496 + pmcr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0)); 496 497 497 498 if (expect_fail) 498 499 TEST_ASSERT(pmcr_orig == pmcr, ··· 520 521 vcpu = vpmu_vm.vcpu; 521 522 522 523 /* Save the initial sp to restore them later to run the guest again */ 523 - vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp); 524 + sp = vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1)); 524 525 525 526 run_vcpu(vcpu, pmcr_n); 526 527 ··· 571 572 * Test if the 'set' and 'clr' variants of the registers 572 573 * are initialized based on the number of valid counters. 573 574 */ 574 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val); 575 + reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id)); 575 576 TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0, 576 577 "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx", 577 578 KVM_ARM64_SYS_REG(set_reg_id), reg_val); 578 579 579 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val); 580 + reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id)); 580 581 TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0, 581 582 "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx", 582 583 KVM_ARM64_SYS_REG(clr_reg_id), reg_val); ··· 588 589 */ 589 590 vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), max_counters_mask); 590 591 591 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val); 592 + reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id)); 592 593 TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0, 593 594 "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx", 594 595 KVM_ARM64_SYS_REG(set_reg_id), reg_val); 595 596 596 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val); 597 + reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id)); 597 598 TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0, 598 599 "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx", 599 600 KVM_ARM64_SYS_REG(clr_reg_id), reg_val); ··· 624 625 uint64_t pmcr; 625 626 626 627 create_vpmu_vm(guest_code); 627 - vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr); 628 + pmcr = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0)); 628 629 destroy_vpmu_vm(); 629 630 return get_pmcr_n(pmcr); 630 631 }
+4 -2
tools/testing/selftests/kvm/include/kvm_util.h
··· 702 702 703 703 return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg); 704 704 } 705 - static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr) 705 + static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id) 706 706 { 707 - struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr }; 707 + uint64_t val; 708 + struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; 708 709 709 710 vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg); 711 + return val; 710 712 } 711 713 static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) 712 714 {
+4 -4
tools/testing/selftests/kvm/lib/aarch64/processor.c
··· 281 281 */ 282 282 vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20); 283 283 284 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1); 285 - vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1); 284 + sctlr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1)); 285 + tcr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1)); 286 286 287 287 /* Configure base granule size */ 288 288 switch (vm->mode) { ··· 360 360 { 361 361 uint64_t pstate, pc; 362 362 363 - vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate); 364 - vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc); 363 + pstate = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate)); 364 + pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc)); 365 365 366 366 fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n", 367 367 indent, "", pstate, pc);
+33 -33
tools/testing/selftests/kvm/lib/riscv/processor.c
··· 221 221 { 222 222 struct kvm_riscv_core core; 223 223 224 - vcpu_get_reg(vcpu, RISCV_CORE_REG(mode), &core.mode); 225 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &core.regs.pc); 226 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra), &core.regs.ra); 227 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp), &core.regs.sp); 228 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp), &core.regs.gp); 229 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp), &core.regs.tp); 230 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0), &core.regs.t0); 231 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1), &core.regs.t1); 232 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2), &core.regs.t2); 233 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s0), &core.regs.s0); 234 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s1), &core.regs.s1); 235 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a0), &core.regs.a0); 236 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a1), &core.regs.a1); 237 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a2), &core.regs.a2); 238 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a3), &core.regs.a3); 239 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a4), &core.regs.a4); 240 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a5), &core.regs.a5); 241 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a6), &core.regs.a6); 242 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a7), &core.regs.a7); 243 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s2), &core.regs.s2); 244 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s3), &core.regs.s3); 245 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s4), &core.regs.s4); 246 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s5), &core.regs.s5); 247 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s6), &core.regs.s6); 248 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s7), &core.regs.s7); 249 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s8), &core.regs.s8); 250 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s9), &core.regs.s9); 251 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s10), &core.regs.s10); 252 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s11), &core.regs.s11); 253 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t3), &core.regs.t3); 254 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4), &core.regs.t4); 255 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t5), &core.regs.t5); 256 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6), &core.regs.t6); 224 + core.mode = vcpu_get_reg(vcpu, RISCV_CORE_REG(mode)); 225 + core.regs.pc = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc)); 226 + core.regs.ra = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra)); 227 + core.regs.sp = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp)); 228 + core.regs.gp = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp)); 229 + core.regs.tp = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp)); 230 + core.regs.t0 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0)); 231 + core.regs.t1 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1)); 232 + core.regs.t2 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2)); 233 + core.regs.s0 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s0)); 234 + core.regs.s1 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s1)); 235 + core.regs.a0 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a0)); 236 + core.regs.a1 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a1)); 237 + core.regs.a2 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a2)); 238 + core.regs.a3 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a3)); 239 + core.regs.a4 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a4)); 240 + core.regs.a5 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a5)); 241 + core.regs.a6 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a6)); 242 + core.regs.a7 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a7)); 243 + core.regs.s2 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s2)); 244 + core.regs.s3 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s3)); 245 + core.regs.s4 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s4)); 246 + core.regs.s5 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s5)); 247 + core.regs.s6 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s6)); 248 + core.regs.s7 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s7)); 249 + core.regs.s8 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s8)); 250 + core.regs.s9 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s9)); 251 + core.regs.s10 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s10)); 252 + core.regs.s11 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s11)); 253 + core.regs.t3 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t3)); 254 + core.regs.t4 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4)); 255 + core.regs.t5 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t5)); 256 + core.regs.t6 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6)); 257 257 258 258 fprintf(stream, 259 259 " MODE: 0x%lx\n", core.mode);
+1 -1
tools/testing/selftests/kvm/riscv/arch_timer.c
··· 93 93 vcpu_init_vector_tables(vcpus[i]); 94 94 95 95 /* Initialize guest timer frequency. */ 96 - vcpu_get_reg(vcpus[0], RISCV_TIMER_REG(frequency), &timer_freq); 96 + timer_freq = vcpu_get_reg(vcpus[0], RISCV_TIMER_REG(frequency)); 97 97 sync_global_to_guest(vm, timer_freq); 98 98 pr_debug("timer_freq: %lu\n", timer_freq); 99 99
+1 -1
tools/testing/selftests/kvm/riscv/ebreak_test.c
··· 60 60 61 61 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_DEBUG); 62 62 63 - vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &pc); 63 + pc = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc)); 64 64 TEST_ASSERT_EQ(pc, LABEL_ADDRESS(sw_bp_1)); 65 65 66 66 /* skip sw_bp_1 */
+1 -1
tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
··· 608 608 609 609 vcpu_init_vector_tables(vcpu); 610 610 /* Initialize guest timer frequency. */ 611 - vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency), &timer_freq); 611 + timer_freq = vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency)); 612 612 sync_global_to_guest(vm, timer_freq); 613 613 614 614 run_vcpu(vcpu);
+1 -1
tools/testing/selftests/kvm/s390x/resets.c
··· 61 61 { 62 62 uint64_t eval_reg; 63 63 64 - vcpu_get_reg(vcpu, id, &eval_reg); 64 + eval_reg = vcpu_get_reg(vcpu, id); 65 65 TEST_ASSERT(eval_reg == value, "value == 0x%lx", value); 66 66 } 67 67
+1 -2
tools/testing/selftests/kvm/steal_time.c
··· 269 269 static bool is_steal_time_supported(struct kvm_vcpu *vcpu) 270 270 { 271 271 uint64_t id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA); 272 - unsigned long enabled; 272 + unsigned long enabled = vcpu_get_reg(vcpu, id); 273 273 274 - vcpu_get_reg(vcpu, id, &enabled); 275 274 TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result"); 276 275 277 276 return enabled;