Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: Consolidate KVM_{G,S}ET_ONE_REG helpers

Rework vcpu_{g,s}et_reg() to provide the APIs that tests actually want to
use, and drop the three "one-off" implementations that cropped up due to
the poor API.

Ignore the handful of direct KVM_{G,S}ET_ONE_REG calls that don't fit the
APIs for one reason or another.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Sean Christopherson; committed by Paolo Bonzini.
bfff0f60 45f56808

+94 -133
+1 -1
tools/testing/selftests/kvm/aarch64/debug-exceptions.c
··· 242 242 { 243 243 uint64_t id_aa64dfr0; 244 244 245 - get_reg(vcpu->vm, vcpu->id, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0); 245 + vcpu_get_reg(vcpu->vm, vcpu->id, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0); 246 246 return id_aa64dfr0 & 0xf; 247 247 } 248 248
+1 -1
tools/testing/selftests/kvm/aarch64/get-reg-list.c
··· 458 458 bool reject_reg = false; 459 459 int ret; 460 460 461 - ret = __vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, &reg); 461 + ret = __vcpu_get_reg(vm, 0, reg_list->reg[i], &addr); 462 462 if (ret) { 463 463 printf("%s: Failed to get ", config_name(c)); 464 464 print_reg(c, reg.id);
+6 -26
tools/testing/selftests/kvm/aarch64/hypercalls.c
··· 141 141 GUEST_DONE(); 142 142 } 143 143 144 - static int set_fw_reg(struct kvm_vm *vm, uint64_t id, uint64_t val) 145 - { 146 - struct kvm_one_reg reg = { 147 - .id = id, 148 - .addr = (uint64_t)&val, 149 - }; 150 - 151 - return __vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg); 152 - } 153 - 154 - static void get_fw_reg(struct kvm_vm *vm, uint64_t id, uint64_t *addr) 155 - { 156 - struct kvm_one_reg reg = { 157 - .id = id, 158 - .addr = (uint64_t)addr, 159 - }; 160 - 161 - vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, &reg); 162 - } 163 - 164 144 struct st_time { 165 145 uint32_t rev; 166 146 uint32_t attr; ··· 176 196 const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i]; 177 197 178 198 /* First 'read' should be an upper limit of the features supported */ 179 - get_fw_reg(vm, reg_info->reg, &val); 199 + vcpu_get_reg(vm, 0, reg_info->reg, &val); 180 200 TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), 181 201 "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n", 182 202 reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val); 183 203 184 204 /* Test a 'write' by disabling all the features of the register map */ 185 - ret = set_fw_reg(vm, reg_info->reg, 0); 205 + ret = __vcpu_set_reg(vm, 0, reg_info->reg, 0); 186 206 TEST_ASSERT(ret == 0, 187 207 "Failed to clear all the features of reg: 0x%lx; ret: %d\n", 188 208 reg_info->reg, errno); 189 209 190 - get_fw_reg(vm, reg_info->reg, &val); 210 + vcpu_get_reg(vm, 0, reg_info->reg, &val); 191 211 TEST_ASSERT(val == 0, 192 212 "Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg); 193 213 ··· 196 216 * Avoid this check if all the bits are occupied. 
197 217 */ 198 218 if (reg_info->max_feat_bit < 63) { 199 - ret = set_fw_reg(vm, reg_info->reg, BIT(reg_info->max_feat_bit + 1)); 219 + ret = __vcpu_set_reg(vm, 0, reg_info->reg, BIT(reg_info->max_feat_bit + 1)); 200 220 TEST_ASSERT(ret != 0 && errno == EINVAL, 201 221 "Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n", 202 222 errno, reg_info->reg); ··· 217 237 * Before starting the VM, the test clears all the bits. 218 238 * Check if that's still the case. 219 239 */ 220 - get_fw_reg(vm, reg_info->reg, &val); 240 + vcpu_get_reg(vm, 0, reg_info->reg, &val); 221 241 TEST_ASSERT(val == 0, 222 242 "Expected all the features to be cleared for reg: 0x%lx\n", 223 243 reg_info->reg); ··· 227 247 * the registers and should return EBUSY. Set the registers and check for 228 248 * the expected errno. 229 249 */ 230 - ret = set_fw_reg(vm, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit)); 250 + ret = __vcpu_set_reg(vm, 0, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit)); 231 251 TEST_ASSERT(ret != 0 && errno == EBUSY, 232 252 "Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n", 233 253 errno, reg_info->reg);
+3 -3
tools/testing/selftests/kvm/aarch64/psci_test.c
··· 102 102 { 103 103 uint64_t obs_pc, obs_x0; 104 104 105 - get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.pc), &obs_pc); 106 - get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.regs[0]), &obs_x0); 105 + vcpu_get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.pc), &obs_pc); 106 + vcpu_get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.regs[0]), &obs_x0); 107 107 108 108 TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR, 109 109 "unexpected target cpu pc: %lx (expected: %lx)", ··· 143 143 */ 144 144 vcpu_power_off(target); 145 145 146 - get_reg(vm, target->id, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr); 146 + vcpu_get_reg(vm, target->id, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr); 147 147 vcpu_args_set(vm, source->id, 1, target_mpidr & MPIDR_HWID_BITMASK); 148 148 enter_guest(source); 149 149
+1 -17
tools/testing/selftests/kvm/include/aarch64/processor.h
··· 19 19 /* 20 20 * KVM_ARM64_SYS_REG(sys_reg_id): Helper macro to convert 21 21 * SYS_* register definitions in asm/sysreg.h to use in KVM 22 - * calls such as get_reg() and set_reg(). 22 + * calls such as vcpu_get_reg() and vcpu_set_reg(). 23 23 */ 24 24 #define KVM_ARM64_SYS_REG(sys_reg_id) \ 25 25 ARM64_SYS_REG(sys_reg_Op0(sys_reg_id), \ ··· 46 46 (0xbbul << (5 * 8))) 47 47 48 48 #define MPIDR_HWID_BITMASK (0xff00fffffful) 49 - 50 - static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t *addr) 51 - { 52 - struct kvm_one_reg reg; 53 - reg.id = id; 54 - reg.addr = (uint64_t)addr; 55 - vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg); 56 - } 57 - 58 - static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t val) 59 - { 60 - struct kvm_one_reg reg; 61 - reg.id = id; 62 - reg.addr = (uint64_t)&val; 63 - vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg); 64 - } 65 49 66 50 void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init); 67 51 struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+25 -5
tools/testing/selftests/kvm/include/kvm_util_base.h
··· 374 374 { 375 375 vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu); 376 376 } 377 - static inline void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, 378 - struct kvm_one_reg *reg) 377 + 378 + static inline int __vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, 379 + uint64_t reg_id, void *addr) 379 380 { 380 - vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg); 381 + struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)addr }; 382 + 383 + return __vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg); 384 + } 385 + static inline int __vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, 386 + uint64_t reg_id, uint64_t val) 387 + { 388 + struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)&val }; 389 + 390 + return __vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg); 391 + } 392 + static inline void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, 393 + uint64_t reg_id, void *addr) 394 + { 395 + struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)addr }; 396 + 397 + vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg); 381 398 } 382 399 static inline void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, 383 - struct kvm_one_reg *reg) 400 + uint64_t reg_id, uint64_t val) 384 401 { 385 - vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg); 402 + struct kvm_one_reg reg = { .id = reg_id, .addr = (uint64_t)&val }; 403 + 404 + vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg); 386 405 } 406 + 387 407 #ifdef __KVM_HAVE_VCPU_EVENTS 388 408 static inline void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, 389 409 struct kvm_vcpu_events *events)
-20
tools/testing/selftests/kvm/include/riscv/processor.h
··· 38 38 KVM_REG_RISCV_TIMER_REG(name), \ 39 39 KVM_REG_SIZE_U64) 40 40 41 - static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, 42 - unsigned long *addr) 43 - { 44 - struct kvm_one_reg reg; 45 - 46 - reg.id = id; 47 - reg.addr = (unsigned long)addr; 48 - vcpu_get_reg(vm, vcpuid, &reg); 49 - } 50 - 51 - static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, 52 - unsigned long val) 53 - { 54 - struct kvm_one_reg reg; 55 - 56 - reg.id = id; 57 - reg.addr = (unsigned long)&val; 58 - vcpu_set_reg(vm, vcpuid, &reg); 59 - } 60 - 61 41 /* L3 index Bit[47:39] */ 62 42 #define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL 63 43 #define PGTBL_L3_INDEX_SHIFT 39
+14 -14
tools/testing/selftests/kvm/lib/aarch64/processor.c
··· 232 232 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15 233 233 * registers, which the variable argument list macros do. 234 234 */ 235 - set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20); 235 + vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20); 236 236 237 - get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1); 238 - get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1); 237 + vcpu_get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1); 238 + vcpu_get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1); 239 239 240 240 /* Configure base granule size */ 241 241 switch (vm->mode) { ··· 296 296 tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12); 297 297 tcr_el1 |= (64 - vm->va_bits) /* T0SZ */; 298 298 299 - set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1); 300 - set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1); 301 - set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1); 302 - set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd); 303 - set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpuid); 299 + vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1); 300 + vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1); 301 + vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1); 302 + vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd); 303 + vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpuid); 304 304 } 305 305 306 306 void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) 307 307 { 308 308 uint64_t pstate, pc; 309 309 310 - get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate); 311 - get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc); 310 + vcpu_get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate); 311 + vcpu_get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc); 312 312 313 313 fprintf(stream, "%*spstate: 0x%.16lx pc: 
0x%.16lx\n", 314 314 indent, "", pstate, pc); ··· 326 326 327 327 aarch64_vcpu_setup(vm, vcpu_id, init); 328 328 329 - set_reg(vm, vcpu_id, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size); 330 - set_reg(vm, vcpu_id, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); 329 + vcpu_set_reg(vm, vcpu_id, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size); 330 + vcpu_set_reg(vm, vcpu_id, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); 331 331 332 332 return vcpu; 333 333 } ··· 349 349 va_start(ap, num); 350 350 351 351 for (i = 0; i < num; i++) { 352 - set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]), 352 + vcpu_set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]), 353 353 va_arg(ap, uint64_t)); 354 354 } 355 355 ··· 389 389 { 390 390 extern char vectors; 391 391 392 - set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors); 392 + vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors); 393 393 } 394 394 395 395 void route_exception(struct ex_regs *regs, int vector)
+42 -42
tools/testing/selftests/kvm/lib/riscv/processor.c
··· 198 198 satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN; 199 199 satp |= SATP_MODE_48; 200 200 201 - set_reg(vm, vcpuid, RISCV_CSR_REG(satp), satp); 201 + vcpu_set_reg(vm, vcpuid, RISCV_CSR_REG(satp), satp); 202 202 } 203 203 204 204 void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) 205 205 { 206 206 struct kvm_riscv_core core; 207 207 208 - get_reg(vm, vcpuid, RISCV_CORE_REG(mode), &core.mode); 209 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc), &core.regs.pc); 210 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.ra), &core.regs.ra); 211 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp), &core.regs.sp); 212 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), &core.regs.gp); 213 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.tp), &core.regs.tp); 214 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t0), &core.regs.t0); 215 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t1), &core.regs.t1); 216 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t2), &core.regs.t2); 217 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s0), &core.regs.s0); 218 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s1), &core.regs.s1); 219 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a0), &core.regs.a0); 220 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a1), &core.regs.a1); 221 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a2), &core.regs.a2); 222 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a3), &core.regs.a3); 223 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a4), &core.regs.a4); 224 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a5), &core.regs.a5); 225 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a6), &core.regs.a6); 226 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a7), &core.regs.a7); 227 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s2), &core.regs.s2); 228 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s3), &core.regs.s3); 229 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s4), &core.regs.s4); 230 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s5), &core.regs.s5); 231 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s6), &core.regs.s6); 232 - 
get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s7), &core.regs.s7); 233 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s8), &core.regs.s8); 234 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s9), &core.regs.s9); 235 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s10), &core.regs.s10); 236 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s11), &core.regs.s11); 237 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t3), &core.regs.t3); 238 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t4), &core.regs.t4); 239 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t5), &core.regs.t5); 240 - get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t6), &core.regs.t6); 208 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(mode), &core.mode); 209 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc), &core.regs.pc); 210 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.ra), &core.regs.ra); 211 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp), &core.regs.sp); 212 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), &core.regs.gp); 213 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.tp), &core.regs.tp); 214 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t0), &core.regs.t0); 215 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t1), &core.regs.t1); 216 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t2), &core.regs.t2); 217 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s0), &core.regs.s0); 218 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s1), &core.regs.s1); 219 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a0), &core.regs.a0); 220 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a1), &core.regs.a1); 221 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a2), &core.regs.a2); 222 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a3), &core.regs.a3); 223 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a4), &core.regs.a4); 224 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a5), &core.regs.a5); 225 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a6), &core.regs.a6); 226 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a7), &core.regs.a7); 227 + vcpu_get_reg(vm, vcpuid, 
RISCV_CORE_REG(regs.s2), &core.regs.s2); 228 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s3), &core.regs.s3); 229 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s4), &core.regs.s4); 230 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s5), &core.regs.s5); 231 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s6), &core.regs.s6); 232 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s7), &core.regs.s7); 233 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s8), &core.regs.s8); 234 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s9), &core.regs.s9); 235 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s10), &core.regs.s10); 236 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s11), &core.regs.s11); 237 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t3), &core.regs.t3); 238 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t4), &core.regs.t4); 239 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t5), &core.regs.t5); 240 + vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t6), &core.regs.t6); 241 241 242 242 fprintf(stream, 243 243 " MODE: 0x%lx\n", core.mode); ··· 302 302 /* Setup global pointer of guest to be same as the host */ 303 303 asm volatile ( 304 304 "add %0, gp, zero" : "=r" (current_gp) : : "memory"); 305 - set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.gp), current_gp); 305 + vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.gp), current_gp); 306 306 307 307 /* Setup stack pointer and program counter of guest */ 308 - set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.sp), 309 - stack_vaddr + stack_size); 310 - set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.pc), 311 - (unsigned long)guest_code); 308 + vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.sp), 309 + stack_vaddr + stack_size); 310 + vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.pc), 311 + (unsigned long)guest_code); 312 312 313 313 /* Setup default exception vector of guest */ 314 - set_reg(vm, vcpu_id, RISCV_CSR_REG(stvec), 315 - (unsigned long)guest_unexp_trap); 314 + vcpu_set_reg(vm, vcpu_id, RISCV_CSR_REG(stvec), 315 + (unsigned 
long)guest_unexp_trap); 316 316 317 317 return vcpu; 318 318 } ··· 355 355 id = RISCV_CORE_REG(regs.a7); 356 356 break; 357 357 } 358 - set_reg(vm, vcpuid, id, va_arg(ap, uint64_t)); 358 + vcpu_set_reg(vm, vcpuid, id, va_arg(ap, uint64_t)); 359 359 } 360 360 361 361 va_end(ap);
+1 -4
tools/testing/selftests/kvm/s390x/resets.c
··· 61 61 62 62 static void test_one_reg(uint64_t id, uint64_t value) 63 63 { 64 - struct kvm_one_reg reg; 65 64 uint64_t eval_reg; 66 65 67 - reg.addr = (uintptr_t)&eval_reg; 68 - reg.id = id; 69 - vcpu_get_reg(vm, VCPU_ID, &reg); 66 + vcpu_get_reg(vm, VCPU_ID, id, &eval_reg); 70 67 TEST_ASSERT(eval_reg == value, "value == 0x%lx", value); 71 68 } 72 69