
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
"ARM:

- Rework of system register accessors for system registers that are
directly written to memory, so that sanitisation of the in-memory
value happens at the correct time (after the read, or before the
write). For convenience, RMW-style accessors are also provided.

- Multiple fixes for the so-called "arch_timer_edge_cases" selftest,
which was always broken.

x86:

- Make KVM_PRE_FAULT_MEMORY stricter for TDX, allowing userspace to
pass only the "untouched" addresses and flipping the shared/private
bit in the implementation.

- Disable SEV-SNP support on initialization failure"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: x86/mmu: Reject direct bits in gpa passed to KVM_PRE_FAULT_MEMORY
KVM: x86/mmu: Embed direct bits into gpa for KVM_PRE_FAULT_MEMORY
KVM: SEV: Disable SEV-SNP support on initialization failure
KVM: arm64: selftests: Determine effective counter width in arch_timer_edge_cases
KVM: arm64: selftests: Fix xVAL init in arch_timer_edge_cases
KVM: arm64: selftests: Fix thread migration in arch_timer_edge_cases
KVM: arm64: selftests: Fix help text for arch_timer_edge_cases
KVM: arm64: Make __vcpu_sys_reg() a pure rvalue operand
KVM: arm64: Don't use __vcpu_sys_reg() to get the address of a sysreg
KVM: arm64: Add RMW specific sysreg accessor
KVM: arm64: Add assignment-specific sysreg accessor

+194 -126
+28 -6
arch/arm64/include/asm/kvm_host.h
···
 #define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))
 
 u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
-#define __vcpu_sys_reg(v,r)						\
-	(*({								\
+
+#define __vcpu_assign_sys_reg(v, r, val)				\
+	do {								\
 		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
-		u64 *__r = __ctxt_sys_reg(ctxt, (r));			\
+		u64 __v = (val);					\
 		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
-			*__r = kvm_vcpu_apply_reg_masks((v), (r), *__r);\
-		__r;							\
-	}))
+			__v = kvm_vcpu_apply_reg_masks((v), (r), __v);	\
+									\
+		ctxt_sys_reg(ctxt, (r)) = __v;				\
+	} while (0)
+
+#define __vcpu_rmw_sys_reg(v, r, op, val)				\
+	do {								\
+		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
+		u64 __v = ctxt_sys_reg(ctxt, (r));			\
+		__v op (val);						\
+		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
+			__v = kvm_vcpu_apply_reg_masks((v), (r), __v);	\
+									\
+		ctxt_sys_reg(ctxt, (r)) = __v;				\
+	} while (0)
+
+#define __vcpu_sys_reg(v,r)						\
+	({								\
+		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
+		u64 __v = ctxt_sys_reg(ctxt, (r));			\
+		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
+			__v = kvm_vcpu_apply_reg_masks((v), (r), __v);	\
+		__v;							\
+	})
 
 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
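For orientation, a minimal usage sketch (not part of the diff) of how call sites map onto the three accessors; OSLSR_EL1 is just an example register taken from the hunks below:

	/* Before: __vcpu_sys_reg() was an lvalue, so a direct write could
	 * bypass sanitisation, or sanitise at the wrong time. */
	__vcpu_sys_reg(vcpu, OSLSR_EL1) |= OSLSR_EL1_OSLK;

	/* After: reads stay pure rvalues; writes go through accessors that
	 * run kvm_vcpu_apply_reg_masks() before the value lands in memory. */
	u64 val = __vcpu_sys_reg(vcpu, OSLSR_EL1);		  /* read  */
	__vcpu_assign_sys_reg(vcpu, OSLSR_EL1, val);		  /* write */
	__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK); /* RMW   */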
+9 -9
arch/arm64/kvm/arch_timer.c
···
 
 	switch(arch_timer_ctx_index(ctxt)) {
 	case TIMER_VTIMER:
-		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTV_CTL_EL0, ctl);
 		break;
 	case TIMER_PTIMER:
-		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTP_CTL_EL0, ctl);
 		break;
 	case TIMER_HVTIMER:
-		__vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTHV_CTL_EL2, ctl);
 		break;
 	case TIMER_HPTIMER:
-		__vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTHP_CTL_EL2, ctl);
 		break;
 	default:
 		WARN_ON(1);
···
 
 	switch(arch_timer_ctx_index(ctxt)) {
 	case TIMER_VTIMER:
-		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTV_CVAL_EL0, cval);
 		break;
 	case TIMER_PTIMER:
-		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, cval);
 		break;
 	case TIMER_HVTIMER:
-		__vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTHV_CVAL_EL2, cval);
 		break;
 	case TIMER_HPTIMER:
-		__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, cval);
 		break;
 	default:
 		WARN_ON(1);
···
 	if (vcpu_has_nv(vcpu)) {
 		struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;
 
-		offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+		offs->vcpu_offset = __ctxt_sys_reg(&vcpu->arch.ctxt, CNTVOFF_EL2);
 		offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
 	}
+2 -2
arch/arm64/kvm/debug.c
···
 void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
 {
 	if (val & OSLAR_EL1_OSLK)
-		__vcpu_sys_reg(vcpu, OSLSR_EL1) |= OSLSR_EL1_OSLK;
+		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);
 	else
-		__vcpu_sys_reg(vcpu, OSLSR_EL1) &= ~OSLSR_EL1_OSLK;
+		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK);
 
 	preempt_disable();
 	kvm_arch_vcpu_put(vcpu);
+2 -2
arch/arm64/kvm/fpsimd.c
···
 	fp_state.sve_state = vcpu->arch.sve_state;
 	fp_state.sve_vl = vcpu->arch.sve_max_vl;
 	fp_state.sme_state = NULL;
-	fp_state.svcr = &__vcpu_sys_reg(vcpu, SVCR);
-	fp_state.fpmr = &__vcpu_sys_reg(vcpu, FPMR);
+	fp_state.svcr = __ctxt_sys_reg(&vcpu->arch.ctxt, SVCR);
+	fp_state.fpmr = __ctxt_sys_reg(&vcpu->arch.ctxt, FPMR);
 	fp_state.fp_type = &vcpu->arch.fp_type;
 
 	if (vcpu_has_sve(vcpu))
+2 -2
arch/arm64/kvm/hyp/exception.c
···
 	if (unlikely(vcpu_has_nv(vcpu)))
 		vcpu_write_sys_reg(vcpu, val, reg);
 	else if (!__vcpu_write_sys_reg_to_cpu(val, reg))
-		__vcpu_sys_reg(vcpu, reg) = val;
+		__vcpu_assign_sys_reg(vcpu, reg, val);
 }
 
 static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
···
 	} else if (has_vhe()) {
 		write_sysreg_el1(val, SYS_SPSR);
 	} else {
-		__vcpu_sys_reg(vcpu, SPSR_EL1) = val;
+		__vcpu_assign_sys_reg(vcpu, SPSR_EL1, val);
 	}
 }
+2 -2
arch/arm64/kvm/hyp/include/hyp/switch.h
···
 	if (!vcpu_el1_is_32bit(vcpu))
 		return;
 
-	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
+	__vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2));
 }
 
 static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
···
 	 */
 	if (vcpu_has_sve(vcpu)) {
 		zcr_el1 = read_sysreg_el1(SYS_ZCR);
-		__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
+		__vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);
 
 		/*
 		 * The guest's state is always saved using the guest's max VL.
+3 -3
arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
···
 	vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
 	vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);
 
-	__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
-	__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);
+	__vcpu_assign_sys_reg(vcpu, DACR32_EL2, read_sysreg(dacr32_el2));
+	__vcpu_assign_sys_reg(vcpu, IFSR32_EL2, read_sysreg(ifsr32_el2));
 
 	if (has_vhe() || kvm_debug_regs_in_use(vcpu))
-		__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
+		__vcpu_assign_sys_reg(vcpu, DBGVCR32_EL2, read_sysreg(dbgvcr32_el2));
 }
 
 static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
+2 -2
arch/arm64/kvm/hyp/nvhe/hyp-main.c
···
 
 static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
 {
-	__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+	__vcpu_assign_sys_reg(vcpu, ZCR_EL1, read_sysreg_el1(SYS_ZCR));
 	/*
 	 * On saving/restoring guest sve state, always use the maximum VL for
 	 * the guest. The layout of the data when saving the sve state depends
···
 
 	has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
 	if (has_fpmr)
-		__vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR);
+		__vcpu_assign_sys_reg(vcpu, FPMR, read_sysreg_s(SYS_FPMR));
 
 	if (system_supports_sve())
 		__hyp_sve_restore_host();
+2 -2
arch/arm64/kvm/hyp/vhe/switch.c
···
 	 */
 	val = read_sysreg_el0(SYS_CNTP_CVAL);
 	if (map.direct_ptimer == vcpu_ptimer(vcpu))
-		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
+		__vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, val);
 	if (map.direct_ptimer == vcpu_hptimer(vcpu))
-		__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;
+		__vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, val);
 
 	offset = read_sysreg_s(SYS_CNTPOFF_EL2);
+23 -23
arch/arm64/kvm/hyp/vhe/sysreg-sr.c
···
 static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
 {
 	/* These registers are common with EL1 */
-	__vcpu_sys_reg(vcpu, PAR_EL1)	= read_sysreg(par_el1);
-	__vcpu_sys_reg(vcpu, TPIDR_EL1)	= read_sysreg(tpidr_el1);
+	__vcpu_assign_sys_reg(vcpu, PAR_EL1, read_sysreg(par_el1));
+	__vcpu_assign_sys_reg(vcpu, TPIDR_EL1, read_sysreg(tpidr_el1));
 
-	__vcpu_sys_reg(vcpu, ESR_EL2)	= read_sysreg_el1(SYS_ESR);
-	__vcpu_sys_reg(vcpu, AFSR0_EL2)	= read_sysreg_el1(SYS_AFSR0);
-	__vcpu_sys_reg(vcpu, AFSR1_EL2)	= read_sysreg_el1(SYS_AFSR1);
-	__vcpu_sys_reg(vcpu, FAR_EL2)	= read_sysreg_el1(SYS_FAR);
-	__vcpu_sys_reg(vcpu, MAIR_EL2)	= read_sysreg_el1(SYS_MAIR);
-	__vcpu_sys_reg(vcpu, VBAR_EL2)	= read_sysreg_el1(SYS_VBAR);
-	__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2) = read_sysreg_el1(SYS_CONTEXTIDR);
-	__vcpu_sys_reg(vcpu, AMAIR_EL2)	= read_sysreg_el1(SYS_AMAIR);
+	__vcpu_assign_sys_reg(vcpu, ESR_EL2, read_sysreg_el1(SYS_ESR));
+	__vcpu_assign_sys_reg(vcpu, AFSR0_EL2, read_sysreg_el1(SYS_AFSR0));
+	__vcpu_assign_sys_reg(vcpu, AFSR1_EL2, read_sysreg_el1(SYS_AFSR1));
+	__vcpu_assign_sys_reg(vcpu, FAR_EL2, read_sysreg_el1(SYS_FAR));
+	__vcpu_assign_sys_reg(vcpu, MAIR_EL2, read_sysreg_el1(SYS_MAIR));
+	__vcpu_assign_sys_reg(vcpu, VBAR_EL2, read_sysreg_el1(SYS_VBAR));
+	__vcpu_assign_sys_reg(vcpu, CONTEXTIDR_EL2, read_sysreg_el1(SYS_CONTEXTIDR));
+	__vcpu_assign_sys_reg(vcpu, AMAIR_EL2, read_sysreg_el1(SYS_AMAIR));
 
 	/*
 	 * In VHE mode those registers are compatible between EL1 and EL2,
···
 	 * are always trapped, ensuring that the in-memory
 	 * copy is always up-to-date. A small blessing...
 	 */
-	__vcpu_sys_reg(vcpu, SCTLR_EL2)	= read_sysreg_el1(SYS_SCTLR);
-	__vcpu_sys_reg(vcpu, TTBR0_EL2)	= read_sysreg_el1(SYS_TTBR0);
-	__vcpu_sys_reg(vcpu, TTBR1_EL2)	= read_sysreg_el1(SYS_TTBR1);
-	__vcpu_sys_reg(vcpu, TCR_EL2)	= read_sysreg_el1(SYS_TCR);
+	__vcpu_assign_sys_reg(vcpu, SCTLR_EL2, read_sysreg_el1(SYS_SCTLR));
+	__vcpu_assign_sys_reg(vcpu, TTBR0_EL2, read_sysreg_el1(SYS_TTBR0));
+	__vcpu_assign_sys_reg(vcpu, TTBR1_EL2, read_sysreg_el1(SYS_TTBR1));
+	__vcpu_assign_sys_reg(vcpu, TCR_EL2, read_sysreg_el1(SYS_TCR));
 
 	if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
-		__vcpu_sys_reg(vcpu, TCR2_EL2) = read_sysreg_el1(SYS_TCR2);
+		__vcpu_assign_sys_reg(vcpu, TCR2_EL2, read_sysreg_el1(SYS_TCR2));
 
 		if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
-			__vcpu_sys_reg(vcpu, PIRE0_EL2) = read_sysreg_el1(SYS_PIRE0);
-			__vcpu_sys_reg(vcpu, PIR_EL2) = read_sysreg_el1(SYS_PIR);
+			__vcpu_assign_sys_reg(vcpu, PIRE0_EL2, read_sysreg_el1(SYS_PIRE0));
+			__vcpu_assign_sys_reg(vcpu, PIR_EL2, read_sysreg_el1(SYS_PIR));
 		}
 
 		if (ctxt_has_s1poe(&vcpu->arch.ctxt))
-			__vcpu_sys_reg(vcpu, POR_EL2) = read_sysreg_el1(SYS_POR);
+			__vcpu_assign_sys_reg(vcpu, POR_EL2, read_sysreg_el1(SYS_POR));
 	}
 
 	/*
···
 		 */
 		val = read_sysreg_el1(SYS_CNTKCTL);
 		val &= CNTKCTL_VALID_BITS;
-		__vcpu_sys_reg(vcpu, CNTHCTL_EL2) &= ~CNTKCTL_VALID_BITS;
-		__vcpu_sys_reg(vcpu, CNTHCTL_EL2) |= val;
+		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, &=, ~CNTKCTL_VALID_BITS);
+		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, |=, val);
 	}
 
-	__vcpu_sys_reg(vcpu, SP_EL2)	= read_sysreg(sp_el1);
-	__vcpu_sys_reg(vcpu, ELR_EL2)	= read_sysreg_el1(SYS_ELR);
-	__vcpu_sys_reg(vcpu, SPSR_EL2)	= read_sysreg_el1(SYS_SPSR);
+	__vcpu_assign_sys_reg(vcpu, SP_EL2, read_sysreg(sp_el1));
+	__vcpu_assign_sys_reg(vcpu, ELR_EL2, read_sysreg_el1(SYS_ELR));
+	__vcpu_assign_sys_reg(vcpu, SPSR_EL2, read_sysreg_el1(SYS_SPSR));
 }
 
 static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)
+1 -1
arch/arm64/kvm/nested.c
···
 
 out:
 	for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
-		(void)__vcpu_sys_reg(vcpu, sr);
+		__vcpu_rmw_sys_reg(vcpu, sr, |=, 0);
 
 	return 0;
 }
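A note on the |= 0 idiom: __vcpu_sys_reg() is now a pure rvalue, so merely reading a register no longer sanitises its in-memory copy as a side effect; the no-op RMW forces that write-back explicitly. Roughly what __vcpu_rmw_sys_reg(vcpu, sr, |=, 0) expands to, per the macro in kvm_host.h above (ctxt standing in for &vcpu->arch.ctxt):

	u64 __v = ctxt_sys_reg(ctxt, (sr));	/* current stored value */
	__v |= (0);				/* no-op modification */
	if (vcpu_has_nv(vcpu) && sr >= __SANITISED_REG_START__)
		__v = kvm_vcpu_apply_reg_masks(vcpu, sr, __v);
	ctxt_sys_reg(ctxt, (sr)) = __v;		/* sanitised value written back */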
+12 -12
arch/arm64/kvm/pmu-emul.c
···
 		val |= lower_32_bits(val);
 	}
 
-	__vcpu_sys_reg(vcpu, reg) = val;
+	__vcpu_assign_sys_reg(vcpu, reg, val);
 
 	/* Recreate the perf event to reflect the updated sample_period */
 	kvm_pmu_create_perf_event(pmc);
···
 void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
 	kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
-	__vcpu_sys_reg(vcpu, counter_index_to_reg(select_idx)) = val;
+	__vcpu_assign_sys_reg(vcpu, counter_index_to_reg(select_idx), val);
 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 }
···
 
 	reg = counter_index_to_reg(pmc->idx);
 
-	__vcpu_sys_reg(vcpu, reg) = val;
+	__vcpu_assign_sys_reg(vcpu, reg, val);
 
 	kvm_pmu_release_perf_event(pmc);
 }
···
 		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
 		if (!kvm_pmc_is_64bit(pmc))
 			reg = lower_32_bits(reg);
-		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
+		__vcpu_assign_sys_reg(vcpu, counter_index_to_reg(i), reg);
 
 		/* No overflow? move on */
 		if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
 			continue;
 
 		/* Mark overflow */
-		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+		__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(i));
 
 		if (kvm_pmu_counter_can_chain(pmc))
 			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
···
 	perf_event->attr.sample_period = period;
 	perf_event->hw.sample_period = period;
 
-	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
+	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx));
 
 	if (kvm_pmu_counter_can_chain(pmc))
 		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
···
 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 
 	/* The reset bits don't indicate any state, and shouldn't be saved. */
-	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
+	__vcpu_assign_sys_reg(vcpu, PMCR_EL0, (val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P)));
 
 	if (val & ARMV8_PMU_PMCR_C)
 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
···
 	u64 reg;
 
 	reg = counter_index_to_evtreg(pmc->idx);
-	__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
+	__vcpu_assign_sys_reg(vcpu, reg, (data & kvm_pmu_evtyper_mask(vcpu->kvm)));
 
 	kvm_pmu_create_perf_event(pmc);
 }
···
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
 
-	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
-	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
-	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, mask);
+	__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, mask);
+	__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, mask);
 
 	kvm_pmu_reprogram_counter_mask(vcpu, mask);
 }
···
 			u64 val = __vcpu_sys_reg(vcpu, MDCR_EL2);
 			val &= ~MDCR_EL2_HPMN;
 			val |= FIELD_PREP(MDCR_EL2_HPMN, kvm->arch.nr_pmu_counters);
-			__vcpu_sys_reg(vcpu, MDCR_EL2) = val;
+			__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
 		}
 	}
 }
+31 -29
arch/arm64/kvm/sys_regs.c
···
 	 * to reverse-translate virtual EL2 system registers for a
 	 * non-VHE guest hypervisor.
 	 */
-	__vcpu_sys_reg(vcpu, reg) = val;
+	__vcpu_assign_sys_reg(vcpu, reg, val);
 
 	switch (reg) {
 	case CNTHCTL_EL2:
···
 		return;
 
 memory_write:
-	__vcpu_sys_reg(vcpu, reg) = val;
+	__vcpu_assign_sys_reg(vcpu, reg, val);
 }
 
 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
···
 	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
 		return -EINVAL;
 
-	__vcpu_sys_reg(vcpu, rd->reg) = val;
+	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
 	return 0;
 }
···
 		mask |= GENMASK(n - 1, 0);
 
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= mask;
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
···
 static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
···
 		return 0;
 
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
···
 static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK;
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
···
 	 * The value of PMCR.N field is included when the
 	 * vCPU register is read via kvm_vcpu_read_pmcr().
 	 */
-	__vcpu_sys_reg(vcpu, r->reg) = pmcr;
+	__vcpu_assign_sys_reg(vcpu, r->reg, pmcr);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
···
 		return false;
 
 	if (p->is_write)
-		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
+		__vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
 	else
 		/* return PMSELR.SEL field */
 		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
···
 {
 	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
 
-	__vcpu_sys_reg(vcpu, r->reg) = val & mask;
+	__vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 
 	return 0;
···
 		val = p->regval & mask;
 		if (r->Op2 & 0x1)
 			/* accessing PMCNTENSET_EL0 */
-			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
+			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
 		else
 			/* accessing PMCNTENCLR_EL0 */
-			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
+			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
 
 		kvm_pmu_reprogram_counter_mask(vcpu, val);
 	} else {
···
 
 		if (r->Op2 & 0x1)
 			/* accessing PMINTENSET_EL1 */
-			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
+			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
 		else
 			/* accessing PMINTENCLR_EL1 */
-			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
+			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
 	}
···
 	if (p->is_write) {
 		if (r->CRm & 0x2)
 			/* accessing PMOVSSET_EL0 */
-			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
+			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
 		else
 			/* accessing PMOVSCLR_EL0 */
-			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
+			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 	}
···
 		if (!vcpu_mode_priv(vcpu))
 			return undef_access(vcpu, p, r);
 
-		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
-			p->regval & ARMV8_PMU_USERENR_MASK;
+		__vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
+				      (p->regval & ARMV8_PMU_USERENR_MASK));
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
 			    & ARMV8_PMU_USERENR_MASK;
···
 	if (!kvm_supports_32bit_el0())
 		val |= ARMV8_PMU_PMCR_LC;
 
-	__vcpu_sys_reg(vcpu, r->reg) = val;
+	__vcpu_assign_sys_reg(vcpu, r->reg, val);
 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 
 	return 0;
···
 	if (kvm_has_mte(vcpu->kvm))
 		clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);
 
-	__vcpu_sys_reg(vcpu, r->reg) = clidr;
+	__vcpu_assign_sys_reg(vcpu, r->reg, clidr);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
···
 	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
 		return -EINVAL;
 
-	__vcpu_sys_reg(vcpu, rd->reg) = val;
+	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
 
 	return 0;
 }
···
 			  const struct sys_reg_desc *r)
 {
 	if (p->is_write)
-		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
+		__vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval);
 	else
 		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
 
···
 			   const struct sys_reg_desc *r)
 {
 	if (p->is_write)
-		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
+		__vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval);
 	else
 		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
 
···
 			 const struct sys_reg_desc *r)
 {
 	if (p->is_write)
-		__vcpu_sys_reg(vcpu, CNTKCTL_EL1) = p->regval;
+		__vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval);
 	else
 		p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);
 
···
 	if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
 		val |= HCR_E2H;
 
-	return __vcpu_sys_reg(vcpu, r->reg) = val;
+	__vcpu_assign_sys_reg(vcpu, r->reg, val);
+
+	return __vcpu_sys_reg(vcpu, r->reg);
 }
 
 static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu,
···
 		u64_replace_bits(val, hpmn, MDCR_EL2_HPMN);
 	}
 
-	__vcpu_sys_reg(vcpu, MDCR_EL2) = val;
+	__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
 
 	/*
 	 * Request a reload of the PMU to enable/disable the counters
···
 
 static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
-	__vcpu_sys_reg(vcpu, r->reg) = vcpu->kvm->arch.nr_pmu_counters;
+	__vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters);
 	return vcpu->kvm->arch.nr_pmu_counters;
 }
···
 			r->reset(vcpu, r);
 
 		if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
-			(void)__vcpu_sys_reg(vcpu, r->reg);
+			__vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
 	}
 
 	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
···
 	if (r->set_user) {
 		ret = (r->set_user)(vcpu, r, val);
 	} else {
-		__vcpu_sys_reg(vcpu, r->reg) = val;
+		__vcpu_assign_sys_reg(vcpu, r->reg, val);
 		ret = 0;
 	}
+2 -2
arch/arm64/kvm/sys_regs.h
···
 {
 	BUG_ON(!r->reg);
 	BUG_ON(r->reg >= NR_SYS_REGS);
-	__vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
+	__vcpu_assign_sys_reg(vcpu, r->reg, 0x1de7ec7edbadc0deULL);
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
···
 {
 	BUG_ON(!r->reg);
 	BUG_ON(r->reg >= NR_SYS_REGS);
-	__vcpu_sys_reg(vcpu, r->reg) = r->val;
+	__vcpu_assign_sys_reg(vcpu, r->reg, r->val);
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
+5 -5
arch/arm64/kvm/vgic/vgic-v3-nested.c
···
 	val = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
 	val &= ~ICH_HCR_EL2_EOIcount_MASK;
 	val |= (s_cpu_if->vgic_hcr & ICH_HCR_EL2_EOIcount_MASK);
-	__vcpu_sys_reg(vcpu, ICH_HCR_EL2) = val;
-	__vcpu_sys_reg(vcpu, ICH_VMCR_EL2) = s_cpu_if->vgic_vmcr;
+	__vcpu_assign_sys_reg(vcpu, ICH_HCR_EL2, val);
+	__vcpu_assign_sys_reg(vcpu, ICH_VMCR_EL2, s_cpu_if->vgic_vmcr);
 
 	for (i = 0; i < 4; i++) {
-		__vcpu_sys_reg(vcpu, ICH_AP0RN(i)) = s_cpu_if->vgic_ap0r[i];
-		__vcpu_sys_reg(vcpu, ICH_AP1RN(i)) = s_cpu_if->vgic_ap1r[i];
+		__vcpu_assign_sys_reg(vcpu, ICH_AP0RN(i), s_cpu_if->vgic_ap0r[i]);
+		__vcpu_assign_sys_reg(vcpu, ICH_AP1RN(i), s_cpu_if->vgic_ap1r[i]);
 	}
 
 	for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
···
 		val &= ~ICH_LR_STATE;
 		val |= s_cpu_if->vgic_lr[i] & ICH_LR_STATE;
 
-		__vcpu_sys_reg(vcpu, ICH_LRN(i)) = val;
+		__vcpu_assign_sys_reg(vcpu, ICH_LRN(i), val);
 		s_cpu_if->vgic_lr[i] = 0;
 	}
+8 -1
arch/x86/kvm/mmu/mmu.c
···
 {
 	u64 error_code = PFERR_GUEST_FINAL_MASK;
 	u8 level = PG_LEVEL_4K;
+	u64 direct_bits;
 	u64 end;
 	int r;
 
 	if (!vcpu->kvm->arch.pre_fault_allowed)
 		return -EOPNOTSUPP;
+
+	if (kvm_is_gfn_alias(vcpu->kvm, gpa_to_gfn(range->gpa)))
+		return -EINVAL;
 
 	/*
 	 * reload is efficient when called repeatedly, so we can do it on
···
 	if (r)
 		return r;
 
+	direct_bits = 0;
 	if (kvm_arch_has_private_mem(vcpu->kvm) &&
 	    kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa)))
 		error_code |= PFERR_PRIVATE_ACCESS;
+	else
+		direct_bits = gfn_to_gpa(kvm_gfn_direct_bits(vcpu->kvm));
 
 	/*
 	 * Shadow paging uses GVA for kvm page fault, so restrict to
 	 * two-dimensional paging.
 	 */
-	r = kvm_tdp_map_page(vcpu, range->gpa, error_code, &level);
+	r = kvm_tdp_map_page(vcpu, range->gpa | direct_bits, error_code, &level);
 	if (r < 0)
 		return r;
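To illustrate the new contract from the userspace side, a hedged sketch (vcpu_fd and the addresses are hypothetical; struct kvm_pre_fault_memory is the existing uAPI type): the caller now passes the plain "untouched" GPA, and KVM folds the shared/private direct bits in itself, rejecting GPAs that already carry an alias bit.

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Pre-fault a guest range. 'vcpu_fd' is a hypothetical vCPU file
	 * descriptor obtained via KVM_CREATE_VCPU. */
	static int pre_fault_range(int vcpu_fd, __u64 gpa, __u64 size)
	{
		struct kvm_pre_fault_memory range = {
			.gpa  = gpa,	/* untouched GPA: no shared/private bit */
			.size = size,
		};

		/* KVM ORs in the direct bits (kvm_gfn_direct_bits()) itself,
		 * and returns -EINVAL for aliased GPAs. */
		return ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range);
	}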
+35 -9
arch/x86/kvm/svm/sev.c
···
 	}
 }
 
+static bool is_sev_snp_initialized(void)
+{
+	struct sev_user_data_snp_status *status;
+	struct sev_data_snp_addr buf;
+	bool initialized = false;
+	int ret, error = 0;
+
+	status = snp_alloc_firmware_page(GFP_KERNEL | __GFP_ZERO);
+	if (!status)
+		return false;
+
+	buf.address = __psp_pa(status);
+	ret = sev_do_cmd(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &error);
+	if (ret) {
+		pr_err("SEV: SNP_PLATFORM_STATUS failed ret=%d, fw_error=%d (%#x)\n",
+		       ret, error, error);
+		goto out;
+	}
+
+	initialized = !!status->state;
+
+out:
+	snp_free_firmware_page(status);
+
+	return initialized;
+}
+
 void __init sev_hardware_setup(void)
 {
 	unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
···
 	sev_snp_supported = sev_snp_enabled && cc_platform_has(CC_ATTR_HOST_SEV_SNP);
 
 out:
+	if (sev_enabled) {
+		init_args.probe = true;
+		if (sev_platform_init(&init_args))
+			sev_supported = sev_es_supported = sev_snp_supported = false;
+		else if (sev_snp_supported)
+			sev_snp_supported = is_sev_snp_initialized();
+	}
+
 	if (boot_cpu_has(X86_FEATURE_SEV))
 		pr_info("SEV %s (ASIDs %u - %u)\n",
 			sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
···
 	sev_supported_vmsa_features = 0;
 	if (sev_es_debug_swap_enabled)
 		sev_supported_vmsa_features |= SVM_SEV_FEAT_DEBUG_SWAP;
-
-	if (!sev_enabled)
-		return;
-
-	/*
-	 * Do both SNP and SEV initialization at KVM module load.
-	 */
-	init_args.probe = true;
-	sev_platform_init(&init_args);
 }
 
 void sev_hardware_unsetup(void)
+25 -14
tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c
···
 #include "gic.h"
 #include "vgic.h"
 
-static const uint64_t CVAL_MAX = ~0ULL;
+/* Depends on counter width. */
+static uint64_t CVAL_MAX;
 /* tval is a signed 32-bit int. */
 static const int32_t TVAL_MAX = INT32_MAX;
 static const int32_t TVAL_MIN = INT32_MIN;
···
 /* After how much time we say there is no IRQ. */
 static const uint32_t TIMEOUT_NO_IRQ_US = 50000;
 
-/* A nice counter value to use as the starting one for most tests. */
-static const uint64_t DEF_CNT = (CVAL_MAX / 2);
+/* Counter value to use as the starting one for most tests. Set to CVAL_MAX/2 */
+static uint64_t DEF_CNT;
 
 /* Number of runs. */
 static const uint32_t NR_TEST_ITERS_DEF = 5;
···
 {
 	atomic_set(&shared_data.handled, 0);
 	atomic_set(&shared_data.spurious, 0);
-	timer_set_ctl(timer, ctl);
 	timer_set_tval(timer, tval_cycles);
+	timer_set_ctl(timer, ctl);
 }
 
 static void set_xval_irq(enum arch_timer timer, uint64_t xval, uint32_t ctl,
···
 		test_set_cnt_after_tval(timer, 0, tval, (uint64_t) tval + 1,
 					wm);
 	}
-
-	for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
-		sleep_method_t sm = sleep_method[i];
-
-		test_set_cnt_after_cval_no_irq(timer, 0, DEF_CNT, CVAL_MAX, sm);
-	}
 }
 
 /*
···
 	GUEST_DONE();
 }
 
+static cpu_set_t default_cpuset;
+
 static uint32_t next_pcpu(void)
 {
 	uint32_t max = get_nprocs();
 	uint32_t cur = sched_getcpu();
 	uint32_t next = cur;
-	cpu_set_t cpuset;
+	cpu_set_t cpuset = default_cpuset;
 
 	TEST_ASSERT(max > 1, "Need at least two physical cpus");
-
-	sched_getaffinity(0, sizeof(cpuset), &cpuset);
 
 	do {
 		next = (next + 1) % CPU_SETSIZE;
···
 	test_init_timer_irq(*vm, *vcpu);
 	vgic_v3_setup(*vm, 1, 64);
 	sync_global_to_guest(*vm, test_args);
+	sync_global_to_guest(*vm, CVAL_MAX);
+	sync_global_to_guest(*vm, DEF_CNT);
 }
 
 static void test_print_help(char *name)
···
 	pr_info("\t-b: Test both physical and virtual timers (default: true)\n");
 	pr_info("\t-l: Delta (in ms) used for long wait time test (default: %u)\n",
 		LONG_WAIT_TEST_MS);
-	pr_info("\t-l: Delta (in ms) used for wait times (default: %u)\n",
+	pr_info("\t-w: Delta (in ms) used for wait times (default: %u)\n",
 		WAIT_TEST_MS);
 	pr_info("\t-p: Test physical timer (default: true)\n");
 	pr_info("\t-v: Test virtual timer (default: true)\n");
···
 	return false;
 }
 
+static void set_counter_defaults(void)
+{
+	const uint64_t MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600;
+	uint64_t freq = read_sysreg(CNTFRQ_EL0);
+	uint64_t width = ilog2(MIN_ROLLOVER_SECS * freq);
+
+	width = clamp(width, 56, 64);
+	CVAL_MAX = GENMASK_ULL(width - 1, 0);
+	DEF_CNT = CVAL_MAX / 2;
+}
+
 int main(int argc, char *argv[])
 {
 	struct kvm_vcpu *vcpu;
···
 	if (!parse_args(argc, argv))
 		exit(KSFT_SKIP);
+
+	sched_getaffinity(0, sizeof(default_cpuset), &default_cpuset);
+	set_counter_defaults();
 
 	if (test_args.test_virtual) {
 		test_vm_create(&vm, &vcpu, VIRTUAL);
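As a sanity check on set_counter_defaults(), a worked example with a hypothetical 1 GHz counter (CNTFRQ_EL0 = 1000000000):

	/* MIN_ROLLOVER_SECS * freq = 1,261,440,000 * 1e9 ≈ 1.26e18 ≈ 2^60.1
	 * ilog2(...) = 60, already inside the [56, 64] clamp -> width = 60
	 * CVAL_MAX   = GENMASK_ULL(59, 0) = 2^60 - 1
	 * DEF_CNT    = CVAL_MAX / 2 ≈ 2^59
	 *
	 * i.e. the test only uses counter values that cannot roll over for at
	 * least ~40 years, whatever the effective counter width turns out to be.
	 */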