Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch kvm-arm64/mmu/vmid-cleanups into kvmarm-master/next

* kvm-arm64/mmu/vmid-cleanups:
: Clean up the stage-2 configuration by providing a single helper,
: and tidy up some of the ordering requirements for the VMID
: allocator.
KVM: arm64: Upgrade VMID accesses to {READ,WRITE}_ONCE
KVM: arm64: Unify stage-2 programming behind __load_stage2()
KVM: arm64: Move kern_hyp_va() usage in __load_guest_stage2() into the callers

Signed-off-by: Marc Zyngier <maz@kernel.org>

+25 -22
+9 -8
arch/arm64/include/asm/kvm_mmu.h
··· 252 252 253 253 #define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr) 254 254 255 + /* 256 + * When this is (directly or indirectly) used on the TLB invalidation 257 + * path, we rely on a previously issued DSB so that page table updates 258 + * and VMID reads are correctly ordered. 259 + */ 255 260 static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu) 256 261 { 257 262 struct kvm_vmid *vmid = &mmu->vmid; ··· 264 259 u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0; 265 260 266 261 baddr = mmu->pgd_phys; 267 - vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT; 262 + vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT; 268 263 return kvm_phys_to_vttbr(baddr) | vmid_field | cnp; 269 264 } 270 265 ··· 272 267 * Must be called from hyp code running at EL2 with an updated VTTBR 273 268 * and interrupts disabled. 274 269 */ 275 - static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr) 270 + static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, 271 + struct kvm_arch *arch) 276 272 { 277 - write_sysreg(vtcr, vtcr_el2); 273 + write_sysreg(arch->vtcr, vtcr_el2); 278 274 write_sysreg(kvm_get_vttbr(mmu), vttbr_el2); 279 275 280 276 /* ··· 284 278 * the guest. 285 279 */ 286 280 asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT)); 287 - } 288 - 289 - static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu) 290 - { 291 - __load_stage2(mmu, kern_hyp_va(mmu->arch)->vtcr); 292 281 } 293 282 294 283 static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
+1 -1
arch/arm64/kvm/arm.c
··· 573 573 kvm_call_hyp(__kvm_flush_vm_context); 574 574 } 575 575 576 - vmid->vmid = kvm_next_vmid; 576 + WRITE_ONCE(vmid->vmid, kvm_next_vmid); 577 577 kvm_next_vmid++; 578 578 kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1; 579 579
+1 -1
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
··· 60 60 static __always_inline void __load_host_stage2(void) 61 61 { 62 62 if (static_branch_likely(&kvm_protected_mode_initialized)) 63 - __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr); 63 + __load_stage2(&host_kvm.arch.mmu, &host_kvm.arch); 64 64 else 65 65 write_sysreg(0, vttbr_el2); 66 66 }
+3 -3
arch/arm64/kvm/hyp/nvhe/mem_protect.c
··· 112 112 mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd); 113 113 mmu->arch = &host_kvm.arch; 114 114 mmu->pgt = &host_kvm.pgt; 115 - mmu->vmid.vmid_gen = 0; 116 - mmu->vmid.vmid = 0; 115 + WRITE_ONCE(mmu->vmid.vmid_gen, 0); 116 + WRITE_ONCE(mmu->vmid.vmid, 0); 117 117 118 118 return 0; 119 119 } ··· 129 129 kvm_flush_dcache_to_poc(params, sizeof(*params)); 130 130 131 131 write_sysreg(params->hcr_el2, hcr_el2); 132 - __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr); 132 + __load_stage2(&host_kvm.arch.mmu, &host_kvm.arch); 133 133 134 134 /* 135 135 * Make sure to have an ISB before the TLB maintenance below but only
+3 -1
arch/arm64/kvm/hyp/nvhe/switch.c
··· 170 170 { 171 171 struct kvm_cpu_context *host_ctxt; 172 172 struct kvm_cpu_context *guest_ctxt; 173 + struct kvm_s2_mmu *mmu; 173 174 bool pmu_switch_needed; 174 175 u64 exit_code; 175 176 ··· 214 213 __sysreg32_restore_state(vcpu); 215 214 __sysreg_restore_state_nvhe(guest_ctxt); 216 215 217 - __load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu)); 216 + mmu = kern_hyp_va(vcpu->arch.hw_mmu); 217 + __load_stage2(mmu, kern_hyp_va(mmu->arch)); 218 218 __activate_traps(vcpu); 219 219 220 220 __hyp_vgic_restore_state(vcpu);
+2 -2
arch/arm64/kvm/hyp/nvhe/tlb.c
··· 34 34 } 35 35 36 36 /* 37 - * __load_guest_stage2() includes an ISB only when the AT 37 + * __load_stage2() includes an ISB only when the AT 38 38 * workaround is applied. Take care of the opposite condition, 39 39 * ensuring that we always have an ISB, but not two ISBs back 40 40 * to back. 41 41 */ 42 - __load_guest_stage2(mmu); 42 + __load_stage2(mmu, kern_hyp_va(mmu->arch)); 43 43 asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT)); 44 44 } 45 45
+3 -3
arch/arm64/kvm/hyp/vhe/switch.c
··· 124 124 * 125 125 * We have already configured the guest's stage 1 translation in 126 126 * kvm_vcpu_load_sysregs_vhe above. We must now call 127 - * __load_guest_stage2 before __activate_traps, because 128 - * __load_guest_stage2 configures stage 2 translation, and 127 + * __load_stage2 before __activate_traps, because 128 + * __load_stage2 configures stage 2 translation, and 129 129 * __activate_traps clear HCR_EL2.TGE (among other things). 130 130 */ 131 - __load_guest_stage2(vcpu->arch.hw_mmu); 131 + __load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch); 132 132 __activate_traps(vcpu); 133 133 134 134 __kvm_adjust_pc(vcpu);
+2 -2
arch/arm64/kvm/hyp/vhe/tlb.c
··· 50 50 * 51 51 * ARM erratum 1165522 requires some special handling (again), 52 52 * as we need to make sure both stages of translation are in 53 - * place before clearing TGE. __load_guest_stage2() already 53 + * place before clearing TGE. __load_stage2() already 54 54 * has an ISB in order to deal with this. 55 55 */ 56 - __load_guest_stage2(mmu); 56 + __load_stage2(mmu, mmu->arch); 57 57 val = read_sysreg(hcr_el2); 58 58 val &= ~HCR_TGE; 59 59 write_sysreg(val, hcr_el2);
+1 -1
arch/arm64/kvm/mmu.c
··· 532 532 mmu->arch = &kvm->arch; 533 533 mmu->pgt = pgt; 534 534 mmu->pgd_phys = __pa(pgt->pgd); 535 - mmu->vmid.vmid_gen = 0; 535 + WRITE_ONCE(mmu->vmid.vmid_gen, 0); 536 536 return 0; 537 537 538 538 out_destroy_pgtable: