Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: Consolidate stage-2 initialisation into a single function

The initialisation of guest stage-2 page-tables is currently split
across two functions: kvm_init_stage2_mmu() and kvm_arm_setup_stage2().
That is presumably for historical reasons as kvm_arm_setup_stage2()
originates from the (now defunct) KVM port for 32-bit Arm.

Simplify this code path by merging both functions into one, taking care
to map the 'struct kvm' into the hypervisor stage-1 early on in order to
simplify the failure path.

Tested-by: Vincent Donnefort <vdonnefort@google.com>
Co-developed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-19-will@kernel.org

Authored by Quentin Perret and committed by Marc Zyngier
315775ff 717a7eeb

+41 -48
+1 -1
arch/arm64/include/asm/kvm_arm.h
@@ -135,7 +135,7 @@
  * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
  * not known to exist and will break with this configuration.
  *
- * The VTCR_EL2 is configured per VM and is initialised in kvm_arm_setup_stage2().
+ * The VTCR_EL2 is configured per VM and is initialised in kvm_init_stage2_mmu.
  *
  * Note that when using 4K pages, we concatenate two first level page tables
  * together. With 16K pages, we concatenate 16 first level page tables.
-2
arch/arm64/include/asm/kvm_host.h
@@ -990,8 +990,6 @@
 #define __KVM_HAVE_ARCH_VM_ALLOC
 struct kvm *kvm_arch_alloc_vm(void);
 
-int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
-
 static inline bool kvm_vm_is_protected(struct kvm *kvm)
 {
 	return false;
+1 -1
arch/arm64/include/asm/kvm_mmu.h
@@ -166,7 +166,7 @@
 void free_hyp_pgds(void);
 
 void stage2_unmap_vm(struct kvm *kvm);
-int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
+int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size, bool writable);
+13 -14
arch/arm64/kvm/arm.c
@@ -139,27 +139,23 @@
 {
 	int ret;
 
-	ret = kvm_arm_setup_stage2(kvm, type);
-	if (ret)
-		return ret;
-
-	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
-	if (ret)
-		return ret;
-
 	ret = kvm_share_hyp(kvm, kvm + 1);
 	if (ret)
-		goto out_free_stage2_pgd;
+		return ret;
 
 	ret = pkvm_init_host_vm(kvm);
 	if (ret)
-		goto out_free_stage2_pgd;
+		goto err_unshare_kvm;
 
 	if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) {
 		ret = -ENOMEM;
-		goto out_free_stage2_pgd;
+		goto err_unshare_kvm;
 	}
 	cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);
+
+	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
+	if (ret)
+		goto err_free_cpumask;
 
 	kvm_vgic_early_init(kvm);
 
@@ -169,9 +165,12 @@
 	set_default_spectre(kvm);
 	kvm_arm_init_hypercalls(kvm);
 
-	return ret;
-out_free_stage2_pgd:
-	kvm_free_stage2_pgd(&kvm->arch.mmu);
+	return 0;
+
+err_free_cpumask:
+	free_cpumask_var(kvm->arch.supported_cpus);
+err_unshare_kvm:
+	kvm_unshare_hyp(kvm, kvm + 1);
 	return ret;
 }
+26 -1
arch/arm64/kvm/mmu.c
@@ -675,13 +675,38 @@
  * kvm_init_stage2_mmu - Initialise a S2 MMU structure
  * @kvm:	The pointer to the KVM structure
  * @mmu:	The pointer to the s2 MMU structure
+ * @type:	The machine type of the virtual machine
  *
  * Allocates only the stage-2 HW PGD level table(s).
  * Note we don't need locking here as this is only called when the VM is
  * created, which can only be done once.
  */
-int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
+int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type)
 {
+	u32 kvm_ipa_limit = get_kvm_ipa_limit();
 	int cpu, err;
 	struct kvm_pgtable *pgt;
+	u64 mmfr0, mmfr1;
+	u32 phys_shift;
+
+	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+		return -EINVAL;
+
+	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
+	if (phys_shift) {
+		if (phys_shift > kvm_ipa_limit ||
+		    phys_shift < ARM64_MIN_PARANGE_BITS)
+			return -EINVAL;
+	} else {
+		phys_shift = KVM_PHYS_SHIFT;
+		if (phys_shift > kvm_ipa_limit) {
+			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
+				     current->comm);
+			return -EINVAL;
+		}
+	}
+
+	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
 
 	if (mmu->pgt != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
-29
arch/arm64/kvm/reset.c
@@ -395,32 +395,3 @@
 
 	return 0;
 }
-
-int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
-{
-	u64 mmfr0, mmfr1;
-	u32 phys_shift;
-
-	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
-		return -EINVAL;
-
-	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
-	if (phys_shift) {
-		if (phys_shift > kvm_ipa_limit ||
-		    phys_shift < ARM64_MIN_PARANGE_BITS)
-			return -EINVAL;
-	} else {
-		phys_shift = KVM_PHYS_SHIFT;
-		if (phys_shift > kvm_ipa_limit) {
-			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
-				     current->comm);
-			return -EINVAL;
-		}
-	}
-
-	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
-	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
-
-	return 0;
-}