Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: x86: Prevent KVM SVM from loading on kernels with 5-level paging

Disallow loading KVM SVM if 5-level paging is supported. In theory, NPT
for L1 should simply work, but there are unknowns with respect to how the
guest's MAXPHYADDR will be handled by hardware.

Nested NPT is more problematic, as running an L1 VMM that is using
2-level page tables requires stacking single-entry PDP and PML4 tables in
KVM's NPT for L2, as there are no equivalent entries in L1's NPT to
shadow. Barring hardware magic, for 5-level paging, KVM would need to stack
another layer to handle PML5.

Opportunistically rename the lm_root pointer, which is used for the
aforementioned stacking when shadowing 2-level L1 NPT, to pml4_root to
call out that it's specifically for PML4.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210505204221.1934471-1-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

authored by

Sean Christopherson and committed by
Paolo Bonzini
03ca4589 76ea438b

+16 -11
+1 -1
arch/x86/include/asm/kvm_host.h
··· 409 409 u32 pkru_mask; 410 410 411 411 u64 *pae_root; 412 - u64 *lm_root; 412 + u64 *pml4_root; 413 413 414 414 /* 415 415 * check zero bits on shadow page table entries, these
+10 -10
arch/x86/kvm/mmu/mmu.c
··· 3310 3310 if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) { 3311 3311 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; 3312 3312 3313 - if (WARN_ON_ONCE(!mmu->lm_root)) { 3313 + if (WARN_ON_ONCE(!mmu->pml4_root)) { 3314 3314 r = -EIO; 3315 3315 goto out_unlock; 3316 3316 } 3317 3317 3318 - mmu->lm_root[0] = __pa(mmu->pae_root) | pm_mask; 3318 + mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask; 3319 3319 } 3320 3320 3321 3321 for (i = 0; i < 4; ++i) { ··· 3335 3335 } 3336 3336 3337 3337 if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) 3338 - mmu->root_hpa = __pa(mmu->lm_root); 3338 + mmu->root_hpa = __pa(mmu->pml4_root); 3339 3339 else 3340 3340 mmu->root_hpa = __pa(mmu->pae_root); 3341 3341 ··· 3350 3350 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu) 3351 3351 { 3352 3352 struct kvm_mmu *mmu = vcpu->arch.mmu; 3353 - u64 *lm_root, *pae_root; 3353 + u64 *pml4_root, *pae_root; 3354 3354 3355 3355 /* 3356 3356 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP ··· 3369 3369 if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL)) 3370 3370 return -EIO; 3371 3371 3372 - if (mmu->pae_root && mmu->lm_root) 3372 + if (mmu->pae_root && mmu->pml4_root) 3373 3373 return 0; 3374 3374 3375 3375 /* 3376 3376 * The special roots should always be allocated in concert. Yell and 3377 3377 * bail if KVM ends up in a state where only one of the roots is valid. 
3378 3378 */ 3379 - if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->lm_root)) 3379 + if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root)) 3380 3380 return -EIO; 3381 3381 3382 3382 /* ··· 3387 3387 if (!pae_root) 3388 3388 return -ENOMEM; 3389 3389 3390 - lm_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); 3391 - if (!lm_root) { 3390 + pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); 3391 + if (!pml4_root) { 3392 3392 free_page((unsigned long)pae_root); 3393 3393 return -ENOMEM; 3394 3394 } 3395 3395 3396 3396 mmu->pae_root = pae_root; 3397 - mmu->lm_root = lm_root; 3397 + mmu->pml4_root = pml4_root; 3398 3398 3399 3399 return 0; 3400 3400 } ··· 5261 5261 if (!tdp_enabled && mmu->pae_root) 5262 5262 set_memory_encrypted((unsigned long)mmu->pae_root, 1); 5263 5263 free_page((unsigned long)mmu->pae_root); 5264 - free_page((unsigned long)mmu->lm_root); 5264 + free_page((unsigned long)mmu->pml4_root); 5265 5265 } 5266 5266 5267 5267 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+5
arch/x86/kvm/svm/svm.c
··· 447 447 return 0; 448 448 } 449 449 450 + if (pgtable_l5_enabled()) { 451 + pr_info("KVM doesn't yet support 5-level paging on AMD SVM\n"); 452 + return 0; 453 + } 454 + 450 455 return 1; 451 456 } 452 457