Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

KVM: x86/mmu: Use MMU's role_regs, not vCPU state, to compute mmu_role

Use the provided role_regs to calculate the mmu_role instead of pulling
bits from current vCPU state. For some flows, e.g. nested TDP, the vCPU
state may not be correct (or relevant).

Cc: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622175739.3610207-24-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
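
To make the intent concrete, here is a minimal standalone C sketch (not KVM code) of the pattern this patch moves to: the caller snapshots CR0/CR4/EFER into a regs struct, and the role is computed only from that snapshot, so a flow such as nested NPT can pass L1's register values even when they differ from the vCPU's current state. The struct and helper names below are simplified stand-ins for kvm_mmu_role_regs and the ____is_*() accessors used in the patch.

    /*
     * Standalone sketch, not KVM code: derive the MMU role from a register
     * snapshot instead of from live vCPU state. Types and fields are
     * simplified placeholders; only the bit positions are the real x86 ones.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CR0_PG  (1ULL << 31)   /* CR0.PG   */
    #define CR4_PAE (1ULL << 5)    /* CR4.PAE  */
    #define EFER_NX (1ULL << 11)   /* EFER.NXE */

    struct mmu_role_regs {          /* analogous to kvm_mmu_role_regs */
            uint64_t cr0, cr4, efer;
    };

    struct mmu_role {               /* tiny stand-in for kvm_mmu_role */
            bool cr0_pg, cr4_pae, efer_nx;
    };

    /* The role is computed purely from the snapshot, never from "current" state. */
    static struct mmu_role calc_role(const struct mmu_role_regs *regs)
    {
            struct mmu_role role = {
                    .cr0_pg  = regs->cr0  & CR0_PG,
                    .cr4_pae = regs->cr4  & CR4_PAE,
                    .efer_nx = regs->efer & EFER_NX,
            };
            return role;
    }

    int main(void)
    {
            /* A nested-NPT-style caller hands in L1's values explicitly... */
            struct mmu_role_regs l1 = { .cr0 = CR0_PG, .cr4 = CR4_PAE, .efer = EFER_NX };
            /* ...which may differ from whatever the vCPU currently holds. */
            struct mmu_role_regs current = { 0 };

            struct mmu_role a = calc_role(&l1);
            struct mmu_role b = calc_role(&current);

            printf("l1:      pg=%d pae=%d nx=%d\n", a.cr0_pg, a.cr4_pae, a.efer_nx);
            printf("current: pg=%d pae=%d nx=%d\n", b.cr0_pg, b.cr4_pae, b.efer_nx);
            return 0;
    }

In the real patch (diff below), vcpu_to_role_regs() builds that snapshot from the vCPU for the common paths, while kvm_init_shadow_npt_mmu() fills it from the cr0/cr4/efer arguments supplied by the nested flow.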

Authored by Sean Christopherson, committed by Paolo Bonzini
8626c120 cd6767c3

+52 -40
arch/x86/kvm/mmu/mmu.c
···
 	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
 }
 
-static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
+static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
+							  struct kvm_mmu_role_regs *regs)
 {
 	union kvm_mmu_extended_role ext = {0};
 
-	ext.cr0_pg = !!is_paging(vcpu);
-	ext.cr4_pae = !!is_pae(vcpu);
-	ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
-	ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
-	ext.cr4_pse = !!is_pse(vcpu);
-	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
-	ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
+	ext.cr0_pg = ____is_cr0_pg(regs);
+	ext.cr4_pae = ____is_cr4_pae(regs);
+	ext.cr4_smep = ____is_cr4_smep(regs);
+	ext.cr4_smap = ____is_cr4_smap(regs);
+	ext.cr4_pse = ____is_cr4_pse(regs);
+	ext.cr4_pke = ____is_cr4_pke(regs);
+	ext.cr4_la57 = ____is_cr4_la57(regs);
 
 	ext.valid = 1;
···
 }
 
 static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
+						   struct kvm_mmu_role_regs *regs,
 						   bool base_only)
 {
 	union kvm_mmu_role role = {0};
 
 	role.base.access = ACC_ALL;
-	role.base.nxe = !!is_nx(vcpu);
-	role.base.cr0_wp = is_write_protection(vcpu);
+	role.base.nxe = ____is_efer_nx(regs);
+	role.base.cr0_wp = ____is_cr0_wp(regs);
 	role.base.smm = is_smm(vcpu);
 	role.base.guest_mode = is_guest_mode(vcpu);
 
 	if (base_only)
 		return role;
 
-	role.ext = kvm_calc_mmu_role_ext(vcpu);
+	role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
 
 	return role;
 }
···
 }
 
 static union kvm_mmu_role
-kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
+				struct kvm_mmu_role_regs *regs, bool base_only)
 {
-	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
 
 	role.base.ad_disabled = (shadow_accessed_mask == 0);
 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
···
 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
+	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 	union kvm_mmu_role new_role =
-		kvm_calc_tdp_mmu_root_page_role(vcpu, false);
+		kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
 
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
···
 }
 
 static union kvm_mmu_role
-kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
+				      struct kvm_mmu_role_regs *regs, bool base_only)
 {
-	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
 
-	role.base.smep_andnot_wp = role.ext.cr4_smep &&
-		!is_write_protection(vcpu);
-	role.base.smap_andnot_wp = role.ext.cr4_smap &&
-		!is_write_protection(vcpu);
-	role.base.gpte_is_8_bytes = !!is_pae(vcpu);
+	role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
+	role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
+	role.base.gpte_is_8_bytes = ____is_cr4_pae(regs);
 
 	return role;
 }
 
 static union kvm_mmu_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
+				   struct kvm_mmu_role_regs *regs, bool base_only)
 {
 	union kvm_mmu_role role =
-		kvm_calc_shadow_root_page_role_common(vcpu, base_only);
+		kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
 
-	role.base.direct = !is_paging(vcpu);
+	role.base.direct = !____is_cr0_pg(regs);
 
-	if (!is_long_mode(vcpu))
+	if (!____is_efer_lma(regs))
 		role.base.level = PT32E_ROOT_LEVEL;
-	else if (is_la57_mode(vcpu))
+	else if (____is_cr4_la57(regs))
 		role.base.level = PT64_ROOT_5LEVEL;
 	else
 		role.base.level = PT64_ROOT_4LEVEL;
···
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 	union kvm_mmu_role new_role =
-		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+		kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
 
 	if (new_role.as_u64 != context->mmu_role.as_u64)
 		shadow_mmu_init_context(vcpu, context, regs, new_role);
 }
 
 static union kvm_mmu_role
-kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
+kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
+				   struct kvm_mmu_role_regs *regs)
 {
 	union kvm_mmu_role role =
-		kvm_calc_shadow_root_page_role_common(vcpu, false);
+		kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
 
 	role.base.direct = false;
 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
···
 		.cr4 = cr4,
 		.efer = efer,
 	};
-	union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
+	union kvm_mmu_role new_role;
+
+	new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
 
 	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
···
 	context->inject_page_fault = kvm_inject_page_fault;
 }
 
-static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
+static union kvm_mmu_role
+kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
 {
-	union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);
+	union kvm_mmu_role role;
+
+	role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
 
 	/*
 	 * Nested MMUs are used only for walking L2's gva->gpa, they never have
···
 	 */
 	role.base.direct = true;
 
-	if (!is_paging(vcpu))
+	if (!____is_cr0_pg(regs))
 		role.base.level = 0;
-	else if (is_long_mode(vcpu))
-		role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
-						       PT64_ROOT_4LEVEL;
-	else if (is_pae(vcpu))
+	else if (____is_efer_lma(regs))
+		role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
+							  PT64_ROOT_4LEVEL;
+	else if (____is_cr4_pae(regs))
 		role.base.level = PT32E_ROOT_LEVEL;
 	else
 		role.base.level = PT32_ROOT_LEVEL;
···
 
 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 {
-	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
+	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
+	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
···
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
 {
+	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 	union kvm_mmu_role role;
 
 	if (tdp_enabled)
-		role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
+		role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
 	else
-		role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
+		role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
 
 	return role.base;
 }