Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: x86/mmu: Rename page_header() to to_shadow_page()

Rename KVM's accessor for retrieving a 'struct kvm_mmu_page' from the
associated host physical address to better convey what the function is
doing.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200622202034.15093-7-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Sean Christopherson and committed by Paolo Bonzini.
e47c4aee 57354682

+15 -15
+10 -10
arch/x86/kvm/mmu/mmu.c
··· 2193 2193 continue; 2194 2194 } 2195 2195 2196 - child = page_header(ent & PT64_BASE_ADDR_MASK); 2196 + child = to_shadow_page(ent & PT64_BASE_ADDR_MASK); 2197 2197 2198 2198 if (child->unsync_children) { 2199 2199 if (mmu_pages_add(pvec, child, i)) ··· 2647 2647 * so we should update the spte at this point to get 2648 2648 * a new sp with the correct access. 2649 2649 */ 2650 - child = page_header(*sptep & PT64_BASE_ADDR_MASK); 2650 + child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK); 2651 2651 if (child->role.access == direct_access) 2652 2652 return; 2653 2653 ··· 2669 2669 if (is_large_pte(pte)) 2670 2670 --kvm->stat.lpages; 2671 2671 } else { 2672 - child = page_header(pte & PT64_BASE_ADDR_MASK); 2672 + child = to_shadow_page(pte & PT64_BASE_ADDR_MASK); 2673 2673 drop_parent_pte(child, spte); 2674 2674 } 2675 2675 return true; ··· 3127 3127 struct kvm_mmu_page *child; 3128 3128 u64 pte = *sptep; 3129 3129 3130 - child = page_header(pte & PT64_BASE_ADDR_MASK); 3130 + child = to_shadow_page(pte & PT64_BASE_ADDR_MASK); 3131 3131 drop_parent_pte(child, sptep); 3132 3132 flush = true; 3133 3133 } else if (pfn != spte_to_pfn(*sptep)) { ··· 3632 3632 if (!VALID_PAGE(*root_hpa)) 3633 3633 return; 3634 3634 3635 - sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK); 3635 + sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK); 3636 3636 --sp->root_count; 3637 3637 if (!sp->root_count && sp->role.invalid) 3638 3638 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); ··· 3862 3862 3863 3863 if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { 3864 3864 hpa_t root = vcpu->arch.mmu->root_hpa; 3865 - sp = page_header(root); 3865 + sp = to_shadow_page(root); 3866 3866 3867 3867 /* 3868 3868 * Even if another CPU was marking the SP as unsync-ed ··· 3896 3896 3897 3897 if (root && VALID_PAGE(root)) { 3898 3898 root &= PT64_BASE_ADDR_MASK; 3899 - sp = page_header(root); 3899 + sp = to_shadow_page(root); 3900 3900 mmu_sync_children(vcpu, sp); 3901 3901 } 3902 3902 } ··· 4248 
4248 union kvm_mmu_page_role role) 4249 4249 { 4250 4250 return (role.direct || pgd == root->pgd) && 4251 - VALID_PAGE(root->hpa) && page_header(root->hpa) && 4252 - role.word == page_header(root->hpa)->role.word; 4251 + VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) && 4252 + role.word == to_shadow_page(root->hpa)->role.word; 4253 4253 } 4254 4254 4255 4255 /* ··· 4334 4334 */ 4335 4335 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); 4336 4336 4337 - __clear_sp_write_flooding_count(page_header(vcpu->arch.mmu->root_hpa)); 4337 + __clear_sp_write_flooding_count(to_shadow_page(vcpu->arch.mmu->root_hpa)); 4338 4338 } 4339 4339 4340 4340 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
+3 -3
arch/x86/kvm/mmu/mmu_audit.c
··· 45 45 !is_last_spte(ent[i], level)) { 46 46 struct kvm_mmu_page *child; 47 47 48 - child = page_header(ent[i] & PT64_BASE_ADDR_MASK); 48 + child = to_shadow_page(ent[i] & PT64_BASE_ADDR_MASK); 49 49 __mmu_spte_walk(vcpu, child, fn, level - 1); 50 50 } 51 51 } ··· 62 62 if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { 63 63 hpa_t root = vcpu->arch.mmu->root_hpa; 64 64 65 - sp = page_header(root); 65 + sp = to_shadow_page(root); 66 66 __mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level); 67 67 return; 68 68 } ··· 72 72 73 73 if (root && VALID_PAGE(root)) { 74 74 root &= PT64_BASE_ADDR_MASK; 75 - sp = page_header(root); 75 + sp = to_shadow_page(root); 76 76 __mmu_spte_walk(vcpu, sp, fn, 2); 77 77 } 78 78 }
+2 -2
arch/x86/kvm/mmu/mmu_internal.h
··· 43 43 atomic_t write_flooding_count; 44 44 }; 45 45 46 - static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) 46 + static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page) 47 47 { 48 48 struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); 49 49 ··· 52 52 53 53 static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep) 54 54 { 55 - return page_header(__pa(sptep)); 55 + return to_shadow_page(__pa(sptep)); 56 56 } 57 57 58 58 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);