
KVM: x86/mmu: Add sptep_to_sp() helper to wrap shadow page lookup

Introduce sptep_to_sp() to reduce the boilerplate code needed to get the
shadow page associated with a spte pointer, and to improve readability
as it's not immediately obvious that "page_header" is a KVM-specific
accessor for retrieving a shadow page.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200622202034.15093-6-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Sean Christopherson, committed by Paolo Bonzini
57354682 985ab278

4 files changed: +23 -20
arch/x86/kvm/mmu/mmu.c (+13 -15)

@@ -677,7 +677,7 @@
 
 static void count_spte_clear(u64 *sptep, u64 spte)
 {
-	struct kvm_mmu_page *sp = page_header(__pa(sptep));
+	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 
 	if (is_shadow_present_pte(spte))
 		return;
@@ -761,7 +761,7 @@
  */
 static u64 __get_spte_lockless(u64 *sptep)
 {
-	struct kvm_mmu_page *sp = page_header(__pa(sptep));
+	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 	union split_spte spte, *orig = (union split_spte *)sptep;
 	int count;
 
@@ -1427,7 +1427,7 @@
 	struct kvm_mmu_page *sp;
 	struct kvm_rmap_head *rmap_head;
 
-	sp = page_header(__pa(spte));
+	sp = sptep_to_sp(spte);
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
 	return pte_list_add(vcpu, spte, rmap_head);
@@ -1439,7 +1439,7 @@
 	gfn_t gfn;
 	struct kvm_rmap_head *rmap_head;
 
-	sp = page_header(__pa(spte));
+	sp = sptep_to_sp(spte);
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
 	rmap_head = gfn_to_rmap(kvm, gfn, sp);
 	__pte_list_remove(spte, rmap_head);
@@ -1531,7 +1531,7 @@
 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
 {
 	if (is_large_pte(*sptep)) {
-		WARN_ON(page_header(__pa(sptep))->role.level == PG_LEVEL_4K);
+		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
 		drop_spte(kvm, sptep);
 		--kvm->stat.lpages;
 		return true;
@@ -1543,7 +1543,7 @@
 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
 {
 	if (__drop_large_spte(vcpu->kvm, sptep)) {
-		struct kvm_mmu_page *sp = page_header(__pa(sptep));
+		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 
 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
 			KVM_PAGES_PER_HPAGE(sp->role.level));
@@ -2002,7 +2002,7 @@
 	struct kvm_rmap_head *rmap_head;
 	struct kvm_mmu_page *sp;
 
-	sp = page_header(__pa(spte));
+	sp = sptep_to_sp(spte);
 
 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
 
@@ -2124,7 +2124,7 @@
 	struct kvm_mmu_page *sp;
 	unsigned int index;
 
-	sp = page_header(__pa(spte));
+	sp = sptep_to_sp(spte);
 	index = spte - sp->spt;
 	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
 		return;
@@ -2449,9 +2449,7 @@
 
 static void clear_sp_write_flooding_count(u64 *spte)
 {
-	struct kvm_mmu_page *sp = page_header(__pa(spte));
-
-	__clear_sp_write_flooding_count(sp);
+	__clear_sp_write_flooding_count(sptep_to_sp(spte));
 }
 
 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
@@ -3024,7 +3026,7 @@
 	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
 		return 0;
 
-	sp = page_header(__pa(sptep));
+	sp = sptep_to_sp(sptep);
 	if (sp_ad_disabled(sp))
 		spte |= SPTE_AD_DISABLED_MASK;
 	else if (kvm_vcpu_ad_need_write_protect(vcpu))
@@ -3237,7 +3239,7 @@
 {
 	struct kvm_mmu_page *sp;
 
-	sp = page_header(__pa(sptep));
+	sp = sptep_to_sp(sptep);
 
 	/*
 	 * Without accessed bits, there's no way to distinguish between
@@ -3545,7 +3547,7 @@
 		if (!is_shadow_present_pte(spte))
 			break;
 
-		sp = page_header(__pa(iterator.sptep));
+		sp = sptep_to_sp(iterator.sptep);
 		if (!is_last_spte(spte, sp->role.level))
 			break;
 
@@ -5924,7 +5926,7 @@
 
 restart:
 	for_each_rmap_spte(rmap_head, &iter, sptep) {
-		sp = page_header(__pa(sptep));
+		sp = sptep_to_sp(sptep);
 		pfn = spte_to_pfn(*sptep);
 
 		/*
arch/x86/kvm/mmu/mmu_audit.c (+3 -3)

@@ -97,7 +97,7 @@
 	kvm_pfn_t pfn;
 	hpa_t hpa;
 
-	sp = page_header(__pa(sptep));
+	sp = sptep_to_sp(sptep);
 
 	if (sp->unsync) {
 		if (level != PG_LEVEL_4K) {
@@ -132,7 +132,7 @@
 	struct kvm_memory_slot *slot;
 	gfn_t gfn;
 
-	rev_sp = page_header(__pa(sptep));
+	rev_sp = sptep_to_sp(sptep);
 	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
 
 	slots = kvm_memslots_for_spte_role(kvm, rev_sp->role);
@@ -165,7 +165,7 @@
 
 static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 {
-	struct kvm_mmu_page *sp = page_header(__pa(sptep));
+	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 
 	if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
 		audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
arch/x86/kvm/mmu/mmu_internal.h (+5 -0)

@@ -50,6 +50,11 @@
 	return (struct kvm_mmu_page *)page_private(page);
 }
 
+static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
+{
+	return page_header(__pa(sptep));
+}
+
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
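For intuition, the lookup chain the new helper wraps is: spte pointer -> physical address (__pa) -> struct page for that page -> owning kvm_mmu_page stashed in the page's private field (as page_header() does in the hunk above). Below is a minimal user-space model of that chain; the structures and the mem_map/backing arrays are simplified stand-ins invented for illustration, not the kernel's real definitions, assuming 4 KiB pages of 8-byte sptes.

/* Toy model of sptep_to_sp(); build with: cc -o sptep sptep.c */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define SPTES_PER_PAGE	512	/* one 4 KiB page holds 512 8-byte sptes */

/* Simplified stand-in for the kernel's struct kvm_mmu_page. */
struct kvm_mmu_page {
	uint64_t *spt;	/* the page of sptes this shadow page owns */
	int level;
};

/* Stand-in for struct page: KVM keeps a back-pointer to the owning
 * kvm_mmu_page in the page's private field. */
struct page {
	unsigned long private;
};

static uint64_t backing[8][SPTES_PER_PAGE];	/* fake physical memory */
static struct page mem_map[8];			/* one struct page per page */

/* __pa(): spte virtual address -> fake physical address (byte offset). */
static uintptr_t __pa(uint64_t *sptep)
{
	return (uintptr_t)sptep - (uintptr_t)backing;
}

/* page_header(): physical address -> owning shadow page, via the
 * struct page's private field (mirrors the mmu_internal.h hunk above). */
static struct kvm_mmu_page *page_header(uintptr_t pa)
{
	return (struct kvm_mmu_page *)mem_map[pa >> PAGE_SHIFT].private;
}

/* The new helper: one call instead of open-coding page_header(__pa(...)). */
static struct kvm_mmu_page *sptep_to_sp(uint64_t *sptep)
{
	return page_header(__pa(sptep));
}

int main(void)
{
	static struct kvm_mmu_page sp = { .spt = backing[3], .level = 1 };
	mem_map[3].private = (unsigned long)&sp;	/* page -> sp link */

	uint64_t *sptep = &sp.spt[42];	/* any spte within that page */
	assert(sptep_to_sp(sptep) == &sp);
	printf("spte index %td at level %d\n",
	       sptep - sptep_to_sp(sptep)->spt, sptep_to_sp(sptep)->level);
	return 0;
}

Note how the index of an spte within its shadow page falls out of the same lookup (sptep - sp->spt), which is exactly the idiom several of the converted call sites use.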
arch/x86/kvm/mmu/paging_tmpl.h (+2 -2)

@@ -596,7 +596,7 @@
 	u64 *spte;
 	int i;
 
-	sp = page_header(__pa(sptep));
+	sp = sptep_to_sp(sptep);
 
 	if (sp->role.level > PG_LEVEL_4K)
 		return;
@@ -916,7 +916,7 @@
 		level = iterator.level;
 		sptep = iterator.sptep;
 
-		sp = page_header(__pa(sptep));
+		sp = sptep_to_sp(sptep);
 		if (is_last_spte(*sptep, level)) {
 			pt_element_t gpte;
 			gpa_t pte_gpa;