Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: MMU: skip obsolete sp in for_each_gfn_*()

The obsolete sp should not be used on current vCPUs and should not hurt
a vCPU's running, so skip it in for_each_gfn_sp() and
for_each_gfn_indirect_valid_sp().

The side effect is that we will double-check role.invalid in kvm_mmu_get_page(),
but I think it is okay as the role is well cached.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Xiao Guangrong; committed by Radim Krčmář.
46971a2f c54cdf14

+12 -16
+12 -16
arch/x86/kvm/mmu.c
··· 1909 1909 * since it has been deleted from active_mmu_pages but still can be found 1910 1910 * at hast list. 1911 1911 * 1912 - * for_each_gfn_indirect_valid_sp has skipped that kind of page and 1913 - * kvm_mmu_get_page(), the only user of for_each_gfn_sp(), has skipped 1914 - * all the obsolete pages. 1912 + * for_each_gfn_valid_sp() has skipped that kind of pages. 1915 1913 */ 1916 - #define for_each_gfn_sp(_kvm, _sp, _gfn) \ 1914 + #define for_each_gfn_valid_sp(_kvm, _sp, _gfn) \ 1917 1915 hlist_for_each_entry(_sp, \ 1918 1916 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \ 1919 - if ((_sp)->gfn != (_gfn)) {} else 1917 + if ((_sp)->gfn != (_gfn) || is_obsolete_sp((_kvm), (_sp)) \ 1918 + || (_sp)->role.invalid) {} else 1920 1919 1921 1920 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \ 1922 - for_each_gfn_sp(_kvm, _sp, _gfn) \ 1923 - if ((_sp)->role.direct || (_sp)->role.invalid) {} else 1921 + for_each_gfn_valid_sp(_kvm, _sp, _gfn) \ 1922 + if ((_sp)->role.direct) {} else 1924 1923 1925 1924 /* @sp->gfn should be write-protected at the call site */ 1926 1925 static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, ··· 1959 1960 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { } 1960 1961 static void mmu_audit_disable(void) { } 1961 1962 #endif 1963 + 1964 + static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) 1965 + { 1966 + return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); 1967 + } 1962 1968 1963 1969 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, 1964 1970 struct list_head *invalid_list) ··· 2109 2105 __clear_sp_write_flooding_count(sp); 2110 2106 } 2111 2107 2112 - static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) 2113 - { 2114 - return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); 2115 - } 2116 - 2117 2108 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, 2118 2109 gfn_t gfn, 2119 2110 
gva_t gaddr, ··· 2135 2136 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; 2136 2137 role.quadrant = quadrant; 2137 2138 } 2138 - for_each_gfn_sp(vcpu->kvm, sp, gfn) { 2139 - if (is_obsolete_sp(vcpu->kvm, sp)) 2140 - continue; 2141 - 2139 + for_each_gfn_valid_sp(vcpu->kvm, sp, gfn) { 2142 2140 if (!need_sync && sp->unsync) 2143 2141 need_sync = true; 2144 2142