Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3s: PR: Add (dumb) MMU Notifier support

Now that we have very simple MMU Notifier support for e500 in place,
also add the same simple support to book3s. This gets us one step closer
to actual fast MMU Notifier support.

Signed-off-by: Alexander Graf <agraf@suse.de>

+51 -7
+1 -2
arch/powerpc/include/asm/kvm_host.h
··· 46 46 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 47 47 #endif 48 48 49 - #if defined(CONFIG_KVM_BOOK3S_64_HV) || defined(CONFIG_KVM_E500V2) || \ 50 - defined(CONFIG_KVM_E500MC) 49 + #if !defined(CONFIG_KVM_440) 51 50 #include <linux/mmu_notifier.h> 52 51 53 52 #define KVM_ARCH_WANT_MMU_NOTIFIER
+1
arch/powerpc/kvm/Kconfig
··· 36 36 config KVM_BOOK3S_PR 37 37 bool 38 38 select KVM_MMIO 39 + select MMU_NOTIFIER 39 40 40 41 config KVM_BOOK3S_32 41 42 tristate "KVM support for PowerPC book3s_32 processors"
+1
arch/powerpc/kvm/book3s_32_mmu_host.c
··· 254 254 255 255 kvmppc_mmu_hpte_cache_map(vcpu, pte); 256 256 257 + kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); 257 258 out: 258 259 return r; 259 260 }
+1
arch/powerpc/kvm/book3s_64_mmu_host.c
··· 168 168 169 169 kvmppc_mmu_hpte_cache_map(vcpu, pte); 170 170 } 171 + kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); 171 172 172 173 out: 173 174 return r;
-5
arch/powerpc/kvm/book3s_mmu_hpte.c
··· 114 114 hlist_del_init_rcu(&pte->list_vpte); 115 115 hlist_del_init_rcu(&pte->list_vpte_long); 116 116 117 - if (pte->pte.may_write) 118 - kvm_release_pfn_dirty(pte->pfn); 119 - else 120 - kvm_release_pfn_clean(pte->pfn); 121 - 122 117 spin_unlock(&vcpu3s->mmu_lock); 123 118 124 119 vcpu3s->hpte_cache_count--;
+47
arch/powerpc/kvm/book3s_pr.c
··· 90 90 91 91 void kvmppc_core_check_requests(struct kvm_vcpu *vcpu) 92 92 { 93 + /* We misuse TLB_FLUSH to indicate that we want to clear 94 + all shadow cache entries */ 95 + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) 96 + kvmppc_mmu_pte_flush(vcpu, 0, 0); 93 97 } 98 + 99 + /************* MMU Notifiers *************/ 100 + 101 + int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 102 + { 103 + trace_kvm_unmap_hva(hva); 104 + 105 + /* 106 + * Flush all shadow tlb entries everywhere. This is slow, but 107 + * we are 100% sure that we catch the to be unmapped page 108 + */ 109 + kvm_flush_remote_tlbs(kvm); 110 + 111 + return 0; 112 + } 113 + 114 + int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) 115 + { 116 + /* kvm_unmap_hva flushes everything anyways */ 117 + kvm_unmap_hva(kvm, start); 118 + 119 + return 0; 120 + } 121 + 122 + int kvm_age_hva(struct kvm *kvm, unsigned long hva) 123 + { 124 + /* XXX could be more clever ;) */ 125 + return 0; 126 + } 127 + 128 + int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) 129 + { 130 + /* XXX could be more clever ;) */ 131 + return 0; 132 + } 133 + 134 + void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) 135 + { 136 + /* The page will get remapped properly on its next fault */ 137 + kvm_unmap_hva(kvm, hva); 138 + } 139 + 140 + /*****************************************/ 94 141 95 142 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) 96 143 {