Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

KVM: PPC: Use kvm_faultin_pfn() to handle page faults on Book3s PR

Convert Book3S PR to __kvm_faultin_pfn()+kvm_release_faultin_page(), which
are new APIs to consolidate arch code and provide consistent behavior
across all KVM architectures.
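For reference, the pattern the conversion moves toward looks roughly like this. A minimal sketch, assuming a caller that faults in a single gfn; locking and the real call sites' error handling are elided, the surrounding variables (vcpu, gfn, writing) are placeholders, and kvm_faultin_pfn() is the vcpu wrapper around __kvm_faultin_pfn() used in the diffs below:

        struct page *page;
        bool writable;
        kvm_pfn_t pfn;

        /* Fault in the pfn and take a reference on its backing page. */
        pfn = kvm_faultin_pfn(vcpu, gfn, writing, &writable, &page);
        if (is_error_noslot_pfn(pfn))
                return -EFAULT;

        /* ... install the translation under mmu_lock ... */

        /*
         * Release the reference: the page is dirtied only if it was
         * mapped writable, and the third argument ("unused") is false
         * because the pfn was consumed by the mapping above.
         */
        kvm_release_faultin_page(vcpu->kvm, page, false, writable);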

Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-65-seanjc@google.com>

Authored by Sean Christopherson and committed by Paolo Bonzini
8b135c77 2b26d6b7

4 files changed: +14 -12
arch/powerpc/include/asm/kvm_book3s.h (+1 -1)

···
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
 extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
-                       bool writing, bool *writable);
+                       bool writing, bool *writable, struct page **page);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                        unsigned long *rmap, long pte_index, int realmode);
 extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
···
arch/powerpc/kvm/book3s.c (+4 -3)

···
 EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

 kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
-                           bool *writable)
+                           bool *writable, struct page **page)
 {
        ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
        gfn_t gfn = gpa >> PAGE_SHIFT;
···
                kvm_pfn_t pfn;

                pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
-               get_page(pfn_to_page(pfn));
+               *page = pfn_to_page(pfn);
+               get_page(*page);
                if (writable)
                        *writable = true;
                return pfn;
        }

-       return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
+       return kvm_faultin_pfn(vcpu, gfn, writing, writable, page);
 }
 EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
···
arch/powerpc/kvm/book3s_32_mmu_host.c (+4 -3)

···
 int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
                        bool iswrite)
 {
+       struct page *page;
        kvm_pfn_t hpaddr;
        u64 vpn;
        u64 vsid;
···
        bool writable;

        /* Get host physical address for gpa */
-       hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
+       hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable, &page);
        if (is_error_noslot_pfn(hpaddr)) {
                printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
                       orig_pte->raddr);
···

        pte = kvmppc_mmu_hpte_cache_next(vcpu);
        if (!pte) {
-               kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+               kvm_release_page_unused(page);
                r = -EAGAIN;
                goto out;
···

        kvmppc_mmu_hpte_cache_map(vcpu, pte);

-       kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+       kvm_release_page_clean(page);
 out:
        return r;
 }
arch/powerpc/kvm/book3s_64_mmu_host.c (+5 -5)

···
        struct hpte_cache *cpte;
        unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
        unsigned long pfn;
+       struct page *page;

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();

        /* Get host physical address for gpa */
-       pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
+       pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable, &page);
        if (is_error_noslot_pfn(pfn)) {
                printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
                       orig_pte->raddr);
···
        }

 out_unlock:
-       if (!orig_pte->may_write || !writable)
-               kvm_release_pfn_clean(pfn);
-       else
-               kvm_release_pfn_dirty(pfn);
+       /* FIXME: Don't unconditionally pass unused=false. */
+       kvm_release_faultin_page(kvm, page, false,
+                                orig_pte->may_write && writable);
        spin_unlock(&kvm->mmu_lock);
        if (cpte)
                kvmppc_mmu_hpte_cache_free(cpte);
···