Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3S: Make magic page properly 4k mappable

The magic page is defined as a 4k page of per-vCPU data that is shared
between the guest and the host to accelerate accesses to privileged
registers.

However, when the host is using 64k page size granularity we weren't quite
as strict about that rule anymore. Instead, we partially treated all of the
upper 64k as the magic page and mapped only the uppermost 4k with the actual
magic contents.

This works well enough for Linux, which doesn't use any memory in kernel
space in the upper 64k, but Mac OS X got upset. So this patch makes the
magic page actually stay within a 4k range even on 64k page size hosts.

This patch fixes magic page usage with Mac OS X (using MOL) on 64k PAGE_SIZE
hosts for me.

Signed-off-by: Alexander Graf <agraf@suse.de>

+38 -20
+1 -1
arch/powerpc/include/asm/kvm_book3s.h
··· 158 158 bool upper, u32 val); 159 159 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); 160 160 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); 161 - extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing, 161 + extern pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing, 162 162 bool *writable); 163 163 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, 164 164 unsigned long *rmap, long pte_index, int realmode);
+6 -6
arch/powerpc/kvm/book3s.c
··· 354 354 } 355 355 EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter); 356 356 357 - pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing, 357 + pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing, 358 358 bool *writable) 359 359 { 360 - ulong mp_pa = vcpu->arch.magic_page_pa; 360 + ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM; 361 + gfn_t gfn = gpa >> PAGE_SHIFT; 361 362 362 363 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) 363 364 mp_pa = (uint32_t)mp_pa; 364 365 365 366 /* Magic page override */ 366 - if (unlikely(mp_pa) && 367 - unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) == 368 - ((mp_pa & PAGE_MASK) & KVM_PAM))) { 367 + gpa &= ~0xFFFULL; 368 + if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) { 369 369 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; 370 370 pfn_t pfn; 371 371 ··· 378 378 379 379 return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable); 380 380 } 381 - EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn); 381 + EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn); 382 382 383 383 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, 384 384 bool iswrite, struct kvmppc_pte *pte)
+3 -4
arch/powerpc/kvm/book3s_32_mmu_host.c
··· 156 156 bool writable; 157 157 158 158 /* Get host physical address for gpa */ 159 - hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT, 160 - iswrite, &writable); 159 + hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); 161 160 if (is_error_noslot_pfn(hpaddr)) { 162 - printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", 163 - orig_pte->eaddr); 161 + printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n", 162 + orig_pte->raddr); 164 163 r = -EINVAL; 165 164 goto out; 166 165 }
+3 -2
arch/powerpc/kvm/book3s_64_mmu_host.c
··· 104 104 smp_rmb(); 105 105 106 106 /* Get host physical address for gpa */ 107 - pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable); 107 + pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); 108 108 if (is_error_noslot_pfn(pfn)) { 109 - printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn); 109 + printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n", 110 + orig_pte->raddr); 110 111 r = -EINVAL; 111 112 goto out; 112 113 }
+6 -7
arch/powerpc/kvm/book3s_pr.c
··· 511 511 put_page(hpage); 512 512 } 513 513 514 - static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 514 + static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) 515 515 { 516 516 ulong mp_pa = vcpu->arch.magic_page_pa; 517 517 518 518 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) 519 519 mp_pa = (uint32_t)mp_pa; 520 520 521 - if (unlikely(mp_pa) && 522 - unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) { 521 + gpa &= ~0xFFFULL; 522 + if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) { 523 523 return 1; 524 524 } 525 525 526 - return kvm_is_visible_gfn(vcpu->kvm, gfn); 526 + return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT); 527 527 } 528 528 529 529 int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, ··· 614 614 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); 615 615 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); 616 616 } else if (!is_mmio && 617 - kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { 617 + kvmppc_visible_gpa(vcpu, pte.raddr)) { 618 618 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { 619 619 /* 620 620 * There is already a host HPTE there, presumably ··· 1387 1387 p = __get_free_page(GFP_KERNEL|__GFP_ZERO); 1388 1388 if (!p) 1389 1389 goto uninit_vcpu; 1390 - /* the real shared page fills the last 4k of our page */ 1391 - vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); 1390 + vcpu->arch.shared = (void *)p; 1392 1391 #ifdef CONFIG_PPC_BOOK3S_64 1393 1392 /* Always start the shared struct in native endian mode */ 1394 1393 #ifdef __BIG_ENDIAN__
+19
arch/powerpc/kvm/powerpc.c
··· 190 190 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; 191 191 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; 192 192 193 + #ifdef CONFIG_PPC_64K_PAGES 194 + /* 195 + * Make sure our 4k magic page is in the same window of a 64k 196 + * page within the guest and within the host's page. 197 + */ 198 + if ((vcpu->arch.magic_page_pa & 0xf000) != 199 + ((ulong)vcpu->arch.shared & 0xf000)) { 200 + void *old_shared = vcpu->arch.shared; 201 + ulong shared = (ulong)vcpu->arch.shared; 202 + void *new_shared; 203 + 204 + shared &= PAGE_MASK; 205 + shared |= vcpu->arch.magic_page_pa & 0xf000; 206 + new_shared = (void*)shared; 207 + memcpy(new_shared, old_shared, 0x1000); 208 + vcpu->arch.shared = new_shared; 209 + } 210 + #endif 211 + 193 212 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; 194 213 195 214 r = EV_SUCCESS;