Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Magic Page Book3s support

We need to override EA as well as PA lookups for the magic page. When the guest
tells us to project it, the magic page overrides any guest mappings.

In order to reflect that, we need to hook into all the MMU layers of KVM to
force map the magic page if necessary.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>

Authored by Alexander Graf, committed by Avi Kivity
e8508940 beb03f14

+81 -12
+1
arch/powerpc/include/asm/kvm_book3s.h
··· 130 130 bool upper, u32 val); 131 131 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); 132 132 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); 133 + extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); 133 134 134 135 extern u32 kvmppc_trampoline_lowmem; 135 136 extern u32 kvmppc_trampoline_enter;
+32 -3
arch/powerpc/kvm/book3s.c
··· 419 419 } 420 420 } 421 421 422 + pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 423 + { 424 + ulong mp_pa = vcpu->arch.magic_page_pa; 425 + 426 + /* Magic page override */ 427 + if (unlikely(mp_pa) && 428 + unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) == 429 + ((mp_pa & PAGE_MASK) & KVM_PAM))) { 430 + ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; 431 + pfn_t pfn; 432 + 433 + pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT; 434 + get_page(pfn_to_page(pfn)); 435 + return pfn; 436 + } 437 + 438 + return gfn_to_pfn(vcpu->kvm, gfn); 439 + } 440 + 422 441 /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To 423 442 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to 424 443 * emulate 32 bytes dcbz length. ··· 573 554 574 555 static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 575 556 { 557 + ulong mp_pa = vcpu->arch.magic_page_pa; 558 + 559 + if (unlikely(mp_pa) && 560 + unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) { 561 + return 1; 562 + } 563 + 576 564 return kvm_is_visible_gfn(vcpu->kvm, gfn); 577 565 } 578 566 ··· 1283 1257 struct kvmppc_vcpu_book3s *vcpu_book3s; 1284 1258 struct kvm_vcpu *vcpu; 1285 1259 int err = -ENOMEM; 1260 + unsigned long p; 1286 1261 1287 1262 vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s)); 1288 1263 if (!vcpu_book3s) ··· 1301 1274 if (err) 1302 1275 goto free_shadow_vcpu; 1303 1276 1304 - vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); 1305 - if (!vcpu->arch.shared) 1277 + p = __get_free_page(GFP_KERNEL|__GFP_ZERO); 1278 + /* the real shared page fills the last 4k of our page */ 1279 + vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096); 1280 + if (!p) 1306 1281 goto uninit_vcpu; 1307 1282 1308 1283 vcpu->arch.host_retip = kvm_return_point; ··· 1351 1322 { 1352 1323 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 1353 1324 1354 - free_page((unsigned long)vcpu->arch.shared); 1325 + 
free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); 1355 1326 kvm_vcpu_uninit(vcpu); 1356 1327 kfree(vcpu_book3s->shadow_vcpu); 1357 1328 vfree(vcpu_book3s);
+16
arch/powerpc/kvm/book3s_32_mmu.c
··· 281 281 struct kvmppc_pte *pte, bool data) 282 282 { 283 283 int r; 284 + ulong mp_ea = vcpu->arch.magic_page_ea; 284 285 285 286 pte->eaddr = eaddr; 287 + 288 + /* Magic page override */ 289 + if (unlikely(mp_ea) && 290 + unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && 291 + !(vcpu->arch.shared->msr & MSR_PR)) { 292 + pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); 293 + pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff); 294 + pte->raddr &= KVM_PAM; 295 + pte->may_execute = true; 296 + pte->may_read = true; 297 + pte->may_write = true; 298 + 299 + return 0; 300 + } 301 + 286 302 r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data); 287 303 if (r < 0) 288 304 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
+1 -1
arch/powerpc/kvm/book3s_32_mmu_host.c
··· 147 147 struct hpte_cache *pte; 148 148 149 149 /* Get host physical address for gpa */ 150 - hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); 150 + hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); 151 151 if (kvm_is_error_hva(hpaddr)) { 152 152 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", 153 153 orig_pte->eaddr);
+29 -1
arch/powerpc/kvm/book3s_64_mmu.c
··· 163 163 bool found = false; 164 164 bool perm_err = false; 165 165 int second = 0; 166 + ulong mp_ea = vcpu->arch.magic_page_ea; 167 + 168 + /* Magic page override */ 169 + if (unlikely(mp_ea) && 170 + unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && 171 + !(vcpu->arch.shared->msr & MSR_PR)) { 172 + gpte->eaddr = eaddr; 173 + gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); 174 + gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); 175 + gpte->raddr &= KVM_PAM; 176 + gpte->may_execute = true; 177 + gpte->may_read = true; 178 + gpte->may_write = true; 179 + 180 + return 0; 181 + } 166 182 167 183 slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr); 168 184 if (!slbe) ··· 461 445 ulong ea = esid << SID_SHIFT; 462 446 struct kvmppc_slb *slb; 463 447 u64 gvsid = esid; 448 + ulong mp_ea = vcpu->arch.magic_page_ea; 464 449 465 450 if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 466 451 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea); ··· 481 464 break; 482 465 case MSR_DR|MSR_IR: 483 466 if (!slb) 484 - return -ENOENT; 467 + goto no_slb; 485 468 486 469 *vsid = gvsid; 487 470 break; ··· 494 477 *vsid |= VSID_PR; 495 478 496 479 return 0; 480 + 481 + no_slb: 482 + /* Catch magic page case */ 483 + if (unlikely(mp_ea) && 484 + unlikely(esid == (mp_ea >> SID_SHIFT)) && 485 + !(vcpu->arch.shared->msr & MSR_PR)) { 486 + *vsid = VSID_REAL | esid; 487 + return 0; 488 + } 489 + 490 + return -EINVAL; 497 491 } 498 492 499 493 static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
+2 -7
arch/powerpc/kvm/book3s_64_mmu_host.c
··· 101 101 struct kvmppc_sid_map *map; 102 102 103 103 /* Get host physical address for gpa */ 104 - hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); 104 + hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); 105 105 if (kvm_is_error_hva(hpaddr)) { 106 106 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); 107 107 return -EINVAL; 108 108 } 109 109 hpaddr <<= PAGE_SHIFT; 110 - #if PAGE_SHIFT == 12 111 - #elif PAGE_SHIFT == 16 112 - hpaddr |= orig_pte->raddr & 0xf000; 113 - #else 114 - #error Unknown page size 115 - #endif 110 + hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK); 116 111 117 112 /* and write the mapping ea -> hpa into the pt */ 118 113 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);