Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Make shared struct aka magic page guest endian

The shared (magic) page is a data structure that contains often-used
supervisor-privileged SPRs, made accessible via memory to the user in order
to reduce the number of exits we have to take to read/write them.

When we actually share this structure with the guest, we have to maintain
it in guest endianness, because some of the patching tricks only work with
native endian load/store operations.

Since book3s_64 pr mode is the only case in which the structure may be
shared in little endian, we don't have to worry about booke or book3s hv.

For booke, the shared struct stays big endian. For book3s_64 hv we maintain
the struct in host native endian, since it never gets shared with the guest.

For book3s_64 pr we introduce a variable that tells us which endianness the
shared struct is in and route every access to it through helper inline
functions that evaluate this variable.

Signed-off-by: Alexander Graf <agraf@suse.de>

+309 -143
+2 -1
arch/powerpc/include/asm/kvm_book3s.h
··· 268 268 return vcpu->arch.pc; 269 269 } 270 270 271 + static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu); 271 272 static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) 272 273 { 273 - return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE); 274 + return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE); 274 275 } 275 276 276 277 static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
-5
arch/powerpc/include/asm/kvm_booke.h
··· 108 108 { 109 109 return vcpu->arch.fault_dear; 110 110 } 111 - 112 - static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu) 113 - { 114 - return vcpu->arch.shared->msr; 115 - } 116 111 #endif /* __ASM_KVM_BOOKE_H__ */
+3
arch/powerpc/include/asm/kvm_host.h
··· 623 623 wait_queue_head_t cpu_run; 624 624 625 625 struct kvm_vcpu_arch_shared *shared; 626 + #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) 627 + bool shared_big_endian; 628 + #endif 626 629 unsigned long magic_page_pa; /* phys addr to map the magic page to */ 627 630 unsigned long magic_page_ea; /* effect. addr to map the magic page to */ 628 631
+79 -1
arch/powerpc/include/asm/kvm_ppc.h
··· 449 449 } 450 450 451 451 /* 452 + * Shared struct helpers. The shared struct can be little or big endian, 453 + * depending on the guest endianness. So expose helpers to all of them. 454 + */ 455 + static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu) 456 + { 457 + #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) 458 + /* Only Book3S_64 PR supports bi-endian for now */ 459 + return vcpu->arch.shared_big_endian; 460 + #elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__) 461 + /* Book3s_64 HV on little endian is always little endian */ 462 + return false; 463 + #else 464 + return true; 465 + #endif 466 + } 467 + 468 + #define SHARED_WRAPPER_GET(reg, size) \ 469 + static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ 470 + { \ 471 + if (kvmppc_shared_big_endian(vcpu)) \ 472 + return be##size##_to_cpu(vcpu->arch.shared->reg); \ 473 + else \ 474 + return le##size##_to_cpu(vcpu->arch.shared->reg); \ 475 + } \ 476 + 477 + #define SHARED_WRAPPER_SET(reg, size) \ 478 + static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \ 479 + { \ 480 + if (kvmppc_shared_big_endian(vcpu)) \ 481 + vcpu->arch.shared->reg = cpu_to_be##size(val); \ 482 + else \ 483 + vcpu->arch.shared->reg = cpu_to_le##size(val); \ 484 + } \ 485 + 486 + #define SHARED_WRAPPER(reg, size) \ 487 + SHARED_WRAPPER_GET(reg, size) \ 488 + SHARED_WRAPPER_SET(reg, size) \ 489 + 490 + SHARED_WRAPPER(critical, 64) 491 + SHARED_WRAPPER(sprg0, 64) 492 + SHARED_WRAPPER(sprg1, 64) 493 + SHARED_WRAPPER(sprg2, 64) 494 + SHARED_WRAPPER(sprg3, 64) 495 + SHARED_WRAPPER(srr0, 64) 496 + SHARED_WRAPPER(srr1, 64) 497 + SHARED_WRAPPER(dar, 64) 498 + SHARED_WRAPPER_GET(msr, 64) 499 + static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val) 500 + { 501 + if (kvmppc_shared_big_endian(vcpu)) 502 + vcpu->arch.shared->msr = cpu_to_be64(val); 503 + else 504 + vcpu->arch.shared->msr = cpu_to_le64(val); 505 + } 506 + SHARED_WRAPPER(dsisr, 32) 
507 + SHARED_WRAPPER(int_pending, 32) 508 + SHARED_WRAPPER(sprg4, 64) 509 + SHARED_WRAPPER(sprg5, 64) 510 + SHARED_WRAPPER(sprg6, 64) 511 + SHARED_WRAPPER(sprg7, 64) 512 + 513 + static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr) 514 + { 515 + if (kvmppc_shared_big_endian(vcpu)) 516 + return be32_to_cpu(vcpu->arch.shared->sr[nr]); 517 + else 518 + return le32_to_cpu(vcpu->arch.shared->sr[nr]); 519 + } 520 + 521 + static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val) 522 + { 523 + if (kvmppc_shared_big_endian(vcpu)) 524 + vcpu->arch.shared->sr[nr] = cpu_to_be32(val); 525 + else 526 + vcpu->arch.shared->sr[nr] = cpu_to_le32(val); 527 + } 528 + 529 + /* 452 530 * Please call after prepare_to_enter. This function puts the lazy ee and irq 453 531 * disabled tracking state back to normal mode, without actually enabling 454 532 * interrupts. ··· 563 485 msr_64bit = MSR_SF; 564 486 #endif 565 487 566 - if (!(vcpu->arch.shared->msr & msr_64bit)) 488 + if (!(kvmppc_get_msr(vcpu) & msr_64bit)) 567 489 ea = (uint32_t)ea; 568 490 569 491 return ea;
+4
arch/powerpc/kernel/asm-offsets.c
··· 54 54 #endif 55 55 #if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S) 56 56 #include <asm/kvm_book3s.h> 57 + #include <asm/kvm_ppc.h> 57 58 #endif 58 59 59 60 #ifdef CONFIG_PPC32 ··· 468 467 DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); 469 468 DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); 470 469 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); 470 + #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) 471 + DEFINE(VCPU_SHAREDBE, offsetof(struct kvm_vcpu, arch.shared_big_endian)); 472 + #endif 471 473 472 474 DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0)); 473 475 DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
+36 -36
arch/powerpc/kvm/book3s.c
··· 85 85 if (is_kvmppc_hv_enabled(vcpu->kvm)) 86 86 return; 87 87 if (pending_now) 88 - vcpu->arch.shared->int_pending = 1; 88 + kvmppc_set_int_pending(vcpu, 1); 89 89 else if (old_pending) 90 - vcpu->arch.shared->int_pending = 0; 90 + kvmppc_set_int_pending(vcpu, 0); 91 91 } 92 92 93 93 static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) ··· 99 99 if (is_kvmppc_hv_enabled(vcpu->kvm)) 100 100 return false; 101 101 102 - crit_raw = vcpu->arch.shared->critical; 102 + crit_raw = kvmppc_get_critical(vcpu); 103 103 crit_r1 = kvmppc_get_gpr(vcpu, 1); 104 104 105 105 /* Truncate crit indicators in 32 bit mode */ 106 - if (!(vcpu->arch.shared->msr & MSR_SF)) { 106 + if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { 107 107 crit_raw &= 0xffffffff; 108 108 crit_r1 &= 0xffffffff; 109 109 } ··· 111 111 /* Critical section when crit == r1 */ 112 112 crit = (crit_raw == crit_r1); 113 113 /* ... and we're in supervisor mode */ 114 - crit = crit && !(vcpu->arch.shared->msr & MSR_PR); 114 + crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR); 115 115 116 116 return crit; 117 117 } 118 118 119 119 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) 120 120 { 121 - vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu); 122 - vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags; 121 + kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); 122 + kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags); 123 123 kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); 124 124 vcpu->arch.mmu.reset_msr(vcpu); 125 125 } ··· 225 225 226 226 switch (priority) { 227 227 case BOOK3S_IRQPRIO_DECREMENTER: 228 - deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; 228 + deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; 229 229 vec = BOOK3S_INTERRUPT_DECREMENTER; 230 230 break; 231 231 case BOOK3S_IRQPRIO_EXTERNAL: 232 232 case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: 233 - deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; 233 + deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; 234 234 vec = 
BOOK3S_INTERRUPT_EXTERNAL; 235 235 break; 236 236 case BOOK3S_IRQPRIO_SYSTEM_RESET: ··· 343 343 { 344 344 ulong mp_pa = vcpu->arch.magic_page_pa; 345 345 346 - if (!(vcpu->arch.shared->msr & MSR_SF)) 346 + if (!(kvmppc_get_msr(vcpu) & MSR_SF)) 347 347 mp_pa = (uint32_t)mp_pa; 348 348 349 349 /* Magic page override */ ··· 367 367 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, 368 368 bool iswrite, struct kvmppc_pte *pte) 369 369 { 370 - int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR)); 370 + int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR)); 371 371 int r; 372 372 373 373 if (relocated) { ··· 498 498 regs->ctr = kvmppc_get_ctr(vcpu); 499 499 regs->lr = kvmppc_get_lr(vcpu); 500 500 regs->xer = kvmppc_get_xer(vcpu); 501 - regs->msr = vcpu->arch.shared->msr; 502 - regs->srr0 = vcpu->arch.shared->srr0; 503 - regs->srr1 = vcpu->arch.shared->srr1; 501 + regs->msr = kvmppc_get_msr(vcpu); 502 + regs->srr0 = kvmppc_get_srr0(vcpu); 503 + regs->srr1 = kvmppc_get_srr1(vcpu); 504 504 regs->pid = vcpu->arch.pid; 505 - regs->sprg0 = vcpu->arch.shared->sprg0; 506 - regs->sprg1 = vcpu->arch.shared->sprg1; 507 - regs->sprg2 = vcpu->arch.shared->sprg2; 508 - regs->sprg3 = vcpu->arch.shared->sprg3; 509 - regs->sprg4 = vcpu->arch.shared->sprg4; 510 - regs->sprg5 = vcpu->arch.shared->sprg5; 511 - regs->sprg6 = vcpu->arch.shared->sprg6; 512 - regs->sprg7 = vcpu->arch.shared->sprg7; 505 + regs->sprg0 = kvmppc_get_sprg0(vcpu); 506 + regs->sprg1 = kvmppc_get_sprg1(vcpu); 507 + regs->sprg2 = kvmppc_get_sprg2(vcpu); 508 + regs->sprg3 = kvmppc_get_sprg3(vcpu); 509 + regs->sprg4 = kvmppc_get_sprg4(vcpu); 510 + regs->sprg5 = kvmppc_get_sprg5(vcpu); 511 + regs->sprg6 = kvmppc_get_sprg6(vcpu); 512 + regs->sprg7 = kvmppc_get_sprg7(vcpu); 513 513 514 514 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 515 515 regs->gpr[i] = kvmppc_get_gpr(vcpu, i); ··· 527 527 kvmppc_set_lr(vcpu, regs->lr); 528 528 kvmppc_set_xer(vcpu, regs->xer); 529 529 
kvmppc_set_msr(vcpu, regs->msr); 530 - vcpu->arch.shared->srr0 = regs->srr0; 531 - vcpu->arch.shared->srr1 = regs->srr1; 532 - vcpu->arch.shared->sprg0 = regs->sprg0; 533 - vcpu->arch.shared->sprg1 = regs->sprg1; 534 - vcpu->arch.shared->sprg2 = regs->sprg2; 535 - vcpu->arch.shared->sprg3 = regs->sprg3; 536 - vcpu->arch.shared->sprg4 = regs->sprg4; 537 - vcpu->arch.shared->sprg5 = regs->sprg5; 538 - vcpu->arch.shared->sprg6 = regs->sprg6; 539 - vcpu->arch.shared->sprg7 = regs->sprg7; 530 + kvmppc_set_srr0(vcpu, regs->srr0); 531 + kvmppc_set_srr1(vcpu, regs->srr1); 532 + kvmppc_set_sprg0(vcpu, regs->sprg0); 533 + kvmppc_set_sprg1(vcpu, regs->sprg1); 534 + kvmppc_set_sprg2(vcpu, regs->sprg2); 535 + kvmppc_set_sprg3(vcpu, regs->sprg3); 536 + kvmppc_set_sprg4(vcpu, regs->sprg4); 537 + kvmppc_set_sprg5(vcpu, regs->sprg5); 538 + kvmppc_set_sprg6(vcpu, regs->sprg6); 539 + kvmppc_set_sprg7(vcpu, regs->sprg7); 540 540 541 541 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 542 542 kvmppc_set_gpr(vcpu, i, regs->gpr[i]); ··· 570 570 r = 0; 571 571 switch (reg->id) { 572 572 case KVM_REG_PPC_DAR: 573 - val = get_reg_val(reg->id, vcpu->arch.shared->dar); 573 + val = get_reg_val(reg->id, kvmppc_get_dar(vcpu)); 574 574 break; 575 575 case KVM_REG_PPC_DSISR: 576 - val = get_reg_val(reg->id, vcpu->arch.shared->dsisr); 576 + val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu)); 577 577 break; 578 578 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: 579 579 i = reg->id - KVM_REG_PPC_FPR0; ··· 660 660 r = 0; 661 661 switch (reg->id) { 662 662 case KVM_REG_PPC_DAR: 663 - vcpu->arch.shared->dar = set_reg_val(reg->id, val); 663 + kvmppc_set_dar(vcpu, set_reg_val(reg->id, val)); 664 664 break; 665 665 case KVM_REG_PPC_DSISR: 666 - vcpu->arch.shared->dsisr = set_reg_val(reg->id, val); 666 + kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val)); 667 667 break; 668 668 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: 669 669 i = reg->id - KVM_REG_PPC_FPR0;
+11 -10
arch/powerpc/kvm/book3s_32_mmu.c
··· 91 91 92 92 static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr) 93 93 { 94 - return vcpu->arch.shared->sr[(eaddr >> 28) & 0xf]; 94 + return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf); 95 95 } 96 96 97 97 static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, ··· 160 160 else 161 161 bat = &vcpu_book3s->ibat[i]; 162 162 163 - if (vcpu->arch.shared->msr & MSR_PR) { 163 + if (kvmppc_get_msr(vcpu) & MSR_PR) { 164 164 if (!bat->vp) 165 165 continue; 166 166 } else { ··· 242 242 pte->raddr = (pte1 & ~(0xFFFULL)) | (eaddr & 0xFFF); 243 243 pp = pte1 & 3; 244 244 245 - if ((sr_kp(sre) && (vcpu->arch.shared->msr & MSR_PR)) || 246 - (sr_ks(sre) && !(vcpu->arch.shared->msr & MSR_PR))) 245 + if ((sr_kp(sre) && (kvmppc_get_msr(vcpu) & MSR_PR)) || 246 + (sr_ks(sre) && !(kvmppc_get_msr(vcpu) & MSR_PR))) 247 247 pp |= 4; 248 248 249 249 pte->may_write = false; ··· 320 320 /* Magic page override */ 321 321 if (unlikely(mp_ea) && 322 322 unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && 323 - !(vcpu->arch.shared->msr & MSR_PR)) { 323 + !(kvmppc_get_msr(vcpu) & MSR_PR)) { 324 324 pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); 325 325 pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff); 326 326 pte->raddr &= KVM_PAM; ··· 345 345 346 346 static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum) 347 347 { 348 - return vcpu->arch.shared->sr[srnum]; 348 + return kvmppc_get_sr(vcpu, srnum); 349 349 } 350 350 351 351 static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, 352 352 ulong value) 353 353 { 354 - vcpu->arch.shared->sr[srnum] = value; 354 + kvmppc_set_sr(vcpu, srnum, value); 355 355 kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT); 356 356 } 357 357 ··· 371 371 ulong ea = esid << SID_SHIFT; 372 372 u32 sr; 373 373 u64 gvsid = esid; 374 + u64 msr = kvmppc_get_msr(vcpu); 374 375 375 - if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 376 + if (msr & (MSR_DR|MSR_IR)) { 376 377 sr = find_sr(vcpu, 
ea); 377 378 if (sr_valid(sr)) 378 379 gvsid = sr_vsid(sr); ··· 382 381 /* In case we only have one of MSR_IR or MSR_DR set, let's put 383 382 that in the real-mode context (and hope RM doesn't access 384 383 high memory) */ 385 - switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 384 + switch (msr & (MSR_DR|MSR_IR)) { 386 385 case 0: 387 386 *vsid = VSID_REAL | esid; 388 387 break; ··· 402 401 BUG(); 403 402 } 404 403 405 - if (vcpu->arch.shared->msr & MSR_PR) 404 + if (msr & MSR_PR) 406 405 *vsid |= VSID_PR; 407 406 408 407 return 0;
+2 -2
arch/powerpc/kvm/book3s_32_mmu_host.c
··· 92 92 struct kvmppc_sid_map *map; 93 93 u16 sid_map_mask; 94 94 95 - if (vcpu->arch.shared->msr & MSR_PR) 95 + if (kvmppc_get_msr(vcpu) & MSR_PR) 96 96 gvsid |= VSID_PR; 97 97 98 98 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); ··· 279 279 u16 sid_map_mask; 280 280 static int backwards_map = 0; 281 281 282 - if (vcpu->arch.shared->msr & MSR_PR) 282 + if (kvmppc_get_msr(vcpu) & MSR_PR) 283 283 gvsid |= VSID_PR; 284 284 285 285 /* We might get collisions that trap in preceding order, so let's
+10 -9
arch/powerpc/kvm/book3s_64_mmu.c
··· 226 226 /* Magic page override */ 227 227 if (unlikely(mp_ea) && 228 228 unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && 229 - !(vcpu->arch.shared->msr & MSR_PR)) { 229 + !(kvmppc_get_msr(vcpu) & MSR_PR)) { 230 230 gpte->eaddr = eaddr; 231 231 gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); 232 232 gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); ··· 269 269 goto no_page_found; 270 270 } 271 271 272 - if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp) 272 + if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp) 273 273 key = 4; 274 - else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks) 274 + else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks) 275 275 key = 4; 276 276 277 277 for (i=0; i<16; i+=2) { ··· 482 482 vcpu->arch.slb[i].origv = 0; 483 483 } 484 484 485 - if (vcpu->arch.shared->msr & MSR_IR) { 485 + if (kvmppc_get_msr(vcpu) & MSR_IR) { 486 486 kvmppc_mmu_flush_segments(vcpu); 487 487 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); 488 488 } ··· 566 566 { 567 567 ulong mp_ea = vcpu->arch.magic_page_ea; 568 568 569 - return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) && 569 + return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) && 570 570 (mp_ea >> SID_SHIFT) == esid; 571 571 } 572 572 #endif ··· 579 579 u64 gvsid = esid; 580 580 ulong mp_ea = vcpu->arch.magic_page_ea; 581 581 int pagesize = MMU_PAGE_64K; 582 + u64 msr = kvmppc_get_msr(vcpu); 582 583 583 - if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 584 + if (msr & (MSR_DR|MSR_IR)) { 584 585 slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); 585 586 if (slb) { 586 587 gvsid = slb->vsid; ··· 594 593 } 595 594 } 596 595 597 - switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 596 + switch (msr & (MSR_DR|MSR_IR)) { 598 597 case 0: 599 598 gvsid = VSID_REAL | esid; 600 599 break; ··· 627 626 gvsid |= VSID_64K; 628 627 #endif 629 628 630 - if (vcpu->arch.shared->msr & MSR_PR) 629 + if (kvmppc_get_msr(vcpu) & MSR_PR) 631 630 gvsid |= VSID_PR; 632 631 633 632 *vsid = 
gvsid; ··· 637 636 /* Catch magic page case */ 638 637 if (unlikely(mp_ea) && 639 638 unlikely(esid == (mp_ea >> SID_SHIFT)) && 640 - !(vcpu->arch.shared->msr & MSR_PR)) { 639 + !(kvmppc_get_msr(vcpu) & MSR_PR)) { 641 640 *vsid = VSID_REAL | esid; 642 641 return 0; 643 642 }
+2 -2
arch/powerpc/kvm/book3s_64_mmu_host.c
··· 58 58 struct kvmppc_sid_map *map; 59 59 u16 sid_map_mask; 60 60 61 - if (vcpu->arch.shared->msr & MSR_PR) 61 + if (kvmppc_get_msr(vcpu) & MSR_PR) 62 62 gvsid |= VSID_PR; 63 63 64 64 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); ··· 230 230 u16 sid_map_mask; 231 231 static int backwards_map = 0; 232 232 233 - if (vcpu->arch.shared->msr & MSR_PR) 233 + if (kvmppc_get_msr(vcpu) & MSR_PR) 234 234 gvsid |= VSID_PR; 235 235 236 236 /* We might get collisions that trap in preceding order, so let's
+14 -14
arch/powerpc/kvm/book3s_emulate.c
··· 80 80 return false; 81 81 82 82 /* Limit user space to its own small SPR set */ 83 - if ((vcpu->arch.shared->msr & MSR_PR) && level > PRIV_PROBLEM) 83 + if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM) 84 84 return false; 85 85 86 86 return true; ··· 100 100 switch (get_xop(inst)) { 101 101 case OP_19_XOP_RFID: 102 102 case OP_19_XOP_RFI: 103 - kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0); 104 - kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); 103 + kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu)); 104 + kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu)); 105 105 *advance = 0; 106 106 break; 107 107 ··· 113 113 case 31: 114 114 switch (get_xop(inst)) { 115 115 case OP_31_XOP_MFMSR: 116 - kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); 116 + kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu)); 117 117 break; 118 118 case OP_31_XOP_MTMSRD: 119 119 { 120 120 ulong rs_val = kvmppc_get_gpr(vcpu, rs); 121 121 if (inst & 0x10000) { 122 - ulong new_msr = vcpu->arch.shared->msr; 122 + ulong new_msr = kvmppc_get_msr(vcpu); 123 123 new_msr &= ~(MSR_RI | MSR_EE); 124 124 new_msr |= rs_val & (MSR_RI | MSR_EE); 125 - vcpu->arch.shared->msr = new_msr; 125 + kvmppc_set_msr_fast(vcpu, new_msr); 126 126 } else 127 127 kvmppc_set_msr(vcpu, rs_val); 128 128 break; ··· 179 179 ulong cmd = kvmppc_get_gpr(vcpu, 3); 180 180 int i; 181 181 182 - if ((vcpu->arch.shared->msr & MSR_PR) || 182 + if ((kvmppc_get_msr(vcpu) & MSR_PR) || 183 183 !vcpu->arch.papr_enabled) { 184 184 emulated = EMULATE_FAIL; 185 185 break; ··· 261 261 ra_val = kvmppc_get_gpr(vcpu, ra); 262 262 263 263 addr = (ra_val + rb_val) & ~31ULL; 264 - if (!(vcpu->arch.shared->msr & MSR_SF)) 264 + if (!(kvmppc_get_msr(vcpu) & MSR_SF)) 265 265 addr &= 0xffffffff; 266 266 vaddr = addr; 267 267 268 268 r = kvmppc_st(vcpu, &addr, 32, zeros, true); 269 269 if ((r == -ENOENT) || (r == -EPERM)) { 270 270 *advance = 0; 271 - vcpu->arch.shared->dar = vaddr; 271 + kvmppc_set_dar(vcpu, vaddr); 272 272 vcpu->arch.fault_dar = vaddr; 273 273 
274 274 dsisr = DSISR_ISSTORE; ··· 277 277 else if (r == -EPERM) 278 278 dsisr |= DSISR_PROTFAULT; 279 279 280 - vcpu->arch.shared->dsisr = dsisr; 280 + kvmppc_set_dsisr(vcpu, dsisr); 281 281 vcpu->arch.fault_dsisr = dsisr; 282 282 283 283 kvmppc_book3s_queue_irqprio(vcpu, ··· 356 356 to_book3s(vcpu)->sdr1 = spr_val; 357 357 break; 358 358 case SPRN_DSISR: 359 - vcpu->arch.shared->dsisr = spr_val; 359 + kvmppc_set_dsisr(vcpu, spr_val); 360 360 break; 361 361 case SPRN_DAR: 362 - vcpu->arch.shared->dar = spr_val; 362 + kvmppc_set_dar(vcpu, spr_val); 363 363 break; 364 364 case SPRN_HIOR: 365 365 to_book3s(vcpu)->hior = spr_val; ··· 493 493 *spr_val = to_book3s(vcpu)->sdr1; 494 494 break; 495 495 case SPRN_DSISR: 496 - *spr_val = vcpu->arch.shared->dsisr; 496 + *spr_val = kvmppc_get_dsisr(vcpu); 497 497 break; 498 498 case SPRN_DAR: 499 - *spr_val = vcpu->arch.shared->dar; 499 + *spr_val = kvmppc_get_dar(vcpu); 500 500 break; 501 501 case SPRN_HIOR: 502 502 *spr_val = to_book3s(vcpu)->hior;
+1
arch/powerpc/kvm/book3s_exports.c
··· 18 18 */ 19 19 20 20 #include <linux/export.h> 21 + #include <asm/kvm_ppc.h> 21 22 #include <asm/kvm_book3s.h> 22 23 23 24 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+11
arch/powerpc/kvm/book3s_hv.c
··· 1280 1280 goto free_vcpu; 1281 1281 1282 1282 vcpu->arch.shared = &vcpu->arch.shregs; 1283 + #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1284 + /* 1285 + * The shared struct is never shared on HV, 1286 + * so we can always use host endianness 1287 + */ 1288 + #ifdef __BIG_ENDIAN__ 1289 + vcpu->arch.shared_big_endian = true; 1290 + #else 1291 + vcpu->arch.shared_big_endian = false; 1292 + #endif 1293 + #endif 1283 1294 vcpu->arch.mmcr[0] = MMCR0_FC; 1284 1295 vcpu->arch.ctrl = CTRL_RUNLATCH; 1285 1296 /* default to host PVR, since we can't spoof it */
+21 -2
arch/powerpc/kvm/book3s_interrupts.S
··· 104 104 stb r3, HSTATE_RESTORE_HID5(r13) 105 105 106 106 /* Load up guest SPRG3 value, since it's user readable */ 107 - ld r3, VCPU_SHARED(r4) 108 - ld r3, VCPU_SHARED_SPRG3(r3) 107 + lwz r3, VCPU_SHAREDBE(r4) 108 + cmpwi r3, 0 109 + ld r5, VCPU_SHARED(r4) 110 + beq sprg3_little_endian 111 + sprg3_big_endian: 112 + #ifdef __BIG_ENDIAN__ 113 + ld r3, VCPU_SHARED_SPRG3(r5) 114 + #else 115 + addi r5, r5, VCPU_SHARED_SPRG3 116 + ldbrx r3, 0, r5 117 + #endif 118 + b after_sprg3_load 119 + sprg3_little_endian: 120 + #ifdef __LITTLE_ENDIAN__ 121 + ld r3, VCPU_SHARED_SPRG3(r5) 122 + #else 123 + addi r5, r5, VCPU_SHARED_SPRG3 124 + ldbrx r3, 0, r5 125 + #endif 126 + 127 + after_sprg3_load: 109 128 mtspr SPRN_SPRG3, r3 110 129 #endif /* CONFIG_PPC_BOOK3S_64 */ 111 130
+9 -7
arch/powerpc/kvm/book3s_paired_singles.c
··· 165 165 166 166 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) 167 167 { 168 - u64 dsisr; 169 - struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; 168 + u32 dsisr; 169 + u64 msr = kvmppc_get_msr(vcpu); 170 170 171 - shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0); 172 - shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0); 173 - shared->dar = eaddr; 171 + msr = kvmppc_set_field(msr, 33, 36, 0); 172 + msr = kvmppc_set_field(msr, 42, 47, 0); 173 + kvmppc_set_msr(vcpu, msr); 174 + kvmppc_set_dar(vcpu, eaddr); 174 175 /* Page Fault */ 175 176 dsisr = kvmppc_set_field(0, 33, 33, 1); 176 177 if (is_store) 177 - shared->dsisr = kvmppc_set_field(dsisr, 38, 38, 1); 178 + dsisr = kvmppc_set_field(dsisr, 38, 38, 1); 179 + kvmppc_set_dsisr(vcpu, dsisr); 178 180 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); 179 181 } 180 182 ··· 662 660 if (!kvmppc_inst_is_paired_single(vcpu, inst)) 663 661 return EMULATE_FAIL; 664 662 665 - if (!(vcpu->arch.shared->msr & MSR_FP)) { 663 + if (!(kvmppc_get_msr(vcpu) & MSR_FP)) { 666 664 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL); 667 665 return EMULATE_AGAIN; 668 666 }
+58 -39
arch/powerpc/kvm/book3s_pr.c
··· 246 246 247 247 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) 248 248 { 249 - ulong smsr = vcpu->arch.shared->msr; 249 + ulong guest_msr = kvmppc_get_msr(vcpu); 250 + ulong smsr = guest_msr; 250 251 251 252 /* Guest MSR values */ 252 253 smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; 253 254 /* Process MSR values */ 254 255 smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; 255 256 /* External providers the guest reserved */ 256 - smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext); 257 + smsr |= (guest_msr & vcpu->arch.guest_owned_ext); 257 258 /* 64-bit Process MSR values */ 258 259 #ifdef CONFIG_PPC_BOOK3S_64 259 260 smsr |= MSR_ISF | MSR_HV; ··· 264 263 265 264 static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) 266 265 { 267 - ulong old_msr = vcpu->arch.shared->msr; 266 + ulong old_msr = kvmppc_get_msr(vcpu); 268 267 269 268 #ifdef EXIT_DEBUG 270 269 printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); 271 270 #endif 272 271 273 272 msr &= to_book3s(vcpu)->msr_mask; 274 - vcpu->arch.shared->msr = msr; 273 + kvmppc_set_msr_fast(vcpu, msr); 275 274 kvmppc_recalc_shadow_msr(vcpu); 276 275 277 276 if (msr & MSR_POW) { ··· 282 281 283 282 /* Unset POW bit after we woke up */ 284 283 msr &= ~MSR_POW; 285 - vcpu->arch.shared->msr = msr; 284 + kvmppc_set_msr_fast(vcpu, msr); 286 285 } 287 286 } 288 287 289 - if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) != 288 + if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != 290 289 (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { 291 290 kvmppc_mmu_flush_segments(vcpu); 292 291 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); ··· 318 317 } 319 318 320 319 /* Preload FPU if it's enabled */ 321 - if (vcpu->arch.shared->msr & MSR_FP) 320 + if (kvmppc_get_msr(vcpu) & MSR_FP) 322 321 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); 323 322 } 324 323 ··· 439 438 { 440 439 ulong mp_pa = vcpu->arch.magic_page_pa; 441 440 442 - if (!(vcpu->arch.shared->msr & 
MSR_SF)) 441 + if (!(kvmppc_get_msr(vcpu) & MSR_SF)) 443 442 mp_pa = (uint32_t)mp_pa; 444 443 445 444 if (unlikely(mp_pa) && ··· 460 459 int page_found = 0; 461 460 struct kvmppc_pte pte; 462 461 bool is_mmio = false; 463 - bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false; 464 - bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false; 462 + bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false; 463 + bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false; 465 464 u64 vsid; 466 465 467 466 relocated = data ? dr : ir; ··· 481 480 pte.page_size = MMU_PAGE_64K; 482 481 } 483 482 484 - switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 483 + switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { 485 484 case 0: 486 485 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); 487 486 break; ··· 489 488 case MSR_IR: 490 489 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); 491 490 492 - if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR) 491 + if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR) 493 492 pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12)); 494 493 else 495 494 pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12)); ··· 512 511 513 512 if (page_found == -ENOENT) { 514 513 /* Page not found in guest PTE entries */ 515 - vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); 516 - vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr; 517 - vcpu->arch.shared->msr |= 518 - vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL; 514 + u64 ssrr1 = vcpu->arch.shadow_srr1; 515 + u64 msr = kvmppc_get_msr(vcpu); 516 + kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); 517 + kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr); 518 + kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); 519 519 kvmppc_book3s_queue_irqprio(vcpu, vec); 520 520 } else if (page_found == -EPERM) { 521 521 /* Storage protection */ 522 - vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); 523 - vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE; 524 - 
vcpu->arch.shared->dsisr |= DSISR_PROTFAULT; 525 - vcpu->arch.shared->msr |= 526 - vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL; 522 + u32 dsisr = vcpu->arch.fault_dsisr; 523 + u64 ssrr1 = vcpu->arch.shadow_srr1; 524 + u64 msr = kvmppc_get_msr(vcpu); 525 + kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); 526 + dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT; 527 + kvmppc_set_dsisr(vcpu, dsisr); 528 + kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); 527 529 kvmppc_book3s_queue_irqprio(vcpu, vec); 528 530 } else if (page_found == -EINVAL) { 529 531 /* Page not found in guest SLB */ 530 - vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); 532 + kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); 531 533 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); 532 534 } else if (!is_mmio && 533 535 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { ··· 618 614 619 615 ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); 620 616 if (ret == -ENOENT) { 621 - ulong msr = vcpu->arch.shared->msr; 617 + ulong msr = kvmppc_get_msr(vcpu); 622 618 623 619 msr = kvmppc_set_field(msr, 33, 33, 1); 624 620 msr = kvmppc_set_field(msr, 34, 36, 0); 625 - vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0); 621 + msr = kvmppc_set_field(msr, 42, 47, 0); 622 + kvmppc_set_msr_fast(vcpu, msr); 626 623 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); 627 624 return EMULATE_AGAIN; 628 625 } ··· 656 651 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) 657 652 return RESUME_GUEST; 658 653 659 - if (!(vcpu->arch.shared->msr & msr)) { 654 + if (!(kvmppc_get_msr(vcpu) & msr)) { 660 655 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 661 656 return RESUME_GUEST; 662 657 } ··· 797 792 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); 798 793 r = RESUME_GUEST; 799 794 } else { 800 - vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000; 795 + u64 msr = kvmppc_get_msr(vcpu); 796 + msr |= shadow_srr1 & 0x58000000; 797 + kvmppc_set_msr_fast(vcpu, msr); 801 798 
kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 802 799 r = RESUME_GUEST; 803 800 } ··· 839 832 r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); 840 833 srcu_read_unlock(&vcpu->kvm->srcu, idx); 841 834 } else { 842 - vcpu->arch.shared->dar = dar; 843 - vcpu->arch.shared->dsisr = fault_dsisr; 835 + kvmppc_set_dar(vcpu, dar); 836 + kvmppc_set_dsisr(vcpu, fault_dsisr); 844 837 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 845 838 r = RESUME_GUEST; 846 839 } ··· 848 841 } 849 842 case BOOK3S_INTERRUPT_DATA_SEGMENT: 850 843 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { 851 - vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); 844 + kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); 852 845 kvmppc_book3s_queue_irqprio(vcpu, 853 846 BOOK3S_INTERRUPT_DATA_SEGMENT); 854 847 } ··· 886 879 program_interrupt: 887 880 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; 888 881 889 - if (vcpu->arch.shared->msr & MSR_PR) { 882 + if (kvmppc_get_msr(vcpu) & MSR_PR) { 890 883 #ifdef EXIT_DEBUG 891 884 printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); 892 885 #endif ··· 928 921 case BOOK3S_INTERRUPT_SYSCALL: 929 922 if (vcpu->arch.papr_enabled && 930 923 (kvmppc_get_last_sc(vcpu) == 0x44000022) && 931 - !(vcpu->arch.shared->msr & MSR_PR)) { 924 + !(kvmppc_get_msr(vcpu) & MSR_PR)) { 932 925 /* SC 1 papr hypercalls */ 933 926 ulong cmd = kvmppc_get_gpr(vcpu, 3); 934 927 int i; ··· 960 953 gprs[i] = kvmppc_get_gpr(vcpu, i); 961 954 vcpu->arch.osi_needed = 1; 962 955 r = RESUME_HOST_NV; 963 - } else if (!(vcpu->arch.shared->msr & MSR_PR) && 956 + } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && 964 957 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { 965 958 /* KVM PV hypercalls */ 966 959 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); ··· 1001 994 } 1002 995 case BOOK3S_INTERRUPT_ALIGNMENT: 1003 996 if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { 1004 - vcpu->arch.shared->dsisr = 
kvmppc_alignment_dsisr(vcpu, 1005 - kvmppc_get_last_inst(vcpu)); 1006 - vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu, 1007 - kvmppc_get_last_inst(vcpu)); 997 + u32 last_inst = kvmppc_get_last_inst(vcpu); 998 + u32 dsisr; 999 + u64 dar; 1000 + 1001 + dsisr = kvmppc_alignment_dsisr(vcpu, last_inst); 1002 + dar = kvmppc_alignment_dar(vcpu, last_inst); 1003 + 1004 + kvmppc_set_dsisr(vcpu, dsisr); 1005 + kvmppc_set_dar(vcpu, dar); 1006 + 1008 1007 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 1009 1008 } 1010 1009 r = RESUME_GUEST; ··· 1075 1062 } 1076 1063 } else { 1077 1064 for (i = 0; i < 16; i++) 1078 - sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i]; 1065 + sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i); 1079 1066 1080 1067 for (i = 0; i < 8; i++) { 1081 1068 sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; ··· 1211 1198 goto uninit_vcpu; 1212 1199 /* the real shared page fills the last 4k of our page */ 1213 1200 vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); 1214 - 1215 1201 #ifdef CONFIG_PPC_BOOK3S_64 1202 + /* Always start the shared struct in native endian mode */ 1203 + #ifdef __BIG_ENDIAN__ 1204 + vcpu->arch.shared_big_endian = true; 1205 + #else 1206 + vcpu->arch.shared_big_endian = false; 1207 + #endif 1208 + 1216 1209 /* 1217 1210 * Default to the same as the host if we're on sufficiently 1218 1211 * recent machine that we have 1TB segments; ··· 1312 1293 #endif 1313 1294 1314 1295 /* Preload FPU if it's enabled */ 1315 - if (vcpu->arch.shared->msr & MSR_FP) 1296 + if (kvmppc_get_msr(vcpu) & MSR_FP) 1316 1297 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); 1317 1298 1318 1299 kvmppc_fix_ee_before_entry();
+1 -1
arch/powerpc/kvm/book3s_pr_papr.c
··· 278 278 case H_PUT_TCE: 279 279 return kvmppc_h_pr_put_tce(vcpu); 280 280 case H_CEDE: 281 - vcpu->arch.shared->msr |= MSR_EE; 281 + kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE); 282 282 kvm_vcpu_block(vcpu); 283 283 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 284 284 vcpu->stat.halt_wakeup++;
+12 -12
arch/powerpc/kvm/emulate.c
··· 97 97 98 98 switch (sprn) { 99 99 case SPRN_SRR0: 100 - vcpu->arch.shared->srr0 = spr_val; 100 + kvmppc_set_srr0(vcpu, spr_val); 101 101 break; 102 102 case SPRN_SRR1: 103 - vcpu->arch.shared->srr1 = spr_val; 103 + kvmppc_set_srr1(vcpu, spr_val); 104 104 break; 105 105 106 106 /* XXX We need to context-switch the timebase for ··· 114 114 break; 115 115 116 116 case SPRN_SPRG0: 117 - vcpu->arch.shared->sprg0 = spr_val; 117 + kvmppc_set_sprg0(vcpu, spr_val); 118 118 break; 119 119 case SPRN_SPRG1: 120 - vcpu->arch.shared->sprg1 = spr_val; 120 + kvmppc_set_sprg1(vcpu, spr_val); 121 121 break; 122 122 case SPRN_SPRG2: 123 - vcpu->arch.shared->sprg2 = spr_val; 123 + kvmppc_set_sprg2(vcpu, spr_val); 124 124 break; 125 125 case SPRN_SPRG3: 126 - vcpu->arch.shared->sprg3 = spr_val; 126 + kvmppc_set_sprg3(vcpu, spr_val); 127 127 break; 128 128 129 129 /* PIR can legally be written, but we ignore it */ ··· 150 150 151 151 switch (sprn) { 152 152 case SPRN_SRR0: 153 - spr_val = vcpu->arch.shared->srr0; 153 + spr_val = kvmppc_get_srr0(vcpu); 154 154 break; 155 155 case SPRN_SRR1: 156 - spr_val = vcpu->arch.shared->srr1; 156 + spr_val = kvmppc_get_srr1(vcpu); 157 157 break; 158 158 case SPRN_PVR: 159 159 spr_val = vcpu->arch.pvr; ··· 173 173 break; 174 174 175 175 case SPRN_SPRG0: 176 - spr_val = vcpu->arch.shared->sprg0; 176 + spr_val = kvmppc_get_sprg0(vcpu); 177 177 break; 178 178 case SPRN_SPRG1: 179 - spr_val = vcpu->arch.shared->sprg1; 179 + spr_val = kvmppc_get_sprg1(vcpu); 180 180 break; 181 181 case SPRN_SPRG2: 182 - spr_val = vcpu->arch.shared->sprg2; 182 + spr_val = kvmppc_get_sprg2(vcpu); 183 183 break; 184 184 case SPRN_SPRG3: 185 - spr_val = vcpu->arch.shared->sprg3; 185 + spr_val = kvmppc_get_sprg3(vcpu); 186 186 break; 187 187 /* Note: SPRG4-7 are user-readable, so we don't get 188 188 * a trap. */
+32 -1
arch/powerpc/kvm/powerpc.c
··· 125 125 } 126 126 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); 127 127 128 + #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) 129 + static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) 130 + { 131 + struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; 132 + int i; 133 + 134 + shared->sprg0 = swab64(shared->sprg0); 135 + shared->sprg1 = swab64(shared->sprg1); 136 + shared->sprg2 = swab64(shared->sprg2); 137 + shared->sprg3 = swab64(shared->sprg3); 138 + shared->srr0 = swab64(shared->srr0); 139 + shared->srr1 = swab64(shared->srr1); 140 + shared->dar = swab64(shared->dar); 141 + shared->msr = swab64(shared->msr); 142 + shared->dsisr = swab32(shared->dsisr); 143 + shared->int_pending = swab32(shared->int_pending); 144 + for (i = 0; i < ARRAY_SIZE(shared->sr); i++) 145 + shared->sr[i] = swab32(shared->sr[i]); 146 + } 147 + #endif 148 + 128 149 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) 129 150 { 130 151 int nr = kvmppc_get_gpr(vcpu, 11); ··· 156 135 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); 157 136 unsigned long r2 = 0; 158 137 159 - if (!(vcpu->arch.shared->msr & MSR_SF)) { 138 + if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { 160 139 /* 32 bit mode */ 161 140 param1 &= 0xffffffff; 162 141 param2 &= 0xffffffff; ··· 167 146 switch (nr) { 168 147 case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE): 169 148 { 149 + #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) 150 + /* Book3S can be little endian, find it out here */ 151 + int shared_big_endian = true; 152 + if (vcpu->arch.intr_msr & MSR_LE) 153 + shared_big_endian = false; 154 + if (shared_big_endian != vcpu->arch.shared_big_endian) 155 + kvmppc_swab_shared(vcpu); 156 + vcpu->arch.shared_big_endian = shared_big_endian; 157 + #endif 158 + 170 159 vcpu->arch.magic_page_pa = param1; 171 160 vcpu->arch.magic_page_ea = param2; 172 161
+1 -1
arch/powerpc/kvm/trace_pr.h
··· 255 255 __entry->exit_nr = exit_nr; 256 256 __entry->pc = kvmppc_get_pc(vcpu); 257 257 __entry->dar = kvmppc_get_fault_dar(vcpu); 258 - __entry->msr = vcpu->arch.shared->msr; 258 + __entry->msr = kvmppc_get_msr(vcpu); 259 259 __entry->srr1 = vcpu->arch.shadow_srr1; 260 260 __entry->last_inst = vcpu->arch.last_inst; 261 261 ),