Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull more KVM updates from Paolo Bonzini:
"Second batch of KVM updates. Some minor x86 fixes, two s390 guest
features that need some handling in the host, and all the PPC changes.

The PPC changes include support for little-endian guests and
enablement for new POWER8 features"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (45 commits)
x86, kvm: correctly access the KVM_CPUID_FEATURES leaf at 0x40000101
x86, kvm: cache the base of the KVM cpuid leaves
kvm: x86: move KVM_CAP_HYPERV_TIME outside #ifdef
KVM: PPC: Book3S PR: Cope with doorbell interrupts
KVM: PPC: Book3S HV: Add software abort codes for transactional memory
KVM: PPC: Book3S HV: Add new state for transactional memory
powerpc/Kconfig: Make TM select VSX and VMX
KVM: PPC: Book3S HV: Basic little-endian guest support
KVM: PPC: Book3S HV: Add support for DABRX register on POWER7
KVM: PPC: Book3S HV: Prepare for host using hypervisor doorbells
KVM: PPC: Book3S HV: Handle new LPCR bits on POWER8
KVM: PPC: Book3S HV: Handle guest using doorbells for IPIs
KVM: PPC: Book3S HV: Consolidate code that checks reason for wake from nap
KVM: PPC: Book3S HV: Implement architecture compatibility modes for POWER8
KVM: PPC: Book3S HV: Add handler for HV facility unavailable
KVM: PPC: Book3S HV: Flush the correct number of TLB sets on POWER8
KVM: PPC: Book3S HV: Context-switch new POWER8 SPRs
KVM: PPC: Book3S HV: Align physical and virtual CPU thread numbers
KVM: PPC: Book3S HV: Don't set DABR on POWER8
kvm/ppc: IRQ disabling cleanup
...

+1703 -1110
+1
Documentation/virtual/kvm/api.txt
··· 1838 1838 PPC | KVM_REG_PPC_LPCR | 64 1839 1839 PPC | KVM_REG_PPC_PPR | 64 1840 1840 PPC | KVM_REG_PPC_ARCH_COMPAT 32 1841 + PPC | KVM_REG_PPC_DABRX | 32 1841 1842 PPC | KVM_REG_PPC_TM_GPR0 | 64 1842 1843 ... 1843 1844 PPC | KVM_REG_PPC_TM_GPR31 | 64
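
The new 32-bit DABRX entry in this table is driven from userspace through the existing KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. A minimal sketch, assuming an already-open vcpu file descriptor (vcpu_fd and the function name are illustrative, not from this series):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>  /* pulls in asm/kvm.h, which defines KVM_REG_PPC_DABRX */

    /* Sketch only: program the guest's DABRX via the new ONE_REG id. */
    static int set_guest_dabrx(int vcpu_fd, uint32_t dabrx)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_PPC_DABRX,
                    .addr = (uintptr_t)&dabrx,
            };

            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }
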
+2
arch/powerpc/Kconfig
··· 342 342 bool "Transactional Memory support for POWERPC" 343 343 depends on PPC_BOOK3S_64 344 344 depends on SMP 345 + select ALTIVEC 346 + select VSX 345 347 default n 346 348 ---help--- 347 349 Support user-mode Transactional Memory on POWERPC.
+111
arch/powerpc/include/asm/epapr_hcalls.h
··· 460 460 461 461 return r3; 462 462 } 463 + 464 + #ifdef CONFIG_EPAPR_PARAVIRT 465 + static inline unsigned long epapr_hypercall(unsigned long *in, 466 + unsigned long *out, 467 + unsigned long nr) 468 + { 469 + unsigned long register r0 asm("r0"); 470 + unsigned long register r3 asm("r3") = in[0]; 471 + unsigned long register r4 asm("r4") = in[1]; 472 + unsigned long register r5 asm("r5") = in[2]; 473 + unsigned long register r6 asm("r6") = in[3]; 474 + unsigned long register r7 asm("r7") = in[4]; 475 + unsigned long register r8 asm("r8") = in[5]; 476 + unsigned long register r9 asm("r9") = in[6]; 477 + unsigned long register r10 asm("r10") = in[7]; 478 + unsigned long register r11 asm("r11") = nr; 479 + unsigned long register r12 asm("r12"); 480 + 481 + asm volatile("bl epapr_hypercall_start" 482 + : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6), 483 + "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11), 484 + "=r"(r12) 485 + : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8), 486 + "r"(r9), "r"(r10), "r"(r11) 487 + : "memory", "cc", "xer", "ctr", "lr"); 488 + 489 + out[0] = r4; 490 + out[1] = r5; 491 + out[2] = r6; 492 + out[3] = r7; 493 + out[4] = r8; 494 + out[5] = r9; 495 + out[6] = r10; 496 + out[7] = r11; 497 + 498 + return r3; 499 + } 500 + #else 501 + static unsigned long epapr_hypercall(unsigned long *in, 502 + unsigned long *out, 503 + unsigned long nr) 504 + { 505 + return EV_UNIMPLEMENTED; 506 + } 507 + #endif 508 + 509 + static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2) 510 + { 511 + unsigned long in[8]; 512 + unsigned long out[8]; 513 + unsigned long r; 514 + 515 + r = epapr_hypercall(in, out, nr); 516 + *r2 = out[0]; 517 + 518 + return r; 519 + } 520 + 521 + static inline long epapr_hypercall0(unsigned int nr) 522 + { 523 + unsigned long in[8]; 524 + unsigned long out[8]; 525 + 526 + return epapr_hypercall(in, out, nr); 527 + } 528 + 529 + static inline long epapr_hypercall1(unsigned int nr, unsigned long p1) 530 + { 531 + unsigned long in[8]; 532 + unsigned long out[8]; 533 + 534 + in[0] = p1; 535 + return epapr_hypercall(in, out, nr); 536 + } 537 + 538 + static inline long epapr_hypercall2(unsigned int nr, unsigned long p1, 539 + unsigned long p2) 540 + { 541 + unsigned long in[8]; 542 + unsigned long out[8]; 543 + 544 + in[0] = p1; 545 + in[1] = p2; 546 + return epapr_hypercall(in, out, nr); 547 + } 548 + 549 + static inline long epapr_hypercall3(unsigned int nr, unsigned long p1, 550 + unsigned long p2, unsigned long p3) 551 + { 552 + unsigned long in[8]; 553 + unsigned long out[8]; 554 + 555 + in[0] = p1; 556 + in[1] = p2; 557 + in[2] = p3; 558 + return epapr_hypercall(in, out, nr); 559 + } 560 + 561 + static inline long epapr_hypercall4(unsigned int nr, unsigned long p1, 562 + unsigned long p2, unsigned long p3, 563 + unsigned long p4) 564 + { 565 + unsigned long in[8]; 566 + unsigned long out[8]; 567 + 568 + in[0] = p1; 569 + in[1] = p2; 570 + in[2] = p3; 571 + in[3] = p4; 572 + return epapr_hypercall(in, out, nr); 573 + } 463 574 #endif /* !__ASSEMBLY__ */ 464 575 #endif /* _EPAPR_HCALLS_H */
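
For reference, callers are expected to use the numbered wrappers above rather than fill the in[]/out[] arrays by hand; kvm_arch_para_features() later in this series is the pattern. A minimal sketch in that style (the function name here is illustrative):

    /* Sketch modelled on kvm_arch_para_features(): KVM_HCALL_TOKEN()
     * tags the call number as a KVM vendor hypercall, and the 0_1
     * wrapper returns a single output value through its pointer arg. */
    static unsigned int query_kvm_features(void)
    {
            unsigned long r;

            if (epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r))
                    return 0;       /* EV_UNIMPLEMENTED or other failure */

            return r;
    }
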
+3
arch/powerpc/include/asm/kvm_asm.h
··· 92 92 #define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800 93 93 #define BOOK3S_INTERRUPT_DECREMENTER 0x900 94 94 #define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980 95 + #define BOOK3S_INTERRUPT_DOORBELL 0xa00 95 96 #define BOOK3S_INTERRUPT_SYSCALL 0xc00 96 97 #define BOOK3S_INTERRUPT_TRACE 0xd00 97 98 #define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00 98 99 #define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20 99 100 #define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40 101 + #define BOOK3S_INTERRUPT_H_DOORBELL 0xe80 100 102 #define BOOK3S_INTERRUPT_PERFMON 0xf00 101 103 #define BOOK3S_INTERRUPT_ALTIVEC 0xf20 102 104 #define BOOK3S_INTERRUPT_VSX 0xf40 105 + #define BOOK3S_INTERRUPT_H_FAC_UNAVAIL 0xf80 103 106 104 107 #define BOOK3S_IRQPRIO_SYSTEM_RESET 0 105 108 #define BOOK3S_IRQPRIO_DATA_SEGMENT 1
+13 -14
arch/powerpc/include/asm/kvm_book3s.h
··· 186 186 187 187 extern void kvmppc_entry_trampoline(void); 188 188 extern void kvmppc_hv_entry_trampoline(void); 189 - extern void kvmppc_load_up_fpu(void); 190 - extern void kvmppc_load_up_altivec(void); 191 - extern void kvmppc_load_up_vsx(void); 192 189 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); 193 190 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); 194 191 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); ··· 268 271 return vcpu->arch.pc; 269 272 } 270 273 271 - static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) 274 + static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) 272 275 { 273 - ulong pc = kvmppc_get_pc(vcpu); 276 + return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE); 277 + } 274 278 279 + static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc) 280 + { 275 281 /* Load the instruction manually if it failed to do so in the 276 282 * exit path */ 277 283 if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) 278 284 kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false); 279 285 280 - return vcpu->arch.last_inst; 286 + return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) : 287 + vcpu->arch.last_inst; 288 + } 289 + 290 + static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) 291 + { 292 + return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu)); 281 293 } 282 294 283 295 /* ··· 296 290 */ 297 291 static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu) 298 292 { 299 - ulong pc = kvmppc_get_pc(vcpu) - 4; 300 - 301 - /* Load the instruction manually if it failed to do so in the 302 - * exit path */ 303 - if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) 304 - kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false); 305 - 306 - return vcpu->arch.last_inst; 293 + return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4); 307 294 } 308 295 309 296 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
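
The effect of the new helper: a guest is treated as cross-endian whenever its MSR_LE setting differs from the host kernel's, and an instruction image fetched from guest memory (which is in guest byte order) must be swapped before it is emulated. An illustrative fragment, mirroring kvmppc_get_last_inst_internal() above:

    /* Sketch: normalise a fetched guest instruction to host byte order. */
    u32 inst = vcpu->arch.last_inst;

    if (kvmppc_need_byteswap(vcpu))         /* guest MSR_LE != host MSR_LE */
            inst = swab32(inst);
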
+1
arch/powerpc/include/asm/kvm_book3s_asm.h
··· 88 88 u8 hwthread_req; 89 89 u8 hwthread_state; 90 90 u8 host_ipi; 91 + u8 ptid; 91 92 struct kvm_vcpu *kvm_vcpu; 92 93 struct kvmppc_vcore *kvm_vcore; 93 94 unsigned long xics_phys;
+6
arch/powerpc/include/asm/kvm_booke.h
··· 63 63 return vcpu->arch.xer; 64 64 } 65 65 66 + static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) 67 + { 68 + /* XXX Would need to check TLB entry */ 69 + return false; 70 + } 71 + 66 72 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) 67 73 { 68 74 return vcpu->arch.last_inst;
+51 -10
arch/powerpc/include/asm/kvm_host.h
··· 288 288 int n_woken; 289 289 int nap_count; 290 290 int napping_threads; 291 + int first_vcpuid; 291 292 u16 pcpu; 292 293 u16 last_cpu; 293 294 u8 vcore_state; ··· 299 298 u64 stolen_tb; 300 299 u64 preempt_tb; 301 300 struct kvm_vcpu *runner; 301 + struct kvm *kvm; 302 302 u64 tb_offset; /* guest timebase - host timebase */ 303 303 ulong lpcr; 304 304 u32 arch_compat; 305 305 ulong pcr; 306 + ulong dpdes; /* doorbell state (POWER8) */ 306 307 }; 307 308 308 309 #define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff) ··· 413 410 414 411 ulong gpr[32]; 415 412 416 - u64 fpr[32]; 417 - u64 fpscr; 413 + struct thread_fp_state fp; 418 414 419 415 #ifdef CONFIG_SPE 420 416 ulong evr[32]; ··· 422 420 u64 acc; 423 421 #endif 424 422 #ifdef CONFIG_ALTIVEC 425 - vector128 vr[32]; 426 - vector128 vscr; 427 - #endif 428 - 429 - #ifdef CONFIG_VSX 430 - u64 vsr[64]; 423 + struct thread_vr_state vr; 431 424 #endif 432 425 433 426 #ifdef CONFIG_KVM_BOOKE_HV ··· 449 452 ulong pc; 450 453 ulong ctr; 451 454 ulong lr; 455 + ulong tar; 452 456 453 457 ulong xer; 454 458 u32 cr; ··· 459 461 ulong guest_owned_ext; 460 462 ulong purr; 461 463 ulong spurr; 464 + ulong ic; 465 + ulong vtb; 462 466 ulong dscr; 463 467 ulong amr; 464 468 ulong uamor; 469 + ulong iamr; 465 470 u32 ctrl; 471 + u32 dabrx; 466 472 ulong dabr; 473 + ulong dawr; 474 + ulong dawrx; 475 + ulong ciabr; 467 476 ulong cfar; 468 477 ulong ppr; 478 + ulong pspb; 479 + ulong fscr; 480 + ulong ebbhr; 481 + ulong ebbrr; 482 + ulong bescr; 483 + ulong csigr; 484 + ulong tacr; 485 + ulong tcscr; 486 + ulong acop; 487 + ulong wort; 469 488 ulong shadow_srr1; 470 489 #endif 471 490 u32 vrsave; /* also USPRG0 */ ··· 517 502 u32 ccr1; 518 503 u32 dbsr; 519 504 520 - u64 mmcr[3]; 505 + u64 mmcr[5]; 521 506 u32 pmc[8]; 507 + u32 spmc[2]; 522 508 u64 siar; 523 509 u64 sdar; 510 + u64 sier; 511 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 512 + u64 tfhar; 513 + u64 texasr; 514 + u64 tfiar; 515 + 516 + u32 cr_tm; 517 + u64 lr_tm; 518 + u64 ctr_tm; 519 + u64 amr_tm; 520 + u64 ppr_tm; 521 + u64 dscr_tm; 522 + u64 tar_tm; 523 + 524 + ulong gpr_tm[32]; 525 + 526 + struct thread_fp_state fp_tm; 527 + 528 + struct thread_vr_state vr_tm; 529 + u32 vrsave_tm; /* also USPRG0 */ 530 + 531 + #endif 524 532 525 533 #ifdef CONFIG_KVM_EXIT_TIMING 526 534 struct mutex exit_timing_lock; ··· 584 546 #endif 585 547 gpa_t paddr_accessed; 586 548 gva_t vaddr_accessed; 549 + pgd_t *pgdir; 587 550 588 551 u8 io_gpr; /* GPR used as IO source/target */ 589 552 u8 mmio_is_bigendian; ··· 642 603 struct list_head run_list; 643 604 struct task_struct *run_task; 644 605 struct kvm_run *kvm_run; 645 - pgd_t *pgdir; 646 606 647 607 spinlock_t vpa_update_lock; 648 608 struct kvmppc_vpa vpa; ··· 654 616 spinlock_t tbacct_lock; 655 617 u64 busy_stolen; 656 618 u64 busy_preempt; 619 + unsigned long intr_msr; 657 620 #endif 658 621 }; 622 + 623 + #define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET] 659 624 660 625 /* Values for vcpu->arch.state */ 661 626 #define KVMPPC_VCPU_NOTREADY 0
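
The floating-point rework here replaces the separate fpr[]/fpscr/vsr[] fields with the regular thread_fp_state/thread_vr_state structures, in which each fpr[i] is a full two-doubleword VSX register. The new VCPU_FPR() macro selects the FP doubleword, as this hedged sketch spells out (the helper name is illustrative):

    /* Sketch: FP register i is now one half of VSX register i;
     * TS_FPROFFSET (asm/processor.h) picks the FP doubleword. */
    static inline u64 guest_fpr(struct kvm_vcpu *vcpu, int i)
    {
            return VCPU_FPR(vcpu, i);  /* vcpu->arch.fp.fpr[i][TS_FPROFFSET] */
    }
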
+1 -79
arch/powerpc/include/asm/kvm_para.h
··· 39 39 return 1; 40 40 } 41 41 42 - extern unsigned long kvm_hypercall(unsigned long *in, 43 - unsigned long *out, 44 - unsigned long nr); 45 - 46 42 #else 47 43 48 44 static inline int kvm_para_available(void) ··· 46 50 return 0; 47 51 } 48 52 49 - static unsigned long kvm_hypercall(unsigned long *in, 50 - unsigned long *out, 51 - unsigned long nr) 52 - { 53 - return EV_UNIMPLEMENTED; 54 - } 55 - 56 53 #endif 57 - 58 - static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2) 59 - { 60 - unsigned long in[8]; 61 - unsigned long out[8]; 62 - unsigned long r; 63 - 64 - r = kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); 65 - *r2 = out[0]; 66 - 67 - return r; 68 - } 69 - 70 - static inline long kvm_hypercall0(unsigned int nr) 71 - { 72 - unsigned long in[8]; 73 - unsigned long out[8]; 74 - 75 - return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); 76 - } 77 - 78 - static inline long kvm_hypercall1(unsigned int nr, unsigned long p1) 79 - { 80 - unsigned long in[8]; 81 - unsigned long out[8]; 82 - 83 - in[0] = p1; 84 - return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); 85 - } 86 - 87 - static inline long kvm_hypercall2(unsigned int nr, unsigned long p1, 88 - unsigned long p2) 89 - { 90 - unsigned long in[8]; 91 - unsigned long out[8]; 92 - 93 - in[0] = p1; 94 - in[1] = p2; 95 - return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); 96 - } 97 - 98 - static inline long kvm_hypercall3(unsigned int nr, unsigned long p1, 99 - unsigned long p2, unsigned long p3) 100 - { 101 - unsigned long in[8]; 102 - unsigned long out[8]; 103 - 104 - in[0] = p1; 105 - in[1] = p2; 106 - in[2] = p3; 107 - return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); 108 - } 109 - 110 - static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, 111 - unsigned long p2, unsigned long p3, 112 - unsigned long p4) 113 - { 114 - unsigned long in[8]; 115 - unsigned long out[8]; 116 - 117 - in[0] = p1; 118 - in[1] = p2; 119 - in[2] = p3; 120 - in[3] = p4; 121 - return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); 122 - } 123 - 124 54 125 55 static inline unsigned int kvm_arch_para_features(void) 126 56 { ··· 55 133 if (!kvm_para_available()) 56 134 return 0; 57 135 58 - if(kvm_hypercall0_1(KVM_HC_FEATURES, &r)) 136 + if(epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r)) 59 137 return 0; 60 138 61 139 return r;
+10 -3
arch/powerpc/include/asm/kvm_ppc.h
··· 54 54 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); 55 55 extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 56 56 unsigned int rt, unsigned int bytes, 57 - int is_bigendian); 57 + int is_default_endian); 58 58 extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 59 59 unsigned int rt, unsigned int bytes, 60 - int is_bigendian); 60 + int is_default_endian); 61 61 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 62 - u64 val, unsigned int bytes, int is_bigendian); 62 + u64 val, unsigned int bytes, 63 + int is_default_endian); 63 64 64 65 extern int kvmppc_emulate_instruction(struct kvm_run *run, 65 66 struct kvm_vcpu *vcpu); ··· 456 455 trace_hardirqs_on(); 457 456 458 457 #ifdef CONFIG_PPC64 458 + /* 459 + * To avoid races, the caller must have gone directly from having 460 + * interrupts fully-enabled to hard-disabled. 461 + */ 462 + WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS); 463 + 459 464 /* Only need to enable IRQs by hard enabling them after this */ 460 465 local_paca->irq_happened = 0; 461 466 local_paca->soft_enabled = 1;
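
The added WARN_ON() encodes a calling convention for guest entry: the caller must go straight from fully-enabled interrupts to hard-disabled, so that no lazily-masked interrupt is pending when the lazy-EE state is reset. A hedged sketch of the discipline it checks for (hard_irq_disable() is the standard ppc64 primitive that leaves irq_happened == PACA_IRQ_HARD_DIS):

    local_irq_enable();     /* start with interrupts fully enabled...    */
    hard_irq_disable();     /* ...then hard-disable in a single step,    */
                            /* leaving irq_happened == PACA_IRQ_HARD_DIS */
    /* only now is it safe to call the entry helper patched above */
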
+21
arch/powerpc/include/asm/pgtable.h
··· 287 287 #endif 288 288 pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, 289 289 unsigned *shift); 290 + 291 + static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva, 292 + unsigned long *pte_sizep) 293 + { 294 + pte_t *ptep; 295 + unsigned long ps = *pte_sizep; 296 + unsigned int shift; 297 + 298 + ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift); 299 + if (!ptep) 300 + return NULL; 301 + if (shift) 302 + *pte_sizep = 1ul << shift; 303 + else 304 + *pte_sizep = PAGE_SIZE; 305 + 306 + if (ps > *pte_sizep) 307 + return NULL; 308 + 309 + return ptep; 310 + } 290 311 #endif /* __ASSEMBLY__ */ 291 312 292 313 #endif /* __KERNEL__ */
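
Usage note for lookup_linux_ptep(): on entry *pte_sizep holds the smallest mapping size the caller can accept, and on success it is rewritten with the actual size backing hva (PAGE_SIZE, or 1 << shift for a huge page). A hedged sketch of the call pattern:

    /* Sketch: require at least a PAGE_SIZE mapping at hva. */
    unsigned long pte_size = PAGE_SIZE;
    pte_t *ptep = lookup_linux_ptep(pgdir, hva, &pte_size);

    if (!ptep)
            return -EFAULT;  /* no PTE, or the mapping was too small */
    /* pte_size now holds the real mapping size */
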
+35 -8
arch/powerpc/include/asm/reg.h
··· 223 223 #define CTRL_TE 0x00c00000 /* thread enable */ 224 224 #define CTRL_RUNLATCH 0x1 225 225 #define SPRN_DAWR 0xB4 226 + #define SPRN_CIABR 0xBB 227 + #define CIABR_PRIV 0x3 228 + #define CIABR_PRIV_USER 1 229 + #define CIABR_PRIV_SUPER 2 230 + #define CIABR_PRIV_HYPER 3 226 231 #define SPRN_DAWRX 0xBC 227 - #define DAWRX_USER (1UL << 0) 228 - #define DAWRX_KERNEL (1UL << 1) 229 - #define DAWRX_HYP (1UL << 2) 232 + #define DAWRX_USER __MASK(0) 233 + #define DAWRX_KERNEL __MASK(1) 234 + #define DAWRX_HYP __MASK(2) 235 + #define DAWRX_WTI __MASK(3) 236 + #define DAWRX_WT __MASK(4) 237 + #define DAWRX_DR __MASK(5) 238 + #define DAWRX_DW __MASK(6) 230 239 #define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ 231 240 #define SPRN_DABR2 0x13D /* e300 */ 232 241 #define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */ 233 - #define DABRX_USER (1UL << 0) 234 - #define DABRX_KERNEL (1UL << 1) 235 - #define DABRX_HYP (1UL << 2) 236 - #define DABRX_BTI (1UL << 3) 242 + #define DABRX_USER __MASK(0) 243 + #define DABRX_KERNEL __MASK(1) 244 + #define DABRX_HYP __MASK(2) 245 + #define DABRX_BTI __MASK(3) 237 246 #define DABRX_ALL (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER) 238 247 #define SPRN_DAR 0x013 /* Data Address Register */ 239 248 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ ··· 269 260 #define SPRN_HRMOR 0x139 /* Real mode offset register */ 270 261 #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ 271 262 #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ 263 + #define SPRN_IC 0x350 /* Virtual Instruction Count */ 264 + #define SPRN_VTB 0x351 /* Virtual Time Base */ 272 265 /* HFSCR and FSCR bit numbers are the same */ 273 266 #define FSCR_TAR_LG 8 /* Enable Target Address Register */ 274 267 #define FSCR_EBB_LG 7 /* Enable Event Based Branching */ ··· 309 298 #define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */ 310 299 #define LPCR_RMLS_SH (63-37) 311 300 #define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */ 301 + #define LPCR_AIL 0x01800000 /* Alternate interrupt location */ 312 302 #define LPCR_AIL_0 0x00000000 /* MMU off exception offset 0x0 */ 313 303 #define LPCR_AIL_3 0x01800000 /* MMU on exception offset 0xc00...4xxx */ 314 - #define LPCR_PECE 0x00007000 /* powersave exit cause enable */ 304 + #define LPCR_ONL 0x00040000 /* online - PURR/SPURR count */ 305 + #define LPCR_PECE 0x0001f000 /* powersave exit cause enable */ 306 + #define LPCR_PECEDP 0x00010000 /* directed priv dbells cause exit */ 307 + #define LPCR_PECEDH 0x00008000 /* directed hyp dbells cause exit */ 315 308 #define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */ 316 309 #define LPCR_PECE1 0x00002000 /* decrementer can cause exit */ 317 310 #define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */ ··· 337 322 #define SPRN_PCR 0x152 /* Processor compatibility register */ 338 323 #define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */ 339 324 #define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */ 325 + #define PCR_TM_DIS (1ul << (63-2)) /* Trans. memory disable (POWER8) */ 326 + #define PCR_ARCH_206 0x4 /* Architecture 2.06 */ 340 327 #define PCR_ARCH_205 0x2 /* Architecture 2.05 */ 341 328 #define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */ 342 329 #define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */ ··· 385 368 #define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */ 386 369 #define DER_DPIE 0x00000001 /* Dev. 
Port Nonmaskable Request */ 387 370 #define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */ 371 + #define SPRN_DHDES 0x0B1 /* Directed Hyp. Doorbell Exc. State */ 372 + #define SPRN_DPDES 0x0B0 /* Directed Priv. Doorbell Exc. State */ 388 373 #define SPRN_EAR 0x11A /* External Address Register */ 389 374 #define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */ 390 375 #define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */ ··· 446 427 #define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ 447 428 #define SPRN_IABR2 0x3FA /* 83xx */ 448 429 #define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */ 430 + #define SPRN_IAMR 0x03D /* Instr. Authority Mask Reg */ 449 431 #define SPRN_HID4 0x3F4 /* 970 HID4 */ 450 432 #define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */ 451 433 #define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */ ··· 561 541 #define SPRN_PIR 0x3FF /* Processor Identification Register */ 562 542 #endif 563 543 #define SPRN_TIR 0x1BE /* Thread Identification Register */ 544 + #define SPRN_PSPB 0x09F /* Problem State Priority Boost reg */ 564 545 #define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */ 565 546 #define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */ 566 547 #define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */ ··· 703 682 #define SPRN_EBBHR 804 /* Event based branch handler register */ 704 683 #define SPRN_EBBRR 805 /* Event based branch return register */ 705 684 #define SPRN_BESCR 806 /* Branch event status and control register */ 685 + #define SPRN_WORT 895 /* Workload optimization register - thread */ 706 686 707 687 #define SPRN_PMC1 787 708 688 #define SPRN_PMC2 788 ··· 720 698 #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */ 721 699 #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */ 722 700 #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */ 701 + #define SPRN_TACR 888 702 + #define SPRN_TCSCR 889 703 + #define SPRN_CSIGR 890 704 + #define SPRN_SPMC1 892 705 + #define SPRN_SPMC2 893 723 706 724 707 /* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */ 725 708 #define MMCR0_USER_MASK (MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
-2
arch/powerpc/include/asm/switch_to.h
··· 25 25 static inline void save_tar(struct thread_struct *prev) {} 26 26 #endif 27 27 28 - extern void load_up_fpu(void); 29 28 extern void enable_kernel_fp(void); 30 29 extern void enable_kernel_altivec(void); 31 - extern void load_up_altivec(struct task_struct *); 32 30 extern int emulate_altivec(struct pt_regs *); 33 31 extern void __giveup_vsx(struct task_struct *); 34 32 extern void giveup_vsx(struct task_struct *);
+3
arch/powerpc/include/uapi/asm/kvm.h
··· 545 545 #define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1) 546 546 #define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2) 547 547 #define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3) 548 + #define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4) 548 549 549 550 #define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4) 550 551 #define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5) ··· 553 552 554 553 /* Architecture compatibility level */ 555 554 #define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7) 555 + 556 + #define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8) 556 557 557 558 /* Transactional Memory checkpointed state: 558 559 * This is all GPRs, all VSX regs and a subset of SPRs
+2
arch/powerpc/include/uapi/asm/tm.h
··· 6 6 * the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor. 7 7 */ 8 8 #define TM_CAUSE_PERSISTENT 0x01 9 + #define TM_CAUSE_KVM_RESCHED 0xe0 /* From PAPR */ 10 + #define TM_CAUSE_KVM_FAC_UNAV 0xe2 /* From PAPR */ 9 11 #define TM_CAUSE_RESCHED 0xde 10 12 #define TM_CAUSE_TLBI 0xdc 11 13 #define TM_CAUSE_FAC_UNAV 0xda
+42 -8
arch/powerpc/kernel/asm-offsets.c
··· 438 438 DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid)); 439 439 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); 440 440 DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); 441 - DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr)); 442 - DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr)); 441 + DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr)); 443 442 #ifdef CONFIG_ALTIVEC 444 - DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr)); 445 - DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr)); 446 - #endif 447 - #ifdef CONFIG_VSX 448 - DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr)); 443 + DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr)); 449 444 #endif 450 445 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); 451 446 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); 452 447 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); 448 + DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar)); 453 449 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); 454 450 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); 455 451 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE ··· 493 497 DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); 494 498 DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr)); 495 499 DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty)); 500 + DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr)); 496 501 #endif 497 502 #ifdef CONFIG_PPC_BOOK3S 498 503 DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); 499 504 DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); 500 505 DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); 506 + DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic)); 507 + DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb)); 501 508 DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr)); 502 509 DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr)); 503 510 DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor)); 511 + DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr)); 504 512 DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl)); 505 513 DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr)); 514 + DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx)); 515 + DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr)); 516 + DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx)); 517 + DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr)); 506 518 DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); 507 519 DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec)); 508 520 DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires)); ··· 519 515 DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded)); 520 516 DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); 521 517 DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); 518 + DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc)); 522 519 DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar)); 523 520 DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar)); 521 + DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier)); 524 522 DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); 525 523 DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max)); 526 524 DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); ··· 530 524 DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); 531 525 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); 532 526 DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); 533 - 
DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid)); 534 527 DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); 535 528 DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); 529 + DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr)); 530 + DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb)); 531 + DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr)); 532 + DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr)); 533 + DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr)); 534 + DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr)); 535 + DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr)); 536 + DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr)); 537 + DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop)); 538 + DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort)); 536 539 DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1)); 537 540 DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count)); 538 541 DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count)); 539 542 DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); 540 543 DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads)); 544 + DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm)); 541 545 DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset)); 542 546 DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr)); 543 547 DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr)); 548 + DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes)); 544 549 DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige)); 545 550 DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv)); 546 551 DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb)); 552 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 553 + DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar)); 554 + DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar)); 555 + DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr)); 556 + DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm)); 557 + DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr)); 558 + DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr)); 559 + DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm)); 560 + DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm)); 561 + DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm)); 562 + DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm)); 563 + DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm)); 564 + DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm)); 565 + DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm)); 566 + DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm)); 567 + #endif 547 568 548 569 #ifdef CONFIG_PPC_BOOK3S_64 549 570 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE ··· 635 602 HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys); 636 603 HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr); 637 604 HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi); 605 + HSTATE_FIELD(HSTATE_PTID, ptid); 638 606 HSTATE_FIELD(HSTATE_MMCR, host_mmcr); 639 607 HSTATE_FIELD(HSTATE_PMC, host_pmc); 640 608 HSTATE_FIELD(HSTATE_PURR, host_purr);
+2 -39
arch/powerpc/kernel/kvm.c
··· 413 413 { 414 414 u32 *features = data; 415 415 416 - ulong in[8]; 416 + ulong in[8] = {0}; 417 417 ulong out[8]; 418 418 419 419 in[0] = KVM_MAGIC_PAGE; 420 420 in[1] = KVM_MAGIC_PAGE; 421 421 422 - kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE)); 422 + epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE)); 423 423 424 424 *features = out[0]; 425 425 } ··· 710 710 printk(KERN_INFO "KVM: Live patching for a fast VM %s\n", 711 711 kvm_patching_worked ? "worked" : "failed"); 712 712 } 713 - 714 - unsigned long kvm_hypercall(unsigned long *in, 715 - unsigned long *out, 716 - unsigned long nr) 717 - { 718 - unsigned long register r0 asm("r0"); 719 - unsigned long register r3 asm("r3") = in[0]; 720 - unsigned long register r4 asm("r4") = in[1]; 721 - unsigned long register r5 asm("r5") = in[2]; 722 - unsigned long register r6 asm("r6") = in[3]; 723 - unsigned long register r7 asm("r7") = in[4]; 724 - unsigned long register r8 asm("r8") = in[5]; 725 - unsigned long register r9 asm("r9") = in[6]; 726 - unsigned long register r10 asm("r10") = in[7]; 727 - unsigned long register r11 asm("r11") = nr; 728 - unsigned long register r12 asm("r12"); 729 - 730 - asm volatile("bl epapr_hypercall_start" 731 - : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6), 732 - "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11), 733 - "=r"(r12) 734 - : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8), 735 - "r"(r9), "r"(r10), "r"(r11) 736 - : "memory", "cc", "xer", "ctr", "lr"); 737 - 738 - out[0] = r4; 739 - out[1] = r5; 740 - out[2] = r6; 741 - out[3] = r7; 742 - out[4] = r8; 743 - out[5] = r9; 744 - out[6] = r10; 745 - out[7] = r11; 746 - 747 - return r3; 748 - } 749 - EXPORT_SYMBOL_GPL(kvm_hypercall); 750 713 751 714 static __init void kvm_free_tmp(void) 752 715 {
+4
arch/powerpc/kvm/44x.c
··· 21 21 #include <linux/slab.h> 22 22 #include <linux/err.h> 23 23 #include <linux/export.h> 24 + #include <linux/module.h> 25 + #include <linux/miscdevice.h> 24 26 25 27 #include <asm/reg.h> 26 28 #include <asm/cputable.h> ··· 233 231 234 232 module_init(kvmppc_44x_init); 235 233 module_exit(kvmppc_44x_exit); 234 + MODULE_ALIAS_MISCDEV(KVM_MINOR); 235 + MODULE_ALIAS("devname:kvm");
+38 -8
arch/powerpc/kvm/book3s.c
··· 18 18 #include <linux/err.h> 19 19 #include <linux/export.h> 20 20 #include <linux/slab.h> 21 + #include <linux/module.h> 22 + #include <linux/miscdevice.h> 21 23 22 24 #include <asm/reg.h> 23 25 #include <asm/cputable.h> ··· 577 575 break; 578 576 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: 579 577 i = reg->id - KVM_REG_PPC_FPR0; 580 - val = get_reg_val(reg->id, vcpu->arch.fpr[i]); 578 + val = get_reg_val(reg->id, VCPU_FPR(vcpu, i)); 581 579 break; 582 580 case KVM_REG_PPC_FPSCR: 583 - val = get_reg_val(reg->id, vcpu->arch.fpscr); 581 + val = get_reg_val(reg->id, vcpu->arch.fp.fpscr); 584 582 break; 585 583 #ifdef CONFIG_ALTIVEC 586 584 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: ··· 588 586 r = -ENXIO; 589 587 break; 590 588 } 591 - val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0]; 589 + val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; 592 590 break; 593 591 case KVM_REG_PPC_VSCR: 594 592 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { 595 593 r = -ENXIO; 596 594 break; 597 595 } 598 - val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]); 596 + val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); 599 597 break; 600 598 case KVM_REG_PPC_VRSAVE: 601 599 val = get_reg_val(reg->id, vcpu->arch.vrsave); 602 600 break; 603 601 #endif /* CONFIG_ALTIVEC */ 602 + #ifdef CONFIG_VSX 603 + case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: 604 + if (cpu_has_feature(CPU_FTR_VSX)) { 605 + long int i = reg->id - KVM_REG_PPC_VSR0; 606 + val.vsxval[0] = vcpu->arch.fp.fpr[i][0]; 607 + val.vsxval[1] = vcpu->arch.fp.fpr[i][1]; 608 + } else { 609 + r = -ENXIO; 610 + } 611 + break; 612 + #endif /* CONFIG_VSX */ 604 613 case KVM_REG_PPC_DEBUG_INST: { 605 614 u32 opcode = INS_TW; 606 615 r = copy_to_user((u32 __user *)(long)reg->addr, ··· 667 654 break; 668 655 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: 669 656 i = reg->id - KVM_REG_PPC_FPR0; 670 - vcpu->arch.fpr[i] = set_reg_val(reg->id, val); 657 + VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val); 671 658 break; 672 659 case KVM_REG_PPC_FPSCR: 673 - vcpu->arch.fpscr = set_reg_val(reg->id, val); 660 + vcpu->arch.fp.fpscr = set_reg_val(reg->id, val); 674 661 break; 675 662 #ifdef CONFIG_ALTIVEC 676 663 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: ··· 678 665 r = -ENXIO; 679 666 break; 680 667 } 681 - vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; 668 + vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; 682 669 break; 683 670 case KVM_REG_PPC_VSCR: 684 671 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { 685 672 r = -ENXIO; 686 673 break; 687 674 } 688 - vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val); 675 + vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); 689 676 break; 690 677 case KVM_REG_PPC_VRSAVE: 691 678 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { ··· 695 682 vcpu->arch.vrsave = set_reg_val(reg->id, val); 696 683 break; 697 684 #endif /* CONFIG_ALTIVEC */ 685 + #ifdef CONFIG_VSX 686 + case KVM_REG_PPC_VSR0 ... 
KVM_REG_PPC_VSR31: 687 + if (cpu_has_feature(CPU_FTR_VSX)) { 688 + long int i = reg->id - KVM_REG_PPC_VSR0; 689 + vcpu->arch.fp.fpr[i][0] = val.vsxval[0]; 690 + vcpu->arch.fp.fpr[i][1] = val.vsxval[1]; 691 + } else { 692 + r = -ENXIO; 693 + } 694 + break; 695 + #endif /* CONFIG_VSX */ 698 696 #ifdef CONFIG_KVM_XICS 699 697 case KVM_REG_PPC_ICP_STATE: 700 698 if (!vcpu->arch.icp) { ··· 903 879 904 880 module_init(kvmppc_book3s_init); 905 881 module_exit(kvmppc_book3s_exit); 882 + 883 + /* On 32bit this is our one and only kernel module */ 884 + #ifdef CONFIG_KVM_BOOK3S_32 885 + MODULE_ALIAS_MISCDEV(KVM_MINOR); 886 + MODULE_ALIAS("devname:kvm"); 887 + #endif
+5
arch/powerpc/kvm/book3s_32_mmu_host.c
··· 243 243 /* Now tell our Shadow PTE code about the new page */ 244 244 245 245 pte = kvmppc_mmu_hpte_cache_next(vcpu); 246 + if (!pte) { 247 + kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); 248 + r = -EAGAIN; 249 + goto out; 250 + } 246 251 247 252 dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n", 248 253 orig_pte->may_write ? 'w' : '-',
+2 -2
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 262 262 263 263 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) 264 264 { 265 - kvmppc_set_msr(vcpu, MSR_SF | MSR_ME); 265 + kvmppc_set_msr(vcpu, vcpu->arch.intr_msr); 266 266 } 267 267 268 268 /* ··· 562 562 * we just return and retry the instruction. 563 563 */ 564 564 565 - if (instruction_is_store(vcpu->arch.last_inst) != !!is_store) 565 + if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store) 566 566 return RESUME_GUEST; 567 567 568 568 /*
-4
arch/powerpc/kvm/book3s_exports.c
··· 25 25 #endif 26 26 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 27 27 EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline); 28 - EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu); 29 - #ifdef CONFIG_ALTIVEC 30 - EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec); 31 - #endif 32 28 #endif 33 29
+241 -78
arch/powerpc/kvm/book3s_hv.c
··· 31 31 #include <linux/spinlock.h> 32 32 #include <linux/page-flags.h> 33 33 #include <linux/srcu.h> 34 + #include <linux/miscdevice.h> 34 35 35 36 #include <asm/reg.h> 36 37 #include <asm/cputable.h> ··· 86 85 87 86 /* CPU points to the first thread of the core */ 88 87 if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) { 88 + #ifdef CONFIG_KVM_XICS 89 89 int real_cpu = cpu + vcpu->arch.ptid; 90 90 if (paca[real_cpu].kvm_hstate.xics_phys) 91 91 xics_wake_cpu(real_cpu); 92 - else if (cpu_online(cpu)) 92 + else 93 + #endif 94 + if (cpu_online(cpu)) 93 95 smp_send_reschedule(cpu); 94 96 } 95 97 put_cpu(); ··· 186 182 187 183 switch (arch_compat) { 188 184 case PVR_ARCH_205: 189 - pcr = PCR_ARCH_205; 185 + /* 186 + * If an arch bit is set in PCR, all the defined 187 + * higher-order arch bits also have to be set. 188 + */ 189 + pcr = PCR_ARCH_206 | PCR_ARCH_205; 190 190 break; 191 191 case PVR_ARCH_206: 192 192 case PVR_ARCH_206p: 193 + pcr = PCR_ARCH_206; 194 + break; 195 + case PVR_ARCH_207: 193 196 break; 194 197 default: 195 198 return -EINVAL; 199 + } 200 + 201 + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) { 202 + /* POWER7 can't emulate POWER8 */ 203 + if (!(pcr & PCR_ARCH_206)) 204 + return -EINVAL; 205 + pcr &= ~PCR_ARCH_206; 196 206 } 197 207 } 198 208 ··· 655 637 r = RESUME_GUEST; 656 638 break; 657 639 case BOOK3S_INTERRUPT_EXTERNAL: 640 + case BOOK3S_INTERRUPT_H_DOORBELL: 658 641 vcpu->stat.ext_intr_exits++; 659 642 r = RESUME_GUEST; 660 643 break; ··· 692 673 /* hcall - punt to userspace */ 693 674 int i; 694 675 695 - if (vcpu->arch.shregs.msr & MSR_PR) { 696 - /* sc 1 from userspace - reflect to guest syscall */ 697 - kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL); 698 - r = RESUME_GUEST; 699 - break; 700 - } 676 + /* hypercall with MSR_PR has already been handled in rmode, 677 + * and never reaches here. 678 + */ 679 + 701 680 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); 702 681 for (i = 0; i < 9; ++i) 703 682 run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); ··· 725 708 * we don't emulate any guest instructions at this stage. 726 709 */ 727 710 case BOOK3S_INTERRUPT_H_EMUL_ASSIST: 728 - kvmppc_core_queue_program(vcpu, 0x80000); 711 + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); 712 + r = RESUME_GUEST; 713 + break; 714 + /* 715 + * This occurs if the guest (kernel or userspace), does something that 716 + * is prohibited by HFSCR. We just generate a program interrupt to 717 + * the guest. 718 + */ 719 + case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: 720 + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); 729 721 r = RESUME_GUEST; 730 722 break; 731 723 default: ··· 792 766 793 767 spin_lock(&vc->lock); 794 768 /* 769 + * If ILE (interrupt little-endian) has changed, update the 770 + * MSR_LE bit in the intr_msr for each vcpu in this vcore. 771 + */ 772 + if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { 773 + struct kvm *kvm = vcpu->kvm; 774 + struct kvm_vcpu *vcpu; 775 + int i; 776 + 777 + mutex_lock(&kvm->lock); 778 + kvm_for_each_vcpu(i, vcpu, kvm) { 779 + if (vcpu->arch.vcore != vc) 780 + continue; 781 + if (new_lpcr & LPCR_ILE) 782 + vcpu->arch.intr_msr |= MSR_LE; 783 + else 784 + vcpu->arch.intr_msr &= ~MSR_LE; 785 + } 786 + mutex_unlock(&kvm->lock); 787 + } 788 + 789 + /* 795 790 * Userspace can only modify DPFD (default prefetch depth), 796 791 * ILE (interrupt little-endian) and TC (translation control). 792 + * On POWER8 userspace can also modify AIL (alt. interrupt loc.) 
797 793 */ 798 794 mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; 795 + if (cpu_has_feature(CPU_FTR_ARCH_207S)) 796 + mask |= LPCR_AIL; 799 797 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); 800 798 spin_unlock(&vc->lock); 801 799 } ··· 837 787 case KVM_REG_PPC_DABR: 838 788 *val = get_reg_val(id, vcpu->arch.dabr); 839 789 break; 790 + case KVM_REG_PPC_DABRX: 791 + *val = get_reg_val(id, vcpu->arch.dabrx); 792 + break; 840 793 case KVM_REG_PPC_DSCR: 841 794 *val = get_reg_val(id, vcpu->arch.dscr); 842 795 break; ··· 855 802 case KVM_REG_PPC_UAMOR: 856 803 *val = get_reg_val(id, vcpu->arch.uamor); 857 804 break; 858 - case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA: 805 + case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: 859 806 i = id - KVM_REG_PPC_MMCR0; 860 807 *val = get_reg_val(id, vcpu->arch.mmcr[i]); 861 808 break; ··· 863 810 i = id - KVM_REG_PPC_PMC1; 864 811 *val = get_reg_val(id, vcpu->arch.pmc[i]); 865 812 break; 813 + case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: 814 + i = id - KVM_REG_PPC_SPMC1; 815 + *val = get_reg_val(id, vcpu->arch.spmc[i]); 816 + break; 866 817 case KVM_REG_PPC_SIAR: 867 818 *val = get_reg_val(id, vcpu->arch.siar); 868 819 break; 869 820 case KVM_REG_PPC_SDAR: 870 821 *val = get_reg_val(id, vcpu->arch.sdar); 871 822 break; 872 - #ifdef CONFIG_VSX 873 - case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: 874 - if (cpu_has_feature(CPU_FTR_VSX)) { 875 - /* VSX => FP reg i is stored in arch.vsr[2*i] */ 876 - long int i = id - KVM_REG_PPC_FPR0; 877 - *val = get_reg_val(id, vcpu->arch.vsr[2 * i]); 878 - } else { 879 - /* let generic code handle it */ 880 - r = -EINVAL; 881 - } 823 + case KVM_REG_PPC_SIER: 824 + *val = get_reg_val(id, vcpu->arch.sier); 882 825 break; 883 - case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: 884 - if (cpu_has_feature(CPU_FTR_VSX)) { 885 - long int i = id - KVM_REG_PPC_VSR0; 886 - val->vsxval[0] = vcpu->arch.vsr[2 * i]; 887 - val->vsxval[1] = vcpu->arch.vsr[2 * i + 1]; 888 - } else { 889 - r = -ENXIO; 890 - } 826 + case KVM_REG_PPC_IAMR: 827 + *val = get_reg_val(id, vcpu->arch.iamr); 891 828 break; 892 - #endif /* CONFIG_VSX */ 829 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 830 + case KVM_REG_PPC_TFHAR: 831 + *val = get_reg_val(id, vcpu->arch.tfhar); 832 + break; 833 + case KVM_REG_PPC_TFIAR: 834 + *val = get_reg_val(id, vcpu->arch.tfiar); 835 + break; 836 + case KVM_REG_PPC_TEXASR: 837 + *val = get_reg_val(id, vcpu->arch.texasr); 838 + break; 839 + #endif 840 + case KVM_REG_PPC_FSCR: 841 + *val = get_reg_val(id, vcpu->arch.fscr); 842 + break; 843 + case KVM_REG_PPC_PSPB: 844 + *val = get_reg_val(id, vcpu->arch.pspb); 845 + break; 846 + case KVM_REG_PPC_EBBHR: 847 + *val = get_reg_val(id, vcpu->arch.ebbhr); 848 + break; 849 + case KVM_REG_PPC_EBBRR: 850 + *val = get_reg_val(id, vcpu->arch.ebbrr); 851 + break; 852 + case KVM_REG_PPC_BESCR: 853 + *val = get_reg_val(id, vcpu->arch.bescr); 854 + break; 855 + case KVM_REG_PPC_TAR: 856 + *val = get_reg_val(id, vcpu->arch.tar); 857 + break; 858 + case KVM_REG_PPC_DPDES: 859 + *val = get_reg_val(id, vcpu->arch.vcore->dpdes); 860 + break; 861 + case KVM_REG_PPC_DAWR: 862 + *val = get_reg_val(id, vcpu->arch.dawr); 863 + break; 864 + case KVM_REG_PPC_DAWRX: 865 + *val = get_reg_val(id, vcpu->arch.dawrx); 866 + break; 867 + case KVM_REG_PPC_CIABR: 868 + *val = get_reg_val(id, vcpu->arch.ciabr); 869 + break; 870 + case KVM_REG_PPC_IC: 871 + *val = get_reg_val(id, vcpu->arch.ic); 872 + break; 873 + case KVM_REG_PPC_VTB: 874 + *val = get_reg_val(id, vcpu->arch.vtb); 875 + break; 876 + case KVM_REG_PPC_CSIGR: 877 + *val = 
get_reg_val(id, vcpu->arch.csigr); 878 + break; 879 + case KVM_REG_PPC_TACR: 880 + *val = get_reg_val(id, vcpu->arch.tacr); 881 + break; 882 + case KVM_REG_PPC_TCSCR: 883 + *val = get_reg_val(id, vcpu->arch.tcscr); 884 + break; 885 + case KVM_REG_PPC_PID: 886 + *val = get_reg_val(id, vcpu->arch.pid); 887 + break; 888 + case KVM_REG_PPC_ACOP: 889 + *val = get_reg_val(id, vcpu->arch.acop); 890 + break; 891 + case KVM_REG_PPC_WORT: 892 + *val = get_reg_val(id, vcpu->arch.wort); 893 + break; 893 894 case KVM_REG_PPC_VPA_ADDR: 894 895 spin_lock(&vcpu->arch.vpa_update_lock); 895 896 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); ··· 997 890 case KVM_REG_PPC_DABR: 998 891 vcpu->arch.dabr = set_reg_val(id, *val); 999 892 break; 893 + case KVM_REG_PPC_DABRX: 894 + vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; 895 + break; 1000 896 case KVM_REG_PPC_DSCR: 1001 897 vcpu->arch.dscr = set_reg_val(id, *val); 1002 898 break; ··· 1015 905 case KVM_REG_PPC_UAMOR: 1016 906 vcpu->arch.uamor = set_reg_val(id, *val); 1017 907 break; 1018 - case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA: 908 + case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: 1019 909 i = id - KVM_REG_PPC_MMCR0; 1020 910 vcpu->arch.mmcr[i] = set_reg_val(id, *val); 1021 911 break; ··· 1023 913 i = id - KVM_REG_PPC_PMC1; 1024 914 vcpu->arch.pmc[i] = set_reg_val(id, *val); 1025 915 break; 916 + case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: 917 + i = id - KVM_REG_PPC_SPMC1; 918 + vcpu->arch.spmc[i] = set_reg_val(id, *val); 919 + break; 1026 920 case KVM_REG_PPC_SIAR: 1027 921 vcpu->arch.siar = set_reg_val(id, *val); 1028 922 break; 1029 923 case KVM_REG_PPC_SDAR: 1030 924 vcpu->arch.sdar = set_reg_val(id, *val); 1031 925 break; 1032 - #ifdef CONFIG_VSX 1033 - case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: 1034 - if (cpu_has_feature(CPU_FTR_VSX)) { 1035 - /* VSX => FP reg i is stored in arch.vsr[2*i] */ 1036 - long int i = id - KVM_REG_PPC_FPR0; 1037 - vcpu->arch.vsr[2 * i] = set_reg_val(id, *val); 1038 - } else { 1039 - /* let generic code handle it */ 1040 - r = -EINVAL; 1041 - } 926 + case KVM_REG_PPC_SIER: 927 + vcpu->arch.sier = set_reg_val(id, *val); 1042 928 break; 1043 - case KVM_REG_PPC_VSR0 ... 
KVM_REG_PPC_VSR31: 1044 - if (cpu_has_feature(CPU_FTR_VSX)) { 1045 - long int i = id - KVM_REG_PPC_VSR0; 1046 - vcpu->arch.vsr[2 * i] = val->vsxval[0]; 1047 - vcpu->arch.vsr[2 * i + 1] = val->vsxval[1]; 1048 - } else { 1049 - r = -ENXIO; 1050 - } 929 + case KVM_REG_PPC_IAMR: 930 + vcpu->arch.iamr = set_reg_val(id, *val); 1051 931 break; 1052 - #endif /* CONFIG_VSX */ 932 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 933 + case KVM_REG_PPC_TFHAR: 934 + vcpu->arch.tfhar = set_reg_val(id, *val); 935 + break; 936 + case KVM_REG_PPC_TFIAR: 937 + vcpu->arch.tfiar = set_reg_val(id, *val); 938 + break; 939 + case KVM_REG_PPC_TEXASR: 940 + vcpu->arch.texasr = set_reg_val(id, *val); 941 + break; 942 + #endif 943 + case KVM_REG_PPC_FSCR: 944 + vcpu->arch.fscr = set_reg_val(id, *val); 945 + break; 946 + case KVM_REG_PPC_PSPB: 947 + vcpu->arch.pspb = set_reg_val(id, *val); 948 + break; 949 + case KVM_REG_PPC_EBBHR: 950 + vcpu->arch.ebbhr = set_reg_val(id, *val); 951 + break; 952 + case KVM_REG_PPC_EBBRR: 953 + vcpu->arch.ebbrr = set_reg_val(id, *val); 954 + break; 955 + case KVM_REG_PPC_BESCR: 956 + vcpu->arch.bescr = set_reg_val(id, *val); 957 + break; 958 + case KVM_REG_PPC_TAR: 959 + vcpu->arch.tar = set_reg_val(id, *val); 960 + break; 961 + case KVM_REG_PPC_DPDES: 962 + vcpu->arch.vcore->dpdes = set_reg_val(id, *val); 963 + break; 964 + case KVM_REG_PPC_DAWR: 965 + vcpu->arch.dawr = set_reg_val(id, *val); 966 + break; 967 + case KVM_REG_PPC_DAWRX: 968 + vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; 969 + break; 970 + case KVM_REG_PPC_CIABR: 971 + vcpu->arch.ciabr = set_reg_val(id, *val); 972 + /* Don't allow setting breakpoints in hypervisor code */ 973 + if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) 974 + vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ 975 + break; 976 + case KVM_REG_PPC_IC: 977 + vcpu->arch.ic = set_reg_val(id, *val); 978 + break; 979 + case KVM_REG_PPC_VTB: 980 + vcpu->arch.vtb = set_reg_val(id, *val); 981 + break; 982 + case KVM_REG_PPC_CSIGR: 983 + vcpu->arch.csigr = set_reg_val(id, *val); 984 + break; 985 + case KVM_REG_PPC_TACR: 986 + vcpu->arch.tacr = set_reg_val(id, *val); 987 + break; 988 + case KVM_REG_PPC_TCSCR: 989 + vcpu->arch.tcscr = set_reg_val(id, *val); 990 + break; 991 + case KVM_REG_PPC_PID: 992 + vcpu->arch.pid = set_reg_val(id, *val); 993 + break; 994 + case KVM_REG_PPC_ACOP: 995 + vcpu->arch.acop = set_reg_val(id, *val); 996 + break; 997 + case KVM_REG_PPC_WORT: 998 + vcpu->arch.wort = set_reg_val(id, *val); 999 + break; 1053 1000 case KVM_REG_PPC_VPA_ADDR: 1054 1001 addr = set_reg_val(id, *val); 1055 1002 r = -EINVAL; ··· 1184 1017 spin_lock_init(&vcpu->arch.vpa_update_lock); 1185 1018 spin_lock_init(&vcpu->arch.tbacct_lock); 1186 1019 vcpu->arch.busy_preempt = TB_NIL; 1020 + vcpu->arch.intr_msr = MSR_SF | MSR_ME; 1187 1021 1188 1022 kvmppc_mmu_book3s_hv_init(vcpu); 1189 1023 ··· 1202 1034 init_waitqueue_head(&vcore->wq); 1203 1035 vcore->preempt_tb = TB_NIL; 1204 1036 vcore->lpcr = kvm->arch.lpcr; 1037 + vcore->first_vcpuid = core * threads_per_core; 1038 + vcore->kvm = kvm; 1205 1039 } 1206 1040 kvm->arch.vcores[core] = vcore; 1207 1041 kvm->arch.online_vcores++; ··· 1217 1047 ++vcore->num_threads; 1218 1048 spin_unlock(&vcore->lock); 1219 1049 vcpu->arch.vcore = vcore; 1050 + vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; 1220 1051 1221 1052 vcpu->arch.cpu_type = KVM_CPU_3S_64; 1222 1053 kvmppc_sanity_check(vcpu); ··· 1281 1110 } 1282 1111 } 1283 1112 1284 - extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu 
*vcpu); 1113 + extern void __kvmppc_vcore_entry(void); 1285 1114 1286 1115 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, 1287 1116 struct kvm_vcpu *vcpu) ··· 1355 1184 tpaca = &paca[cpu]; 1356 1185 tpaca->kvm_hstate.kvm_vcpu = vcpu; 1357 1186 tpaca->kvm_hstate.kvm_vcore = vc; 1358 - tpaca->kvm_hstate.napping = 0; 1187 + tpaca->kvm_hstate.ptid = vcpu->arch.ptid; 1359 1188 vcpu->cpu = vc->pcpu; 1360 1189 smp_wmb(); 1361 1190 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) 1362 - if (vcpu->arch.ptid) { 1191 + if (cpu != smp_processor_id()) { 1192 + #ifdef CONFIG_KVM_XICS 1363 1193 xics_wake_cpu(cpu); 1364 - ++vc->n_woken; 1194 + #endif 1195 + if (vcpu->arch.ptid) 1196 + ++vc->n_woken; 1365 1197 } 1366 1198 #endif 1367 1199 } ··· 1421 1247 */ 1422 1248 static void kvmppc_run_core(struct kvmppc_vcore *vc) 1423 1249 { 1424 - struct kvm_vcpu *vcpu, *vcpu0, *vnext; 1250 + struct kvm_vcpu *vcpu, *vnext; 1425 1251 long ret; 1426 1252 u64 now; 1427 - int ptid, i, need_vpa_update; 1253 + int i, need_vpa_update; 1428 1254 int srcu_idx; 1429 1255 struct kvm_vcpu *vcpus_to_update[threads_per_core]; 1430 1256 ··· 1462 1288 } 1463 1289 1464 1290 /* 1465 - * Assign physical thread IDs, first to non-ceded vcpus 1466 - * and then to ceded ones. 1467 - */ 1468 - ptid = 0; 1469 - vcpu0 = NULL; 1470 - list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { 1471 - if (!vcpu->arch.ceded) { 1472 - if (!ptid) 1473 - vcpu0 = vcpu; 1474 - vcpu->arch.ptid = ptid++; 1475 - } 1476 - } 1477 - if (!vcpu0) 1478 - goto out; /* nothing to run; should never happen */ 1479 - list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) 1480 - if (vcpu->arch.ceded) 1481 - vcpu->arch.ptid = ptid++; 1482 - 1483 - /* 1484 1291 * Make sure we are running on thread 0, and that 1485 1292 * secondary threads are offline. 
1486 1293 */ ··· 1477 1322 kvmppc_create_dtl_entry(vcpu, vc); 1478 1323 } 1479 1324 1325 + /* Set this explicitly in case thread 0 doesn't have a vcpu */ 1326 + get_paca()->kvm_hstate.kvm_vcore = vc; 1327 + get_paca()->kvm_hstate.ptid = 0; 1328 + 1480 1329 vc->vcore_state = VCORE_RUNNING; 1481 1330 preempt_disable(); 1482 1331 spin_unlock(&vc->lock); 1483 1332 1484 1333 kvm_guest_enter(); 1485 1334 1486 - srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu); 1335 + srcu_idx = srcu_read_lock(&vc->kvm->srcu); 1487 1336 1488 - __kvmppc_vcore_entry(NULL, vcpu0); 1337 + __kvmppc_vcore_entry(); 1489 1338 1490 1339 spin_lock(&vc->lock); 1491 1340 /* disable sending of IPIs on virtual external irqs */ ··· 1504 1345 vc->vcore_state = VCORE_EXITING; 1505 1346 spin_unlock(&vc->lock); 1506 1347 1507 - srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx); 1348 + srcu_read_unlock(&vc->kvm->srcu, srcu_idx); 1508 1349 1509 1350 /* make sure updates to secondary vcpu structs are visible now */ 1510 1351 smp_mb(); ··· 1612 1453 if (!signal_pending(current)) { 1613 1454 if (vc->vcore_state == VCORE_RUNNING && 1614 1455 VCORE_EXIT_COUNT(vc) == 0) { 1615 - vcpu->arch.ptid = vc->n_runnable - 1; 1616 1456 kvmppc_create_dtl_entry(vcpu, vc); 1617 1457 kvmppc_start_thread(vcpu); 1618 1458 } else if (vc->vcore_state == VCORE_SLEEPING) { ··· 2206 2048 LPCR_VPM0 | LPCR_VPM1; 2207 2049 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | 2208 2050 (VRMA_VSID << SLB_VSID_SHIFT_1T); 2051 + /* On POWER8 turn on online bit to enable PURR/SPURR */ 2052 + if (cpu_has_feature(CPU_FTR_ARCH_207S)) 2053 + lpcr |= LPCR_ONL; 2209 2054 } 2210 2055 kvm->arch.lpcr = lpcr; 2211 2056 ··· 2383 2222 module_init(kvmppc_book3s_init_hv); 2384 2223 module_exit(kvmppc_book3s_exit_hv); 2385 2224 MODULE_LICENSE("GPL"); 2225 + MODULE_ALIAS_MISCDEV(KVM_MINOR); 2226 + MODULE_ALIAS("devname:kvm");
+4 -4
arch/powerpc/kvm/book3s_hv_interrupts.S
··· 35 35 ****************************************************************************/ 36 36 37 37 /* Registers: 38 - * r4: vcpu pointer 38 + * none 39 39 */ 40 40 _GLOBAL(__kvmppc_vcore_entry) 41 41 ··· 57 57 std r3, HSTATE_DSCR(r13) 58 58 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 59 59 60 + BEGIN_FTR_SECTION 60 61 /* Save host DABR */ 61 62 mfspr r3, SPRN_DABR 62 63 std r3, HSTATE_DABR(r13) 64 + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 63 65 64 66 /* Hard-disable interrupts */ 65 67 mfmsr r10 ··· 71 69 mtmsrd r10,1 72 70 73 71 /* Save host PMU registers */ 74 - /* R4 is live here (vcpu pointer) but not r3 or r5 */ 75 72 li r3, 1 76 73 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 77 74 mfspr r7, SPRN_MMCR0 /* save MMCR0 */ ··· 135 134 * enters the guest with interrupts enabled. 136 135 */ 137 136 BEGIN_FTR_SECTION 137 + ld r4, HSTATE_KVM_VCPU(r13) 138 138 ld r0, VCPU_PENDING_EXC(r4) 139 139 li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL) 140 140 oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h 141 141 and. r0, r0, r7 142 142 beq 32f 143 - mr r31, r4 144 143 lhz r3, PACAPACAINDEX(r13) 145 144 bl smp_send_reschedule 146 145 nop 147 - mr r4, r31 148 146 32: 149 147 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 150 148 #endif /* CONFIG_SMP */
+5 -3
arch/powerpc/kvm/book3s_hv_rm_mmu.c
··· 134 134 unlock_rmap(rmap); 135 135 } 136 136 137 - static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva, 137 + static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva, 138 138 int writing, unsigned long *pte_sizep) 139 139 { 140 140 pte_t *ptep; ··· 232 232 233 233 /* Look up the Linux PTE for the backing page */ 234 234 pte_size = psize; 235 - pte = lookup_linux_pte(pgdir, hva, writing, &pte_size); 235 + pte = lookup_linux_pte_and_update(pgdir, hva, writing, 236 + &pte_size); 236 237 if (pte_present(pte)) { 237 238 if (writing && !pte_write(pte)) 238 239 /* make the actual HPTE be read-only */ ··· 673 672 memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn); 674 673 if (memslot) { 675 674 hva = __gfn_to_hva_memslot(memslot, gfn); 676 - pte = lookup_linux_pte(pgdir, hva, 1, &psize); 675 + pte = lookup_linux_pte_and_update(pgdir, hva, 676 + 1, &psize); 677 677 if (pte_present(pte) && !pte_write(pte)) 678 678 r = hpte_make_readonly(r); 679 679 }
+734 -457
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 33 33 #error Need to fix lppaca and SLB shadow accesses in little endian mode 34 34 #endif 35 35 36 + /* Values in HSTATE_NAPPING(r13) */ 37 + #define NAPPING_CEDE 1 38 + #define NAPPING_NOVCPU 2 39 + 36 40 /* 37 41 * Call kvmppc_hv_entry in real mode. 38 42 * Must be called with interrupts hard-disabled. ··· 61 57 RFI 62 58 63 59 kvmppc_call_hv_entry: 60 + ld r4, HSTATE_KVM_VCPU(r13) 64 61 bl kvmppc_hv_entry 65 62 66 63 /* Back from guest - restore host state and return to caller */ 67 64 65 + BEGIN_FTR_SECTION 68 66 /* Restore host DABR and DABRX */ 69 67 ld r5,HSTATE_DABR(r13) 70 68 li r6,7 71 69 mtspr SPRN_DABR,r5 72 70 mtspr SPRN_DABRX,r6 71 + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 73 72 74 73 /* Restore SPRG3 */ 75 74 ld r3,PACA_SPRG3(r13) 76 75 mtspr SPRN_SPRG3,r3 77 - 78 - /* 79 - * Reload DEC. HDEC interrupts were disabled when 80 - * we reloaded the host's LPCR value. 81 - */ 82 - ld r3, HSTATE_DECEXP(r13) 83 - mftb r4 84 - subf r4, r4, r3 85 - mtspr SPRN_DEC, r4 86 76 87 77 /* Reload the host's PMU registers */ 88 78 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ ··· 111 113 mtspr SPRN_MMCR0, r3 112 114 isync 113 115 23: 116 + 117 + /* 118 + * Reload DEC. HDEC interrupts were disabled when 119 + * we reloaded the host's LPCR value. 120 + */ 121 + ld r3, HSTATE_DECEXP(r13) 122 + mftb r4 123 + subf r4, r4, r3 124 + mtspr SPRN_DEC, r4 114 125 115 126 /* 116 127 * For external and machine check interrupts, we need ··· 160 153 161 154 13: b machine_check_fwnmi 162 155 156 + kvmppc_primary_no_guest: 157 + /* We handle this much like a ceded vcpu */ 158 + /* set our bit in napping_threads */ 159 + ld r5, HSTATE_KVM_VCORE(r13) 160 + lbz r7, HSTATE_PTID(r13) 161 + li r0, 1 162 + sld r0, r0, r7 163 + addi r6, r5, VCORE_NAPPING_THREADS 164 + 1: lwarx r3, 0, r6 165 + or r3, r3, r0 166 + stwcx. r3, 0, r6 167 + bne 1b 168 + /* order napping_threads update vs testing entry_exit_count */ 169 + isync 170 + li r12, 0 171 + lwz r7, VCORE_ENTRY_EXIT(r5) 172 + cmpwi r7, 0x100 173 + bge kvm_novcpu_exit /* another thread already exiting */ 174 + li r3, NAPPING_NOVCPU 175 + stb r3, HSTATE_NAPPING(r13) 176 + li r3, 1 177 + stb r3, HSTATE_HWTHREAD_REQ(r13) 178 + 179 + b kvm_do_nap 180 + 181 + kvm_novcpu_wakeup: 182 + ld r1, HSTATE_HOST_R1(r13) 183 + ld r5, HSTATE_KVM_VCORE(r13) 184 + li r0, 0 185 + stb r0, HSTATE_NAPPING(r13) 186 + stb r0, HSTATE_HWTHREAD_REQ(r13) 187 + 188 + /* check the wake reason */ 189 + bl kvmppc_check_wake_reason 190 + 191 + /* see if any other thread is already exiting */ 192 + lwz r0, VCORE_ENTRY_EXIT(r5) 193 + cmpwi r0, 0x100 194 + bge kvm_novcpu_exit 195 + 196 + /* clear our bit in napping_threads */ 197 + lbz r7, HSTATE_PTID(r13) 198 + li r0, 1 199 + sld r0, r0, r7 200 + addi r6, r5, VCORE_NAPPING_THREADS 201 + 4: lwarx r7, 0, r6 202 + andc r7, r7, r0 203 + stwcx. r7, 0, r6 204 + bne 4b 205 + 206 + /* See if the wake reason means we need to exit */ 207 + cmpdi r3, 0 208 + bge kvm_novcpu_exit 209 + 210 + /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */ 211 + ld r4, HSTATE_KVM_VCPU(r13) 212 + cmpdi r4, 0 213 + bne kvmppc_got_guest 214 + 215 + kvm_novcpu_exit: 216 + b hdec_soon 217 + 163 218 /* 164 - * We come in here when wakened from nap mode on a secondary hw thread. 219 + * We come in here when wakened from nap mode. 165 220 * Relocation is off and most register values are lost. 166 221 * r13 points to the PACA. 
167 222 */ 168 223 .globl kvm_start_guest 169 224 kvm_start_guest: 170 - ld r1,PACAEMERGSP(r13) 171 - subi r1,r1,STACK_FRAME_OVERHEAD 172 225 ld r2,PACATOC(r13) 173 226 174 227 li r0,KVM_HWTHREAD_IN_KVM ··· 240 173 241 174 /* were we napping due to cede? */ 242 175 lbz r0,HSTATE_NAPPING(r13) 243 - cmpwi r0,0 244 - bne kvm_end_cede 176 + cmpwi r0,NAPPING_CEDE 177 + beq kvm_end_cede 178 + cmpwi r0,NAPPING_NOVCPU 179 + beq kvm_novcpu_wakeup 180 + 181 + ld r1,PACAEMERGSP(r13) 182 + subi r1,r1,STACK_FRAME_OVERHEAD 245 183 246 184 /* 247 185 * We weren't napping due to cede, so this must be a secondary ··· 256 184 */ 257 185 258 186 /* Check the wake reason in SRR1 to see why we got here */ 259 - mfspr r3,SPRN_SRR1 260 - rlwinm r3,r3,44-31,0x7 /* extract wake reason field */ 261 - cmpwi r3,4 /* was it an external interrupt? */ 262 - bne 27f /* if not */ 263 - ld r5,HSTATE_XICS_PHYS(r13) 264 - li r7,XICS_XIRR /* if it was an external interrupt, */ 265 - lwzcix r8,r5,r7 /* get and ack the interrupt */ 266 - sync 267 - clrldi. r9,r8,40 /* get interrupt source ID. */ 268 - beq 28f /* none there? */ 269 - cmpwi r9,XICS_IPI /* was it an IPI? */ 270 - bne 29f 271 - li r0,0xff 272 - li r6,XICS_MFRR 273 - stbcix r0,r5,r6 /* clear IPI */ 274 - stwcix r8,r5,r7 /* EOI the interrupt */ 275 - sync /* order loading of vcpu after that */ 187 + bl kvmppc_check_wake_reason 188 + cmpdi r3, 0 189 + bge kvm_no_guest 276 190 277 191 /* get vcpu pointer, NULL if we have no vcpu to run */ 278 192 ld r4,HSTATE_KVM_VCPU(r13) 279 193 cmpdi r4,0 280 194 /* if we have no vcpu to run, go back to sleep */ 281 195 beq kvm_no_guest 282 - b 30f 283 196 284 - 27: /* XXX should handle hypervisor maintenance interrupts etc. here */ 285 - b kvm_no_guest 286 - 28: /* SRR1 said external but ICP said nope?? */ 287 - b kvm_no_guest 288 - 29: /* External non-IPI interrupt to offline secondary thread? help?? */ 289 - stw r8,HSTATE_SAVED_XIRR(r13) 290 - b kvm_no_guest 197 + /* Set HSTATE_DSCR(r13) to something sensible */ 198 + LOAD_REG_ADDR(r6, dscr_default) 199 + ld r6, 0(r6) 200 + std r6, HSTATE_DSCR(r13) 291 201 292 - 30: bl kvmppc_hv_entry 202 + bl kvmppc_hv_entry 293 203 294 204 /* Back from the guest, go back to nap */ 295 205 /* Clear our vcpu pointer so we don't come back in early */ ··· 283 229 * visible we could be given another vcpu. 284 230 */ 285 231 lwsync 286 - /* Clear any pending IPI - we're an offline thread */ 287 - ld r5, HSTATE_XICS_PHYS(r13) 288 - li r7, XICS_XIRR 289 - lwzcix r3, r5, r7 /* ack any pending interrupt */ 290 - rlwinm. r0, r3, 0, 0xffffff /* any pending? 
*/ 291 - beq 37f 292 - sync 293 - li r0, 0xff 294 - li r6, XICS_MFRR 295 - stbcix r0, r5, r6 /* clear the IPI */ 296 - stwcix r3, r5, r7 /* EOI it */ 297 - 37: sync 298 232 299 233 /* increment the nap count and then go to nap mode */ 300 234 ld r4, HSTATE_KVM_VCORE(r13) ··· 295 253 kvm_no_guest: 296 254 li r0, KVM_HWTHREAD_IN_NAP 297 255 stb r0, HSTATE_HWTHREAD_STATE(r13) 256 + kvm_do_nap: 298 257 li r3, LPCR_PECE0 299 258 mfspr r4, SPRN_LPCR 300 259 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 ··· 320 277 321 278 /* Required state: 322 279 * 323 - * R4 = vcpu pointer 280 + * R4 = vcpu pointer (or NULL) 324 281 * MSR = ~IR|DR 325 282 * R13 = PACA 326 283 * R1 = host R1 ··· 330 287 std r0, PPC_LR_STKOFF(r1) 331 288 stdu r1, -112(r1) 332 289 333 - /* Set partition DABR */ 334 - /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ 335 - li r5,3 336 - ld r6,VCPU_DABR(r4) 337 - mtspr SPRN_DABRX,r5 338 - mtspr SPRN_DABR,r6 339 - BEGIN_FTR_SECTION 340 - isync 341 - END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 342 - 343 - /* Load guest PMU registers */ 344 - /* R4 is live here (vcpu pointer) */ 345 - li r3, 1 346 - sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 347 - mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 348 - isync 349 - lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ 350 - lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ 351 - lwz r6, VCPU_PMC + 8(r4) 352 - lwz r7, VCPU_PMC + 12(r4) 353 - lwz r8, VCPU_PMC + 16(r4) 354 - lwz r9, VCPU_PMC + 20(r4) 355 - BEGIN_FTR_SECTION 356 - lwz r10, VCPU_PMC + 24(r4) 357 - lwz r11, VCPU_PMC + 28(r4) 358 - END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 359 - mtspr SPRN_PMC1, r3 360 - mtspr SPRN_PMC2, r5 361 - mtspr SPRN_PMC3, r6 362 - mtspr SPRN_PMC4, r7 363 - mtspr SPRN_PMC5, r8 364 - mtspr SPRN_PMC6, r9 365 - BEGIN_FTR_SECTION 366 - mtspr SPRN_PMC7, r10 367 - mtspr SPRN_PMC8, r11 368 - END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 369 - ld r3, VCPU_MMCR(r4) 370 - ld r5, VCPU_MMCR + 8(r4) 371 - ld r6, VCPU_MMCR + 16(r4) 372 - ld r7, VCPU_SIAR(r4) 373 - ld r8, VCPU_SDAR(r4) 374 - mtspr SPRN_MMCR1, r5 375 - mtspr SPRN_MMCRA, r6 376 - mtspr SPRN_SIAR, r7 377 - mtspr SPRN_SDAR, r8 378 - mtspr SPRN_MMCR0, r3 379 - isync 380 - 381 - /* Load up FP, VMX and VSX registers */ 382 - bl kvmppc_load_fp 383 - 384 - ld r14, VCPU_GPR(R14)(r4) 385 - ld r15, VCPU_GPR(R15)(r4) 386 - ld r16, VCPU_GPR(R16)(r4) 387 - ld r17, VCPU_GPR(R17)(r4) 388 - ld r18, VCPU_GPR(R18)(r4) 389 - ld r19, VCPU_GPR(R19)(r4) 390 - ld r20, VCPU_GPR(R20)(r4) 391 - ld r21, VCPU_GPR(R21)(r4) 392 - ld r22, VCPU_GPR(R22)(r4) 393 - ld r23, VCPU_GPR(R23)(r4) 394 - ld r24, VCPU_GPR(R24)(r4) 395 - ld r25, VCPU_GPR(R25)(r4) 396 - ld r26, VCPU_GPR(R26)(r4) 397 - ld r27, VCPU_GPR(R27)(r4) 398 - ld r28, VCPU_GPR(R28)(r4) 399 - ld r29, VCPU_GPR(R29)(r4) 400 - ld r30, VCPU_GPR(R30)(r4) 401 - ld r31, VCPU_GPR(R31)(r4) 402 - 403 - BEGIN_FTR_SECTION 404 - /* Switch DSCR to guest value */ 405 - ld r5, VCPU_DSCR(r4) 406 - mtspr SPRN_DSCR, r5 407 - END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 408 - 409 - /* 410 - * Set the decrementer to the guest decrementer. 
411 - */ 412 - ld r8,VCPU_DEC_EXPIRES(r4) 413 - mftb r7 414 - subf r3,r7,r8 415 - mtspr SPRN_DEC,r3 416 - stw r3,VCPU_DEC(r4) 417 - 418 - ld r5, VCPU_SPRG0(r4) 419 - ld r6, VCPU_SPRG1(r4) 420 - ld r7, VCPU_SPRG2(r4) 421 - ld r8, VCPU_SPRG3(r4) 422 - mtspr SPRN_SPRG0, r5 423 - mtspr SPRN_SPRG1, r6 424 - mtspr SPRN_SPRG2, r7 425 - mtspr SPRN_SPRG3, r8 426 - 427 290 /* Save R1 in the PACA */ 428 291 std r1, HSTATE_HOST_R1(r13) 429 292 430 - /* Load up DAR and DSISR */ 431 - ld r5, VCPU_DAR(r4) 432 - lwz r6, VCPU_DSISR(r4) 433 - mtspr SPRN_DAR, r5 434 - mtspr SPRN_DSISR, r6 435 - 436 293 li r6, KVM_GUEST_MODE_HOST_HV 437 294 stb r6, HSTATE_IN_GUEST(r13) 438 - 439 - BEGIN_FTR_SECTION 440 - /* Restore AMR and UAMOR, set AMOR to all 1s */ 441 - ld r5,VCPU_AMR(r4) 442 - ld r6,VCPU_UAMOR(r4) 443 - li r7,-1 444 - mtspr SPRN_AMR,r5 445 - mtspr SPRN_UAMOR,r6 446 - mtspr SPRN_AMOR,r7 447 - END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 448 295 449 296 /* Clear out SLB */ 450 297 li r6,0 ··· 361 428 bne 21b 362 429 363 430 /* Primary thread switches to guest partition. */ 364 - ld r9,VCPU_KVM(r4) /* pointer to struct kvm */ 365 - lwz r6,VCPU_PTID(r4) 431 + ld r9,VCORE_KVM(r5) /* pointer to struct kvm */ 432 + lbz r6,HSTATE_PTID(r13) 366 433 cmpwi r6,0 367 434 bne 20f 368 435 ld r6,KVM_SDR1(r9) ··· 390 457 andc r7,r7,r0 391 458 stdcx. r7,0,r6 392 459 bne 23b 393 - li r6,128 /* and flush the TLB */ 460 + /* Flush the TLB of any entries for this LPID */ 461 + /* use arch 2.07S as a proxy for POWER8 */ 462 + BEGIN_FTR_SECTION 463 + li r6,512 /* POWER8 has 512 sets */ 464 + FTR_SECTION_ELSE 465 + li r6,128 /* POWER7 has 128 sets */ 466 + ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) 394 467 mtctr r6 395 468 li r7,0x800 /* IS field = 0b10 */ 396 469 ptesync ··· 426 487 beq 38f 427 488 mtspr SPRN_PCR, r7 428 489 38: 490 + 491 + BEGIN_FTR_SECTION 492 + /* DPDES is shared between threads */ 493 + ld r8, VCORE_DPDES(r5) 494 + mtspr SPRN_DPDES, r8 495 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 496 + 429 497 li r0,1 430 498 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ 431 499 b 10f ··· 449 503 mtspr SPRN_RMOR,r8 450 504 isync 451 505 452 - /* Increment yield count if they have a VPA */ 453 - ld r3, VCPU_VPA(r4) 454 - cmpdi r3, 0 455 - beq 25f 456 - lwz r5, LPPACA_YIELDCOUNT(r3) 457 - addi r5, r5, 1 458 - stw r5, LPPACA_YIELDCOUNT(r3) 459 - li r6, 1 460 - stb r6, VCPU_VPA_DIRTY(r4) 461 - 25: 462 506 /* Check if HDEC expires soon */ 463 507 mfspr r3,SPRN_HDEC 464 - cmpwi r3,10 508 + cmpwi r3,512 /* 1 microsecond */ 465 509 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER 466 - mr r9,r4 467 510 blt hdec_soon 468 - 469 - /* Save purr/spurr */ 470 - mfspr r5,SPRN_PURR 471 - mfspr r6,SPRN_SPURR 472 - std r5,HSTATE_PURR(r13) 473 - std r6,HSTATE_SPURR(r13) 474 - ld r7,VCPU_PURR(r4) 475 - ld r8,VCPU_SPURR(r4) 476 - mtspr SPRN_PURR,r7 477 - mtspr SPRN_SPURR,r8 478 511 b 31f 479 512 480 513 /* ··· 464 539 * We also have to invalidate the TLB since its 465 540 * entries aren't tagged with the LPID. 
466 541 */ 467 - 30: ld r9,VCPU_KVM(r4) /* pointer to struct kvm */ 542 + 30: ld r5,HSTATE_KVM_VCORE(r13) 543 + ld r9,VCORE_KVM(r5) /* pointer to struct kvm */ 468 544 469 545 /* first take native_tlbie_lock */ 470 546 .section ".toc","aw" ··· 530 604 mfspr r3,SPRN_HDEC 531 605 cmpwi r3,10 532 606 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER 533 - mr r9,r4 534 607 blt hdec_soon 535 608 536 609 /* Enable HDEC interrupts */ ··· 544 619 mfspr r0,SPRN_HID0 545 620 mfspr r0,SPRN_HID0 546 621 mfspr r0,SPRN_HID0 622 + 31: 623 + /* Do we have a guest vcpu to run? */ 624 + cmpdi r4, 0 625 + beq kvmppc_primary_no_guest 626 + kvmppc_got_guest: 547 627 548 628 /* Load up guest SLB entries */ 549 - 31: lwz r5,VCPU_SLB_MAX(r4) 629 + lwz r5,VCPU_SLB_MAX(r4) 550 630 cmpwi r5,0 551 631 beq 9f 552 632 mtctr r5 ··· 562 632 addi r6,r6,VCPU_SLB_SIZE 563 633 bdnz 1b 564 634 9: 635 + /* Increment yield count if they have a VPA */ 636 + ld r3, VCPU_VPA(r4) 637 + cmpdi r3, 0 638 + beq 25f 639 + lwz r5, LPPACA_YIELDCOUNT(r3) 640 + addi r5, r5, 1 641 + stw r5, LPPACA_YIELDCOUNT(r3) 642 + li r6, 1 643 + stb r6, VCPU_VPA_DIRTY(r4) 644 + 25: 645 + 646 + BEGIN_FTR_SECTION 647 + /* Save purr/spurr */ 648 + mfspr r5,SPRN_PURR 649 + mfspr r6,SPRN_SPURR 650 + std r5,HSTATE_PURR(r13) 651 + std r6,HSTATE_SPURR(r13) 652 + ld r7,VCPU_PURR(r4) 653 + ld r8,VCPU_SPURR(r4) 654 + mtspr SPRN_PURR,r7 655 + mtspr SPRN_SPURR,r8 656 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 657 + 658 + BEGIN_FTR_SECTION 659 + /* Set partition DABR */ 660 + /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ 661 + lwz r5,VCPU_DABRX(r4) 662 + ld r6,VCPU_DABR(r4) 663 + mtspr SPRN_DABRX,r5 664 + mtspr SPRN_DABR,r6 665 + BEGIN_FTR_SECTION_NESTED(89) 666 + isync 667 + END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89) 668 + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 669 + 670 + /* Load guest PMU registers */ 671 + /* R4 is live here (vcpu pointer) */ 672 + li r3, 1 673 + sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 674 + mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 675 + isync 676 + lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ 677 + lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ 678 + lwz r6, VCPU_PMC + 8(r4) 679 + lwz r7, VCPU_PMC + 12(r4) 680 + lwz r8, VCPU_PMC + 16(r4) 681 + lwz r9, VCPU_PMC + 20(r4) 682 + BEGIN_FTR_SECTION 683 + lwz r10, VCPU_PMC + 24(r4) 684 + lwz r11, VCPU_PMC + 28(r4) 685 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 686 + mtspr SPRN_PMC1, r3 687 + mtspr SPRN_PMC2, r5 688 + mtspr SPRN_PMC3, r6 689 + mtspr SPRN_PMC4, r7 690 + mtspr SPRN_PMC5, r8 691 + mtspr SPRN_PMC6, r9 692 + BEGIN_FTR_SECTION 693 + mtspr SPRN_PMC7, r10 694 + mtspr SPRN_PMC8, r11 695 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 696 + ld r3, VCPU_MMCR(r4) 697 + ld r5, VCPU_MMCR + 8(r4) 698 + ld r6, VCPU_MMCR + 16(r4) 699 + ld r7, VCPU_SIAR(r4) 700 + ld r8, VCPU_SDAR(r4) 701 + mtspr SPRN_MMCR1, r5 702 + mtspr SPRN_MMCRA, r6 703 + mtspr SPRN_SIAR, r7 704 + mtspr SPRN_SDAR, r8 705 + BEGIN_FTR_SECTION 706 + ld r5, VCPU_MMCR + 24(r4) 707 + ld r6, VCPU_SIER(r4) 708 + lwz r7, VCPU_PMC + 24(r4) 709 + lwz r8, VCPU_PMC + 28(r4) 710 + ld r9, VCPU_MMCR + 32(r4) 711 + mtspr SPRN_MMCR2, r5 712 + mtspr SPRN_SIER, r6 713 + mtspr SPRN_SPMC1, r7 714 + mtspr SPRN_SPMC2, r8 715 + mtspr SPRN_MMCRS, r9 716 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 717 + mtspr SPRN_MMCR0, r3 718 + isync 719 + 720 + /* Load up FP, VMX and VSX registers */ 721 + bl kvmppc_load_fp 722 + 723 + ld r14, VCPU_GPR(R14)(r4) 724 + ld r15, 
VCPU_GPR(R15)(r4) 725 + ld r16, VCPU_GPR(R16)(r4) 726 + ld r17, VCPU_GPR(R17)(r4) 727 + ld r18, VCPU_GPR(R18)(r4) 728 + ld r19, VCPU_GPR(R19)(r4) 729 + ld r20, VCPU_GPR(R20)(r4) 730 + ld r21, VCPU_GPR(R21)(r4) 731 + ld r22, VCPU_GPR(R22)(r4) 732 + ld r23, VCPU_GPR(R23)(r4) 733 + ld r24, VCPU_GPR(R24)(r4) 734 + ld r25, VCPU_GPR(R25)(r4) 735 + ld r26, VCPU_GPR(R26)(r4) 736 + ld r27, VCPU_GPR(R27)(r4) 737 + ld r28, VCPU_GPR(R28)(r4) 738 + ld r29, VCPU_GPR(R29)(r4) 739 + ld r30, VCPU_GPR(R30)(r4) 740 + ld r31, VCPU_GPR(R31)(r4) 741 + 742 + BEGIN_FTR_SECTION 743 + /* Switch DSCR to guest value */ 744 + ld r5, VCPU_DSCR(r4) 745 + mtspr SPRN_DSCR, r5 746 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 747 + 748 + BEGIN_FTR_SECTION 749 + /* Skip next section on POWER7 or PPC970 */ 750 + b 8f 751 + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 752 + /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ 753 + mfmsr r8 754 + li r0, 1 755 + rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG 756 + mtmsrd r8 757 + 758 + /* Load up POWER8-specific registers */ 759 + ld r5, VCPU_IAMR(r4) 760 + lwz r6, VCPU_PSPB(r4) 761 + ld r7, VCPU_FSCR(r4) 762 + mtspr SPRN_IAMR, r5 763 + mtspr SPRN_PSPB, r6 764 + mtspr SPRN_FSCR, r7 765 + ld r5, VCPU_DAWR(r4) 766 + ld r6, VCPU_DAWRX(r4) 767 + ld r7, VCPU_CIABR(r4) 768 + ld r8, VCPU_TAR(r4) 769 + mtspr SPRN_DAWR, r5 770 + mtspr SPRN_DAWRX, r6 771 + mtspr SPRN_CIABR, r7 772 + mtspr SPRN_TAR, r8 773 + ld r5, VCPU_IC(r4) 774 + ld r6, VCPU_VTB(r4) 775 + mtspr SPRN_IC, r5 776 + mtspr SPRN_VTB, r6 777 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 778 + ld r5, VCPU_TFHAR(r4) 779 + ld r6, VCPU_TFIAR(r4) 780 + ld r7, VCPU_TEXASR(r4) 781 + mtspr SPRN_TFHAR, r5 782 + mtspr SPRN_TFIAR, r6 783 + mtspr SPRN_TEXASR, r7 784 + #endif 785 + ld r8, VCPU_EBBHR(r4) 786 + mtspr SPRN_EBBHR, r8 787 + ld r5, VCPU_EBBRR(r4) 788 + ld r6, VCPU_BESCR(r4) 789 + ld r7, VCPU_CSIGR(r4) 790 + ld r8, VCPU_TACR(r4) 791 + mtspr SPRN_EBBRR, r5 792 + mtspr SPRN_BESCR, r6 793 + mtspr SPRN_CSIGR, r7 794 + mtspr SPRN_TACR, r8 795 + ld r5, VCPU_TCSCR(r4) 796 + ld r6, VCPU_ACOP(r4) 797 + lwz r7, VCPU_GUEST_PID(r4) 798 + ld r8, VCPU_WORT(r4) 799 + mtspr SPRN_TCSCR, r5 800 + mtspr SPRN_ACOP, r6 801 + mtspr SPRN_PID, r7 802 + mtspr SPRN_WORT, r8 803 + 8: 804 + 805 + /* 806 + * Set the decrementer to the guest decrementer. 
807 + */ 808 + ld r8,VCPU_DEC_EXPIRES(r4) 809 + mftb r7 810 + subf r3,r7,r8 811 + mtspr SPRN_DEC,r3 812 + stw r3,VCPU_DEC(r4) 813 + 814 + ld r5, VCPU_SPRG0(r4) 815 + ld r6, VCPU_SPRG1(r4) 816 + ld r7, VCPU_SPRG2(r4) 817 + ld r8, VCPU_SPRG3(r4) 818 + mtspr SPRN_SPRG0, r5 819 + mtspr SPRN_SPRG1, r6 820 + mtspr SPRN_SPRG2, r7 821 + mtspr SPRN_SPRG3, r8 822 + 823 + /* Load up DAR and DSISR */ 824 + ld r5, VCPU_DAR(r4) 825 + lwz r6, VCPU_DSISR(r4) 826 + mtspr SPRN_DAR, r5 827 + mtspr SPRN_DSISR, r6 828 + 829 + BEGIN_FTR_SECTION 830 + /* Restore AMR and UAMOR, set AMOR to all 1s */ 831 + ld r5,VCPU_AMR(r4) 832 + ld r6,VCPU_UAMOR(r4) 833 + li r7,-1 834 + mtspr SPRN_AMR,r5 835 + mtspr SPRN_UAMOR,r6 836 + mtspr SPRN_AMOR,r7 837 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 565 838 566 839 /* Restore state of CTRL run bit; assume 1 on entry */ 567 840 lwz r5,VCPU_CTRL(r4) ··· 780 647 mtctr r6 781 648 mtxer r7 782 649 650 + kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */ 783 651 ld r10, VCPU_PC(r4) 784 652 ld r11, VCPU_MSR(r4) 785 - kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */ 786 653 ld r6, VCPU_SRR0(r4) 787 654 ld r7, VCPU_SRR1(r4) 655 + mtspr SPRN_SRR0, r6 656 + mtspr SPRN_SRR1, r7 788 657 658 + deliver_guest_interrupt: 789 659 /* r11 = vcpu->arch.msr & ~MSR_HV */ 790 660 rldicl r11, r11, 63 - MSR_HV_LG, 1 791 661 rotldi r11, r11, 1 + MSR_HV_LG 792 662 ori r11, r11, MSR_ME 793 663 794 664 /* Check if we can deliver an external or decrementer interrupt now */ 795 - ld r0,VCPU_PENDING_EXC(r4) 796 - lis r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h 797 - and r0,r0,r8 798 - cmpdi cr1,r0,0 799 - andi. r0,r11,MSR_EE 800 - beq cr1,11f 665 + ld r0, VCPU_PENDING_EXC(r4) 666 + rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63 667 + cmpdi cr1, r0, 0 668 + andi. r8, r11, MSR_EE 801 669 BEGIN_FTR_SECTION 802 - mfspr r8,SPRN_LPCR 803 - ori r8,r8,LPCR_MER 804 - mtspr SPRN_LPCR,r8 670 + mfspr r8, SPRN_LPCR 671 + /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */ 672 + rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH 673 + mtspr SPRN_LPCR, r8 805 674 isync 806 675 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 807 676 beq 5f 808 - li r0,BOOK3S_INTERRUPT_EXTERNAL 809 - 12: mr r6,r10 677 + li r0, BOOK3S_INTERRUPT_EXTERNAL 678 + bne cr1, 12f 679 + mfspr r0, SPRN_DEC 680 + cmpwi r0, 0 681 + li r0, BOOK3S_INTERRUPT_DECREMENTER 682 + bge 5f 683 + 684 + 12: mtspr SPRN_SRR0, r10 810 685 mr r10,r0 811 - mr r7,r11 812 - li r11,(MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ 813 - rotldi r11,r11,63 814 - b 5f 815 - 11: beq 5f 816 - mfspr r0,SPRN_DEC 817 - cmpwi r0,0 818 - li r0,BOOK3S_INTERRUPT_DECREMENTER 819 - blt 12b 686 + mtspr SPRN_SRR1, r11 687 + ld r11, VCPU_INTR_MSR(r4) 688 + 5: 820 689 821 - /* Move SRR0 and SRR1 into the respective regs */ 822 - 5: mtspr SPRN_SRR0, r6 823 - mtspr SPRN_SRR1, r7 824 - 690 + /* 691 + * Required state: 692 + * R4 = vcpu 693 + * R10: value for HSRR0 694 + * R11: value for HSRR1 695 + * R13 = PACA 696 + */ 825 697 fast_guest_return: 826 698 li r0,0 827 699 stb r0,VCPU_CEDED(r4) /* cancel cede */ ··· 1006 868 /* External interrupt, first check for host_ipi. 
If this is 1007 869 * set, we know the host wants us out so let's do it now 1008 870 */ 1009 - do_ext_interrupt: 1010 871 bl kvmppc_read_intr 1011 872 cmpdi r3, 0 1012 873 bgt ext_interrupt_to_host 1013 874 1014 - /* Allright, looks like an IPI for the guest, we need to set MER */ 1015 875 /* Check if any CPU is heading out to the host, if so head out too */ 1016 876 ld r5, HSTATE_KVM_VCORE(r13) 1017 877 lwz r0, VCORE_ENTRY_EXIT(r5) 1018 878 cmpwi r0, 0x100 1019 879 bge ext_interrupt_to_host 1020 880 1021 - /* See if there is a pending interrupt for the guest */ 1022 - mfspr r8, SPRN_LPCR 1023 - ld r0, VCPU_PENDING_EXC(r9) 1024 - /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */ 1025 - rldicl. r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63 1026 - rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH 1027 - beq 2f 1028 - 1029 - /* And if the guest EE is set, we can deliver immediately, else 1030 - * we return to the guest with MER set 1031 - */ 1032 - andi. r0, r11, MSR_EE 1033 - beq 2f 1034 - mtspr SPRN_SRR0, r10 1035 - mtspr SPRN_SRR1, r11 1036 - li r10, BOOK3S_INTERRUPT_EXTERNAL 1037 - li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ 1038 - rotldi r11, r11, 63 1039 - 2: mr r4, r9 1040 - mtspr SPRN_LPCR, r8 1041 - b fast_guest_return 881 + /* Return to guest after delivering any pending interrupt */ 882 + mr r4, r9 883 + b deliver_guest_interrupt 1042 884 1043 885 ext_interrupt_to_host: 1044 886 ··· 1093 975 mtspr SPRN_SPURR,r4 1094 976 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201) 1095 977 978 + /* Save DEC */ 979 + mfspr r5,SPRN_DEC 980 + mftb r6 981 + extsw r5,r5 982 + add r5,r5,r6 983 + std r5,VCPU_DEC_EXPIRES(r9) 984 + 985 + BEGIN_FTR_SECTION 986 + b 8f 987 + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 988 + /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ 989 + mfmsr r8 990 + li r0, 1 991 + rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG 992 + mtmsrd r8 993 + 994 + /* Save POWER8-specific registers */ 995 + mfspr r5, SPRN_IAMR 996 + mfspr r6, SPRN_PSPB 997 + mfspr r7, SPRN_FSCR 998 + std r5, VCPU_IAMR(r9) 999 + stw r6, VCPU_PSPB(r9) 1000 + std r7, VCPU_FSCR(r9) 1001 + mfspr r5, SPRN_IC 1002 + mfspr r6, SPRN_VTB 1003 + mfspr r7, SPRN_TAR 1004 + std r5, VCPU_IC(r9) 1005 + std r6, VCPU_VTB(r9) 1006 + std r7, VCPU_TAR(r9) 1007 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1008 + mfspr r5, SPRN_TFHAR 1009 + mfspr r6, SPRN_TFIAR 1010 + mfspr r7, SPRN_TEXASR 1011 + std r5, VCPU_TFHAR(r9) 1012 + std r6, VCPU_TFIAR(r9) 1013 + std r7, VCPU_TEXASR(r9) 1014 + #endif 1015 + mfspr r8, SPRN_EBBHR 1016 + std r8, VCPU_EBBHR(r9) 1017 + mfspr r5, SPRN_EBBRR 1018 + mfspr r6, SPRN_BESCR 1019 + mfspr r7, SPRN_CSIGR 1020 + mfspr r8, SPRN_TACR 1021 + std r5, VCPU_EBBRR(r9) 1022 + std r6, VCPU_BESCR(r9) 1023 + std r7, VCPU_CSIGR(r9) 1024 + std r8, VCPU_TACR(r9) 1025 + mfspr r5, SPRN_TCSCR 1026 + mfspr r6, SPRN_ACOP 1027 + mfspr r7, SPRN_PID 1028 + mfspr r8, SPRN_WORT 1029 + std r5, VCPU_TCSCR(r9) 1030 + std r6, VCPU_ACOP(r9) 1031 + stw r7, VCPU_GUEST_PID(r9) 1032 + std r8, VCPU_WORT(r9) 1033 + 8: 1034 + 1035 + /* Save and reset AMR and UAMOR before turning on the MMU */ 1036 + BEGIN_FTR_SECTION 1037 + mfspr r5,SPRN_AMR 1038 + mfspr r6,SPRN_UAMOR 1039 + std r5,VCPU_AMR(r9) 1040 + std r6,VCPU_UAMOR(r9) 1041 + li r6,0 1042 + mtspr SPRN_AMR,r6 1043 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1044 + 1045 + /* Switch DSCR back to host value */ 1046 + BEGIN_FTR_SECTION 1047 + mfspr r8, SPRN_DSCR 1048 + ld r7, HSTATE_DSCR(r13) 1049 + std r8, VCPU_DSCR(r9) 1050 + mtspr SPRN_DSCR, r7 1051 + 
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1052 + 1053 + /* Save non-volatile GPRs */ 1054 + std r14, VCPU_GPR(R14)(r9) 1055 + std r15, VCPU_GPR(R15)(r9) 1056 + std r16, VCPU_GPR(R16)(r9) 1057 + std r17, VCPU_GPR(R17)(r9) 1058 + std r18, VCPU_GPR(R18)(r9) 1059 + std r19, VCPU_GPR(R19)(r9) 1060 + std r20, VCPU_GPR(R20)(r9) 1061 + std r21, VCPU_GPR(R21)(r9) 1062 + std r22, VCPU_GPR(R22)(r9) 1063 + std r23, VCPU_GPR(R23)(r9) 1064 + std r24, VCPU_GPR(R24)(r9) 1065 + std r25, VCPU_GPR(R25)(r9) 1066 + std r26, VCPU_GPR(R26)(r9) 1067 + std r27, VCPU_GPR(R27)(r9) 1068 + std r28, VCPU_GPR(R28)(r9) 1069 + std r29, VCPU_GPR(R29)(r9) 1070 + std r30, VCPU_GPR(R30)(r9) 1071 + std r31, VCPU_GPR(R31)(r9) 1072 + 1073 + /* Save SPRGs */ 1074 + mfspr r3, SPRN_SPRG0 1075 + mfspr r4, SPRN_SPRG1 1076 + mfspr r5, SPRN_SPRG2 1077 + mfspr r6, SPRN_SPRG3 1078 + std r3, VCPU_SPRG0(r9) 1079 + std r4, VCPU_SPRG1(r9) 1080 + std r5, VCPU_SPRG2(r9) 1081 + std r6, VCPU_SPRG3(r9) 1082 + 1083 + /* save FP state */ 1084 + mr r3, r9 1085 + bl kvmppc_save_fp 1086 + 1087 + /* Increment yield count if they have a VPA */ 1088 + ld r8, VCPU_VPA(r9) /* do they have a VPA? */ 1089 + cmpdi r8, 0 1090 + beq 25f 1091 + lwz r3, LPPACA_YIELDCOUNT(r8) 1092 + addi r3, r3, 1 1093 + stw r3, LPPACA_YIELDCOUNT(r8) 1094 + li r3, 1 1095 + stb r3, VCPU_VPA_DIRTY(r9) 1096 + 25: 1097 + /* Save PMU registers if requested */ 1098 + /* r8 and cr0.eq are live here */ 1099 + li r3, 1 1100 + sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 1101 + mfspr r4, SPRN_MMCR0 /* save MMCR0 */ 1102 + mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 1103 + mfspr r6, SPRN_MMCRA 1104 + BEGIN_FTR_SECTION 1105 + /* On P7, clear MMCRA in order to disable SDAR updates */ 1106 + li r7, 0 1107 + mtspr SPRN_MMCRA, r7 1108 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1109 + isync 1110 + beq 21f /* if no VPA, save PMU stuff anyway */ 1111 + lbz r7, LPPACA_PMCINUSE(r8) 1112 + cmpwi r7, 0 /* did they ask for PMU stuff to be saved? 
*/ 1113 + bne 21f 1114 + std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ 1115 + b 22f 1116 + 21: mfspr r5, SPRN_MMCR1 1117 + mfspr r7, SPRN_SIAR 1118 + mfspr r8, SPRN_SDAR 1119 + std r4, VCPU_MMCR(r9) 1120 + std r5, VCPU_MMCR + 8(r9) 1121 + std r6, VCPU_MMCR + 16(r9) 1122 + std r7, VCPU_SIAR(r9) 1123 + std r8, VCPU_SDAR(r9) 1124 + mfspr r3, SPRN_PMC1 1125 + mfspr r4, SPRN_PMC2 1126 + mfspr r5, SPRN_PMC3 1127 + mfspr r6, SPRN_PMC4 1128 + mfspr r7, SPRN_PMC5 1129 + mfspr r8, SPRN_PMC6 1130 + BEGIN_FTR_SECTION 1131 + mfspr r10, SPRN_PMC7 1132 + mfspr r11, SPRN_PMC8 1133 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1134 + stw r3, VCPU_PMC(r9) 1135 + stw r4, VCPU_PMC + 4(r9) 1136 + stw r5, VCPU_PMC + 8(r9) 1137 + stw r6, VCPU_PMC + 12(r9) 1138 + stw r7, VCPU_PMC + 16(r9) 1139 + stw r8, VCPU_PMC + 20(r9) 1140 + BEGIN_FTR_SECTION 1141 + stw r10, VCPU_PMC + 24(r9) 1142 + stw r11, VCPU_PMC + 28(r9) 1143 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1144 + BEGIN_FTR_SECTION 1145 + mfspr r4, SPRN_MMCR2 1146 + mfspr r5, SPRN_SIER 1147 + mfspr r6, SPRN_SPMC1 1148 + mfspr r7, SPRN_SPMC2 1149 + mfspr r8, SPRN_MMCRS 1150 + std r4, VCPU_MMCR + 24(r9) 1151 + std r5, VCPU_SIER(r9) 1152 + stw r6, VCPU_PMC + 24(r9) 1153 + stw r7, VCPU_PMC + 28(r9) 1154 + std r8, VCPU_MMCR + 32(r9) 1155 + lis r4, 0x8000 1156 + mtspr SPRN_MMCRS, r4 1157 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1158 + 22: 1096 1159 /* Clear out SLB */ 1097 1160 li r5,0 1098 1161 slbmte r5,r5 1099 1162 slbia 1100 1163 ptesync 1101 1164 1102 - hdec_soon: /* r9 = vcpu, r12 = trap, r13 = paca */ 1165 + hdec_soon: /* r12 = trap, r13 = paca */ 1103 1166 BEGIN_FTR_SECTION 1104 1167 b 32f 1105 1168 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) ··· 1313 1014 */ 1314 1015 cmpwi r3,0x100 /* Are we the first here? */ 1315 1016 bge 43f 1316 - cmpwi r3,1 /* Are any other threads in the guest? */ 1317 - ble 43f 1318 1017 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 1319 1018 beq 40f 1320 1019 li r0,0 ··· 1323 1026 * doesn't wake CPUs up from nap. 1324 1027 */ 1325 1028 lwz r3,VCORE_NAPPING_THREADS(r5) 1326 - lwz r4,VCPU_PTID(r9) 1029 + lbz r4,HSTATE_PTID(r13) 1327 1030 li r0,1 1328 1031 sld r0,r0,r4 1329 1032 andc. r3,r3,r0 /* no sense IPI'ing ourselves */ ··· 1342 1045 addi r6,r6,PACA_SIZE 1343 1046 bne 42b 1344 1047 1048 + secondary_too_late: 1345 1049 /* Secondary threads wait for primary to do partition switch */ 1346 - 43: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */ 1347 - ld r5,HSTATE_KVM_VCORE(r13) 1348 - lwz r3,VCPU_PTID(r9) 1050 + 43: ld r5,HSTATE_KVM_VCORE(r13) 1051 + ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 1052 + lbz r3,HSTATE_PTID(r13) 1349 1053 cmpwi r3,0 1350 1054 beq 15f 1351 1055 HMT_LOW ··· 1373 1075 mtspr SPRN_SDR1,r6 /* switch to partition page table */ 1374 1076 mtspr SPRN_LPID,r7 1375 1077 isync 1078 + 1079 + BEGIN_FTR_SECTION 1080 + /* DPDES is shared between threads */ 1081 + mfspr r7, SPRN_DPDES 1082 + std r7, VCORE_DPDES(r5) 1083 + /* clear DPDES so we don't get guest doorbells in the host */ 1084 + li r8, 0 1085 + mtspr SPRN_DPDES, r8 1086 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1376 1087 1377 1088 /* Subtract timebase offset from timebase */ 1378 1089 ld r8,VCORE_TB_OFFSET(r5) ··· 1420 1113 * We have to lock against concurrent tlbies, and 1421 1114 * we have to flush the whole TLB. 
1422 1115 */ 1423 - 32: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */ 1116 + 32: ld r5,HSTATE_KVM_VCORE(r13) 1117 + ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 1424 1118 1425 1119 /* Take the guest's tlbie_lock */ 1426 1120 #ifdef __BIG_ENDIAN__ ··· 1511 1203 add r5,r5,r6 1512 1204 std r5,VCPU_DEC_EXPIRES(r9) 1513 1205 1206 + BEGIN_FTR_SECTION 1207 + b 8f 1208 + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 1209 + /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ 1210 + mfmsr r8 1211 + li r0, 1 1212 + rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG 1213 + mtmsrd r8 1214 + 1215 + /* Save POWER8-specific registers */ 1216 + mfspr r5, SPRN_IAMR 1217 + mfspr r6, SPRN_PSPB 1218 + mfspr r7, SPRN_FSCR 1219 + std r5, VCPU_IAMR(r9) 1220 + stw r6, VCPU_PSPB(r9) 1221 + std r7, VCPU_FSCR(r9) 1222 + mfspr r5, SPRN_IC 1223 + mfspr r6, SPRN_VTB 1224 + mfspr r7, SPRN_TAR 1225 + std r5, VCPU_IC(r9) 1226 + std r6, VCPU_VTB(r9) 1227 + std r7, VCPU_TAR(r9) 1228 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1229 + mfspr r5, SPRN_TFHAR 1230 + mfspr r6, SPRN_TFIAR 1231 + mfspr r7, SPRN_TEXASR 1232 + std r5, VCPU_TFHAR(r9) 1233 + std r6, VCPU_TFIAR(r9) 1234 + std r7, VCPU_TEXASR(r9) 1235 + #endif 1236 + mfspr r8, SPRN_EBBHR 1237 + std r8, VCPU_EBBHR(r9) 1238 + mfspr r5, SPRN_EBBRR 1239 + mfspr r6, SPRN_BESCR 1240 + mfspr r7, SPRN_CSIGR 1241 + mfspr r8, SPRN_TACR 1242 + std r5, VCPU_EBBRR(r9) 1243 + std r6, VCPU_BESCR(r9) 1244 + std r7, VCPU_CSIGR(r9) 1245 + std r8, VCPU_TACR(r9) 1246 + mfspr r5, SPRN_TCSCR 1247 + mfspr r6, SPRN_ACOP 1248 + mfspr r7, SPRN_PID 1249 + mfspr r8, SPRN_WORT 1250 + std r5, VCPU_TCSCR(r9) 1251 + std r6, VCPU_ACOP(r9) 1252 + stw r7, VCPU_GUEST_PID(r9) 1253 + std r8, VCPU_WORT(r9) 1254 + 8: 1255 + 1514 1256 /* Save and reset AMR and UAMOR before turning on the MMU */ 1515 1257 BEGIN_FTR_SECTION 1516 1258 mfspr r5,SPRN_AMR ··· 1575 1217 li r0, KVM_GUEST_MODE_NONE 1576 1218 stb r0, HSTATE_IN_GUEST(r13) 1577 1219 1578 - /* Switch DSCR back to host value */ 1579 - BEGIN_FTR_SECTION 1580 - mfspr r8, SPRN_DSCR 1581 - ld r7, HSTATE_DSCR(r13) 1582 - std r8, VCPU_DSCR(r9) 1583 - mtspr SPRN_DSCR, r7 1584 - END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1585 - 1586 - /* Save non-volatile GPRs */ 1587 - std r14, VCPU_GPR(R14)(r9) 1588 - std r15, VCPU_GPR(R15)(r9) 1589 - std r16, VCPU_GPR(R16)(r9) 1590 - std r17, VCPU_GPR(R17)(r9) 1591 - std r18, VCPU_GPR(R18)(r9) 1592 - std r19, VCPU_GPR(R19)(r9) 1593 - std r20, VCPU_GPR(R20)(r9) 1594 - std r21, VCPU_GPR(R21)(r9) 1595 - std r22, VCPU_GPR(R22)(r9) 1596 - std r23, VCPU_GPR(R23)(r9) 1597 - std r24, VCPU_GPR(R24)(r9) 1598 - std r25, VCPU_GPR(R25)(r9) 1599 - std r26, VCPU_GPR(R26)(r9) 1600 - std r27, VCPU_GPR(R27)(r9) 1601 - std r28, VCPU_GPR(R28)(r9) 1602 - std r29, VCPU_GPR(R29)(r9) 1603 - std r30, VCPU_GPR(R30)(r9) 1604 - std r31, VCPU_GPR(R31)(r9) 1605 - 1606 - /* Save SPRGs */ 1607 - mfspr r3, SPRN_SPRG0 1608 - mfspr r4, SPRN_SPRG1 1609 - mfspr r5, SPRN_SPRG2 1610 - mfspr r6, SPRN_SPRG3 1611 - std r3, VCPU_SPRG0(r9) 1612 - std r4, VCPU_SPRG1(r9) 1613 - std r5, VCPU_SPRG2(r9) 1614 - std r6, VCPU_SPRG3(r9) 1615 - 1616 - /* save FP state */ 1617 - mr r3, r9 1618 - bl .kvmppc_save_fp 1619 - 1620 - /* Increment yield count if they have a VPA */ 1621 - ld r8, VCPU_VPA(r9) /* do they have a VPA? 
*/ 1622 - cmpdi r8, 0 1623 - beq 25f 1624 - lwz r3, LPPACA_YIELDCOUNT(r8) 1625 - addi r3, r3, 1 1626 - stw r3, LPPACA_YIELDCOUNT(r8) 1627 - li r3, 1 1628 - stb r3, VCPU_VPA_DIRTY(r9) 1629 - 25: 1630 - /* Save PMU registers if requested */ 1631 - /* r8 and cr0.eq are live here */ 1632 - li r3, 1 1633 - sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 1634 - mfspr r4, SPRN_MMCR0 /* save MMCR0 */ 1635 - mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 1636 - mfspr r6, SPRN_MMCRA 1637 - BEGIN_FTR_SECTION 1638 - /* On P7, clear MMCRA in order to disable SDAR updates */ 1639 - li r7, 0 1640 - mtspr SPRN_MMCRA, r7 1641 - END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1642 - isync 1643 - beq 21f /* if no VPA, save PMU stuff anyway */ 1644 - lbz r7, LPPACA_PMCINUSE(r8) 1645 - cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */ 1646 - bne 21f 1647 - std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ 1648 - b 22f 1649 - 21: mfspr r5, SPRN_MMCR1 1650 - mfspr r7, SPRN_SIAR 1651 - mfspr r8, SPRN_SDAR 1652 - std r4, VCPU_MMCR(r9) 1653 - std r5, VCPU_MMCR + 8(r9) 1654 - std r6, VCPU_MMCR + 16(r9) 1655 - std r7, VCPU_SIAR(r9) 1656 - std r8, VCPU_SDAR(r9) 1657 - mfspr r3, SPRN_PMC1 1658 - mfspr r4, SPRN_PMC2 1659 - mfspr r5, SPRN_PMC3 1660 - mfspr r6, SPRN_PMC4 1661 - mfspr r7, SPRN_PMC5 1662 - mfspr r8, SPRN_PMC6 1663 - BEGIN_FTR_SECTION 1664 - mfspr r10, SPRN_PMC7 1665 - mfspr r11, SPRN_PMC8 1666 - END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1667 - stw r3, VCPU_PMC(r9) 1668 - stw r4, VCPU_PMC + 4(r9) 1669 - stw r5, VCPU_PMC + 8(r9) 1670 - stw r6, VCPU_PMC + 12(r9) 1671 - stw r7, VCPU_PMC + 16(r9) 1672 - stw r8, VCPU_PMC + 20(r9) 1673 - BEGIN_FTR_SECTION 1674 - stw r10, VCPU_PMC + 24(r9) 1675 - stw r11, VCPU_PMC + 28(r9) 1676 - END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1677 - 22: 1678 1220 ld r0, 112+PPC_LR_STKOFF(r1) 1679 1221 addi r1, r1, 112 1680 1222 mtlr r0 1681 1223 blr 1682 - secondary_too_late: 1683 - ld r5,HSTATE_KVM_VCORE(r13) 1684 - HMT_LOW 1685 - 13: lbz r3,VCORE_IN_GUEST(r5) 1686 - cmpwi r3,0 1687 - bne 13b 1688 - HMT_MEDIUM 1689 - li r0, KVM_GUEST_MODE_NONE 1690 - stb r0, HSTATE_IN_GUEST(r13) 1691 - ld r11,PACA_SLBSHADOWPTR(r13) 1692 - 1693 - .rept SLB_NUM_BOLTED 1694 - ld r5,SLBSHADOW_SAVEAREA(r11) 1695 - ld r6,SLBSHADOW_SAVEAREA+8(r11) 1696 - andis. r7,r5,SLB_ESID_V@h 1697 - beq 1f 1698 - slbmte r6,r5 1699 - 1: addi r11,r11,16 1700 - .endr 1701 - b 22b 1702 1224 1703 1225 /* 1704 1226 * Check whether an HDSI is an HPTE not found fault or something else. ··· 1624 1386 mtspr SPRN_SRR0, r10 1625 1387 mtspr SPRN_SRR1, r11 1626 1388 li r10, BOOK3S_INTERRUPT_DATA_STORAGE 1627 - li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ 1628 - rotldi r11, r11, 63 1389 + ld r11, VCPU_INTR_MSR(r9) 1629 1390 fast_interrupt_c_return: 1630 1391 6: ld r7, VCPU_CTR(r9) 1631 1392 lwz r8, VCPU_XER(r9) ··· 1693 1456 1: mtspr SPRN_SRR0, r10 1694 1457 mtspr SPRN_SRR1, r11 1695 1458 li r10, BOOK3S_INTERRUPT_INST_STORAGE 1696 - li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ 1697 - rotldi r11, r11, 63 1459 + ld r11, VCPU_INTR_MSR(r9) 1698 1460 b fast_interrupt_c_return 1699 1461 1700 1462 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ ··· 1710 1474 hcall_try_real_mode: 1711 1475 ld r3,VCPU_GPR(R3)(r9) 1712 1476 andi. 
r0,r11,MSR_PR 1713 - bne guest_exit_cont 1477 + /* sc 1 from userspace - reflect to guest syscall */ 1478 + bne sc_1_fast_return 1714 1479 clrrdi r3,r3,2 1715 1480 cmpldi r3,hcall_real_table_end - hcall_real_table 1716 1481 bge guest_exit_cont ··· 1730 1493 std r3,VCPU_GPR(R3)(r4) 1731 1494 ld r10,VCPU_PC(r4) 1732 1495 ld r11,VCPU_MSR(r4) 1496 + b fast_guest_return 1497 + 1498 + sc_1_fast_return: 1499 + mtspr SPRN_SRR0,r10 1500 + mtspr SPRN_SRR1,r11 1501 + li r10, BOOK3S_INTERRUPT_SYSCALL 1502 + ld r11, VCPU_INTR_MSR(r9) 1503 + mr r4,r9 1733 1504 b fast_guest_return 1734 1505 1735 1506 /* We've attempted a real mode hcall, but it's punted it back ··· 1833 1588 .long 0 /* 0x11c */ 1834 1589 .long 0 /* 0x120 */ 1835 1590 .long .kvmppc_h_bulk_remove - hcall_real_table 1591 + .long 0 /* 0x128 */ 1592 + .long 0 /* 0x12c */ 1593 + .long 0 /* 0x130 */ 1594 + .long .kvmppc_h_set_xdabr - hcall_real_table 1836 1595 hcall_real_table_end: 1837 1596 1838 1597 ignore_hdec: 1839 1598 mr r4,r9 1840 1599 b fast_guest_return 1841 1600 1601 + _GLOBAL(kvmppc_h_set_xdabr) 1602 + andi. r0, r5, DABRX_USER | DABRX_KERNEL 1603 + beq 6f 1604 + li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI 1605 + andc. r0, r5, r0 1606 + beq 3f 1607 + 6: li r3, H_PARAMETER 1608 + blr 1609 + 1842 1610 _GLOBAL(kvmppc_h_set_dabr) 1611 + li r5, DABRX_USER | DABRX_KERNEL 1612 + 3: 1613 + BEGIN_FTR_SECTION 1614 + b 2f 1615 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1843 1616 std r4,VCPU_DABR(r3) 1617 + stw r5, VCPU_DABRX(r3) 1618 + mtspr SPRN_DABRX, r5 1844 1619 /* Work around P7 bug where DABR can get corrupted on mtspr */ 1845 1620 1: mtspr SPRN_DABR,r4 1846 1621 mfspr r5, SPRN_DABR ··· 1868 1603 bne 1b 1869 1604 isync 1870 1605 li r3,0 1606 + blr 1607 + 1608 + /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ 1609 + 2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW 1610 + rlwimi r5, r4, 1, DAWRX_WT 1611 + clrrdi r4, r4, 3 1612 + std r4, VCPU_DAWR(r3) 1613 + std r5, VCPU_DAWRX(r3) 1614 + mtspr SPRN_DAWR, r4 1615 + mtspr SPRN_DAWRX, r5 1616 + li r3, 0 1871 1617 blr 1872 1618 1873 1619 _GLOBAL(kvmppc_h_cede) ··· 1904 1628 * up to the host. 1905 1629 */ 1906 1630 ld r5,HSTATE_KVM_VCORE(r13) 1907 - lwz r6,VCPU_PTID(r3) 1631 + lbz r6,HSTATE_PTID(r13) 1908 1632 lwz r8,VCORE_ENTRY_EXIT(r5) 1909 1633 clrldi r8,r8,56 1910 1634 li r0,1 ··· 1919 1643 bne 31b 1920 1644 /* order napping_threads update vs testing entry_exit_count */ 1921 1645 isync 1922 - li r0,1 1646 + li r0,NAPPING_CEDE 1923 1647 stb r0,HSTATE_NAPPING(r13) 1924 - mr r4,r3 1925 1648 lwz r7,VCORE_ENTRY_EXIT(r5) 1926 1649 cmpwi r7,0x100 1927 1650 bge 33f /* another thread already exiting */ ··· 1952 1677 std r31, VCPU_GPR(R31)(r3) 1953 1678 1954 1679 /* save FP state */ 1955 - bl .kvmppc_save_fp 1680 + bl kvmppc_save_fp 1956 1681 1957 1682 /* 1958 - * Take a nap until a decrementer or external interrupt occurs, 1959 - * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR 1683 + * Take a nap until a decrementer or external or doorbell interrupt 1684 + * occurs, with PECE1, PECE0 and PECEDP set in LPCR 1960 1685 */ 1961 1686 li r0,1 1962 1687 stb r0,HSTATE_HWTHREAD_REQ(r13) 1963 1688 mfspr r5,SPRN_LPCR 1964 1689 ori r5,r5,LPCR_PECE0 | LPCR_PECE1 1690 + BEGIN_FTR_SECTION 1691 + oris r5,r5,LPCR_PECEDP@h 1692 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1965 1693 mtspr SPRN_LPCR,r5 1966 1694 isync 1967 1695 li r0, 0 ··· 1975 1697 bne 1b 1976 1698 nap 1977 1699 b . 
1700 + 1701 + 33: mr r4, r3 1702 + li r3, 0 1703 + li r12, 0 1704 + b 34f 1978 1705 1979 1706 kvm_end_cede: 1980 1707 /* get vcpu pointer */ ··· 2010 1727 ld r29, VCPU_GPR(R29)(r4) 2011 1728 ld r30, VCPU_GPR(R30)(r4) 2012 1729 ld r31, VCPU_GPR(R31)(r4) 1730 + 1731 + /* Check the wake reason in SRR1 to see why we got here */ 1732 + bl kvmppc_check_wake_reason 2013 1733 2014 1734 /* clear our bit in vcore->napping_threads */ 2015 - 33: ld r5,HSTATE_KVM_VCORE(r13) 2016 - lwz r3,VCPU_PTID(r4) 1735 + 34: ld r5,HSTATE_KVM_VCORE(r13) 1736 + lbz r7,HSTATE_PTID(r13) 2017 1737 li r0,1 2018 - sld r0,r0,r3 1738 + sld r0,r0,r7 2019 1739 addi r6,r5,VCORE_NAPPING_THREADS 2020 1740 32: lwarx r7,0,r6 2021 1741 andc r7,r7,r0 ··· 2027 1741 li r0,0 2028 1742 stb r0,HSTATE_NAPPING(r13) 2029 1743 2030 - /* Check the wake reason in SRR1 to see why we got here */ 2031 - mfspr r3, SPRN_SRR1 2032 - rlwinm r3, r3, 44-31, 0x7 /* extract wake reason field */ 2033 - cmpwi r3, 4 /* was it an external interrupt? */ 2034 - li r12, BOOK3S_INTERRUPT_EXTERNAL 1744 + /* See if the wake reason means we need to exit */ 1745 + stw r12, VCPU_TRAP(r4) 2035 1746 mr r9, r4 2036 - ld r10, VCPU_PC(r9) 2037 - ld r11, VCPU_MSR(r9) 2038 - beq do_ext_interrupt /* if so */ 1747 + cmpdi r3, 0 1748 + bgt guest_exit_cont 2039 1749 2040 1750 /* see if any other thread is already exiting */ 2041 1751 lwz r0,VCORE_ENTRY_EXIT(r5) 2042 1752 cmpwi r0,0x100 2043 - blt kvmppc_cede_reentry /* if not go back to guest */ 1753 + bge guest_exit_cont 2044 1754 2045 - /* some threads are exiting, so go to the guest exit path */ 2046 - b hcall_real_fallback 1755 + b kvmppc_cede_reentry /* if not go back to guest */ 2047 1756 2048 1757 /* cede when already previously prodded case */ 2049 1758 kvm_cede_prodded: ··· 2064 1783 beq mc_cont 2065 1784 /* If not, deliver a machine check. SRR0/1 are already set */ 2066 1785 li r10, BOOK3S_INTERRUPT_MACHINE_CHECK 2067 - li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ 2068 - rotldi r11, r11, 63 1786 + ld r11, VCPU_INTR_MSR(r9) 2069 1787 b fast_interrupt_c_return 1788 + 1789 + /* 1790 + * Check the reason we woke from nap, and take appropriate action. 1791 + * Returns: 1792 + * 0 if nothing needs to be done 1793 + * 1 if something happened that needs to be handled by the host 1794 + * -1 if there was a guest wakeup (IPI) 1795 + * 1796 + * Also sets r12 to the interrupt vector for any interrupt that needs 1797 + * to be handled now by the host (0x500 for external interrupt), or zero. 1798 + */ 1799 + kvmppc_check_wake_reason: 1800 + mfspr r6, SPRN_SRR1 1801 + BEGIN_FTR_SECTION 1802 + rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ 1803 + FTR_SECTION_ELSE 1804 + rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ 1805 + ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) 1806 + cmpwi r6, 8 /* was it an external interrupt? */ 1807 + li r12, BOOK3S_INTERRUPT_EXTERNAL 1808 + beq kvmppc_read_intr /* if so, see what it was */ 1809 + li r3, 0 1810 + li r12, 0 1811 + cmpwi r6, 6 /* was it the decrementer? */ 1812 + beq 0f 1813 + BEGIN_FTR_SECTION 1814 + cmpwi r6, 5 /* privileged doorbell? */ 1815 + beq 0f 1816 + cmpwi r6, 3 /* hypervisor doorbell? */ 1817 + beq 3f 1818 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1819 + li r3, 1 /* anything else, return 1 */ 1820 + 0: blr 1821 + 1822 + /* hypervisor doorbell */ 1823 + 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL 1824 + li r3, 1 1825 + blr 2070 1826 2071 1827 /* 2072 1828 * Determine what sort of external interrupt is pending (if any). 
··· 2136 1818 * interrupts directly to the guest 2137 1819 */ 2138 1820 cmpwi r3, XICS_IPI /* if there is, is it an IPI? */ 2139 - li r3, 1 2140 1821 bne 42f 2141 1822 2142 1823 /* It's an IPI, clear the MFRR and EOI it */ ··· 2161 1844 * before exit, it will be picked up by the host ICP driver 2162 1845 */ 2163 1846 stw r0, HSTATE_SAVED_XIRR(r13) 1847 + li r3, 1 2164 1848 b 1b 2165 1849 2166 1850 43: /* We raced with the host, we need to resend that IPI, bummer */ 2167 1851 li r0, IPI_PRIORITY 2168 1852 stbcix r0, r6, r8 /* set the IPI */ 2169 1853 sync 1854 + li r3, 1 2170 1855 b 1b 2171 1856 2172 1857 /* 2173 1858 * Save away FP, VMX and VSX registers. 2174 1859 * r3 = vcpu pointer 1860 + * N.B. r30 and r31 are volatile across this function, 1861 + * thus it is not callable from C. 2175 1862 */ 2176 - _GLOBAL(kvmppc_save_fp) 1863 + kvmppc_save_fp: 1864 + mflr r30 1865 + mr r31,r3 2177 1866 mfmsr r5 2178 1867 ori r8,r5,MSR_FP 2179 1868 #ifdef CONFIG_ALTIVEC ··· 2194 1871 #endif 2195 1872 mtmsrd r8 2196 1873 isync 2197 - #ifdef CONFIG_VSX 2198 - BEGIN_FTR_SECTION 2199 - reg = 0 2200 - .rept 32 2201 - li r6,reg*16+VCPU_VSRS 2202 - STXVD2X(reg,R6,R3) 2203 - reg = reg + 1 2204 - .endr 2205 - FTR_SECTION_ELSE 2206 - #endif 2207 - reg = 0 2208 - .rept 32 2209 - stfd reg,reg*8+VCPU_FPRS(r3) 2210 - reg = reg + 1 2211 - .endr 2212 - #ifdef CONFIG_VSX 2213 - ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX) 2214 - #endif 2215 - mffs fr0 2216 - stfd fr0,VCPU_FPSCR(r3) 2217 - 1874 + addi r3,r3,VCPU_FPRS 1875 + bl .store_fp_state 2218 1876 #ifdef CONFIG_ALTIVEC 2219 1877 BEGIN_FTR_SECTION 2220 - reg = 0 2221 - .rept 32 2222 - li r6,reg*16+VCPU_VRS 2223 - stvx reg,r6,r3 2224 - reg = reg + 1 2225 - .endr 2226 - mfvscr vr0 2227 - li r6,VCPU_VSCR 2228 - stvx vr0,r6,r3 1878 + addi r3,r31,VCPU_VRS 1879 + bl .store_vr_state 2229 1880 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2230 1881 #endif 2231 1882 mfspr r6,SPRN_VRSAVE 2232 1883 stw r6,VCPU_VRSAVE(r3) 1884 + mtlr r30 2233 1885 mtmsrd r5 2234 1886 isync 2235 1887 blr ··· 2212 1914 /* 2213 1915 * Load up FP, VMX and VSX registers 2214 1916 * r4 = vcpu pointer 1917 + * N.B. r30 and r31 are volatile across this function, 1918 + * thus it is not callable from C. 2215 1919 */ 2216 - .globl kvmppc_load_fp 2217 1920 kvmppc_load_fp: 1921 + mflr r30 1922 + mr r31,r4 2218 1923 mfmsr r9 2219 1924 ori r8,r9,MSR_FP 2220 1925 #ifdef CONFIG_ALTIVEC ··· 2232 1931 #endif 2233 1932 mtmsrd r8 2234 1933 isync 2235 - lfd fr0,VCPU_FPSCR(r4) 2236 - MTFSF_L(fr0) 2237 - #ifdef CONFIG_VSX 2238 - BEGIN_FTR_SECTION 2239 - reg = 0 2240 - .rept 32 2241 - li r7,reg*16+VCPU_VSRS 2242 - LXVD2X(reg,R7,R4) 2243 - reg = reg + 1 2244 - .endr 2245 - FTR_SECTION_ELSE 2246 - #endif 2247 - reg = 0 2248 - .rept 32 2249 - lfd reg,reg*8+VCPU_FPRS(r4) 2250 - reg = reg + 1 2251 - .endr 2252 - #ifdef CONFIG_VSX 2253 - ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX) 2254 - #endif 2255 - 1934 + addi r3,r4,VCPU_FPRS 1935 + bl .load_fp_state 2256 1936 #ifdef CONFIG_ALTIVEC 2257 1937 BEGIN_FTR_SECTION 2258 - li r7,VCPU_VSCR 2259 - lvx vr0,r7,r4 2260 - mtvscr vr0 2261 - reg = 0 2262 - .rept 32 2263 - li r7,reg*16+VCPU_VRS 2264 - lvx reg,r7,r4 2265 - reg = reg + 1 2266 - .endr 1938 + addi r3,r31,VCPU_VRS 1939 + bl .load_vr_state 2267 1940 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2268 1941 #endif 2269 1942 lwz r7,VCPU_VRSAVE(r4) 2270 1943 mtspr SPRN_VRSAVE,r7 1944 + mtlr r30 1945 + mr r4,r31 2271 1946 blr 2272 1947 2273 1948 /*
+83 -86
arch/powerpc/kvm/book3s_paired_singles.c
··· 160 160 161 161 static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) 162 162 { 163 - kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]); 163 + kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]); 164 164 } 165 165 166 166 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) ··· 207 207 /* put in registers */ 208 208 switch (ls_type) { 209 209 case FPU_LS_SINGLE: 210 - kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]); 210 + kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs)); 211 211 vcpu->arch.qpr[rs] = *((u32*)tmp); 212 212 break; 213 213 case FPU_LS_DOUBLE: 214 - vcpu->arch.fpr[rs] = *((u64*)tmp); 214 + VCPU_FPR(vcpu, rs) = *((u64*)tmp); 215 215 break; 216 216 } 217 217 ··· 233 233 234 234 switch (ls_type) { 235 235 case FPU_LS_SINGLE: 236 - kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp); 236 + kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp); 237 237 val = *((u32*)tmp); 238 238 len = sizeof(u32); 239 239 break; 240 240 case FPU_LS_SINGLE_LOW: 241 - *((u32*)tmp) = vcpu->arch.fpr[rs]; 242 - val = vcpu->arch.fpr[rs] & 0xffffffff; 241 + *((u32*)tmp) = VCPU_FPR(vcpu, rs); 242 + val = VCPU_FPR(vcpu, rs) & 0xffffffff; 243 243 len = sizeof(u32); 244 244 break; 245 245 case FPU_LS_DOUBLE: 246 - *((u64*)tmp) = vcpu->arch.fpr[rs]; 247 - val = vcpu->arch.fpr[rs]; 246 + *((u64*)tmp) = VCPU_FPR(vcpu, rs); 247 + val = VCPU_FPR(vcpu, rs); 248 248 len = sizeof(u64); 249 249 break; 250 250 default: ··· 301 301 emulated = EMULATE_DONE; 302 302 303 303 /* put in registers */ 304 - kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]); 304 + kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs)); 305 305 vcpu->arch.qpr[rs] = tmp[1]; 306 306 307 307 dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0], ··· 319 319 u32 tmp[2]; 320 320 int len = w ? sizeof(u32) : sizeof(u64); 321 321 322 - kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]); 322 + kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]); 323 323 tmp[1] = vcpu->arch.qpr[rs]; 324 324 325 325 r = kvmppc_st(vcpu, &addr, len, tmp, true); ··· 512 512 u32 *src2, u32 *src3)) 513 513 { 514 514 u32 *qpr = vcpu->arch.qpr; 515 - u64 *fpr = vcpu->arch.fpr; 516 515 u32 ps0_out; 517 516 u32 ps0_in1, ps0_in2, ps0_in3; 518 517 u32 ps1_in1, ps1_in2, ps1_in3; ··· 520 521 WARN_ON(rc); 521 522 522 523 /* PS0 */ 523 - kvm_cvt_df(&fpr[reg_in1], &ps0_in1); 524 - kvm_cvt_df(&fpr[reg_in2], &ps0_in2); 525 - kvm_cvt_df(&fpr[reg_in3], &ps0_in3); 524 + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1); 525 + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2); 526 + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3); 526 527 527 528 if (scalar & SCALAR_LOW) 528 529 ps0_in2 = qpr[reg_in2]; 529 530 530 - func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3); 531 + func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3); 531 532 532 533 dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n", 533 534 ps0_in1, ps0_in2, ps0_in3, ps0_out); 534 535 535 536 if (!(scalar & SCALAR_NO_PS0)) 536 - kvm_cvt_fd(&ps0_out, &fpr[reg_out]); 537 + kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out)); 537 538 538 539 /* PS1 */ 539 540 ps1_in1 = qpr[reg_in1]; ··· 544 545 ps1_in2 = ps0_in2; 545 546 546 547 if (!(scalar & SCALAR_NO_PS1)) 547 - func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3); 548 + func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3); 548 549 549 550 dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n", 550 551 ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]); ··· 560 561 u32 *src2)) 561 562 { 562 563 u32 *qpr = vcpu->arch.qpr; 563 - u64 *fpr = 
vcpu->arch.fpr; 564 564 u32 ps0_out; 565 565 u32 ps0_in1, ps0_in2; 566 566 u32 ps1_out; ··· 569 571 WARN_ON(rc); 570 572 571 573 /* PS0 */ 572 - kvm_cvt_df(&fpr[reg_in1], &ps0_in1); 574 + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1); 573 575 574 576 if (scalar & SCALAR_LOW) 575 577 ps0_in2 = qpr[reg_in2]; 576 578 else 577 - kvm_cvt_df(&fpr[reg_in2], &ps0_in2); 579 + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2); 578 580 579 - func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2); 581 + func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2); 580 582 581 583 if (!(scalar & SCALAR_NO_PS0)) { 582 584 dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n", 583 585 ps0_in1, ps0_in2, ps0_out); 584 586 585 - kvm_cvt_fd(&ps0_out, &fpr[reg_out]); 587 + kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out)); 586 588 } 587 589 588 590 /* PS1 */ ··· 592 594 if (scalar & SCALAR_HIGH) 593 595 ps1_in2 = ps0_in2; 594 596 595 - func(&vcpu->arch.fpscr, &ps1_out, &ps1_in1, &ps1_in2); 597 + func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2); 596 598 597 599 if (!(scalar & SCALAR_NO_PS1)) { 598 600 qpr[reg_out] = ps1_out; ··· 610 612 u32 *dst, u32 *src1)) 611 613 { 612 614 u32 *qpr = vcpu->arch.qpr; 613 - u64 *fpr = vcpu->arch.fpr; 614 615 u32 ps0_out, ps0_in; 615 616 u32 ps1_in; 616 617 ··· 617 620 WARN_ON(rc); 618 621 619 622 /* PS0 */ 620 - kvm_cvt_df(&fpr[reg_in], &ps0_in); 621 - func(&vcpu->arch.fpscr, &ps0_out, &ps0_in); 623 + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in); 624 + func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in); 622 625 623 626 dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n", 624 627 ps0_in, ps0_out); 625 628 626 - kvm_cvt_fd(&ps0_out, &fpr[reg_out]); 629 + kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out)); 627 630 628 631 /* PS1 */ 629 632 ps1_in = qpr[reg_in]; 630 - func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in); 633 + func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in); 631 634 632 635 dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n", 633 636 ps1_in, qpr[reg_out]); ··· 646 649 int ax_rc = inst_get_field(inst, 21, 25); 647 650 short full_d = inst_get_field(inst, 16, 31); 648 651 649 - u64 *fpr_d = &vcpu->arch.fpr[ax_rd]; 650 - u64 *fpr_a = &vcpu->arch.fpr[ax_ra]; 651 - u64 *fpr_b = &vcpu->arch.fpr[ax_rb]; 652 - u64 *fpr_c = &vcpu->arch.fpr[ax_rc]; 652 + u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd); 653 + u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra); 654 + u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb); 655 + u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc); 653 656 654 657 bool rcomp = (inst & 1) ? true : false; 655 658 u32 cr = kvmppc_get_cr(vcpu); ··· 671 674 /* Do we need to clear FE0 / FE1 here? Don't think so. 
*/ 672 675 673 676 #ifdef DEBUG 674 - for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { 677 + for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) { 675 678 u32 f; 676 - kvm_cvt_df(&vcpu->arch.fpr[i], &f); 679 + kvm_cvt_df(&VCPU_FPR(vcpu, i), &f); 677 680 dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n", 678 - i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]); 681 + i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]); 679 682 } 680 683 #endif 681 684 ··· 761 764 break; 762 765 } 763 766 case OP_4X_PS_NEG: 764 - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; 765 - vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL; 767 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb); 768 + VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL; 766 769 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; 767 770 vcpu->arch.qpr[ax_rd] ^= 0x80000000; 768 771 break; ··· 772 775 break; 773 776 case OP_4X_PS_MR: 774 777 WARN_ON(rcomp); 775 - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; 778 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb); 776 779 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; 777 780 break; 778 781 case OP_4X_PS_CMPO1: ··· 781 784 break; 782 785 case OP_4X_PS_NABS: 783 786 WARN_ON(rcomp); 784 - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; 785 - vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL; 787 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb); 788 + VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL; 786 789 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; 787 790 vcpu->arch.qpr[ax_rd] |= 0x80000000; 788 791 break; 789 792 case OP_4X_PS_ABS: 790 793 WARN_ON(rcomp); 791 - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; 792 - vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL; 794 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb); 795 + VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL; 793 796 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; 794 797 vcpu->arch.qpr[ax_rd] &= ~0x80000000; 795 798 break; 796 799 case OP_4X_PS_MERGE00: 797 800 WARN_ON(rcomp); 798 - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; 799 - /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ 800 - kvm_cvt_df(&vcpu->arch.fpr[ax_rb], 801 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra); 802 + /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */ 803 + kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb), 801 804 &vcpu->arch.qpr[ax_rd]); 802 805 break; 803 806 case OP_4X_PS_MERGE01: 804 807 WARN_ON(rcomp); 805 - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; 808 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra); 806 809 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; 807 810 break; 808 811 case OP_4X_PS_MERGE10: 809 812 WARN_ON(rcomp); 810 - /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ 813 + /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */ 811 814 kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], 812 - &vcpu->arch.fpr[ax_rd]); 813 - /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ 814 - kvm_cvt_df(&vcpu->arch.fpr[ax_rb], 815 + &VCPU_FPR(vcpu, ax_rd)); 816 + /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */ 817 + kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb), 815 818 &vcpu->arch.qpr[ax_rd]); 816 819 break; 817 820 case OP_4X_PS_MERGE11: 818 821 WARN_ON(rcomp); 819 - /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ 822 + /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */ 820 823 kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], 821 - &vcpu->arch.fpr[ax_rd]); 824 + &VCPU_FPR(vcpu, ax_rd)); 822 825 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; 823 826 break; 824 827 } ··· 853 856 case OP_4A_PS_SUM1: 854 857 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, 855 858 ax_rb, ax_ra, SCALAR_NO_PS0 | 
SCALAR_HIGH, fps_fadds); 856 - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc]; 859 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc); 857 860 break; 858 861 case OP_4A_PS_SUM0: 859 862 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ··· 1103 1106 case 59: 1104 1107 switch (inst_get_field(inst, 21, 30)) { 1105 1108 case OP_59_FADDS: 1106 - fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1109 + fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1107 1110 kvmppc_sync_qpr(vcpu, ax_rd); 1108 1111 break; 1109 1112 case OP_59_FSUBS: 1110 - fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1113 + fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1111 1114 kvmppc_sync_qpr(vcpu, ax_rd); 1112 1115 break; 1113 1116 case OP_59_FDIVS: 1114 - fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1117 + fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1115 1118 kvmppc_sync_qpr(vcpu, ax_rd); 1116 1119 break; 1117 1120 case OP_59_FRES: 1118 - fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1121 + fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); 1119 1122 kvmppc_sync_qpr(vcpu, ax_rd); 1120 1123 break; 1121 1124 case OP_59_FRSQRTES: 1122 - fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1125 + fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); 1123 1126 kvmppc_sync_qpr(vcpu, ax_rd); 1124 1127 break; 1125 1128 } 1126 1129 switch (inst_get_field(inst, 26, 30)) { 1127 1130 case OP_59_FMULS: 1128 - fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c); 1131 + fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c); 1129 1132 kvmppc_sync_qpr(vcpu, ax_rd); 1130 1133 break; 1131 1134 case OP_59_FMSUBS: 1132 - fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1135 + fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1133 1136 kvmppc_sync_qpr(vcpu, ax_rd); 1134 1137 break; 1135 1138 case OP_59_FMADDS: 1136 - fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1139 + fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1137 1140 kvmppc_sync_qpr(vcpu, ax_rd); 1138 1141 break; 1139 1142 case OP_59_FNMSUBS: 1140 - fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1143 + fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1141 1144 kvmppc_sync_qpr(vcpu, ax_rd); 1142 1145 break; 1143 1146 case OP_59_FNMADDS: 1144 - fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1147 + fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1145 1148 kvmppc_sync_qpr(vcpu, ax_rd); 1146 1149 break; 1147 1150 } ··· 1156 1159 break; 1157 1160 case OP_63_MFFS: 1158 1161 /* XXX missing CR */ 1159 - *fpr_d = vcpu->arch.fpscr; 1162 + *fpr_d = vcpu->arch.fp.fpscr; 1160 1163 break; 1161 1164 case OP_63_MTFSF: 1162 1165 /* XXX missing fm bits */ 1163 1166 /* XXX missing CR */ 1164 - vcpu->arch.fpscr = *fpr_b; 1167 + vcpu->arch.fp.fpscr = *fpr_b; 1165 1168 break; 1166 1169 case OP_63_FCMPU: 1167 1170 { ··· 1169 1172 u32 cr0_mask = 0xf0000000; 1170 1173 u32 cr_shift = inst_get_field(inst, 6, 8) * 4; 1171 1174 1172 - fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b); 1175 + fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b); 1173 1176 cr &= ~(cr0_mask >> cr_shift); 1174 1177 cr |= (cr & cr0_mask) >> cr_shift; 1175 1178 break; ··· 1180 1183 u32 cr0_mask = 0xf0000000; 1181 1184 u32 cr_shift = inst_get_field(inst, 6, 8) * 4; 1182 1185 1183 - fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b); 1186 + fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b); 1184 1187 cr &= ~(cr0_mask >> 
cr_shift); 1185 1188 cr |= (cr & cr0_mask) >> cr_shift; 1186 1189 break; 1187 1190 } 1188 1191 case OP_63_FNEG: 1189 - fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1192 + fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); 1190 1193 break; 1191 1194 case OP_63_FMR: 1192 1195 *fpr_d = *fpr_b; 1193 1196 break; 1194 1197 case OP_63_FABS: 1195 - fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1198 + fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); 1196 1199 break; 1197 1200 case OP_63_FCPSGN: 1198 - fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1201 + fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1199 1202 break; 1200 1203 case OP_63_FDIV: 1201 - fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1204 + fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1202 1205 break; 1203 1206 case OP_63_FADD: 1204 - fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1207 + fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1205 1208 break; 1206 1209 case OP_63_FSUB: 1207 - fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1210 + fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1208 1211 break; 1209 1212 case OP_63_FCTIW: 1210 - fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1213 + fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); 1211 1214 break; 1212 1215 case OP_63_FCTIWZ: 1213 - fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1216 + fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); 1214 1217 break; 1215 1218 case OP_63_FRSP: 1216 - fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1219 + fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); 1217 1220 kvmppc_sync_qpr(vcpu, ax_rd); 1218 1221 break; 1219 1222 case OP_63_FRSQRTE: ··· 1221 1224 double one = 1.0f; 1222 1225 1223 1226 /* fD = sqrt(fB) */ 1224 - fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1227 + fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); 1225 1228 /* fD = 1.0f / fD */ 1226 - fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d); 1229 + fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d); 1227 1230 break; 1228 1231 } 1229 1232 } 1230 1233 switch (inst_get_field(inst, 26, 30)) { 1231 1234 case OP_63_FMUL: 1232 - fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c); 1235 + fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c); 1233 1236 break; 1234 1237 case OP_63_FSEL: 1235 - fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1238 + fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1236 1239 break; 1237 1240 case OP_63_FMSUB: 1238 - fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1241 + fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1239 1242 break; 1240 1243 case OP_63_FMADD: 1241 - fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1244 + fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1242 1245 break; 1243 1246 case OP_63_FNMSUB: 1244 - fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1247 + fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1245 1248 break; 1246 1249 case OP_63_FNMADD: 1247 - fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1250 + fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1248 1251 break; 1249 1252 } 1250 1253 break; 1251 1254 } 1252 1255 1253 1256 #ifdef DEBUG 1254 - for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { 1257 + for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) { 1255 1258 u32 f; 1256 - kvm_cvt_df(&vcpu->arch.fpr[i], &f); 1259 + 
kvm_cvt_df(&VCPU_FPR(vcpu, i), &f); 1257 1260 dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f); 1258 1261 } 1259 1262 #endif
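The paired-singles emulator above stops indexing a raw vcpu->arch.fpr[] array and instead goes through VCPU_FPR() and vcpu->arch.fp.fpscr, so guest FP state lives in one structure that can also back the VSX view of the same registers. A minimal userspace sketch of what such an accessor buys (the [32][2] doubleword-pair layout and the TS_FPROFFSET index are assumptions for illustration, not the kernel's exact definitions):

    #include <stdio.h>

    /* Hypothetical mirror of a combined FP/VSX register file: each of
     * the 32 VSX registers is a pair of 64-bit doublewords, and the
     * classic FP register is one half of that pair. */
    #define TS_FPROFFSET 0   /* assumed: FP value lives in doubleword 0 */

    struct thread_fp_state_sketch {
            unsigned long long fpr[32][2]; /* VSX view: 32 x 128 bits */
            unsigned long long fpscr;
    };

    struct vcpu_sketch {
            struct { struct thread_fp_state_sketch fp; } arch;
    };

    /* Accessor in the spirit of VCPU_FPR(): hides the pair layout. */
    #define VCPU_FPR(vcpu, i) ((vcpu)->arch.fp.fpr[i][TS_FPROFFSET])

    int main(void)
    {
            struct vcpu_sketch v = { 0 };

            VCPU_FPR(&v, 3) = 0x4008000000000000ULL; /* 3.0 as a double */
            printf("fpr3 = 0x%llx, fpscr = 0x%llx\n",
                   VCPU_FPR(&v, 3), v.arch.fp.fpscr);
            return 0;
    }

Because every access funnels through the macro, changing the underlying layout (as this series does) touches one definition instead of every emulation case.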
+34 -121
arch/powerpc/kvm/book3s_pr.c
··· 41 41 #include <linux/vmalloc.h> 42 42 #include <linux/highmem.h> 43 43 #include <linux/module.h> 44 + #include <linux/miscdevice.h> 44 45 45 46 #include "book3s.h" 46 47 ··· 567 566 void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) 568 567 { 569 568 struct thread_struct *t = &current->thread; 570 - u64 *vcpu_fpr = vcpu->arch.fpr; 571 - #ifdef CONFIG_VSX 572 - u64 *vcpu_vsx = vcpu->arch.vsr; 573 - #endif 574 - u64 *thread_fpr = &t->fp_state.fpr[0][0]; 575 - int i; 576 569 577 570 /* 578 571 * VSX instructions can access FP and vector registers, so if ··· 589 594 * both the traditional FP registers and the added VSX 590 595 * registers into thread.fp_state.fpr[]. 591 596 */ 592 - if (current->thread.regs->msr & MSR_FP) 597 + if (t->regs->msr & MSR_FP) 593 598 giveup_fpu(current); 594 - for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) 595 - vcpu_fpr[i] = thread_fpr[get_fpr_index(i)]; 596 - 597 - vcpu->arch.fpscr = t->fp_state.fpscr; 598 - 599 - #ifdef CONFIG_VSX 600 - if (cpu_has_feature(CPU_FTR_VSX)) 601 - for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++) 602 - vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1]; 603 - #endif 599 + t->fp_save_area = NULL; 604 600 } 605 601 606 602 #ifdef CONFIG_ALTIVEC 607 603 if (msr & MSR_VEC) { 608 604 if (current->thread.regs->msr & MSR_VEC) 609 605 giveup_altivec(current); 610 - memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr)); 611 - vcpu->arch.vscr = t->vr_state.vscr; 606 + t->vr_save_area = NULL; 612 607 } 613 608 #endif 614 609 ··· 646 661 ulong msr) 647 662 { 648 663 struct thread_struct *t = &current->thread; 649 - u64 *vcpu_fpr = vcpu->arch.fpr; 650 - #ifdef CONFIG_VSX 651 - u64 *vcpu_vsx = vcpu->arch.vsr; 652 - #endif 653 - u64 *thread_fpr = &t->fp_state.fpr[0][0]; 654 - int i; 655 664 656 665 /* When we have paired singles, we emulate in software */ 657 666 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) ··· 683 704 #endif 684 705 685 706 if (msr & MSR_FP) { 686 - for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) 687 - thread_fpr[get_fpr_index(i)] = vcpu_fpr[i]; 688 - #ifdef CONFIG_VSX 689 - for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++) 690 - thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i]; 691 - #endif 692 - t->fp_state.fpscr = vcpu->arch.fpscr; 693 - t->fpexc_mode = 0; 694 - kvmppc_load_up_fpu(); 707 + enable_kernel_fp(); 708 + load_fp_state(&vcpu->arch.fp); 709 + t->fp_save_area = &vcpu->arch.fp; 695 710 } 696 711 697 712 if (msr & MSR_VEC) { 698 713 #ifdef CONFIG_ALTIVEC 699 - memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr)); 700 - t->vr_state.vscr = vcpu->arch.vscr; 701 - t->vrsave = -1; 702 - kvmppc_load_up_altivec(); 714 + enable_kernel_altivec(); 715 + load_vr_state(&vcpu->arch.vr); 716 + t->vr_save_area = &vcpu->arch.vr; 703 717 #endif 704 718 } 705 719 706 - current->thread.regs->msr |= msr; 720 + t->regs->msr |= msr; 707 721 vcpu->arch.guest_owned_ext |= msr; 708 722 kvmppc_recalc_shadow_msr(vcpu); 709 723 ··· 715 743 if (!lost_ext) 716 744 return; 717 745 718 - if (lost_ext & MSR_FP) 719 - kvmppc_load_up_fpu(); 746 + if (lost_ext & MSR_FP) { 747 + enable_kernel_fp(); 748 + load_fp_state(&vcpu->arch.fp); 749 + } 720 750 #ifdef CONFIG_ALTIVEC 721 - if (lost_ext & MSR_VEC) 722 - kvmppc_load_up_altivec(); 751 + if (lost_ext & MSR_VEC) { 752 + enable_kernel_altivec(); 753 + load_vr_state(&vcpu->arch.vr); 754 + } 723 755 #endif 724 756 current->thread.regs->msr |= lost_ext; 725 757 } ··· 849 873 /* We're good on these - the host merely wanted to get our attention */ 850 874 case 
BOOK3S_INTERRUPT_DECREMENTER: 851 875 case BOOK3S_INTERRUPT_HV_DECREMENTER: 876 + case BOOK3S_INTERRUPT_DOORBELL: 852 877 vcpu->stat.dec_exits++; 853 878 r = RESUME_GUEST; 854 879 break; ··· 1022 1045 * and if we really did time things so badly, then we just exit 1023 1046 * again due to a host external interrupt. 1024 1047 */ 1025 - local_irq_disable(); 1026 1048 s = kvmppc_prepare_to_enter(vcpu); 1027 - if (s <= 0) { 1028 - local_irq_enable(); 1049 + if (s <= 0) 1029 1050 r = s; 1030 - } else { 1051 + else { 1052 + /* interrupts now hard-disabled */ 1031 1053 kvmppc_fix_ee_before_entry(); 1032 1054 } 1055 + 1033 1056 kvmppc_handle_lost_ext(vcpu); 1034 1057 } 1035 1058 ··· 1110 1133 case KVM_REG_PPC_HIOR: 1111 1134 *val = get_reg_val(id, to_book3s(vcpu)->hior); 1112 1135 break; 1113 - #ifdef CONFIG_VSX 1114 - case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: { 1115 - long int i = id - KVM_REG_PPC_VSR0; 1116 - 1117 - if (!cpu_has_feature(CPU_FTR_VSX)) { 1118 - r = -ENXIO; 1119 - break; 1120 - } 1121 - val->vsxval[0] = vcpu->arch.fpr[i]; 1122 - val->vsxval[1] = vcpu->arch.vsr[i]; 1123 - break; 1124 - } 1125 - #endif /* CONFIG_VSX */ 1126 1136 default: 1127 1137 r = -EINVAL; 1128 1138 break; ··· 1128 1164 to_book3s(vcpu)->hior = set_reg_val(id, *val); 1129 1165 to_book3s(vcpu)->hior_explicit = true; 1130 1166 break; 1131 - #ifdef CONFIG_VSX 1132 - case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: { 1133 - long int i = id - KVM_REG_PPC_VSR0; 1134 - 1135 - if (!cpu_has_feature(CPU_FTR_VSX)) { 1136 - r = -ENXIO; 1137 - break; 1138 - } 1139 - vcpu->arch.fpr[i] = val->vsxval[0]; 1140 - vcpu->arch.vsr[i] = val->vsxval[1]; 1141 - break; 1142 - } 1143 - #endif /* CONFIG_VSX */ 1144 1167 default: 1145 1168 r = -EINVAL; 1146 1169 break; ··· 1225 1274 static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 1226 1275 { 1227 1276 int ret; 1228 - struct thread_fp_state fp; 1229 - int fpexc_mode; 1230 1277 #ifdef CONFIG_ALTIVEC 1231 - struct thread_vr_state vr; 1232 1278 unsigned long uninitialized_var(vrsave); 1233 - int used_vr; 1234 1279 #endif 1235 - #ifdef CONFIG_VSX 1236 - int used_vsr; 1237 - #endif 1238 - ulong ext_msr; 1239 1280 1240 1281 /* Check if we can run the vcpu at all */ 1241 1282 if (!vcpu->arch.sane) { ··· 1242 1299 * really did time things so badly, then we just exit again due to 1243 1300 * a host external interrupt. 
1244 1301 */ 1245 - local_irq_disable(); 1246 1302 ret = kvmppc_prepare_to_enter(vcpu); 1247 - if (ret <= 0) { 1248 - local_irq_enable(); 1303 + if (ret <= 0) 1249 1304 goto out; 1250 - } 1305 + /* interrupts now hard-disabled */ 1251 1306 1252 - /* Save FPU state in stack */ 1307 + /* Save FPU state in thread_struct */ 1253 1308 if (current->thread.regs->msr & MSR_FP) 1254 1309 giveup_fpu(current); 1255 - fp = current->thread.fp_state; 1256 - fpexc_mode = current->thread.fpexc_mode; 1257 1310 1258 1311 #ifdef CONFIG_ALTIVEC 1259 - /* Save Altivec state in stack */ 1260 - used_vr = current->thread.used_vr; 1261 - if (used_vr) { 1262 - if (current->thread.regs->msr & MSR_VEC) 1263 - giveup_altivec(current); 1264 - vr = current->thread.vr_state; 1265 - vrsave = current->thread.vrsave; 1266 - } 1312 + /* Save Altivec state in thread_struct */ 1313 + if (current->thread.regs->msr & MSR_VEC) 1314 + giveup_altivec(current); 1267 1315 #endif 1268 1316 1269 1317 #ifdef CONFIG_VSX 1270 - /* Save VSX state in stack */ 1271 - used_vsr = current->thread.used_vsr; 1272 - if (used_vsr && (current->thread.regs->msr & MSR_VSX)) 1318 + /* Save VSX state in thread_struct */ 1319 + if (current->thread.regs->msr & MSR_VSX) 1273 1320 __giveup_vsx(current); 1274 1321 #endif 1275 - 1276 - /* Remember the MSR with disabled extensions */ 1277 - ext_msr = current->thread.regs->msr; 1278 1322 1279 1323 /* Preload FPU if it's enabled */ 1280 1324 if (vcpu->arch.shared->msr & MSR_FP) ··· 1276 1346 1277 1347 /* Make sure we save the guest FPU/Altivec/VSX state */ 1278 1348 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); 1279 - 1280 - current->thread.regs->msr = ext_msr; 1281 - 1282 - /* Restore FPU/VSX state from stack */ 1283 - current->thread.fp_state = fp; 1284 - current->thread.fpexc_mode = fpexc_mode; 1285 - 1286 - #ifdef CONFIG_ALTIVEC 1287 - /* Restore Altivec state from stack */ 1288 - if (used_vr && current->thread.used_vr) { 1289 - current->thread.vr_state = vr; 1290 - current->thread.vrsave = vrsave; 1291 - } 1292 - current->thread.used_vr = used_vr; 1293 - #endif 1294 - 1295 - #ifdef CONFIG_VSX 1296 - current->thread.used_vsr = used_vsr; 1297 - #endif 1298 1349 1299 1350 out: 1300 1351 vcpu->mode = OUTSIDE_GUEST_MODE; ··· 1517 1606 module_exit(kvmppc_book3s_exit_pr); 1518 1607 1519 1608 MODULE_LICENSE("GPL"); 1609 + MODULE_ALIAS_MISCDEV(KVM_MINOR); 1610 + MODULE_ALIAS("devname:kvm"); 1520 1611 #endif
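book3s_pr.c stops copying FP/Altivec registers between vcpu->arch and current->thread on every giveup/load; instead t->fp_save_area and t->vr_save_area are pointed at the vcpu state, so the host's ordinary save path writes guest registers straight into the vcpu structure. A rough sketch of the pointer-redirection idea (the struct shapes and the fake save helper are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    struct fp_state { double fpr[32]; };

    struct thread_sketch {
            struct fp_state own_fp;        /* task's normal FP state */
            struct fp_state *fp_save_area; /* where the next save lands */
    };

    /* Stand-in for what the context switch does when saving the FPU. */
    static void save_fpu(struct thread_sketch *t, double live_fpr0)
    {
            struct fp_state *dst =
                    t->fp_save_area ? t->fp_save_area : &t->own_fp;
            dst->fpr[0] = live_fpr0;
    }

    int main(void)
    {
            struct thread_sketch t = { 0 };
            struct fp_state guest_fp = { 0 };

            /* kvmppc_handle_ext(): load guest FP, redirect saves. */
            t.fp_save_area = &guest_fp;
            save_fpu(&t, 1.5);     /* preemption saves into the vcpu */

            /* kvmppc_giveup_ext(): stop redirecting. */
            t.fp_save_area = NULL;
            printf("guest fpr0 = %g\n", guest_fp.fpr[0]);
            return 0;
    }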
-47
arch/powerpc/kvm/book3s_rmhandlers.S
··· 162 162 mtsrr1 r6 163 163 RFI 164 164 165 - #if defined(CONFIG_PPC_BOOK3S_32) 166 - #define STACK_LR INT_FRAME_SIZE+4 167 - 168 - /* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */ 169 - #define MSR_EXT_START \ 170 - PPC_STL r20, _NIP(r1); \ 171 - mfmsr r20; \ 172 - LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \ 173 - andc r3,r20,r3; /* Disable DR,EE */ \ 174 - mtmsr r3; \ 175 - sync 176 - 177 - #define MSR_EXT_END \ 178 - mtmsr r20; /* Enable DR,EE */ \ 179 - sync; \ 180 - PPC_LL r20, _NIP(r1) 181 - 182 - #elif defined(CONFIG_PPC_BOOK3S_64) 183 - #define STACK_LR _LINK 184 - #define MSR_EXT_START 185 - #define MSR_EXT_END 186 - #endif 187 - 188 - /* 189 - * Activate current's external feature (FPU/Altivec/VSX) 190 - */ 191 - #define define_load_up(what) \ 192 - \ 193 - _GLOBAL(kvmppc_load_up_ ## what); \ 194 - PPC_STLU r1, -INT_FRAME_SIZE(r1); \ 195 - mflr r3; \ 196 - PPC_STL r3, STACK_LR(r1); \ 197 - MSR_EXT_START; \ 198 - \ 199 - bl FUNC(load_up_ ## what); \ 200 - \ 201 - MSR_EXT_END; \ 202 - PPC_LL r3, STACK_LR(r1); \ 203 - mtlr r3; \ 204 - addi r1, r1, INT_FRAME_SIZE; \ 205 - blr 206 - 207 - define_load_up(fpu) 208 - #ifdef CONFIG_ALTIVEC 209 - define_load_up(altivec) 210 - #endif 211 - 212 165 #include "book3s_segment.S"
+2
arch/powerpc/kvm/book3s_segment.S
··· 361 361 beqa BOOK3S_INTERRUPT_DECREMENTER 362 362 cmpwi r12, BOOK3S_INTERRUPT_PERFMON 363 363 beqa BOOK3S_INTERRUPT_PERFMON 364 + cmpwi r12, BOOK3S_INTERRUPT_DOORBELL 365 + beqa BOOK3S_INTERRUPT_DOORBELL 364 366 365 367 RFI 366 368 kvmppc_handler_trampoline_exit_end:
+3 -1
arch/powerpc/kvm/book3s_xics.c
··· 1246 1246 kvm->arch.xics = xics; 1247 1247 mutex_unlock(&kvm->lock); 1248 1248 1249 - if (ret) 1249 + if (ret) { 1250 + kfree(xics); 1250 1251 return ret; 1252 + } 1251 1253 1252 1254 xics_debugfs_init(xics); 1253 1255
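The xics hunk is a straightforward error-path leak fix: the create callback allocated the state and could then return an error without freeing it. The same one-line pattern shows up again in the mpic destroy path below. In miniature (names hypothetical):

    #include <stdlib.h>
    #include <errno.h>

    struct widget { int id; };

    /* Create-and-register helper: any failure after the allocation
     * must unwind the allocation before returning the error. */
    static int widget_create(struct widget **out, int want_fail)
    {
            struct widget *w = calloc(1, sizeof(*w));
            if (!w)
                    return -ENOMEM;

            if (want_fail) {   /* e.g. a later registration step failed */
                    free(w);   /* the fix: don't leak on the error path */
                    return -EEXIST;
            }

            *out = w;
            return 0;
    }

    int main(void)
    {
            struct widget *w = NULL;
            int rc = widget_create(&w, 0);

            free(w);
            return rc;
    }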
+6 -38
arch/powerpc/kvm/booke.c
··· 643 643 local_irq_enable(); 644 644 kvm_vcpu_block(vcpu); 645 645 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 646 - local_irq_disable(); 646 + hard_irq_disable(); 647 647 648 648 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); 649 649 r = 1; ··· 682 682 { 683 683 int ret, s; 684 684 struct debug_reg debug; 685 - #ifdef CONFIG_PPC_FPU 686 - struct thread_fp_state fp; 687 - int fpexc_mode; 688 - #endif 689 685 690 686 if (!vcpu->arch.sane) { 691 687 kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 692 688 return -EINVAL; 693 689 } 694 690 695 - local_irq_disable(); 696 691 s = kvmppc_prepare_to_enter(vcpu); 697 692 if (s <= 0) { 698 - local_irq_enable(); 699 693 ret = s; 700 694 goto out; 701 695 } 696 + /* interrupts now hard-disabled */ 702 697 703 698 #ifdef CONFIG_PPC_FPU 704 699 /* Save userspace FPU state in stack */ 705 700 enable_kernel_fp(); 706 - fp = current->thread.fp_state; 707 - fpexc_mode = current->thread.fpexc_mode; 708 - 709 - /* Restore guest FPU state to thread */ 710 - memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr, 711 - sizeof(vcpu->arch.fpr)); 712 - current->thread.fp_state.fpscr = vcpu->arch.fpscr; 713 701 714 702 /* 715 703 * Since we can't trap on MSR_FP in GS-mode, we consider the guest ··· 716 728 debug = current->thread.debug; 717 729 current->thread.debug = vcpu->arch.shadow_dbg_reg; 718 730 731 + vcpu->arch.pgdir = current->mm->pgd; 719 732 kvmppc_fix_ee_before_entry(); 720 733 721 734 ret = __kvmppc_vcpu_run(kvm_run, vcpu); ··· 732 743 kvmppc_save_guest_fp(vcpu); 733 744 734 745 vcpu->fpu_active = 0; 735 - 736 - /* Save guest FPU state from thread */ 737 - memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr, 738 - sizeof(vcpu->arch.fpr)); 739 - vcpu->arch.fpscr = current->thread.fp_state.fpscr; 740 - 741 - /* Restore userspace FPU state from stack */ 742 - current->thread.fp_state = fp; 743 - current->thread.fpexc_mode = fpexc_mode; 744 746 #endif 745 747 746 748 out: ··· 877 897 int r = RESUME_HOST; 878 898 int s; 879 899 int idx; 880 - 881 - #ifdef CONFIG_PPC64 882 - WARN_ON(local_paca->irq_happened != 0); 883 - #endif 884 - 885 - /* 886 - * We enter with interrupts disabled in hardware, but 887 - * we need to call hard_irq_disable anyway to ensure that 888 - * the software state is kept in sync. 889 - */ 890 - hard_irq_disable(); 891 900 892 901 /* update before a new last_exit_type is rewritten */ 893 902 kvmppc_update_timing_stats(vcpu); ··· 1186 1217 * aren't already exiting to userspace for some other reason. 1187 1218 */ 1188 1219 if (!(r & RESUME_HOST)) { 1189 - local_irq_disable(); 1190 1220 s = kvmppc_prepare_to_enter(vcpu); 1191 - if (s <= 0) { 1192 - local_irq_enable(); 1221 + if (s <= 0) 1193 1222 r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); 1194 - } else { 1223 + else { 1224 + /* interrupts now hard-disabled */ 1195 1225 kvmppc_fix_ee_before_entry(); 1196 1226 } 1197 1227 }
+4 -1
arch/powerpc/kvm/booke.h
··· 136 136 { 137 137 #ifdef CONFIG_PPC_FPU 138 138 if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) { 139 - load_up_fpu(); 139 + enable_kernel_fp(); 140 + load_fp_state(&vcpu->arch.fp); 141 + current->thread.fp_save_area = &vcpu->arch.fp; 140 142 current->thread.regs->msr |= MSR_FP; 141 143 } 142 144 #endif ··· 153 151 #ifdef CONFIG_PPC_FPU 154 152 if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP)) 155 153 giveup_fpu(current); 154 + current->thread.fp_save_area = NULL; 156 155 #endif 157 156 } 158 157
+11
arch/powerpc/kvm/bookehv_interrupts.S
··· 33 33 34 34 #ifdef CONFIG_64BIT 35 35 #include <asm/exception-64e.h> 36 + #include <asm/hw_irq.h> 37 + #include <asm/irqflags.h> 36 38 #else 37 39 #include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */ 38 40 #endif ··· 468 466 rlwinm r3, r3, 0, ~SPRN_EPCR_DMIUH 469 467 mtspr SPRN_EPCR, r3 470 468 isync 469 + 470 + #ifdef CONFIG_64BIT 471 + /* 472 + * We enter with interrupts disabled in hardware, but 473 + * we need to call RECONCILE_IRQ_STATE to ensure 474 + * that the software state is kept in sync. 475 + */ 476 + RECONCILE_IRQ_STATE(r3,r5) 477 + #endif 471 478 472 479 /* Switch to kernel stack and jump to handler. */ 473 480 PPC_LL r3, HOST_RUN(r1)
+4
arch/powerpc/kvm/e500.c
··· 16 16 #include <linux/slab.h> 17 17 #include <linux/err.h> 18 18 #include <linux/export.h> 19 + #include <linux/module.h> 20 + #include <linux/miscdevice.h> 19 21 20 22 #include <asm/reg.h> 21 23 #include <asm/cputable.h> ··· 575 573 576 574 module_init(kvmppc_e500_init); 577 575 module_exit(kvmppc_e500_exit); 576 + MODULE_ALIAS_MISCDEV(KVM_MINOR); 577 + MODULE_ALIAS("devname:kvm");
+5 -3
arch/powerpc/kvm/e500.h
··· 31 31 #define E500_TLB_NUM 2 32 32 33 33 /* entry is mapped somewhere in host TLB */ 34 - #define E500_TLB_VALID (1 << 0) 34 + #define E500_TLB_VALID (1 << 31) 35 35 /* TLB1 entry is mapped by host TLB1, tracked by bitmaps */ 36 - #define E500_TLB_BITMAP (1 << 1) 36 + #define E500_TLB_BITMAP (1 << 30) 37 37 /* TLB1 entry is mapped by host TLB0 */ 38 - #define E500_TLB_TLB0 (1 << 2) 38 + #define E500_TLB_TLB0 (1 << 29) 39 + /* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */ 40 + #define E500_TLB_MAS2_ATTR (0x7f) 39 41 40 42 struct tlbe_ref { 41 43 pfn_t pfn; /* valid only for TLB0, except briefly */
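Moving the three bookkeeping flags from bits 0-2 up to bits 31-29 frees the low byte of ref->flags, so bits 6-0 can carry the guest's MAS2 attributes (X1/X0 plus WIMGE) alongside them, as the new E500_TLB_MAS2_ATTR mask indicates; e500_mmu_host.c below fills those bits in. A small sketch of the packing (mask values taken from the hunk; the example attribute bits are illustrative):

    #include <assert.h>
    #include <stdio.h>

    #define E500_TLB_VALID     (1u << 31)
    #define E500_TLB_BITMAP    (1u << 30)
    #define E500_TLB_TLB0      (1u << 29)
    #define E500_TLB_MAS2_ATTR 0x7fu     /* bits 6-0: X1 X0 W I M G E */

    int main(void)
    {
            unsigned int mas2_wimge = 0x0a;  /* I and G set (example) */
            unsigned int flags = E500_TLB_VALID | mas2_wimge;

            /* The two groups of bits never collide... */
            assert((E500_TLB_MAS2_ATTR &
                    (E500_TLB_VALID | E500_TLB_BITMAP |
                     E500_TLB_TLB0)) == 0);

            /* ...so the shadow-TLB writer can peel the attributes out. */
            printf("wimge = 0x%x\n", flags & E500_TLB_MAS2_ATTR);
            return 0;
    }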
+1 -1
arch/powerpc/kvm/e500_mmu.c
··· 127 127 } 128 128 129 129 static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, 130 - unsigned int eaddr, int as) 130 + gva_t eaddr, int as) 131 131 { 132 132 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 133 133 unsigned int victim, tsized;
+34 -25
arch/powerpc/kvm/e500_mmu_host.c
··· 65 65 return mas3; 66 66 } 67 67 68 - static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) 69 - { 70 - #ifdef CONFIG_SMP 71 - return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M; 72 - #else 73 - return mas2 & MAS2_ATTRIB_MASK; 74 - #endif 75 - } 76 - 77 68 /* 78 69 * writing shadow tlb entry to host TLB 79 70 */ ··· 222 231 ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); 223 232 } 224 233 225 - /* Already invalidated in between */ 226 - if (!(ref->flags & E500_TLB_VALID)) 227 - return; 228 - 229 - /* Guest tlbe is backed by at most one host tlbe per shadow pid. */ 230 - kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); 234 + /* 235 + * If TLB entry is still valid then it's a TLB0 entry, and thus 236 + * backed by at most one host tlbe per shadow pid 237 + */ 238 + if (ref->flags & E500_TLB_VALID) 239 + kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); 231 240 232 241 /* Mark the TLB as not backed by the host anymore */ 233 - ref->flags &= ~E500_TLB_VALID; 242 + ref->flags = 0; 234 243 } 235 244 236 245 static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) ··· 240 249 241 250 static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, 242 251 struct kvm_book3e_206_tlb_entry *gtlbe, 243 - pfn_t pfn) 252 + pfn_t pfn, unsigned int wimg) 244 253 { 245 254 ref->pfn = pfn; 246 - ref->flags |= E500_TLB_VALID; 255 + ref->flags = E500_TLB_VALID; 256 + 257 + /* Use guest supplied MAS2_G and MAS2_E */ 258 + ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; 247 259 248 260 /* Mark the page accessed */ 249 261 kvm_set_pfn_accessed(pfn); ··· 310 316 311 317 /* Force IPROT=0 for all guest mappings. */ 312 318 stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; 313 - stlbe->mas2 = (gvaddr & MAS2_EPN) | 314 - e500_shadow_mas2_attrib(gtlbe->mas2, pr); 319 + stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR); 315 320 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | 316 321 e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); 317 322 ··· 332 339 int ret = 0; 333 340 unsigned long mmu_seq; 334 341 struct kvm *kvm = vcpu_e500->vcpu.kvm; 342 + unsigned long tsize_pages = 0; 343 + pte_t *ptep; 344 + unsigned int wimg = 0; 345 + pgd_t *pgdir; 335 346 336 347 /* used to check for invalidations in progress */ 337 348 mmu_seq = kvm->mmu_notifier_seq; ··· 402 405 */ 403 406 404 407 for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { 405 - unsigned long gfn_start, gfn_end, tsize_pages; 408 + unsigned long gfn_start, gfn_end; 406 409 tsize_pages = 1 << (tsize - 2); 407 410 408 411 gfn_start = gfn & ~(tsize_pages - 1); ··· 444 447 } 445 448 446 449 if (likely(!pfnmap)) { 447 - unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); 450 + tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); 448 451 pfn = gfn_to_pfn_memslot(slot, gfn); 449 452 if (is_error_noslot_pfn(pfn)) { 450 - printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", 451 - (long)gfn); 453 + if (printk_ratelimit()) 454 + pr_err("%s: real page not found for gfn %lx\n", 455 + __func__, (long)gfn); 452 456 return -EINVAL; 453 457 } 454 458 ··· 464 466 goto out; 465 467 } 466 468 467 - kvmppc_e500_ref_setup(ref, gtlbe, pfn); 469 + 470 + pgdir = vcpu_e500->vcpu.arch.pgdir; 471 + ptep = lookup_linux_ptep(pgdir, hva, &tsize_pages); 472 + if (pte_present(*ptep)) 473 + wimg = (*ptep >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; 474 + else { 475 + if (printk_ratelimit()) 476 + pr_err("%s: pte not present: gfn %lx, pfn %lx\n", 477 + __func__, (long)gfn, pfn); 478 + return -EINVAL; 479 + } 480 + kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); 468 
481 469 482 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, 470 483 ref, gvaddr, stlbe);
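e500_mmu_host.c stops synthesizing shadow-TLB cache attributes (the old e500_shadow_mas2_attrib()) and instead inherits WIMG from the Linux PTE backing the page, keeping KVM's mappings coherent with whatever attributes the host set up. A compact sketch of the extract-and-merge step (the PTE shift here is a placeholder, not the real PTE layout):

    #include <stdio.h>

    /* Placeholder PTE layout: assume WIMGE sits at a fixed shift. */
    #define PTE_WIMGE_SHIFT  19      /* assumed position, for illustration */
    #define MAS2_WIMGE_MASK  0x1fu   /* W I M G E */
    #define MAS2_ATTRIB_MASK 0x7fu   /* guest-controlled X1 X0 + WIMGE */

    static unsigned int shadow_mas2_attr(unsigned long pte,
                                         unsigned long guest_mas2)
    {
            /* Host decides cacheability (WIMG from its PTE)... */
            unsigned int wimg = (pte >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;

            /* ...merged with the guest's own MAS2 attribute bits. */
            return (unsigned int)(guest_mas2 & MAS2_ATTRIB_MASK) | wimg;
    }

    int main(void)
    {
            unsigned long pte = 0x8ul << PTE_WIMGE_SHIFT;  /* I bit set */
            printf("attrs = 0x%x\n", shadow_mas2_attr(pte, 0x2));
            return 0;
    }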
+4
arch/powerpc/kvm/e500mc.c
··· 16 16 #include <linux/slab.h> 17 17 #include <linux/err.h> 18 18 #include <linux/export.h> 19 + #include <linux/miscdevice.h> 20 + #include <linux/module.h> 19 21 20 22 #include <asm/reg.h> 21 23 #include <asm/cputable.h> ··· 393 391 394 392 module_init(kvmppc_e500mc_init); 395 393 module_exit(kvmppc_e500mc_exit); 394 + MODULE_ALIAS_MISCDEV(KVM_MINOR); 395 + MODULE_ALIAS("devname:kvm");
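e500.c and e500mc.c gain the same module aliases as book3s_pr.c above, so udev can autoload the right KVM module when /dev/kvm is first opened. MODULE_ALIAS_MISCDEV(KVM_MINOR) boils down to a char-major alias; a tiny sketch of the string udev requests (assuming the usual misc major 10 and KVM_MINOR 232):

    #include <stdio.h>

    #define MISC_MAJOR 10
    #define KVM_MINOR  232   /* assumed value of the /dev/kvm misc minor */

    int main(void)
    {
            /* udev asks for this modalias when the char device node is
             * first opened; MODULE_ALIAS_MISCDEV(KVM_MINOR) makes the
             * module answer to it. */
            printf("char-major-%d-%d\n", MISC_MAJOR, KVM_MINOR);
            return 0;
    }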
-1
arch/powerpc/kvm/emulate.c
··· 219 219 * lmw 220 220 * stmw 221 221 * 222 - * XXX is_bigendian should depend on MMU mapping or MSR[LE] 223 222 */ 224 223 /* XXX Should probably auto-generate instruction decoding for a particular core 225 224 * from opcode tables in the future. */
+1
arch/powerpc/kvm/mpic.c
··· 1635 1635 1636 1636 dev->kvm->arch.mpic = NULL; 1637 1637 kfree(opp); 1638 + kfree(dev); 1638 1639 } 1639 1640 1640 1641 static int mpic_set_default_irq_routing(struct openpic *opp)
+35 -23
arch/powerpc/kvm/powerpc.c
··· 68 68 */ 69 69 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) 70 70 { 71 - int r = 1; 71 + int r; 72 72 73 - WARN_ON_ONCE(!irqs_disabled()); 73 + WARN_ON(irqs_disabled()); 74 + hard_irq_disable(); 75 + 74 76 while (true) { 75 77 if (need_resched()) { 76 78 local_irq_enable(); 77 79 cond_resched(); 78 - local_irq_disable(); 80 + hard_irq_disable(); 79 81 continue; 80 82 } 81 83 ··· 103 101 local_irq_enable(); 104 102 trace_kvm_check_requests(vcpu); 105 103 r = kvmppc_core_check_requests(vcpu); 106 - local_irq_disable(); 104 + hard_irq_disable(); 107 105 if (r > 0) 108 106 continue; 109 107 break; ··· 115 113 continue; 116 114 } 117 115 118 - #ifdef CONFIG_PPC64 119 - /* lazy EE magic */ 120 - hard_irq_disable(); 121 - if (lazy_irq_pending()) { 122 - /* Got an interrupt in between, try again */ 123 - local_irq_enable(); 124 - local_irq_disable(); 125 - kvm_guest_exit(); 126 - continue; 127 - } 128 - #endif 129 - 130 116 kvm_guest_enter(); 131 - break; 117 + return 1; 132 118 } 133 119 120 + /* return to host */ 121 + local_irq_enable(); 134 122 return r; 135 123 } 136 124 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); ··· 648 656 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 649 657 break; 650 658 case KVM_MMIO_REG_FPR: 651 - vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 659 + VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; 652 660 break; 653 661 #ifdef CONFIG_PPC_BOOK3S 654 662 case KVM_MMIO_REG_QPR: 655 663 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 656 664 break; 657 665 case KVM_MMIO_REG_FQPR: 658 - vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 666 + VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; 659 667 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 660 668 break; 661 669 #endif ··· 665 673 } 666 674 667 675 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 668 - unsigned int rt, unsigned int bytes, int is_bigendian) 676 + unsigned int rt, unsigned int bytes, 677 + int is_default_endian) 669 678 { 670 679 int idx, ret; 680 + int is_bigendian; 681 + 682 + if (kvmppc_need_byteswap(vcpu)) { 683 + /* Default endianness is "little endian". */ 684 + is_bigendian = !is_default_endian; 685 + } else { 686 + /* Default endianness is "big endian". */ 687 + is_bigendian = is_default_endian; 688 + } 671 689 672 690 if (bytes > sizeof(run->mmio.data)) { 673 691 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, ··· 713 711 714 712 /* Same as above, but sign extends */ 715 713 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 716 - unsigned int rt, unsigned int bytes, int is_bigendian) 714 + unsigned int rt, unsigned int bytes, 715 + int is_default_endian) 717 716 { 718 717 int r; 719 718 720 719 vcpu->arch.mmio_sign_extend = 1; 721 - r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); 720 + r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian); 722 721 723 722 return r; 724 723 } 725 724 726 725 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 727 - u64 val, unsigned int bytes, int is_bigendian) 726 + u64 val, unsigned int bytes, int is_default_endian) 728 727 { 729 728 void *data = run->mmio.data; 730 729 int idx, ret; 730 + int is_bigendian; 731 + 732 + if (kvmppc_need_byteswap(vcpu)) { 733 + /* Default endianness is "little endian". */ 734 + is_bigendian = !is_default_endian; 735 + } else { 736 + /* Default endianness is "big endian". 
*/ 737 + is_bigendian = is_default_endian; 738 + } 731 739 732 740 if (bytes > sizeof(run->mmio.data)) { 733 741 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
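Two things change in powerpc.c: kvmppc_prepare_to_enter() now owns the hard_irq_disable() dance itself (so its callers in book3s_pr.c and booke.c above drop their local_irq_disable()/enable() pairs), and the MMIO helpers take is_default_endian instead of is_bigendian, deriving the real byte order from kvmppc_need_byteswap() for little-endian guests. That derivation is just an XOR, sketched here (the byteswap predicate is stubbed):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stub: true when the guest runs with the opposite byte order
     * from the platform default (e.g. an LE guest on a BE host). */
    static bool kvmppc_need_byteswap(void) { return true; }

    static bool mmio_is_bigendian(bool is_default_endian)
    {
            /* A byteswapped guest flips the meaning of "default". */
            return is_default_endian ^ kvmppc_need_byteswap();
    }

    int main(void)
    {
            printf("default-endian load is %s-endian for this guest\n",
                   mmio_is_bigendian(true) ? "big" : "little");
            return 0;
    }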
+14 -1
arch/s390/include/asm/kvm_host.h
··· 106 106 __u64 gbea; /* 0x0180 */ 107 107 __u8 reserved188[24]; /* 0x0188 */ 108 108 __u32 fac; /* 0x01a0 */ 109 - __u8 reserved1a4[92]; /* 0x01a4 */ 109 + __u8 reserved1a4[68]; /* 0x01a4 */ 110 + __u64 itdba; /* 0x01e8 */ 111 + __u8 reserved1f0[16]; /* 0x01f0 */ 110 112 } __attribute__((packed)); 113 + 114 + struct kvm_s390_itdb { 115 + __u8 data[256]; 116 + } __packed; 117 + 118 + struct sie_page { 119 + struct kvm_s390_sie_block sie_block; 120 + __u8 reserved200[1024]; /* 0x0200 */ 121 + struct kvm_s390_itdb itdb; /* 0x0600 */ 122 + __u8 reserved700[2304]; /* 0x0700 */ 123 + } __packed; 111 124 112 125 struct kvm_vcpu_stat { 113 126 u32 exit_userspace;
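The s390 control block grows an interception TDB: the SIE block is now embedded in a full page whose layout is fixed by the hardware, with the ITDB at offset 0x600. A sketch of how that layout can be pinned down at compile time (field sizes follow the hunk above; the struct names and static-assert style are illustrative):

    #include <stddef.h>

    struct kvm_s390_itdb_sketch { unsigned char data[256]; };

    struct sie_page_sketch {
            unsigned char sie_block[512];     /* 0x000: control block */
            unsigned char reserved200[1024];  /* 0x200 */
            struct kvm_s390_itdb_sketch itdb; /* 0x600: transaction diag */
            unsigned char reserved700[2304];  /* 0x700 */
    };

    /* The hardware dictates these offsets, so check them at build time. */
    _Static_assert(offsetof(struct sie_page_sketch, itdb) == 0x600,
                   "ITDB must sit at 0x600 in the SIE page");
    _Static_assert(sizeof(struct sie_page_sketch) == 4096,
                   "SIE page must be exactly one 4K page");

    int main(void) { return 0; }

With the ITDB inside the same zeroed page as the SIE block, handle_prog() in intercept.c below can copy it out to the guest's program-interruption TDB and clear it with plain memory operations.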
+11
arch/s390/kvm/intercept.c
··· 112 112 static int handle_prog(struct kvm_vcpu *vcpu) 113 113 { 114 114 vcpu->stat.exit_program_interruption++; 115 + 116 + /* Restore ITDB to Program-Interruption TDB in guest memory */ 117 + if (IS_TE_ENABLED(vcpu) && 118 + !(current->thread.per_flags & PER_FLAG_NO_TE) && 119 + IS_ITDB_VALID(vcpu)) { 120 + copy_to_guest(vcpu, TDB_ADDR, vcpu->arch.sie_block->itdba, 121 + sizeof(struct kvm_s390_itdb)); 122 + memset((void *) vcpu->arch.sie_block->itdba, 0, 123 + sizeof(struct kvm_s390_itdb)); 124 + } 125 + 115 126 trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc); 116 127 return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc); 117 128 }
+11 -6
arch/s390/kvm/kvm-s390.c
··· 395 395 CPUSTAT_STOPPED | 396 396 CPUSTAT_GED); 397 397 vcpu->arch.sie_block->ecb = 6; 398 + if (test_vfacility(50) && test_vfacility(73)) 399 + vcpu->arch.sie_block->ecb |= 0x10; 400 + 398 401 vcpu->arch.sie_block->ecb2 = 8; 399 402 vcpu->arch.sie_block->eca = 0xC1002001U; 400 403 vcpu->arch.sie_block->fac = (int) (long) vfacilities; ··· 414 411 unsigned int id) 415 412 { 416 413 struct kvm_vcpu *vcpu; 414 + struct sie_page *sie_page; 417 415 int rc = -EINVAL; 418 416 419 417 if (id >= KVM_MAX_VCPUS) ··· 426 422 if (!vcpu) 427 423 goto out; 428 424 429 - vcpu->arch.sie_block = (struct kvm_s390_sie_block *) 430 - get_zeroed_page(GFP_KERNEL); 431 - 432 - if (!vcpu->arch.sie_block) 425 + sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL); 426 + if (!sie_page) 433 427 goto out_free_cpu; 428 + 429 + vcpu->arch.sie_block = &sie_page->sie_block; 430 + vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; 434 431 435 432 vcpu->arch.sie_block->icpua = id; 436 433 if (!kvm_is_ucontrol(kvm)) { ··· 1187 1182 return -ENOMEM; 1188 1183 } 1189 1184 memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16); 1190 - vfacilities[0] &= 0xff82fff3f47c0000UL; 1191 - vfacilities[1] &= 0x001c000000000000UL; 1185 + vfacilities[0] &= 0xff82fff3f4fc2000UL; 1186 + vfacilities[1] &= 0x005c000000000000UL; 1192 1187 return 0; 1193 1188 } 1194 1189
+6
arch/s390/kvm/kvm-s390.h
··· 26 26 27 27 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu); 28 28 29 + /* Transactional Memory Execution related macros */ 30 + #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) 31 + #define TDB_ADDR 0x1800UL 32 + #define TDB_FORMAT1 1 33 + #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) 34 + 29 35 #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\ 30 36 do { \ 31 37 debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
+12 -21
arch/x86/include/asm/kvm_para.h
··· 85 85 return ret; 86 86 } 87 87 88 - static inline uint32_t kvm_cpuid_base(void) 89 - { 90 - if (boot_cpu_data.cpuid_level < 0) 91 - return 0; /* So we don't blow up on old processors */ 92 - 93 - if (cpu_has_hypervisor) 94 - return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0); 95 - 96 - return 0; 97 - } 98 - 99 - static inline bool kvm_para_available(void) 100 - { 101 - return kvm_cpuid_base() != 0; 102 - } 103 - 104 - static inline unsigned int kvm_arch_para_features(void) 105 - { 106 - return cpuid_eax(KVM_CPUID_FEATURES); 107 - } 108 - 109 88 #ifdef CONFIG_KVM_GUEST 89 + bool kvm_para_available(void); 90 + unsigned int kvm_arch_para_features(void); 110 91 void __init kvm_guest_init(void); 111 92 void kvm_async_pf_task_wait(u32 token); 112 93 void kvm_async_pf_task_wake(u32 token); ··· 106 125 #define kvm_guest_init() do {} while (0) 107 126 #define kvm_async_pf_task_wait(T) do {} while(0) 108 127 #define kvm_async_pf_task_wake(T) do {} while(0) 128 + 129 + static inline bool kvm_para_available(void) 130 + { 131 + return 0; 132 + } 133 + 134 + static inline unsigned int kvm_arch_para_features(void) 135 + { 136 + return 0; 137 + } 109 138 110 139 static inline u32 kvm_read_and_reset_pf_reason(void) 111 140 {
+32
arch/x86/kernel/kvm.c
··· 500 500 #endif 501 501 } 502 502 503 + static noinline uint32_t __kvm_cpuid_base(void) 504 + { 505 + if (boot_cpu_data.cpuid_level < 0) 506 + return 0; /* So we don't blow up on old processors */ 507 + 508 + if (cpu_has_hypervisor) 509 + return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0); 510 + 511 + return 0; 512 + } 513 + 514 + static inline uint32_t kvm_cpuid_base(void) 515 + { 516 + static int kvm_cpuid_base = -1; 517 + 518 + if (kvm_cpuid_base == -1) 519 + kvm_cpuid_base = __kvm_cpuid_base(); 520 + 521 + return kvm_cpuid_base; 522 + } 523 + 524 + bool kvm_para_available(void) 525 + { 526 + return kvm_cpuid_base() != 0; 527 + } 528 + EXPORT_SYMBOL_GPL(kvm_para_available); 529 + 530 + unsigned int kvm_arch_para_features(void) 531 + { 532 + return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES); 533 + } 534 + 503 535 static uint32_t __init kvm_detect(void) 504 536 { 505 537 return kvm_cpuid_base();
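The KVM cpuid base is no longer recomputed on every call: hypervisor_cpuid_base() scans the 0x40000000, 0x40000100, ... range for the "KVMKVMKVM" signature, and the result is cached in a static so later feature queries can be addressed relative to it (a base of 0x40000100 puts the features leaf at 0x40000101). A hedged userspace sketch of the scan-and-cache pattern (the probe function is a stand-in, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    #define KVM_CPUID_FEATURES_FN 0x40000001u  /* relative to 0x40000000 */

    /* Stand-in for hypervisor_cpuid_base(): pretend the signature was
     * found at the second possible slot. */
    static uint32_t probe_cpuid_base(void) { return 0x40000100u; }

    static uint32_t kvm_cpuid_base(void)
    {
            static uint32_t base = UINT32_MAX; /* sentinel: not probed */

            if (base == UINT32_MAX)
                    base = probe_cpuid_base(); /* cache the slow probe */
            return base;
    }

    int main(void)
    {
            /* OR-ing in the low bits relocates the features leaf to
             * the discovered base: 0x40000100 | 0x40000001 == 0x40000101. */
            printf("features leaf at 0x%x\n",
                   kvm_cpuid_base() | KVM_CPUID_FEATURES_FN);
            return 0;
    }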
+8
arch/x86/kvm/cpuid.h
··· 72 72 return best && (best->ecx & bit(X86_FEATURE_PCID)); 73 73 } 74 74 75 + static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu) 76 + { 77 + struct kvm_cpuid_entry2 *best; 78 + 79 + best = kvm_find_cpuid_entry(vcpu, 1, 0); 80 + return best && (best->ecx & bit(X86_FEATURE_X2APIC)); 81 + } 82 + 75 83 #endif
+1 -1
arch/x86/kvm/lapic.h
··· 65 65 struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map); 66 66 67 67 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu); 68 - void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data); 68 + int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info); 69 69 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, 70 70 struct kvm_lapic_state *s); 71 71 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
+5 -4
arch/x86/kvm/vmx.c
··· 4392 4392 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu) 4393 4393 { 4394 4394 struct vcpu_vmx *vmx = to_vmx(vcpu); 4395 - u64 msr; 4395 + struct msr_data apic_base_msr; 4396 4396 4397 4397 vmx->rmode.vm86_active = 0; 4398 4398 ··· 4400 4400 4401 4401 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); 4402 4402 kvm_set_cr8(&vmx->vcpu, 0); 4403 - msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; 4403 + apic_base_msr.data = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; 4404 4404 if (kvm_vcpu_is_bsp(&vmx->vcpu)) 4405 - msr |= MSR_IA32_APICBASE_BSP; 4406 - kvm_set_apic_base(&vmx->vcpu, msr); 4405 + apic_base_msr.data |= MSR_IA32_APICBASE_BSP; 4406 + apic_base_msr.host_initiated = true; 4407 + kvm_set_apic_base(&vmx->vcpu, &apic_base_msr); 4407 4408 4408 4409 vmx_segment_cache_clear(vmx); 4409 4410
+30 -9
arch/x86/kvm/x86.c
··· 257 257 } 258 258 EXPORT_SYMBOL_GPL(kvm_get_apic_base); 259 259 260 - void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data) 260 + int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 261 261 { 262 - /* TODO: reserve bits check */ 263 - kvm_lapic_set_base(vcpu, data); 262 + u64 old_state = vcpu->arch.apic_base & 263 + (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); 264 + u64 new_state = msr_info->data & 265 + (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); 266 + u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 267 + 0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE); 268 + 269 + if (!msr_info->host_initiated && 270 + ((msr_info->data & reserved_bits) != 0 || 271 + new_state == X2APIC_ENABLE || 272 + (new_state == MSR_IA32_APICBASE_ENABLE && 273 + old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) || 274 + (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) && 275 + old_state == 0))) 276 + return 1; 277 + 278 + kvm_lapic_set_base(vcpu, msr_info->data); 279 + return 0; 264 280 } 265 281 EXPORT_SYMBOL_GPL(kvm_set_apic_base); 266 282 ··· 1856 1840 if (__copy_to_user((void __user *)addr, instructions, 4)) 1857 1841 return 1; 1858 1842 kvm->arch.hv_hypercall = data; 1843 + mark_page_dirty(kvm, gfn); 1859 1844 break; 1860 1845 } 1861 1846 case HV_X64_MSR_REFERENCE_TSC: { ··· 1885 1868 { 1886 1869 switch (msr) { 1887 1870 case HV_X64_MSR_APIC_ASSIST_PAGE: { 1871 + u64 gfn; 1888 1872 unsigned long addr; 1889 1873 1890 1874 if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { 1891 1875 vcpu->arch.hv_vapic = data; 1892 1876 break; 1893 1877 } 1894 - addr = gfn_to_hva(vcpu->kvm, data >> 1895 - HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT); 1878 + gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT; 1879 + addr = gfn_to_hva(vcpu->kvm, gfn); 1896 1880 if (kvm_is_error_hva(addr)) 1897 1881 return 1; 1898 1882 if (__clear_user((void __user *)addr, PAGE_SIZE)) 1899 1883 return 1; 1900 1884 vcpu->arch.hv_vapic = data; 1885 + mark_page_dirty(vcpu->kvm, gfn); 1901 1886 break; 1902 1887 } 1903 1888 case HV_X64_MSR_EOI: ··· 2025 2006 case 0x200 ... 0x2ff: 2026 2007 return set_msr_mtrr(vcpu, msr, data); 2027 2008 case MSR_IA32_APICBASE: 2028 - kvm_set_apic_base(vcpu, data); 2029 - break; 2009 + return kvm_set_apic_base(vcpu, msr_info); 2030 2010 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: 2031 2011 return kvm_x2apic_msr_write(vcpu, msr, data); 2032 2012 case MSR_IA32_TSCDEADLINE: ··· 2616 2598 case KVM_CAP_GET_TSC_KHZ: 2617 2599 case KVM_CAP_KVMCLOCK_CTRL: 2618 2600 case KVM_CAP_READONLY_MEM: 2601 + case KVM_CAP_HYPERV_TIME: 2619 2602 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT 2620 2603 case KVM_CAP_ASSIGN_DEV_IRQ: 2621 2604 case KVM_CAP_PCI_2_3: 2622 - case KVM_CAP_HYPERV_TIME: 2623 2605 #endif 2624 2606 r = 1; 2625 2607 break; ··· 6427 6409 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 6428 6410 struct kvm_sregs *sregs) 6429 6411 { 6412 + struct msr_data apic_base_msr; 6430 6413 int mmu_reset_needed = 0; 6431 6414 int pending_vec, max_bits, idx; 6432 6415 struct desc_ptr dt; ··· 6451 6432 6452 6433 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; 6453 6434 kvm_x86_ops->set_efer(vcpu, sregs->efer); 6454 - kvm_set_apic_base(vcpu, sregs->apic_base); 6435 + apic_base_msr.data = sregs->apic_base; 6436 + apic_base_msr.host_initiated = true; 6437 + kvm_set_apic_base(vcpu, &apic_base_msr); 6455 6438 6456 6439 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 6457 6440 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
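kvm_set_apic_base() now rejects guest writes that set reserved bits or take an architecturally invalid path through the xAPIC/x2APIC enable bits: x2APIC may only be entered from xAPIC mode and only left by disabling the APIC entirely. The allowed transitions, restated as a small standalone check mirroring the hunk above (a sketch; only the two mode bits are modelled):

    #include <stdbool.h>
    #include <stdio.h>

    #define APIC_ENABLE   (1u << 11)   /* MSR_IA32_APICBASE_ENABLE */
    #define X2APIC_ENABLE (1u << 10)

    static bool apic_mode_change_ok(unsigned int old_state,
                                    unsigned int new_state)
    {
            old_state &= APIC_ENABLE | X2APIC_ENABLE;
            new_state &= APIC_ENABLE | X2APIC_ENABLE;

            if (new_state == X2APIC_ENABLE)
                    return false;  /* x2APIC bit without the enable bit */
            if (new_state == APIC_ENABLE &&
                old_state == (APIC_ENABLE | X2APIC_ENABLE))
                    return false;  /* x2APIC -> xAPIC needs a disable first */
            if (new_state == (APIC_ENABLE | X2APIC_ENABLE) &&
                old_state == 0)
                    return false;  /* disabled -> x2APIC skips xAPIC */
            return true;
    }

    int main(void)
    {
            printf("disabled -> x2APIC: %s\n",
                   apic_mode_change_ok(0, APIC_ENABLE | X2APIC_ENABLE)
                   ? "ok" : "rejected");
            return 0;
    }

Host-initiated writes (msr_info->host_initiated) bypass the check, which is why the VMX reset and set_sregs paths above now build an explicit msr_data with host_initiated = true.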
+9 -2
drivers/s390/kvm/virtio_ccw.c
··· 642 642 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) { 643 643 /* OK */ 644 644 } 645 - if (irb_is_error(irb)) 646 - vcdev->err = -EIO; /* XXX - use real error */ 645 + if (irb_is_error(irb)) { 646 + /* Command reject? */ 647 + if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && 648 + (irb->ecw[0] & SNS0_CMD_REJECT)) 649 + vcdev->err = -EOPNOTSUPP; 650 + else 651 + /* Map everything else to -EIO. */ 652 + vcdev->err = -EIO; 653 + } 647 654 if (vcdev->curr_io & activity) { 648 655 switch (activity) { 649 656 case VIRTIO_CCW_DOING_READ_FEAT:
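Finally, the virtio-ccw interrupt handler previously collapsed every channel error to -EIO (the old "XXX - use real error" comment); it now inspects the sense data so that a command reject, which signals an operation the device simply does not implement, surfaces as -EOPNOTSUPP. The triage order, sketched below (the flag values are illustrative stand-ins for the real SCSW/ECW bits):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define DEV_STAT_UNIT_CHECK_SK (1u << 1)  /* illustrative values */
    #define SNS0_CMD_REJECT_SK     (1u << 7)

    static int ccw_error_to_errno(bool is_error, unsigned int dstat,
                                  unsigned int sense0)
    {
            if (!is_error)
                    return 0;
            /* Unit check + command reject: the device refused the
             * command as unsupported; callers can handle that. */
            if ((dstat & DEV_STAT_UNIT_CHECK_SK) &&
                (sense0 & SNS0_CMD_REJECT_SK))
                    return -EOPNOTSUPP;
            return -EIO;  /* everything else stays a generic I/O error */
    }

    int main(void)
    {
            printf("%d\n", ccw_error_to_errno(true, DEV_STAT_UNIT_CHECK_SK,
                                              SNS0_CMD_REJECT_SK));
            return 0;
    }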