Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: booke: category E.HV (GS-mode) support

Chips such as e500mc that implement category E.HV in Power ISA 2.06
provide hardware virtualization features, including a new MSR mode for
guest state. The guest OS can perform many operations without trapping
into the hypervisor, including transitions to and from guest userspace.

Since we can use SRR1[GS] to reliably tell whether an exception came from
guest state, instead of messing around with IVPR, we use DO_KVM similarly
to book3s.

Current issues include:
- Machine checks from guest state are not routed to the host handler.
- The guest can cause a host oops by executing an emulated instruction
in a page that lacks read permission. Existing e500/4xx support has
the same problem.

Includes work by Ashish Kalra <Ashish.Kalra@freescale.com>,
Varun Sethi <Varun.Sethi@freescale.com>, and
Liu Yu <yu.liu@freescale.com>.

Signed-off-by: Scott Wood <scottwood@freescale.com>
[agraf: remove pt_regs usage]
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>

authored by

Scott Wood and committed by
Avi Kivity
d30f6e48 cfac5784

+1058 -67
+1
arch/powerpc/include/asm/dbell.h
··· 19 19 20 20 #define PPC_DBELL_MSG_BRDCAST (0x04000000) 21 21 #define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36)) 22 + #define PPC_DBELL_LPID(x) ((x) << (63 - 49)) 22 23 enum ppc_dbell { 23 24 PPC_DBELL = 0, /* doorbell */ 24 25 PPC_DBELL_CRIT = 1, /* critical doorbell */
+8
arch/powerpc/include/asm/kvm_asm.h
··· 48 48 #define BOOKE_INTERRUPT_SPE_FP_DATA 33 49 49 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34 50 50 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35 51 + #define BOOKE_INTERRUPT_DOORBELL 36 52 + #define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37 53 + 54 + /* booke_hv */ 55 + #define BOOKE_INTERRUPT_GUEST_DBELL 38 56 + #define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39 57 + #define BOOKE_INTERRUPT_HV_SYSCALL 40 58 + #define BOOKE_INTERRUPT_HV_PRIV 41 51 59 52 60 /* book3s */ 53 61
+49
arch/powerpc/include/asm/kvm_booke_hv_asm.h
··· 1 + /* 2 + * Copyright 2010-2011 Freescale Semiconductor, Inc. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License, version 2, as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + #ifndef ASM_KVM_BOOKE_HV_ASM_H 10 + #define ASM_KVM_BOOKE_HV_ASM_H 11 + 12 + #ifdef __ASSEMBLY__ 13 + 14 + /* 15 + * All exceptions from guest state must go through KVM 16 + * (except for those which are delivered directly to the guest) -- 17 + * there are no exceptions for which we fall through directly to 18 + * the normal host handler. 19 + * 20 + * Expected inputs (normal exceptions): 21 + * SCRATCH0 = saved r10 22 + * r10 = thread struct 23 + * r11 = appropriate SRR1 variant (currently used as scratch) 24 + * r13 = saved CR 25 + * *(r10 + THREAD_NORMSAVE(0)) = saved r11 26 + * *(r10 + THREAD_NORMSAVE(2)) = saved r13 27 + * 28 + * Expected inputs (crit/mcheck/debug exceptions): 29 + * appropriate SCRATCH = saved r8 30 + * r8 = exception level stack frame 31 + * r9 = *(r8 + _CCR) = saved CR 32 + * r11 = appropriate SRR1 variant (currently used as scratch) 33 + * *(r8 + GPR9) = saved r9 34 + * *(r8 + GPR10) = saved r10 (r10 not yet clobbered) 35 + * *(r8 + GPR11) = saved r11 36 + */ 37 + .macro DO_KVM intno srr1 38 + #ifdef CONFIG_KVM_BOOKE_HV 39 + BEGIN_FTR_SECTION 40 + mtocrf 0x80, r11 /* check MSR[GS] without clobbering reg */ 41 + bf 3, kvmppc_resume_\intno\()_\srr1 42 + b kvmppc_handler_\intno\()_\srr1 43 + kvmppc_resume_\intno\()_\srr1: 44 + END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) 45 + #endif 46 + .endm 47 + 48 + #endif /*__ASSEMBLY__ */ 49 + #endif /* ASM_KVM_BOOKE_HV_ASM_H */
+18 -1
arch/powerpc/include/asm/kvm_host.h
··· 106 106 u32 dec_exits; 107 107 u32 ext_intr_exits; 108 108 u32 halt_wakeup; 109 + u32 dbell_exits; 110 + u32 gdbell_exits; 109 111 #ifdef CONFIG_PPC_BOOK3S 110 112 u32 pf_storage; 111 113 u32 pf_instruc; ··· 142 140 EMULATED_TLBSX_EXITS, 143 141 EMULATED_TLBWE_EXITS, 144 142 EMULATED_RFI_EXITS, 143 + EMULATED_RFCI_EXITS, 145 144 DEC_EXITS, 146 145 EXT_INTR_EXITS, 147 146 HALT_WAKEUP, ··· 150 147 FP_UNAVAIL, 151 148 DEBUG_EXITS, 152 149 TIMEINGUEST, 150 + DBELL_EXITS, 151 + GDBELL_EXITS, 153 152 __NUMBER_OF_KVM_EXIT_TYPES 154 153 }; 155 154 ··· 222 217 }; 223 218 224 219 struct kvm_arch { 220 + unsigned int lpid; 225 221 #ifdef CONFIG_KVM_BOOK3S_64_HV 226 222 unsigned long hpt_virt; 227 223 struct revmap_entry *revmap; 228 - unsigned int lpid; 229 224 unsigned int host_lpid; 230 225 unsigned long host_lpcr; 231 226 unsigned long sdr1; ··· 350 345 u64 vsr[64]; 351 346 #endif 352 347 348 + #ifdef CONFIG_KVM_BOOKE_HV 349 + u32 host_mas4; 350 + u32 host_mas6; 351 + u32 shadow_epcr; 352 + u32 epcr; 353 + u32 shadow_msrp; 354 + u32 eplc; 355 + u32 epsc; 356 + u32 oldpir; 357 + #endif 358 + 353 359 #ifdef CONFIG_PPC_BOOK3S 354 360 /* For Gekko paired singles */ 355 361 u32 qpr[32]; ··· 444 428 ulong queued_esr; 445 429 u32 tlbcfg[4]; 446 430 u32 mmucfg; 431 + u32 epr; 447 432 #endif 448 433 gpa_t paddr_accessed; 449 434
+3
arch/powerpc/include/asm/kvm_ppc.h
··· 139 139 extern void kvmppc_core_commit_memory_region(struct kvm *kvm, 140 140 struct kvm_userspace_memory_region *mem); 141 141 142 + extern int kvmppc_bookehv_init(void); 143 + extern void kvmppc_bookehv_exit(void); 144 + 142 145 /* 143 146 * Cuts out inst bits with ordering according to spec. 144 147 * That means the leftmost bit is zero. All given bits are included.
+6
arch/powerpc/include/asm/mmu-book3e.h
··· 104 104 #define MAS4_TSIZED_MASK 0x00000f80 /* Default TSIZE */ 105 105 #define MAS4_TSIZED_SHIFT 7 106 106 107 + #define MAS5_SGS 0x80000000 108 + 107 109 #define MAS6_SPID0 0x3FFF0000 108 110 #define MAS6_SPID1 0x00007FFE 109 111 #define MAS6_ISIZE(x) MAS1_TSIZE(x) ··· 119 117 #define MAS6_ISIZE_SHIFT 7 120 118 121 119 #define MAS7_RPN 0xFFFFFFFF 120 + 121 + #define MAS8_TGS 0x80000000 /* Guest space */ 122 + #define MAS8_VF 0x40000000 /* Virtualization Fault */ 123 + #define MAS8_TLPID 0x000000ff 122 124 123 125 /* Bit definitions for MMUCFG */ 124 126 #define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */
+3
arch/powerpc/include/asm/processor.h
··· 243 243 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER 244 244 void* kvm_shadow_vcpu; /* KVM internal data */ 245 245 #endif /* CONFIG_KVM_BOOK3S_32_HANDLER */ 246 + #if defined(CONFIG_KVM) && defined(CONFIG_BOOKE) 247 + struct kvm_vcpu *kvm_vcpu; 248 + #endif 246 249 #ifdef CONFIG_PPC64 247 250 unsigned long dscr; 248 251 int dscr_inherit;
+2
arch/powerpc/include/asm/reg.h
··· 257 257 #define LPCR_LPES_SH 2 258 258 #define LPCR_RMI 0x00000002 /* real mode is cache inhibit */ 259 259 #define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */ 260 + #ifndef SPRN_LPID 260 261 #define SPRN_LPID 0x13F /* Logical Partition Identifier */ 262 + #endif 261 263 #define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */ 262 264 #define SPRN_HMER 0x150 /* Hardware m? error recovery */ 263 265 #define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
+34
arch/powerpc/include/asm/reg_booke.h
··· 61 61 #define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */ 62 62 #define SPRN_EPCR 0x133 /* Embedded Processor Control Register */ 63 63 #define SPRN_DBCR2 0x136 /* Debug Control Register 2 */ 64 + #define SPRN_MSRP 0x137 /* MSR Protect Register */ 64 65 #define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */ 65 66 #define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */ 66 67 #define SPRN_DVC1 0x13E /* Data Value Compare Register 1 */ 67 68 #define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */ 69 + #define SPRN_LPID 0x152 /* Logical Partition ID */ 68 70 #define SPRN_MAS8 0x155 /* MMU Assist Register 8 */ 69 71 #define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */ 70 72 #define SPRN_TLB1PS 0x159 /* TLB 1 Page Size Register */ 71 73 #define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */ 72 74 #define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */ 73 75 #define SPRN_EPTCFG 0x15e /* Embedded Page Table Config */ 76 + #define SPRN_GSPRG0 0x170 /* Guest SPRG0 */ 77 + #define SPRN_GSPRG1 0x171 /* Guest SPRG1 */ 78 + #define SPRN_GSPRG2 0x172 /* Guest SPRG2 */ 79 + #define SPRN_GSPRG3 0x173 /* Guest SPRG3 */ 74 80 #define SPRN_MAS7_MAS3 0x174 /* MMU Assist Register 7 || 3 */ 75 81 #define SPRN_MAS0_MAS1 0x175 /* MMU Assist Register 0 || 1 */ 82 + #define SPRN_GSRR0 0x17A /* Guest SRR0 */ 83 + #define SPRN_GSRR1 0x17B /* Guest SRR1 */ 84 + #define SPRN_GEPR 0x17C /* Guest EPR */ 85 + #define SPRN_GDEAR 0x17D /* Guest DEAR */ 86 + #define SPRN_GPIR 0x17E /* Guest PIR */ 87 + #define SPRN_GESR 0x17F /* Guest Exception Syndrome Register */ 76 88 #define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */ 77 89 #define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */ 78 90 #define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */ ··· 105 93 #define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */ 106 94 #define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */
107 95 #define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */ 96 + #define SPRN_GIVOR2 0x1B8 /* Guest IVOR2 */ 97 + #define SPRN_GIVOR3 0x1B9 /* Guest IVOR3 */ 98 + #define SPRN_GIVOR4 0x1BA /* Guest IVOR4 */ 99 + #define SPRN_GIVOR8 0x1BB /* Guest IVOR8 */ 100 + #define SPRN_GIVOR13 0x1BC /* Guest IVOR13 */ 101 + #define SPRN_GIVOR14 0x1BD /* Guest IVOR14 */ 102 + #define SPRN_GIVPR 0x1BF /* Guest IVPR */ 108 103 #define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */ 109 104 #define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */ 110 105 #define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */ ··· 264 245 #define MCSR_LDG 0x00002000UL /* Guarded Load */ 265 246 #define MCSR_TLBSYNC 0x00000002UL /* Multiple tlbsyncs detected */ 266 247 #define MCSR_BSL2_ERR 0x00000001UL /* Backside L2 cache error */ 248 + 249 + #define MSRP_UCLEP 0x04000000 /* Protect MSR[UCLE] */ 250 + #define MSRP_DEP 0x00000200 /* Protect MSR[DE] */ 251 + #define MSRP_PMMP 0x00000004 /* Protect MSR[PMM] */ 267 252 #endif 268 253 269 254 #ifdef CONFIG_E200 ··· 622 599 #define SPRN_EPCR_DMIUH 0x00400000 /* Disable MAS Interrupt updates 623 600 * for hypervisor */ 624 601 602 + /* Bit definitions for EPLC/EPSC */ 603 + #define EPC_EPR 0x80000000 /* 1 = user, 0 = kernel */ 604 + #define EPC_EPR_SHIFT 31 605 + #define EPC_EAS 0x40000000 /* Address Space */ 606 + #define EPC_EAS_SHIFT 30 607 + #define EPC_EGS 0x20000000 /* 1 = guest, 0 = hypervisor */ 608 + #define EPC_EGS_SHIFT 29 609 + #define EPC_ELPID 0x00ff0000 610 + #define EPC_ELPID_SHIFT 16 611 + #define EPC_EPID 0x00003fff 612 + #define EPC_EPID_SHIFT 0 625 613 626 614 /* 627 615 * The IBM-403 is an even more odd special case, as it is much
+13 -2
arch/powerpc/kernel/asm-offsets.c
··· 116 116 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER 117 117 DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); 118 118 #endif 119 + #ifdef CONFIG_KVM_BOOKE_HV 120 + DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu)); 121 + #endif 119 122 120 123 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 121 124 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); ··· 390 387 #ifdef CONFIG_KVM 391 388 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); 392 389 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); 390 + DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid)); 393 391 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); 394 392 DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); 395 393 DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr)); ··· 433 429 DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4)); 434 430 DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6)); 435 431 432 + DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); 433 + DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); 434 + 436 435 /* book3s */ 437 436 #ifdef CONFIG_KVM_BOOK3S_64_HV 438 - DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); 439 437 DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); 440 438 DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); 441 439 DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); ··· 452 446 DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); 453 447 #endif 454 448 #ifdef CONFIG_PPC_BOOK3S 455 - DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); 456 449 DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); 457 450 DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); 458 451 DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); ··· 600 595 DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc)); 601 596 DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
602 597 DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr)); 598 + #endif 599 + 600 + #ifdef CONFIG_KVM_BOOKE_HV 601 + DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4)); 602 + DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6)); 603 + DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc)); 603 604 #endif 604 605 605 606 #ifdef CONFIG_KVM_EXIT_TIMING
+24 -4
arch/powerpc/kernel/head_booke.h
··· 3 3 4 4 #include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */ 5 5 #include <asm/kvm_asm.h> 6 + #include <asm/kvm_booke_hv_asm.h> 6 7 7 8 /* 8 9 * Macros used for common Book-e exception handling ··· 37 36 stw r11, THREAD_NORMSAVE(0)(r10); \ 38 37 stw r13, THREAD_NORMSAVE(2)(r10); \ 39 38 mfcr r13; /* save CR in r13 for now */\ 40 - mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ 41 - andi. r11,r11,MSR_PR; \ 39 + mfspr r11, SPRN_SRR1; \ 40 + DO_KVM BOOKE_INTERRUPT_##intno SPRN_SRR1; \ 41 + andi. r11, r11, MSR_PR; /* check whether user or kernel */\ 42 42 mr r11, r1; \ 43 43 beq 1f; \ 44 44 /* if from user, start at top of this thread's kernel stack */ \ ··· 125 123 stw r10,GPR10(r8); \ 126 124 stw r11,GPR11(r8); \ 127 125 stw r9,_CCR(r8); /* save CR on stack */\ 128 - mfspr r10,exc_level_srr1; /* check whether user or kernel */\ 129 - andi. r10,r10,MSR_PR; \ 126 + mfspr r11,exc_level_srr1; /* check whether user or kernel */\ 127 + DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \ 128 + andi. r11,r11,MSR_PR; \ 130 129 mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ 131 130 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ 132 131 addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\ ··· 174 171 #define MCHECK_EXCEPTION_PROLOG \ 175 172 EXC_LEVEL_EXCEPTION_PROLOG(MC, MACHINE_CHECK, \ 176 173 SPRN_MCSRR0, SPRN_MCSRR1) 174 + 175 + /* 176 + * Guest Doorbell -- this is a bit odd in that uses GSRR0/1 despite 177 + * being delivered to the host. This exception can only happen 178 + * inside a KVM guest -- so we just handle up to the DO_KVM rather 179 + * than try to fit this into one of the existing prolog macros. 180 + */
181 + #define GUEST_DOORBELL_EXCEPTION \ 182 + START_EXCEPTION(GuestDoorbell); \ 183 + mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \ 184 + mfspr r10, SPRN_SPRG_THREAD; \ 185 + stw r11, THREAD_NORMSAVE(0)(r10); \ 186 + mfspr r11, SPRN_SRR1; \ 187 + stw r13, THREAD_NORMSAVE(2)(r10); \ 188 + mfcr r13; /* save CR in r13 for now */\ 189 + DO_KVM BOOKE_INTERRUPT_GUEST_DBELL SPRN_GSRR1; \ 190 + trap 177 191 178 192 /* 179 193 * Exception vectors.
+3
arch/powerpc/kvm/Kconfig
··· 90 90 depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV 91 91 select KVM_BOOK3S_PR 92 92 93 + config KVM_BOOKE_HV 94 + bool 95 + 93 96 config KVM_440 94 97 bool "KVM support for PowerPC 440 processors" 95 98 depends on EXPERIMENTAL && 44x
+253 -56
arch/powerpc/kvm/booke.c
··· 17 17 * 18 18 * Authors: Hollis Blanchard <hollisb@us.ibm.com> 19 19 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> 20 + * Scott Wood <scottwood@freescale.com> 21 + * Varun Sethi <varun.sethi@freescale.com> 20 22 */ 21 23 22 24 #include <linux/errno.h> ··· 32 30 #include <asm/cputable.h> 33 31 #include <asm/uaccess.h> 34 32 #include <asm/kvm_ppc.h> 35 - #include "timing.h" 36 33 #include <asm/cacheflush.h> 34 + #include <asm/dbell.h> 35 + #include <asm/hw_irq.h> 36 + #include <asm/irq.h> 37 37 38 + #include "timing.h" 38 39 #include "booke.h" 39 40 40 41 unsigned long kvmppc_booke_handlers; ··· 60 55 { "dec", VCPU_STAT(dec_exits) }, 61 56 { "ext_intr", VCPU_STAT(ext_intr_exits) }, 62 57 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 58 + { "doorbell", VCPU_STAT(dbell_exits) }, 59 + { "guest doorbell", VCPU_STAT(gdbell_exits) }, 63 60 { NULL } 64 61 }; 65 62 ··· 127 120 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) 128 121 { 129 122 u32 old_msr = vcpu->arch.shared->msr; 123 + 124 + #ifdef CONFIG_KVM_BOOKE_HV 125 + new_msr |= MSR_GS; 126 + #endif 130 127 131 128 vcpu->arch.shared->msr = new_msr; 132 129 ··· 206 195 clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); 207 196 } 208 197 198 + static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) 199 + { 200 + #ifdef CONFIG_KVM_BOOKE_HV 201 + mtspr(SPRN_GSRR0, srr0); 202 + mtspr(SPRN_GSRR1, srr1); 203 + #else 204 + vcpu->arch.shared->srr0 = srr0; 205 + vcpu->arch.shared->srr1 = srr1; 206 + #endif 207 + } 208 + 209 + static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) 210 + { 211 + vcpu->arch.csrr0 = srr0; 212 + vcpu->arch.csrr1 = srr1; 213 + } 214 + 215 + static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) 216 + { 217 + if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) { 218 + vcpu->arch.dsrr0 = srr0; 219 + vcpu->arch.dsrr1 = srr1; 220 + } else { 221 + set_guest_csrr(vcpu, srr0, srr1); 222 + } 223 + } 224 + 225 + 
static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) 226 + { 227 + vcpu->arch.mcsrr0 = srr0; 228 + vcpu->arch.mcsrr1 = srr1; 229 + } 230 + 231 + static unsigned long get_guest_dear(struct kvm_vcpu *vcpu) 232 + { 233 + #ifdef CONFIG_KVM_BOOKE_HV 234 + return mfspr(SPRN_GDEAR); 235 + #else 236 + return vcpu->arch.shared->dar; 237 + #endif 238 + } 239 + 240 + static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear) 241 + { 242 + #ifdef CONFIG_KVM_BOOKE_HV 243 + mtspr(SPRN_GDEAR, dear); 244 + #else 245 + vcpu->arch.shared->dar = dear; 246 + #endif 247 + } 248 + 249 + static unsigned long get_guest_esr(struct kvm_vcpu *vcpu) 250 + { 251 + #ifdef CONFIG_KVM_BOOKE_HV 252 + return mfspr(SPRN_GESR); 253 + #else 254 + return vcpu->arch.shared->esr; 255 + #endif 256 + } 257 + 258 + static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr) 259 + { 260 + #ifdef CONFIG_KVM_BOOKE_HV 261 + mtspr(SPRN_GESR, esr); 262 + #else 263 + vcpu->arch.shared->esr = esr; 264 + #endif 265 + } 266 + 209 267 /* Deliver the interrupt of the corresponding priority, if possible. 
*/ 210 268 static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, 211 269 unsigned int priority) ··· 286 206 ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); 287 207 bool crit; 288 208 bool keep_irq = false; 209 + enum int_class int_class; 289 210 290 211 /* Truncate crit indicators in 32 bit mode */ 291 212 if (!(vcpu->arch.shared->msr & MSR_SF)) { ··· 322 241 case BOOKE_IRQPRIO_AP_UNAVAIL: 323 242 case BOOKE_IRQPRIO_ALIGNMENT: 324 243 allowed = 1; 325 - msr_mask = MSR_CE|MSR_ME|MSR_DE; 244 + msr_mask = MSR_GS | MSR_CE | MSR_ME | MSR_DE; 245 + int_class = INT_CLASS_NONCRIT; 326 246 break; 327 247 case BOOKE_IRQPRIO_CRITICAL: 328 - case BOOKE_IRQPRIO_WATCHDOG: 329 248 allowed = vcpu->arch.shared->msr & MSR_CE; 330 - msr_mask = MSR_ME; 249 + allowed = allowed && !crit; 250 + msr_mask = MSR_GS | MSR_ME; 251 + int_class = INT_CLASS_CRIT; 331 252 break; 332 253 case BOOKE_IRQPRIO_MACHINE_CHECK: 333 254 allowed = vcpu->arch.shared->msr & MSR_ME; 334 - msr_mask = 0; 255 + allowed = allowed && !crit; 256 + msr_mask = MSR_GS; 257 + int_class = INT_CLASS_MC; 335 258 break; 336 259 case BOOKE_IRQPRIO_DECREMENTER: 337 260 case BOOKE_IRQPRIO_FIT: ··· 344 259 case BOOKE_IRQPRIO_EXTERNAL: 345 260 allowed = vcpu->arch.shared->msr & MSR_EE; 346 261 allowed = allowed && !crit; 347 - msr_mask = MSR_CE|MSR_ME|MSR_DE; 262 + msr_mask = MSR_GS | MSR_CE | MSR_ME | MSR_DE; 263 + int_class = INT_CLASS_NONCRIT; 348 264 break; 349 265 case BOOKE_IRQPRIO_DEBUG: 350 266 allowed = vcpu->arch.shared->msr & MSR_DE; 351 - msr_mask = MSR_ME; 267 + allowed = allowed && !crit; 268 + msr_mask = MSR_GS | MSR_ME; 269 + int_class = INT_CLASS_CRIT; 352 270 break; 353 271 } 354 272 355 273 if (allowed) { 356 - vcpu->arch.shared->srr0 = vcpu->arch.pc; 357 - vcpu->arch.shared->srr1 = vcpu->arch.shared->msr; 274 + switch (int_class) { 275 + case INT_CLASS_NONCRIT: 276 + set_guest_srr(vcpu, vcpu->arch.pc, 277 + vcpu->arch.shared->msr); 278 + break; 279 + case INT_CLASS_CRIT: 280 + set_guest_csrr(vcpu, 
vcpu->arch.pc, 281 + vcpu->arch.shared->msr); 282 + break; 283 + case INT_CLASS_DBG: 284 + set_guest_dsrr(vcpu, vcpu->arch.pc, 285 + vcpu->arch.shared->msr); 286 + break; 287 + case INT_CLASS_MC: 288 + set_guest_mcsrr(vcpu, vcpu->arch.pc, 289 + vcpu->arch.shared->msr); 290 + break; 291 + } 292 + 358 293 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; 359 294 if (update_esr == true) 360 - vcpu->arch.shared->esr = vcpu->arch.queued_esr; 295 + set_guest_esr(vcpu, vcpu->arch.queued_esr); 361 296 if (update_dear == true) 362 - vcpu->arch.shared->dar = vcpu->arch.queued_dear; 297 + set_guest_dear(vcpu, vcpu->arch.queued_dear); 363 298 kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); 364 299 365 300 if (!keep_irq) 366 301 clear_bit(priority, &vcpu->arch.pending_exceptions); 367 302 } 303 + 304 + #ifdef CONFIG_KVM_BOOKE_HV 305 + /* 306 + * If an interrupt is pending but masked, raise a guest doorbell 307 + * so that we are notified when the guest enables the relevant 308 + * MSR bit. 
309 + */ 310 + if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE) 311 + kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT); 312 + if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE) 313 + kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT); 314 + if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK) 315 + kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC); 316 + #endif 368 317 369 318 return allowed; 370 319 } ··· 463 344 return -EINVAL; 464 345 } 465 346 347 + if (!current->thread.kvm_vcpu) { 348 + WARN(1, "no vcpu\n"); 349 + return -EPERM; 350 + } 351 + 466 352 local_irq_disable(); 467 353 468 354 kvmppc_core_prepare_to_enter(vcpu); ··· 487 363 return ret; 488 364 } 489 365 366 + static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) 367 + { 368 + enum emulation_result er; 369 + 370 + er = kvmppc_emulate_instruction(run, vcpu); 371 + switch (er) { 372 + case EMULATE_DONE: 373 + /* don't overwrite subtypes, just account kvm_stats */ 374 + kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); 375 + /* Future optimization: only reload non-volatiles if 376 + * they were actually modified by emulation. */ 377 + return RESUME_GUEST_NV; 378 + 379 + case EMULATE_DO_DCR: 380 + run->exit_reason = KVM_EXIT_DCR; 381 + return RESUME_HOST; 382 + 383 + case EMULATE_FAIL: 384 + /* XXX Deliver Program interrupt to guest. */ 385 + printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", 386 + __func__, vcpu->arch.pc, vcpu->arch.last_inst); 387 + /* For debugging, encode the failing instruction and 388 + * report it to userspace. 
*/ 389 + run->hw.hardware_exit_reason = ~0ULL << 32; 390 + run->hw.hardware_exit_reason |= vcpu->arch.last_inst; 391 + return RESUME_HOST; 392 + 393 + default: 394 + BUG(); 395 + } 396 + } 397 + 490 398 /** 491 399 * kvmppc_handle_exit 492 400 * ··· 527 371 int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, 528 372 unsigned int exit_nr) 529 373 { 530 - enum emulation_result er; 531 374 int r = RESUME_HOST; 532 375 533 376 /* update before a new last_exit_type is rewritten */ 534 377 kvmppc_update_timing_stats(vcpu); 378 + 379 + switch (exit_nr) { 380 + case BOOKE_INTERRUPT_EXTERNAL: 381 + do_IRQ(current->thread.regs); 382 + break; 383 + 384 + case BOOKE_INTERRUPT_DECREMENTER: 385 + timer_interrupt(current->thread.regs); 386 + break; 387 + 388 + #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64) 389 + case BOOKE_INTERRUPT_DOORBELL: 390 + doorbell_exception(current->thread.regs); 391 + break; 392 + #endif 393 + case BOOKE_INTERRUPT_MACHINE_CHECK: 394 + /* FIXME */ 395 + break; 396 + } 535 397 536 398 local_irq_enable(); 537 399 ··· 558 384 559 385 switch (exit_nr) { 560 386 case BOOKE_INTERRUPT_MACHINE_CHECK: 561 - printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); 562 - kvmppc_dump_vcpu(vcpu); 563 - r = RESUME_HOST; 387 + kvm_resched(vcpu); 388 + r = RESUME_GUEST; 564 389 break; 565 390 566 391 case BOOKE_INTERRUPT_EXTERNAL: 567 392 kvmppc_account_exit(vcpu, EXT_INTR_EXITS); 568 - if (need_resched()) 569 - cond_resched(); 393 + kvm_resched(vcpu); 570 394 r = RESUME_GUEST; 571 395 break; 572 396 573 397 case BOOKE_INTERRUPT_DECREMENTER: 574 - /* Since we switched IVPR back to the host's value, the host 575 - * handled this interrupt the moment we enabled interrupts. 576 - * Now we just offer it a chance to reschedule the guest. 
*/ 577 398 kvmppc_account_exit(vcpu, DEC_EXITS); 578 - if (need_resched()) 579 - cond_resched(); 399 + kvm_resched(vcpu); 580 400 r = RESUME_GUEST; 581 401 break; 582 402 403 + case BOOKE_INTERRUPT_DOORBELL: 404 + kvmppc_account_exit(vcpu, DBELL_EXITS); 405 + kvm_resched(vcpu); 406 + r = RESUME_GUEST; 407 + break; 408 + 409 + case BOOKE_INTERRUPT_GUEST_DBELL_CRIT: 410 + kvmppc_account_exit(vcpu, GDBELL_EXITS); 411 + 412 + /* 413 + * We are here because there is a pending guest interrupt 414 + * which could not be delivered as MSR_CE or MSR_ME was not 415 + * set. Once we break from here we will retry delivery. 416 + */ 417 + r = RESUME_GUEST; 418 + break; 419 + 420 + case BOOKE_INTERRUPT_GUEST_DBELL: 421 + kvmppc_account_exit(vcpu, GDBELL_EXITS); 422 + 423 + /* 424 + * We are here because there is a pending guest interrupt 425 + * which could not be delivered as MSR_EE was not set. Once 426 + * we break from here we will retry delivery. 427 + */ 428 + r = RESUME_GUEST; 429 + break; 430 + 431 + case BOOKE_INTERRUPT_HV_PRIV: 432 + r = emulation_exit(run, vcpu); 433 + break; 434 + 583 435 case BOOKE_INTERRUPT_PROGRAM: 584 - if (vcpu->arch.shared->msr & MSR_PR) { 436 + if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) { 585 437 /* Program traps generated by user-level software must be handled 586 438 * by the guest kernel. */ 587 439 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); ··· 616 416 break; 617 417 } 618 418 619 - er = kvmppc_emulate_instruction(run, vcpu); 620 - switch (er) { 621 - case EMULATE_DONE: 622 - /* don't overwrite subtypes, just account kvm_stats */ 623 - kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); 624 - /* Future optimization: only reload non-volatiles if 625 - * they were actually modified by emulation. */ 626 - r = RESUME_GUEST_NV; 627 - break; 628 - case EMULATE_DO_DCR: 629 - run->exit_reason = KVM_EXIT_DCR; 630 - r = RESUME_HOST; 631 - break; 632 - case EMULATE_FAIL: 633 - /* XXX Deliver Program interrupt to guest. 
*/ 634 - printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", 635 - __func__, vcpu->arch.pc, vcpu->arch.last_inst); 636 - /* For debugging, encode the failing instruction and 637 - * report it to userspace. */ 638 - run->hw.hardware_exit_reason = ~0ULL << 32; 639 - run->hw.hardware_exit_reason |= vcpu->arch.last_inst; 640 - r = RESUME_HOST; 641 - break; 642 - default: 643 - BUG(); 644 - } 419 + r = emulation_exit(run, vcpu); 645 420 break; 646 421 647 422 case BOOKE_INTERRUPT_FP_UNAVAIL: ··· 681 506 r = RESUME_GUEST; 682 507 break; 683 508 509 + #ifdef CONFIG_KVM_BOOKE_HV 510 + case BOOKE_INTERRUPT_HV_SYSCALL: 511 + if (!(vcpu->arch.shared->msr & MSR_PR)) { 512 + kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); 513 + } else { 514 + /* 515 + * hcall from guest userspace -- send privileged 516 + * instruction program check. 517 + */ 518 + kvmppc_core_queue_program(vcpu, ESR_PPR); 519 + } 520 + 521 + r = RESUME_GUEST; 522 + break; 523 + #else 684 524 case BOOKE_INTERRUPT_SYSCALL: 685 525 if (!(vcpu->arch.shared->msr & MSR_PR) && 686 526 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { ··· 709 519 kvmppc_account_exit(vcpu, SYSCALL_EXITS); 710 520 r = RESUME_GUEST; 711 521 break; 522 + #endif 712 523 713 524 case BOOKE_INTERRUPT_DTLB_MISS: { 714 525 unsigned long eaddr = vcpu->arch.fault_dear; ··· 850 659 int r; 851 660 852 661 vcpu->arch.pc = 0; 853 - vcpu->arch.shared->msr = 0; 854 - vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; 855 662 vcpu->arch.shared->pir = vcpu->vcpu_id; 856 663 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ 664 + kvmppc_set_msr(vcpu, 0); 857 665 666 + #ifndef CONFIG_KVM_BOOKE_HV 667 + vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; 858 668 vcpu->arch.shadow_pid = 1; 669 + vcpu->arch.shared->msr = 0; 670 + #endif 859 671 860 672 /* Eye-catching numbers so we know if the guest takes an interrupt 861 673 * before it's programmed its own IVPR/IVORs. 
*/ ··· 939 745 sregs->u.e.csrr0 = vcpu->arch.csrr0; 940 746 sregs->u.e.csrr1 = vcpu->arch.csrr1; 941 747 sregs->u.e.mcsr = vcpu->arch.mcsr; 942 - sregs->u.e.esr = vcpu->arch.shared->esr; 943 - sregs->u.e.dear = vcpu->arch.shared->dar; 748 + sregs->u.e.esr = get_guest_esr(vcpu); 749 + sregs->u.e.dear = get_guest_dear(vcpu); 944 750 sregs->u.e.tsr = vcpu->arch.tsr; 945 751 sregs->u.e.tcr = vcpu->arch.tcr; 946 752 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); ··· 957 763 vcpu->arch.csrr0 = sregs->u.e.csrr0; 958 764 vcpu->arch.csrr1 = sregs->u.e.csrr1; 959 765 vcpu->arch.mcsr = sregs->u.e.mcsr; 960 - vcpu->arch.shared->esr = sregs->u.e.esr; 961 - vcpu->arch.shared->dar = sregs->u.e.dear; 766 + set_guest_esr(vcpu, sregs->u.e.esr); 767 + set_guest_dear(vcpu, sregs->u.e.dear); 962 768 vcpu->arch.vrsave = sregs->u.e.vrsave; 963 769 kvmppc_set_tcr(vcpu, sregs->u.e.tcr); 964 770 ··· 1155 961 1156 962 void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1157 963 { 964 + current->thread.kvm_vcpu = vcpu; 1158 965 } 1159 966 1160 967 void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu) 1161 968 { 969 + current->thread.kvm_vcpu = NULL; 1162 970 } 1163 971 1164 972 int __init kvmppc_booke_init(void) 1165 973 { 974 + #ifndef CONFIG_KVM_BOOKE_HV 1166 975 unsigned long ivor[16]; 1167 976 unsigned long max_ivor = 0; 1168 977 int i; ··· 1208 1011 } 1209 1012 flush_icache_range(kvmppc_booke_handlers, 1210 1013 kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); 1211 - 1014 + #endif /* !BOOKE_HV */ 1212 1015 return 0; 1213 1016 } 1214 1017
+23 -1
arch/powerpc/kvm/booke.h
··· 48 48 #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 49 49 /* Internal pseudo-irqprio for level triggered externals */ 50 50 #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20 51 - #define BOOKE_IRQPRIO_MAX 20 51 + #define BOOKE_IRQPRIO_DBELL 21 52 + #define BOOKE_IRQPRIO_DBELL_CRIT 22 53 + #define BOOKE_IRQPRIO_MAX 23 54 + 55 + #define BOOKE_IRQMASK_EE ((1 << BOOKE_IRQPRIO_EXTERNAL_LEVEL) | \ 56 + (1 << BOOKE_IRQPRIO_PERFORMANCE_MONITOR) | \ 57 + (1 << BOOKE_IRQPRIO_DBELL) | \ 58 + (1 << BOOKE_IRQPRIO_DECREMENTER) | \ 59 + (1 << BOOKE_IRQPRIO_FIT) | \ 60 + (1 << BOOKE_IRQPRIO_EXTERNAL)) 61 + 62 + #define BOOKE_IRQMASK_CE ((1 << BOOKE_IRQPRIO_DBELL_CRIT) | \ 63 + (1 << BOOKE_IRQPRIO_WATCHDOG) | \ 64 + (1 << BOOKE_IRQPRIO_CRITICAL)) 52 65 53 66 extern unsigned long kvmppc_booke_handlers; 54 67 ··· 86 73 87 74 void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu); 88 75 void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu); 76 + 77 + enum int_class { 78 + INT_CLASS_NONCRIT, 79 + INT_CLASS_CRIT, 80 + INT_CLASS_MC, 81 + INT_CLASS_DBG, 82 + }; 83 + 84 + void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); 89 85 90 86 #endif /* __KVM_BOOKE_H__ */
+20 -3
arch/powerpc/kvm/booke_emulate.c
··· 99 99 return emulated; 100 100 } 101 101 102 + /* 103 + * NOTE: some of these registers are not emulated on BOOKE_HV (GS-mode). 104 + * Their backing store is in real registers, and these functions 105 + * will return the wrong result if called for them in another context 106 + * (such as debugging). 107 + */ 102 108 int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) 103 109 { 104 110 int emulated = EMULATE_DONE; ··· 128 122 kvmppc_set_tcr(vcpu, spr_val); 129 123 break; 130 124 131 - /* Note: SPRG4-7 are user-readable. These values are 132 - * loaded into the real SPRGs when resuming the 133 - * guest. */ 125 + /* 126 + * Note: SPRG4-7 are user-readable. 127 + * These values are loaded into the real SPRGs when resuming the 128 + * guest (PR-mode only). 129 + */ 134 130 case SPRN_SPRG4: 135 131 vcpu->arch.shared->sprg4 = spr_val; break; 136 132 case SPRN_SPRG5: ··· 144 136 145 137 case SPRN_IVPR: 146 138 vcpu->arch.ivpr = spr_val; 139 + #ifdef CONFIG_KVM_BOOKE_HV 140 + mtspr(SPRN_GIVPR, spr_val); 141 + #endif 147 142 break; 148 143 case SPRN_IVOR0: 149 144 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; ··· 156 145 break; 157 146 case SPRN_IVOR2: 158 147 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; 148 + #ifdef CONFIG_KVM_BOOKE_HV 149 + mtspr(SPRN_GIVOR2, spr_val); 150 + #endif 159 151 break; 160 152 case SPRN_IVOR3: 161 153 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; ··· 177 163 break; 178 164 case SPRN_IVOR8: 179 165 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; 166 + #ifdef CONFIG_KVM_BOOKE_HV 167 + mtspr(SPRN_GIVOR8, spr_val); 168 + #endif 180 169 break; 181 170 case SPRN_IVOR9: 182 171 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
+587
arch/powerpc/kvm/bookehv_interrupts.S
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or modify 3 + * it under the terms of the GNU General Public License, version 2, as 4 + * published by the Free Software Foundation. 5 + * 6 + * This program is distributed in the hope that it will be useful, 7 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 + * GNU General Public License for more details. 10 + * 11 + * You should have received a copy of the GNU General Public License 12 + * along with this program; if not, write to the Free Software 13 + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 + * 15 + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. 16 + * 17 + * Author: Varun Sethi <varun.sethi@freescale.com> 18 + * Author: Scott Wood <scotwood@freescale.com> 19 + * 20 + * This file is derived from arch/powerpc/kvm/booke_interrupts.S 21 + */ 22 + 23 + #include <asm/ppc_asm.h> 24 + #include <asm/kvm_asm.h> 25 + #include <asm/reg.h> 26 + #include <asm/mmu-44x.h> 27 + #include <asm/page.h> 28 + #include <asm/asm-compat.h> 29 + #include <asm/asm-offsets.h> 30 + #include <asm/bitsperlong.h> 31 + 32 + #include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */ 33 + 34 + #define GET_VCPU(vcpu, thread) \ 35 + PPC_LL vcpu, THREAD_KVM_VCPU(thread) 36 + 37 + #define SET_VCPU(vcpu) \ 38 + PPC_STL vcpu, (THREAD + THREAD_KVM_VCPU)(r2) 39 + 40 + #define LONGBYTES (BITS_PER_LONG / 8) 41 + 42 + #define VCPU_GPR(n) (VCPU_GPRS + (n * LONGBYTES)) 43 + #define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES)) 44 + 45 + /* The host stack layout: */ 46 + #define HOST_R1 (0 * LONGBYTES) /* Implied by stwu. */ 47 + #define HOST_CALLEE_LR (1 * LONGBYTES) 48 + #define HOST_RUN (2 * LONGBYTES) /* struct kvm_run */ 49 + /* 50 + * r2 is special: it holds 'current', and it made nonvolatile in the 51 + * kernel with the -ffixed-r2 gcc option. 
52 + */ 53 + #define HOST_R2 (3 * LONGBYTES) 54 + #define HOST_NV_GPRS (4 * LONGBYTES) 55 + #define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES)) 56 + #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + LONGBYTES) 57 + #define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */ 58 + #define HOST_STACK_LR (HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */ 59 + 60 + #define NEED_EMU 0x00000001 /* emulation -- save nv regs */ 61 + #define NEED_DEAR 0x00000002 /* save faulting DEAR */ 62 + #define NEED_ESR 0x00000004 /* save faulting ESR */ 63 + 64 + /* 65 + * On entry: 66 + * r4 = vcpu, r5 = srr0, r6 = srr1 67 + * saved in vcpu: cr, ctr, r3-r13 68 + */ 69 + .macro kvm_handler_common intno, srr0, flags 70 + mfspr r10, SPRN_PID 71 + lwz r8, VCPU_HOST_PID(r4) 72 + PPC_LL r11, VCPU_SHARED(r4) 73 + PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */ 74 + li r14, \intno 75 + 76 + stw r10, VCPU_GUEST_PID(r4) 77 + mtspr SPRN_PID, r8 78 + 79 + .if \flags & NEED_EMU 80 + lwz r9, VCPU_KVM(r4) 81 + .endif 82 + 83 + #ifdef CONFIG_KVM_EXIT_TIMING 84 + /* save exit time */ 85 + 1: mfspr r7, SPRN_TBRU 86 + mfspr r8, SPRN_TBRL 87 + mfspr r9, SPRN_TBRU 88 + cmpw r9, r7 89 + PPC_STL r8, VCPU_TIMING_EXIT_TBL(r4) 90 + bne- 1b 91 + PPC_STL r9, VCPU_TIMING_EXIT_TBU(r4) 92 + #endif 93 + 94 + oris r8, r6, MSR_CE@h 95 + #ifndef CONFIG_64BIT 96 + stw r6, (VCPU_SHARED_MSR + 4)(r11) 97 + #else 98 + std r6, (VCPU_SHARED_MSR)(r11) 99 + #endif 100 + ori r8, r8, MSR_ME | MSR_RI 101 + PPC_STL r5, VCPU_PC(r4) 102 + 103 + /* 104 + * Make sure CE/ME/RI are set (if appropriate for exception type) 105 + * whether or not the guest had it set. Since mfmsr/mtmsr are 106 + * somewhat expensive, skip in the common case where the guest 107 + * had all these bits set (and thus they're still set if 108 + * appropriate for the exception type). 
109 + */ 110 + cmpw r6, r8 111 + .if \flags & NEED_EMU 112 + lwz r9, KVM_LPID(r9) 113 + .endif 114 + beq 1f 115 + mfmsr r7 116 + .if \srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0 117 + oris r7, r7, MSR_CE@h 118 + .endif 119 + .if \srr0 != SPRN_MCSRR0 120 + ori r7, r7, MSR_ME | MSR_RI 121 + .endif 122 + mtmsr r7 123 + 1: 124 + 125 + .if \flags & NEED_EMU 126 + /* 127 + * This assumes you have external PID support. 128 + * To support a bookehv CPU without external PID, you'll 129 + * need to look up the TLB entry and create a temporary mapping. 130 + * 131 + * FIXME: we don't currently handle if the lwepx faults. PR-mode 132 + * booke doesn't handle it either. Since Linux doesn't use 133 + * broadcast tlbivax anymore, the only way this should happen is 134 + * if the guest maps its memory execute-but-not-read, or if we 135 + * somehow take a TLB miss in the middle of this entry code and 136 + * evict the relevant entry. On e500mc, all kernel lowmem is 137 + * bolted into TLB1 large page mappings, and we don't use 138 + * broadcast invalidates, so we should not take a TLB miss here. 139 + * 140 + * Later we'll need to deal with faults here. Disallowing guest 141 + * mappings that are execute-but-not-read could be an option on 142 + * e500mc, but not on chips with an LRAT if it is used. 
143 + */ 144 + 145 + mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */ 146 + PPC_STL r15, VCPU_GPR(r15)(r4) 147 + PPC_STL r16, VCPU_GPR(r16)(r4) 148 + PPC_STL r17, VCPU_GPR(r17)(r4) 149 + PPC_STL r18, VCPU_GPR(r18)(r4) 150 + PPC_STL r19, VCPU_GPR(r19)(r4) 151 + mr r8, r3 152 + PPC_STL r20, VCPU_GPR(r20)(r4) 153 + rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS 154 + PPC_STL r21, VCPU_GPR(r21)(r4) 155 + rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR 156 + PPC_STL r22, VCPU_GPR(r22)(r4) 157 + rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID 158 + PPC_STL r23, VCPU_GPR(r23)(r4) 159 + PPC_STL r24, VCPU_GPR(r24)(r4) 160 + PPC_STL r25, VCPU_GPR(r25)(r4) 161 + PPC_STL r26, VCPU_GPR(r26)(r4) 162 + PPC_STL r27, VCPU_GPR(r27)(r4) 163 + PPC_STL r28, VCPU_GPR(r28)(r4) 164 + PPC_STL r29, VCPU_GPR(r29)(r4) 165 + PPC_STL r30, VCPU_GPR(r30)(r4) 166 + PPC_STL r31, VCPU_GPR(r31)(r4) 167 + mtspr SPRN_EPLC, r8 168 + isync 169 + lwepx r9, 0, r5 170 + mtspr SPRN_EPLC, r3 171 + stw r9, VCPU_LAST_INST(r4) 172 + .endif 173 + 174 + .if \flags & NEED_ESR 175 + mfspr r8, SPRN_ESR 176 + PPC_STL r8, VCPU_FAULT_ESR(r4) 177 + .endif 178 + 179 + .if \flags & NEED_DEAR 180 + mfspr r9, SPRN_DEAR 181 + PPC_STL r9, VCPU_FAULT_DEAR(r4) 182 + .endif 183 + 184 + b kvmppc_resume_host 185 + .endm 186 + 187 + /* 188 + * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h 189 + */ 190 + .macro kvm_handler intno srr0, srr1, flags 191 + _GLOBAL(kvmppc_handler_\intno\()_\srr1) 192 + GET_VCPU(r11, r10) 193 + PPC_STL r3, VCPU_GPR(r3)(r11) 194 + mfspr r3, SPRN_SPRG_RSCRATCH0 195 + PPC_STL r4, VCPU_GPR(r4)(r11) 196 + PPC_LL r4, THREAD_NORMSAVE(0)(r10) 197 + PPC_STL r5, VCPU_GPR(r5)(r11) 198 + PPC_STL r13, VCPU_CR(r11) 199 + mfspr r5, \srr0 200 + PPC_STL r3, VCPU_GPR(r10)(r11) 201 + PPC_LL r3, THREAD_NORMSAVE(2)(r10) 202 + PPC_STL r6, VCPU_GPR(r6)(r11) 203 + PPC_STL r4, VCPU_GPR(r11)(r11) 204 + mfspr r6, \srr1 205 + PPC_STL r7, VCPU_GPR(r7)(r11) 206 + PPC_STL r8, VCPU_GPR(r8)(r11) 
207 + PPC_STL r9, VCPU_GPR(r9)(r11) 208 + PPC_STL r3, VCPU_GPR(r13)(r11) 209 + mfctr r7 210 + PPC_STL r12, VCPU_GPR(r12)(r11) 211 + PPC_STL r7, VCPU_CTR(r11) 212 + mr r4, r11 213 + kvm_handler_common \intno, \srr0, \flags 214 + .endm 215 + 216 + .macro kvm_lvl_handler intno scratch srr0, srr1, flags 217 + _GLOBAL(kvmppc_handler_\intno\()_\srr1) 218 + mfspr r10, SPRN_SPRG_THREAD 219 + GET_VCPU(r11, r10) 220 + PPC_STL r3, VCPU_GPR(r3)(r11) 221 + mfspr r3, \scratch 222 + PPC_STL r4, VCPU_GPR(r4)(r11) 223 + PPC_LL r4, GPR9(r8) 224 + PPC_STL r5, VCPU_GPR(r5)(r11) 225 + PPC_STL r9, VCPU_CR(r11) 226 + mfspr r5, \srr0 227 + PPC_STL r3, VCPU_GPR(r8)(r11) 228 + PPC_LL r3, GPR10(r8) 229 + PPC_STL r6, VCPU_GPR(r6)(r11) 230 + PPC_STL r4, VCPU_GPR(r9)(r11) 231 + mfspr r6, \srr1 232 + PPC_LL r4, GPR11(r8) 233 + PPC_STL r7, VCPU_GPR(r7)(r11) 234 + PPC_STL r8, VCPU_GPR(r8)(r11) 235 + PPC_STL r3, VCPU_GPR(r10)(r11) 236 + mfctr r7 237 + PPC_STL r12, VCPU_GPR(r12)(r11) 238 + PPC_STL r4, VCPU_GPR(r11)(r11) 239 + PPC_STL r7, VCPU_CTR(r11) 240 + mr r4, r11 241 + kvm_handler_common \intno, \srr0, \flags 242 + .endm 243 + 244 + kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \ 245 + SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 246 + kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \ 247 + SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0 248 + kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \ 249 + SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR) 250 + kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR 251 + kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0 252 + kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \ 253 + SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR) 254 + kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR 255 + kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 256 + kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0 257 + kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 258 + kvm_handler 
BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0 259 + kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0 260 + kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \ 261 + SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 262 + kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \ 263 + SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR) 264 + kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0 265 + kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 266 + kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0 267 + kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0 268 + kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0 269 + kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0 270 + kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \ 271 + SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 272 + kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU 273 + kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0 274 + kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0 275 + kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \ 276 + SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 277 + kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \ 278 + SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 279 + kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \ 280 + SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0 281 + 282 + 283 + /* Registers: 284 + * SPRG_SCRATCH0: guest r10 285 + * r4: vcpu pointer 286 + * r11: vcpu->arch.shared 287 + * r14: KVM exit number 288 + */ 289 + _GLOBAL(kvmppc_resume_host) 290 + /* Save remaining volatile guest register state to vcpu. 
*/ 291 + mfspr r3, SPRN_VRSAVE 292 + PPC_STL r0, VCPU_GPR(r0)(r4) 293 + PPC_STL r1, VCPU_GPR(r1)(r4) 294 + mflr r5 295 + mfspr r6, SPRN_SPRG4 296 + PPC_STL r2, VCPU_GPR(r2)(r4) 297 + PPC_STL r5, VCPU_LR(r4) 298 + mfspr r7, SPRN_SPRG5 299 + PPC_STL r3, VCPU_VRSAVE(r4) 300 + PPC_STL r6, VCPU_SHARED_SPRG4(r11) 301 + mfspr r8, SPRN_SPRG6 302 + PPC_STL r7, VCPU_SHARED_SPRG5(r11) 303 + mfspr r9, SPRN_SPRG7 304 + PPC_STL r8, VCPU_SHARED_SPRG6(r11) 305 + mfxer r3 306 + PPC_STL r9, VCPU_SHARED_SPRG7(r11) 307 + 308 + /* save guest MAS registers and restore host mas4 & mas6 */ 309 + mfspr r5, SPRN_MAS0 310 + PPC_STL r3, VCPU_XER(r4) 311 + mfspr r6, SPRN_MAS1 312 + stw r5, VCPU_SHARED_MAS0(r11) 313 + mfspr r7, SPRN_MAS2 314 + stw r6, VCPU_SHARED_MAS1(r11) 315 + #ifndef CONFIG_64BIT 316 + stw r7, (VCPU_SHARED_MAS2 + 4)(r11) 317 + #else 318 + std r7, (VCPU_SHARED_MAS2)(r11) 319 + #endif 320 + mfspr r5, SPRN_MAS3 321 + mfspr r6, SPRN_MAS4 322 + stw r5, VCPU_SHARED_MAS7_3+4(r11) 323 + mfspr r7, SPRN_MAS6 324 + stw r6, VCPU_SHARED_MAS4(r11) 325 + mfspr r5, SPRN_MAS7 326 + lwz r6, VCPU_HOST_MAS4(r4) 327 + stw r7, VCPU_SHARED_MAS6(r11) 328 + lwz r8, VCPU_HOST_MAS6(r4) 329 + mtspr SPRN_MAS4, r6 330 + stw r5, VCPU_SHARED_MAS7_3+0(r11) 331 + mtspr SPRN_MAS6, r8 332 + mfspr r3, SPRN_EPCR 333 + rlwinm r3, r3, 0, ~SPRN_EPCR_DMIUH 334 + mtspr SPRN_EPCR, r3 335 + isync 336 + 337 + /* Restore host stack pointer */ 338 + PPC_LL r1, VCPU_HOST_STACK(r4) 339 + PPC_LL r2, HOST_R2(r1) 340 + 341 + /* Switch to kernel stack and jump to handler. */ 342 + PPC_LL r3, HOST_RUN(r1) 343 + mr r5, r14 /* intno */ 344 + mr r14, r4 /* Save vcpu pointer. */ 345 + bl kvmppc_handle_exit 346 + 347 + /* Restore vcpu pointer and the nonvolatiles we used. */ 348 + mr r4, r14 349 + PPC_LL r14, VCPU_GPR(r14)(r4) 350 + 351 + andi. 
r5, r3, RESUME_FLAG_NV 352 + beq skip_nv_load 353 + PPC_LL r15, VCPU_GPR(r15)(r4) 354 + PPC_LL r16, VCPU_GPR(r16)(r4) 355 + PPC_LL r17, VCPU_GPR(r17)(r4) 356 + PPC_LL r18, VCPU_GPR(r18)(r4) 357 + PPC_LL r19, VCPU_GPR(r19)(r4) 358 + PPC_LL r20, VCPU_GPR(r20)(r4) 359 + PPC_LL r21, VCPU_GPR(r21)(r4) 360 + PPC_LL r22, VCPU_GPR(r22)(r4) 361 + PPC_LL r23, VCPU_GPR(r23)(r4) 362 + PPC_LL r24, VCPU_GPR(r24)(r4) 363 + PPC_LL r25, VCPU_GPR(r25)(r4) 364 + PPC_LL r26, VCPU_GPR(r26)(r4) 365 + PPC_LL r27, VCPU_GPR(r27)(r4) 366 + PPC_LL r28, VCPU_GPR(r28)(r4) 367 + PPC_LL r29, VCPU_GPR(r29)(r4) 368 + PPC_LL r30, VCPU_GPR(r30)(r4) 369 + PPC_LL r31, VCPU_GPR(r31)(r4) 370 + skip_nv_load: 371 + /* Should we return to the guest? */ 372 + andi. r5, r3, RESUME_FLAG_HOST 373 + beq lightweight_exit 374 + 375 + srawi r3, r3, 2 /* Shift -ERR back down. */ 376 + 377 + heavyweight_exit: 378 + /* Not returning to guest. */ 379 + PPC_LL r5, HOST_STACK_LR(r1) 380 + 381 + /* 382 + * We already saved guest volatile register state; now save the 383 + * non-volatiles. 384 + */ 385 + 386 + PPC_STL r15, VCPU_GPR(r15)(r4) 387 + PPC_STL r16, VCPU_GPR(r16)(r4) 388 + PPC_STL r17, VCPU_GPR(r17)(r4) 389 + PPC_STL r18, VCPU_GPR(r18)(r4) 390 + PPC_STL r19, VCPU_GPR(r19)(r4) 391 + PPC_STL r20, VCPU_GPR(r20)(r4) 392 + PPC_STL r21, VCPU_GPR(r21)(r4) 393 + PPC_STL r22, VCPU_GPR(r22)(r4) 394 + PPC_STL r23, VCPU_GPR(r23)(r4) 395 + PPC_STL r24, VCPU_GPR(r24)(r4) 396 + PPC_STL r25, VCPU_GPR(r25)(r4) 397 + PPC_STL r26, VCPU_GPR(r26)(r4) 398 + PPC_STL r27, VCPU_GPR(r27)(r4) 399 + PPC_STL r28, VCPU_GPR(r28)(r4) 400 + PPC_STL r29, VCPU_GPR(r29)(r4) 401 + PPC_STL r30, VCPU_GPR(r30)(r4) 402 + PPC_STL r31, VCPU_GPR(r31)(r4) 403 + 404 + /* Load host non-volatile register state from host stack. 
*/ 405 + PPC_LL r14, HOST_NV_GPR(r14)(r1) 406 + PPC_LL r15, HOST_NV_GPR(r15)(r1) 407 + PPC_LL r16, HOST_NV_GPR(r16)(r1) 408 + PPC_LL r17, HOST_NV_GPR(r17)(r1) 409 + PPC_LL r18, HOST_NV_GPR(r18)(r1) 410 + PPC_LL r19, HOST_NV_GPR(r19)(r1) 411 + PPC_LL r20, HOST_NV_GPR(r20)(r1) 412 + PPC_LL r21, HOST_NV_GPR(r21)(r1) 413 + PPC_LL r22, HOST_NV_GPR(r22)(r1) 414 + PPC_LL r23, HOST_NV_GPR(r23)(r1) 415 + PPC_LL r24, HOST_NV_GPR(r24)(r1) 416 + PPC_LL r25, HOST_NV_GPR(r25)(r1) 417 + PPC_LL r26, HOST_NV_GPR(r26)(r1) 418 + PPC_LL r27, HOST_NV_GPR(r27)(r1) 419 + PPC_LL r28, HOST_NV_GPR(r28)(r1) 420 + PPC_LL r29, HOST_NV_GPR(r29)(r1) 421 + PPC_LL r30, HOST_NV_GPR(r30)(r1) 422 + PPC_LL r31, HOST_NV_GPR(r31)(r1) 423 + 424 + /* Return to kvm_vcpu_run(). */ 425 + mtlr r5 426 + addi r1, r1, HOST_STACK_SIZE 427 + /* r3 still contains the return code from kvmppc_handle_exit(). */ 428 + blr 429 + 430 + /* Registers: 431 + * r3: kvm_run pointer 432 + * r4: vcpu pointer 433 + */ 434 + _GLOBAL(__kvmppc_vcpu_run) 435 + stwu r1, -HOST_STACK_SIZE(r1) 436 + PPC_STL r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */ 437 + 438 + /* Save host state to stack. */ 439 + PPC_STL r3, HOST_RUN(r1) 440 + mflr r3 441 + PPC_STL r3, HOST_STACK_LR(r1) 442 + 443 + /* Save host non-volatile register state to stack. 
/*
 * Above: tail of the heavyweight-exit path — host non-volatile GPRs r14..r31
 * are reloaded from the stack frame, the frame is popped, and control returns
 * to kvm_vcpu_run() with r3 carrying kvmppc_handle_exit()'s return code.
 * __kvmppc_vcpu_run then begins: it allocates a HOST_STACK_SIZE frame, records
 * the host stack pointer in the vcpu, and saves the kvm_run pointer and LR.
 * NOTE(review): "stwu r1, -HOST_STACK_SIZE(r1)" is a 32-bit store-with-update
 * while the surrounding code uses size-agnostic PPC_STL/PPC_LL macros; on a
 * CONFIG_64BIT build the stored back-chain would be truncated to a word —
 * a PPC_STLU-style form would be expected here. TODO confirm against the
 * 64-bit booke-hv configuration.
 */
*/ 444 + PPC_STL r14, HOST_NV_GPR(r14)(r1) 445 + PPC_STL r15, HOST_NV_GPR(r15)(r1) 446 + PPC_STL r16, HOST_NV_GPR(r16)(r1) 447 + PPC_STL r17, HOST_NV_GPR(r17)(r1) 448 + PPC_STL r18, HOST_NV_GPR(r18)(r1) 449 + PPC_STL r19, HOST_NV_GPR(r19)(r1) 450 + PPC_STL r20, HOST_NV_GPR(r20)(r1) 451 + PPC_STL r21, HOST_NV_GPR(r21)(r1) 452 + PPC_STL r22, HOST_NV_GPR(r22)(r1) 453 + PPC_STL r23, HOST_NV_GPR(r23)(r1) 454 + PPC_STL r24, HOST_NV_GPR(r24)(r1) 455 + PPC_STL r25, HOST_NV_GPR(r25)(r1) 456 + PPC_STL r26, HOST_NV_GPR(r26)(r1) 457 + PPC_STL r27, HOST_NV_GPR(r27)(r1) 458 + PPC_STL r28, HOST_NV_GPR(r28)(r1) 459 + PPC_STL r29, HOST_NV_GPR(r29)(r1) 460 + PPC_STL r30, HOST_NV_GPR(r30)(r1) 461 + PPC_STL r31, HOST_NV_GPR(r31)(r1) 462 + 463 + /* Load guest non-volatiles. */ 464 + PPC_LL r14, VCPU_GPR(r14)(r4) 465 + PPC_LL r15, VCPU_GPR(r15)(r4) 466 + PPC_LL r16, VCPU_GPR(r16)(r4) 467 + PPC_LL r17, VCPU_GPR(r17)(r4) 468 + PPC_LL r18, VCPU_GPR(r18)(r4) 469 + PPC_LL r19, VCPU_GPR(r19)(r4) 470 + PPC_LL r20, VCPU_GPR(r20)(r4) 471 + PPC_LL r21, VCPU_GPR(r21)(r4) 472 + PPC_LL r22, VCPU_GPR(r22)(r4) 473 + PPC_LL r23, VCPU_GPR(r23)(r4) 474 + PPC_LL r24, VCPU_GPR(r24)(r4) 475 + PPC_LL r25, VCPU_GPR(r25)(r4) 476 + PPC_LL r26, VCPU_GPR(r26)(r4) 477 + PPC_LL r27, VCPU_GPR(r27)(r4) 478 + PPC_LL r28, VCPU_GPR(r28)(r4) 479 + PPC_LL r29, VCPU_GPR(r29)(r4) 480 + PPC_LL r30, VCPU_GPR(r30)(r4) 481 + PPC_LL r31, VCPU_GPR(r31)(r4) 482 + 483 + 484 + lightweight_exit: 485 + PPC_STL r2, HOST_R2(r1) 486 + 487 + mfspr r3, SPRN_PID 488 + stw r3, VCPU_HOST_PID(r4) 489 + lwz r3, VCPU_GUEST_PID(r4) 490 + mtspr SPRN_PID, r3 491 + 492 + /* Save vcpu pointer for the exception handlers 493 + * must be done before loading guest r2. 
/*
 * Above: host non-volatiles r14..r31 are spilled to the frame and the guest's
 * r14..r31 loaded from the vcpu. At lightweight_exit the host r2 (TOC/current)
 * is stashed in the frame, then the host PID SPR is saved into the vcpu and
 * replaced with the guest PID so guest address-space translations apply.
 */
494 + */ 495 + // SET_VCPU(r4) 496 + 497 + PPC_LL r11, VCPU_SHARED(r4) 498 + /* Save host mas4 and mas6 and load guest MAS registers */ 499 + mfspr r3, SPRN_MAS4 500 + stw r3, VCPU_HOST_MAS4(r4) 501 + mfspr r3, SPRN_MAS6 502 + stw r3, VCPU_HOST_MAS6(r4) 503 + lwz r3, VCPU_SHARED_MAS0(r11) 504 + lwz r5, VCPU_SHARED_MAS1(r11) 505 + #ifndef CONFIG_64BIT 506 + lwz r6, (VCPU_SHARED_MAS2 + 4)(r11) 507 + #else 508 + ld r6, (VCPU_SHARED_MAS2)(r11) 509 + #endif 510 + lwz r7, VCPU_SHARED_MAS7_3+4(r11) 511 + lwz r8, VCPU_SHARED_MAS4(r11) 512 + mtspr SPRN_MAS0, r3 513 + mtspr SPRN_MAS1, r5 514 + mtspr SPRN_MAS2, r6 515 + mtspr SPRN_MAS3, r7 516 + mtspr SPRN_MAS4, r8 517 + lwz r3, VCPU_SHARED_MAS6(r11) 518 + lwz r5, VCPU_SHARED_MAS7_3+0(r11) 519 + mtspr SPRN_MAS6, r3 520 + mtspr SPRN_MAS7, r5 521 + /* Disable MAS register updates via exception */ 522 + mfspr r3, SPRN_EPCR 523 + oris r3, r3, SPRN_EPCR_DMIUH@h 524 + mtspr SPRN_EPCR, r3 525 + 526 + /* 527 + * Host interrupt handlers may have clobbered these guest-readable 528 + * SPRGs, so we need to reload them here with the guest's values. 529 + */ 530 + lwz r3, VCPU_VRSAVE(r4) 531 + lwz r5, VCPU_SHARED_SPRG4(r11) 532 + mtspr SPRN_VRSAVE, r3 533 + lwz r6, VCPU_SHARED_SPRG5(r11) 534 + mtspr SPRN_SPRG4W, r5 535 + lwz r7, VCPU_SHARED_SPRG6(r11) 536 + mtspr SPRN_SPRG5W, r6 537 + lwz r8, VCPU_SHARED_SPRG7(r11) 538 + mtspr SPRN_SPRG6W, r7 539 + mtspr SPRN_SPRG7W, r8 540 + 541 + /* Load some guest volatiles. 
/*
 * Above: r11 holds the guest's shared page. Host MAS4/MAS6 are stashed in the
 * vcpu, then guest MAS0-7 are loaded from the shared page (MAS2 is 64-bit wide
 * on CONFIG_64BIT, hence the ld/lwz split; MAS7_3 packs MAS7 in the upper and
 * MAS3 in the lower word). EPCR[DMIUH] is then set so hardware exceptions do
 * not overwrite the freshly loaded MAS values, and the guest-readable
 * SPRG4-7/VRSAVE are restored because host interrupt handlers may clobber them.
 */
*/ 542 + PPC_LL r3, VCPU_LR(r4) 543 + PPC_LL r5, VCPU_XER(r4) 544 + PPC_LL r6, VCPU_CTR(r4) 545 + PPC_LL r7, VCPU_CR(r4) 546 + PPC_LL r8, VCPU_PC(r4) 547 + #ifndef CONFIG_64BIT 548 + lwz r9, (VCPU_SHARED_MSR + 4)(r11) 549 + #else 550 + ld r9, (VCPU_SHARED_MSR)(r11) 551 + #endif 552 + PPC_LL r0, VCPU_GPR(r0)(r4) 553 + PPC_LL r1, VCPU_GPR(r1)(r4) 554 + PPC_LL r2, VCPU_GPR(r2)(r4) 555 + PPC_LL r10, VCPU_GPR(r10)(r4) 556 + PPC_LL r11, VCPU_GPR(r11)(r4) 557 + PPC_LL r12, VCPU_GPR(r12)(r4) 558 + PPC_LL r13, VCPU_GPR(r13)(r4) 559 + mtlr r3 560 + mtxer r5 561 + mtctr r6 562 + mtcr r7 563 + mtsrr0 r8 564 + mtsrr1 r9 565 + 566 + #ifdef CONFIG_KVM_EXIT_TIMING 567 + /* save enter time */ 568 + 1: 569 + mfspr r6, SPRN_TBRU 570 + mfspr r7, SPRN_TBRL 571 + mfspr r8, SPRN_TBRU 572 + cmpw r8, r6 573 + PPC_STL r7, VCPU_TIMING_LAST_ENTER_TBL(r4) 574 + bne 1b 575 + PPC_STL r8, VCPU_TIMING_LAST_ENTER_TBU(r4) 576 + #endif 577 + 578 + /* Finish loading guest volatiles and jump to guest. */ 579 + PPC_LL r5, VCPU_GPR(r5)(r4) 580 + PPC_LL r6, VCPU_GPR(r6)(r4) 581 + PPC_LL r7, VCPU_GPR(r7)(r4) 582 + PPC_LL r8, VCPU_GPR(r8)(r4) 583 + PPC_LL r9, VCPU_GPR(r9)(r4) 584 + 585 + PPC_LL r3, VCPU_GPR(r3)(r4) 586 + PPC_LL r4, VCPU_GPR(r4)(r4) 587 + rfi
/*
 * Above: guest LR/XER/CTR/CR are installed; the guest PC and MSR (64-bit wide
 * on CONFIG_64BIT, read from the shared page) are staged in SRR0/SRR1 so the
 * final rfi atomically enters the guest at its PC with its MSR. The optional
 * CONFIG_KVM_EXIT_TIMING block reads TBU/TBL/TBU and loops until the two TBU
 * reads match, yielding a consistent 64-bit timebase snapshot of entry time.
 * r5-r9 are loaded after the SPR writes, and r3/r4 last since r4 was the vcpu
 * pointer up to that point; rfi transfers control to the guest.
 */
+5
arch/powerpc/kvm/powerpc.c
··· 114 114 goto out; 115 115 #endif 116 116 117 + #ifdef CONFIG_KVM_BOOKE_HV 118 + if (!cpu_has_feature(CPU_FTR_EMB_HV)) 119 + goto out; 120 + #endif 121 + 117 122 r = true; 118 123 119 124 out:
+6
arch/powerpc/kvm/timing.h
··· 93 93 case SIGNAL_EXITS: 94 94 vcpu->stat.signal_exits++; 95 95 break; 96 + case DBELL_EXITS: 97 + vcpu->stat.dbell_exits++; 98 + break; 99 + case GDBELL_EXITS: 100 + vcpu->stat.gdbell_exits++; 101 + break; 96 102 } 97 103 } 98 104