Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: book3s_pr: Simplify transitions between virtual and real mode

This simplifies the way that the book3s_pr makes the transition to
real mode when entering the guest. We now call kvmppc_entry_trampoline
(renamed from kvmppc_rmcall) in the base kernel using a normal function
call instead of doing an indirect call through a pointer in the vcpu.
If kvm is a module, the module loader takes care of generating a
trampoline as it does for other calls to functions outside the module.

kvmppc_entry_trampoline then disables interrupts and jumps to
kvmppc_handler_trampoline_enter in real mode using an rfi[d].
That then uses the link register as the address to return to
(potentially in module space) when the guest exits.

This also simplifies the way that we call the Linux interrupt handler
when we exit the guest due to an external, decrementer or performance
monitor interrupt. Instead of turning on the MMU, then deciding that
we need to call the Linux handler and turning the MMU back off again,
we now go straight to the handler at the point where we would turn the
MMU on. The handler will then return to the virtual-mode code
(potentially in the module).

Along the way, this moves the setting and clearing of the HID5 DCBZ32
bit into real-mode interrupts-off code, and also makes sure that
we clear the MSR[RI] bit before loading values into SRR0/1.

The net result is that we no longer need any code addresses to be
stored in vcpu->arch.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>

Authored by Paul Mackerras; committed by Avi Kivity.
02143947 177339d7

+119 -211
+1 -3
arch/powerpc/include/asm/kvm_book3s.h
··· 141 141 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); 142 142 extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); 143 143 144 - extern void kvmppc_handler_lowmem_trampoline(void); 145 - extern void kvmppc_handler_trampoline_enter(void); 146 - extern void kvmppc_rmcall(ulong srr0, ulong srr1); 144 + extern void kvmppc_entry_trampoline(void); 147 145 extern void kvmppc_hv_entry_trampoline(void); 148 146 extern void kvmppc_load_up_fpu(void); 149 147 extern void kvmppc_load_up_altivec(void);
+1
arch/powerpc/include/asm/kvm_book3s_asm.h
··· 75 75 ulong scratch0; 76 76 ulong scratch1; 77 77 u8 in_guest; 78 + u8 restore_hid5; 78 79 79 80 #ifdef CONFIG_KVM_BOOK3S_64_HV 80 81 struct kvm_vcpu *kvm_vcpu;
-8
arch/powerpc/include/asm/kvm_host.h
··· 258 258 ulong host_stack; 259 259 u32 host_pid; 260 260 #ifdef CONFIG_PPC_BOOK3S 261 - ulong host_msr; 262 - ulong host_r2; 263 - void *host_retip; 264 - ulong trampoline_lowmem; 265 - ulong trampoline_enter; 266 - ulong highmem_handler; 267 - ulong rmcall; 268 - ulong host_paca_phys; 269 261 struct kvmppc_slb slb[64]; 270 262 int slb_max; /* 1 + index of last valid entry in slb[] */ 271 263 int slb_nr; /* total number of entries in SLB */
+1 -6
arch/powerpc/kernel/asm-offsets.c
··· 449 449 #ifdef CONFIG_PPC_BOOK3S 450 450 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); 451 451 DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); 452 - DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip)); 453 - DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr)); 454 452 DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); 455 453 DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); 456 454 DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr)); ··· 456 458 DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor)); 457 459 DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl)); 458 460 DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr)); 459 - DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem)); 460 - DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter)); 461 - DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler)); 462 - DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall)); 463 461 DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); 464 462 DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec)); 465 463 DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires)); ··· 531 537 HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); 532 538 HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); 533 539 HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); 540 + HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); 534 541 535 542 #ifdef CONFIG_KVM_BOOK3S_64_HV 536 543 HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
+1 -1
arch/powerpc/kvm/book3s_32_sr.S
··· 31 31 * R1 = host R1 32 32 * R2 = host R2 33 33 * R3 = shadow vcpu 34 - * all other volatile GPRS = free 34 + * all other volatile GPRS = free except R4, R6 35 35 * SVCPU[CR] = guest CR 36 36 * SVCPU[XER] = guest XER 37 37 * SVCPU[CTR] = guest CTR
+1 -1
arch/powerpc/kvm/book3s_64_slb.S
··· 53 53 * R1 = host R1 54 54 * R2 = host R2 55 55 * R3 = shadow vcpu 56 - * all other volatile GPRS = free 56 + * all other volatile GPRS = free except R4, R6 57 57 * SVCPU[CR] = guest CR 58 58 * SVCPU[XER] = guest XER 59 59 * SVCPU[CTR] = guest CTR
+1 -3
arch/powerpc/kvm/book3s_exports.c
··· 23 23 #ifdef CONFIG_KVM_BOOK3S_64_HV 24 24 EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline); 25 25 #else 26 - EXPORT_SYMBOL_GPL(kvmppc_handler_trampoline_enter); 27 - EXPORT_SYMBOL_GPL(kvmppc_handler_lowmem_trampoline); 28 - EXPORT_SYMBOL_GPL(kvmppc_rmcall); 26 + EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline); 29 27 EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu); 30 28 #ifdef CONFIG_ALTIVEC 31 29 EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
+5 -124
arch/powerpc/kvm/book3s_interrupts.S
··· 29 29 #define ULONG_SIZE 8
30 30 #define FUNC(name) GLUE(.,name)
31 31
32 - #define GET_SHADOW_VCPU_R13
33 -
34 - #define DISABLE_INTERRUPTS \
35 - mfmsr r0; \
36 - rldicl r0,r0,48,1; \
37 - rotldi r0,r0,16; \
38 - mtmsrd r0,1; \
39 -
40 32 #elif defined(CONFIG_PPC_BOOK3S_32)
41 33
42 34 #define ULONG_SIZE 4
43 35 #define FUNC(name) name
44 -
45 - #define GET_SHADOW_VCPU_R13 \
46 - lwz r13, (THREAD + THREAD_KVM_SVCPU)(r2)
47 -
48 - #define DISABLE_INTERRUPTS \
49 - mfmsr r0; \
50 - rlwinm r0,r0,0,17,15; \
51 - mtmsr r0; \
52 36
53 37 #endif /* CONFIG_PPC_BOOK3S_XX */
54 38
··· 92 108
93 109 kvm_start_lightweight:
94 110
95 - GET_SHADOW_VCPU_R13
96 - PPC_LL r3, VCPU_HIGHMEM_HANDLER(r4)
97 - PPC_STL r3, HSTATE_VMHANDLER(r13)
98 -
99 - PPC_LL r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
100 -
101 - DISABLE_INTERRUPTS
102 -
103 111 #ifdef CONFIG_PPC_BOOK3S_64
104 - /* Some guests may need to have dcbz set to 32 byte length.
105 - *
106 - * Usually we ensure that by patching the guest's instructions
107 - * to trap on dcbz and emulate it in the hypervisor.
108 - *
109 - * If we can, we should tell the CPU to use 32 byte dcbz though,
110 - * because that's a lot faster.
111 - */
112 -
113 112 PPC_LL r3, VCPU_HFLAGS(r4)
114 - rldicl. r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */
115 - beq no_dcbz32_on
116 -
117 - mfspr r3,SPRN_HID5
118 - ori r3, r3, 0x80 /* XXX HID5_dcbz32 = 0x80 */
119 - mtspr SPRN_HID5,r3
120 -
121 - no_dcbz32_on:
122 -
113 + rldicl r3, r3, 0, 63 /* r3 &= 1 */
114 + stb r3, HSTATE_RESTORE_HID5(r13)
123 115 #endif /* CONFIG_PPC_BOOK3S_64 */
124 116
125 - PPC_LL r6, VCPU_RMCALL(r4)
126 - mtctr r6
127 -
128 - PPC_LL r3, VCPU_TRAMPOLINE_ENTER(r4)
129 - LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
117 + PPC_LL r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */
130 118
131 119 /* Jump to segment patching handler and into our guest */
132 - bctr
120 + bl FUNC(kvmppc_entry_trampoline)
121 + nop
133 122
134 123 /*
135 124 * This is the handler in module memory. It gets jumped at from the
··· 127 170 /* R7 = vcpu */
128 171 PPC_LL r7, GPR4(r1)
129 172
130 - #ifdef CONFIG_PPC_BOOK3S_64
131 -
132 - PPC_LL r5, VCPU_HFLAGS(r7)
133 - rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */
134 - beq no_dcbz32_off
135 -
136 - li r4, 0
137 - mfspr r5,SPRN_HID5
138 - rldimi r5,r4,6,56
139 - mtspr SPRN_HID5,r5
140 -
141 - no_dcbz32_off:
142 -
143 - #endif /* CONFIG_PPC_BOOK3S_64 */
144 -
145 173 PPC_STL r14, VCPU_GPR(r14)(r7)
146 174 PPC_STL r15, VCPU_GPR(r15)(r7)
147 175 PPC_STL r16, VCPU_GPR(r16)(r7)
··· 145 203 PPC_STL r29, VCPU_GPR(r29)(r7)
146 204 PPC_STL r30, VCPU_GPR(r30)(r7)
147 205 PPC_STL r31, VCPU_GPR(r31)(r7)
148 -
149 - /* Restore host msr -> SRR1 */
150 - PPC_LL r6, VCPU_HOST_MSR(r7)
151 -
152 - /*
153 - * For some interrupts, we need to call the real Linux
154 - * handler, so it can do work for us. This has to happen
155 - * as if the interrupt arrived from the kernel though,
156 - * so let's fake it here where most state is restored.
157 - *
158 - * Call Linux for hardware interrupts/decrementer
159 - * r3 = address of interrupt handler (exit reason)
160 - */
161 -
162 - cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
163 - beq call_linux_handler
164 - cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
165 - beq call_linux_handler
166 - cmpwi r12, BOOK3S_INTERRUPT_PERFMON
167 - beq call_linux_handler
168 -
169 - /* Back to EE=1 */
170 - mtmsr r6
171 - sync
172 - b kvm_return_point
173 -
174 - call_linux_handler:
175 -
176 - /*
177 - * If we land here we need to jump back to the handler we
178 - * came from.
179 - *
180 - * We have a page that we can access from real mode, so let's
181 - * jump back to that and use it as a trampoline to get back into the
182 - * interrupt handler!
183 - *
184 - * R3 still contains the exit code,
185 - * R5 VCPU_HOST_RETIP and
186 - * R6 VCPU_HOST_MSR
187 - */
188 -
189 - /* Restore host IP -> SRR0 */
190 - PPC_LL r5, VCPU_HOST_RETIP(r7)
191 -
192 - /* XXX Better move to a safe function?
193 - * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
194 -
195 - mtlr r12
196 -
197 - PPC_LL r4, VCPU_TRAMPOLINE_LOWMEM(r7)
198 - mtsrr0 r4
199 - LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
200 - mtsrr1 r3
201 -
202 - RFI
203 -
204 - .global kvm_return_point
205 - kvm_return_point:
206 -
207 - /* Jump back to lightweight entry if we're supposed to */
208 - /* go back into the guest */
209 206
210 207 /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
211 208 mr r5, r12
-12
arch/powerpc/kvm/book3s_pr.c
··· 875 875 if (!p) 876 876 goto uninit_vcpu; 877 877 878 - vcpu->arch.host_retip = kvm_return_point; 879 - vcpu->arch.host_msr = mfmsr(); 880 878 #ifdef CONFIG_PPC_BOOK3S_64 881 879 /* default to book3s_64 (970fx) */ 882 880 vcpu->arch.pvr = 0x3C0301; ··· 884 886 #endif 885 887 kvmppc_set_pvr(vcpu, vcpu->arch.pvr); 886 888 vcpu->arch.slb_nr = 64; 887 - 888 - /* remember where some real-mode handlers are */ 889 - vcpu->arch.trampoline_lowmem = __pa(kvmppc_handler_lowmem_trampoline); 890 - vcpu->arch.trampoline_enter = __pa(kvmppc_handler_trampoline_enter); 891 - vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem; 892 - #ifdef CONFIG_PPC_BOOK3S_64 893 - vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall; 894 - #else 895 - vcpu->arch.rmcall = (ulong)kvmppc_rmcall; 896 - #endif 897 889 898 890 vcpu->arch.shadow_msr = MSR_USER64; 899 891
+16 -33
arch/powerpc/kvm/book3s_rmhandlers.S
··· 36 36 37 37 #if defined(CONFIG_PPC_BOOK3S_64) 38 38 39 - #define LOAD_SHADOW_VCPU(reg) GET_PACA(reg) 40 - #define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR) 41 39 #define FUNC(name) GLUE(.,name) 40 + #define MTMSR_EERI(reg) mtmsrd (reg),1 42 41 43 42 .globl kvmppc_skip_interrupt 44 43 kvmppc_skip_interrupt: ··· 67 68 68 69 #elif defined(CONFIG_PPC_BOOK3S_32) 69 70 70 - #define MSR_NOIRQ MSR_KERNEL 71 71 #define FUNC(name) name 72 + #define MTMSR_EERI(reg) mtmsr (reg) 72 73 73 74 .macro INTERRUPT_TRAMPOLINE intno 74 75 ··· 169 170 #endif 170 171 171 172 /* 172 - * This trampoline brings us back to a real mode handler 173 + * Call kvmppc_handler_trampoline_enter in real mode 173 174 * 174 - * Input Registers: 175 - * 176 - * R5 = SRR0 177 - * R6 = SRR1 178 - * LR = real-mode IP 179 - * 175 + * On entry, r4 contains the guest shadow MSR 180 176 */ 181 - .global kvmppc_handler_lowmem_trampoline 182 - kvmppc_handler_lowmem_trampoline: 177 + _GLOBAL(kvmppc_entry_trampoline) 178 + mfmsr r5 179 + LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter) 180 + toreal(r7) 183 181 184 - mtsrr0 r5 182 + li r9, MSR_RI 183 + ori r9, r9, MSR_EE 184 + andc r9, r5, r9 /* Clear EE and RI in MSR value */ 185 + li r6, MSR_IR | MSR_DR 186 + ori r6, r6, MSR_EE 187 + andc r6, r5, r6 /* Clear EE, DR and IR in MSR value */ 188 + MTMSR_EERI(r9) /* Clear EE and RI in MSR */ 189 + mtsrr0 r7 /* before we set srr0/1 */ 185 190 mtsrr1 r6 186 - blr 187 - kvmppc_handler_lowmem_trampoline_end: 188 - 189 - /* 190 - * Call a function in real mode 191 - * 192 - * Input Registers: 193 - * 194 - * R3 = function 195 - * R4 = MSR 196 - * R5 = scratch register 197 - * 198 - */ 199 - _GLOBAL(kvmppc_rmcall) 200 - LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ) 201 - mtmsr r5 /* Disable relocation and interrupts, so mtsrr 202 - doesn't get interrupted */ 203 - sync 204 - mtsrr0 r3 205 - mtsrr1 r4 206 191 RFI 207 192 208 193 #if defined(CONFIG_PPC_BOOK3S_32)
+92 -20
arch/powerpc/kvm/book3s_segment.S
··· 23 23
24 24 #define GET_SHADOW_VCPU(reg) \
25 25 mr reg, r13
26 + #define MTMSR_EERI(reg) mtmsrd (reg),1
26 27
27 28 #elif defined(CONFIG_PPC_BOOK3S_32)
28 29
··· 31 30 tophys(reg, r2); \
32 31 lwz reg, (THREAD + THREAD_KVM_SVCPU)(reg); \
33 32 tophys(reg, reg)
33 + #define MTMSR_EERI(reg) mtmsr (reg)
34 34
35 35 #endif
36 36
··· 59 57 /* Required state:
60 58 *
61 59 * MSR = ~IR|DR
62 - * R13 = PACA
63 60 * R1 = host R1
64 61 * R2 = host R2
65 - * R10 = guest MSR
62 + * R4 = guest shadow MSR
63 + * R5 = normal host MSR
64 + * R6 = current host MSR (EE, IR, DR off)
65 + * LR = highmem guest exit code
66 66 * all other volatile GPRS = free
67 67 * SVCPU[CR] = guest CR
68 68 * SVCPU[XER] = guest XER
··· 75 71 /* r3 = shadow vcpu */
76 72 GET_SHADOW_VCPU(r3)
77 73
74 + /* Save guest exit handler address and MSR */
75 + mflr r0
76 + PPC_STL r0, HSTATE_VMHANDLER(r3)
77 + PPC_STL r5, HSTATE_HOST_MSR(r3)
78 +
78 79 /* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
79 80 PPC_STL r1, HSTATE_HOST_R1(r3)
80 81 PPC_STL r2, HSTATE_HOST_R2(r3)
81 -
82 - /* Move SRR0 and SRR1 into the respective regs */
83 - PPC_LL r9, SVCPU_PC(r3)
84 - mtsrr0 r9
85 - mtsrr1 r10
86 82
87 83 /* Activate guest mode, so faults get handled by KVM */
88 84 li r11, KVM_GUEST_MODE_GUEST
··· 91 87 /* Switch to guest segment. This is subarch specific. */
92 88 LOAD_GUEST_SEGMENTS
93 89
90 + #ifdef CONFIG_PPC_BOOK3S_64
91 + /* Some guests may need to have dcbz set to 32 byte length.
92 + *
93 + * Usually we ensure that by patching the guest's instructions
94 + * to trap on dcbz and emulate it in the hypervisor.
95 + *
96 + * If we can, we should tell the CPU to use 32 byte dcbz though,
97 + * because that's a lot faster.
98 + */
99 + lbz r0, HSTATE_RESTORE_HID5(r3)
100 + cmpwi r0, 0
101 + beq no_dcbz32_on
102 +
103 + mfspr r0,SPRN_HID5
104 + ori r0, r0, 0x80 /* XXX HID5_dcbz32 = 0x80 */
105 + mtspr SPRN_HID5,r0
106 + no_dcbz32_on:
107 +
108 + #endif /* CONFIG_PPC_BOOK3S_64 */
109 +
94 110 /* Enter guest */
95 111
96 - PPC_LL r4, SVCPU_CTR(r3)
97 - PPC_LL r5, SVCPU_LR(r3)
98 - lwz r6, SVCPU_CR(r3)
99 - lwz r7, SVCPU_XER(r3)
112 + PPC_LL r8, SVCPU_CTR(r3)
113 + PPC_LL r9, SVCPU_LR(r3)
114 + lwz r10, SVCPU_CR(r3)
115 + lwz r11, SVCPU_XER(r3)
100 116
101 - mtctr r4
102 - mtlr r5
103 - mtcr r6
104 - mtxer r7
117 + mtctr r8
118 + mtlr r9
119 + mtcr r10
120 + mtxer r11
121 +
122 + /* Move SRR0 and SRR1 into the respective regs */
123 + PPC_LL r9, SVCPU_PC(r3)
124 + /* First clear RI in our current MSR value */
125 + li r0, MSR_RI
126 + andc r6, r6, r0
127 + MTMSR_EERI(r6)
128 + mtsrr0 r9
129 + mtsrr1 r4
105 130
106 131 PPC_LL r0, SVCPU_R0(r3)
107 132 PPC_LL r1, SVCPU_R1(r3)
··· 292 259 /* Switch back to host MMU */
293 260 LOAD_HOST_SEGMENTS
294 261
262 + #ifdef CONFIG_PPC_BOOK3S_64
263 +
264 + lbz r5, HSTATE_RESTORE_HID5(r13)
265 + cmpwi r5, 0
266 + beq no_dcbz32_off
267 +
268 + li r4, 0
269 + mfspr r5,SPRN_HID5
270 + rldimi r5,r4,6,56
271 + mtspr SPRN_HID5,r5
272 +
273 + no_dcbz32_off:
274 +
275 + #endif /* CONFIG_PPC_BOOK3S_64 */
276 +
277 + /*
278 + * For some interrupts, we need to call the real Linux
279 + * handler, so it can do work for us. This has to happen
280 + * as if the interrupt arrived from the kernel though,
281 + * so let's fake it here where most state is restored.
282 + *
283 + * Having set up SRR0/1 with the address where we want
284 + * to continue with relocation on (potentially in module
285 + * space), we either just go straight there with rfi[d],
286 + * or we jump to an interrupt handler with bctr if there
287 + * is an interrupt to be handled first. In the latter
288 + * case, the rfi[d] at the end of the interrupt handler
289 + * will get us back to where we want to continue.
290 + */
291 +
292 + cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
293 + beq 1f
294 + cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
295 + beq 1f
296 + cmpwi r12, BOOK3S_INTERRUPT_PERFMON
297 + 1: mtctr r12
298 +
295 299 /* Register usage at this point:
296 300 *
297 301 * R1 = host R1
··· 339 269 *
340 270 *
341 271 */
342 - /* RFI into the highmem handler */
343 - mfmsr r7
344 - ori r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME /* Enable paging */
345 - mtsrr1 r7
346 - /* Load highmem handler address */
272 + PPC_LL r6, HSTATE_HOST_MSR(r13)
347 273 PPC_LL r8, HSTATE_VMHANDLER(r13)
274 +
275 + /* Restore host msr -> SRR1 */
276 + mtsrr1 r6
277 + /* Load highmem handler address */
348 278 mtsrr0 r8
349 279
280 + /* RFI into the highmem handler, or jump to interrupt handler */
281 + beqctr
350 282 RFI
351 283 kvmppc_handler_trampoline_exit_end: