
KVM: PPC: Split host-state fields out of kvmppc_book3s_shadow_vcpu

There are several fields in struct kvmppc_book3s_shadow_vcpu that
temporarily store bits of host state while a guest is running,
rather than anything relating to the particular guest or vcpu.
This splits them out into a new kvmppc_host_state structure and
modifies the definitions in asm-offsets.c to suit.

On 32-bit, we have a kvmppc_host_state structure inside the
kvmppc_book3s_shadow_vcpu since the assembly code needs to be able
to get to them both with one pointer. On 64-bit they are separate
fields in the PACA. This means that on 64-bit we don't need to
copy the kvmppc_host_state in and out on vcpu load/unload, and
in future will mean that the book3s_hv code doesn't need a
shadow_vcpu struct in the PACA at all. That does mean that we
have to be careful not to rely on any values persisting in the
hstate field of the paca across any point where we could block
or get preempted.
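[Editor's note: a condensed sketch of the resulting layouts, abbreviated from the diffs below; field lists are trimmed and this is illustrative only.]

	/* 64-bit: both structures sit side by side in the PACA */
	struct paca_struct {
		/* ... */
		struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
		struct kvmppc_host_state kvm_hstate;
	};

	/* 32-bit: host state nests inside the shadow vcpu, so the
	 * assembly code can reach both through the one pointer it
	 * keeps in r13 */
	struct kvmppc_book3s_shadow_vcpu {
		ulong gpr[14];
		/* ... guest-side fields ... */
	#ifdef CONFIG_PPC_BOOK3S_32
		u32 sr[16];		/* Guest SRs */
		struct kvmppc_host_state hstate;
	#endif
	};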

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>

Authored by: Paul Mackerras
Committed by: Avi Kivity
3c42bf8a 923c53ca

8 files changed, 129 insertions(+), 122 deletions(-)
arch/powerpc/include/asm/exception-64s.h (+5 -5)

···
 	EXCEPTION_PROLOG_PSERIES_1(label, h);
 
 #define __KVMTEST(n)						\
-	lbz	r10,PACA_KVM_SVCPU+SVCPU_IN_GUEST(r13);		\
+	lbz	r10,HSTATE_IN_GUEST(r13);			\
 	cmpwi	r10,0;						\
 	bne	do_kvm_##n
 
 #define __KVM_HANDLER(area, h, n)				\
 do_kvm_##n:							\
 	ld	r10,area+EX_R10(r13);				\
-	stw	r9,PACA_KVM_SVCPU+SVCPU_SCRATCH1(r13);		\
+	stw	r9,HSTATE_SCRATCH1(r13);			\
 	ld	r9,area+EX_R9(r13);				\
-	std	r12,PACA_KVM_SVCPU+SVCPU_SCRATCH0(r13);		\
+	std	r12,HSTATE_SCRATCH0(r13);			\
 	li	r12,n;						\
 	b	kvmppc_interrupt
···
 	cmpwi	r10,KVM_GUEST_MODE_SKIP;			\
 	ld	r10,area+EX_R10(r13);				\
 	beq	89f;						\
-	stw	r9,PACA_KVM_SVCPU+SVCPU_SCRATCH1(r13);		\
+	stw	r9,HSTATE_SCRATCH1(r13);			\
 	ld	r9,area+EX_R9(r13);				\
-	std	r12,PACA_KVM_SVCPU+SVCPU_SCRATCH0(r13);		\
+	std	r12,HSTATE_SCRATCH0(r13);			\
 	li	r12,n;						\
 	b	kvmppc_interrupt;				\
 89:	mtocrf	0x80,r9;					\
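[Editor's note: why the addition disappears from these instructions: on 64-bit the hstate fields are now direct members of the PACA, so asm-offsets.c can emit a single r13-relative displacement instead of a base-plus-field sum. A minimal sketch follows; the stand-in structures and the padding before kvm_hstate are hypothetical, not the kernel's real layout.]

	#include <stddef.h>

	typedef unsigned long ulong;
	typedef unsigned char u8;

	/* Condensed stand-ins for the kernel structures. */
	struct kvmppc_host_state {
		ulong host_r1, host_r2, vmhandler, scratch0, scratch1;
		u8 in_guest;
	};

	struct paca_struct {
		char earlier_fields[0x100];	/* hypothetical preceding fields */
		struct kvmppc_host_state kvm_hstate;
	};

	/* Before: two constants summed in the instruction stream,
	 *	PACA_KVM_SVCPU + SVCPU_IN_GUEST
	 * After: one constant, usable directly as
	 *	lbz	r10,HSTATE_IN_GUEST(r13)
	 */
	enum { HSTATE_IN_GUEST = offsetof(struct paca_struct, kvm_hstate.in_guest) };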
arch/powerpc/include/asm/kvm_book3s_asm.h (+19 -8)

···
 
 #else	/*__ASSEMBLY__ */
 
+/*
+ * This struct goes in the PACA on 64-bit processors.  It is used
+ * to store host state that needs to be saved when we enter a guest
+ * and restored when we exit, but isn't specific to any particular
+ * guest or vcpu.  It also has some scratch fields used by the guest
+ * exit code.
+ */
+struct kvmppc_host_state {
+	ulong host_r1;
+	ulong host_r2;
+	ulong vmhandler;
+	ulong scratch0;
+	ulong scratch1;
+	u8 in_guest;
+};
+
 struct kvmppc_book3s_shadow_vcpu {
 	ulong gpr[14];
 	u32 cr;
···
 	ulong shadow_srr1;
 	ulong fault_dar;
 
-	ulong host_r1;
-	ulong host_r2;
-	ulong handler;
-	ulong scratch0;
-	ulong scratch1;
-	ulong vmhandler;
-	u8 in_guest;
-
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 sr[16];			/* Guest SRs */
+
+	struct kvmppc_host_state hstate;
 #endif
+
 #ifdef CONFIG_PPC_BOOK3S_64
 	u8 slb_max;			/* highest used guest slb entry */
 	struct	{
arch/powerpc/include/asm/paca.h (+1)

···
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
 	/* We use this to store guest state in */
 	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
+	struct kvmppc_host_state kvm_hstate;
 #endif
 };
 
arch/powerpc/kernel/asm-offsets.c (+48 -48)

···
 	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
 	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
 	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-	DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
-	DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
-	DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
-#endif
 #endif /* CONFIG_PPC64 */
 
 	/* RTAS */
···
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
 	DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
-	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
-			   offsetof(struct kvmppc_vcpu_book3s, vcpu));
-	DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
-	DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
-	DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
-	DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
-	DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
-	DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
-	DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
-	DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
-	DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
-	DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
-	DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
-	DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
-	DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
-	DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
-	DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
-	DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
-	DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
-	DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
-	DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
-	DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
-	DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
-	DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					 vmhandler));
-	DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					scratch0));
-	DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					scratch1));
-	DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					in_guest));
-	DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					   fault_dsisr));
-	DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					 fault_dar));
-	DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					 last_inst));
-	DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					   shadow_srr1));
-#ifdef CONFIG_PPC_BOOK3S_32
-	DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
+
+#ifdef CONFIG_PPC_BOOK3S_64
+# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
+# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
+#else	/* 32-bit */
+# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
+# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
 #endif
-#else
+
+	SVCPU_FIELD(SVCPU_CR, cr);
+	SVCPU_FIELD(SVCPU_XER, xer);
+	SVCPU_FIELD(SVCPU_CTR, ctr);
+	SVCPU_FIELD(SVCPU_LR, lr);
+	SVCPU_FIELD(SVCPU_PC, pc);
+	SVCPU_FIELD(SVCPU_R0, gpr[0]);
+	SVCPU_FIELD(SVCPU_R1, gpr[1]);
+	SVCPU_FIELD(SVCPU_R2, gpr[2]);
+	SVCPU_FIELD(SVCPU_R3, gpr[3]);
+	SVCPU_FIELD(SVCPU_R4, gpr[4]);
+	SVCPU_FIELD(SVCPU_R5, gpr[5]);
+	SVCPU_FIELD(SVCPU_R6, gpr[6]);
+	SVCPU_FIELD(SVCPU_R7, gpr[7]);
+	SVCPU_FIELD(SVCPU_R8, gpr[8]);
+	SVCPU_FIELD(SVCPU_R9, gpr[9]);
+	SVCPU_FIELD(SVCPU_R10, gpr[10]);
+	SVCPU_FIELD(SVCPU_R11, gpr[11]);
+	SVCPU_FIELD(SVCPU_R12, gpr[12]);
+	SVCPU_FIELD(SVCPU_R13, gpr[13]);
+	SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
+	SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
+	SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
+	SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
+#ifdef CONFIG_PPC_BOOK3S_32
+	SVCPU_FIELD(SVCPU_SR, sr);
+#endif
+#ifdef CONFIG_PPC64
+	SVCPU_FIELD(SVCPU_SLB, slb);
+	SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
+#endif
+
+	HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
+	HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
+	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
+	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
+	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
+
+#else /* CONFIG_PPC_BOOK3S */
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
···
 	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
 	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
 #endif /* CONFIG_PPC_BOOK3S */
-#endif
+#endif /* CONFIG_KVM */
 
 #ifdef CONFIG_KVM_GUEST
 	DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
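[Editor's note: the new helpers are a thin layer over the usual DEFINE()/offsetof() pattern in asm-offsets.c. A sketch of the 64-bit expansion, written as C comments:]

	/* SVCPU_FIELD(SVCPU_CR, cr) expands to:
	 *	DEFINE(SVCPU_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
	 * HSTATE_FIELD(HSTATE_IN_GUEST, in_guest) expands to:
	 *	DEFINE(HSTATE_IN_GUEST,
	 *	       offsetof(struct paca_struct, kvm_hstate.in_guest));
	 * On 32-bit the same invocations resolve against struct
	 * kvmppc_book3s_shadow_vcpu (with hstate.f for HSTATE_FIELD), so
	 * the .S files can use one symbol name on both subarchitectures.
	 */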
arch/powerpc/kernel/exceptions-64s.S (+1 -1)

···
 	srdi	r10,r10,60
 	rlwimi	r10,r9,16,0x20
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-	lbz	r9,PACA_KVM_SVCPU+SVCPU_IN_GUEST(r13)
+	lbz	r9,HSTATE_IN_GUEST(r13)
 	rlwimi	r10,r9,8,0x300
 #endif
 	mfcr	r9
arch/powerpc/kvm/book3s_interrupts.S (+7 -14)

···
 #define ULONG_SIZE		8
 #define FUNC(name)		GLUE(.,name)
 
-#define GET_SHADOW_VCPU(reg)	\
-	addi	reg, r13, PACA_KVM_SVCPU
+#define GET_SHADOW_VCPU_R13
 
 #define DISABLE_INTERRUPTS	\
 	mfmsr	r0;		\
···
 #define ULONG_SIZE		4
 #define FUNC(name)		name
 
-#define GET_SHADOW_VCPU(reg)	\
-	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(r2)
+#define GET_SHADOW_VCPU_R13	\
+	lwz	r13, (THREAD + THREAD_KVM_SVCPU)(r2)
 
 #define DISABLE_INTERRUPTS	\
 	mfmsr	r0;		\
···
 	/* Load non-volatile guest state from the vcpu */
 	VCPU_LOAD_NVGPRS(r4)
 
-	GET_SHADOW_VCPU(r5)
-
-	/* Save R1/R2 in the PACA */
-	PPC_STL	r1, SVCPU_HOST_R1(r5)
-	PPC_STL	r2, SVCPU_HOST_R2(r5)
-
-	/* XXX swap in/out on load? */
-	PPC_LL	r3, VCPU_HIGHMEM_HANDLER(r4)
-	PPC_STL	r3, SVCPU_VMHANDLER(r5)
-
 kvm_start_lightweight:
+
+	GET_SHADOW_VCPU_R13
+	PPC_LL	r3, VCPU_HIGHMEM_HANDLER(r4)
+	PPC_STL	r3, HSTATE_VMHANDLER(r13)
 
 	PPC_LL	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
 
arch/powerpc/kvm/book3s_rmhandlers.S (+8 -10)

···
 #if defined(CONFIG_PPC_BOOK3S_64)
 
 #define LOAD_SHADOW_VCPU(reg)	GET_PACA(reg)
-#define SHADOW_VCPU_OFF		PACA_KVM_SVCPU
 #define MSR_NOIRQ		MSR_KERNEL & ~(MSR_IR | MSR_DR)
 #define FUNC(name)		GLUE(.,name)
···
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
-#define SHADOW_VCPU_OFF		0
 #define MSR_NOIRQ		MSR_KERNEL
 #define FUNC(name)		name
···
 	b	kvmppc_resume_\intno	/* Get back original handler */
 
 1:	tophys(r13, r13)
-	stw	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+	stw	r12, HSTATE_SCRATCH1(r13)
 	mfspr	r12, SPRN_SPRG_SCRATCH1
-	stw	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
-	lbz	r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+	stw	r12, HSTATE_SCRATCH0(r13)
+	lbz	r12, HSTATE_IN_GUEST(r13)
 	cmpwi	r12, KVM_GUEST_MODE_NONE
 	bne	..kvmppc_handler_hasmagic_\intno
 	/* No KVM guest? Then jump back to the Linux handler! */
-	lwz	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+	lwz	r12, HSTATE_SCRATCH1(r13)
 	b	2b
 
 	/* Now we know we're handling a KVM guest */
···
  *
  * R12            = free
  * R13            = Shadow VCPU (PACA)
- * SVCPU.SCRATCH0 = guest R12
- * SVCPU.SCRATCH1 = guest CR
+ * HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH1 = guest CR
  * SPRG_SCRATCH0  = guest R13
  *
  */
···
 	mtsrr0	r12
 
 	/* Clean up all state */
-	lwz	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+	lwz	r12, HSTATE_SCRATCH1(r13)
 	mtcr	r12
-	PPC_LL	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
+	PPC_LL	r12, HSTATE_SCRATCH0(r13)
 	GET_SCRATCH0(r13)
 
 	/* And get back into the code */
arch/powerpc/kvm/book3s_segment.S (+40 -36)

···
 #if defined(CONFIG_PPC_BOOK3S_64)
 
 #define GET_SHADOW_VCPU(reg)	\
-	addi	reg, r13, PACA_KVM_SVCPU
+	mr	reg, r13
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
···
 	/* r3 = shadow vcpu */
 	GET_SHADOW_VCPU(r3)
 
+	/* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
+	PPC_STL	r1, HSTATE_HOST_R1(r3)
+	PPC_STL	r2, HSTATE_HOST_R2(r3)
+
 	/* Move SRR0 and SRR1 into the respective regs */
 	PPC_LL	r9, SVCPU_PC(r3)
 	mtsrr0	r9
···
 
 	/* Activate guest mode, so faults get handled by KVM */
 	li	r11, KVM_GUEST_MODE_GUEST
-	stb	r11, SVCPU_IN_GUEST(r3)
+	stb	r11, HSTATE_IN_GUEST(r3)
 
 	/* Switch to guest segment. This is subarch specific. */
 	LOAD_GUEST_SEGMENTS
···
  *
  * SPRG_SCRATCH0  = guest R13
  * R12            = exit handler id
- * R13            = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
- * SVCPU.SCRATCH0 = guest R12
- * SVCPU.SCRATCH1 = guest CR
+ * R13            = shadow vcpu (32-bit) or PACA (64-bit)
+ * HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH1 = guest CR
  *
  */
 
 	/* Save registers */
 
-	PPC_STL	r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13)
-	PPC_STL	r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13)
-	PPC_STL	r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13)
-	PPC_STL	r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13)
-	PPC_STL	r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13)
-	PPC_STL	r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13)
-	PPC_STL	r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13)
-	PPC_STL	r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13)
-	PPC_STL	r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13)
-	PPC_STL	r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13)
-	PPC_STL	r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13)
-	PPC_STL	r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13)
+	PPC_STL	r0, SVCPU_R0(r13)
+	PPC_STL	r1, SVCPU_R1(r13)
+	PPC_STL	r2, SVCPU_R2(r13)
+	PPC_STL	r3, SVCPU_R3(r13)
+	PPC_STL	r4, SVCPU_R4(r13)
+	PPC_STL	r5, SVCPU_R5(r13)
+	PPC_STL	r6, SVCPU_R6(r13)
+	PPC_STL	r7, SVCPU_R7(r13)
+	PPC_STL	r8, SVCPU_R8(r13)
+	PPC_STL	r9, SVCPU_R9(r13)
+	PPC_STL	r10, SVCPU_R10(r13)
+	PPC_STL	r11, SVCPU_R11(r13)
 
 	/* Restore R1/R2 so we can handle faults */
-	PPC_LL	r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13)
-	PPC_LL	r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)
+	PPC_LL	r1, HSTATE_HOST_R1(r13)
+	PPC_LL	r2, HSTATE_HOST_R2(r13)
 
 	/* Save guest PC and MSR */
 #ifdef CONFIG_PPC64
···
 1:	mfsrr0	r3
 	mfsrr1	r4
 2:
-	PPC_STL	r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
-	PPC_STL	r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)
+	PPC_STL	r3, SVCPU_PC(r13)
+	PPC_STL	r4, SVCPU_SHADOW_SRR1(r13)
 
 	/* Get scratch'ed off registers */
 	GET_SCRATCH0(r9)
-	PPC_LL	r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
-	lwz	r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+	PPC_LL	r8, HSTATE_SCRATCH0(r13)
+	lwz	r7, HSTATE_SCRATCH1(r13)
 
-	PPC_STL	r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13)
-	PPC_STL	r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13)
-	stw	r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13)
+	PPC_STL	r9, SVCPU_R13(r13)
+	PPC_STL	r8, SVCPU_R12(r13)
+	stw	r7, SVCPU_CR(r13)
 
 	/* Save more register state */
 
···
 	mfctr	r8
 	mflr	r9
 
-	stw	r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13)
-	PPC_STL	r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13)
-	stw	r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13)
-	PPC_STL	r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13)
-	PPC_STL	r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13)
+	stw	r5, SVCPU_XER(r13)
+	PPC_STL	r6, SVCPU_FAULT_DAR(r13)
+	stw	r7, SVCPU_FAULT_DSISR(r13)
+	PPC_STL	r8, SVCPU_CTR(r13)
+	PPC_STL	r9, SVCPU_LR(r13)
 
 	/*
 	 * In order for us to easily get the last instruction,
···
 	/* Set guest mode to 'jump over instruction' so if lwz faults
 	 * we'll just continue at the next IP. */
 	li	r9, KVM_GUEST_MODE_SKIP
-	stb	r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+	stb	r9, HSTATE_IN_GUEST(r13)
 
 	/* 1) enable paging for data */
 	mfmsr	r9
···
 	sync
 
 #endif
-	stw	r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13)
+	stw	r0, SVCPU_LAST_INST(r13)
 
 no_ld_last_inst:
 
 	/* Unset guest mode */
 	li	r9, KVM_GUEST_MODE_NONE
-	stb	r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+	stb	r9, HSTATE_IN_GUEST(r13)
 
 	/* Switch back to host MMU */
 	LOAD_HOST_SEGMENTS
···
  * R1       = host R1
  * R2       = host R2
  * R12      = exit handler id
- * R13      = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
+ * R13      = shadow vcpu (32-bit) or PACA (64-bit)
  * SVCPU.*  = guest *
  *
  */
···
 	ori	r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME	/* Enable paging */
 	mtsrr1	r7
 	/* Load highmem handler address */
-	PPC_LL	r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13)
+	PPC_LL	r8, HSTATE_VMHANDLER(r13)
 	mtsrr0	r8
 
 	RFI