
KVM: PPC: Make highmem code generic

Since we now have several fields in the shadow VCPU, we also change
the internal calling convention between the different entry/exit code
layers.

Let's reflect that in the IR=1 code and make sure we use "long" defines
for long field access.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
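
The "long" defines the message refers to are the word-size-agnostic load/store mnemonics (PPC_LL, PPC_STL, PPC_STLU) that replace the 64-bit-only ld/std/stdu throughout the diff below, together with a per-subarch ULONG_SIZE. A simplified sketch of how such helpers resolve (cf. arch/powerpc/include/asm/asm-compat.h; the real header wraps each mnemonic so it is usable from both C and assembly, this is not the verbatim kernel code):

    /* Simplified sketch, not the verbatim kernel header. */
    #ifdef __powerpc64__
    #define PPC_LL   ld      /* load a register-sized (8-byte) value */
    #define PPC_STL  std     /* store a register-sized value         */
    #define PPC_STLU stdu    /* store with update (stack frame push) */
    #else
    #define PPC_LL   lwz     /* 4-byte load on 32-bit                */
    #define PPC_STL  stw
    #define PPC_STLU stwu
    #endif

With ULONG_SIZE defined as 8 or 4 to match, VCPU_GPR(n) computes the correct byte offset into the vcpu's GPR array on either word size, so a single copy of the entry/exit code assembles for both Book3S_64 and Book3S_32.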

Authored by Alexander Graf, committed by Avi Kivity
commit b79fcdf6, parent 8c3a4e0b

+100 -99
arch/powerpc/kvm/book3s_interrupts.S
···
 #include <asm/asm-offsets.h>
 #include <asm/exception-64s.h>
 
-#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit
-#define ULONG_SIZE 8
-#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
+#if defined(CONFIG_PPC_BOOK3S_64)
 
-.macro DISABLE_INTERRUPTS
-       mfmsr   r0
-       rldicl  r0,r0,48,1
-       rotldi  r0,r0,16
-       mtmsrd  r0,1
-.endm
+#define ULONG_SIZE 8
+#define FUNC(name) GLUE(.,name)
 
+#define GET_SHADOW_VCPU(reg)    \
+       addi    reg, r13, PACA_KVM_SVCPU
+
+#define DISABLE_INTERRUPTS      \
+       mfmsr   r0;             \
+       rldicl  r0,r0,48,1;     \
+       rotldi  r0,r0,16;       \
+       mtmsrd  r0,1;           \
+
+#elif defined(CONFIG_PPC_BOOK3S_32)
+
+#define ULONG_SIZE 4
+#define FUNC(name) name
+
+#define GET_SHADOW_VCPU(reg)    \
+       lwz     reg, (THREAD + THREAD_KVM_SVCPU)(r2)
+
+#define DISABLE_INTERRUPTS      \
+       mfmsr   r0;             \
+       rlwinm  r0,r0,0,17,15;  \
+       mtmsr   r0;             \
+
+#endif /* CONFIG_PPC_BOOK3S_XX */
+
+
+#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
 #define VCPU_LOAD_NVGPRS(vcpu) \
-       ld      r14, VCPU_GPR(r14)(vcpu); \
-       ld      r15, VCPU_GPR(r15)(vcpu); \
-       ld      r16, VCPU_GPR(r16)(vcpu); \
-       ld      r17, VCPU_GPR(r17)(vcpu); \
-       ld      r18, VCPU_GPR(r18)(vcpu); \
-       ld      r19, VCPU_GPR(r19)(vcpu); \
-       ld      r20, VCPU_GPR(r20)(vcpu); \
-       ld      r21, VCPU_GPR(r21)(vcpu); \
-       ld      r22, VCPU_GPR(r22)(vcpu); \
-       ld      r23, VCPU_GPR(r23)(vcpu); \
-       ld      r24, VCPU_GPR(r24)(vcpu); \
-       ld      r25, VCPU_GPR(r25)(vcpu); \
-       ld      r26, VCPU_GPR(r26)(vcpu); \
-       ld      r27, VCPU_GPR(r27)(vcpu); \
-       ld      r28, VCPU_GPR(r28)(vcpu); \
-       ld      r29, VCPU_GPR(r29)(vcpu); \
-       ld      r30, VCPU_GPR(r30)(vcpu); \
-       ld      r31, VCPU_GPR(r31)(vcpu); \
+       PPC_LL  r14, VCPU_GPR(r14)(vcpu); \
+       PPC_LL  r15, VCPU_GPR(r15)(vcpu); \
+       PPC_LL  r16, VCPU_GPR(r16)(vcpu); \
+       PPC_LL  r17, VCPU_GPR(r17)(vcpu); \
+       PPC_LL  r18, VCPU_GPR(r18)(vcpu); \
+       PPC_LL  r19, VCPU_GPR(r19)(vcpu); \
+       PPC_LL  r20, VCPU_GPR(r20)(vcpu); \
+       PPC_LL  r21, VCPU_GPR(r21)(vcpu); \
+       PPC_LL  r22, VCPU_GPR(r22)(vcpu); \
+       PPC_LL  r23, VCPU_GPR(r23)(vcpu); \
+       PPC_LL  r24, VCPU_GPR(r24)(vcpu); \
+       PPC_LL  r25, VCPU_GPR(r25)(vcpu); \
+       PPC_LL  r26, VCPU_GPR(r26)(vcpu); \
+       PPC_LL  r27, VCPU_GPR(r27)(vcpu); \
+       PPC_LL  r28, VCPU_GPR(r28)(vcpu); \
+       PPC_LL  r29, VCPU_GPR(r29)(vcpu); \
+       PPC_LL  r30, VCPU_GPR(r30)(vcpu); \
+       PPC_LL  r31, VCPU_GPR(r31)(vcpu); \
 
 /*****************************************************************************
  *                                                                           *
···
 
 kvm_start_entry:
        /* Write correct stack frame */
-       mflr    r0
-       std     r0,16(r1)
+       mflr    r0
+       PPC_STL r0,PPC_LR_STKOFF(r1)
 
        /* Save host state to the stack */
-       stdu    r1, -SWITCH_FRAME_SIZE(r1)
+       PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
 
        /* Save r3 (kvm_run) and r4 (vcpu) */
        SAVE_2GPRS(3, r1)
···
        SAVE_NVGPRS(r1)
 
        /* Save LR */
-       std     r0, _LINK(r1)
+       PPC_STL r0, _LINK(r1)
 
        /* Load non-volatile guest state from the vcpu */
        VCPU_LOAD_NVGPRS(r4)
 
+       GET_SHADOW_VCPU(r5)
+
        /* Save R1/R2 in the PACA */
-       std     r1, PACA_KVM_HOST_R1(r13)
-       std     r2, PACA_KVM_HOST_R2(r13)
+       PPC_STL r1, SVCPU_HOST_R1(r5)
+       PPC_STL r2, SVCPU_HOST_R2(r5)
 
        /* XXX swap in/out on load? */
-       ld      r3, VCPU_HIGHMEM_HANDLER(r4)
-       std     r3, PACA_KVM_VMHANDLER(r13)
+       PPC_LL  r3, VCPU_HIGHMEM_HANDLER(r4)
+       PPC_STL r3, SVCPU_VMHANDLER(r5)
 
 kvm_start_lightweight:
 
-       ld      r9, VCPU_PC(r4)                 /* r9 = vcpu->arch.pc */
-       ld      r10, VCPU_SHADOW_MSR(r4)        /* r10 = vcpu->arch.shadow_msr */
-
-       /* Load some guest state in the respective registers */
-       ld      r5, VCPU_CTR(r4)        /* r5 = vcpu->arch.ctr */
-                                       /* will be swapped in by rmcall */
-
-       ld      r3, VCPU_LR(r4)         /* r3 = vcpu->arch.lr */
-       mtlr    r3                      /* LR = r3 */
+       PPC_LL  r10, VCPU_SHADOW_MSR(r4)        /* r10 = vcpu->arch.shadow_msr */
 
        DISABLE_INTERRUPTS
 
+#ifdef CONFIG_PPC_BOOK3S_64
        /* Some guests may need to have dcbz set to 32 byte length.
         *
         * Usually we ensure that by patching the guest's instructions
···
         * because that's a lot faster.
         */
 
-       ld      r3, VCPU_HFLAGS(r4)
+       PPC_LL  r3, VCPU_HFLAGS(r4)
        rldicl. r3, r3, 0, 63           /* CR = ((r3 & 1) == 0) */
        beq     no_dcbz32_on
···
 
 no_dcbz32_on:
 
-       ld      r6, VCPU_RMCALL(r4)
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+       PPC_LL  r6, VCPU_RMCALL(r4)
        mtctr   r6
 
-       ld      r3, VCPU_TRAMPOLINE_ENTER(r4)
+       PPC_LL  r3, VCPU_TRAMPOLINE_ENTER(r4)
        LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
 
-       /* Jump to SLB patching handlder and into our guest */
+       /* Jump to segment patching handler and into our guest */
        bctr
 
 /*
···
 /*
  * Register usage at this point:
  *
- *     R0         = guest last inst
- *     R1         = host R1
- *     R2         = host R2
- *     R3         = guest PC
- *     R4         = guest MSR
- *     R5         = guest DAR
- *     R6         = guest DSISR
- *     R13        = PACA
- *     PACA.KVM.* = guest *
+ *     R1       = host R1
+ *     R2       = host R2
+ *     R12      = exit handler id
+ *     R13      = PACA
+ *     SVCPU.*  = guest *
  *
  */
 
        /* R7 = vcpu */
-       ld      r7, GPR4(r1)
+       PPC_LL  r7, GPR4(r1)
 
-       /* Now save the guest state */
+#ifdef CONFIG_PPC_BOOK3S_64
 
-       stw     r0, VCPU_LAST_INST(r7)
-
-       std     r3, VCPU_PC(r7)
-       std     r4, VCPU_SHADOW_SRR1(r7)
-       std     r5, VCPU_FAULT_DEAR(r7)
-       stw     r6, VCPU_FAULT_DSISR(r7)
-
-       ld      r5, VCPU_HFLAGS(r7)
+       PPC_LL  r5, VCPU_HFLAGS(r7)
        rldicl. r5, r5, 0, 63           /* CR = ((r5 & 1) == 0) */
        beq     no_dcbz32_off
···
 
 no_dcbz32_off:
 
-       std     r14, VCPU_GPR(r14)(r7)
-       std     r15, VCPU_GPR(r15)(r7)
-       std     r16, VCPU_GPR(r16)(r7)
-       std     r17, VCPU_GPR(r17)(r7)
-       std     r18, VCPU_GPR(r18)(r7)
-       std     r19, VCPU_GPR(r19)(r7)
-       std     r20, VCPU_GPR(r20)(r7)
-       std     r21, VCPU_GPR(r21)(r7)
-       std     r22, VCPU_GPR(r22)(r7)
-       std     r23, VCPU_GPR(r23)(r7)
-       std     r24, VCPU_GPR(r24)(r7)
-       std     r25, VCPU_GPR(r25)(r7)
-       std     r26, VCPU_GPR(r26)(r7)
-       std     r27, VCPU_GPR(r27)(r7)
-       std     r28, VCPU_GPR(r28)(r7)
-       std     r29, VCPU_GPR(r29)(r7)
-       std     r30, VCPU_GPR(r30)(r7)
-       std     r31, VCPU_GPR(r31)(r7)
+#endif /* CONFIG_PPC_BOOK3S_64 */
 
-       /* Save guest CTR */
-       mfctr   r5
-       std     r5, VCPU_CTR(r7)
-
-       /* Save guest LR */
-       mflr    r5
-       std     r5, VCPU_LR(r7)
+       PPC_STL r14, VCPU_GPR(r14)(r7)
+       PPC_STL r15, VCPU_GPR(r15)(r7)
+       PPC_STL r16, VCPU_GPR(r16)(r7)
+       PPC_STL r17, VCPU_GPR(r17)(r7)
+       PPC_STL r18, VCPU_GPR(r18)(r7)
+       PPC_STL r19, VCPU_GPR(r19)(r7)
+       PPC_STL r20, VCPU_GPR(r20)(r7)
+       PPC_STL r21, VCPU_GPR(r21)(r7)
+       PPC_STL r22, VCPU_GPR(r22)(r7)
+       PPC_STL r23, VCPU_GPR(r23)(r7)
+       PPC_STL r24, VCPU_GPR(r24)(r7)
+       PPC_STL r25, VCPU_GPR(r25)(r7)
+       PPC_STL r26, VCPU_GPR(r26)(r7)
+       PPC_STL r27, VCPU_GPR(r27)(r7)
+       PPC_STL r28, VCPU_GPR(r28)(r7)
+       PPC_STL r29, VCPU_GPR(r29)(r7)
+       PPC_STL r30, VCPU_GPR(r30)(r7)
+       PPC_STL r31, VCPU_GPR(r31)(r7)
 
        /* Restore host msr -> SRR1 */
-       ld      r6, VCPU_HOST_MSR(r7)
+       PPC_LL  r6, VCPU_HOST_MSR(r7)
 
        /*
         * For some interrupts, we need to call the real Linux
···
 
        /* Back to EE=1 */
        mtmsr   r6
+       sync
        b       kvm_return_point
 
 call_linux_handler:
···
        */
 
        /* Restore host IP -> SRR0 */
-       ld      r5, VCPU_HOST_RETIP(r7)
+       PPC_LL  r5, VCPU_HOST_RETIP(r7)
 
        /* XXX Better move to a safe function?
         *     What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
 
        mtlr    r12
 
-       ld      r4, VCPU_TRAMPOLINE_LOWMEM(r7)
+       PPC_LL  r4, VCPU_TRAMPOLINE_LOWMEM(r7)
        mtsrr0  r4
        LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
        mtsrr1  r3
···
 
        /* Restore r3 (kvm_run) and r4 (vcpu) */
        REST_2GPRS(3, r1)
-       bl      KVMPPC_HANDLE_EXIT
+       bl      FUNC(kvmppc_handle_exit)
 
        /* If RESUME_GUEST, get back in the loop */
        cmpwi   r3, RESUME_GUEST
···
 
 kvm_exit_loop:
 
-       ld      r4, _LINK(r1)
+       PPC_LL  r4, _LINK(r1)
        mtlr    r4
 
        /* Restore non-volatile host registers (r14 - r31) */
···
 
 kvm_loop_heavyweight:
 
-       ld      r4, _LINK(r1)
-       std     r4, (16 + SWITCH_FRAME_SIZE)(r1)
+       PPC_LL  r4, _LINK(r1)
+       PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)
 
        /* Load vcpu and cpu_run */
        REST_2GPRS(3, r1)
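
For reference, the two DISABLE_INTERRUPTS bodies introduced above differ only in how they mask MSR_EE (external interrupt enable) out of the MSR. An annotated restatement of the macro bodies from the diff, assuming IBM bit numbering with the MSB as bit 0:

    /* 64-bit variant: MSR_EE sits at bit 48 of the 64-bit MSR. */
    mfmsr   r0              /* r0 = current MSR                            */
    rldicl  r0,r0,48,1      /* rotate left 48 so EE lands at bit 0;
                               the 1..63 mask then clears it               */
    rotldi  r0,r0,16        /* rotate left another 16 (48+16 = 64) to put
                               all bits back in place, with EE now clear   */
    mtmsrd  r0,1            /* L=1 form: updates only EE (and RI)          */

    /* 32-bit variant: MSR_EE is bit 16 of the 32-bit MSR. */
    mfmsr   r0
    rlwinm  r0,r0,0,17,15   /* wrap-around mask 17..15 keeps every bit
                               except bit 16, clearing EE                  */
    mtmsr   r0

Either way the guest is entered with external interrupts disabled on the host; the exit path re-enables them with the `mtmsr r6; sync` sequence shown in the diff before branching to kvm_return_point.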