Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Use ccr field in pt_regs struct embedded in vcpu struct

When the 'regs' field was added to struct kvm_vcpu_arch, the code
was changed to use several of the fields inside regs (e.g., gpr, lr,
etc.) but not the ccr field, because the ccr field in struct pt_regs
is 64 bits on 64-bit platforms, but the cr field in kvm_vcpu_arch is
only 32 bits. This changes the code to use the regs.ccr field
instead of cr, and changes the assembly code on 64-bit platforms to
use 64-bit loads and stores instead of 32-bit ones.

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

Authored by Paul Mackerras; committed by Michael Ellerman.
fd0944ba 9a94d3ee

+30 -32
+2 -2
arch/powerpc/include/asm/kvm_book3s.h
··· 301 301 302 302 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) 303 303 { 304 - vcpu->arch.cr = val; 304 + vcpu->arch.regs.ccr = val; 305 305 } 306 306 307 307 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) 308 308 { 309 - return vcpu->arch.cr; 309 + return vcpu->arch.regs.ccr; 310 310 } 311 311 312 312 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
+2 -2
arch/powerpc/include/asm/kvm_book3s_64.h
··· 483 483 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 484 484 static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu) 485 485 { 486 - vcpu->arch.cr = vcpu->arch.cr_tm; 486 + vcpu->arch.regs.ccr = vcpu->arch.cr_tm; 487 487 vcpu->arch.regs.xer = vcpu->arch.xer_tm; 488 488 vcpu->arch.regs.link = vcpu->arch.lr_tm; 489 489 vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; ··· 500 500 501 501 static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu) 502 502 { 503 - vcpu->arch.cr_tm = vcpu->arch.cr; 503 + vcpu->arch.cr_tm = vcpu->arch.regs.ccr; 504 504 vcpu->arch.xer_tm = vcpu->arch.regs.xer; 505 505 vcpu->arch.lr_tm = vcpu->arch.regs.link; 506 506 vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
+2 -2
arch/powerpc/include/asm/kvm_booke.h
··· 46 46 47 47 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) 48 48 { 49 - vcpu->arch.cr = val; 49 + vcpu->arch.regs.ccr = val; 50 50 } 51 51 52 52 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) 53 53 { 54 - return vcpu->arch.cr; 54 + return vcpu->arch.regs.ccr; 55 55 } 56 56 57 57 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
-2
arch/powerpc/include/asm/kvm_host.h
··· 538 538 ulong tar; 539 539 #endif 540 540 541 - u32 cr; 542 - 543 541 #ifdef CONFIG_PPC_BOOK3S 544 542 ulong hflags; 545 543 ulong guest_owned_ext;
+2 -2
arch/powerpc/kernel/asm-offsets.c
··· 438 438 #ifdef CONFIG_PPC_BOOK3S 439 439 OFFSET(VCPU_TAR, kvm_vcpu, arch.tar); 440 440 #endif 441 - OFFSET(VCPU_CR, kvm_vcpu, arch.cr); 441 + OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr); 442 442 OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip); 443 443 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 444 444 OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr); ··· 695 695 #endif /* CONFIG_PPC_BOOK3S_64 */ 696 696 697 697 #else /* CONFIG_PPC_BOOK3S */ 698 - OFFSET(VCPU_CR, kvm_vcpu, arch.cr); 698 + OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr); 699 699 OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer); 700 700 OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link); 701 701 OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
+6 -6
arch/powerpc/kvm/book3s_emulate.c
··· 110 110 vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; 111 111 vcpu->arch.tar_tm = vcpu->arch.tar; 112 112 vcpu->arch.lr_tm = vcpu->arch.regs.link; 113 - vcpu->arch.cr_tm = vcpu->arch.cr; 113 + vcpu->arch.cr_tm = vcpu->arch.regs.ccr; 114 114 vcpu->arch.xer_tm = vcpu->arch.regs.xer; 115 115 vcpu->arch.vrsave_tm = vcpu->arch.vrsave; 116 116 } ··· 129 129 vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; 130 130 vcpu->arch.tar = vcpu->arch.tar_tm; 131 131 vcpu->arch.regs.link = vcpu->arch.lr_tm; 132 - vcpu->arch.cr = vcpu->arch.cr_tm; 132 + vcpu->arch.regs.ccr = vcpu->arch.cr_tm; 133 133 vcpu->arch.regs.xer = vcpu->arch.xer_tm; 134 134 vcpu->arch.vrsave = vcpu->arch.vrsave_tm; 135 135 } ··· 141 141 uint64_t texasr; 142 142 143 143 /* CR0 = 0 | MSR[TS] | 0 */ 144 - vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | 144 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) | 145 145 (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) 146 146 << CR0_SHIFT); 147 147 ··· 220 220 tm_abort(ra_val); 221 221 222 222 /* CR0 = 0 | MSR[TS] | 0 */ 223 - vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | 223 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) | 224 224 (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) 225 225 << CR0_SHIFT); 226 226 ··· 494 494 495 495 if (!(kvmppc_get_msr(vcpu) & MSR_PR)) { 496 496 preempt_disable(); 497 - vcpu->arch.cr = (CR0_TBEGIN_FAILURE | 498 - (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT))); 497 + vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE | 498 + (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT))); 499 499 500 500 vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT | 501 501 (((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
+2 -2
arch/powerpc/kvm/book3s_hv.c
··· 410 410 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); 411 411 pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", 412 412 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); 413 - pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n", 414 - vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); 413 + pr_err("cr = %.8lx xer = %.16lx dsisr = %.8x\n", 414 + vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); 415 415 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); 416 416 pr_err("fault dar = %.16lx dsisr = %.8x\n", 417 417 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+2 -2
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 1092 1092 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1093 1093 1094 1094 ld r5, VCPU_LR(r4) 1095 - lwz r6, VCPU_CR(r4) 1095 + ld r6, VCPU_CR(r4) 1096 1096 mtlr r5 1097 1097 mtcr r6 1098 1098 ··· 1280 1280 std r3, VCPU_GPR(R12)(r9) 1281 1281 /* CR is in the high half of r12 */ 1282 1282 srdi r4, r12, 32 1283 - stw r4, VCPU_CR(r9) 1283 + std r4, VCPU_CR(r9) 1284 1284 BEGIN_FTR_SECTION 1285 1285 ld r3, HSTATE_CFAR(r13) 1286 1286 std r3, VCPU_CFAR(r9)
+3 -3
arch/powerpc/kvm/book3s_hv_tm.c
··· 130 130 return RESUME_GUEST; 131 131 } 132 132 /* Set CR0 to indicate previous transactional state */ 133 - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 133 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 134 134 (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); 135 135 /* L=1 => tresume, L=0 => tsuspend */ 136 136 if (instr & (1 << 21)) { ··· 174 174 copy_from_checkpoint(vcpu); 175 175 176 176 /* Set CR0 to indicate previous transactional state */ 177 - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 177 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 178 178 (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); 179 179 vcpu->arch.shregs.msr &= ~MSR_TS_MASK; 180 180 return RESUME_GUEST; ··· 204 204 copy_to_checkpoint(vcpu); 205 205 206 206 /* Set CR0 to indicate previous transactional state */ 207 - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 207 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 208 208 (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); 209 209 vcpu->arch.shregs.msr = msr | MSR_TS_S; 210 210 return RESUME_GUEST;
+3 -2
arch/powerpc/kvm/book3s_hv_tm_builtin.c
··· 89 89 if (instr & (1 << 21)) 90 90 vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; 91 91 /* Set CR0 to 0b0010 */ 92 - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000; 92 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 93 + 0x20000000; 93 94 return 1; 94 95 } 95 96 ··· 106 105 vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */ 107 106 vcpu->arch.regs.nip = vcpu->arch.tfhar; 108 107 copy_from_checkpoint(vcpu); 109 - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000; 108 + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000; 110 109 }
+2 -2
arch/powerpc/kvm/book3s_pr.c
··· 167 167 svcpu->gpr[11] = vcpu->arch.regs.gpr[11]; 168 168 svcpu->gpr[12] = vcpu->arch.regs.gpr[12]; 169 169 svcpu->gpr[13] = vcpu->arch.regs.gpr[13]; 170 - svcpu->cr = vcpu->arch.cr; 170 + svcpu->cr = vcpu->arch.regs.ccr; 171 171 svcpu->xer = vcpu->arch.regs.xer; 172 172 svcpu->ctr = vcpu->arch.regs.ctr; 173 173 svcpu->lr = vcpu->arch.regs.link; ··· 249 249 vcpu->arch.regs.gpr[11] = svcpu->gpr[11]; 250 250 vcpu->arch.regs.gpr[12] = svcpu->gpr[12]; 251 251 vcpu->arch.regs.gpr[13] = svcpu->gpr[13]; 252 - vcpu->arch.cr = svcpu->cr; 252 + vcpu->arch.regs.ccr = svcpu->cr; 253 253 vcpu->arch.regs.xer = svcpu->xer; 254 254 vcpu->arch.regs.ctr = svcpu->ctr; 255 255 vcpu->arch.regs.link = svcpu->lr;
+4 -4
arch/powerpc/kvm/bookehv_interrupts.S
··· 182 182 */ 183 183 PPC_LL r4, PACACURRENT(r13) 184 184 PPC_LL r4, (THREAD + THREAD_KVM_VCPU)(r4) 185 - stw r10, VCPU_CR(r4) 185 + PPC_STL r10, VCPU_CR(r4) 186 186 PPC_STL r11, VCPU_GPR(R4)(r4) 187 187 PPC_STL r5, VCPU_GPR(R5)(r4) 188 188 PPC_STL r6, VCPU_GPR(R6)(r4) ··· 292 292 PPC_STL r4, VCPU_GPR(R4)(r11) 293 293 PPC_LL r4, THREAD_NORMSAVE(0)(r10) 294 294 PPC_STL r5, VCPU_GPR(R5)(r11) 295 - stw r13, VCPU_CR(r11) 295 + PPC_STL r13, VCPU_CR(r11) 296 296 mfspr r5, \srr0 297 297 PPC_STL r3, VCPU_GPR(R10)(r11) 298 298 PPC_LL r3, THREAD_NORMSAVE(2)(r10) ··· 319 319 PPC_STL r4, VCPU_GPR(R4)(r11) 320 320 PPC_LL r4, GPR9(r8) 321 321 PPC_STL r5, VCPU_GPR(R5)(r11) 322 - stw r9, VCPU_CR(r11) 322 + PPC_STL r9, VCPU_CR(r11) 323 323 mfspr r5, \srr0 324 324 PPC_STL r3, VCPU_GPR(R8)(r11) 325 325 PPC_LL r3, GPR10(r8) ··· 643 643 PPC_LL r3, VCPU_LR(r4) 644 644 PPC_LL r5, VCPU_XER(r4) 645 645 PPC_LL r6, VCPU_CTR(r4) 646 - lwz r7, VCPU_CR(r4) 646 + PPC_LL r7, VCPU_CR(r4) 647 647 PPC_LL r8, VCPU_PC(r4) 648 648 PPC_LD(r9, VCPU_SHARED_MSR, r11) 649 649 PPC_LL r0, VCPU_GPR(R0)(r4)
-1
arch/powerpc/kvm/emulate_loadstore.c
··· 117 117 118 118 emulated = EMULATE_FAIL; 119 119 vcpu->arch.regs.msr = vcpu->arch.shared->msr; 120 - vcpu->arch.regs.ccr = vcpu->arch.cr; 121 120 if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) { 122 121 int type = op.type & INSTR_TYPE_MASK; 123 122 int size = GETSIZE(op.type);