Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: e500: Save/restore SPE state

This is done lazily. The SPE save will be done only if the guest has
used SPE since the last preemption or heavyweight exit. Restore will be
done only on demand, when enabling MSR_SPE in the shadow MSR, in response
to an SPE fault or mtmsr emulation.

For SPEFSCR, Linux already switches it on context switch (non-lazily), so
the only remaining bit is to save it between qemu and the guest.

Signed-off-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>

Authored by Scott Wood; committed by Avi Kivity.
4cd35f67 ecee273f

+148 -17
+6
arch/powerpc/include/asm/kvm_host.h
··· 195 195 u64 fpr[32]; 196 196 u64 fpscr; 197 197 198 + #ifdef CONFIG_SPE 199 + ulong evr[32]; 200 + ulong spefscr; 201 + ulong host_spefscr; 202 + u64 acc; 203 + #endif 198 204 #ifdef CONFIG_ALTIVEC 199 205 vector128 vr[32]; 200 206 vector128 vscr;
+1
arch/powerpc/include/asm/reg_booke.h
··· 318 318 #define ESR_ILK 0x00100000 /* Instr. Cache Locking */ 319 319 #define ESR_PUO 0x00040000 /* Unimplemented Operation exception */ 320 320 #define ESR_BO 0x00020000 /* Byte Ordering */ 321 + #define ESR_SPV 0x00000080 /* Signal Processing operation */ 321 322 322 323 /* Bit definitions related to the DBCR0. */ 323 324 #if defined(CONFIG_40x)
+7
arch/powerpc/kernel/asm-offsets.c
··· 497 497 DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7)); 498 498 #endif 499 499 500 + #if defined(CONFIG_KVM) && defined(CONFIG_SPE) 501 + DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0])); 502 + DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc)); 503 + DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr)); 504 + DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr)); 505 + #endif 506 + 500 507 #ifdef CONFIG_KVM_EXIT_TIMING 501 508 DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, 502 509 arch.timing_exit.tv32.tbu));
+82 -2
arch/powerpc/kvm/booke.c
··· 13 13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 14 * 15 15 * Copyright IBM Corp. 2007 16 + * Copyright 2010-2011 Freescale Semiconductor, Inc. 16 17 * 17 18 * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 19 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> ··· 77 76 kvmppc_get_gpr(vcpu, i+2), 78 77 kvmppc_get_gpr(vcpu, i+3)); 79 78 } 79 + } 80 + 81 + #ifdef CONFIG_SPE 82 + void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu) 83 + { 84 + preempt_disable(); 85 + enable_kernel_spe(); 86 + kvmppc_save_guest_spe(vcpu); 87 + vcpu->arch.shadow_msr &= ~MSR_SPE; 88 + preempt_enable(); 89 + } 90 + 91 + static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu) 92 + { 93 + preempt_disable(); 94 + enable_kernel_spe(); 95 + kvmppc_load_guest_spe(vcpu); 96 + vcpu->arch.shadow_msr |= MSR_SPE; 97 + preempt_enable(); 98 + } 99 + 100 + static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu) 101 + { 102 + if (vcpu->arch.shared->msr & MSR_SPE) { 103 + if (!(vcpu->arch.shadow_msr & MSR_SPE)) 104 + kvmppc_vcpu_enable_spe(vcpu); 105 + } else if (vcpu->arch.shadow_msr & MSR_SPE) { 106 + kvmppc_vcpu_disable_spe(vcpu); 107 + } 108 + } 109 + #else 110 + static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu) 111 + { 112 + } 113 + #endif 114 + 115 + /* Helper function for "full" MSR writes. No need to call this if only EE is 116 + * changing. 
*/ 117 + void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) 118 + { 119 + if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR)) 120 + kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); 121 + 122 + vcpu->arch.shared->msr = new_msr; 123 + 124 + if (vcpu->arch.shared->msr & MSR_WE) { 125 + kvm_vcpu_block(vcpu); 126 + kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); 127 + }; 128 + 129 + kvmppc_vcpu_sync_spe(vcpu); 80 130 } 81 131 82 132 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, ··· 396 344 r = RESUME_GUEST; 397 345 break; 398 346 399 - case BOOKE_INTERRUPT_SPE_UNAVAIL: 400 - kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL); 347 + #ifdef CONFIG_SPE 348 + case BOOKE_INTERRUPT_SPE_UNAVAIL: { 349 + if (vcpu->arch.shared->msr & MSR_SPE) 350 + kvmppc_vcpu_enable_spe(vcpu); 351 + else 352 + kvmppc_booke_queue_irqprio(vcpu, 353 + BOOKE_IRQPRIO_SPE_UNAVAIL); 401 354 r = RESUME_GUEST; 402 355 break; 356 + } 403 357 404 358 case BOOKE_INTERRUPT_SPE_FP_DATA: 405 359 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA); ··· 416 358 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND); 417 359 r = RESUME_GUEST; 418 360 break; 361 + #else 362 + case BOOKE_INTERRUPT_SPE_UNAVAIL: 363 + /* 364 + * Guest wants SPE, but host kernel doesn't support it. Send 365 + * an "unimplemented operation" program check to the guest. 366 + */ 367 + kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV); 368 + r = RESUME_GUEST; 369 + break; 370 + 371 + /* 372 + * These really should never happen without CONFIG_SPE, 373 + * as we should never enable the real MSR[SPE] in the guest. 
374 + */ 375 + case BOOKE_INTERRUPT_SPE_FP_DATA: 376 + case BOOKE_INTERRUPT_SPE_FP_ROUND: 377 + printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n", 378 + __func__, exit_nr, vcpu->arch.pc); 379 + run->hw.hardware_exit_reason = exit_nr; 380 + r = RESUME_HOST; 381 + break; 382 + #endif 419 383 420 384 case BOOKE_INTERRUPT_DATA_STORAGE: 421 385 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
+8 -14
arch/powerpc/kvm/booke.h
··· 52 52 53 53 extern unsigned long kvmppc_booke_handlers; 54 54 55 - /* Helper function for "full" MSR writes. No need to call this if only EE is 56 - * changing. */ 57 - static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) 58 - { 59 - if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR)) 60 - kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); 61 - 62 - vcpu->arch.shared->msr = new_msr; 63 - 64 - if (vcpu->arch.shared->msr & MSR_WE) { 65 - kvm_vcpu_block(vcpu); 66 - kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); 67 - }; 68 - } 55 + void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr); 69 56 70 57 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 71 58 unsigned int inst, int *advance); 72 59 int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); 73 60 int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); 61 + 62 + /* low-level asm code to transfer guest state */ 63 + void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu); 64 + void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu); 65 + 66 + /* high-level function, manages flags, host state */ 67 + void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu); 74 68 75 69 #endif /* __KVM_BOOKE_H__ */
+38
arch/powerpc/kvm/booke_interrupts.S
··· 13 13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 14 * 15 15 * Copyright IBM Corp. 2007 16 + * Copyright 2011 Freescale Semiconductor, Inc. 16 17 * 17 18 * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 19 */ ··· 240 239 heavyweight_exit: 241 240 /* Not returning to guest. */ 242 241 242 + #ifdef CONFIG_SPE 243 + /* save guest SPEFSCR and load host SPEFSCR */ 244 + mfspr r9, SPRN_SPEFSCR 245 + stw r9, VCPU_SPEFSCR(r4) 246 + lwz r9, VCPU_HOST_SPEFSCR(r4) 247 + mtspr SPRN_SPEFSCR, r9 248 + #endif 249 + 243 250 /* We already saved guest volatile register state; now save the 244 251 * non-volatiles. */ 245 252 stw r15, VCPU_GPR(r15)(r4) ··· 349 340 lwz r30, VCPU_GPR(r30)(r4) 350 341 lwz r31, VCPU_GPR(r31)(r4) 351 342 343 + #ifdef CONFIG_SPE 344 + /* save host SPEFSCR and load guest SPEFSCR */ 345 + mfspr r3, SPRN_SPEFSCR 346 + stw r3, VCPU_HOST_SPEFSCR(r4) 347 + lwz r3, VCPU_SPEFSCR(r4) 348 + mtspr SPRN_SPEFSCR, r3 349 + #endif 350 + 352 351 lightweight_exit: 353 352 stw r2, HOST_R2(r1) 354 353 ··· 442 425 lwz r3, VCPU_GPR(r3)(r4) 443 426 lwz r4, VCPU_GPR(r4)(r4) 444 427 rfi 428 + 429 + #ifdef CONFIG_SPE 430 + _GLOBAL(kvmppc_save_guest_spe) 431 + cmpi 0,r3,0 432 + beqlr- 433 + SAVE_32EVRS(0, r4, r3, VCPU_EVR) 434 + evxor evr6, evr6, evr6 435 + evmwumiaa evr6, evr6, evr6 436 + li r4,VCPU_ACC 437 + evstddx evr6, r4, r3 /* save acc */ 438 + blr 439 + 440 + _GLOBAL(kvmppc_load_guest_spe) 441 + cmpi 0,r3,0 442 + beqlr- 443 + li r4,VCPU_ACC 444 + evlddx evr6,r4,r3 445 + evmra evr6,evr6 /* load acc */ 446 + REST_32EVRS(0, r4, r3, VCPU_EVR) 447 + blr 448 + #endif
+6 -1
arch/powerpc/kvm/e500.c
··· 1 1 /* 2 - * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. 2 + * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. 3 3 * 4 4 * Author: Yu Liu, <yu.liu@freescale.com> 5 5 * ··· 41 41 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 42 42 { 43 43 kvmppc_e500_tlb_put(vcpu); 44 + 45 + #ifdef CONFIG_SPE 46 + if (vcpu->arch.shadow_msr & MSR_SPE) 47 + kvmppc_vcpu_disable_spe(vcpu); 48 + #endif 44 49 } 45 50 46 51 int kvmppc_core_check_processor_compat(void)