Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3S HV: Add transactional memory support

This adds saving of the transactional memory (TM) checkpointed state
on guest entry and exit. We only do this if we see that the guest has
an active transaction.

It also adds emulation of the TM state changes when delivering IRQs
into the guest. According to the architecture, if we are
transactional when an IRQ occurs, the TM state is changed to
suspended; otherwise it is left unchanged.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Scott Wood <scottwood@freescale.com>

Authored by Michael Neuling; committed by Paul Mackerras.
e4e38121 7505258c

+149 -30
+1
arch/powerpc/include/asm/reg.h
··· 213 213 #define SPRN_ACOP 0x1F /* Available Coprocessor Register */ 214 214 #define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */ 215 215 #define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */ 216 + #define TEXASR_FS __MASK(63-36) /* Transaction Failure Summary */ 216 217 #define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */ 217 218 #define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */ 218 219 #define SPRN_CTRLF 0x088
+4
arch/powerpc/include/asm/tm.h
··· 7 7 8 8 #include <uapi/asm/tm.h> 9 9 10 + #ifndef __ASSEMBLY__ 11 + 10 12 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 11 13 extern void do_load_up_transact_fpu(struct thread_struct *thread); 12 14 extern void do_load_up_transact_altivec(struct thread_struct *thread); ··· 22 20 extern void tm_abort(uint8_t cause); 23 21 extern void tm_save_sprs(struct thread_struct *thread); 24 22 extern void tm_restore_sprs(struct thread_struct *thread); 23 + 24 + #endif /* __ASSEMBLY__ */
+8 -1
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 262 262 263 263 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) 264 264 { 265 - kvmppc_set_msr(vcpu, vcpu->arch.intr_msr); 265 + unsigned long msr = vcpu->arch.intr_msr; 266 + 267 + /* If transactional, change to suspend mode on IRQ delivery */ 268 + if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr)) 269 + msr |= MSR_TS_S; 270 + else 271 + msr |= vcpu->arch.shregs.msr & MSR_TS_MASK; 272 + kvmppc_set_msr(vcpu, msr); 266 273 } 267 274 268 275 /*
+136 -29
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 28 28 #include <asm/exception-64s.h> 29 29 #include <asm/kvm_book3s_asm.h> 30 30 #include <asm/mmu-hash64.h> 31 + #include <asm/tm.h> 32 + 33 + #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) 31 34 32 35 #ifdef __LITTLE_ENDIAN__ 33 36 #error Need to fix lppaca and SLB shadow accesses in little endian mode ··· 600 597 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89) 601 598 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 602 599 600 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 601 + BEGIN_FTR_SECTION 602 + b skip_tm 603 + END_FTR_SECTION_IFCLR(CPU_FTR_TM) 604 + 605 + /* Turn on TM/FP/VSX/VMX so we can restore them. */ 606 + mfmsr r5 607 + li r6, MSR_TM >> 32 608 + sldi r6, r6, 32 609 + or r5, r5, r6 610 + ori r5, r5, MSR_FP 611 + oris r5, r5, (MSR_VEC | MSR_VSX)@h 612 + mtmsrd r5 613 + 614 + /* 615 + * The user may change these outside of a transaction, so they must 616 + * always be context switched. 617 + */ 618 + ld r5, VCPU_TFHAR(r4) 619 + ld r6, VCPU_TFIAR(r4) 620 + ld r7, VCPU_TEXASR(r4) 621 + mtspr SPRN_TFHAR, r5 622 + mtspr SPRN_TFIAR, r6 623 + mtspr SPRN_TEXASR, r7 624 + 625 + ld r5, VCPU_MSR(r4) 626 + rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 627 + beq skip_tm /* TM not active in guest */ 628 + 629 + /* Make sure the failure summary is set, otherwise we'll program check 630 + * when we trechkpt. It's possible that this might have been not set 631 + * on a kvmppc_set_one_reg() call but we shouldn't let this crash the 632 + * host. 633 + */ 634 + oris r7, r7, (TEXASR_FS)@h 635 + mtspr SPRN_TEXASR, r7 636 + 637 + /* 638 + * We need to load up the checkpointed state for the guest. 639 + * We need to do this early as it will blow away any GPRs, VSRs and 640 + * some SPRs. 
641 + */ 642 + 643 + mr r31, r4 644 + addi r3, r31, VCPU_FPRS_TM 645 + bl .load_fp_state 646 + addi r3, r31, VCPU_VRS_TM 647 + bl .load_vr_state 648 + mr r4, r31 649 + lwz r7, VCPU_VRSAVE_TM(r4) 650 + mtspr SPRN_VRSAVE, r7 651 + 652 + ld r5, VCPU_LR_TM(r4) 653 + lwz r6, VCPU_CR_TM(r4) 654 + ld r7, VCPU_CTR_TM(r4) 655 + ld r8, VCPU_AMR_TM(r4) 656 + ld r9, VCPU_TAR_TM(r4) 657 + mtlr r5 658 + mtcr r6 659 + mtctr r7 660 + mtspr SPRN_AMR, r8 661 + mtspr SPRN_TAR, r9 662 + 663 + /* 664 + * Load up PPR and DSCR values but don't put them in the actual SPRs 665 + * till the last moment to avoid running with userspace PPR and DSCR for 666 + * too long. 667 + */ 668 + ld r29, VCPU_DSCR_TM(r4) 669 + ld r30, VCPU_PPR_TM(r4) 670 + 671 + std r2, PACATMSCRATCH(r13) /* Save TOC */ 672 + 673 + /* Clear the MSR RI since r1, r13 are all going to be foobar. */ 674 + li r5, 0 675 + mtmsrd r5, 1 676 + 677 + /* Load GPRs r0-r28 */ 678 + reg = 0 679 + .rept 29 680 + ld reg, VCPU_GPRS_TM(reg)(r31) 681 + reg = reg + 1 682 + .endr 683 + 684 + mtspr SPRN_DSCR, r29 685 + mtspr SPRN_PPR, r30 686 + 687 + /* Load final GPRs */ 688 + ld 29, VCPU_GPRS_TM(29)(r31) 689 + ld 30, VCPU_GPRS_TM(30)(r31) 690 + ld 31, VCPU_GPRS_TM(31)(r31) 691 + 692 + /* TM checkpointed state is now setup. All GPRs are now volatile. */ 693 + TRECHKPT 694 + 695 + /* Now let's get back the state we need. */ 696 + HMT_MEDIUM 697 + GET_PACA(r13) 698 + ld r29, HSTATE_DSCR(r13) 699 + mtspr SPRN_DSCR, r29 700 + ld r4, HSTATE_KVM_VCPU(r13) 701 + ld r1, HSTATE_HOST_R1(r13) 702 + ld r2, PACATMSCRATCH(r13) 703 + 704 + /* Set the MSR RI since we have our registers back. 
*/ 705 + li r5, MSR_RI 706 + mtmsrd r5, 1 707 + skip_tm: 708 + #endif 709 + 603 710 /* Load guest PMU registers */ 604 711 /* R4 is live here (vcpu pointer) */ 605 712 li r3, 1 ··· 817 704 ld r6, VCPU_VTB(r4) 818 705 mtspr SPRN_IC, r5 819 706 mtspr SPRN_VTB, r6 820 - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 821 - ld r5, VCPU_TFHAR(r4) 822 - ld r6, VCPU_TFIAR(r4) 823 - ld r7, VCPU_TEXASR(r4) 824 - mtspr SPRN_TFHAR, r5 825 - mtspr SPRN_TFIAR, r6 826 - mtspr SPRN_TEXASR, r7 827 - #endif 828 707 ld r8, VCPU_EBBHR(r4) 829 708 mtspr SPRN_EBBHR, r8 830 709 ld r5, VCPU_EBBRR(r4) ··· 922 817 12: mtspr SPRN_SRR0, r10 923 818 mr r10,r0 924 819 mtspr SPRN_SRR1, r11 925 - ld r11, VCPU_INTR_MSR(r4) 820 + mr r9, r4 821 + bl kvmppc_msr_interrupt 926 822 5: 927 823 928 824 /* ··· 1209 1103 BEGIN_FTR_SECTION 1210 1104 b 8f 1211 1105 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 1212 - /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ 1213 - mfmsr r8 1214 - li r0, 1 1215 - rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG 1216 - mtmsrd r8 1217 - 1218 1106 /* Save POWER8-specific registers */ 1219 1107 mfspr r5, SPRN_IAMR 1220 1108 mfspr r6, SPRN_PSPB ··· 1222 1122 std r5, VCPU_IC(r9) 1223 1123 std r6, VCPU_VTB(r9) 1224 1124 std r7, VCPU_TAR(r9) 1225 - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1226 - mfspr r5, SPRN_TFHAR 1227 - mfspr r6, SPRN_TFIAR 1228 - mfspr r7, SPRN_TEXASR 1229 - std r5, VCPU_TFHAR(r9) 1230 - std r6, VCPU_TFIAR(r9) 1231 - std r7, VCPU_TEXASR(r9) 1232 - #endif 1233 1125 mfspr r8, SPRN_EBBHR 1234 1126 std r8, VCPU_EBBHR(r9) 1235 1127 mfspr r5, SPRN_EBBRR ··· 1649 1557 mtspr SPRN_SRR0, r10 1650 1558 mtspr SPRN_SRR1, r11 1651 1559 li r10, BOOK3S_INTERRUPT_DATA_STORAGE 1652 - ld r11, VCPU_INTR_MSR(r9) 1560 + bl kvmppc_msr_interrupt 1653 1561 fast_interrupt_c_return: 1654 1562 6: ld r7, VCPU_CTR(r9) 1655 1563 lwz r8, VCPU_XER(r9) ··· 1718 1626 1: mtspr SPRN_SRR0, r10 1719 1627 mtspr SPRN_SRR1, r11 1720 1628 li r10, BOOK3S_INTERRUPT_INST_STORAGE 1721 - ld r11, VCPU_INTR_MSR(r9) 1629 + bl 
kvmppc_msr_interrupt 1722 1630 b fast_interrupt_c_return 1723 1631 1724 1632 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ ··· 1761 1669 mtspr SPRN_SRR0,r10 1762 1670 mtspr SPRN_SRR1,r11 1763 1671 li r10, BOOK3S_INTERRUPT_SYSCALL 1764 - ld r11, VCPU_INTR_MSR(r9) 1672 + bl kvmppc_msr_interrupt 1765 1673 mr r4,r9 1766 1674 b fast_guest_return 1767 1675 ··· 2089 1997 beq mc_cont 2090 1998 /* If not, deliver a machine check. SRR0/1 are already set */ 2091 1999 li r10, BOOK3S_INTERRUPT_MACHINE_CHECK 2092 - ld r11, VCPU_INTR_MSR(r9) 2000 + bl kvmppc_msr_interrupt 2093 2001 b fast_interrupt_c_return 2094 2002 2095 2003 /* ··· 2230 2138 mfspr r6,SPRN_VRSAVE 2231 2139 stw r6,VCPU_VRSAVE(r31) 2232 2140 mtlr r30 2233 - mtmsrd r5 2234 - isync 2235 2141 blr 2236 2142 2237 2143 /* ··· 2276 2186 */ 2277 2187 kvmppc_bad_host_intr: 2278 2188 b . 2189 + 2190 + /* 2191 + * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken 2192 + * from VCPU_INTR_MSR and is modified based on the required TM state changes. 2193 + * r11 has the guest MSR value (in/out) 2194 + * r9 has a vcpu pointer (in) 2195 + * r0 is used as a scratch register 2196 + */ 2197 + kvmppc_msr_interrupt: 2198 + rldicl r0, r11, 64 - MSR_TS_S_LG, 62 2199 + cmpwi r0, 2 /* Check if we are in transactional state.. */ 2200 + ld r11, VCPU_INTR_MSR(r9) 2201 + bne 1f 2202 + /* ... if transactional, change to suspended */ 2203 + li r0, 1 2204 + 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG 2205 + blr