Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3S HV: Add new state for transactional memory

Add new state for transactional memory (TM) to kvm_vcpu_arch. Also add
asm-offset bits that are going to be required.

This also moves the existing TFHAR, TFIAR and TEXASR SPRs into a
CONFIG_PPC_TRANSACTIONAL_MEM section. This requires some code changes to
ensure we still compile with CONFIG_PPC_TRANSACTIONAL_MEM=N. Many of the
added #ifdefs are removed in a later patch when the bulk of the TM code is
added.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
[agraf: fix merge conflict]
Signed-off-by: Alexander Graf <agraf@suse.de>

authored by

Michael Neuling and committed by
Alexander Graf
7b490411 7b37a123

+114 -8
+21 -3
arch/powerpc/include/asm/kvm_host.h
··· 475 475 ulong ppr; 476 476 ulong pspb; 477 477 ulong fscr; 478 - ulong tfhar; 479 - ulong tfiar; 480 - ulong texasr; 481 478 ulong ebbhr; 482 479 ulong ebbrr; 483 480 ulong bescr; ··· 523 526 u64 siar; 524 527 u64 sdar; 525 528 u64 sier; 529 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 530 + u64 tfhar; 531 + u64 texasr; 532 + u64 tfiar; 533 + 534 + u32 cr_tm; 535 + u64 lr_tm; 536 + u64 ctr_tm; 537 + u64 amr_tm; 538 + u64 ppr_tm; 539 + u64 dscr_tm; 540 + u64 tar_tm; 541 + 542 + ulong gpr_tm[32]; 543 + 544 + struct thread_fp_state fp_tm; 545 + 546 + struct thread_vr_state vr_tm; 547 + u32 vrsave_tm; /* also USPRG0 */ 548 + 549 + #endif 526 550 527 551 #ifdef CONFIG_KVM_EXIT_TIMING 528 552 struct mutex exit_timing_lock;
+16 -3
arch/powerpc/kernel/asm-offsets.c
··· 521 521 DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); 522 522 DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr)); 523 523 DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb)); 524 - DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar)); 525 - DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar)); 526 - DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr)); 527 524 DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr)); 528 525 DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr)); 529 526 DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr)); ··· 542 545 DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige)); 543 546 DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv)); 544 547 DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb)); 548 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 549 + DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar)); 550 + DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar)); 551 + DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr)); 552 + DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm)); 553 + DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr)); 554 + DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr)); 555 + DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm)); 556 + DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm)); 557 + DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm)); 558 + DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm)); 559 + DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm)); 560 + DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm)); 561 + DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm)); 562 + DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm)); 563 + #endif 545 564 546 565 #ifdef CONFIG_PPC_BOOK3S_64 547 566 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+4
arch/powerpc/kvm/book3s_hv.c
··· 875 875 case KVM_REG_PPC_IAMR: 876 876 *val = get_reg_val(id, vcpu->arch.iamr); 877 877 break; 878 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 878 879 case KVM_REG_PPC_TFHAR: 879 880 *val = get_reg_val(id, vcpu->arch.tfhar); 880 881 break; ··· 885 884 case KVM_REG_PPC_TEXASR: 886 885 *val = get_reg_val(id, vcpu->arch.texasr); 887 886 break; 887 + #endif 888 888 case KVM_REG_PPC_FSCR: 889 889 *val = get_reg_val(id, vcpu->arch.fscr); 890 890 break; ··· 1035 1033 case KVM_REG_PPC_IAMR: 1036 1034 vcpu->arch.iamr = set_reg_val(id, *val); 1037 1035 break; 1036 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1038 1037 case KVM_REG_PPC_TFHAR: 1039 1038 vcpu->arch.tfhar = set_reg_val(id, *val); 1040 1039 break; ··· 1045 1042 case KVM_REG_PPC_TEXASR: 1046 1043 vcpu->arch.texasr = set_reg_val(id, *val); 1047 1044 break; 1045 + #endif 1048 1046 case KVM_REG_PPC_FSCR: 1049 1047 vcpu->arch.fscr = set_reg_val(id, *val); 1050 1048 break;
+73 -2
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 701 701 ld r6, VCPU_VTB(r4) 702 702 mtspr SPRN_IC, r5 703 703 mtspr SPRN_VTB, r6 704 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 704 705 ld r5, VCPU_TFHAR(r4) 705 706 ld r6, VCPU_TFIAR(r4) 706 707 ld r7, VCPU_TEXASR(r4) 707 - ld r8, VCPU_EBBHR(r4) 708 708 mtspr SPRN_TFHAR, r5 709 709 mtspr SPRN_TFIAR, r6 710 710 mtspr SPRN_TEXASR, r7 711 + #endif 712 + ld r8, VCPU_EBBHR(r4) 711 713 mtspr SPRN_EBBHR, r8 712 714 ld r5, VCPU_EBBRR(r4) 713 715 ld r6, VCPU_BESCR(r4) ··· 1120 1118 std r5, VCPU_IC(r9) 1121 1119 std r6, VCPU_VTB(r9) 1122 1120 std r7, VCPU_TAR(r9) 1121 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1123 1122 mfspr r5, SPRN_TFHAR 1124 1123 mfspr r6, SPRN_TFIAR 1125 1124 mfspr r7, SPRN_TEXASR 1126 - mfspr r8, SPRN_EBBHR 1127 1125 std r5, VCPU_TFHAR(r9) 1128 1126 std r6, VCPU_TFIAR(r9) 1129 1127 std r7, VCPU_TEXASR(r9) 1128 + #endif 1129 + mfspr r8, SPRN_EBBHR 1130 1130 std r8, VCPU_EBBHR(r9) 1131 1131 mfspr r5, SPRN_EBBRR 1132 1132 mfspr r6, SPRN_BESCR ··· 1500 1496 slbmte r6,r5 1501 1497 1: addi r8,r8,16 1502 1498 .endr 1499 + 1500 + /* Save DEC */ 1501 + mfspr r5,SPRN_DEC 1502 + mftb r6 1503 + extsw r5,r5 1504 + add r5,r5,r6 1505 + std r5,VCPU_DEC_EXPIRES(r9) 1506 + 1507 + BEGIN_FTR_SECTION 1508 + b 8f 1509 + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 1510 + /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ 1511 + mfmsr r8 1512 + li r0, 1 1513 + rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG 1514 + mtmsrd r8 1515 + 1516 + /* Save POWER8-specific registers */ 1517 + mfspr r5, SPRN_IAMR 1518 + mfspr r6, SPRN_PSPB 1519 + mfspr r7, SPRN_FSCR 1520 + std r5, VCPU_IAMR(r9) 1521 + stw r6, VCPU_PSPB(r9) 1522 + std r7, VCPU_FSCR(r9) 1523 + mfspr r5, SPRN_IC 1524 + mfspr r6, SPRN_VTB 1525 + mfspr r7, SPRN_TAR 1526 + std r5, VCPU_IC(r9) 1527 + std r6, VCPU_VTB(r9) 1528 + std r7, VCPU_TAR(r9) 1529 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1530 + mfspr r5, SPRN_TFHAR 1531 + mfspr r6, SPRN_TFIAR 1532 + mfspr r7, SPRN_TEXASR 1533 + std r5, VCPU_TFHAR(r9) 1534 + std r6, VCPU_TFIAR(r9) 1535 + std r7, VCPU_TEXASR(r9)
1536 + #endif 1537 + mfspr r8, SPRN_EBBHR 1538 + std r8, VCPU_EBBHR(r9) 1539 + mfspr r5, SPRN_EBBRR 1540 + mfspr r6, SPRN_BESCR 1541 + mfspr r7, SPRN_CSIGR 1542 + mfspr r8, SPRN_TACR 1543 + std r5, VCPU_EBBRR(r9) 1544 + std r6, VCPU_BESCR(r9) 1545 + std r7, VCPU_CSIGR(r9) 1546 + std r8, VCPU_TACR(r9) 1547 + mfspr r5, SPRN_TCSCR 1548 + mfspr r6, SPRN_ACOP 1549 + mfspr r7, SPRN_PID 1550 + mfspr r8, SPRN_WORT 1551 + std r5, VCPU_TCSCR(r9) 1552 + std r6, VCPU_ACOP(r9) 1553 + stw r7, VCPU_GUEST_PID(r9) 1554 + std r8, VCPU_WORT(r9) 1555 + 8: 1556 + 1557 + /* Save and reset AMR and UAMOR before turning on the MMU */ 1558 + BEGIN_FTR_SECTION 1559 + mfspr r5,SPRN_AMR 1560 + mfspr r6,SPRN_UAMOR 1561 + std r5,VCPU_AMR(r9) 1562 + std r6,VCPU_UAMOR(r9) 1563 + li r6,0 1564 + mtspr SPRN_AMR,r6 1565 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1503 1566 1504 1567 /* Unset guest mode */ 1505 1568 li r0, KVM_GUEST_MODE_NONE