Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3S HV: Add infrastructure to support 2nd DAWR

KVM code assumes a single DAWR everywhere. Add code to support a 2nd DAWR.
DAWR is a hypervisor resource, and thus the H_SET_MODE hcall is used to
set/unset it. Introduce a new case, H_SET_MODE_RESOURCE_SET_DAWR1, for the
2nd DAWR. Also, KVM will support the 2nd DAWR only if CPU_FTR_DAWR1 is set.

Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>

Authored by Ravi Bangoria and committed by Paul Mackerras
bd1de1a0 122954ed

+91 -1
+2
Documentation/virt/kvm/api.rst
··· 2276 2276 PPC KVM_REG_PPC_PSSCR 64 2277 2277 PPC KVM_REG_PPC_DEC_EXPIRY 64 2278 2278 PPC KVM_REG_PPC_PTCR 64 2279 + PPC KVM_REG_PPC_DAWR1 64 2280 + PPC KVM_REG_PPC_DAWRX1 64 2279 2281 PPC KVM_REG_PPC_TM_GPR0 64 2280 2282 ... 2281 2283 PPC KVM_REG_PPC_TM_GPR31 64
+7 -1
arch/powerpc/include/asm/hvcall.h
··· 569 569 u64 pidr; 570 570 u64 cfar; 571 571 u64 ppr; 572 + /* Version 1 ends here */ 573 + u64 dawr1; 574 + u64 dawrx1; 575 + /* Version 2 ends here */ 572 576 }; 573 577 574 578 /* Latest version of hv_guest_state structure */ 575 - #define HV_GUEST_STATE_VERSION 1 579 + #define HV_GUEST_STATE_VERSION 2 576 580 577 581 static inline int hv_guest_state_size(unsigned int version) 578 582 { 579 583 switch (version) { 580 584 case 1: 581 585 return offsetofend(struct hv_guest_state, ppr); 586 + case 2: 587 + return offsetofend(struct hv_guest_state, dawrx1); 582 588 default: 583 589 return -1; 584 590 }
+3
arch/powerpc/include/asm/kvm_host.h
··· 306 306 u8 svm_enabled; 307 307 bool threads_indep; 308 308 bool nested_enable; 309 + bool dawr1_enabled; 309 310 pgd_t *pgtable; 310 311 u64 process_table; 311 312 struct dentry *debugfs_dir; ··· 586 585 ulong dabr; 587 586 ulong dawr0; 588 587 ulong dawrx0; 588 + ulong dawr1; 589 + ulong dawrx1; 589 590 ulong ciabr; 590 591 ulong cfar; 591 592 ulong ppr;
+2
arch/powerpc/include/uapi/asm/kvm.h
··· 644 644 #define KVM_REG_PPC_MMCR3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc1) 645 645 #define KVM_REG_PPC_SIER2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc2) 646 646 #define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3) 647 + #define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4) 648 + #define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5) 647 649 648 650 /* Transactional Memory checkpointed state: 649 651 * This is all GPRs, all VSX regs and a subset of SPRs
+2
arch/powerpc/kernel/asm-offsets.c
··· 528 528 OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx); 529 529 OFFSET(VCPU_DAWR0, kvm_vcpu, arch.dawr0); 530 530 OFFSET(VCPU_DAWRX0, kvm_vcpu, arch.dawrx0); 531 + OFFSET(VCPU_DAWR1, kvm_vcpu, arch.dawr1); 532 + OFFSET(VCPU_DAWRX1, kvm_vcpu, arch.dawrx1); 531 533 OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr); 532 534 OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags); 533 535 OFFSET(VCPU_DEC, kvm_vcpu, arch.dec);
+43
arch/powerpc/kvm/book3s_hv.c
··· 785 785 vcpu->arch.dawr0 = value1; 786 786 vcpu->arch.dawrx0 = value2; 787 787 return H_SUCCESS; 788 + case H_SET_MODE_RESOURCE_SET_DAWR1: 789 + if (!kvmppc_power8_compatible(vcpu)) 790 + return H_P2; 791 + if (!ppc_breakpoint_available()) 792 + return H_P2; 793 + if (!cpu_has_feature(CPU_FTR_DAWR1)) 794 + return H_P2; 795 + if (!vcpu->kvm->arch.dawr1_enabled) 796 + return H_FUNCTION; 797 + if (mflags) 798 + return H_UNSUPPORTED_FLAG_START; 799 + if (value2 & DABRX_HYP) 800 + return H_P4; 801 + vcpu->arch.dawr1 = value1; 802 + vcpu->arch.dawrx1 = value2; 803 + return H_SUCCESS; 788 804 case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: 789 805 /* KVM does not support mflags=2 (AIL=2) */ 790 806 if (mflags != 0 && mflags != 3) ··· 1780 1764 case KVM_REG_PPC_DAWRX: 1781 1765 *val = get_reg_val(id, vcpu->arch.dawrx0); 1782 1766 break; 1767 + case KVM_REG_PPC_DAWR1: 1768 + *val = get_reg_val(id, vcpu->arch.dawr1); 1769 + break; 1770 + case KVM_REG_PPC_DAWRX1: 1771 + *val = get_reg_val(id, vcpu->arch.dawrx1); 1772 + break; 1783 1773 case KVM_REG_PPC_CIABR: 1784 1774 *val = get_reg_val(id, vcpu->arch.ciabr); 1785 1775 break; ··· 2017 1995 break; 2018 1996 case KVM_REG_PPC_DAWRX: 2019 1997 vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP; 1998 + break; 1999 + case KVM_REG_PPC_DAWR1: 2000 + vcpu->arch.dawr1 = set_reg_val(id, *val); 2001 + break; 2002 + case KVM_REG_PPC_DAWRX1: 2003 + vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP; 2020 2004 break; 2021 2005 case KVM_REG_PPC_CIABR: 2022 2006 vcpu->arch.ciabr = set_reg_val(id, *val); ··· 3481 3453 unsigned long host_dawrx0 = mfspr(SPRN_DAWRX0); 3482 3454 unsigned long host_psscr = mfspr(SPRN_PSSCR); 3483 3455 unsigned long host_pidr = mfspr(SPRN_PID); 3456 + unsigned long host_dawr1 = 0; 3457 + unsigned long host_dawrx1 = 0; 3458 + 3459 + if (cpu_has_feature(CPU_FTR_DAWR1)) { 3460 + host_dawr1 = mfspr(SPRN_DAWR1); 3461 + host_dawrx1 = mfspr(SPRN_DAWRX1); 3462 + } 3484 3463 3485 3464 /* 3486 3465 * P8 and P9 suppress 
the HDEC exception when LPCR[HDICE] = 0, ··· 3526 3491 if (dawr_enabled()) { 3527 3492 mtspr(SPRN_DAWR0, vcpu->arch.dawr0); 3528 3493 mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0); 3494 + if (cpu_has_feature(CPU_FTR_DAWR1)) { 3495 + mtspr(SPRN_DAWR1, vcpu->arch.dawr1); 3496 + mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1); 3497 + } 3529 3498 } 3530 3499 mtspr(SPRN_CIABR, vcpu->arch.ciabr); 3531 3500 mtspr(SPRN_IC, vcpu->arch.ic); ··· 3583 3544 mtspr(SPRN_CIABR, host_ciabr); 3584 3545 mtspr(SPRN_DAWR0, host_dawr0); 3585 3546 mtspr(SPRN_DAWRX0, host_dawrx0); 3547 + if (cpu_has_feature(CPU_FTR_DAWR1)) { 3548 + mtspr(SPRN_DAWR1, host_dawr1); 3549 + mtspr(SPRN_DAWRX1, host_dawrx1); 3550 + } 3586 3551 mtspr(SPRN_PID, host_pidr); 3587 3552 3588 3553 /*
+7
arch/powerpc/kvm/book3s_hv_nested.c
··· 49 49 hr->pidr = vcpu->arch.pid; 50 50 hr->cfar = vcpu->arch.cfar; 51 51 hr->ppr = vcpu->arch.ppr; 52 + hr->dawr1 = vcpu->arch.dawr1; 53 + hr->dawrx1 = vcpu->arch.dawrx1; 52 54 } 53 55 54 56 static void byteswap_pt_regs(struct pt_regs *regs) ··· 93 91 hr->pidr = swab64(hr->pidr); 94 92 hr->cfar = swab64(hr->cfar); 95 93 hr->ppr = swab64(hr->ppr); 94 + hr->dawr1 = swab64(hr->dawr1); 95 + hr->dawrx1 = swab64(hr->dawrx1); 96 96 } 97 97 98 98 static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap, ··· 142 138 143 139 /* Don't let data address watchpoint match in hypervisor state */ 144 140 hr->dawrx0 &= ~DAWRX_HYP; 141 + hr->dawrx1 &= ~DAWRX_HYP; 145 142 146 143 /* Don't let completed instruction address breakpt match in HV state */ 147 144 if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) ··· 172 167 vcpu->arch.pid = hr->pidr; 173 168 vcpu->arch.cfar = hr->cfar; 174 169 vcpu->arch.ppr = hr->ppr; 170 + vcpu->arch.dawr1 = hr->dawr1; 171 + vcpu->arch.dawrx1 = hr->dawrx1; 175 172 } 176 173 177 174 void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
+23
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 57 57 #define STACK_SLOT_HFSCR (SFS-72) 58 58 #define STACK_SLOT_AMR (SFS-80) 59 59 #define STACK_SLOT_UAMOR (SFS-88) 60 + #define STACK_SLOT_DAWR1 (SFS-96) 61 + #define STACK_SLOT_DAWRX1 (SFS-104) 60 62 /* the following is used by the P9 short path */ 61 63 #define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */ 62 64 ··· 717 715 std r7, STACK_SLOT_DAWRX0(r1) 718 716 std r8, STACK_SLOT_IAMR(r1) 719 717 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 718 + BEGIN_FTR_SECTION 719 + mfspr r6, SPRN_DAWR1 720 + mfspr r7, SPRN_DAWRX1 721 + std r6, STACK_SLOT_DAWR1(r1) 722 + std r7, STACK_SLOT_DAWRX1(r1) 723 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1) 720 724 721 725 mfspr r5, SPRN_AMR 722 726 std r5, STACK_SLOT_AMR(r1) ··· 813 805 ld r6, VCPU_DAWRX0(r4) 814 806 mtspr SPRN_DAWR0, r5 815 807 mtspr SPRN_DAWRX0, r6 808 + BEGIN_FTR_SECTION 809 + ld r5, VCPU_DAWR1(r4) 810 + ld r6, VCPU_DAWRX1(r4) 811 + mtspr SPRN_DAWR1, r5 812 + mtspr SPRN_DAWRX1, r6 813 + END_FTR_SECTION_IFSET(CPU_FTR_DAWR1) 816 814 1: 817 815 ld r7, VCPU_CIABR(r4) 818 816 ld r8, VCPU_TAR(r4) ··· 1783 1769 mtspr SPRN_DAWR0, r6 1784 1770 mtspr SPRN_DAWRX0, r7 1785 1771 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1772 + BEGIN_FTR_SECTION 1773 + ld r6, STACK_SLOT_DAWR1(r1) 1774 + ld r7, STACK_SLOT_DAWRX1(r1) 1775 + mtspr SPRN_DAWR1, r6 1776 + mtspr SPRN_DAWRX1, r7 1777 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1) 1786 1778 BEGIN_FTR_SECTION 1787 1779 ld r5, STACK_SLOT_TID(r1) 1788 1780 ld r6, STACK_SLOT_PSSCR(r1) ··· 3363 3343 mtspr SPRN_IAMR, r0 3364 3344 mtspr SPRN_CIABR, r0 3365 3345 mtspr SPRN_DAWRX0, r0 3346 + BEGIN_FTR_SECTION 3347 + mtspr SPRN_DAWRX1, r0 3348 + END_FTR_SECTION_IFSET(CPU_FTR_DAWR1) 3366 3349 3367 3350 BEGIN_MMU_FTR_SECTION 3368 3351 b 4f
+2
tools/arch/powerpc/include/uapi/asm/kvm.h
··· 644 644 #define KVM_REG_PPC_MMCR3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc1) 645 645 #define KVM_REG_PPC_SIER2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc2) 646 646 #define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3) 647 + #define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4) 648 + #define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5) 647 649 648 650 /* Transactional Memory checkpointed state: 649 651 * This is all GPRs, all VSX regs and a subset of SPRs