Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Work around POWER7 DABR corruption problem

It turns out that on POWER7, writing to the DABR can cause a corrupted
value to be written if the PMU is active and updating SDAR in continuous
sampling mode. To work around this, we make sure that the PMU is inactive
and SDAR updates are disabled (via MMCRA) when we are context-switching
DABR.

When the guest sets DABR via the H_SET_DABR hypercall, we use a slightly
different workaround, which is to read back the DABR and write it again
if it got corrupted.

While we are at it, make the code consistent: the guest's non-volatile
GPRs and the FPRs are now saved and restored while the guest's PMU
setup is active.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>

Authored by Paul Mackerras; committed by Avi Kivity.
8943633c 7657f408

+61 -41
+7 -2
arch/powerpc/kvm/book3s_hv_interrupts.S
··· 68 68 rotldi r10,r10,16 69 69 mtmsrd r10,1 70 70 71 - /* Save host PMU registers and load guest PMU registers */ 71 + /* Save host PMU registers */ 72 72 /* R4 is live here (vcpu pointer) but not r3 or r5 */ 73 73 li r3, 1 74 74 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 75 75 mfspr r7, SPRN_MMCR0 /* save MMCR0 */ 76 76 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */ 77 + mfspr r6, SPRN_MMCRA 78 + BEGIN_FTR_SECTION 79 + /* On P7, clear MMCRA in order to disable SDAR updates */ 80 + li r5, 0 81 + mtspr SPRN_MMCRA, r5 82 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 77 83 isync 78 84 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ 79 85 lbz r5, LPPACA_PMCINUSE(r3) 80 86 cmpwi r5, 0 81 87 beq 31f /* skip if not */ 82 88 mfspr r5, SPRN_MMCR1 83 - mfspr r6, SPRN_MMCRA 84 89 std r7, HSTATE_MMCR(r13) 85 90 std r5, HSTATE_MMCR + 8(r13) 86 91 std r6, HSTATE_MMCR + 16(r13)
+54 -39
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 159 159 mflr r0 160 160 std r0, HSTATE_VMHANDLER(r13) 161 161 162 - ld r14, VCPU_GPR(r14)(r4) 163 - ld r15, VCPU_GPR(r15)(r4) 164 - ld r16, VCPU_GPR(r16)(r4) 165 - ld r17, VCPU_GPR(r17)(r4) 166 - ld r18, VCPU_GPR(r18)(r4) 167 - ld r19, VCPU_GPR(r19)(r4) 168 - ld r20, VCPU_GPR(r20)(r4) 169 - ld r21, VCPU_GPR(r21)(r4) 170 - ld r22, VCPU_GPR(r22)(r4) 171 - ld r23, VCPU_GPR(r23)(r4) 172 - ld r24, VCPU_GPR(r24)(r4) 173 - ld r25, VCPU_GPR(r25)(r4) 174 - ld r26, VCPU_GPR(r26)(r4) 175 - ld r27, VCPU_GPR(r27)(r4) 176 - ld r28, VCPU_GPR(r28)(r4) 177 - ld r29, VCPU_GPR(r29)(r4) 178 - ld r30, VCPU_GPR(r30)(r4) 179 - ld r31, VCPU_GPR(r31)(r4) 162 + /* Set partition DABR */ 163 + /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ 164 + li r5,3 165 + ld r6,VCPU_DABR(r4) 166 + mtspr SPRN_DABRX,r5 167 + mtspr SPRN_DABR,r6 168 + BEGIN_FTR_SECTION 169 + isync 170 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 180 171 181 172 /* Load guest PMU registers */ 182 173 /* R4 is live here (vcpu pointer) */ ··· 205 214 206 215 /* Load up FP, VMX and VSX registers */ 207 216 bl kvmppc_load_fp 217 + 218 + ld r14, VCPU_GPR(r14)(r4) 219 + ld r15, VCPU_GPR(r15)(r4) 220 + ld r16, VCPU_GPR(r16)(r4) 221 + ld r17, VCPU_GPR(r17)(r4) 222 + ld r18, VCPU_GPR(r18)(r4) 223 + ld r19, VCPU_GPR(r19)(r4) 224 + ld r20, VCPU_GPR(r20)(r4) 225 + ld r21, VCPU_GPR(r21)(r4) 226 + ld r22, VCPU_GPR(r22)(r4) 227 + ld r23, VCPU_GPR(r23)(r4) 228 + ld r24, VCPU_GPR(r24)(r4) 229 + ld r25, VCPU_GPR(r25)(r4) 230 + ld r26, VCPU_GPR(r26)(r4) 231 + ld r27, VCPU_GPR(r27)(r4) 232 + ld r28, VCPU_GPR(r28)(r4) 233 + ld r29, VCPU_GPR(r29)(r4) 234 + ld r30, VCPU_GPR(r30)(r4) 235 + ld r31, VCPU_GPR(r31)(r4) 208 236 209 237 BEGIN_FTR_SECTION 210 238 /* Switch DSCR to guest value */ ··· 265 255 lwz r6, VCPU_DSISR(r4) 266 256 mtspr SPRN_DAR, r5 267 257 mtspr SPRN_DSISR, r6 268 - 269 - /* Set partition DABR */ 270 - li r5,3 271 - ld r6,VCPU_DABR(r4) 272 - mtspr SPRN_DABRX,r5 273 - mtspr SPRN_DABR,r6 274 258 275 259 
BEGIN_FTR_SECTION 276 260 /* Restore AMR and UAMOR, set AMOR to all 1s */ ··· 959 955 mtspr SPRN_AMR,r6 960 956 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 961 957 962 - /* Restore host DABR and DABRX */ 963 - ld r5,HSTATE_DABR(r13) 964 - li r6,7 965 - mtspr SPRN_DABR,r5 966 - mtspr SPRN_DABRX,r6 967 - 968 958 /* Switch DSCR back to host value */ 969 959 BEGIN_FTR_SECTION 970 960 mfspr r8, SPRN_DSCR ··· 997 999 std r5, VCPU_SPRG2(r9) 998 1000 std r6, VCPU_SPRG3(r9) 999 1001 1002 + /* save FP state */ 1003 + mr r3, r9 1004 + bl .kvmppc_save_fp 1005 + 1000 1006 /* Increment yield count if they have a VPA */ 1001 1007 ld r8, VCPU_VPA(r9) /* do they have a VPA? */ 1002 1008 cmpdi r8, 0 ··· 1015 1013 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 1016 1014 mfspr r4, SPRN_MMCR0 /* save MMCR0 */ 1017 1015 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 1016 + mfspr r6, SPRN_MMCRA 1017 + BEGIN_FTR_SECTION 1018 + /* On P7, clear MMCRA in order to disable SDAR updates */ 1019 + li r7, 0 1020 + mtspr SPRN_MMCRA, r7 1021 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1018 1022 isync 1019 1023 beq 21f /* if no VPA, save PMU stuff anyway */ 1020 1024 lbz r7, LPPACA_PMCINUSE(r8) ··· 1029 1021 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ 1030 1022 b 22f 1031 1023 21: mfspr r5, SPRN_MMCR1 1032 - mfspr r6, SPRN_MMCRA 1033 1024 std r4, VCPU_MMCR(r9) 1034 1025 std r5, VCPU_MMCR + 8(r9) 1035 1026 std r6, VCPU_MMCR + 16(r9) ··· 1053 1046 stw r11, VCPU_PMC + 28(r9) 1054 1047 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1055 1048 22: 1056 - /* save FP state */ 1057 - mr r3, r9 1058 - bl .kvmppc_save_fp 1059 1049 1060 1050 /* Secondary threads go off to take a nap on POWER7 */ 1061 1051 BEGIN_FTR_SECTION 1062 - lwz r0,VCPU_PTID(r3) 1052 + lwz r0,VCPU_PTID(r9) 1063 1053 cmpwi r0,0 1064 1054 bne secondary_nap 1065 1055 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1056 + 1057 + /* Restore host DABR and DABRX */ 1058 + ld r5,HSTATE_DABR(r13) 1059 + li r6,7 1060 + mtspr 
SPRN_DABR,r5 1061 + mtspr SPRN_DABRX,r6 1066 1062 1067 1063 /* 1068 1064 * Reload DEC. HDEC interrupts were disabled when ··· 1403 1393 1404 1394 _GLOBAL(kvmppc_h_set_dabr) 1405 1395 std r4,VCPU_DABR(r3) 1406 - mtspr SPRN_DABR,r4 1396 + /* Work around P7 bug where DABR can get corrupted on mtspr */ 1397 + 1: mtspr SPRN_DABR,r4 1398 + mfspr r5, SPRN_DABR 1399 + cmpd r4, r5 1400 + bne 1b 1401 + isync 1407 1402 li r3,0 1408 1403 blr 1409 1404 ··· 1630 1615 * r3 = vcpu pointer 1631 1616 */ 1632 1617 _GLOBAL(kvmppc_save_fp) 1633 - mfmsr r9 1634 - ori r8,r9,MSR_FP 1618 + mfmsr r5 1619 + ori r8,r5,MSR_FP 1635 1620 #ifdef CONFIG_ALTIVEC 1636 1621 BEGIN_FTR_SECTION 1637 1622 oris r8,r8,MSR_VEC@h ··· 1680 1665 #endif 1681 1666 mfspr r6,SPRN_VRSAVE 1682 1667 stw r6,VCPU_VRSAVE(r3) 1683 - mtmsrd r9 1668 + mtmsrd r5 1684 1669 isync 1685 1670 blr 1686 1671