Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: book3s_hv: Add support for PPC970-family processors

This adds support for running KVM guests in supervisor mode on those
PPC970 processors that have a usable hypervisor mode. Unfortunately,
Apple G5 machines have supervisor mode disabled (MSR[HV] is forced to
1), but the YDL PowerStation does have a usable hypervisor mode.

There are several differences between the PPC970 and POWER7 in how
guests are managed. These differences are accommodated using the
CPU_FTR_ARCH_201 (PPC970) and CPU_FTR_ARCH_206 (POWER7) CPU feature
bits. Notably, on PPC970:

* The LPCR, LPID or RMOR registers don't exist, and the functions of
those registers are provided by bits in HID4 and one bit in HID0.

* External interrupts can be directed to the hypervisor, but unlike
POWER7 they are masked by MSR[EE] in non-hypervisor modes and use
SRR0/1 not HSRR0/1.

* There is no virtual RMA (VRMA) mode; the guest must use an RMO
(real mode offset) area.

* The TLB entries are not tagged with the LPID, so it is necessary to
flush the whole TLB on partition switch. Furthermore, when switching
partitions we have to ensure that no other CPU is executing the tlbie
or tlbsync instructions in either the old or the new partition,
otherwise undefined behaviour can occur.

* The PMU has 8 counters (PMC registers) rather than 6.

* The DSCR, PURR, SPURR, AMR, AMOR, UAMOR registers don't exist.

* The SLB has 64 entries rather than 32.

* There is no mediated external interrupt facility, so if we switch to
a guest that has a virtual external interrupt pending but the guest
has MSR[EE] = 0, we have to arrange to have an interrupt pending for
it so that we can get control back once it re-enables interrupts. We
do that by sending ourselves an IPI with smp_send_reschedule after
hard-disabling interrupts.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>

Authored by Paul Mackerras; committed by Avi Kivity.
9e368f29 969391c5

+354 -42
+4
arch/powerpc/include/asm/exception-64s.h
··· 246 246 KVMTEST(vec); \ 247 247 _SOFTEN_TEST(EXC_HV) 248 248 249 + #define SOFTEN_TEST_HV_201(vec) \ 250 + KVMTEST(vec); \ 251 + _SOFTEN_TEST(EXC_STD) 252 + 249 253 #define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \ 250 254 HMT_MEDIUM; \ 251 255 SET_SCRATCH0(r13); /* save r13 */ \
+1 -1
arch/powerpc/include/asm/kvm_book3s_asm.h
··· 82 82 unsigned long xics_phys; 83 83 u64 dabr; 84 84 u64 host_mmcr[3]; 85 - u32 host_pmc[6]; 85 + u32 host_pmc[8]; 86 86 u64 host_purr; 87 87 u64 host_spurr; 88 88 u64 host_dscr;
+1 -1
arch/powerpc/include/asm/kvm_host.h
··· 353 353 u32 dbsr; 354 354 355 355 u64 mmcr[3]; 356 - u32 pmc[6]; 356 + u32 pmc[8]; 357 357 358 358 #ifdef CONFIG_KVM_EXIT_TIMING 359 359 struct mutex exit_timing_lock;
+1
arch/powerpc/kernel/asm-offsets.c
··· 128 128 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); 129 129 /* paca */ 130 130 DEFINE(PACA_SIZE, sizeof(struct paca_struct)); 131 + DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token)); 131 132 DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index)); 132 133 DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start)); 133 134 DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
+1 -1
arch/powerpc/kernel/exceptions-64s.S
··· 171 171 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502) 172 172 FTR_SECTION_ELSE 173 173 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, 174 - EXC_STD, SOFTEN_TEST_PR) 174 + EXC_STD, SOFTEN_TEST_HV_201) 175 175 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500) 176 176 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 177 177
+5 -8
arch/powerpc/kvm/Kconfig
··· 67 67 If unsure, say N. 68 68 69 69 config KVM_BOOK3S_64_HV 70 - bool "KVM support for POWER7 using hypervisor mode in host" 70 + bool "KVM support for POWER7 and PPC970 using hypervisor mode in host" 71 71 depends on KVM_BOOK3S_64 72 72 ---help--- 73 73 Support running unmodified book3s_64 guest kernels in 74 - virtual machines on POWER7 processors that have hypervisor 75 - mode available to the host. 74 + virtual machines on POWER7 and PPC970 processors that have 75 + hypervisor mode available to the host. 76 76 77 77 If you say Y here, KVM will use the hardware virtualization 78 78 facilities of POWER7 (and later) processors, meaning that 79 79 guest operating systems will run at full hardware speed 80 80 using supervisor and user modes. However, this also means 81 81 that KVM is not usable under PowerVM (pHyp), is only usable 82 - on POWER7 (or later) processors, and can only emulate 83 - POWER5+, POWER6 and POWER7 processors. 84 - 85 - This module provides access to the hardware capabilities through 86 - a character device node named /dev/kvm. 82 + on POWER7 (or later) processors and PPC970-family processors, 83 + and cannot emulate a different processor from the host processor. 87 84 88 85 If unsure, say N. 89 86
+22 -8
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 42 42 #define VRMA_PAGE_ORDER 24 43 43 #define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */ 44 44 45 + /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */ 46 + #define MAX_LPID_970 63 45 47 #define NR_LPIDS (LPID_RSVD + 1) 46 48 unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)]; 47 49 ··· 71 69 72 70 kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18); 73 71 kvm->arch.lpid = lpid; 74 - kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); 75 - kvm->arch.host_lpid = mfspr(SPRN_LPID); 76 - kvm->arch.host_lpcr = mfspr(SPRN_LPCR); 77 72 78 73 pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid); 79 74 return 0; ··· 127 128 128 129 int kvmppc_mmu_hv_init(void) 129 130 { 130 - if (!cpu_has_feature(CPU_FTR_HVMODE) || 131 - !cpu_has_feature(CPU_FTR_ARCH_206)) 131 + unsigned long host_lpid, rsvd_lpid; 132 + 133 + if (!cpu_has_feature(CPU_FTR_HVMODE)) 132 134 return -EINVAL; 135 + 133 136 memset(lpid_inuse, 0, sizeof(lpid_inuse)); 134 - set_bit(mfspr(SPRN_LPID), lpid_inuse); 135 - set_bit(LPID_RSVD, lpid_inuse); 137 + 138 + if (cpu_has_feature(CPU_FTR_ARCH_206)) { 139 + host_lpid = mfspr(SPRN_LPID); /* POWER7 */ 140 + rsvd_lpid = LPID_RSVD; 141 + } else { 142 + host_lpid = 0; /* PPC970 */ 143 + rsvd_lpid = MAX_LPID_970; 144 + } 145 + 146 + set_bit(host_lpid, lpid_inuse); 147 + /* rsvd_lpid is reserved for use in partition switching */ 148 + set_bit(rsvd_lpid, lpid_inuse); 136 149 137 150 return 0; 138 151 } ··· 168 157 { 169 158 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; 170 159 171 - vcpu->arch.slb_nr = 32; /* Assume POWER7 for now */ 160 + if (cpu_has_feature(CPU_FTR_ARCH_206)) 161 + vcpu->arch.slb_nr = 32; /* POWER7 */ 162 + else 163 + vcpu->arch.slb_nr = 64; 172 164 173 165 mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate; 174 166 mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
+49 -11
arch/powerpc/kvm/book3s_hv.c
··· 443 443 444 444 int kvmppc_core_check_processor_compat(void) 445 445 { 446 - if (cpu_has_feature(CPU_FTR_HVMODE) && 447 - cpu_has_feature(CPU_FTR_ARCH_206)) 446 + if (cpu_has_feature(CPU_FTR_HVMODE)) 448 447 return 0; 449 448 return -EIO; 450 449 } ··· 730 731 return -EINTR; 731 732 } 732 733 734 + /* On PPC970, check that we have an RMA region */ 735 + if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201)) 736 + return -EPERM; 737 + 733 738 kvm_run->exit_reason = 0; 734 739 vcpu->arch.ret = RESUME_GUEST; 735 740 vcpu->arch.trap = 0; ··· 923 920 } 924 921 925 922 /* Work out RMLS (real mode limit selector) field value for a given RMA size. 926 - Assumes POWER7. */ 923 + Assumes POWER7 or PPC970. */ 927 924 static inline int lpcr_rmls(unsigned long rma_size) 928 925 { 929 926 switch (rma_size) { 930 927 case 32ul << 20: /* 32 MB */ 931 - return 8; 928 + if (cpu_has_feature(CPU_FTR_ARCH_206)) 929 + return 8; /* only supported on POWER7 */ 930 + return -1; 932 931 case 64ul << 20: /* 64 MB */ 933 932 return 3; 934 933 case 128ul << 20: /* 128 MB */ ··· 1064 1059 mem->userspace_addr == vma->vm_start) 1065 1060 ri = vma->vm_file->private_data; 1066 1061 up_read(&current->mm->mmap_sem); 1062 + if (!ri && cpu_has_feature(CPU_FTR_ARCH_201)) { 1063 + pr_err("CPU requires an RMO\n"); 1064 + return -EINVAL; 1065 + } 1067 1066 } 1068 1067 1069 1068 if (ri) { ··· 1086 1077 atomic_inc(&ri->use_count); 1087 1078 kvm->arch.rma = ri; 1088 1079 kvm->arch.n_rma_pages = rma_size >> porder; 1089 - lpcr = kvm->arch.lpcr & ~(LPCR_VPM0 | LPCR_VRMA_L); 1090 - lpcr |= rmls << LPCR_RMLS_SH; 1080 + 1081 + /* Update LPCR and RMOR */ 1082 + lpcr = kvm->arch.lpcr; 1083 + if (cpu_has_feature(CPU_FTR_ARCH_201)) { 1084 + /* PPC970; insert RMLS value (split field) in HID4 */ 1085 + lpcr &= ~((1ul << HID4_RMLS0_SH) | 1086 + (3ul << HID4_RMLS2_SH)); 1087 + lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) | 1088 + ((rmls & 3) << HID4_RMLS2_SH); 1089 + /* RMOR is also in HID4 */ 1090 + lpcr |= 
((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff) 1091 + << HID4_RMOR_SH; 1092 + } else { 1093 + /* POWER7 */ 1094 + lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L); 1095 + lpcr |= rmls << LPCR_RMLS_SH; 1096 + kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT; 1097 + } 1091 1098 kvm->arch.lpcr = lpcr; 1092 - kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT; 1093 1099 pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n", 1094 1100 ri->base_pfn << PAGE_SHIFT, rma_size, lpcr); 1095 1101 } ··· 1175 1151 kvm->arch.rma = NULL; 1176 1152 kvm->arch.n_rma_pages = 0; 1177 1153 1178 - lpcr = kvm->arch.host_lpcr & (LPCR_PECE | LPCR_LPES); 1179 - lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | 1180 - LPCR_VPM0 | LPCR_VRMA_L; 1181 - kvm->arch.lpcr = lpcr; 1154 + kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); 1182 1155 1156 + if (cpu_has_feature(CPU_FTR_ARCH_201)) { 1157 + /* PPC970; HID4 is effectively the LPCR */ 1158 + unsigned long lpid = kvm->arch.lpid; 1159 + kvm->arch.host_lpid = 0; 1160 + kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4); 1161 + lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH)); 1162 + lpcr |= ((lpid >> 4) << HID4_LPID1_SH) | 1163 + ((lpid & 0xf) << HID4_LPID5_SH); 1164 + } else { 1165 + /* POWER7; init LPCR for virtual RMA mode */ 1166 + kvm->arch.host_lpid = mfspr(SPRN_LPID); 1167 + kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); 1168 + lpcr &= LPCR_PECE | LPCR_LPES; 1169 + lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | 1170 + LPCR_VPM0 | LPCR_VRMA_L; 1171 + } 1172 + kvm->arch.lpcr = lpcr; 1183 1173 1184 1174 return 0; 1185 1175
+7 -4
arch/powerpc/kvm/book3s_hv_builtin.c
··· 55 55 static DEFINE_SPINLOCK(rma_lock); 56 56 57 57 /* Work out RMLS (real mode limit selector) field value for a given RMA size. 58 - Assumes POWER7. */ 58 + Assumes POWER7 or PPC970. */ 59 59 static inline int lpcr_rmls(unsigned long rma_size) 60 60 { 61 61 switch (rma_size) { 62 62 case 32ul << 20: /* 32 MB */ 63 - return 8; 63 + if (cpu_has_feature(CPU_FTR_ARCH_206)) 64 + return 8; /* only supported on POWER7 */ 65 + return -1; 64 66 case 64ul << 20: /* 64 MB */ 65 67 return 3; 66 68 case 128ul << 20: /* 128 MB */ ··· 92 90 void *rma; 93 91 struct page *pg; 94 92 95 - /* Only do this in HV mode */ 96 - if (!cpu_has_feature(CPU_FTR_HVMODE)) 93 + /* Only do this on PPC970 in HV mode */ 94 + if (!cpu_has_feature(CPU_FTR_HVMODE) || 95 + !cpu_has_feature(CPU_FTR_ARCH_201)) 97 96 return; 98 97 99 98 if (!kvm_rma_size || !kvm_rma_count)
+30
arch/powerpc/kvm/book3s_hv_interrupts.S
··· 50 50 SAVE_NVGPRS(r1) 51 51 52 52 /* Save host DSCR */ 53 + BEGIN_FTR_SECTION 53 54 mfspr r3, SPRN_DSCR 54 55 std r3, HSTATE_DSCR(r13) 56 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 55 57 56 58 /* Save host DABR */ 57 59 mfspr r3, SPRN_DABR ··· 88 86 mfspr r7, SPRN_PMC4 89 87 mfspr r8, SPRN_PMC5 90 88 mfspr r9, SPRN_PMC6 89 + BEGIN_FTR_SECTION 90 + mfspr r10, SPRN_PMC7 91 + mfspr r11, SPRN_PMC8 92 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 91 93 stw r3, HSTATE_PMC(r13) 92 94 stw r5, HSTATE_PMC + 4(r13) 93 95 stw r6, HSTATE_PMC + 8(r13) 94 96 stw r7, HSTATE_PMC + 12(r13) 95 97 stw r8, HSTATE_PMC + 16(r13) 96 98 stw r9, HSTATE_PMC + 20(r13) 99 + BEGIN_FTR_SECTION 100 + stw r10, HSTATE_PMC + 24(r13) 101 + stw r11, HSTATE_PMC + 28(r13) 102 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 97 103 31: 98 104 99 105 /* ··· 114 104 extsw r8,r8 115 105 add r8,r8,r7 116 106 std r8,HSTATE_DECEXP(r13) 107 + 108 + /* 109 + * On PPC970, if the guest vcpu has an external interrupt pending, 110 + * send ourselves an IPI so as to interrupt the guest once it 111 + * enables interrupts. (It must have interrupts disabled, 112 + * otherwise we would already have delivered the interrupt.) 113 + */ 114 + BEGIN_FTR_SECTION 115 + ld r0, VCPU_PENDING_EXC(r4) 116 + li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL) 117 + oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h 118 + and. r0, r0, r7 119 + beq 32f 120 + mr r31, r4 121 + lhz r3, PACAPACAINDEX(r13) 122 + bl smp_send_reschedule 123 + nop 124 + mr r4, r31 125 + 32: 126 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 117 127 118 128 /* Jump to partition switch code */ 119 129 bl .kvmppc_hv_entry_trampoline
+4 -2
arch/powerpc/kvm/book3s_hv_rm_mmu.c
··· 56 56 /* only handle 4k, 64k and 16M pages for now */ 57 57 porder = 12; 58 58 if (pteh & HPTE_V_LARGE) { 59 - if ((ptel & 0xf000) == 0x1000) { 59 + if (cpu_has_feature(CPU_FTR_ARCH_206) && 60 + (ptel & 0xf000) == 0x1000) { 60 61 /* 64k page */ 61 62 porder = 16; 62 63 } else if ((ptel & 0xff000) == 0) { ··· 127 126 va_low &= 0x7ff; 128 127 if (v & HPTE_V_LARGE) { 129 128 rb |= 1; /* L field */ 130 - if (r & 0xff000) { 129 + if (cpu_has_feature(CPU_FTR_ARCH_206) && 130 + (r & 0xff000)) { 131 131 /* non-16MB large page, must be 64k */ 132 132 /* (masks depend on page size) */ 133 133 rb |= 0x1000; /* page encoding in LP field */
+225 -5
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 148 148 lwz r7, VCPU_PMC + 12(r4) 149 149 lwz r8, VCPU_PMC + 16(r4) 150 150 lwz r9, VCPU_PMC + 20(r4) 151 + BEGIN_FTR_SECTION 152 + lwz r10, VCPU_PMC + 24(r4) 153 + lwz r11, VCPU_PMC + 28(r4) 154 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 151 155 mtspr SPRN_PMC1, r3 152 156 mtspr SPRN_PMC2, r5 153 157 mtspr SPRN_PMC3, r6 154 158 mtspr SPRN_PMC4, r7 155 159 mtspr SPRN_PMC5, r8 156 160 mtspr SPRN_PMC6, r9 161 + BEGIN_FTR_SECTION 162 + mtspr SPRN_PMC7, r10 163 + mtspr SPRN_PMC8, r11 164 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 157 165 ld r3, VCPU_MMCR(r4) 158 166 ld r5, VCPU_MMCR + 8(r4) 159 167 ld r6, VCPU_MMCR + 16(r4) ··· 173 165 /* Load up FP, VMX and VSX registers */ 174 166 bl kvmppc_load_fp 175 167 168 + BEGIN_FTR_SECTION 176 169 /* Switch DSCR to guest value */ 177 170 ld r5, VCPU_DSCR(r4) 178 171 mtspr SPRN_DSCR, r5 172 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 179 173 180 174 /* 181 175 * Set the decrementer to the guest decrementer. ··· 220 210 mtspr SPRN_DABRX,r5 221 211 mtspr SPRN_DABR,r6 222 212 213 + BEGIN_FTR_SECTION 223 214 /* Restore AMR and UAMOR, set AMOR to all 1s */ 224 215 ld r5,VCPU_AMR(r4) 225 216 ld r6,VCPU_UAMOR(r4) ··· 228 217 mtspr SPRN_AMR,r5 229 218 mtspr SPRN_UAMOR,r6 230 219 mtspr SPRN_AMOR,r7 220 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 231 221 232 222 /* Clear out SLB */ 233 223 li r6,0 ··· 236 224 slbia 237 225 ptesync 238 226 227 + BEGIN_FTR_SECTION 228 + b 30f 229 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 230 + /* 231 + * POWER7 host -> guest partition switch code. 232 + * We don't have to lock against concurrent tlbies, 233 + * but we do have to coordinate across hardware threads. 234 + */ 239 235 /* Increment entry count iff exit count is zero. */ 240 236 ld r5,HSTATE_KVM_VCORE(r13) 241 237 addi r9,r5,VCORE_ENTRY_EXIT ··· 335 315 ld r8,VCPU_SPURR(r4) 336 316 mtspr SPRN_PURR,r7 337 317 mtspr SPRN_SPURR,r8 318 + b 31f 319 + 320 + /* 321 + * PPC970 host -> guest partition switch code. 
322 + * We have to lock against concurrent tlbies, 323 + * using native_tlbie_lock to lock against host tlbies 324 + * and kvm->arch.tlbie_lock to lock against guest tlbies. 325 + * We also have to invalidate the TLB since its 326 + * entries aren't tagged with the LPID. 327 + */ 328 + 30: ld r9,VCPU_KVM(r4) /* pointer to struct kvm */ 329 + 330 + /* first take native_tlbie_lock */ 331 + .section ".toc","aw" 332 + toc_tlbie_lock: 333 + .tc native_tlbie_lock[TC],native_tlbie_lock 334 + .previous 335 + ld r3,toc_tlbie_lock@toc(2) 336 + lwz r8,PACA_LOCK_TOKEN(r13) 337 + 24: lwarx r0,0,r3 338 + cmpwi r0,0 339 + bne 24b 340 + stwcx. r8,0,r3 341 + bne 24b 342 + isync 343 + 344 + ld r7,KVM_LPCR(r9) /* use kvm->arch.lpcr to store HID4 */ 345 + li r0,0x18f 346 + rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */ 347 + or r0,r7,r0 348 + ptesync 349 + sync 350 + mtspr SPRN_HID4,r0 /* switch to reserved LPID */ 351 + isync 352 + li r0,0 353 + stw r0,0(r3) /* drop native_tlbie_lock */ 354 + 355 + /* invalidate the whole TLB */ 356 + li r0,256 357 + mtctr r0 358 + li r6,0 359 + 25: tlbiel r6 360 + addi r6,r6,0x1000 361 + bdnz 25b 362 + ptesync 363 + 364 + /* Take the guest's tlbie_lock */ 365 + addi r3,r9,KVM_TLBIE_LOCK 366 + 24: lwarx r0,0,r3 367 + cmpwi r0,0 368 + bne 24b 369 + stwcx. r8,0,r3 370 + bne 24b 371 + isync 372 + ld r6,KVM_SDR1(r9) 373 + mtspr SPRN_SDR1,r6 /* switch to partition page table */ 374 + 375 + /* Set up HID4 with the guest's LPID etc. 
*/ 376 + sync 377 + mtspr SPRN_HID4,r7 378 + isync 379 + 380 + /* drop the guest's tlbie_lock */ 381 + li r0,0 382 + stw r0,0(r3) 383 + 384 + /* Check if HDEC expires soon */ 385 + mfspr r3,SPRN_HDEC 386 + cmpwi r3,10 387 + li r12,BOOK3S_INTERRUPT_HV_DECREMENTER 388 + mr r9,r4 389 + blt hdec_soon 390 + 391 + /* Enable HDEC interrupts */ 392 + mfspr r0,SPRN_HID0 393 + li r3,1 394 + rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1 395 + sync 396 + mtspr SPRN_HID0,r0 397 + mfspr r0,SPRN_HID0 398 + mfspr r0,SPRN_HID0 399 + mfspr r0,SPRN_HID0 400 + mfspr r0,SPRN_HID0 401 + mfspr r0,SPRN_HID0 402 + mfspr r0,SPRN_HID0 338 403 339 404 /* Load up guest SLB entries */ 340 - lwz r5,VCPU_SLB_MAX(r4) 405 + 31: lwz r5,VCPU_SLB_MAX(r4) 341 406 cmpwi r5,0 342 407 beq 9f 343 408 mtctr r5 ··· 577 472 hcall_real_cont: 578 473 579 474 /* Check for mediated interrupts (could be done earlier really ...) */ 475 + BEGIN_FTR_SECTION 580 476 cmpwi r12,BOOK3S_INTERRUPT_EXTERNAL 581 477 bne+ 1f 582 478 ld r5,VCPU_KVM(r9) ··· 587 481 andi. 
r0,r5,LPCR_MER 588 482 bne bounce_ext_interrupt 589 483 1: 484 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 590 485 591 486 /* Save DEC */ 592 487 mfspr r5,SPRN_DEC ··· 599 492 /* Save HEIR (HV emulation assist reg) in last_inst 600 493 if this is an HEI (HV emulation interrupt, e40) */ 601 494 li r3,-1 495 + BEGIN_FTR_SECTION 602 496 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST 603 497 bne 11f 604 498 mfspr r3,SPRN_HEIR 499 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 605 500 11: stw r3,VCPU_LAST_INST(r9) 606 501 607 502 /* Save more register state */ ··· 617 508 stw r7, VCPU_DSISR(r9) 618 509 std r8, VCPU_CTR(r9) 619 510 /* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */ 511 + BEGIN_FTR_SECTION 620 512 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE 621 513 beq 6f 514 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 622 515 7: std r6, VCPU_FAULT_DAR(r9) 623 516 stw r7, VCPU_FAULT_DSISR(r9) 624 517 ··· 654 543 /* 655 544 * Save the guest PURR/SPURR 656 545 */ 546 + BEGIN_FTR_SECTION 657 547 mfspr r5,SPRN_PURR 658 548 mfspr r6,SPRN_SPURR 659 549 ld r7,VCPU_PURR(r9) ··· 674 562 add r4,r4,r6 675 563 mtspr SPRN_PURR,r3 676 564 mtspr SPRN_SPURR,r4 565 + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201) 677 566 678 567 /* Clear out SLB */ 679 568 li r5,0 ··· 683 570 ptesync 684 571 685 572 hdec_soon: 573 + BEGIN_FTR_SECTION 574 + b 32f 575 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 576 + /* 577 + * POWER7 guest -> host partition switch code. 578 + * We don't have to lock against tlbies but we do 579 + * have to coordinate the hardware threads. 580 + */ 686 581 /* Increment the threads-exiting-guest count in the 0xff00 687 582 bits of vcore->entry_exit_count */ 688 583 lwsync ··· 761 640 16: ld r8,KVM_HOST_LPCR(r4) 762 641 mtspr SPRN_LPCR,r8 763 642 isync 643 + b 33f 644 + 645 + /* 646 + * PPC970 guest -> host partition switch code. 647 + * We have to lock against concurrent tlbies, and 648 + * we have to flush the whole TLB. 
649 + */ 650 + 32: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */ 651 + 652 + /* Take the guest's tlbie_lock */ 653 + lwz r8,PACA_LOCK_TOKEN(r13) 654 + addi r3,r4,KVM_TLBIE_LOCK 655 + 24: lwarx r0,0,r3 656 + cmpwi r0,0 657 + bne 24b 658 + stwcx. r8,0,r3 659 + bne 24b 660 + isync 661 + 662 + ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */ 663 + li r0,0x18f 664 + rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */ 665 + or r0,r7,r0 666 + ptesync 667 + sync 668 + mtspr SPRN_HID4,r0 /* switch to reserved LPID */ 669 + isync 670 + li r0,0 671 + stw r0,0(r3) /* drop guest tlbie_lock */ 672 + 673 + /* invalidate the whole TLB */ 674 + li r0,256 675 + mtctr r0 676 + li r6,0 677 + 25: tlbiel r6 678 + addi r6,r6,0x1000 679 + bdnz 25b 680 + ptesync 681 + 682 + /* take native_tlbie_lock */ 683 + ld r3,toc_tlbie_lock@toc(2) 684 + 24: lwarx r0,0,r3 685 + cmpwi r0,0 686 + bne 24b 687 + stwcx. r8,0,r3 688 + bne 24b 689 + isync 690 + 691 + ld r6,KVM_HOST_SDR1(r4) 692 + mtspr SPRN_SDR1,r6 /* switch to host page table */ 693 + 694 + /* Set up host HID4 value */ 695 + sync 696 + mtspr SPRN_HID4,r7 697 + isync 698 + li r0,0 699 + stw r0,0(r3) /* drop native_tlbie_lock */ 700 + 701 + lis r8,0x7fff /* MAX_INT@h */ 702 + mtspr SPRN_HDEC,r8 703 + 704 + /* Disable HDEC interrupts */ 705 + mfspr r0,SPRN_HID0 706 + li r3,0 707 + rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1 708 + sync 709 + mtspr SPRN_HID0,r0 710 + mfspr r0,SPRN_HID0 711 + mfspr r0,SPRN_HID0 712 + mfspr r0,SPRN_HID0 713 + mfspr r0,SPRN_HID0 714 + mfspr r0,SPRN_HID0 715 + mfspr r0,SPRN_HID0 764 716 765 717 /* load host SLB entries */ 766 - ld r8,PACA_SLBSHADOWPTR(r13) 718 + 33: ld r8,PACA_SLBSHADOWPTR(r13) 767 719 768 720 .rept SLB_NUM_BOLTED 769 721 ld r5,SLBSHADOW_SAVEAREA(r8) ··· 848 654 .endr 849 655 850 656 /* Save and reset AMR and UAMOR before turning on the MMU */ 657 + BEGIN_FTR_SECTION 851 658 mfspr r5,SPRN_AMR 852 659 mfspr r6,SPRN_UAMOR 853 660 std r5,VCPU_AMR(r9) 854 661 std 
r6,VCPU_UAMOR(r9) 855 662 li r6,0 856 663 mtspr SPRN_AMR,r6 664 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 857 665 858 666 /* Restore host DABR and DABRX */ 859 667 ld r5,HSTATE_DABR(r13) ··· 864 668 mtspr SPRN_DABRX,r6 865 669 866 670 /* Switch DSCR back to host value */ 671 + BEGIN_FTR_SECTION 867 672 mfspr r8, SPRN_DSCR 868 673 ld r7, HSTATE_DSCR(r13) 869 674 std r8, VCPU_DSCR(r7) 870 675 mtspr SPRN_DSCR, r7 676 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 871 677 872 678 /* Save non-volatile GPRs */ 873 679 std r14, VCPU_GPR(r14)(r9) ··· 933 735 mfspr r6, SPRN_PMC4 934 736 mfspr r7, SPRN_PMC5 935 737 mfspr r8, SPRN_PMC6 738 + BEGIN_FTR_SECTION 739 + mfspr r10, SPRN_PMC7 740 + mfspr r11, SPRN_PMC8 741 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 936 742 stw r3, VCPU_PMC(r9) 937 743 stw r4, VCPU_PMC + 4(r9) 938 744 stw r5, VCPU_PMC + 8(r9) 939 745 stw r6, VCPU_PMC + 12(r9) 940 746 stw r7, VCPU_PMC + 16(r9) 941 747 stw r8, VCPU_PMC + 20(r9) 748 + BEGIN_FTR_SECTION 749 + stw r10, VCPU_PMC + 24(r9) 750 + stw r11, VCPU_PMC + 28(r9) 751 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 942 752 22: 943 753 /* save FP state */ 944 754 mr r3, r9 945 755 bl .kvmppc_save_fp 946 756 947 - /* Secondary threads go off to take a nap */ 757 + /* Secondary threads go off to take a nap on POWER7 */ 758 + BEGIN_FTR_SECTION 948 759 lwz r0,VCPU_PTID(r3) 949 760 cmpwi r0,0 950 761 bne secondary_nap 762 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 951 763 952 764 /* 953 765 * Reload DEC. 
HDEC interrupts were disabled when ··· 979 771 lwz r6, HSTATE_PMC + 12(r13) 980 772 lwz r8, HSTATE_PMC + 16(r13) 981 773 lwz r9, HSTATE_PMC + 20(r13) 774 + BEGIN_FTR_SECTION 775 + lwz r10, HSTATE_PMC + 24(r13) 776 + lwz r11, HSTATE_PMC + 28(r13) 777 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 982 778 mtspr SPRN_PMC1, r3 983 779 mtspr SPRN_PMC2, r4 984 780 mtspr SPRN_PMC3, r5 985 781 mtspr SPRN_PMC4, r6 986 782 mtspr SPRN_PMC5, r8 987 783 mtspr SPRN_PMC6, r9 784 + BEGIN_FTR_SECTION 785 + mtspr SPRN_PMC7, r10 786 + mtspr SPRN_PMC8, r11 787 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 988 788 ld r3, HSTATE_MMCR(r13) 989 789 ld r4, HSTATE_MMCR + 8(r13) 990 790 ld r5, HSTATE_MMCR + 16(r13) ··· 1018 802 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK 1019 803 1020 804 /* RFI into the highmem handler, or branch to interrupt handler */ 1021 - mfmsr r6 805 + 12: mfmsr r6 1022 806 mtctr r12 1023 807 li r0, MSR_RI 1024 808 andc r6, r6, r0 ··· 1028 812 beqctr 1029 813 RFI 1030 814 1031 - 11: mtspr SPRN_HSRR0, r8 815 + 11: 816 + BEGIN_FTR_SECTION 817 + b 12b 818 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 819 + mtspr SPRN_HSRR0, r8 1032 820 mtspr SPRN_HSRR1, r7 1033 821 ba 0x500 1034 822
+3
arch/powerpc/kvm/powerpc.c
··· 213 213 break; 214 214 case KVM_CAP_PPC_RMA: 215 215 r = 1; 216 + /* PPC970 requires an RMA */ 217 + if (cpu_has_feature(CPU_FTR_ARCH_201)) 218 + r = 2; 216 219 break; 217 220 #endif 218 221 default:
+1 -1
arch/powerpc/mm/hash_native_64.c
··· 37 37 38 38 #define HPTE_LOCK_BIT 3 39 39 40 - static DEFINE_RAW_SPINLOCK(native_tlbie_lock); 40 + DEFINE_RAW_SPINLOCK(native_tlbie_lock); 41 41 42 42 static inline void __tlbie(unsigned long va, int psize, int ssize) 43 43 {