Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc, KVM: Split HVMODE_206 cpu feature bit into separate HV and architecture bits

This replaces the single CPU_FTR_HVMODE_206 bit with two bits, one to
indicate that we have a usable hypervisor mode, and another to indicate
that the processor conforms to PowerISA version 2.06. We also add
another bit to indicate that the processor conforms to PowerISA version 2.01
and set that for PPC970 and derivatives.

Some PPC970 chips (specifically those in Apple machines) have a
hypervisor mode in that MSR[HV] is always 1, but the hypervisor mode
is not useful in the sense that there is no way to run any code in
supervisor mode (HV=0 PR=0). On these processors, the LPES0 and LPES1
bits in HID4 are always 0, and we use that as a way of detecting that
hypervisor mode is not useful.

Where we have a feature section in assembly code around code that
only applies on POWER7 in hypervisor mode, we use a construct like

END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)

The definition of END_FTR_SECTION_IFSET is such that the code will
be enabled (not overwritten with nops) only if all bits in the
provided mask are set.

Note that the CPU feature check in __tlbie() only needs to check the
ARCH_206 bit, not the HVMODE bit, because __tlbie() can only get called
if we are running bare-metal, i.e. in hypervisor mode.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>

authored by

Paul Mackerras and committed by
Avi Kivity
969391c5 aa04b4cc

+56 -26
+8 -6
arch/powerpc/include/asm/cputable.h
··· 179 179 #define LONG_ASM_CONST(x) 0 180 180 #endif 181 181 182 - 183 - #define CPU_FTR_HVMODE_206 LONG_ASM_CONST(0x0000000800000000) 182 + #define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000200000000) 183 + #define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000400000000) 184 + #define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000800000000) 184 185 #define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000) 185 186 #define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000) 186 187 #define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000) ··· 402 401 CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \ 403 402 CPU_FTR_STCX_CHECKS_ADDRESS) 404 403 #define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 405 - CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 404 + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \ 406 405 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ 407 - CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS) 406 + CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \ 407 + CPU_FTR_HVMODE) 408 408 #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 409 409 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 410 410 CPU_FTR_MMCRA | CPU_FTR_SMT | \ ··· 419 417 CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ 420 418 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR) 421 419 #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 422 - CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_HVMODE_206 |\ 420 + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ 423 421 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 424 422 CPU_FTR_COHERENT_ICACHE | \ 425 423 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 426 424 CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ 427 425 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 428 - CPU_FTR_ICSWX | CPU_FTR_CFAR) 426 + CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE) 429 427 #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 430 428 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 431 429 CPU_FTR_ALTIVEC_COMP | 
CPU_FTR_MMCRA | CPU_FTR_SMT | \
+12 -4
arch/powerpc/include/asm/reg.h
··· 307 307 #define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */ 308 308 #define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */ 309 309 #define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */ 310 + #define HID0_HDICE_SH (63 - 23) /* 970 HDEC interrupt enable */ 310 311 #define HID0_EMCP (1<<31) /* Enable Machine Check pin */ 311 312 #define HID0_EBA (1<<29) /* Enable Bus Address Parity */ 312 313 #define HID0_EBD (1<<28) /* Enable Bus Data Parity */ ··· 363 362 #define SPRN_IABR2 0x3FA /* 83xx */ 364 363 #define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */ 365 364 #define SPRN_HID4 0x3F4 /* 970 HID4 */ 365 + #define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */ 366 + #define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */ 367 + #define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */ 368 + #define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */ 369 + #define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */ 370 + #define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */ 371 + #define HID4_LPID1_SH 0 /* partition ID top 2 bits */ 366 372 #define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */ 367 373 #define SPRN_HID5 0x3F6 /* 970 HID5 */ 368 374 #define SPRN_HID6 0x3F9 /* BE HID 6 */ ··· 819 811 mfspr rX,SPRN_SPRG_PACA; \ 820 812 FTR_SECTION_ELSE_NESTED(66); \ 821 813 mfspr rX,SPRN_SPRG_HPACA; \ 822 - ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) 814 + ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66) 823 815 824 816 #define SET_PACA(rX) \ 825 817 BEGIN_FTR_SECTION_NESTED(66); \ 826 818 mtspr SPRN_SPRG_PACA,rX; \ 827 819 FTR_SECTION_ELSE_NESTED(66); \ 828 820 mtspr SPRN_SPRG_HPACA,rX; \ 829 - ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) 821 + ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66) 830 822 831 823 #define GET_SCRATCH0(rX) \ 832 824 BEGIN_FTR_SECTION_NESTED(66); \ 833 825 mfspr rX,SPRN_SPRG_SCRATCH0; \ 834 826 FTR_SECTION_ELSE_NESTED(66); \ 835 827 mfspr 
rX,SPRN_SPRG_HSCRATCH0; \ 836 - ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) 828 + ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66) 837 829 838 830 #define SET_SCRATCH0(rX) \ 839 831 BEGIN_FTR_SECTION_NESTED(66); \ 840 832 mtspr SPRN_SPRG_SCRATCH0,rX; \ 841 833 FTR_SECTION_ELSE_NESTED(66); \ 842 834 mtspr SPRN_SPRG_HSCRATCH0,rX; \ 843 - ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) 835 + ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66) 844 836 845 837 #else /* CONFIG_PPC_BOOK3S_64 */ 846 838 #define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0
+2 -2
arch/powerpc/kernel/cpu_setup_power7.S
··· 45 45 blr 46 46 47 47 __init_hvmode_206: 48 - /* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */ 48 + /* Disable CPU_FTR_HVMODE and exit if MSR:HV is not set */ 49 49 mfmsr r3 50 50 rldicl. r0,r3,4,63 51 51 bnelr 52 52 ld r5,CPU_SPEC_FEATURES(r4) 53 - LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206) 53 + LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE) 54 54 xor r5,r5,r6 55 55 std r5,CPU_SPEC_FEATURES(r4) 56 56 blr
+22 -4
arch/powerpc/kernel/cpu_setup_ppc970.S
··· 76 76 /* Do nothing if not running in HV mode */ 77 77 mfmsr r0 78 78 rldicl. r0,r0,4,63 79 - beqlr 79 + beq no_hv_mode 80 80 81 81 mfspr r0,SPRN_HID0 82 82 li r11,5 /* clear DOZE and SLEEP */ ··· 90 90 /* Do nothing if not running in HV mode */ 91 91 mfmsr r0 92 92 rldicl. r0,r0,4,63 93 - beqlr 93 + beq no_hv_mode 94 94 95 95 mfspr r0,SPRN_HID0 96 96 li r11,0x15 /* clear DOZE and SLEEP */ ··· 109 109 sync 110 110 isync 111 111 112 + /* Try to set LPES = 01 in HID4 */ 113 + mfspr r0,SPRN_HID4 114 + clrldi r0,r0,1 /* clear LPES0 */ 115 + ori r0,r0,HID4_LPES1 /* set LPES1 */ 116 + sync 117 + mtspr SPRN_HID4,r0 118 + isync 119 + 112 120 /* Save away cpu state */ 113 121 LOAD_REG_ADDR(r5,cpu_state_storage) 114 122 ··· 125 117 std r3,CS_HID0(r5) 126 118 mfspr r3,SPRN_HID1 127 119 std r3,CS_HID1(r5) 128 - mfspr r3,SPRN_HID4 129 - std r3,CS_HID4(r5) 120 + mfspr r4,SPRN_HID4 121 + std r4,CS_HID4(r5) 130 122 mfspr r3,SPRN_HID5 131 123 std r3,CS_HID5(r5) 132 124 125 + /* See if we successfully set LPES1 to 1; if not we are in Apple mode */ 126 + andi. r4,r4,HID4_LPES1 127 + bnelr 128 + 129 + no_hv_mode: 130 + /* Disable CPU_FTR_HVMODE and exit, since we don't have HV mode */ 131 + ld r5,CPU_SPEC_FEATURES(r4) 132 + LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE) 133 + andc r5,r5,r6 134 + std r5,CPU_SPEC_FEATURES(r4) 133 135 blr 134 136 135 137 /* Called with no MMU context (typically MSR:IR/DR off) to
+2 -2
arch/powerpc/kernel/exceptions-64s.S
··· 75 75 b .power7_wakeup_noloss 76 76 2: b .power7_wakeup_loss 77 77 9: 78 - END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206) 78 + END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 79 79 #endif /* CONFIG_PPC_P7_NAP */ 80 80 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD, 81 81 NOTEST, 0x100) ··· 173 173 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, 174 174 EXC_STD, SOFTEN_TEST_PR) 175 175 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500) 176 - ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE_206) 176 + ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 177 177 178 178 STD_EXCEPTION_PSERIES(0x600, 0x600, alignment) 179 179 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
+1 -1
arch/powerpc/kernel/paca.c
··· 167 167 * if we do a GET_PACA() before the feature fixups have been 168 168 * applied 169 169 */ 170 - if (cpu_has_feature(CPU_FTR_HVMODE_206)) 170 + if (cpu_has_feature(CPU_FTR_HVMODE)) 171 171 mtspr(SPRN_SPRG_HPACA, local_paca); 172 172 #endif 173 173 mtspr(SPRN_SPRG_PACA, local_paca);
+2 -1
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 128 128 129 129 int kvmppc_mmu_hv_init(void) 130 130 { 131 - if (!cpu_has_feature(CPU_FTR_HVMODE_206)) 131 + if (!cpu_has_feature(CPU_FTR_HVMODE) || 132 + !cpu_has_feature(CPU_FTR_ARCH_206)) 132 133 return -EINVAL; 133 134 memset(lpid_inuse, 0, sizeof(lpid_inuse)); 134 135 set_bit(mfspr(SPRN_LPID), lpid_inuse);
+2 -1
arch/powerpc/kvm/book3s_hv.c
··· 443 443 444 444 int kvmppc_core_check_processor_compat(void) 445 445 { 446 - if (cpu_has_feature(CPU_FTR_HVMODE_206)) 446 + if (cpu_has_feature(CPU_FTR_HVMODE) && 447 + cpu_has_feature(CPU_FTR_ARCH_206)) 447 448 return 0; 448 449 return -EIO; 449 450 }
+2 -2
arch/powerpc/kvm/book3s_hv_builtin.c
··· 90 90 void *rma; 91 91 struct page *pg; 92 92 93 - /* Only do this on POWER7 in HV mode */ 94 - if (!cpu_has_feature(CPU_FTR_HVMODE_206)) 93 + /* Only do this in HV mode */ 94 + if (!cpu_has_feature(CPU_FTR_HVMODE)) 95 95 return; 96 96 97 97 if (!kvm_rma_size || !kvm_rma_count)
+1 -1
arch/powerpc/kvm/book3s_segment.S
··· 170 170 mfspr r4,SPRN_HSRR1 171 171 andi. r12,r12,0x3ffd 172 172 b 2f 173 - END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206) 173 + END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 174 174 #endif 175 175 1: mfsrr0 r3 176 176 mfsrr1 r4
+2 -2
arch/powerpc/mm/hash_native_64.c
··· 51 51 va &= ~0xffful; 52 52 va |= ssize << 8; 53 53 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) 54 - : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206) 54 + : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) 55 55 : "memory"); 56 56 break; 57 57 default: ··· 61 61 va |= ssize << 8; 62 62 va |= 1; /* L */ 63 63 asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2) 64 - : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206) 64 + : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) 65 65 : "memory"); 66 66 break; 67 67 }