Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'powerpc-4.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

- Revert "Use the POWER8 Micro Partition Prefetch Engine in KVM HV on
POWER8" from Paul
- Handle irq_happened flag correctly in off-line loop from Paul
- Validate rtas.entry before calling enter_rtas() from Vasant

* tag 'powerpc-4.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/rtas: Validate rtas.entry before calling enter_rtas()
powerpc/powernv: Handle irq_happened flag correctly in off-line loop
powerpc: Revert "Use the POWER8 Micro Partition Prefetch Engine in KVM HV on POWER8"

+28 -86
-7
arch/powerpc/include/asm/cache.h
··· 3 3 4 4 #ifdef __KERNEL__ 5 5 6 - #include <asm/reg.h> 7 6 8 7 /* bytes per L1 cache line */ 9 8 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX) ··· 39 40 }; 40 41 41 42 extern struct ppc64_caches ppc64_caches; 42 - 43 - static inline void logmpp(u64 x) 44 - { 45 - asm volatile(PPC_LOGMPP(R1) : : "r" (x)); 46 - } 47 - 48 43 #endif /* __powerpc64__ && ! __ASSEMBLY__ */ 49 44 50 45 #if defined(__ASSEMBLY__)
-2
arch/powerpc/include/asm/kvm_host.h
··· 297 297 u32 arch_compat; 298 298 ulong pcr; 299 299 ulong dpdes; /* doorbell state (POWER8) */ 300 - void *mpp_buffer; /* Micro Partition Prefetch buffer */ 301 - bool mpp_buffer_is_valid; 302 300 ulong conferring_threads; 303 301 }; 304 302
-17
arch/powerpc/include/asm/ppc-opcode.h
··· 141 141 #define PPC_INST_ISEL 0x7c00001e 142 142 #define PPC_INST_ISEL_MASK 0xfc00003e 143 143 #define PPC_INST_LDARX 0x7c0000a8 144 - #define PPC_INST_LOGMPP 0x7c0007e4 145 144 #define PPC_INST_LSWI 0x7c0004aa 146 145 #define PPC_INST_LSWX 0x7c00042a 147 146 #define PPC_INST_LWARX 0x7c000028 ··· 284 285 #define __PPC_EH(eh) 0 285 286 #endif 286 287 287 - /* POWER8 Micro Partition Prefetch (MPP) parameters */ 288 - /* Address mask is common for LOGMPP instruction and MPPR SPR */ 289 - #define PPC_MPPE_ADDRESS_MASK 0xffffffffc000ULL 290 - 291 - /* Bits 60 and 61 of MPP SPR should be set to one of the following */ 292 - /* Aborting the fetch is indeed setting 00 in the table size bits */ 293 - #define PPC_MPPR_FETCH_ABORT (0x0ULL << 60) 294 - #define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60) 295 - 296 - /* Bits 54 and 55 of register for LOGMPP instruction should be set to: */ 297 - #define PPC_LOGMPP_LOG_L2 (0x02ULL << 54) 298 - #define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54) 299 - #define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54) 300 - 301 288 /* Deal with instructions that older assemblers aren't aware of */ 302 289 #define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ 303 290 __PPC_RA(a) | __PPC_RB(b)) ··· 292 307 #define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \ 293 308 ___PPC_RT(t) | ___PPC_RA(a) | \ 294 309 ___PPC_RB(b) | __PPC_EH(eh)) 295 - #define PPC_LOGMPP(b) stringify_in_c(.long PPC_INST_LOGMPP | \ 296 - __PPC_RB(b)) 297 310 #define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \ 298 311 ___PPC_RT(t) | ___PPC_RA(a) | \ 299 312 ___PPC_RB(b) | __PPC_EH(eh))
-1
arch/powerpc/include/asm/reg.h
··· 226 226 #define CTRL_TE 0x00c00000 /* thread enable */ 227 227 #define CTRL_RUNLATCH 0x1 228 228 #define SPRN_DAWR 0xB4 229 - #define SPRN_MPPR 0xB8 /* Micro Partition Prefetch Register */ 230 229 #define SPRN_RPR 0xBA /* Relative Priority Register */ 231 230 #define SPRN_CIABR 0xBB 232 231 #define CIABR_PRIV 0x3
+3
arch/powerpc/kernel/rtas.c
··· 1043 1043 if (!capable(CAP_SYS_ADMIN)) 1044 1044 return -EPERM; 1045 1045 1046 + if (!rtas.entry) 1047 + return -EINVAL; 1048 + 1046 1049 if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0) 1047 1050 return -EFAULT; 1048 1051
+1 -54
arch/powerpc/kvm/book3s_hv.c
··· 36 36 37 37 #include <asm/reg.h> 38 38 #include <asm/cputable.h> 39 - #include <asm/cache.h> 40 39 #include <asm/cacheflush.h> 41 40 #include <asm/tlbflush.h> 42 41 #include <asm/uaccess.h> ··· 73 74 #define TB_NIL (~(u64)0) 74 75 75 76 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1); 76 - 77 - #if defined(CONFIG_PPC_64K_PAGES) 78 - #define MPP_BUFFER_ORDER 0 79 - #elif defined(CONFIG_PPC_4K_PAGES) 80 - #define MPP_BUFFER_ORDER 3 81 - #endif 82 77 83 78 static int dynamic_mt_modes = 6; 84 79 module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR); ··· 1448 1455 vcore->kvm = kvm; 1449 1456 INIT_LIST_HEAD(&vcore->preempt_list); 1450 1457 1451 - vcore->mpp_buffer_is_valid = false; 1452 - 1453 - if (cpu_has_feature(CPU_FTR_ARCH_207S)) 1454 - vcore->mpp_buffer = (void *)__get_free_pages( 1455 - GFP_KERNEL|__GFP_ZERO, 1456 - MPP_BUFFER_ORDER); 1457 - 1458 1458 return vcore; 1459 1459 } 1460 1460 ··· 1878 1892 } 1879 1893 } 1880 1894 return 1; 1881 - } 1882 - 1883 - static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc) 1884 - { 1885 - phys_addr_t phy_addr, mpp_addr; 1886 - 1887 - phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer); 1888 - mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK; 1889 - 1890 - mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT); 1891 - logmpp(mpp_addr | PPC_LOGMPP_LOG_L2); 1892 - 1893 - vc->mpp_buffer_is_valid = true; 1894 - } 1895 - 1896 - static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc) 1897 - { 1898 - phys_addr_t phy_addr, mpp_addr; 1899 - 1900 - phy_addr = virt_to_phys(vc->mpp_buffer); 1901 - mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK; 1902 - 1903 - /* We must abort any in-progress save operations to ensure 1904 - * the table is valid so that prefetch engine knows when to 1905 - * stop prefetching. */
1906 - logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT); 1907 - mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE); 1908 1895 } 1909 1896 1910 1897 /* ··· 2430 2471 2431 2472 srcu_idx = srcu_read_lock(&vc->kvm->srcu); 2432 2473 2433 - if (vc->mpp_buffer_is_valid) 2434 - kvmppc_start_restoring_l2_cache(vc); 2435 - 2436 2474 __kvmppc_vcore_entry(); 2437 - 2438 - if (vc->mpp_buffer) 2439 - kvmppc_start_saving_l2_cache(vc); 2440 2475 2441 2476 srcu_read_unlock(&vc->kvm->srcu, srcu_idx); 2442 2477 ··· 3026 3073 { 3027 3074 long int i; 3028 3075 3029 - for (i = 0; i < KVM_MAX_VCORES; ++i) { 3030 - if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) { 3031 - struct kvmppc_vcore *vc = kvm->arch.vcores[i]; 3032 - free_pages((unsigned long)vc->mpp_buffer, 3033 - MPP_BUFFER_ORDER); 3034 - } 3076 + for (i = 0; i < KVM_MAX_VCORES; ++i) 3035 3077 kfree(kvm->arch.vcores[i]); 3036 - } 3037 3078 kvm->arch.online_vcores = 0; 3038 3079 }
+24 -5
arch/powerpc/platforms/powernv/smp.c
··· 171 171 * so clear LPCR:PECE1. We keep PECE2 enabled. 172 172 */ 173 173 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); 174 + 175 + /* 176 + * Hard-disable interrupts, and then clear irq_happened flags 177 + * that we can safely ignore while off-line, since they 178 + * are for things for which we do no processing when off-line 179 + * (or in the case of HMI, all the processing we need to do 180 + * is done in lower-level real-mode code). 181 + */ 182 + hard_irq_disable(); 183 + local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI); 184 + 174 185 while (!generic_check_cpu_restart(cpu)) { 186 + /* 187 + * Clear IPI flag, since we don't handle IPIs while 188 + * offline, except for those when changing micro-threading 189 + * mode, which are handled explicitly below, and those 190 + * for coming online, which are handled via 191 + * generic_check_cpu_restart() calls. 192 + */ 193 + kvmppc_set_host_ipi(cpu, 0); 175 194 176 195 ppc64_runlatch_off(); 177 196 ··· 215 196 * having finished executing in a KVM guest, then srr1 216 197 * contains 0. 217 198 */ 218 - if ((srr1 & wmask) == SRR1_WAKEEE) { 199 + if (((srr1 & wmask) == SRR1_WAKEEE) || 200 + (local_paca->irq_happened & PACA_IRQ_EE)) { 219 201 icp_native_flush_interrupt(); 220 - local_paca->irq_happened &= PACA_IRQ_HARD_DIS; 221 - smp_mb(); 222 202 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { 223 203 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); 224 204 asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); 225 - kvmppc_set_host_ipi(cpu, 0); 226 205 } 206 + local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL); 207 + smp_mb(); 227 208 228 209 if (cpu_core_split_required()) 229 210 continue; 230 211 231 - if (!generic_check_cpu_restart(cpu)) 212 + if (srr1 && !generic_check_cpu_restart(cpu)) 232 213 DBG("CPU%d Unexpected exit while offline !\n", cpu); 233 214 } 234 215 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);