Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS fixes from Ralf Baechle:
"Another round of MIPS fixes for 4.2. No area does particularly stand
out but we have a two unpleasant ones:

- Kernel ptes are marked with a global bit which allows the kernel to
share kernel TLB entries between all processes. For this to work
both entries of an adjacent even/odd pte pair need to have the
global bit set. There has been a subtle race in setting the other
entry's global bit since ~2000, but it takes particularly
pathological workloads that essentially do mostly vmalloc/vfree to
trigger this; a simplified sketch of the racy update follows this
item.

This pull request fixes the 64-bit case but leaves the case of
32-bit CPUs with 64-bit ptes unsolved for now. The unfixed cases
affect hardware that is not yet available in the field.
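
To make the race concrete, here is a minimal sketch of the pre-fix
logic, condensed from set_pte() in arch/mips/include/asm/pgtable.h
(the wrapper name set_pte_race_sketch is made up for illustration;
ptep_buddy() and the pte helpers are the kernel's own):

    /*
     * Condensed pre-fix logic: the buddy update is a plain
     * read-modify-write. Two CPUs populating the two halves of an
     * even/odd pte pair can both observe the buddy as none and
     * interleave their stores, leaving one entry without
     * _PAGE_GLOBAL while the TLB assumes both halves have it.
     */
    static inline void set_pte_race_sketch(pte_t *ptep, pte_t pteval)
    {
        *ptep = pteval;
        if (pte_val(pteval) & _PAGE_GLOBAL) {
            pte_t *buddy = ptep_buddy(ptep);

            /* racy: load, test and store are separate steps */
            if (pte_none(*buddy))
                pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
        }
    }

The pgtable.h hunk below closes the 64-bit race by doing this
test-and-set with an ll/sc loop instead.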

- Instruction emulation requires loading instructions from user space,
but the current fast but simplistic approach will fail on pages
that are PROT_EXEC but !PROT_READ. For this reason we temporarily
disallow exec-only mappings and map pages requesting PROT_EXEC as
PROT_EXEC | PROT_READ (illustrated below).
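
As a rough userspace illustration of the temporary behaviour
(hypothetical example, not part of this series; the effect comes
from the protection_map[] change in arch/mips/mm/cache.c below):

    #include <sys/mman.h>

    /* Hypothetical: request an exec-only anonymous mapping. */
    int main(void)
    {
        /*
         * With read-inhibit (RIXI) support partially disabled,
         * the exec-only request is granted read permission as
         * well, so the in-kernel emulator can still load the
         * faulting instruction from user space.
         */
        void *code = mmap(NULL, 4096,
                          PROT_EXEC, /* acts as PROT_READ | PROT_EXEC */
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        return code == MAP_FAILED;
    }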

The remaining fixes are spread more or less across the field, and
the shortlog below explains them well"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
MIPS: Make set_pte() SMP safe.
MIPS: Replace add and sub instructions in relocate_kernel.S with addiu
MIPS: Flush RPS on kernel entry with EVA
Revert "MIPS: BCM63xx: Provide a plat_post_dma_flush hook"
MIPS: BMIPS: Delete unused Kconfig symbol
MIPS: Export get_c0_perfcount_int()
MIPS: show_stack: Fix stack trace with EVA
MIPS: do_mcheck: Fix kernel code dump with EVA
MIPS: SMP: Don't increment irq_count multiple times for call function IPIs
MIPS: Partially disable RIXI support.
MIPS: Handle page faults of executable but unreadable pages correctly.
MIPS: Malta: Don't reinitialise RTC
MIPS: unaligned: Fix build error on big endian R6 kernels
MIPS: Fix sched_getaffinity with MT FPAFF enabled
MIPS: Fix build with CONFIG_OF=y for non OF-enabled targets
CPUFREQ: Loongson2: Fix broken build due to incorrect include.

Changed files: +130 -62

arch/mips/Kconfig (-1)

     select BCM7120_L2_IRQ
     select BRCMSTB_L2_IRQ
     select IRQ_MIPS_CPU
-    select RAW_IRQ_ACCESSORS
     select DMA_NONCOHERENT
     select SYS_SUPPORTS_32BIT_KERNEL
     select SYS_SUPPORTS_LITTLE_ENDIAN

arch/mips/ath79/setup.c (+1)

 {
     return ATH79_MISC_IRQ(5);
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

 unsigned int get_c0_compare_int(void)
 {

arch/mips/cavium-octeon/smp.c (+1 -1)

     cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

     if (action & SMP_CALL_FUNCTION)
-        smp_call_function_interrupt();
+        generic_smp_call_function_interrupt();
     if (action & SMP_RESCHEDULE_YOURSELF)
         scheduler_ipi();

arch/mips/include/asm/mach-bcm63xx/dma-coherence.h (-10, file removed)

-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush    bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */

arch/mips/include/asm/pgtable.h (+31)

      * Make sure the buddy is global too (if it's !none,
      * it better already be global)
      */
+#ifdef CONFIG_SMP
+    /*
+     * For SMP, multiple CPUs can race, so we need to do
+     * this atomically.
+     */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+    unsigned long page_global = _PAGE_GLOBAL;
+    unsigned long tmp;
+
+    __asm__ __volatile__ (
+        "    .set    push\n"
+        "    .set    noreorder\n"
+        "1:  " LL_INSN "    %[tmp], %[buddy]\n"
+        "    bnez    %[tmp], 2f\n"
+        "     or     %[tmp], %[tmp], %[global]\n"
+        "    " SC_INSN "    %[tmp], %[buddy]\n"
+        "    beqz    %[tmp], 1b\n"
+        "     nop\n"
+        "2:\n"
+        "    .set    pop"
+        : [buddy] "+m" (buddy->pte),
+          [tmp] "=&r" (tmp)
+        : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
     if (pte_none(*buddy))
         pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
 }

arch/mips/include/asm/smp.h (-2)

 extern void play_dead(void);
 #endif

-extern asmlinkage void smp_call_function_interrupt(void);
-
 static inline void arch_send_call_function_single_ipi(int cpu)
 {
     extern struct plat_smp_ops *mp_ops;    /* private */

arch/mips/include/asm/stackframe.h (+25)

     .set    noreorder
     bltz    k0, 8f
      move   k1, sp
+#ifdef CONFIG_EVA
+    /*
+     * Flush interAptiv's Return Prediction Stack (RPS) by writing
+     * EntryHi. Toggling Config7.RPS is slower and less portable.
+     *
+     * The RPS isn't automatically flushed when exceptions are
+     * taken, which can result in kernel mode speculative accesses
+     * to user addresses if the RPS mispredicts. That's harmless
+     * when user and kernel share the same address space, but with
+     * EVA the same user segments may be unmapped to kernel mode,
+     * even containing sensitive MMIO regions or invalid memory.
+     *
+     * This can happen when the kernel sets the return address to
+     * ret_from_* and jr's to the exception handler, which looks
+     * more like a tail call than a function call. If nested calls
+     * don't evict the last user address in the RPS, it will
+     * mispredict the return and fetch from a user controlled
+     * address into the icache.
+     *
+     * More recent EVA-capable cores with MAAR to restrict
+     * speculative accesses aren't affected.
+     */
+    MFC0    k0, CP0_ENTRYHI
+    MTC0    k0, CP0_ENTRYHI
+#endif
     .set    reorder
     /* Called from user mode, new stack. */
     get_saved_sp

arch/mips/kernel/mips-mt-fpaff.c (+3 -2)

                  unsigned long __user *user_mask_ptr)
 {
     unsigned int real_len;
-    cpumask_t mask;
+    cpumask_t allowed, mask;
     int retval;
     struct task_struct *p;
···
     if (retval)
         goto out_unlock;

-    cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+    cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+    cpumask_and(&mask, &allowed, cpu_active_mask);

 out_unlock:
     read_unlock(&tasklist_lock);

arch/mips/kernel/prom.c (+1 -1)

     return mips_machine_name;
 }

-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
     return add_memory_region(base, size, BOOT_MEM_RAM);

arch/mips/kernel/relocate_kernel.S (+4 -4)

 process_entry:
     PTR_L       s2, (s0)
-    PTR_ADD     s0, s0, SZREG
+    PTR_ADDIU   s0, s0, SZREG

     /*
      * In case of a kdump/crash kernel, the indirection page is not
···
     /* copy page word by word */
     REG_L       s5, (s2)
     REG_S       s5, (s4)
-    PTR_ADD     s4, s4, SZREG
-    PTR_ADD     s2, s2, SZREG
-    LONG_SUB    s6, s6, 1
+    PTR_ADDIU   s4, s4, SZREG
+    PTR_ADDIU   s2, s2, SZREG
+    LONG_ADDIU  s6, s6, -1
     beq         s6, zero, process_entry
     b           copy_word
     b           process_entry

arch/mips/kernel/smp-bmips.c (+2 -2)

     if (action == 0)
         scheduler_ipi();
     else
-        smp_call_function_interrupt();
+        generic_smp_call_function_interrupt();

     return IRQ_HANDLED;
 }
···
     if (action & SMP_RESCHEDULE_YOURSELF)
         scheduler_ipi();
     if (action & SMP_CALL_FUNCTION)
-        smp_call_function_interrupt();
+        generic_smp_call_function_interrupt();

     return IRQ_HANDLED;
 }

arch/mips/kernel/smp.c (-10)

     cpu_startup_entry(CPUHP_ONLINE);
 }

-/*
- * Call into both interrupt handlers, as we share the IPI for them
- */
-void __irq_entry smp_call_function_interrupt(void)
-{
-    irq_enter();
-    generic_smp_call_function_interrupt();
-    irq_exit();
-}
-
 static void stop_this_cpu(void *dummy)
 {
     /*

arch/mips/kernel/traps.c (+13)

 void show_stack(struct task_struct *task, unsigned long *sp)
 {
     struct pt_regs regs;
+    mm_segment_t old_fs = get_fs();
     if (sp) {
         regs.regs[29] = (unsigned long)sp;
         regs.regs[31] = 0;
···
             prepare_frametrace(&regs);
         }
     }
+    /*
+     * show_stack() deals exclusively with kernel mode, so be sure to access
+     * the stack in the kernel (not user) address space.
+     */
+    set_fs(KERNEL_DS);
     show_stacktrace(task, &regs);
+    set_fs(old_fs);
 }

 static void show_code(unsigned int __user *pc)
···
     const int field = 2 * sizeof(unsigned long);
     int multi_match = regs->cp0_status & ST0_TS;
     enum ctx_state prev_state;
+    mm_segment_t old_fs = get_fs();

     prev_state = exception_enter();
     show_regs(regs);
···
         dump_tlb_all();
     }

+    if (!user_mode(regs))
+        set_fs(KERNEL_DS);
+
     show_code((unsigned int __user *) regs->cp0_epc);
+
+    set_fs(old_fs);

     /*
      * Some chips may have other causes of machine check (e.g. SB1

arch/mips/kernel/unaligned.c (+1 -1)

         : "memory");                                \
 } while(0)

-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
 do {                                                \
     __asm__ __volatile__ (                      \
         ".set\tpush\n\t"                        \

arch/mips/lantiq/irq.c (+2 -1)

 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-    smp_call_function_interrupt();
+    generic_smp_call_function_interrupt();
     return IRQ_HANDLED;
 }
···
 {
     return ltq_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

 unsigned int get_c0_compare_int(void)
 {

arch/mips/loongson64/loongson-3/smp.c (+5 -2)

     if (action & SMP_RESCHEDULE_YOURSELF)
         scheduler_ipi();

-    if (action & SMP_CALL_FUNCTION)
-        smp_call_function_interrupt();
+    if (action & SMP_CALL_FUNCTION) {
+        irq_enter();
+        generic_smp_call_function_interrupt();
+        irq_exit();
+    }

     if (action & SMP_ASK_C0COUNT) {
         BUG_ON(cpu != 0);

arch/mips/mm/cache.c (+4 -4)

     protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
     protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
     protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
-    protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+    protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
     protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-    protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+    protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
     protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);

     protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
     protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
     protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
     protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-    protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+    protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
     protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-    protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
+    protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
     protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

 } else {

arch/mips/mm/fault.c (+2 -1)

 #endif
         goto bad_area;
     }
-    if (!(vma->vm_flags & VM_READ)) {
+    if (!(vma->vm_flags & VM_READ) &&
+        exception_epc(regs) != address) {
 #if 0
         pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
               raw_smp_processor_id(),

arch/mips/mti-malta/malta-int.c (+1 -1)

 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-    smp_call_function_interrupt();
+    generic_smp_call_function_interrupt();

     return IRQ_HANDLED;
 }

arch/mips/mti-malta/malta-time.c (+10 -6)

     return mips_cpu_perf_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

 unsigned int get_c0_compare_int(void)
 {
···
 static void __init init_rtc(void)
 {
-    /* stop the clock whilst setting it up */
-    CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
+    unsigned char freq, ctrl;

-    /* 32KHz time base */
-    CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
+    /* Set 32KHz time base if not already set */
+    freq = CMOS_READ(RTC_FREQ_SELECT);
+    if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
+        CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);

-    /* start the clock */
-    CMOS_WRITE(RTC_24H, RTC_CONTROL);
+    /* Ensure SET bit is clear so RTC can run */
+    ctrl = CMOS_READ(RTC_CONTROL);
+    if (ctrl & RTC_SET)
+        CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
 }

 void __init plat_time_init(void)

arch/mips/mti-sead3/sead3-time.c (+1)

         return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
     return -1;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

 unsigned int get_c0_compare_int(void)
 {

arch/mips/netlogic/common/smp.c (+1 -1)

 {
     clear_c0_eimr(irq);
     ack_c0_eirr(irq);
-    smp_call_function_interrupt();
+    generic_smp_call_function_interrupt();
     set_c0_eimr(irq);
 }

arch/mips/paravirt/paravirt-smp.c (+1 -1)

 static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
 {
-    smp_call_function_interrupt();
+    generic_smp_call_function_interrupt();
     return IRQ_HANDLED;
 }

arch/mips/pistachio/time.c (+1)

 {
     return gic_get_c0_perfcount_int();
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

 int get_c0_fdc_int(void)
 {

arch/mips/pmcs-msp71xx/msp_smp.c (+1 -1)

 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-    smp_call_function_interrupt();
+    generic_smp_call_function_interrupt();

     return IRQ_HANDLED;
 }

arch/mips/ralink/irq.c (+1)

 {
     return rt_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

 unsigned int get_c0_compare_int(void)
 {

arch/mips/sgi-ip27/ip27-irq.c (+6 -2)

         scheduler_ipi();
     } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
         LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
-        smp_call_function_interrupt();
+        irq_enter();
+        generic_smp_call_function_interrupt();
+        irq_exit();
     } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
         LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
-        smp_call_function_interrupt();
+        irq_enter();
+        generic_smp_call_function_interrupt();
+        irq_exit();
     } else
 #endif
     {

arch/mips/sibyte/bcm1480/smp.c (+5 -4)

 #include <asm/sibyte/bcm1480_regs.h>
 #include <asm/sibyte/bcm1480_int.h>

-extern void smp_call_function_interrupt(void);
-
 /*
  * These are routines for dealing with the bcm1480 smp capabilities
  * independent of board/firmware
···
     if (action & SMP_RESCHEDULE_YOURSELF)
         scheduler_ipi();

-    if (action & SMP_CALL_FUNCTION)
-        smp_call_function_interrupt();
+    if (action & SMP_CALL_FUNCTION) {
+        irq_enter();
+        generic_smp_call_function_interrupt();
+        irq_exit();
+    }
 }

arch/mips/sibyte/sb1250/smp.c (+5 -2)

     if (action & SMP_RESCHEDULE_YOURSELF)
         scheduler_ipi();

-    if (action & SMP_CALL_FUNCTION)
-        smp_call_function_interrupt();
+    if (action & SMP_CALL_FUNCTION) {
+        irq_enter();
+        generic_smp_call_function_interrupt();
+        irq_exit();
+    }
 }

drivers/cpufreq/loongson2_cpufreq.c (+1 -1)

 #include <asm/clock.h>
 #include <asm/idle.h>

-#include <asm/mach-loongson/loongson.h>
+#include <asm/mach-loongson64/loongson.h>

 static uint nowait;

drivers/irqchip/irq-mips-gic.c (+1 -1)

 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-    smp_call_function_interrupt();
+    generic_smp_call_function_interrupt();

     return IRQ_HANDLED;
 }