Merge tag 'powerpc-5.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

- Fix an existing bug in our user access handling, exposed by one of
the bug fixes we merged this cycle.

- A fix for a boot hang on 32-bit with CONFIG_TRACE_IRQFLAGS and the
recently added CONFIG_VMAP_STACK.

Thanks to: Christophe Leroy, Guenter Roeck.

* tag 'powerpc-5.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc: Fix CONFIG_TRACE_IRQFLAGS with CONFIG_VMAP_STACK
powerpc/futex: Fix incorrect user access blocking

+7 -5
+6 -4
arch/powerpc/include/asm/futex.h
···
 35 {
 36 	int oldval = 0, ret;
 37
 38 -	allow_write_to_user(uaddr, sizeof(*uaddr));
 39 	pagefault_disable();
 40
 41 	switch (op) {
···
 62
 63 	*oval = oldval;
 64
 65 -	prevent_write_to_user(uaddr, sizeof(*uaddr));
 66 	return ret;
 67 }
 68
···
 76 	if (!access_ok(uaddr, sizeof(u32)))
 77 		return -EFAULT;
 78
 79 -	allow_write_to_user(uaddr, sizeof(*uaddr));
 80 	__asm__ __volatile__ (
 81         PPC_ATOMIC_ENTRY_BARRIER
 82 "1:     lwarx   %1,0,%3		# futex_atomic_cmpxchg_inatomic\n\
···
 98 	: "cc", "memory");
 99
100 	*uval = prev;
101 -	prevent_write_to_user(uaddr, sizeof(*uaddr));
102 	return ret;
103 }
104
···
 35 {
 36 	int oldval = 0, ret;
 37
 38 +	allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
 39 	pagefault_disable();
 40
 41 	switch (op) {
···
 62
 63 	*oval = oldval;
 64
 65 +	prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
 66 	return ret;
 67 }
 68
···
 76 	if (!access_ok(uaddr, sizeof(u32)))
 77 		return -EFAULT;
 78
 79 +	allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
 80 +
 81 	__asm__ __volatile__ (
 82         PPC_ATOMIC_ENTRY_BARRIER
 83 "1:     lwarx   %1,0,%3		# futex_atomic_cmpxchg_inatomic\n\
···
 97 	: "cc", "memory");
 98
 99 	*uval = prev;
100 +	prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
101 +
102 	return ret;
103 }
104
+1 -1
arch/powerpc/kernel/entry_32.S
···
214 	 * To speed up the syscall path where interrupts stay on, let's check
215 	 * first if we are changing the MSR value at all.
216 	 */
217 -	tophys(r12, r1)
218 	lwz	r12,_MSR(r12)
219 	andi.	r12,r12,MSR_EE
220 	bne	1f
···
214 	 * To speed up the syscall path where interrupts stay on, let's check
215 	 * first if we are changing the MSR value at all.
216 	 */
217 +	tophys_novmstack r12, r1
218 	lwz	r12,_MSR(r12)
219 	andi.	r12,r12,MSR_EE
220 	bne	1f