Merge tag 'powerpc-6.0-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

- Ensure we never emit lwarx with EH=1 on 32-bit, because some 32-bit
CPUs trap on it rather than ignoring it as they should (a minimal sketch
of the resulting pattern follows this list).

- Fix ftrace when building with clang, which was broken by some
refactoring.

- A couple of other minor fixes.
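
For reference, the EH bit on lwarx is a hint that the reservation is being
taken in order to acquire a lock; 64-bit CPUs may use it to optimise
reservation handling, but e500v1/v2 based 32-bit CPUs treat lwarx with EH=1
as an illegal instruction. A minimal sketch of the pattern the fixes below
converge on (illustrative only: EH_HINT and load_reserved() are invented
names, and the kernel's IS_ENABLED() test is reduced to an #ifdef):

#ifdef CONFIG_PPC64
#define EH_HINT	1	/* hint: this reservation is for a lock acquisition */
#else
#define EH_HINT	0	/* e500v1/v2 trap on lwarx with EH=1, so keep it 0 */
#endif

static inline unsigned int load_reserved(unsigned int *p)
{
	unsigned int val;

	/*
	 * The "n" constraint demands a compile-time integer constant, so
	 * the hint is baked into the instruction encoding; naming the
	 * operand [eh] avoids fragile positional numbering like %5.
	 */
	asm volatile("lwarx	%0,0,%1,%[eh]"
		     : "=&r" (val)
		     : "r" (p), [eh] "n" (EH_HINT)
		     : "memory");
	return val;
}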

Thanks to Christophe Leroy, Naveen N. Rao, Nick Desaulniers, Ondrej
Mosnacek, Pali Rohár, Russell Currey, and Segher Boessenkool.

* tag 'powerpc-6.0-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/kexec: Fix build failure from uninitialised variable
powerpc/ppc-opcode: Fix PPC_RAW_TW()
powerpc64/ftrace: Fix ftrace for clang builds
powerpc: Make eh value more explicit when using lwarx
powerpc: Don't hide eh field of lwarx behind a macro
powerpc: Fix eh field when calling lwarx on PPC32

Changed files (+25 -30):

arch/powerpc/include/asm/atomic.h (+3 -2)

@@ -140,9 +140,10 @@
 arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
 {
 	int r, o = *old;
+	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
 
 	__asm__ __volatile__ (
-"1:	lwarx	%0,0,%2,%5	# atomic_try_cmpxchg_acquire	\n"
+"1:	lwarx	%0,0,%2,%[eh]	# atomic_try_cmpxchg_acquire	\n"
 "	cmpw	0,%0,%3	\n"
 "	bne-	2f	\n"
 "	stwcx.	%4,0,%2	\n"
@@ -150,7 +151,7 @@
 "\t"	PPC_ACQUIRE_BARRIER "	\n"
 "2:	\n"
 	: "=&r" (r), "+m" (v->counter)
-	: "r" (&v->counter), "r" (o), "r" (new), "i" (IS_ENABLED(CONFIG_PPC64) ? 1 : 0)
+	: "r" (&v->counter), "r" (o), "r" (new), [eh] "n" (eh)
 	: "cr0", "memory");
 
 	if (unlikely(r != o))

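Note that eh is declared as an ordinary local yet still satisfies the "n"
(numeric constant) constraint: IS_ENABLED() expands to an integer constant
expression, and the compiler folds the initialiser. A simplified sketch of
the preprocessor trick behind IS_ENABLED() (the real version in
include/linux/kconfig.h also handles =m module symbols):

#define __ARG_PLACEHOLDER_1			0,
#define __take_second_arg(__ignored, val, ...)	val
/* If arg1_or_junk expanded to "0,", val is 1; otherwise it is 0. */
#define ____is_defined(arg1_or_junk)		__take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val)			____is_defined(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option)			___is_defined(option)

#define CONFIG_PPC64 1	/* pretend this is a 64-bit .config */

static const unsigned int eh = IS_ENABLED(CONFIG_PPC64);	/* folds to 1 */
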
arch/powerpc/include/asm/bitops.h (+2 -2)

@@ -163,7 +163,7 @@
 	"bne- 1b\n"						\
 	postfix							\
 	: "=&r" (old), "=&r" (t)				\
-	: "rK" (mask), "r" (p), "i" (IS_ENABLED(CONFIG_PPC64) ? eh : 0)	\
+	: "rK" (mask), "r" (p), "n" (eh)			\
 	: "cc", "memory");					\
 	return (old & mask);					\
 }
@@ -171,7 +171,7 @@
 DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
 	      PPC_ATOMIC_EXIT_BARRIER, 0)
 DEFINE_TESTOP(test_and_set_bits_lock, or, "",
-	      PPC_ACQUIRE_BARRIER, 1)
+	      PPC_ACQUIRE_BARRIER, IS_ENABLED(CONFIG_PPC64))
 DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
 	      PPC_ATOMIC_EXIT_BARRIER, 0)
 

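The same change threads through the DEFINE_TESTOP() generator: the hint is
now a plain macro argument landing in an "n" constraint, and EH=1 is
requested only at the one call site that wants it, gated on CONFIG_PPC64.
A cut-down, hypothetical version of the shape (mini_* names invented,
entry/exit barriers omitted, IS_ENABLED() as sketched above):

#define DEFINE_MINI_TESTOP(fn, op, eh)					\
static inline unsigned int fn(unsigned int mask, unsigned int *p)	\
{									\
	unsigned int old, t;						\
									\
	__asm__ __volatile__(						\
	"1:	lwarx	%0,0,%3,%4\n"					\
	"	" #op "	%1,%0,%2\n"					\
	"	stwcx.	%1,0,%3\n"					\
	"	bne-	1b\n"						\
	: "=&r" (old), "=&r" (t)					\
	: "r" (mask), "r" (p), "n" (eh)					\
	: "cc", "memory");						\
	return (old & mask);						\
}

/* EH=1 only ever reaches the encoding on 64-bit builds: */
DEFINE_MINI_TESTOP(mini_test_and_set_lock, or, IS_ENABLED(CONFIG_PPC64))
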
arch/powerpc/include/asm/ppc-opcode.h (+2 -11)

@@ -343,6 +343,7 @@
 #define __PPC_SPR(r)	((((r) & 0x1f) << 16) | ((((r) >> 5) & 0x1f) << 11))
 #define __PPC_RC21	(0x1 << 10)
 #define __PPC_PRFX_R(r)	(((r) & 0x1) << 20)
+#define __PPC_EH(eh)	(((eh) & 0x1) << 0)
 
 /*
  * Both low and high 16 bits are added as SIGNED additions, so if low 16 bits
@@ -358,16 +359,6 @@
 /* LI Field */
 #define PPC_LI_MASK	0x03fffffc
 #define PPC_LI(v)	((v) & PPC_LI_MASK)
-
-/*
- * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
- * larx with EH set as an illegal instruction.
- */
-#ifdef CONFIG_PPC64
-#define __PPC_EH(eh)	(((eh) & 0x1) << 0)
-#else
-#define __PPC_EH(eh)	0
-#endif
 
 /* Base instruction encoding */
 #define PPC_RAW_CP_ABORT	(0x7c00068c)
@@ -580,6 +571,6 @@
 
 #define PPC_RAW_BRANCH(offset)	(0x48000000 | PPC_LI(offset))
 #define PPC_RAW_BL(offset)	(0x48000001 | PPC_LI(offset))
-#define PPC_RAW_TW(t0, a, b)	(0x7f000008 | ___PPC_RS(t0) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_TW(t0, a, b)	(0x7c000008 | ___PPC_RS(t0) | ___PPC_RA(a) | ___PPC_RB(b))
 #define PPC_RAW_TRAP()	PPC_RAW_TW(31, 0, 0)
 #define PPC_RAW_SETB(t, bfa)	(0x7c000100 | ___PPC_RT(t) | ___PPC_RA((bfa) << 2))

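The PPC_RAW_TW() fix is easy to verify by hand: tw TO,RA,RB is primary
opcode 31 with extended opcode 4, giving a base of 0x7c000008, and the TO
field occupies the same bits that ___PPC_RS() fills. The old base of
0x7f000008 had 0x03000000 (TO=24) baked in, so OR-ing in ___PPC_RS(t0)
could only add trap conditions: PPC_RAW_TRAP() (TO=31) happened to encode
correctly, but every other TO value came out wrong. A standalone host-side
check (field macros copied from the header, expected values worked out by
hand):

#include <assert.h>
#include <stdio.h>

#define ___PPC_RA(a)	(((a) & 0x1f) << 16)
#define ___PPC_RB(b)	(((b) & 0x1f) << 11)
#define ___PPC_RS(s)	(((s) & 0x1f) << 21)

#define PPC_RAW_TW(t0, a, b)	 (0x7c000008 | ___PPC_RS(t0) | ___PPC_RA(a) | ___PPC_RB(b))
#define OLD_PPC_RAW_TW(t0, a, b) (0x7f000008 | ___PPC_RS(t0) | ___PPC_RA(a) | ___PPC_RB(b))

int main(void)
{
	/* TO=31 traps unconditionally; both bases agree, which is why
	 * PPC_RAW_TRAP() masked the bug. */
	assert(PPC_RAW_TW(31, 0, 0) == 0x7fe00008);
	assert(OLD_PPC_RAW_TW(31, 0, 0) == 0x7fe00008);

	/* TO=4 is "trap if equal": the stray TO=24 bits in the old base
	 * turn it into TO=28, a different trap condition entirely. */
	assert(PPC_RAW_TW(4, 3, 4) == (0x7c000008 | (4 << 21) | (3 << 16) | (4 << 11)));
	assert(OLD_PPC_RAW_TW(4, 3, 4) == (0x7c000008 | (28 << 21) | (3 << 16) | (4 << 11)));

	puts("PPC_RAW_TW encodings check out");
	return 0;
}
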
arch/powerpc/include/asm/simple_spinlock.h (+9 -6)

@@ -48,10 +48,11 @@
 static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, token;
+	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
 
 	token = LOCK_TOKEN;
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%2,1\n\
+"1:	lwarx		%0,0,%2,%[eh]\n\
 	cmpwi		0,%0,0\n\
 	bne-		2f\n\
 	stwcx.		%1,0,%2\n\
@@ -59,7 +60,7 @@
 	PPC_ACQUIRE_BARRIER
 "2:"
 	: "=&r" (tmp)
-	: "r" (token), "r" (&lock->slock)
+	: "r" (token), "r" (&lock->slock), [eh] "n" (eh)
 	: "cr0", "memory");
 
 	return tmp;
@@ -156,9 +157,10 @@
 static inline long __arch_read_trylock(arch_rwlock_t *rw)
 {
 	long tmp;
+	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
 
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%1,1\n"
+"1:	lwarx		%0,0,%1,%[eh]\n"
 	__DO_SIGN_EXTEND
 "	addic.		%0,%0,1\n\
 	ble-		2f\n"
@@ -166,7 +168,7 @@
 	bne-		1b\n"
 	PPC_ACQUIRE_BARRIER
 "2:"	: "=&r" (tmp)
-	: "r" (&rw->lock)
+	: "r" (&rw->lock), [eh] "n" (eh)
 	: "cr0", "xer", "memory");
 
 	return tmp;
@@ -179,17 +181,18 @@
 static inline long __arch_write_trylock(arch_rwlock_t *rw)
 {
 	long tmp, token;
+	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
 
 	token = WRLOCK_TOKEN;
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%2,1\n\
+"1:	lwarx		%0,0,%2,%[eh]\n\
 	cmpwi		0,%0,0\n\
 	bne-		2f\n"
 "	stwcx.		%1,0,%2\n\
 	bne-		1b\n"
 	PPC_ACQUIRE_BARRIER
 "2:"	: "=&r" (tmp)
-	: "r" (token), "r" (&rw->lock)
+	: "r" (token), "r" (&rw->lock), [eh] "n" (eh)
 	: "cr0", "memory");
 
 	return tmp;

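These trylock primitives previously hard-coded ",1" in the lwarx template,
which is how EH=1 leaked into 32-bit builds; they now declare the hint the
same way atomic.h does. For context, a self-contained sketch of how such a
primitive is built and then wrapped into a spinning lock (mini_* names
invented; the real arch_spin_lock() also lowers SMT priority and yields to
the lock holder on shared-processor LPARs, and PPC_ACQUIRE_BARRIER is
simplified to isync here):

#define MINI_EH 1	/* assume a 64-bit build for this sketch */

typedef struct { volatile unsigned int slock; } mini_spinlock_t;

static inline unsigned long mini_spin_trylock(mini_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2,%[eh]\n"	/* load lock word, take reservation */
"	cmpwi	0,%0,0\n"
"	bne-	2f\n"			/* already held: bail out */
"	stwcx.	%1,0,%2\n"		/* try to store our token */
"	bne-	1b\n"			/* reservation lost: retry */
"	isync\n"			/* acquire barrier on success */
"2:"
	: "=&r" (tmp)
	: "r" (1UL), "r" (&lock->slock), [eh] "n" (MINI_EH)
	: "cr0", "memory");

	return tmp;	/* 0 means the stwcx. stuck and we hold the lock */
}

static inline void mini_spin_lock(mini_spinlock_t *lock)
{
	while (mini_spin_trylock(lock) != 0) {
		do { } while (lock->slock != 0);	/* spin until it looks free */
	}
}
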
arch/powerpc/kernel/trace/ftrace.c (+4 -4)

@@ -393,11 +393,11 @@
  */
 static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
 {
-	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
+	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
+	else
 		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
 		       ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
-	else
-		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
 }
 
 static int
@@ -412,7 +412,7 @@
 	if (copy_inst_from_kernel_nofault(op, ip))
 		return -EFAULT;
 
-	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1) &&
+	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
 	    copy_inst_from_kernel_nofault(op + 1, ip + 4))
 		return -EFAULT;
 

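The broken refactor keyed this check off the ELF ABI, but what actually
determines the patch-site layout is CONFIG_DYNAMIC_FTRACE_WITH_REGS (the
-mprofile-kernel scheme): there a disabled call site is a single nop, while
the older -pg scheme uses a two-instruction "b +8; ld r2,24(r1)" pair that
skips the TOC reload. Clang doesn't support -mprofile-kernel, so its builds
take the -pg path regardless of ABI, which the ABI-based test got wrong. A
host-side miniature of the fixed predicate (standalone, with the relevant
encodings written out as plain integers):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PPC_RAW_NOP()		0x60000000u		/* ori r0,r0,0 */
#define PPC_LI(v)		((v) & 0x03fffffcu)
#define PPC_RAW_BRANCH(off)	(0x48000000u | PPC_LI(off))
#define PPC_INST_LD_TOC		0xe8410018u		/* ld r2,24(r1) */

/* Miniature of expected_nop_sequence() after the fix. */
static bool expected_nop_sequence(bool with_regs, uint32_t op0, uint32_t op1)
{
	if (with_regs)
		return op0 == PPC_RAW_NOP();
	else
		return op0 == PPC_RAW_BRANCH(8) && op1 == PPC_INST_LD_TOC;
}

int main(void)
{
	/* -mprofile-kernel layout: a lone nop at the patch site. */
	assert(expected_nop_sequence(true, 0x60000000u, 0));
	/* -pg layout (e.g. a clang build): branch over the TOC reload. */
	assert(expected_nop_sequence(false, 0x48000008u, 0xe8410018u));
	return 0;
}
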
arch/powerpc/kexec/file_load_64.c (+5 -5)

@@ -1043,17 +1043,17 @@
 				const char *propname)
 {
 	const void *prop, *fdtprop;
-	int len = 0, fdtlen = 0, ret;
+	int len = 0, fdtlen = 0;
 
 	prop = of_get_property(dn, propname, &len);
 	fdtprop = fdt_getprop(fdt, node_offset, propname, &fdtlen);
 
 	if (fdtprop && !prop)
-		ret = fdt_delprop(fdt, node_offset, propname);
+		return fdt_delprop(fdt, node_offset, propname);
 	else if (prop)
-		ret = fdt_setprop(fdt, node_offset, propname, prop, len);
-
-	return ret;
+		return fdt_setprop(fdt, node_offset, propname, prop, len);
+	else
+		return -FDT_ERR_NOTFOUND;
 }
 
 static int update_pci_dma_nodes(void *fdt, const char *dmapropname)

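This is the classic maybe-uninitialised shape: ret was assigned on only two
of three paths, which compilers flag under -Wmaybe-uninitialized (and
kernel -Werror builds turn into the build failure named in the shortlog),
and the "property exists nowhere" case would have returned stack garbage at
runtime. In miniature, with hypothetical stand-in helpers:

/* delete_prop()/set_prop() stand in for fdt_delprop()/fdt_setprop(). */
static int delete_prop(void) { return 0; }
static int set_prop(void) { return 0; }

/* Before: ret is left uninitialised when neither branch runs. */
static int sync_prop_broken(int in_fdt, int in_dt)
{
	int ret;

	if (in_fdt && !in_dt)
		ret = delete_prop();
	else if (in_dt)
		ret = set_prop();

	return ret;	/* garbage if both flags were false */
}

/* After: every path returns, and the absent case gets a real error code. */
static int sync_prop_fixed(int in_fdt, int in_dt)
{
	if (in_fdt && !in_dt)
		return delete_prop();
	else if (in_dt)
		return set_prop();
	else
		return -1;	/* stands in for -FDT_ERR_NOTFOUND */
}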