Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'parisc-5.9-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull more parisc updates from Helge Deller:

- Oscar Carter contributed a patch which fixes parisc's usage of
dereference_function_descriptor() and thus will allow using the
-Wcast-function-type compiler option in the top-level Makefile

- Sven Schnelle fixed a bug in the SBA code to prevent crashes during
kexec

- John David Anglin provided implementations for the __smp_store_release()
and __smp_load_acquire() barriers, which avoid using the sync
assembler instruction and thus speed up barrier paths

- Some whitespace cleanups in parisc's atomic.h header file

* 'parisc-5.9-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
parisc: Implement __smp_store_release and __smp_load_acquire barriers
parisc: mask out enable and reserved bits from sba imask
parisc: Whitespace cleanups in atomic.h
parisc/kernel/ftrace: Remove function callback casts
sections.h: dereference_function_descriptor() returns void pointer

+70 -8
+4 -4
arch/parisc/include/asm/atomic.h
··· 34 34 /* Can't use raw_spin_lock_irq because of #include problems, so 35 35 * this is the substitute */ 36 36 #define _atomic_spin_lock_irqsave(l,f) do { \ 37 - arch_spinlock_t *s = ATOMIC_HASH(l); \ 37 + arch_spinlock_t *s = ATOMIC_HASH(l); \ 38 38 local_irq_save(f); \ 39 39 arch_spin_lock(s); \ 40 40 } while(0) 41 41 42 42 #define _atomic_spin_unlock_irqrestore(l,f) do { \ 43 - arch_spinlock_t *s = ATOMIC_HASH(l); \ 43 + arch_spinlock_t *s = ATOMIC_HASH(l); \ 44 44 arch_spin_unlock(s); \ 45 45 local_irq_restore(f); \ 46 46 } while(0) ··· 85 85 _atomic_spin_lock_irqsave(v, flags); \ 86 86 v->counter c_op i; \ 87 87 _atomic_spin_unlock_irqrestore(v, flags); \ 88 - } \ 88 + } 89 89 90 90 #define ATOMIC_OP_RETURN(op, c_op) \ 91 91 static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ ··· 148 148 _atomic_spin_lock_irqsave(v, flags); \ 149 149 v->counter c_op i; \ 150 150 _atomic_spin_unlock_irqrestore(v, flags); \ 151 - } \ 151 + } 152 152 153 153 #define ATOMIC64_OP_RETURN(op, c_op) \ 154 154 static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
+61
arch/parisc/include/asm/barrier.h
··· 26 26 #define __smp_rmb() mb() 27 27 #define __smp_wmb() mb() 28 28 29 + #define __smp_store_release(p, v) \ 30 + do { \ 31 + typeof(p) __p = (p); \ 32 + union { typeof(*p) __val; char __c[1]; } __u = \ 33 + { .__val = (__force typeof(*p)) (v) }; \ 34 + compiletime_assert_atomic_type(*p); \ 35 + switch (sizeof(*p)) { \ 36 + case 1: \ 37 + asm volatile("stb,ma %0,0(%1)" \ 38 + : : "r"(*(__u8 *)__u.__c), "r"(__p) \ 39 + : "memory"); \ 40 + break; \ 41 + case 2: \ 42 + asm volatile("sth,ma %0,0(%1)" \ 43 + : : "r"(*(__u16 *)__u.__c), "r"(__p) \ 44 + : "memory"); \ 45 + break; \ 46 + case 4: \ 47 + asm volatile("stw,ma %0,0(%1)" \ 48 + : : "r"(*(__u32 *)__u.__c), "r"(__p) \ 49 + : "memory"); \ 50 + break; \ 51 + case 8: \ 52 + if (IS_ENABLED(CONFIG_64BIT)) \ 53 + asm volatile("std,ma %0,0(%1)" \ 54 + : : "r"(*(__u64 *)__u.__c), "r"(__p) \ 55 + : "memory"); \ 56 + break; \ 57 + } \ 58 + } while (0) 59 + 60 + #define __smp_load_acquire(p) \ 61 + ({ \ 62 + union { typeof(*p) __val; char __c[1]; } __u; \ 63 + typeof(p) __p = (p); \ 64 + compiletime_assert_atomic_type(*p); \ 65 + switch (sizeof(*p)) { \ 66 + case 1: \ 67 + asm volatile("ldb,ma 0(%1),%0" \ 68 + : "=r"(*(__u8 *)__u.__c) : "r"(__p) \ 69 + : "memory"); \ 70 + break; \ 71 + case 2: \ 72 + asm volatile("ldh,ma 0(%1),%0" \ 73 + : "=r"(*(__u16 *)__u.__c) : "r"(__p) \ 74 + : "memory"); \ 75 + break; \ 76 + case 4: \ 77 + asm volatile("ldw,ma 0(%1),%0" \ 78 + : "=r"(*(__u32 *)__u.__c) : "r"(__p) \ 79 + : "memory"); \ 80 + break; \ 81 + case 8: \ 82 + if (IS_ENABLED(CONFIG_64BIT)) \ 83 + asm volatile("ldd,ma 0(%1),%0" \ 84 + : "=r"(*(__u64 *)__u.__c) : "r"(__p) \ 85 + : "memory"); \ 86 + break; \ 87 + } \ 88 + __u.__val; \ 89 + }) 29 90 #include <asm-generic/barrier.h> 30 91 31 92 #endif /* !__ASSEMBLY__ */
+2 -1
arch/parisc/kernel/ftrace.c
··· 64 64 function_trace_op, regs); 65 65 66 66 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 67 - if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub || 67 + if (dereference_function_descriptor(ftrace_graph_return) != 68 + dereference_function_descriptor(ftrace_stub) || 68 69 ftrace_graph_entry != ftrace_graph_entry_stub) { 69 70 unsigned long *parent_rp; 70 71
+1 -1
drivers/parisc/sba_iommu.c
··· 1270 1270 ** (one that doesn't overlap memory or LMMIO space) in the 1271 1271 ** IBASE and IMASK registers. 1272 1272 */ 1273 - ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE); 1273 + ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL; 1274 1274 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; 1275 1275 1276 1276 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
+2 -2
include/asm-generic/sections.h
··· 60 60 61 61 /* Function descriptor handling (if any). Override in asm/sections.h */ 62 62 #ifndef dereference_function_descriptor 63 - #define dereference_function_descriptor(p) (p) 64 - #define dereference_kernel_function_descriptor(p) (p) 63 + #define dereference_function_descriptor(p) ((void *)(p)) 64 + #define dereference_kernel_function_descriptor(p) ((void *)(p)) 65 65 #endif 66 66 67 67 /* random extra sections (if any). Override