Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

openrisc: add optimized atomic operations

Use the l.lwa and l.swa atomic instruction pair to implement the atomic
operations. Most OpenRISC processor cores now provide these instructions;
for cores that lack them, emulation is provided.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
[shorne@gmail.com: remove OPENRISC_HAVE_INST_LWA_SWA config, suggested by
Alan Cox https://lkml.org/lkml/2014/7/23/666]
[shorne@gmail.com: expand to implement all ops suggested by Peter
Zijlstra https://lkml.org/lkml/2017/2/20/317]
Signed-off-by: Stafford Horne <shorne@gmail.com>

Authored by Stefan Kristiansson, committed by Stafford Horne
bc19598f 11595172
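
For reference, here is roughly what ATOMIC_OP(add) in the new header below
expands to. l.lwa loads the word and sets a hardware reservation; l.swa
stores only if that reservation is still intact, setting the flag on
success, so l.bnf loops back and retries whenever another CPU touched the
location in between (the l.nop fills the branch delay slot). This is a
sketch of the macro expansion, not an additional change in the patch:

/* Sketch: expansion of ATOMIC_OP(add), an l.lwa/l.swa retry loop. */
static inline void atomic_add(int i, atomic_t *v)
{
	int tmp;

	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%1)	\n"	/* tmp = counter, set reservation */
		"	l.add	%0,%0,%2	\n"	/* tmp += i */
		"	l.swa	0(%1),%0	\n"	/* store tmp if still reserved */
		"	l.bnf	1b		\n"	/* no flag: store failed, retry */
		"	l.nop			\n"	/* branch delay slot */
		: "=&r"(tmp)
		: "r"(&v->counter), "r"(i)
		: "cc", "memory");
}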

3 changed files with 128 additions and 1 deletion:
arch/openrisc/include/asm/Kbuild (-1)
 header-y += ucontext.h

-generic-y += atomic.h
 generic-y += auxvec.h
 generic-y += barrier.h
 generic-y += bitsperlong.h
arch/openrisc/include/asm/atomic.h (new file, +126)
/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_ATOMIC_H
#define __ASM_OPENRISC_ATOMIC_H

#include <linux/types.h>

/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int tmp;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%1)	\n"			\
		"	l." #op " %0,%0,%2	\n"			\
		"	l.swa	0(%1),%0	\n"			\
		"	l.bnf	1b		\n"			\
		"	l.nop			\n"			\
		: "=&r"(tmp)						\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
}

/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int tmp;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%1)	\n"			\
		"	l." #op " %0,%0,%2	\n"			\
		"	l.swa	0(%1),%0	\n"			\
		"	l.bnf	1b		\n"			\
		"	l.nop			\n"			\
		: "=&r"(tmp)						\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
									\
	return tmp;							\
}

/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int tmp, old;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%2)	\n"			\
		"	l." #op " %1,%0,%3	\n"			\
		"	l.swa	0(%2),%1	\n"			\
		"	l.bnf	1b		\n"			\
		"	l.nop			\n"			\
		: "=&r"(old), "=&r"(tmp)				\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
									\
	return old;							\
}

ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_add_return atomic_add_return
#define atomic_sub_return atomic_sub_return
#define atomic_fetch_add atomic_fetch_add
#define atomic_fetch_sub atomic_fetch_sub
#define atomic_fetch_and atomic_fetch_and
#define atomic_fetch_or atomic_fetch_or
#define atomic_fetch_xor atomic_fetch_xor
#define atomic_and atomic_and
#define atomic_or atomic_or
#define atomic_xor atomic_xor

/*
 * Atomically add a to v->counter as long as v is not already u.
 * Returns the original value at v->counter.
 *
 * This is often used through atomic_inc_not_zero()
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int old, tmp;

	__asm__ __volatile__(
		"1:	l.lwa %0, 0(%2)		\n"
		"	l.sfeq %0, %4		\n"
		"	l.bf 2f			\n"
		"	 l.add %1, %0, %3	\n"
		"	l.swa 0(%2), %1		\n"
		"	l.bnf 1b		\n"
		"	 l.nop			\n"
		"2:				\n"
		: "=&r"(old), "=&r"(tmp)
		: "r"(&v->counter), "r"(a), "r"(u)
		: "cc", "memory");

	return old;
}
#define __atomic_add_unless __atomic_add_unless

#include <asm-generic/atomic.h>

#endif /* __ASM_OPENRISC_ATOMIC_H */
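
A usage sketch (the get_ref() wrapper is hypothetical, not part of the
patch): __atomic_add_unless(v, 1, 0) is the primitive behind
atomic_inc_not_zero(), and because it returns the old counter value, a
caller can take a reference only while the object is still live:

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical caller: take a reference only if the refcount has not
 * already dropped to zero (i.e. the object is not being torn down). */
static bool get_ref(atomic_t *refcount)
{
	/* atomic_inc_not_zero(v) boils down to
	 * __atomic_add_unless(v, 1, 0) != 0: add 1 unless the counter is
	 * already 0, and report whether the old value was non-zero. */
	return atomic_inc_not_zero(refcount);
}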
include/asm-generic/atomic.h (+2)
 #define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))

+#ifndef __atomic_add_unless
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
...
 	c = old;
 	return c;
 }
+#endif

 #endif /* __ASM_GENERIC_ATOMIC_H */
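
The #ifndef guard added here is the usual asm-generic override pattern: an
architecture that provides its own __atomic_add_unless() also defines a
same-named macro before including <asm-generic/atomic.h> (as the new
OpenRISC header does above), so the generic fallback compiles out. A
minimal sketch of the pattern, with hypothetical names:

/* arch header (hypothetical): supply the optimized version and mark it. */
static inline int widget_op(int x)
{
	return x + 1;			/* arch-optimized implementation */
}
#define widget_op widget_op		/* tells the generic header to back off */
#include <asm-generic/widget.h>		/* hypothetical generic header */

/* asm-generic header (hypothetical): compile the fallback only if the
 * architecture did not define the macro above. */
#ifndef widget_op
static inline int widget_op(int x)
{
	return x + 1;			/* portable fallback */
}
#endif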