Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

frv: Rewrite atomic implementation

Mostly complete rewrite of the FRV atomic implementation, instead of
using assembly files, use inline assembler.

The out-of-line CONFIG option makes a bit of a mess of things, but a
little CPP trickery gets that done too.

FRV already had the atomic logic ops but under a non standard name,
the reimplementation provides the generic names and provides the
intermediate form required for the bitops implementation.

The slightly inconsistent __atomic32_fetch_##op naming is because
__atomic_fetch_##op conflicts with GCC builtin functions.

The 64bit atomic ops use the inline assembly %Ln construct to access
the low word register (r+1), afaik this construct was not previously
used in the kernel and is completely undocumented, but I found it in
the FRV GCC code and it seems to work.

FRV had a non-standard definition of atomic_{clear,set}_mask() which
would work on types other than atomic_t; the one user relying on that
(arch/frv/kernel/dma.c) got converted to use the new intermediate
form.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

authored by

Peter Zijlstra and committed by
Thomas Gleixner
b0d8003e 7fc1845d

+260 -356
+67 -54
arch/frv/include/asm/atomic.h
··· 15 15 #define _ASM_ATOMIC_H 16 16 17 17 #include <linux/types.h> 18 - #include <asm/spr-regs.h> 19 18 #include <asm/cmpxchg.h> 20 19 #include <asm/barrier.h> 21 20 22 21 #ifdef CONFIG_SMP 23 22 #error not SMP safe 24 23 #endif 24 + 25 + #include <asm/atomic_defs.h> 25 26 26 27 /* 27 28 * Atomic operations that C can't guarantee us. Useful for ··· 35 34 #define atomic_read(v) ACCESS_ONCE((v)->counter) 36 35 #define atomic_set(v, i) (((v)->counter) = (i)) 37 36 38 - #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 37 + static inline int atomic_inc_return(atomic_t *v) 38 + { 39 + return __atomic_add_return(1, &v->counter); 40 + } 41 + 42 + static inline int atomic_dec_return(atomic_t *v) 43 + { 44 + return __atomic_sub_return(1, &v->counter); 45 + } 46 + 39 47 static inline int atomic_add_return(int i, atomic_t *v) 40 48 { 41 - unsigned long val; 42 - 43 - asm("0: \n" 44 - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 45 - " ckeq icc3,cc7 \n" 46 - " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ 47 - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 48 - " add%I2 %1,%2,%1 \n" 49 - " cst.p %1,%M0 ,cc3,#1 \n" 50 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ 51 - " beq icc3,#0,0b \n" 52 - : "+U"(v->counter), "=&r"(val) 53 - : "NPr"(i) 54 - : "memory", "cc7", "cc3", "icc3" 55 - ); 56 - 57 - return val; 49 + return __atomic_add_return(i, &v->counter); 58 50 } 59 51 60 52 static inline int atomic_sub_return(int i, atomic_t *v) 61 53 { 62 - unsigned long val; 63 - 64 - asm("0: \n" 65 - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 66 - " ckeq icc3,cc7 \n" 67 - " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ 68 - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 69 - " sub%I2 %1,%2,%1 \n" 70 - " cst.p %1,%M0 ,cc3,#1 \n" 71 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ 72 - " beq icc3,#0,0b \n" 73 - : "+U"(v->counter), "=&r"(val) 74 - : "NPr"(i) 75 - : "memory", "cc7", "cc3", "icc3" 76 - ); 77 - 78 - return val; 54 + return 
__atomic_sub_return(i, &v->counter); 79 55 } 80 - 81 - #else 82 - 83 - extern int atomic_add_return(int i, atomic_t *v); 84 - extern int atomic_sub_return(int i, atomic_t *v); 85 - 86 - #endif 87 56 88 57 static inline int atomic_add_negative(int i, atomic_t *v) 89 58 { ··· 72 101 73 102 static inline void atomic_inc(atomic_t *v) 74 103 { 75 - atomic_add_return(1, v); 104 + atomic_inc_return(v); 76 105 } 77 106 78 107 static inline void atomic_dec(atomic_t *v) 79 108 { 80 - atomic_sub_return(1, v); 109 + atomic_dec_return(v); 81 110 } 82 - 83 - #define atomic_dec_return(v) atomic_sub_return(1, (v)) 84 - #define atomic_inc_return(v) atomic_add_return(1, (v)) 85 111 86 112 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) 87 113 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) ··· 88 120 * 64-bit atomic ops 89 121 */ 90 122 typedef struct { 91 - volatile long long counter; 123 + long long counter; 92 124 } atomic64_t; 93 125 94 126 #define ATOMIC64_INIT(i) { (i) } 95 127 96 - static inline long long atomic64_read(atomic64_t *v) 128 + static inline long long atomic64_read(const atomic64_t *v) 97 129 { 98 130 long long counter; 99 131 100 132 asm("ldd%I1 %M1,%0" 101 133 : "=e"(counter) 102 134 : "m"(v->counter)); 135 + 103 136 return counter; 104 137 } 105 138 ··· 111 142 : "e"(i)); 112 143 } 113 144 114 - extern long long atomic64_inc_return(atomic64_t *v); 115 - extern long long atomic64_dec_return(atomic64_t *v); 116 - extern long long atomic64_add_return(long long i, atomic64_t *v); 117 - extern long long atomic64_sub_return(long long i, atomic64_t *v); 145 + static inline long long atomic64_inc_return(atomic64_t *v) 146 + { 147 + return __atomic64_add_return(1, &v->counter); 148 + } 149 + 150 + static inline long long atomic64_dec_return(atomic64_t *v) 151 + { 152 + return __atomic64_sub_return(1, &v->counter); 153 + } 154 + 155 + static inline long long atomic64_add_return(long long i, atomic64_t *v) 156 + { 157 + return 
__atomic64_add_return(i, &v->counter); 158 + } 159 + 160 + static inline long long atomic64_sub_return(long long i, atomic64_t *v) 161 + { 162 + return __atomic64_sub_return(i, &v->counter); 163 + } 118 164 119 165 static inline long long atomic64_add_negative(long long i, atomic64_t *v) 120 166 { ··· 160 176 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) 161 177 #define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) 162 178 179 + 163 180 #define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) 164 181 #define atomic_xchg(v, new) (xchg(&(v)->counter, new)) 165 182 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) ··· 181 196 return c; 182 197 } 183 198 199 + #define ATOMIC_OP(op) \ 200 + static inline void atomic_##op(int i, atomic_t *v) \ 201 + { \ 202 + (void)__atomic32_fetch_##op(i, &v->counter); \ 203 + } \ 204 + \ 205 + static inline void atomic64_##op(long long i, atomic64_t *v) \ 206 + { \ 207 + (void)__atomic64_fetch_##op(i, &v->counter); \ 208 + } 209 + 210 + #define CONFIG_ARCH_HAS_ATOMIC_OR 211 + 212 + ATOMIC_OP(or) 213 + ATOMIC_OP(and) 214 + ATOMIC_OP(xor) 215 + 216 + #undef ATOMIC_OP 217 + 218 + static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) 219 + { 220 + atomic_and(~mask, v); 221 + } 222 + 223 + static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) 224 + { 225 + atomic_or(mask, v); 226 + } 184 227 185 228 #endif /* _ASM_ATOMIC_H */
+172
arch/frv/include/asm/atomic_defs.h
··· 1 + 2 + #include <asm/spr-regs.h> 3 + 4 + #ifdef __ATOMIC_LIB__ 5 + 6 + #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 7 + 8 + #define ATOMIC_QUALS 9 + #define ATOMIC_EXPORT(x) EXPORT_SYMBOL(x) 10 + 11 + #else /* !OUTOFLINE && LIB */ 12 + 13 + #define ATOMIC_OP_RETURN(op) 14 + #define ATOMIC_FETCH_OP(op) 15 + 16 + #endif /* OUTOFLINE */ 17 + 18 + #else /* !__ATOMIC_LIB__ */ 19 + 20 + #define ATOMIC_EXPORT(x) 21 + 22 + #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 23 + 24 + #define ATOMIC_OP_RETURN(op) \ 25 + extern int __atomic_##op##_return(int i, int *v); \ 26 + extern long long __atomic64_##op##_return(long long i, long long *v); 27 + 28 + #define ATOMIC_FETCH_OP(op) \ 29 + extern int __atomic32_fetch_##op(int i, int *v); \ 30 + extern long long __atomic64_fetch_##op(long long i, long long *v); 31 + 32 + #else /* !OUTOFLINE && !LIB */ 33 + 34 + #define ATOMIC_QUALS static inline 35 + 36 + #endif /* OUTOFLINE */ 37 + #endif /* __ATOMIC_LIB__ */ 38 + 39 + 40 + /* 41 + * Note on the 64 bit inline asm variants... 42 + * 43 + * CSTD is a conditional instruction and needs a constrained memory reference. 44 + * Normally 'U' provides the correct constraints for conditional instructions 45 + * and this is used for the 32 bit version, however 'U' does not appear to work 46 + * for 64 bit values (gcc-4.9) 47 + * 48 + * The exact constraint is that conditional instructions cannot deal with an 49 + * immediate displacement in the memory reference, so what we do is we read the 50 + * address through a volatile cast into a local variable in order to insure we 51 + * _have_ to compute the correct address without displacement. This allows us 52 + * to use the regular 'm' for the memory address. 53 + * 54 + * Furthermore, the %Ln operand, which prints the low word register (r+1), 55 + * really only works for registers, this means we cannot allow immediate values 56 + * for the 64 bit versions -- like we do for the 32 bit ones. 
57 + * 58 + */ 59 + 60 + #ifndef ATOMIC_OP_RETURN 61 + #define ATOMIC_OP_RETURN(op) \ 62 + ATOMIC_QUALS int __atomic_##op##_return(int i, int *v) \ 63 + { \ 64 + int val; \ 65 + \ 66 + asm volatile( \ 67 + "0: \n" \ 68 + " orcc gr0,gr0,gr0,icc3 \n" \ 69 + " ckeq icc3,cc7 \n" \ 70 + " ld.p %M0,%1 \n" \ 71 + " orcr cc7,cc7,cc3 \n" \ 72 + " "#op"%I2 %1,%2,%1 \n" \ 73 + " cst.p %1,%M0 ,cc3,#1 \n" \ 74 + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ 75 + " beq icc3,#0,0b \n" \ 76 + : "+U"(*v), "=&r"(val) \ 77 + : "NPr"(i) \ 78 + : "memory", "cc7", "cc3", "icc3" \ 79 + ); \ 80 + \ 81 + return val; \ 82 + } \ 83 + ATOMIC_EXPORT(__atomic_##op##_return); \ 84 + \ 85 + ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v) \ 86 + { \ 87 + long long *__v = READ_ONCE(v); \ 88 + long long val; \ 89 + \ 90 + asm volatile( \ 91 + "0: \n" \ 92 + " orcc gr0,gr0,gr0,icc3 \n" \ 93 + " ckeq icc3,cc7 \n" \ 94 + " ldd.p %M0,%1 \n" \ 95 + " orcr cc7,cc7,cc3 \n" \ 96 + " "#op"cc %L1,%L2,%L1,icc0 \n" \ 97 + " "#op"x %1,%2,%1,icc0 \n" \ 98 + " cstd.p %1,%M0 ,cc3,#1 \n" \ 99 + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ 100 + " beq icc3,#0,0b \n" \ 101 + : "+m"(*__v), "=&e"(val) \ 102 + : "e"(i) \ 103 + : "memory", "cc7", "cc3", "icc0", "icc3" \ 104 + ); \ 105 + \ 106 + return val; \ 107 + } \ 108 + ATOMIC_EXPORT(__atomic64_##op##_return); 109 + #endif 110 + 111 + #ifndef ATOMIC_FETCH_OP 112 + #define ATOMIC_FETCH_OP(op) \ 113 + ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v) \ 114 + { \ 115 + int old, tmp; \ 116 + \ 117 + asm volatile( \ 118 + "0: \n" \ 119 + " orcc gr0,gr0,gr0,icc3 \n" \ 120 + " ckeq icc3,cc7 \n" \ 121 + " ld.p %M0,%1 \n" \ 122 + " orcr cc7,cc7,cc3 \n" \ 123 + " "#op"%I3 %1,%3,%2 \n" \ 124 + " cst.p %2,%M0 ,cc3,#1 \n" \ 125 + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ 126 + " beq icc3,#0,0b \n" \ 127 + : "+U"(*v), "=&r"(old), "=r"(tmp) \ 128 + : "NPr"(i) \ 129 + : "memory", "cc7", "cc3", "icc3" \ 130 + ); \ 131 + \ 132 + return old; \ 133 + } \ 134 + 
ATOMIC_EXPORT(__atomic32_fetch_##op); \ 135 + \ 136 + ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v) \ 137 + { \ 138 + long long *__v = READ_ONCE(v); \ 139 + long long old, tmp; \ 140 + \ 141 + asm volatile( \ 142 + "0: \n" \ 143 + " orcc gr0,gr0,gr0,icc3 \n" \ 144 + " ckeq icc3,cc7 \n" \ 145 + " ldd.p %M0,%1 \n" \ 146 + " orcr cc7,cc7,cc3 \n" \ 147 + " "#op" %L1,%L3,%L2 \n" \ 148 + " "#op" %1,%3,%2 \n" \ 149 + " cstd.p %2,%M0 ,cc3,#1 \n" \ 150 + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ 151 + " beq icc3,#0,0b \n" \ 152 + : "+m"(*__v), "=&e"(old), "=e"(tmp) \ 153 + : "e"(i) \ 154 + : "memory", "cc7", "cc3", "icc3" \ 155 + ); \ 156 + \ 157 + return old; \ 158 + } \ 159 + ATOMIC_EXPORT(__atomic64_fetch_##op); 160 + #endif 161 + 162 + ATOMIC_FETCH_OP(or) 163 + ATOMIC_FETCH_OP(and) 164 + ATOMIC_FETCH_OP(xor) 165 + 166 + ATOMIC_OP_RETURN(add) 167 + ATOMIC_OP_RETURN(sub) 168 + 169 + #undef ATOMIC_FETCH_OP 170 + #undef ATOMIC_OP_RETURN 171 + #undef ATOMIC_QUALS 172 + #undef ATOMIC_EXPORT
+10 -89
arch/frv/include/asm/bitops.h
··· 25 25 26 26 #include <asm-generic/bitops/ffz.h> 27 27 28 - #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 29 - static inline 30 - unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v) 31 - { 32 - unsigned long old, tmp; 33 - 34 - asm volatile( 35 - "0: \n" 36 - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 37 - " ckeq icc3,cc7 \n" 38 - " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ 39 - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 40 - " and%I3 %1,%3,%2 \n" 41 - " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ 42 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ 43 - " beq icc3,#0,0b \n" 44 - : "+U"(*v), "=&r"(old), "=r"(tmp) 45 - : "NPr"(~mask) 46 - : "memory", "cc7", "cc3", "icc3" 47 - ); 48 - 49 - return old; 50 - } 51 - 52 - static inline 53 - unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v) 54 - { 55 - unsigned long old, tmp; 56 - 57 - asm volatile( 58 - "0: \n" 59 - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 60 - " ckeq icc3,cc7 \n" 61 - " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ 62 - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 63 - " or%I3 %1,%3,%2 \n" 64 - " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ 65 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ 66 - " beq icc3,#0,0b \n" 67 - : "+U"(*v), "=&r"(old), "=r"(tmp) 68 - : "NPr"(mask) 69 - : "memory", "cc7", "cc3", "icc3" 70 - ); 71 - 72 - return old; 73 - } 74 - 75 - static inline 76 - unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v) 77 - { 78 - unsigned long old, tmp; 79 - 80 - asm volatile( 81 - "0: \n" 82 - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 83 - " ckeq icc3,cc7 \n" 84 - " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ 85 - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 86 - " xor%I3 %1,%3,%2 \n" 87 - " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ 88 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... 
clear ICC3.Z */ 89 - " beq icc3,#0,0b \n" 90 - : "+U"(*v), "=&r"(old), "=r"(tmp) 91 - : "NPr"(mask) 92 - : "memory", "cc7", "cc3", "icc3" 93 - ); 94 - 95 - return old; 96 - } 97 - 98 - #else 99 - 100 - extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v); 101 - extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v); 102 - extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v); 103 - 104 - #endif 105 - 106 - #define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v)) 107 - #define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v)) 28 + #include <asm/atomic.h> 108 29 109 30 static inline int test_and_clear_bit(unsigned long nr, volatile void *addr) 110 31 { 111 - volatile unsigned long *ptr = addr; 112 - unsigned long mask = 1UL << (nr & 31); 32 + unsigned int *ptr = (void *)addr; 33 + unsigned int mask = 1UL << (nr & 31); 113 34 ptr += nr >> 5; 114 - return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0; 35 + return (__atomic32_fetch_and(~mask, ptr) & mask) != 0; 115 36 } 116 37 117 38 static inline int test_and_set_bit(unsigned long nr, volatile void *addr) 118 39 { 119 - volatile unsigned long *ptr = addr; 120 - unsigned long mask = 1UL << (nr & 31); 40 + unsigned int *ptr = (void *)addr; 41 + unsigned int mask = 1UL << (nr & 31); 121 42 ptr += nr >> 5; 122 - return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0; 43 + return (__atomic32_fetch_or(mask, ptr) & mask) != 0; 123 44 } 124 45 125 46 static inline int test_and_change_bit(unsigned long nr, volatile void *addr) 126 47 { 127 - volatile unsigned long *ptr = addr; 128 - unsigned long mask = 1UL << (nr & 31); 48 + unsigned int *ptr = (void *)addr; 49 + unsigned int mask = 1UL << (nr & 31); 129 50 ptr += nr >> 5; 130 - return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0; 51 + return (__atomic32_fetch_xor(mask, ptr) & mask) != 0; 131 52 } 132 53 133 54 
static inline void clear_bit(unsigned long nr, volatile void *addr)
+3 -3
arch/frv/kernel/dma.c
··· 109 109 110 110 static DEFINE_RWLOCK(frv_dma_channels_lock); 111 111 112 - unsigned long frv_dma_inprogress; 112 + unsigned int frv_dma_inprogress; 113 113 114 114 #define frv_clear_dma_inprogress(channel) \ 115 - atomic_clear_mask(1 << (channel), &frv_dma_inprogress); 115 + (void)__atomic32_fetch_and(~(1 << (channel)), &frv_dma_inprogress); 116 116 117 117 #define frv_set_dma_inprogress(channel) \ 118 - atomic_set_mask(1 << (channel), &frv_dma_inprogress); 118 + (void)__atomic32_fetch_or(1 << (channel), &frv_dma_inprogress); 119 119 120 120 /*****************************************************************************/ 121 121 /*
-5
arch/frv/kernel/frv_ksyms.c
··· 58 58 EXPORT_SYMBOL(__insl_ns); 59 59 60 60 #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 61 - EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask); 62 - EXPORT_SYMBOL(atomic_test_and_OR_mask); 63 - EXPORT_SYMBOL(atomic_test_and_XOR_mask); 64 - EXPORT_SYMBOL(atomic_add_return); 65 - EXPORT_SYMBOL(atomic_sub_return); 66 61 EXPORT_SYMBOL(__xchg_32); 67 62 EXPORT_SYMBOL(__cmpxchg_32); 68 63 #endif
+1 -1
arch/frv/lib/Makefile
··· 5 5 lib-y := \ 6 6 __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \ 7 7 checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \ 8 - outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o 8 + outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o atomic-lib.o
+7
arch/frv/lib/atomic-lib.c
··· 1 + 2 + #include <linux/export.h> 3 + #include <asm/atomic.h> 4 + 5 + #define __ATOMIC_LIB__ 6 + 7 + #include <asm/atomic_defs.h>
-110
arch/frv/lib/atomic-ops.S
··· 19 19 20 20 ############################################################################### 21 21 # 22 - # unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v); 23 - # 24 - ############################################################################### 25 - .globl atomic_test_and_ANDNOT_mask 26 - .type atomic_test_and_ANDNOT_mask,@function 27 - atomic_test_and_ANDNOT_mask: 28 - not.p gr8,gr10 29 - 0: 30 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 31 - ckeq icc3,cc7 32 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ 33 - orcr cc7,cc7,cc3 /* set CC3 to true */ 34 - and gr8,gr10,gr11 35 - cst.p gr11,@(gr9,gr0) ,cc3,#1 36 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 37 - beq icc3,#0,0b 38 - bralr 39 - 40 - .size atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask 41 - 42 - ############################################################################### 43 - # 44 - # unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v); 45 - # 46 - ############################################################################### 47 - .globl atomic_test_and_OR_mask 48 - .type atomic_test_and_OR_mask,@function 49 - atomic_test_and_OR_mask: 50 - or.p gr8,gr8,gr10 51 - 0: 52 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 53 - ckeq icc3,cc7 54 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ 55 - orcr cc7,cc7,cc3 /* set CC3 to true */ 56 - or gr8,gr10,gr11 57 - cst.p gr11,@(gr9,gr0) ,cc3,#1 58 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 59 - beq icc3,#0,0b 60 - bralr 61 - 62 - .size atomic_test_and_OR_mask, .-atomic_test_and_OR_mask 63 - 64 - ############################################################################### 65 - # 66 - # unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v); 67 - # 68 - ############################################################################### 69 - .globl atomic_test_and_XOR_mask 70 - .type 
atomic_test_and_XOR_mask,@function 71 - atomic_test_and_XOR_mask: 72 - or.p gr8,gr8,gr10 73 - 0: 74 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 75 - ckeq icc3,cc7 76 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ 77 - orcr cc7,cc7,cc3 /* set CC3 to true */ 78 - xor gr8,gr10,gr11 79 - cst.p gr11,@(gr9,gr0) ,cc3,#1 80 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 81 - beq icc3,#0,0b 82 - bralr 83 - 84 - .size atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask 85 - 86 - ############################################################################### 87 - # 88 - # int atomic_add_return(int i, atomic_t *v) 89 - # 90 - ############################################################################### 91 - .globl atomic_add_return 92 - .type atomic_add_return,@function 93 - atomic_add_return: 94 - or.p gr8,gr8,gr10 95 - 0: 96 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 97 - ckeq icc3,cc7 98 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ 99 - orcr cc7,cc7,cc3 /* set CC3 to true */ 100 - add gr8,gr10,gr8 101 - cst.p gr8,@(gr9,gr0) ,cc3,#1 102 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 103 - beq icc3,#0,0b 104 - bralr 105 - 106 - .size atomic_add_return, .-atomic_add_return 107 - 108 - ############################################################################### 109 - # 110 - # int atomic_sub_return(int i, atomic_t *v) 111 - # 112 - ############################################################################### 113 - .globl atomic_sub_return 114 - .type atomic_sub_return,@function 115 - atomic_sub_return: 116 - or.p gr8,gr8,gr10 117 - 0: 118 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 119 - ckeq icc3,cc7 120 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ 121 - orcr cc7,cc7,cc3 /* set CC3 to true */ 122 - sub gr8,gr10,gr8 123 - cst.p gr8,@(gr9,gr0) ,cc3,#1 124 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 125 - beq icc3,#0,0b 126 - bralr 127 - 128 - .size atomic_sub_return, .-atomic_sub_return 129 - 130 - 
############################################################################### 131 - # 132 22 # uint32_t __xchg_32(uint32_t i, uint32_t *v) 133 23 # 134 24 ###############################################################################
-94
arch/frv/lib/atomic64-ops.S
··· 20 20 21 21 ############################################################################### 22 22 # 23 - # long long atomic64_inc_return(atomic64_t *v) 24 - # 25 - ############################################################################### 26 - .globl atomic64_inc_return 27 - .type atomic64_inc_return,@function 28 - atomic64_inc_return: 29 - or.p gr8,gr8,gr10 30 - 0: 31 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 32 - ckeq icc3,cc7 33 - ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ 34 - orcr cc7,cc7,cc3 /* set CC3 to true */ 35 - addicc gr9,#1,gr9,icc0 36 - addxi gr8,#0,gr8,icc0 37 - cstd.p gr8,@(gr10,gr0) ,cc3,#1 38 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 39 - beq icc3,#0,0b 40 - bralr 41 - 42 - .size atomic64_inc_return, .-atomic64_inc_return 43 - 44 - ############################################################################### 45 - # 46 - # long long atomic64_dec_return(atomic64_t *v) 47 - # 48 - ############################################################################### 49 - .globl atomic64_dec_return 50 - .type atomic64_dec_return,@function 51 - atomic64_dec_return: 52 - or.p gr8,gr8,gr10 53 - 0: 54 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 55 - ckeq icc3,cc7 56 - ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ 57 - orcr cc7,cc7,cc3 /* set CC3 to true */ 58 - subicc gr9,#1,gr9,icc0 59 - subxi gr8,#0,gr8,icc0 60 - cstd.p gr8,@(gr10,gr0) ,cc3,#1 61 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 62 - beq icc3,#0,0b 63 - bralr 64 - 65 - .size atomic64_dec_return, .-atomic64_dec_return 66 - 67 - ############################################################################### 68 - # 69 - # long long atomic64_add_return(long long i, atomic64_t *v) 70 - # 71 - ############################################################################### 72 - .globl atomic64_add_return 73 - .type atomic64_add_return,@function 74 - atomic64_add_return: 75 - or.p gr8,gr8,gr4 76 - or gr9,gr9,gr5 77 - 0: 78 - orcc 
gr0,gr0,gr0,icc3 /* set ICC3.Z */ 79 - ckeq icc3,cc7 80 - ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ 81 - orcr cc7,cc7,cc3 /* set CC3 to true */ 82 - addcc gr9,gr5,gr9,icc0 83 - addx gr8,gr4,gr8,icc0 84 - cstd.p gr8,@(gr10,gr0) ,cc3,#1 85 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 86 - beq icc3,#0,0b 87 - bralr 88 - 89 - .size atomic64_add_return, .-atomic64_add_return 90 - 91 - ############################################################################### 92 - # 93 - # long long atomic64_sub_return(long long i, atomic64_t *v) 94 - # 95 - ############################################################################### 96 - .globl atomic64_sub_return 97 - .type atomic64_sub_return,@function 98 - atomic64_sub_return: 99 - or.p gr8,gr8,gr4 100 - or gr9,gr9,gr5 101 - 0: 102 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 103 - ckeq icc3,cc7 104 - ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ 105 - orcr cc7,cc7,cc3 /* set CC3 to true */ 106 - subcc gr9,gr5,gr9,icc0 107 - subx gr8,gr4,gr8,icc0 108 - cstd.p gr8,@(gr10,gr0) ,cc3,#1 109 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 110 - beq icc3,#0,0b 111 - bralr 112 - 113 - .size atomic64_sub_return, .-atomic64_sub_return 114 - 115 - ############################################################################### 116 - # 117 23 # uint64_t __xchg_64(uint64_t i, uint64_t *v) 118 24 # 119 25 ###############################################################################