Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull arch atomic cleanups from Ingo Molnar:
"This is a series kept separate from the main locking tree, which
cleans up and improves various details in the atomics type handling:

- Remove the unused atomic_or_long() method

- Consolidate and compress atomic ops implementations between
architectures, to reduce linecount and to make it easier to add new
ops.

- Rewrite generic atomic support to only require cmpxchg() from an
architecture - generate all other methods from that"
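
The per-architecture "Fold atomic_ops" patches listed below all follow one pattern: the hand-written atomic_add()/atomic_sub() and atomic_*_return() functions are replaced by ATOMIC_OP()/ATOMIC_OP_RETURN() generator macros that are instantiated once per operation. A minimal sketch of that pattern, assuming a simplified one-field atomic_t and a plain (non-atomic) body standing in for each architecture's ll/sc, locked or irq-off sequence; this is illustrative only, not any port's actual header:

/* Illustrative sketch: the body below is NOT atomic, it only shows how
 * one macro pair generates both the void and the value-returning op. */
typedef struct { int counter; } atomic_t;

#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	v->counter c_op i;	/* real ports use ll/sc, cmpxchg or irq-off */ \
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	return v->counter c_op i;					\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)	/* generates atomic_add() and atomic_add_return() */
ATOMIC_OPS(sub, -=)	/* generates atomic_sub() and atomic_sub_return() */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

With this layout, adding a new operation is a single ATOMIC_OP(and, &=) style instantiation rather than another copy of the loop, which is the "easier to add new ops" point above.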

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
locking, mips: Fix atomics
locking, sparc64: Fix atomics
locking,arch: Rewrite generic atomic support
locking,arch,xtensa: Fold atomic_ops
locking,arch,sparc: Fold atomic_ops
locking,arch,sh: Fold atomic_ops
locking,arch,powerpc: Fold atomic_ops
locking,arch,parisc: Fold atomic_ops
locking,arch,mn10300: Fold atomic_ops
locking,arch,mips: Fold atomic_ops
locking,arch,metag: Fold atomic_ops
locking,arch,m68k: Fold atomic_ops
locking,arch,m32r: Fold atomic_ops
locking,arch,ia64: Fold atomic_ops
locking,arch,hexagon: Fold atomic_ops
locking,arch,cris: Fold atomic_ops
locking,arch,avr32: Fold atomic_ops
locking,arch,arm64: Fold atomic_ops
locking,arch,arm: Fold atomic_ops
...
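
The "locking,arch: Rewrite generic atomic support" commit listed above reworks include/asm-generic/atomic.h so an architecture only has to supply cmpxchg(); the arithmetic ops are then generated from a compare-and-swap retry loop. A hedged sketch of that idea, using GCC's __sync_val_compare_and_swap() builtin as a stand-in for the kernel's cmpxchg() and the same simplified atomic_t as in the sketch above (the real header differs in detail, e.g. it goes through atomic_read() and the barrier helpers):

/* Sketch: derive atomic_##op##_return() from nothing but a CAS primitive. */
typedef struct { int counter; } atomic_t;

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int old, new;							\
									\
	do {								\
		old = v->counter;	/* the real code uses atomic_read() */ \
		new = old c_op i;					\
	} while (__sync_val_compare_and_swap(&v->counter, old, new) != old); \
									\
	return new;							\
}

ATOMIC_OP_RETURN(add, +)	/* atomic_add_return() */
ATOMIC_OP_RETURN(sub, -)	/* atomic_sub_return() */

#undef ATOMIC_OP_RETURN

The loop re-reads the counter, computes the new value, and retries until the compare-and-swap observes the value it started from, so the update is atomic even though cmpxchg() is the only architecture-provided primitive.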

+1598 -2307
+82 -135
arch/alpha/include/asm/atomic.h
··· 17 17 #define ATOMIC_INIT(i) { (i) } 18 18 #define ATOMIC64_INIT(i) { (i) } 19 19 20 - #define atomic_read(v) (*(volatile int *)&(v)->counter) 21 - #define atomic64_read(v) (*(volatile long *)&(v)->counter) 20 + #define atomic_read(v) ACCESS_ONCE((v)->counter) 21 + #define atomic64_read(v) ACCESS_ONCE((v)->counter) 22 22 23 23 #define atomic_set(v,i) ((v)->counter = (i)) 24 24 #define atomic64_set(v,i) ((v)->counter = (i)) ··· 29 29 * branch back to restart the operation. 30 30 */ 31 31 32 - static __inline__ void atomic_add(int i, atomic_t * v) 33 - { 34 - unsigned long temp; 35 - __asm__ __volatile__( 36 - "1: ldl_l %0,%1\n" 37 - " addl %0,%2,%0\n" 38 - " stl_c %0,%1\n" 39 - " beq %0,2f\n" 40 - ".subsection 2\n" 41 - "2: br 1b\n" 42 - ".previous" 43 - :"=&r" (temp), "=m" (v->counter) 44 - :"Ir" (i), "m" (v->counter)); 32 + #define ATOMIC_OP(op) \ 33 + static __inline__ void atomic_##op(int i, atomic_t * v) \ 34 + { \ 35 + unsigned long temp; \ 36 + __asm__ __volatile__( \ 37 + "1: ldl_l %0,%1\n" \ 38 + " " #op "l %0,%2,%0\n" \ 39 + " stl_c %0,%1\n" \ 40 + " beq %0,2f\n" \ 41 + ".subsection 2\n" \ 42 + "2: br 1b\n" \ 43 + ".previous" \ 44 + :"=&r" (temp), "=m" (v->counter) \ 45 + :"Ir" (i), "m" (v->counter)); \ 46 + } \ 47 + 48 + #define ATOMIC_OP_RETURN(op) \ 49 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 50 + { \ 51 + long temp, result; \ 52 + smp_mb(); \ 53 + __asm__ __volatile__( \ 54 + "1: ldl_l %0,%1\n" \ 55 + " " #op "l %0,%3,%2\n" \ 56 + " " #op "l %0,%3,%0\n" \ 57 + " stl_c %0,%1\n" \ 58 + " beq %0,2f\n" \ 59 + ".subsection 2\n" \ 60 + "2: br 1b\n" \ 61 + ".previous" \ 62 + :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ 63 + :"Ir" (i), "m" (v->counter) : "memory"); \ 64 + smp_mb(); \ 65 + return result; \ 45 66 } 46 67 47 - static __inline__ void atomic64_add(long i, atomic64_t * v) 48 - { 49 - unsigned long temp; 50 - __asm__ __volatile__( 51 - "1: ldq_l %0,%1\n" 52 - " addq %0,%2,%0\n" 53 - " stq_c %0,%1\n" 54 - " beq %0,2f\n" 55 - ".subsection 2\n" 56 - "2: br 1b\n" 57 - ".previous" 58 - :"=&r" (temp), "=m" (v->counter) 59 - :"Ir" (i), "m" (v->counter)); 68 + #define ATOMIC64_OP(op) \ 69 + static __inline__ void atomic64_##op(long i, atomic64_t * v) \ 70 + { \ 71 + unsigned long temp; \ 72 + __asm__ __volatile__( \ 73 + "1: ldq_l %0,%1\n" \ 74 + " " #op "q %0,%2,%0\n" \ 75 + " stq_c %0,%1\n" \ 76 + " beq %0,2f\n" \ 77 + ".subsection 2\n" \ 78 + "2: br 1b\n" \ 79 + ".previous" \ 80 + :"=&r" (temp), "=m" (v->counter) \ 81 + :"Ir" (i), "m" (v->counter)); \ 82 + } \ 83 + 84 + #define ATOMIC64_OP_RETURN(op) \ 85 + static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ 86 + { \ 87 + long temp, result; \ 88 + smp_mb(); \ 89 + __asm__ __volatile__( \ 90 + "1: ldq_l %0,%1\n" \ 91 + " " #op "q %0,%3,%2\n" \ 92 + " " #op "q %0,%3,%0\n" \ 93 + " stq_c %0,%1\n" \ 94 + " beq %0,2f\n" \ 95 + ".subsection 2\n" \ 96 + "2: br 1b\n" \ 97 + ".previous" \ 98 + :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ 99 + :"Ir" (i), "m" (v->counter) : "memory"); \ 100 + smp_mb(); \ 101 + return result; \ 60 102 } 61 103 62 - static __inline__ void atomic_sub(int i, atomic_t * v) 63 - { 64 - unsigned long temp; 65 - __asm__ __volatile__( 66 - "1: ldl_l %0,%1\n" 67 - " subl %0,%2,%0\n" 68 - " stl_c %0,%1\n" 69 - " beq %0,2f\n" 70 - ".subsection 2\n" 71 - "2: br 1b\n" 72 - ".previous" 73 - :"=&r" (temp), "=m" (v->counter) 74 - :"Ir" (i), "m" (v->counter)); 75 - } 104 + #define ATOMIC_OPS(opg) \ 105 + ATOMIC_OP(opg) \ 106 + ATOMIC_OP_RETURN(opg) \ 107 + 
ATOMIC64_OP(opg) \ 108 + ATOMIC64_OP_RETURN(opg) 76 109 77 - static __inline__ void atomic64_sub(long i, atomic64_t * v) 78 - { 79 - unsigned long temp; 80 - __asm__ __volatile__( 81 - "1: ldq_l %0,%1\n" 82 - " subq %0,%2,%0\n" 83 - " stq_c %0,%1\n" 84 - " beq %0,2f\n" 85 - ".subsection 2\n" 86 - "2: br 1b\n" 87 - ".previous" 88 - :"=&r" (temp), "=m" (v->counter) 89 - :"Ir" (i), "m" (v->counter)); 90 - } 110 + ATOMIC_OPS(add) 111 + ATOMIC_OPS(sub) 91 112 92 - 93 - /* 94 - * Same as above, but return the result value 95 - */ 96 - static inline int atomic_add_return(int i, atomic_t *v) 97 - { 98 - long temp, result; 99 - smp_mb(); 100 - __asm__ __volatile__( 101 - "1: ldl_l %0,%1\n" 102 - " addl %0,%3,%2\n" 103 - " addl %0,%3,%0\n" 104 - " stl_c %0,%1\n" 105 - " beq %0,2f\n" 106 - ".subsection 2\n" 107 - "2: br 1b\n" 108 - ".previous" 109 - :"=&r" (temp), "=m" (v->counter), "=&r" (result) 110 - :"Ir" (i), "m" (v->counter) : "memory"); 111 - smp_mb(); 112 - return result; 113 - } 114 - 115 - static __inline__ long atomic64_add_return(long i, atomic64_t * v) 116 - { 117 - long temp, result; 118 - smp_mb(); 119 - __asm__ __volatile__( 120 - "1: ldq_l %0,%1\n" 121 - " addq %0,%3,%2\n" 122 - " addq %0,%3,%0\n" 123 - " stq_c %0,%1\n" 124 - " beq %0,2f\n" 125 - ".subsection 2\n" 126 - "2: br 1b\n" 127 - ".previous" 128 - :"=&r" (temp), "=m" (v->counter), "=&r" (result) 129 - :"Ir" (i), "m" (v->counter) : "memory"); 130 - smp_mb(); 131 - return result; 132 - } 133 - 134 - static __inline__ long atomic_sub_return(int i, atomic_t * v) 135 - { 136 - long temp, result; 137 - smp_mb(); 138 - __asm__ __volatile__( 139 - "1: ldl_l %0,%1\n" 140 - " subl %0,%3,%2\n" 141 - " subl %0,%3,%0\n" 142 - " stl_c %0,%1\n" 143 - " beq %0,2f\n" 144 - ".subsection 2\n" 145 - "2: br 1b\n" 146 - ".previous" 147 - :"=&r" (temp), "=m" (v->counter), "=&r" (result) 148 - :"Ir" (i), "m" (v->counter) : "memory"); 149 - smp_mb(); 150 - return result; 151 - } 152 - 153 - static __inline__ long atomic64_sub_return(long i, atomic64_t * v) 154 - { 155 - long temp, result; 156 - smp_mb(); 157 - __asm__ __volatile__( 158 - "1: ldq_l %0,%1\n" 159 - " subq %0,%3,%2\n" 160 - " subq %0,%3,%0\n" 161 - " stq_c %0,%1\n" 162 - " beq %0,2f\n" 163 - ".subsection 2\n" 164 - "2: br 1b\n" 165 - ".previous" 166 - :"=&r" (temp), "=m" (v->counter), "=&r" (result) 167 - :"Ir" (i), "m" (v->counter) : "memory"); 168 - smp_mb(); 169 - return result; 170 - } 113 + #undef ATOMIC_OPS 114 + #undef ATOMIC64_OP_RETURN 115 + #undef ATOMIC64_OP 116 + #undef ATOMIC_OP_RETURN 117 + #undef ATOMIC_OP 171 118 172 119 #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) 173 120 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+65 -123
arch/arc/include/asm/atomic.h
··· 25 25 26 26 #define atomic_set(v, i) (((v)->counter) = (i)) 27 27 28 - static inline void atomic_add(int i, atomic_t *v) 29 - { 30 - unsigned int temp; 28 + #define ATOMIC_OP(op, c_op, asm_op) \ 29 + static inline void atomic_##op(int i, atomic_t *v) \ 30 + { \ 31 + unsigned int temp; \ 32 + \ 33 + __asm__ __volatile__( \ 34 + "1: llock %0, [%1] \n" \ 35 + " " #asm_op " %0, %0, %2 \n" \ 36 + " scond %0, [%1] \n" \ 37 + " bnz 1b \n" \ 38 + : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \ 39 + : "r"(&v->counter), "ir"(i) \ 40 + : "cc"); \ 41 + } \ 31 42 32 - __asm__ __volatile__( 33 - "1: llock %0, [%1] \n" 34 - " add %0, %0, %2 \n" 35 - " scond %0, [%1] \n" 36 - " bnz 1b \n" 37 - : "=&r"(temp) /* Early clobber, to prevent reg reuse */ 38 - : "r"(&v->counter), "ir"(i) 39 - : "cc"); 40 - } 41 - 42 - static inline void atomic_sub(int i, atomic_t *v) 43 - { 44 - unsigned int temp; 45 - 46 - __asm__ __volatile__( 47 - "1: llock %0, [%1] \n" 48 - " sub %0, %0, %2 \n" 49 - " scond %0, [%1] \n" 50 - " bnz 1b \n" 51 - : "=&r"(temp) 52 - : "r"(&v->counter), "ir"(i) 53 - : "cc"); 54 - } 55 - 56 - /* add and also return the new value */ 57 - static inline int atomic_add_return(int i, atomic_t *v) 58 - { 59 - unsigned int temp; 60 - 61 - __asm__ __volatile__( 62 - "1: llock %0, [%1] \n" 63 - " add %0, %0, %2 \n" 64 - " scond %0, [%1] \n" 65 - " bnz 1b \n" 66 - : "=&r"(temp) 67 - : "r"(&v->counter), "ir"(i) 68 - : "cc"); 69 - 70 - return temp; 71 - } 72 - 73 - static inline int atomic_sub_return(int i, atomic_t *v) 74 - { 75 - unsigned int temp; 76 - 77 - __asm__ __volatile__( 78 - "1: llock %0, [%1] \n" 79 - " sub %0, %0, %2 \n" 80 - " scond %0, [%1] \n" 81 - " bnz 1b \n" 82 - : "=&r"(temp) 83 - : "r"(&v->counter), "ir"(i) 84 - : "cc"); 85 - 86 - return temp; 87 - } 88 - 89 - static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) 90 - { 91 - unsigned int temp; 92 - 93 - __asm__ __volatile__( 94 - "1: llock %0, [%1] \n" 95 - " bic %0, %0, %2 \n" 96 - " scond %0, [%1] \n" 97 - " bnz 1b \n" 98 - : "=&r"(temp) 99 - : "r"(addr), "ir"(mask) 100 - : "cc"); 43 + #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ 44 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 45 + { \ 46 + unsigned int temp; \ 47 + \ 48 + __asm__ __volatile__( \ 49 + "1: llock %0, [%1] \n" \ 50 + " " #asm_op " %0, %0, %2 \n" \ 51 + " scond %0, [%1] \n" \ 52 + " bnz 1b \n" \ 53 + : "=&r"(temp) \ 54 + : "r"(&v->counter), "ir"(i) \ 55 + : "cc"); \ 56 + \ 57 + return temp; \ 101 58 } 102 59 103 60 #else /* !CONFIG_ARC_HAS_LLSC */ ··· 83 126 v->counter = i; 84 127 atomic_ops_unlock(flags); 85 128 } 129 + 86 130 #endif 87 131 88 132 /* ··· 91 133 * Locking would change to irq-disabling only (UP) and spinlocks (SMP) 92 134 */ 93 135 94 - static inline void atomic_add(int i, atomic_t *v) 95 - { 96 - unsigned long flags; 97 - 98 - atomic_ops_lock(flags); 99 - v->counter += i; 100 - atomic_ops_unlock(flags); 136 + #define ATOMIC_OP(op, c_op, asm_op) \ 137 + static inline void atomic_##op(int i, atomic_t *v) \ 138 + { \ 139 + unsigned long flags; \ 140 + \ 141 + atomic_ops_lock(flags); \ 142 + v->counter c_op i; \ 143 + atomic_ops_unlock(flags); \ 101 144 } 102 145 103 - static inline void atomic_sub(int i, atomic_t *v) 104 - { 105 - unsigned long flags; 106 - 107 - atomic_ops_lock(flags); 108 - v->counter -= i; 109 - atomic_ops_unlock(flags); 110 - } 111 - 112 - static inline int atomic_add_return(int i, atomic_t *v) 113 - { 114 - unsigned long flags; 115 - unsigned long temp; 116 - 117 - 
atomic_ops_lock(flags); 118 - temp = v->counter; 119 - temp += i; 120 - v->counter = temp; 121 - atomic_ops_unlock(flags); 122 - 123 - return temp; 124 - } 125 - 126 - static inline int atomic_sub_return(int i, atomic_t *v) 127 - { 128 - unsigned long flags; 129 - unsigned long temp; 130 - 131 - atomic_ops_lock(flags); 132 - temp = v->counter; 133 - temp -= i; 134 - v->counter = temp; 135 - atomic_ops_unlock(flags); 136 - 137 - return temp; 138 - } 139 - 140 - static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) 141 - { 142 - unsigned long flags; 143 - 144 - atomic_ops_lock(flags); 145 - *addr &= ~mask; 146 - atomic_ops_unlock(flags); 146 + #define ATOMIC_OP_RETURN(op, c_op) \ 147 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 148 + { \ 149 + unsigned long flags; \ 150 + unsigned long temp; \ 151 + \ 152 + atomic_ops_lock(flags); \ 153 + temp = v->counter; \ 154 + temp c_op i; \ 155 + v->counter = temp; \ 156 + atomic_ops_unlock(flags); \ 157 + \ 158 + return temp; \ 147 159 } 148 160 149 161 #endif /* !CONFIG_ARC_HAS_LLSC */ 162 + 163 + #define ATOMIC_OPS(op, c_op, asm_op) \ 164 + ATOMIC_OP(op, c_op, asm_op) \ 165 + ATOMIC_OP_RETURN(op, c_op, asm_op) 166 + 167 + ATOMIC_OPS(add, +=, add) 168 + ATOMIC_OPS(sub, -=, sub) 169 + ATOMIC_OP(and, &=, and) 170 + 171 + #define atomic_clear_mask(mask, v) atomic_and(~(mask), (v)) 172 + 173 + #undef ATOMIC_OPS 174 + #undef ATOMIC_OP_RETURN 175 + #undef ATOMIC_OP 150 176 151 177 /** 152 178 * __atomic_add_unless - add unless the number is a given value
+121 -180
arch/arm/include/asm/atomic.h
··· 27 27 * strex/ldrex monitor on some implementations. The reason we can use it for 28 28 * atomic_set() is the clrex or dummy strex done on every exception return. 29 29 */ 30 - #define atomic_read(v) (*(volatile int *)&(v)->counter) 30 + #define atomic_read(v) ACCESS_ONCE((v)->counter) 31 31 #define atomic_set(v,i) (((v)->counter) = (i)) 32 32 33 33 #if __LINUX_ARM_ARCH__ >= 6 ··· 37 37 * store exclusive to ensure that these are atomic. We may loop 38 38 * to ensure that the update happens. 39 39 */ 40 - static inline void atomic_add(int i, atomic_t *v) 41 - { 42 - unsigned long tmp; 43 - int result; 44 40 45 - prefetchw(&v->counter); 46 - __asm__ __volatile__("@ atomic_add\n" 47 - "1: ldrex %0, [%3]\n" 48 - " add %0, %0, %4\n" 49 - " strex %1, %0, [%3]\n" 50 - " teq %1, #0\n" 51 - " bne 1b" 52 - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) 53 - : "r" (&v->counter), "Ir" (i) 54 - : "cc"); 55 - } 41 + #define ATOMIC_OP(op, c_op, asm_op) \ 42 + static inline void atomic_##op(int i, atomic_t *v) \ 43 + { \ 44 + unsigned long tmp; \ 45 + int result; \ 46 + \ 47 + prefetchw(&v->counter); \ 48 + __asm__ __volatile__("@ atomic_" #op "\n" \ 49 + "1: ldrex %0, [%3]\n" \ 50 + " " #asm_op " %0, %0, %4\n" \ 51 + " strex %1, %0, [%3]\n" \ 52 + " teq %1, #0\n" \ 53 + " bne 1b" \ 54 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \ 55 + : "r" (&v->counter), "Ir" (i) \ 56 + : "cc"); \ 57 + } \ 56 58 57 - static inline int atomic_add_return(int i, atomic_t *v) 58 - { 59 - unsigned long tmp; 60 - int result; 61 - 62 - smp_mb(); 63 - prefetchw(&v->counter); 64 - 65 - __asm__ __volatile__("@ atomic_add_return\n" 66 - "1: ldrex %0, [%3]\n" 67 - " add %0, %0, %4\n" 68 - " strex %1, %0, [%3]\n" 69 - " teq %1, #0\n" 70 - " bne 1b" 71 - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) 72 - : "r" (&v->counter), "Ir" (i) 73 - : "cc"); 74 - 75 - smp_mb(); 76 - 77 - return result; 78 - } 79 - 80 - static inline void atomic_sub(int i, atomic_t *v) 81 - { 82 - unsigned long tmp; 83 - int result; 84 - 85 - prefetchw(&v->counter); 86 - __asm__ __volatile__("@ atomic_sub\n" 87 - "1: ldrex %0, [%3]\n" 88 - " sub %0, %0, %4\n" 89 - " strex %1, %0, [%3]\n" 90 - " teq %1, #0\n" 91 - " bne 1b" 92 - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) 93 - : "r" (&v->counter), "Ir" (i) 94 - : "cc"); 95 - } 96 - 97 - static inline int atomic_sub_return(int i, atomic_t *v) 98 - { 99 - unsigned long tmp; 100 - int result; 101 - 102 - smp_mb(); 103 - prefetchw(&v->counter); 104 - 105 - __asm__ __volatile__("@ atomic_sub_return\n" 106 - "1: ldrex %0, [%3]\n" 107 - " sub %0, %0, %4\n" 108 - " strex %1, %0, [%3]\n" 109 - " teq %1, #0\n" 110 - " bne 1b" 111 - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) 112 - : "r" (&v->counter), "Ir" (i) 113 - : "cc"); 114 - 115 - smp_mb(); 116 - 117 - return result; 59 + #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ 60 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 61 + { \ 62 + unsigned long tmp; \ 63 + int result; \ 64 + \ 65 + smp_mb(); \ 66 + prefetchw(&v->counter); \ 67 + \ 68 + __asm__ __volatile__("@ atomic_" #op "_return\n" \ 69 + "1: ldrex %0, [%3]\n" \ 70 + " " #asm_op " %0, %0, %4\n" \ 71 + " strex %1, %0, [%3]\n" \ 72 + " teq %1, #0\n" \ 73 + " bne 1b" \ 74 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \ 75 + : "r" (&v->counter), "Ir" (i) \ 76 + : "cc"); \ 77 + \ 78 + smp_mb(); \ 79 + \ 80 + return result; \ 118 81 } 119 82 120 83 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) ··· 137 174 #error SMP not supported on pre-ARMv6 CPUs 138 
175 #endif 139 176 140 - static inline int atomic_add_return(int i, atomic_t *v) 141 - { 142 - unsigned long flags; 143 - int val; 177 + #define ATOMIC_OP(op, c_op, asm_op) \ 178 + static inline void atomic_##op(int i, atomic_t *v) \ 179 + { \ 180 + unsigned long flags; \ 181 + \ 182 + raw_local_irq_save(flags); \ 183 + v->counter c_op i; \ 184 + raw_local_irq_restore(flags); \ 185 + } \ 144 186 145 - raw_local_irq_save(flags); 146 - val = v->counter; 147 - v->counter = val += i; 148 - raw_local_irq_restore(flags); 149 - 150 - return val; 187 + #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ 188 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 189 + { \ 190 + unsigned long flags; \ 191 + int val; \ 192 + \ 193 + raw_local_irq_save(flags); \ 194 + v->counter c_op i; \ 195 + val = v->counter; \ 196 + raw_local_irq_restore(flags); \ 197 + \ 198 + return val; \ 151 199 } 152 - #define atomic_add(i, v) (void) atomic_add_return(i, v) 153 - 154 - static inline int atomic_sub_return(int i, atomic_t *v) 155 - { 156 - unsigned long flags; 157 - int val; 158 - 159 - raw_local_irq_save(flags); 160 - val = v->counter; 161 - v->counter = val -= i; 162 - raw_local_irq_restore(flags); 163 - 164 - return val; 165 - } 166 - #define atomic_sub(i, v) (void) atomic_sub_return(i, v) 167 200 168 201 static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 169 202 { ··· 186 227 } 187 228 188 229 #endif /* __LINUX_ARM_ARCH__ */ 230 + 231 + #define ATOMIC_OPS(op, c_op, asm_op) \ 232 + ATOMIC_OP(op, c_op, asm_op) \ 233 + ATOMIC_OP_RETURN(op, c_op, asm_op) 234 + 235 + ATOMIC_OPS(add, +=, add) 236 + ATOMIC_OPS(sub, -=, sub) 237 + 238 + #undef ATOMIC_OPS 239 + #undef ATOMIC_OP_RETURN 240 + #undef ATOMIC_OP 189 241 190 242 #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 191 243 ··· 270 300 } 271 301 #endif 272 302 273 - static inline void atomic64_add(long long i, atomic64_t *v) 274 - { 275 - long long result; 276 - unsigned long tmp; 303 + #define ATOMIC64_OP(op, op1, op2) \ 304 + static inline void atomic64_##op(long long i, atomic64_t *v) \ 305 + { \ 306 + long long result; \ 307 + unsigned long tmp; \ 308 + \ 309 + prefetchw(&v->counter); \ 310 + __asm__ __volatile__("@ atomic64_" #op "\n" \ 311 + "1: ldrexd %0, %H0, [%3]\n" \ 312 + " " #op1 " %Q0, %Q0, %Q4\n" \ 313 + " " #op2 " %R0, %R0, %R4\n" \ 314 + " strexd %1, %0, %H0, [%3]\n" \ 315 + " teq %1, #0\n" \ 316 + " bne 1b" \ 317 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \ 318 + : "r" (&v->counter), "r" (i) \ 319 + : "cc"); \ 320 + } \ 277 321 278 - prefetchw(&v->counter); 279 - __asm__ __volatile__("@ atomic64_add\n" 280 - "1: ldrexd %0, %H0, [%3]\n" 281 - " adds %Q0, %Q0, %Q4\n" 282 - " adc %R0, %R0, %R4\n" 283 - " strexd %1, %0, %H0, [%3]\n" 284 - " teq %1, #0\n" 285 - " bne 1b" 286 - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) 287 - : "r" (&v->counter), "r" (i) 288 - : "cc"); 322 + #define ATOMIC64_OP_RETURN(op, op1, op2) \ 323 + static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ 324 + { \ 325 + long long result; \ 326 + unsigned long tmp; \ 327 + \ 328 + smp_mb(); \ 329 + prefetchw(&v->counter); \ 330 + \ 331 + __asm__ __volatile__("@ atomic64_" #op "_return\n" \ 332 + "1: ldrexd %0, %H0, [%3]\n" \ 333 + " " #op1 " %Q0, %Q0, %Q4\n" \ 334 + " " #op2 " %R0, %R0, %R4\n" \ 335 + " strexd %1, %0, %H0, [%3]\n" \ 336 + " teq %1, #0\n" \ 337 + " bne 1b" \ 338 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \ 339 + : "r" (&v->counter), "r" (i) \ 340 + : "cc"); \ 341 + \ 342 + smp_mb(); \ 343 + \ 
344 + return result; \ 289 345 } 290 346 291 - static inline long long atomic64_add_return(long long i, atomic64_t *v) 292 - { 293 - long long result; 294 - unsigned long tmp; 347 + #define ATOMIC64_OPS(op, op1, op2) \ 348 + ATOMIC64_OP(op, op1, op2) \ 349 + ATOMIC64_OP_RETURN(op, op1, op2) 295 350 296 - smp_mb(); 297 - prefetchw(&v->counter); 351 + ATOMIC64_OPS(add, adds, adc) 352 + ATOMIC64_OPS(sub, subs, sbc) 298 353 299 - __asm__ __volatile__("@ atomic64_add_return\n" 300 - "1: ldrexd %0, %H0, [%3]\n" 301 - " adds %Q0, %Q0, %Q4\n" 302 - " adc %R0, %R0, %R4\n" 303 - " strexd %1, %0, %H0, [%3]\n" 304 - " teq %1, #0\n" 305 - " bne 1b" 306 - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) 307 - : "r" (&v->counter), "r" (i) 308 - : "cc"); 309 - 310 - smp_mb(); 311 - 312 - return result; 313 - } 314 - 315 - static inline void atomic64_sub(long long i, atomic64_t *v) 316 - { 317 - long long result; 318 - unsigned long tmp; 319 - 320 - prefetchw(&v->counter); 321 - __asm__ __volatile__("@ atomic64_sub\n" 322 - "1: ldrexd %0, %H0, [%3]\n" 323 - " subs %Q0, %Q0, %Q4\n" 324 - " sbc %R0, %R0, %R4\n" 325 - " strexd %1, %0, %H0, [%3]\n" 326 - " teq %1, #0\n" 327 - " bne 1b" 328 - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) 329 - : "r" (&v->counter), "r" (i) 330 - : "cc"); 331 - } 332 - 333 - static inline long long atomic64_sub_return(long long i, atomic64_t *v) 334 - { 335 - long long result; 336 - unsigned long tmp; 337 - 338 - smp_mb(); 339 - prefetchw(&v->counter); 340 - 341 - __asm__ __volatile__("@ atomic64_sub_return\n" 342 - "1: ldrexd %0, %H0, [%3]\n" 343 - " subs %Q0, %Q0, %Q4\n" 344 - " sbc %R0, %R0, %R4\n" 345 - " strexd %1, %0, %H0, [%3]\n" 346 - " teq %1, #0\n" 347 - " bne 1b" 348 - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) 349 - : "r" (&v->counter), "r" (i) 350 - : "cc"); 351 - 352 - smp_mb(); 353 - 354 - return result; 355 - } 354 + #undef ATOMIC64_OPS 355 + #undef ATOMIC64_OP_RETURN 356 + #undef ATOMIC64_OP 356 357 357 358 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, 358 359 long long new)
+81 -118
arch/arm64/include/asm/atomic.h
··· 35 35 * strex/ldrex monitor on some implementations. The reason we can use it for 36 36 * atomic_set() is the clrex or dummy strex done on every exception return. 37 37 */ 38 - #define atomic_read(v) (*(volatile int *)&(v)->counter) 38 + #define atomic_read(v) ACCESS_ONCE((v)->counter) 39 39 #define atomic_set(v,i) (((v)->counter) = (i)) 40 40 41 41 /* ··· 43 43 * store exclusive to ensure that these are atomic. We may loop 44 44 * to ensure that the update happens. 45 45 */ 46 - static inline void atomic_add(int i, atomic_t *v) 47 - { 48 - unsigned long tmp; 49 - int result; 50 46 51 - asm volatile("// atomic_add\n" 52 - "1: ldxr %w0, %2\n" 53 - " add %w0, %w0, %w3\n" 54 - " stxr %w1, %w0, %2\n" 55 - " cbnz %w1, 1b" 56 - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 57 - : "Ir" (i)); 47 + #define ATOMIC_OP(op, asm_op) \ 48 + static inline void atomic_##op(int i, atomic_t *v) \ 49 + { \ 50 + unsigned long tmp; \ 51 + int result; \ 52 + \ 53 + asm volatile("// atomic_" #op "\n" \ 54 + "1: ldxr %w0, %2\n" \ 55 + " " #asm_op " %w0, %w0, %w3\n" \ 56 + " stxr %w1, %w0, %2\n" \ 57 + " cbnz %w1, 1b" \ 58 + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ 59 + : "Ir" (i)); \ 60 + } \ 61 + 62 + #define ATOMIC_OP_RETURN(op, asm_op) \ 63 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 64 + { \ 65 + unsigned long tmp; \ 66 + int result; \ 67 + \ 68 + asm volatile("// atomic_" #op "_return\n" \ 69 + "1: ldxr %w0, %2\n" \ 70 + " " #asm_op " %w0, %w0, %w3\n" \ 71 + " stlxr %w1, %w0, %2\n" \ 72 + " cbnz %w1, 1b" \ 73 + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ 74 + : "Ir" (i) \ 75 + : "memory"); \ 76 + \ 77 + smp_mb(); \ 78 + return result; \ 58 79 } 59 80 60 - static inline int atomic_add_return(int i, atomic_t *v) 61 - { 62 - unsigned long tmp; 63 - int result; 81 + #define ATOMIC_OPS(op, asm_op) \ 82 + ATOMIC_OP(op, asm_op) \ 83 + ATOMIC_OP_RETURN(op, asm_op) 64 84 65 - asm volatile("// atomic_add_return\n" 66 - "1: ldxr %w0, %2\n" 67 - " add %w0, %w0, %w3\n" 68 - " stlxr %w1, %w0, %2\n" 69 - " cbnz %w1, 1b" 70 - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 71 - : "Ir" (i) 72 - : "memory"); 85 + ATOMIC_OPS(add, add) 86 + ATOMIC_OPS(sub, sub) 73 87 74 - smp_mb(); 75 - return result; 76 - } 77 - 78 - static inline void atomic_sub(int i, atomic_t *v) 79 - { 80 - unsigned long tmp; 81 - int result; 82 - 83 - asm volatile("// atomic_sub\n" 84 - "1: ldxr %w0, %2\n" 85 - " sub %w0, %w0, %w3\n" 86 - " stxr %w1, %w0, %2\n" 87 - " cbnz %w1, 1b" 88 - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 89 - : "Ir" (i)); 90 - } 91 - 92 - static inline int atomic_sub_return(int i, atomic_t *v) 93 - { 94 - unsigned long tmp; 95 - int result; 96 - 97 - asm volatile("// atomic_sub_return\n" 98 - "1: ldxr %w0, %2\n" 99 - " sub %w0, %w0, %w3\n" 100 - " stlxr %w1, %w0, %2\n" 101 - " cbnz %w1, 1b" 102 - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 103 - : "Ir" (i) 104 - : "memory"); 105 - 106 - smp_mb(); 107 - return result; 108 - } 88 + #undef ATOMIC_OPS 89 + #undef ATOMIC_OP_RETURN 90 + #undef ATOMIC_OP 109 91 110 92 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) 111 93 { ··· 139 157 */ 140 158 #define ATOMIC64_INIT(i) { (i) } 141 159 142 - #define atomic64_read(v) (*(volatile long *)&(v)->counter) 160 + #define atomic64_read(v) ACCESS_ONCE((v)->counter) 143 161 #define atomic64_set(v,i) (((v)->counter) = (i)) 144 162 145 - static inline void atomic64_add(u64 i, atomic64_t *v) 146 - { 147 - long result; 148 - unsigned long tmp; 163 + #define ATOMIC64_OP(op, asm_op) 
\ 164 + static inline void atomic64_##op(long i, atomic64_t *v) \ 165 + { \ 166 + long result; \ 167 + unsigned long tmp; \ 168 + \ 169 + asm volatile("// atomic64_" #op "\n" \ 170 + "1: ldxr %0, %2\n" \ 171 + " " #asm_op " %0, %0, %3\n" \ 172 + " stxr %w1, %0, %2\n" \ 173 + " cbnz %w1, 1b" \ 174 + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ 175 + : "Ir" (i)); \ 176 + } \ 149 177 150 - asm volatile("// atomic64_add\n" 151 - "1: ldxr %0, %2\n" 152 - " add %0, %0, %3\n" 153 - " stxr %w1, %0, %2\n" 154 - " cbnz %w1, 1b" 155 - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 156 - : "Ir" (i)); 178 + #define ATOMIC64_OP_RETURN(op, asm_op) \ 179 + static inline long atomic64_##op##_return(long i, atomic64_t *v) \ 180 + { \ 181 + long result; \ 182 + unsigned long tmp; \ 183 + \ 184 + asm volatile("// atomic64_" #op "_return\n" \ 185 + "1: ldxr %0, %2\n" \ 186 + " " #asm_op " %0, %0, %3\n" \ 187 + " stlxr %w1, %0, %2\n" \ 188 + " cbnz %w1, 1b" \ 189 + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ 190 + : "Ir" (i) \ 191 + : "memory"); \ 192 + \ 193 + smp_mb(); \ 194 + return result; \ 157 195 } 158 196 159 - static inline long atomic64_add_return(long i, atomic64_t *v) 160 - { 161 - long result; 162 - unsigned long tmp; 197 + #define ATOMIC64_OPS(op, asm_op) \ 198 + ATOMIC64_OP(op, asm_op) \ 199 + ATOMIC64_OP_RETURN(op, asm_op) 163 200 164 - asm volatile("// atomic64_add_return\n" 165 - "1: ldxr %0, %2\n" 166 - " add %0, %0, %3\n" 167 - " stlxr %w1, %0, %2\n" 168 - " cbnz %w1, 1b" 169 - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 170 - : "Ir" (i) 171 - : "memory"); 201 + ATOMIC64_OPS(add, add) 202 + ATOMIC64_OPS(sub, sub) 172 203 173 - smp_mb(); 174 - return result; 175 - } 176 - 177 - static inline void atomic64_sub(u64 i, atomic64_t *v) 178 - { 179 - long result; 180 - unsigned long tmp; 181 - 182 - asm volatile("// atomic64_sub\n" 183 - "1: ldxr %0, %2\n" 184 - " sub %0, %0, %3\n" 185 - " stxr %w1, %0, %2\n" 186 - " cbnz %w1, 1b" 187 - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 188 - : "Ir" (i)); 189 - } 190 - 191 - static inline long atomic64_sub_return(long i, atomic64_t *v) 192 - { 193 - long result; 194 - unsigned long tmp; 195 - 196 - asm volatile("// atomic64_sub_return\n" 197 - "1: ldxr %0, %2\n" 198 - " sub %0, %0, %3\n" 199 - " stlxr %w1, %0, %2\n" 200 - " cbnz %w1, 1b" 201 - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 202 - : "Ir" (i) 203 - : "memory"); 204 - 205 - smp_mb(); 206 - return result; 207 - } 204 + #undef ATOMIC64_OPS 205 + #undef ATOMIC64_OP_RETURN 206 + #undef ATOMIC64_OP 208 207 209 208 static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) 210 209 {
+65 -64
arch/avr32/include/asm/atomic.h
··· 19 19 20 20 #define ATOMIC_INIT(i) { (i) } 21 21 22 - #define atomic_read(v) (*(volatile int *)&(v)->counter) 22 + #define atomic_read(v) ACCESS_ONCE((v)->counter) 23 23 #define atomic_set(v, i) (((v)->counter) = i) 24 24 25 - /* 26 - * atomic_sub_return - subtract the atomic variable 27 - * @i: integer value to subtract 28 - * @v: pointer of type atomic_t 29 - * 30 - * Atomically subtracts @i from @v. Returns the resulting value. 31 - */ 32 - static inline int atomic_sub_return(int i, atomic_t *v) 33 - { 34 - int result; 35 - 36 - asm volatile( 37 - "/* atomic_sub_return */\n" 38 - "1: ssrf 5\n" 39 - " ld.w %0, %2\n" 40 - " sub %0, %3\n" 41 - " stcond %1, %0\n" 42 - " brne 1b" 43 - : "=&r"(result), "=o"(v->counter) 44 - : "m"(v->counter), "rKs21"(i) 45 - : "cc"); 46 - 47 - return result; 25 + #define ATOMIC_OP_RETURN(op, asm_op, asm_con) \ 26 + static inline int __atomic_##op##_return(int i, atomic_t *v) \ 27 + { \ 28 + int result; \ 29 + \ 30 + asm volatile( \ 31 + "/* atomic_" #op "_return */\n" \ 32 + "1: ssrf 5\n" \ 33 + " ld.w %0, %2\n" \ 34 + " " #asm_op " %0, %3\n" \ 35 + " stcond %1, %0\n" \ 36 + " brne 1b" \ 37 + : "=&r" (result), "=o" (v->counter) \ 38 + : "m" (v->counter), #asm_con (i) \ 39 + : "cc"); \ 40 + \ 41 + return result; \ 48 42 } 43 + 44 + ATOMIC_OP_RETURN(sub, sub, rKs21) 45 + ATOMIC_OP_RETURN(add, add, r) 46 + 47 + #undef ATOMIC_OP_RETURN 48 + 49 + /* 50 + * Probably found the reason why we want to use sub with the signed 21-bit 51 + * limit, it uses one less register than the add instruction that can add up to 52 + * 32-bit values. 53 + * 54 + * Both instructions are 32-bit, to use a 16-bit instruction the immediate is 55 + * very small; 4 bit. 56 + * 57 + * sub 32-bit, type IV, takes a register and subtracts a 21-bit immediate. 58 + * add 32-bit, type II, adds two register values together. 59 + */ 60 + #define IS_21BIT_CONST(i) \ 61 + (__builtin_constant_p(i) && ((i) >= -1048575) && ((i) <= 1048576)) 49 62 50 63 /* 51 64 * atomic_add_return - add integer to atomic variable ··· 69 56 */ 70 57 static inline int atomic_add_return(int i, atomic_t *v) 71 58 { 72 - int result; 59 + if (IS_21BIT_CONST(i)) 60 + return __atomic_sub_return(-i, v); 73 61 74 - if (__builtin_constant_p(i) && (i >= -1048575) && (i <= 1048576)) 75 - result = atomic_sub_return(-i, v); 76 - else 77 - asm volatile( 78 - "/* atomic_add_return */\n" 79 - "1: ssrf 5\n" 80 - " ld.w %0, %1\n" 81 - " add %0, %3\n" 82 - " stcond %2, %0\n" 83 - " brne 1b" 84 - : "=&r"(result), "=o"(v->counter) 85 - : "m"(v->counter), "r"(i) 86 - : "cc", "memory"); 87 - 88 - return result; 62 + return __atomic_add_return(i, v); 89 63 } 90 64 91 65 /* 92 - * atomic_sub_unless - sub unless the number is a given value 66 + * atomic_sub_return - subtract the atomic variable 67 + * @i: integer value to subtract 93 68 * @v: pointer of type atomic_t 94 - * @a: the amount to subtract from v... 95 - * @u: ...unless v is equal to u. 96 69 * 97 - * Atomically subtract @a from @v, so long as it was not @u. 98 - * Returns the old value of @v. 99 - */ 100 - static inline void atomic_sub_unless(atomic_t *v, int a, int u) 70 + * Atomically subtracts @i from @v. Returns the resulting value. 
71 + */ 72 + static inline int atomic_sub_return(int i, atomic_t *v) 101 73 { 102 - int tmp; 74 + if (IS_21BIT_CONST(i)) 75 + return __atomic_sub_return(i, v); 103 76 104 - asm volatile( 105 - "/* atomic_sub_unless */\n" 106 - "1: ssrf 5\n" 107 - " ld.w %0, %2\n" 108 - " cp.w %0, %4\n" 109 - " breq 1f\n" 110 - " sub %0, %3\n" 111 - " stcond %1, %0\n" 112 - " brne 1b\n" 113 - "1:" 114 - : "=&r"(tmp), "=o"(v->counter) 115 - : "m"(v->counter), "rKs21"(a), "rKs21"(u) 116 - : "cc", "memory"); 77 + return __atomic_add_return(-i, v); 117 78 } 118 79 119 80 /* ··· 103 116 { 104 117 int tmp, old = atomic_read(v); 105 118 106 - if (__builtin_constant_p(a) && (a >= -1048575) && (a <= 1048576)) 107 - atomic_sub_unless(v, -a, u); 108 - else { 119 + if (IS_21BIT_CONST(a)) { 120 + asm volatile( 121 + "/* __atomic_sub_unless */\n" 122 + "1: ssrf 5\n" 123 + " ld.w %0, %2\n" 124 + " cp.w %0, %4\n" 125 + " breq 1f\n" 126 + " sub %0, %3\n" 127 + " stcond %1, %0\n" 128 + " brne 1b\n" 129 + "1:" 130 + : "=&r"(tmp), "=o"(v->counter) 131 + : "m"(v->counter), "rKs21"(-a), "rKs21"(u) 132 + : "cc", "memory"); 133 + } else { 109 134 asm volatile( 110 135 "/* __atomic_add_unless */\n" 111 136 "1: ssrf 5\n" ··· 135 136 136 137 return old; 137 138 } 139 + 140 + #undef IS_21BIT_CONST 138 141 139 142 /* 140 143 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
+26 -33
arch/cris/include/asm/atomic.h
···
 
  #define ATOMIC_INIT(i)  { (i) }
 
- #define atomic_read(v)	(*(volatile int *)&(v)->counter)
+ #define atomic_read(v)	ACCESS_ONCE((v)->counter)
  #define atomic_set(v,i) (((v)->counter) = (i))
 
  /* These should be written in asm but we do it in C for now. */
 
- static inline void atomic_add(int i, volatile atomic_t *v)
- {
- 	unsigned long flags;
- 	cris_atomic_save(v, flags);
- 	v->counter += i;
- 	cris_atomic_restore(v, flags);
+ #define ATOMIC_OP(op, c_op)						\
+ static inline void atomic_##op(int i, volatile atomic_t *v)		\
+ {									\
+ 	unsigned long flags;						\
+ 	cris_atomic_save(v, flags);					\
+ 	v->counter c_op i;						\
+ 	cris_atomic_restore(v, flags);					\
+ }									\
+ 
+ #define ATOMIC_OP_RETURN(op, c_op)					\
+ static inline int atomic_##op##_return(int i, volatile atomic_t *v)	\
+ {									\
+ 	unsigned long flags;						\
+ 	int retval;							\
+ 	cris_atomic_save(v, flags);					\
+ 	retval = (v->counter c_op i);					\
+ 	cris_atomic_restore(v, flags);					\
+ 	return retval;							\
  }
 
- static inline void atomic_sub(int i, volatile atomic_t *v)
- {
- 	unsigned long flags;
- 	cris_atomic_save(v, flags);
- 	v->counter -= i;
- 	cris_atomic_restore(v, flags);
- }
+ #define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
 
- static inline int atomic_add_return(int i, volatile atomic_t *v)
- {
- 	unsigned long flags;
- 	int retval;
- 	cris_atomic_save(v, flags);
- 	retval = (v->counter += i);
- 	cris_atomic_restore(v, flags);
- 	return retval;
- }
+ ATOMIC_OPS(add, +=)
+ ATOMIC_OPS(sub, -=)
+ 
+ #undef ATOMIC_OPS
+ #undef ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
 
  #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
- 
- static inline int atomic_sub_return(int i, volatile atomic_t *v)
- {
- 	unsigned long flags;
- 	int retval;
- 	cris_atomic_save(v, flags);
- 	retval = (v->counter -= i);
- 	cris_atomic_restore(v, flags);
- 	return retval;
- }
 
  static inline int atomic_sub_and_test(int i, volatile atomic_t *v)
  {
+1 -1
arch/frv/include/asm/atomic.h
···
   */
 
  #define ATOMIC_INIT(i)		{ (i) }
- #define atomic_read(v)		(*(volatile int *)&(v)->counter)
+ #define atomic_read(v)		ACCESS_ONCE((v)->counter)
  #define atomic_set(v, i)	(((v)->counter) = (i))
 
  #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+36 -30
arch/hexagon/include/asm/atomic.h
···
  	return __oldval;
  }
 
- static inline int atomic_add_return(int i, atomic_t *v)
- {
- 	int output;
+ #define ATOMIC_OP(op)							\
+ static inline void atomic_##op(int i, atomic_t *v)			\
+ {									\
+ 	int output;							\
+ 									\
+ 	__asm__ __volatile__ (						\
+ 		"1:	%0 = memw_locked(%1);\n"			\
+ 		"	%0 = "#op "(%0,%2);\n"				\
+ 		"	memw_locked(%1,P3)=%0;\n"			\
+ 		"	if !P3 jump 1b;\n"				\
+ 		: "=&r" (output)					\
+ 		: "r" (&v->counter), "r" (i)				\
+ 		: "memory", "p3"					\
+ 	);								\
+ }									\
 
- 	__asm__ __volatile__ (
- 		"1:	%0 = memw_locked(%1);\n"
- 		"	%0 = add(%0,%2);\n"
- 		"	memw_locked(%1,P3)=%0;\n"
- 		"	if !P3 jump 1b;\n"
- 		: "=&r" (output)
- 		: "r" (&v->counter), "r" (i)
- 		: "memory", "p3"
- 	);
- 	return output;
- 
+ #define ATOMIC_OP_RETURN(op)						\
+ static inline int atomic_##op##_return(int i, atomic_t *v)		\
+ {									\
+ 	int output;							\
+ 									\
+ 	__asm__ __volatile__ (						\
+ 		"1:	%0 = memw_locked(%1);\n"			\
+ 		"	%0 = "#op "(%0,%2);\n"				\
+ 		"	memw_locked(%1,P3)=%0;\n"			\
+ 		"	if !P3 jump 1b;\n"				\
+ 		: "=&r" (output)					\
+ 		: "r" (&v->counter), "r" (i)				\
+ 		: "memory", "p3"					\
+ 	);								\
+ 	return output;							\
  }
 
- #define atomic_add(i, v)	atomic_add_return(i, (v))
+ #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
- static inline int atomic_sub_return(int i, atomic_t *v)
- {
- 	int output;
- 	__asm__ __volatile__ (
- 		"1:	%0 = memw_locked(%1);\n"
- 		"	%0 = sub(%0,%2);\n"
- 		"	memw_locked(%1,P3)=%0\n"
- 		"	if !P3 jump 1b;\n"
- 		: "=&r" (output)
- 		: "r" (&v->counter), "r" (i)
- 		: "memory", "p3"
- 	);
- 	return output;
- }
+ ATOMIC_OPS(add)
+ ATOMIC_OPS(sub)
 
- #define atomic_sub(i, v)	atomic_sub_return(i, (v))
+ #undef ATOMIC_OPS
+ #undef ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
 
  /**
   * __atomic_add_unless - add unless the number is a given value
+85 -101
arch/ia64/include/asm/atomic.h
··· 21 21 #define ATOMIC_INIT(i) { (i) } 22 22 #define ATOMIC64_INIT(i) { (i) } 23 23 24 - #define atomic_read(v) (*(volatile int *)&(v)->counter) 25 - #define atomic64_read(v) (*(volatile long *)&(v)->counter) 24 + #define atomic_read(v) ACCESS_ONCE((v)->counter) 25 + #define atomic64_read(v) ACCESS_ONCE((v)->counter) 26 26 27 27 #define atomic_set(v,i) (((v)->counter) = (i)) 28 28 #define atomic64_set(v,i) (((v)->counter) = (i)) 29 29 30 - static __inline__ int 31 - ia64_atomic_add (int i, atomic_t *v) 32 - { 33 - __s32 old, new; 34 - CMPXCHG_BUGCHECK_DECL 35 - 36 - do { 37 - CMPXCHG_BUGCHECK(v); 38 - old = atomic_read(v); 39 - new = old + i; 40 - } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); 41 - return new; 30 + #define ATOMIC_OP(op, c_op) \ 31 + static __inline__ int \ 32 + ia64_atomic_##op (int i, atomic_t *v) \ 33 + { \ 34 + __s32 old, new; \ 35 + CMPXCHG_BUGCHECK_DECL \ 36 + \ 37 + do { \ 38 + CMPXCHG_BUGCHECK(v); \ 39 + old = atomic_read(v); \ 40 + new = old c_op i; \ 41 + } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \ 42 + return new; \ 42 43 } 43 44 44 - static __inline__ long 45 - ia64_atomic64_add (__s64 i, atomic64_t *v) 46 - { 47 - __s64 old, new; 48 - CMPXCHG_BUGCHECK_DECL 45 + ATOMIC_OP(add, +) 46 + ATOMIC_OP(sub, -) 49 47 50 - do { 51 - CMPXCHG_BUGCHECK(v); 52 - old = atomic64_read(v); 53 - new = old + i; 54 - } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); 55 - return new; 48 + #undef ATOMIC_OP 49 + 50 + #define atomic_add_return(i,v) \ 51 + ({ \ 52 + int __ia64_aar_i = (i); \ 53 + (__builtin_constant_p(i) \ 54 + && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ 55 + || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ 56 + || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \ 57 + || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \ 58 + ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \ 59 + : ia64_atomic_add(__ia64_aar_i, v); \ 60 + }) 61 + 62 + #define atomic_sub_return(i,v) \ 63 + ({ \ 64 + int __ia64_asr_i = (i); \ 65 + (__builtin_constant_p(i) \ 66 + && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \ 67 + || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \ 68 + || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \ 69 + || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \ 70 + ? 
ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \ 71 + : ia64_atomic_sub(__ia64_asr_i, v); \ 72 + }) 73 + 74 + #define ATOMIC64_OP(op, c_op) \ 75 + static __inline__ long \ 76 + ia64_atomic64_##op (__s64 i, atomic64_t *v) \ 77 + { \ 78 + __s64 old, new; \ 79 + CMPXCHG_BUGCHECK_DECL \ 80 + \ 81 + do { \ 82 + CMPXCHG_BUGCHECK(v); \ 83 + old = atomic64_read(v); \ 84 + new = old c_op i; \ 85 + } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \ 86 + return new; \ 56 87 } 57 88 58 - static __inline__ int 59 - ia64_atomic_sub (int i, atomic_t *v) 60 - { 61 - __s32 old, new; 62 - CMPXCHG_BUGCHECK_DECL 89 + ATOMIC64_OP(add, +) 90 + ATOMIC64_OP(sub, -) 63 91 64 - do { 65 - CMPXCHG_BUGCHECK(v); 66 - old = atomic_read(v); 67 - new = old - i; 68 - } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); 69 - return new; 70 - } 92 + #undef ATOMIC64_OP 71 93 72 - static __inline__ long 73 - ia64_atomic64_sub (__s64 i, atomic64_t *v) 74 - { 75 - __s64 old, new; 76 - CMPXCHG_BUGCHECK_DECL 94 + #define atomic64_add_return(i,v) \ 95 + ({ \ 96 + long __ia64_aar_i = (i); \ 97 + (__builtin_constant_p(i) \ 98 + && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ 99 + || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ 100 + || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \ 101 + || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \ 102 + ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \ 103 + : ia64_atomic64_add(__ia64_aar_i, v); \ 104 + }) 77 105 78 - do { 79 - CMPXCHG_BUGCHECK(v); 80 - old = atomic64_read(v); 81 - new = old - i; 82 - } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); 83 - return new; 84 - } 106 + #define atomic64_sub_return(i,v) \ 107 + ({ \ 108 + long __ia64_asr_i = (i); \ 109 + (__builtin_constant_p(i) \ 110 + && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \ 111 + || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \ 112 + || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \ 113 + || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \ 114 + ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \ 115 + : ia64_atomic64_sub(__ia64_asr_i, v); \ 116 + }) 85 117 86 118 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) 87 119 #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) ··· 155 123 156 124 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 157 125 158 - #define atomic_add_return(i,v) \ 159 - ({ \ 160 - int __ia64_aar_i = (i); \ 161 - (__builtin_constant_p(i) \ 162 - && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ 163 - || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ 164 - || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \ 165 - || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \ 166 - ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \ 167 - : ia64_atomic_add(__ia64_aar_i, v); \ 168 - }) 169 - 170 - #define atomic64_add_return(i,v) \ 171 - ({ \ 172 - long __ia64_aar_i = (i); \ 173 - (__builtin_constant_p(i) \ 174 - && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ 175 - || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ 176 - || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \ 177 - || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \ 178 - ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \ 179 - : ia64_atomic64_add(__ia64_aar_i, v); \ 180 - }) 181 - 182 126 /* 183 127 * Atomically add I to V and return TRUE if the resulting value is 184 128 * negative. 
··· 171 163 return atomic64_add_return(i, v) < 0; 172 164 } 173 165 174 - #define atomic_sub_return(i,v) \ 175 - ({ \ 176 - int __ia64_asr_i = (i); \ 177 - (__builtin_constant_p(i) \ 178 - && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \ 179 - || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \ 180 - || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \ 181 - || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \ 182 - ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \ 183 - : ia64_atomic_sub(__ia64_asr_i, v); \ 184 - }) 185 - 186 - #define atomic64_sub_return(i,v) \ 187 - ({ \ 188 - long __ia64_asr_i = (i); \ 189 - (__builtin_constant_p(i) \ 190 - && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \ 191 - || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \ 192 - || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \ 193 - || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \ 194 - ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \ 195 - : ia64_atomic64_sub(__ia64_asr_i, v); \ 196 - }) 197 - 198 166 #define atomic_dec_return(v) atomic_sub_return(1, (v)) 199 167 #define atomic_inc_return(v) atomic_add_return(1, (v)) 200 168 #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) ··· 183 199 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0) 184 200 #define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0) 185 201 186 - #define atomic_add(i,v) atomic_add_return((i), (v)) 187 - #define atomic_sub(i,v) atomic_sub_return((i), (v)) 202 + #define atomic_add(i,v) (void)atomic_add_return((i), (v)) 203 + #define atomic_sub(i,v) (void)atomic_sub_return((i), (v)) 188 204 #define atomic_inc(v) atomic_add(1, (v)) 189 205 #define atomic_dec(v) atomic_sub(1, (v)) 190 206 191 - #define atomic64_add(i,v) atomic64_add_return((i), (v)) 192 - #define atomic64_sub(i,v) atomic64_sub_return((i), (v)) 207 + #define atomic64_add(i,v) (void)atomic64_add_return((i), (v)) 208 + #define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v)) 193 209 #define atomic64_inc(v) atomic64_add(1, (v)) 194 210 #define atomic64_dec(v) atomic64_sub(1, (v)) 195 211
+57 -86
arch/m32r/include/asm/atomic.h
··· 28 28 * 29 29 * Atomically reads the value of @v. 30 30 */ 31 - #define atomic_read(v) (*(volatile int *)&(v)->counter) 31 + #define atomic_read(v) ACCESS_ONCE((v)->counter) 32 32 33 33 /** 34 34 * atomic_set - set atomic variable ··· 39 39 */ 40 40 #define atomic_set(v,i) (((v)->counter) = (i)) 41 41 42 - /** 43 - * atomic_add_return - add integer to atomic variable and return it 44 - * @i: integer value to add 45 - * @v: pointer of type atomic_t 46 - * 47 - * Atomically adds @i to @v and return (@i + @v). 48 - */ 49 - static __inline__ int atomic_add_return(int i, atomic_t *v) 50 - { 51 - unsigned long flags; 52 - int result; 53 - 54 - local_irq_save(flags); 55 - __asm__ __volatile__ ( 56 - "# atomic_add_return \n\t" 57 - DCACHE_CLEAR("%0", "r4", "%1") 58 - M32R_LOCK" %0, @%1; \n\t" 59 - "add %0, %2; \n\t" 60 - M32R_UNLOCK" %0, @%1; \n\t" 61 - : "=&r" (result) 62 - : "r" (&v->counter), "r" (i) 63 - : "memory" 64 42 #ifdef CONFIG_CHIP_M32700_TS1 65 - , "r4" 66 - #endif /* CONFIG_CHIP_M32700_TS1 */ 67 - ); 68 - local_irq_restore(flags); 43 + #define __ATOMIC_CLOBBER , "r4" 44 + #else 45 + #define __ATOMIC_CLOBBER 46 + #endif 69 47 70 - return result; 48 + #define ATOMIC_OP(op) \ 49 + static __inline__ void atomic_##op(int i, atomic_t *v) \ 50 + { \ 51 + unsigned long flags; \ 52 + int result; \ 53 + \ 54 + local_irq_save(flags); \ 55 + __asm__ __volatile__ ( \ 56 + "# atomic_" #op " \n\t" \ 57 + DCACHE_CLEAR("%0", "r4", "%1") \ 58 + M32R_LOCK" %0, @%1; \n\t" \ 59 + #op " %0, %2; \n\t" \ 60 + M32R_UNLOCK" %0, @%1; \n\t" \ 61 + : "=&r" (result) \ 62 + : "r" (&v->counter), "r" (i) \ 63 + : "memory" \ 64 + __ATOMIC_CLOBBER \ 65 + ); \ 66 + local_irq_restore(flags); \ 67 + } \ 68 + 69 + #define ATOMIC_OP_RETURN(op) \ 70 + static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ 71 + { \ 72 + unsigned long flags; \ 73 + int result; \ 74 + \ 75 + local_irq_save(flags); \ 76 + __asm__ __volatile__ ( \ 77 + "# atomic_" #op "_return \n\t" \ 78 + DCACHE_CLEAR("%0", "r4", "%1") \ 79 + M32R_LOCK" %0, @%1; \n\t" \ 80 + #op " %0, %2; \n\t" \ 81 + M32R_UNLOCK" %0, @%1; \n\t" \ 82 + : "=&r" (result) \ 83 + : "r" (&v->counter), "r" (i) \ 84 + : "memory" \ 85 + __ATOMIC_CLOBBER \ 86 + ); \ 87 + local_irq_restore(flags); \ 88 + \ 89 + return result; \ 71 90 } 72 91 73 - /** 74 - * atomic_sub_return - subtract integer from atomic variable and return it 75 - * @i: integer value to subtract 76 - * @v: pointer of type atomic_t 77 - * 78 - * Atomically subtracts @i from @v and return (@v - @i). 79 - */ 80 - static __inline__ int atomic_sub_return(int i, atomic_t *v) 81 - { 82 - unsigned long flags; 83 - int result; 92 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) 84 93 85 - local_irq_save(flags); 86 - __asm__ __volatile__ ( 87 - "# atomic_sub_return \n\t" 88 - DCACHE_CLEAR("%0", "r4", "%1") 89 - M32R_LOCK" %0, @%1; \n\t" 90 - "sub %0, %2; \n\t" 91 - M32R_UNLOCK" %0, @%1; \n\t" 92 - : "=&r" (result) 93 - : "r" (&v->counter), "r" (i) 94 - : "memory" 95 - #ifdef CONFIG_CHIP_M32700_TS1 96 - , "r4" 97 - #endif /* CONFIG_CHIP_M32700_TS1 */ 98 - ); 99 - local_irq_restore(flags); 94 + ATOMIC_OPS(add) 95 + ATOMIC_OPS(sub) 100 96 101 - return result; 102 - } 103 - 104 - /** 105 - * atomic_add - add integer to atomic variable 106 - * @i: integer value to add 107 - * @v: pointer of type atomic_t 108 - * 109 - * Atomically adds @i to @v. 
110 - */ 111 - #define atomic_add(i,v) ((void) atomic_add_return((i), (v))) 112 - 113 - /** 114 - * atomic_sub - subtract the atomic variable 115 - * @i: integer value to subtract 116 - * @v: pointer of type atomic_t 117 - * 118 - * Atomically subtracts @i from @v. 119 - */ 120 - #define atomic_sub(i,v) ((void) atomic_sub_return((i), (v))) 97 + #undef ATOMIC_OPS 98 + #undef ATOMIC_OP_RETURN 99 + #undef ATOMIC_OP 121 100 122 101 /** 123 102 * atomic_sub_and_test - subtract value from variable and test result ··· 130 151 : "=&r" (result) 131 152 : "r" (&v->counter) 132 153 : "memory" 133 - #ifdef CONFIG_CHIP_M32700_TS1 134 - , "r4" 135 - #endif /* CONFIG_CHIP_M32700_TS1 */ 154 + __ATOMIC_CLOBBER 136 155 ); 137 156 local_irq_restore(flags); 138 157 ··· 158 181 : "=&r" (result) 159 182 : "r" (&v->counter) 160 183 : "memory" 161 - #ifdef CONFIG_CHIP_M32700_TS1 162 - , "r4" 163 - #endif /* CONFIG_CHIP_M32700_TS1 */ 184 + __ATOMIC_CLOBBER 164 185 ); 165 186 local_irq_restore(flags); 166 187 ··· 255 280 : "=&r" (tmp) 256 281 : "r" (addr), "r" (~mask) 257 282 : "memory" 258 - #ifdef CONFIG_CHIP_M32700_TS1 259 - , "r5" 260 - #endif /* CONFIG_CHIP_M32700_TS1 */ 283 + __ATOMIC_CLOBBER 261 284 ); 262 285 local_irq_restore(flags); 263 286 } ··· 275 302 : "=&r" (tmp) 276 303 : "r" (addr), "r" (mask) 277 304 : "memory" 278 - #ifdef CONFIG_CHIP_M32700_TS1 279 - , "r5" 280 - #endif /* CONFIG_CHIP_M32700_TS1 */ 305 + __ATOMIC_CLOBBER 281 306 ); 282 307 local_irq_restore(flags); 283 308 }
+48 -63
arch/m68k/include/asm/atomic.h
··· 17 17 18 18 #define ATOMIC_INIT(i) { (i) } 19 19 20 - #define atomic_read(v) (*(volatile int *)&(v)->counter) 20 + #define atomic_read(v) ACCESS_ONCE((v)->counter) 21 21 #define atomic_set(v, i) (((v)->counter) = i) 22 22 23 23 /* ··· 30 30 #define ASM_DI "di" 31 31 #endif 32 32 33 - static inline void atomic_add(int i, atomic_t *v) 34 - { 35 - __asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i)); 33 + #define ATOMIC_OP(op, c_op, asm_op) \ 34 + static inline void atomic_##op(int i, atomic_t *v) \ 35 + { \ 36 + __asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\ 37 + } \ 38 + 39 + #ifdef CONFIG_RMW_INSNS 40 + 41 + #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ 42 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 43 + { \ 44 + int t, tmp; \ 45 + \ 46 + __asm__ __volatile__( \ 47 + "1: movel %2,%1\n" \ 48 + " " #asm_op "l %3,%1\n" \ 49 + " casl %2,%1,%0\n" \ 50 + " jne 1b" \ 51 + : "+m" (*v), "=&d" (t), "=&d" (tmp) \ 52 + : "g" (i), "2" (atomic_read(v))); \ 53 + return t; \ 36 54 } 37 55 38 - static inline void atomic_sub(int i, atomic_t *v) 39 - { 40 - __asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i)); 56 + #else 57 + 58 + #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ 59 + static inline int atomic_##op##_return(int i, atomic_t * v) \ 60 + { \ 61 + unsigned long flags; \ 62 + int t; \ 63 + \ 64 + local_irq_save(flags); \ 65 + t = (v->counter c_op i); \ 66 + local_irq_restore(flags); \ 67 + \ 68 + return t; \ 41 69 } 70 + 71 + #endif /* CONFIG_RMW_INSNS */ 72 + 73 + #define ATOMIC_OPS(op, c_op, asm_op) \ 74 + ATOMIC_OP(op, c_op, asm_op) \ 75 + ATOMIC_OP_RETURN(op, c_op, asm_op) 76 + 77 + ATOMIC_OPS(add, +=, add) 78 + ATOMIC_OPS(sub, -=, sub) 79 + 80 + #undef ATOMIC_OPS 81 + #undef ATOMIC_OP_RETURN 82 + #undef ATOMIC_OP 42 83 43 84 static inline void atomic_inc(atomic_t *v) 44 85 { ··· 117 76 118 77 #ifdef CONFIG_RMW_INSNS 119 78 120 - static inline int atomic_add_return(int i, atomic_t *v) 121 - { 122 - int t, tmp; 123 - 124 - __asm__ __volatile__( 125 - "1: movel %2,%1\n" 126 - " addl %3,%1\n" 127 - " casl %2,%1,%0\n" 128 - " jne 1b" 129 - : "+m" (*v), "=&d" (t), "=&d" (tmp) 130 - : "g" (i), "2" (atomic_read(v))); 131 - return t; 132 - } 133 - 134 - static inline int atomic_sub_return(int i, atomic_t *v) 135 - { 136 - int t, tmp; 137 - 138 - __asm__ __volatile__( 139 - "1: movel %2,%1\n" 140 - " subl %3,%1\n" 141 - " casl %2,%1,%0\n" 142 - " jne 1b" 143 - : "+m" (*v), "=&d" (t), "=&d" (tmp) 144 - : "g" (i), "2" (atomic_read(v))); 145 - return t; 146 - } 147 - 148 79 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) 149 80 #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 150 81 151 82 #else /* !CONFIG_RMW_INSNS */ 152 - 153 - static inline int atomic_add_return(int i, atomic_t * v) 154 - { 155 - unsigned long flags; 156 - int t; 157 - 158 - local_irq_save(flags); 159 - t = atomic_read(v); 160 - t += i; 161 - atomic_set(v, t); 162 - local_irq_restore(flags); 163 - 164 - return t; 165 - } 166 - 167 - static inline int atomic_sub_return(int i, atomic_t * v) 168 - { 169 - unsigned long flags; 170 - int t; 171 - 172 - local_irq_save(flags); 173 - t = atomic_read(v); 174 - t -= i; 175 - atomic_set(v, t); 176 - local_irq_restore(flags); 177 - 178 - return t; 179 - } 180 83 181 84 static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 182 85 {
+45 -74
arch/metag/include/asm/atomic_lnkget.h
··· 27 27 return temp; 28 28 } 29 29 30 - static inline void atomic_add(int i, atomic_t *v) 31 - { 32 - int temp; 30 + #define ATOMIC_OP(op) \ 31 + static inline void atomic_##op(int i, atomic_t *v) \ 32 + { \ 33 + int temp; \ 34 + \ 35 + asm volatile ( \ 36 + "1: LNKGETD %0, [%1]\n" \ 37 + " " #op " %0, %0, %2\n" \ 38 + " LNKSETD [%1], %0\n" \ 39 + " DEFR %0, TXSTAT\n" \ 40 + " ANDT %0, %0, #HI(0x3f000000)\n" \ 41 + " CMPT %0, #HI(0x02000000)\n" \ 42 + " BNZ 1b\n" \ 43 + : "=&d" (temp) \ 44 + : "da" (&v->counter), "bd" (i) \ 45 + : "cc"); \ 46 + } \ 33 47 34 - asm volatile ( 35 - "1: LNKGETD %0, [%1]\n" 36 - " ADD %0, %0, %2\n" 37 - " LNKSETD [%1], %0\n" 38 - " DEFR %0, TXSTAT\n" 39 - " ANDT %0, %0, #HI(0x3f000000)\n" 40 - " CMPT %0, #HI(0x02000000)\n" 41 - " BNZ 1b\n" 42 - : "=&d" (temp) 43 - : "da" (&v->counter), "bd" (i) 44 - : "cc"); 48 + #define ATOMIC_OP_RETURN(op) \ 49 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 50 + { \ 51 + int result, temp; \ 52 + \ 53 + smp_mb(); \ 54 + \ 55 + asm volatile ( \ 56 + "1: LNKGETD %1, [%2]\n" \ 57 + " " #op " %1, %1, %3\n" \ 58 + " LNKSETD [%2], %1\n" \ 59 + " DEFR %0, TXSTAT\n" \ 60 + " ANDT %0, %0, #HI(0x3f000000)\n" \ 61 + " CMPT %0, #HI(0x02000000)\n" \ 62 + " BNZ 1b\n" \ 63 + : "=&d" (temp), "=&da" (result) \ 64 + : "da" (&v->counter), "bd" (i) \ 65 + : "cc"); \ 66 + \ 67 + smp_mb(); \ 68 + \ 69 + return result; \ 45 70 } 46 71 47 - static inline void atomic_sub(int i, atomic_t *v) 48 - { 49 - int temp; 72 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) 50 73 51 - asm volatile ( 52 - "1: LNKGETD %0, [%1]\n" 53 - " SUB %0, %0, %2\n" 54 - " LNKSETD [%1], %0\n" 55 - " DEFR %0, TXSTAT\n" 56 - " ANDT %0, %0, #HI(0x3f000000)\n" 57 - " CMPT %0, #HI(0x02000000)\n" 58 - " BNZ 1b\n" 59 - : "=&d" (temp) 60 - : "da" (&v->counter), "bd" (i) 61 - : "cc"); 62 - } 74 + ATOMIC_OPS(add) 75 + ATOMIC_OPS(sub) 63 76 64 - static inline int atomic_add_return(int i, atomic_t *v) 65 - { 66 - int result, temp; 67 - 68 - smp_mb(); 69 - 70 - asm volatile ( 71 - "1: LNKGETD %1, [%2]\n" 72 - " ADD %1, %1, %3\n" 73 - " LNKSETD [%2], %1\n" 74 - " DEFR %0, TXSTAT\n" 75 - " ANDT %0, %0, #HI(0x3f000000)\n" 76 - " CMPT %0, #HI(0x02000000)\n" 77 - " BNZ 1b\n" 78 - : "=&d" (temp), "=&da" (result) 79 - : "da" (&v->counter), "bd" (i) 80 - : "cc"); 81 - 82 - smp_mb(); 83 - 84 - return result; 85 - } 86 - 87 - static inline int atomic_sub_return(int i, atomic_t *v) 88 - { 89 - int result, temp; 90 - 91 - smp_mb(); 92 - 93 - asm volatile ( 94 - "1: LNKGETD %1, [%2]\n" 95 - " SUB %1, %1, %3\n" 96 - " LNKSETD [%2], %1\n" 97 - " DEFR %0, TXSTAT\n" 98 - " ANDT %0, %0, #HI(0x3f000000)\n" 99 - " CMPT %0, #HI(0x02000000)\n" 100 - " BNZ 1b\n" 101 - : "=&d" (temp), "=&da" (result) 102 - : "da" (&v->counter), "bd" (i) 103 - : "cc"); 104 - 105 - smp_mb(); 106 - 107 - return result; 108 - } 77 + #undef ATOMIC_OPS 78 + #undef ATOMIC_OP_RETURN 79 + #undef ATOMIC_OP 109 80 110 81 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 111 82 {
+30 -44
arch/metag/include/asm/atomic_lock1.h
··· 37 37 return i; 38 38 } 39 39 40 - static inline void atomic_add(int i, atomic_t *v) 41 - { 42 - unsigned long flags; 40 + #define ATOMIC_OP(op, c_op) \ 41 + static inline void atomic_##op(int i, atomic_t *v) \ 42 + { \ 43 + unsigned long flags; \ 44 + \ 45 + __global_lock1(flags); \ 46 + fence(); \ 47 + v->counter c_op i; \ 48 + __global_unlock1(flags); \ 49 + } \ 43 50 44 - __global_lock1(flags); 45 - fence(); 46 - v->counter += i; 47 - __global_unlock1(flags); 51 + #define ATOMIC_OP_RETURN(op, c_op) \ 52 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 53 + { \ 54 + unsigned long result; \ 55 + unsigned long flags; \ 56 + \ 57 + __global_lock1(flags); \ 58 + result = v->counter; \ 59 + result c_op i; \ 60 + fence(); \ 61 + v->counter = result; \ 62 + __global_unlock1(flags); \ 63 + \ 64 + return result; \ 48 65 } 49 66 50 - static inline void atomic_sub(int i, atomic_t *v) 51 - { 52 - unsigned long flags; 67 + #define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op) 53 68 54 - __global_lock1(flags); 55 - fence(); 56 - v->counter -= i; 57 - __global_unlock1(flags); 58 - } 69 + ATOMIC_OPS(add, +=) 70 + ATOMIC_OPS(sub, -=) 59 71 60 - static inline int atomic_add_return(int i, atomic_t *v) 61 - { 62 - unsigned long result; 63 - unsigned long flags; 64 - 65 - __global_lock1(flags); 66 - result = v->counter; 67 - result += i; 68 - fence(); 69 - v->counter = result; 70 - __global_unlock1(flags); 71 - 72 - return result; 73 - } 74 - 75 - static inline int atomic_sub_return(int i, atomic_t *v) 76 - { 77 - unsigned long result; 78 - unsigned long flags; 79 - 80 - __global_lock1(flags); 81 - result = v->counter; 82 - result -= i; 83 - fence(); 84 - v->counter = result; 85 - __global_unlock1(flags); 86 - 87 - return result; 88 - } 72 + #undef ATOMIC_OPS 73 + #undef ATOMIC_OP_RETURN 74 + #undef ATOMIC_OP 89 75 90 76 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 91 77 {
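For the lock1 variant the fold preserves the original fence() placement: the void ops fence before the read-modify-write, the return ops fence between computing the new value and writing it back. As a sketch, ATOMIC_OP_RETURN(sub, -=) expands to roughly:

	static inline int atomic_sub_return(int i, atomic_t *v)
	{
		unsigned long result;
		unsigned long flags;

		__global_lock1(flags);
		result = v->counter;
		result -= i;			/* c_op */
		fence();
		v->counter = result;
		__global_unlock1(flags);

		return result;
	}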
+187 -370
arch/mips/include/asm/atomic.h
··· 29 29 * 30 30 * Atomically reads the value of @v. 31 31 */ 32 - #define atomic_read(v) (*(volatile int *)&(v)->counter) 32 + #define atomic_read(v) ACCESS_ONCE((v)->counter) 33 33 34 34 /* 35 35 * atomic_set - set atomic variable ··· 40 40 */ 41 41 #define atomic_set(v, i) ((v)->counter = (i)) 42 42 43 - /* 44 - * atomic_add - add integer to atomic variable 45 - * @i: integer value to add 46 - * @v: pointer of type atomic_t 47 - * 48 - * Atomically adds @i to @v. 49 - */ 50 - static __inline__ void atomic_add(int i, atomic_t * v) 51 - { 52 - if (kernel_uses_llsc && R10000_LLSC_WAR) { 53 - int temp; 43 + #define ATOMIC_OP(op, c_op, asm_op) \ 44 + static __inline__ void atomic_##op(int i, atomic_t * v) \ 45 + { \ 46 + if (kernel_uses_llsc && R10000_LLSC_WAR) { \ 47 + int temp; \ 48 + \ 49 + __asm__ __volatile__( \ 50 + " .set arch=r4000 \n" \ 51 + "1: ll %0, %1 # atomic_" #op " \n" \ 52 + " " #asm_op " %0, %2 \n" \ 53 + " sc %0, %1 \n" \ 54 + " beqzl %0, 1b \n" \ 55 + " .set mips0 \n" \ 56 + : "=&r" (temp), "+m" (v->counter) \ 57 + : "Ir" (i)); \ 58 + } else if (kernel_uses_llsc) { \ 59 + int temp; \ 60 + \ 61 + do { \ 62 + __asm__ __volatile__( \ 63 + " .set arch=r4000 \n" \ 64 + " ll %0, %1 # atomic_" #op "\n" \ 65 + " " #asm_op " %0, %2 \n" \ 66 + " sc %0, %1 \n" \ 67 + " .set mips0 \n" \ 68 + : "=&r" (temp), "+m" (v->counter) \ 69 + : "Ir" (i)); \ 70 + } while (unlikely(!temp)); \ 71 + } else { \ 72 + unsigned long flags; \ 73 + \ 74 + raw_local_irq_save(flags); \ 75 + v->counter c_op i; \ 76 + raw_local_irq_restore(flags); \ 77 + } \ 78 + } \ 54 79 55 - __asm__ __volatile__( 56 - " .set arch=r4000 \n" 57 - "1: ll %0, %1 # atomic_add \n" 58 - " addu %0, %2 \n" 59 - " sc %0, %1 \n" 60 - " beqzl %0, 1b \n" 61 - " .set mips0 \n" 62 - : "=&r" (temp), "+m" (v->counter) 63 - : "Ir" (i)); 64 - } else if (kernel_uses_llsc) { 65 - int temp; 66 - 67 - do { 68 - __asm__ __volatile__( 69 - " .set arch=r4000 \n" 70 - " ll %0, %1 # atomic_add \n" 71 - " addu %0, %2 \n" 72 - " sc %0, %1 \n" 73 - " .set mips0 \n" 74 - : "=&r" (temp), "+m" (v->counter) 75 - : "Ir" (i)); 76 - } while (unlikely(!temp)); 77 - } else { 78 - unsigned long flags; 79 - 80 - raw_local_irq_save(flags); 81 - v->counter += i; 82 - raw_local_irq_restore(flags); 83 - } 80 + #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ 81 + static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ 82 + { \ 83 + int result; \ 84 + \ 85 + smp_mb__before_llsc(); \ 86 + \ 87 + if (kernel_uses_llsc && R10000_LLSC_WAR) { \ 88 + int temp; \ 89 + \ 90 + __asm__ __volatile__( \ 91 + " .set arch=r4000 \n" \ 92 + "1: ll %1, %2 # atomic_" #op "_return \n" \ 93 + " " #asm_op " %0, %1, %3 \n" \ 94 + " sc %0, %2 \n" \ 95 + " beqzl %0, 1b \n" \ 96 + " " #asm_op " %0, %1, %3 \n" \ 97 + " .set mips0 \n" \ 98 + : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ 99 + : "Ir" (i)); \ 100 + } else if (kernel_uses_llsc) { \ 101 + int temp; \ 102 + \ 103 + do { \ 104 + __asm__ __volatile__( \ 105 + " .set arch=r4000 \n" \ 106 + " ll %1, %2 # atomic_" #op "_return \n" \ 107 + " " #asm_op " %0, %1, %3 \n" \ 108 + " sc %0, %2 \n" \ 109 + " .set mips0 \n" \ 110 + : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ 111 + : "Ir" (i)); \ 112 + } while (unlikely(!result)); \ 113 + \ 114 + result = temp; result c_op i; \ 115 + } else { \ 116 + unsigned long flags; \ 117 + \ 118 + raw_local_irq_save(flags); \ 119 + result = v->counter; \ 120 + result c_op i; \ 121 + v->counter = result; \ 122 + raw_local_irq_restore(flags); \ 123 + } \ 124 + \ 125 + smp_llsc_mb(); \ 126 + 
\ 127 + return result; \ 84 128 } 85 129 86 - /* 87 - * atomic_sub - subtract the atomic variable 88 - * @i: integer value to subtract 89 - * @v: pointer of type atomic_t 90 - * 91 - * Atomically subtracts @i from @v. 92 - */ 93 - static __inline__ void atomic_sub(int i, atomic_t * v) 94 - { 95 - if (kernel_uses_llsc && R10000_LLSC_WAR) { 96 - int temp; 130 + #define ATOMIC_OPS(op, c_op, asm_op) \ 131 + ATOMIC_OP(op, c_op, asm_op) \ 132 + ATOMIC_OP_RETURN(op, c_op, asm_op) 97 133 98 - __asm__ __volatile__( 99 - " .set arch=r4000 \n" 100 - "1: ll %0, %1 # atomic_sub \n" 101 - " subu %0, %2 \n" 102 - " sc %0, %1 \n" 103 - " beqzl %0, 1b \n" 104 - " .set mips0 \n" 105 - : "=&r" (temp), "+m" (v->counter) 106 - : "Ir" (i)); 107 - } else if (kernel_uses_llsc) { 108 - int temp; 134 + ATOMIC_OPS(add, +=, addu) 135 + ATOMIC_OPS(sub, -=, subu) 109 136 110 - do { 111 - __asm__ __volatile__( 112 - " .set arch=r4000 \n" 113 - " ll %0, %1 # atomic_sub \n" 114 - " subu %0, %2 \n" 115 - " sc %0, %1 \n" 116 - " .set mips0 \n" 117 - : "=&r" (temp), "+m" (v->counter) 118 - : "Ir" (i)); 119 - } while (unlikely(!temp)); 120 - } else { 121 - unsigned long flags; 122 - 123 - raw_local_irq_save(flags); 124 - v->counter -= i; 125 - raw_local_irq_restore(flags); 126 - } 127 - } 128 - 129 - /* 130 - * Same as above, but return the result value 131 - */ 132 - static __inline__ int atomic_add_return(int i, atomic_t * v) 133 - { 134 - int result; 135 - 136 - smp_mb__before_llsc(); 137 - 138 - if (kernel_uses_llsc && R10000_LLSC_WAR) { 139 - int temp; 140 - 141 - __asm__ __volatile__( 142 - " .set arch=r4000 \n" 143 - "1: ll %1, %2 # atomic_add_return \n" 144 - " addu %0, %1, %3 \n" 145 - " sc %0, %2 \n" 146 - " beqzl %0, 1b \n" 147 - " addu %0, %1, %3 \n" 148 - " .set mips0 \n" 149 - : "=&r" (result), "=&r" (temp), "+m" (v->counter) 150 - : "Ir" (i)); 151 - } else if (kernel_uses_llsc) { 152 - int temp; 153 - 154 - do { 155 - __asm__ __volatile__( 156 - " .set arch=r4000 \n" 157 - " ll %1, %2 # atomic_add_return \n" 158 - " addu %0, %1, %3 \n" 159 - " sc %0, %2 \n" 160 - " .set mips0 \n" 161 - : "=&r" (result), "=&r" (temp), "+m" (v->counter) 162 - : "Ir" (i)); 163 - } while (unlikely(!result)); 164 - 165 - result = temp + i; 166 - } else { 167 - unsigned long flags; 168 - 169 - raw_local_irq_save(flags); 170 - result = v->counter; 171 - result += i; 172 - v->counter = result; 173 - raw_local_irq_restore(flags); 174 - } 175 - 176 - smp_llsc_mb(); 177 - 178 - return result; 179 - } 180 - 181 - static __inline__ int atomic_sub_return(int i, atomic_t * v) 182 - { 183 - int result; 184 - 185 - smp_mb__before_llsc(); 186 - 187 - if (kernel_uses_llsc && R10000_LLSC_WAR) { 188 - int temp; 189 - 190 - __asm__ __volatile__( 191 - " .set arch=r4000 \n" 192 - "1: ll %1, %2 # atomic_sub_return \n" 193 - " subu %0, %1, %3 \n" 194 - " sc %0, %2 \n" 195 - " beqzl %0, 1b \n" 196 - " subu %0, %1, %3 \n" 197 - " .set mips0 \n" 198 - : "=&r" (result), "=&r" (temp), "=m" (v->counter) 199 - : "Ir" (i), "m" (v->counter) 200 - : "memory"); 201 - 202 - result = temp - i; 203 - } else if (kernel_uses_llsc) { 204 - int temp; 205 - 206 - do { 207 - __asm__ __volatile__( 208 - " .set arch=r4000 \n" 209 - " ll %1, %2 # atomic_sub_return \n" 210 - " subu %0, %1, %3 \n" 211 - " sc %0, %2 \n" 212 - " .set mips0 \n" 213 - : "=&r" (result), "=&r" (temp), "+m" (v->counter) 214 - : "Ir" (i)); 215 - } while (unlikely(!result)); 216 - 217 - result = temp - i; 218 - } else { 219 - unsigned long flags; 220 - 221 - raw_local_irq_save(flags); 222 - result = 
v->counter; 223 - result -= i; 224 - v->counter = result; 225 - raw_local_irq_restore(flags); 226 - } 227 - 228 - smp_llsc_mb(); 229 - 230 - return result; 231 - } 137 + #undef ATOMIC_OPS 138 + #undef ATOMIC_OP_RETURN 139 + #undef ATOMIC_OP 232 140 233 141 /* 234 142 * atomic_sub_if_positive - conditionally subtract integer from atomic variable ··· 306 398 * @v: pointer of type atomic64_t 307 399 * 308 400 */ 309 - #define atomic64_read(v) (*(volatile long *)&(v)->counter) 401 + #define atomic64_read(v) ACCESS_ONCE((v)->counter) 310 402 311 403 /* 312 404 * atomic64_set - set atomic variable ··· 315 407 */ 316 408 #define atomic64_set(v, i) ((v)->counter = (i)) 317 409 318 - /* 319 - * atomic64_add - add integer to atomic variable 320 - * @i: integer value to add 321 - * @v: pointer of type atomic64_t 322 - * 323 - * Atomically adds @i to @v. 324 - */ 325 - static __inline__ void atomic64_add(long i, atomic64_t * v) 326 - { 327 - if (kernel_uses_llsc && R10000_LLSC_WAR) { 328 - long temp; 410 + #define ATOMIC64_OP(op, c_op, asm_op) \ 411 + static __inline__ void atomic64_##op(long i, atomic64_t * v) \ 412 + { \ 413 + if (kernel_uses_llsc && R10000_LLSC_WAR) { \ 414 + long temp; \ 415 + \ 416 + __asm__ __volatile__( \ 417 + " .set arch=r4000 \n" \ 418 + "1: lld %0, %1 # atomic64_" #op " \n" \ 419 + " " #asm_op " %0, %2 \n" \ 420 + " scd %0, %1 \n" \ 421 + " beqzl %0, 1b \n" \ 422 + " .set mips0 \n" \ 423 + : "=&r" (temp), "+m" (v->counter) \ 424 + : "Ir" (i)); \ 425 + } else if (kernel_uses_llsc) { \ 426 + long temp; \ 427 + \ 428 + do { \ 429 + __asm__ __volatile__( \ 430 + " .set arch=r4000 \n" \ 431 + " lld %0, %1 # atomic64_" #op "\n" \ 432 + " " #asm_op " %0, %2 \n" \ 433 + " scd %0, %1 \n" \ 434 + " .set mips0 \n" \ 435 + : "=&r" (temp), "+m" (v->counter) \ 436 + : "Ir" (i)); \ 437 + } while (unlikely(!temp)); \ 438 + } else { \ 439 + unsigned long flags; \ 440 + \ 441 + raw_local_irq_save(flags); \ 442 + v->counter c_op i; \ 443 + raw_local_irq_restore(flags); \ 444 + } \ 445 + } \ 329 446 330 - __asm__ __volatile__( 331 - " .set arch=r4000 \n" 332 - "1: lld %0, %1 # atomic64_add \n" 333 - " daddu %0, %2 \n" 334 - " scd %0, %1 \n" 335 - " beqzl %0, 1b \n" 336 - " .set mips0 \n" 337 - : "=&r" (temp), "+m" (v->counter) 338 - : "Ir" (i)); 339 - } else if (kernel_uses_llsc) { 340 - long temp; 341 - 342 - do { 343 - __asm__ __volatile__( 344 - " .set arch=r4000 \n" 345 - " lld %0, %1 # atomic64_add \n" 346 - " daddu %0, %2 \n" 347 - " scd %0, %1 \n" 348 - " .set mips0 \n" 349 - : "=&r" (temp), "+m" (v->counter) 350 - : "Ir" (i)); 351 - } while (unlikely(!temp)); 352 - } else { 353 - unsigned long flags; 354 - 355 - raw_local_irq_save(flags); 356 - v->counter += i; 357 - raw_local_irq_restore(flags); 358 - } 447 + #define ATOMIC64_OP_RETURN(op, c_op, asm_op) \ 448 + static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ 449 + { \ 450 + long result; \ 451 + \ 452 + smp_mb__before_llsc(); \ 453 + \ 454 + if (kernel_uses_llsc && R10000_LLSC_WAR) { \ 455 + long temp; \ 456 + \ 457 + __asm__ __volatile__( \ 458 + " .set arch=r4000 \n" \ 459 + "1: lld %1, %2 # atomic64_" #op "_return\n" \ 460 + " " #asm_op " %0, %1, %3 \n" \ 461 + " scd %0, %2 \n" \ 462 + " beqzl %0, 1b \n" \ 463 + " " #asm_op " %0, %1, %3 \n" \ 464 + " .set mips0 \n" \ 465 + : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ 466 + : "Ir" (i)); \ 467 + } else if (kernel_uses_llsc) { \ 468 + long temp; \ 469 + \ 470 + do { \ 471 + __asm__ __volatile__( \ 472 + " .set arch=r4000 \n" \ 473 + " lld %1, %2 # 
atomic64_" #op "_return\n" \ 474 + " " #asm_op " %0, %1, %3 \n" \ 475 + " scd %0, %2 \n" \ 476 + " .set mips0 \n" \ 477 + : "=&r" (result), "=&r" (temp), "=m" (v->counter) \ 478 + : "Ir" (i), "m" (v->counter) \ 479 + : "memory"); \ 480 + } while (unlikely(!result)); \ 481 + \ 482 + result = temp; result c_op i; \ 483 + } else { \ 484 + unsigned long flags; \ 485 + \ 486 + raw_local_irq_save(flags); \ 487 + result = v->counter; \ 488 + result c_op i; \ 489 + v->counter = result; \ 490 + raw_local_irq_restore(flags); \ 491 + } \ 492 + \ 493 + smp_llsc_mb(); \ 494 + \ 495 + return result; \ 359 496 } 360 497 361 - /* 362 - * atomic64_sub - subtract the atomic variable 363 - * @i: integer value to subtract 364 - * @v: pointer of type atomic64_t 365 - * 366 - * Atomically subtracts @i from @v. 367 - */ 368 - static __inline__ void atomic64_sub(long i, atomic64_t * v) 369 - { 370 - if (kernel_uses_llsc && R10000_LLSC_WAR) { 371 - long temp; 498 + #define ATOMIC64_OPS(op, c_op, asm_op) \ 499 + ATOMIC64_OP(op, c_op, asm_op) \ 500 + ATOMIC64_OP_RETURN(op, c_op, asm_op) 372 501 373 - __asm__ __volatile__( 374 - " .set arch=r4000 \n" 375 - "1: lld %0, %1 # atomic64_sub \n" 376 - " dsubu %0, %2 \n" 377 - " scd %0, %1 \n" 378 - " beqzl %0, 1b \n" 379 - " .set mips0 \n" 380 - : "=&r" (temp), "+m" (v->counter) 381 - : "Ir" (i)); 382 - } else if (kernel_uses_llsc) { 383 - long temp; 502 + ATOMIC64_OPS(add, +=, daddu) 503 + ATOMIC64_OPS(sub, -=, dsubu) 384 504 385 - do { 386 - __asm__ __volatile__( 387 - " .set arch=r4000 \n" 388 - " lld %0, %1 # atomic64_sub \n" 389 - " dsubu %0, %2 \n" 390 - " scd %0, %1 \n" 391 - " .set mips0 \n" 392 - : "=&r" (temp), "+m" (v->counter) 393 - : "Ir" (i)); 394 - } while (unlikely(!temp)); 395 - } else { 396 - unsigned long flags; 397 - 398 - raw_local_irq_save(flags); 399 - v->counter -= i; 400 - raw_local_irq_restore(flags); 401 - } 402 - } 403 - 404 - /* 405 - * Same as above, but return the result value 406 - */ 407 - static __inline__ long atomic64_add_return(long i, atomic64_t * v) 408 - { 409 - long result; 410 - 411 - smp_mb__before_llsc(); 412 - 413 - if (kernel_uses_llsc && R10000_LLSC_WAR) { 414 - long temp; 415 - 416 - __asm__ __volatile__( 417 - " .set arch=r4000 \n" 418 - "1: lld %1, %2 # atomic64_add_return \n" 419 - " daddu %0, %1, %3 \n" 420 - " scd %0, %2 \n" 421 - " beqzl %0, 1b \n" 422 - " daddu %0, %1, %3 \n" 423 - " .set mips0 \n" 424 - : "=&r" (result), "=&r" (temp), "+m" (v->counter) 425 - : "Ir" (i)); 426 - } else if (kernel_uses_llsc) { 427 - long temp; 428 - 429 - do { 430 - __asm__ __volatile__( 431 - " .set arch=r4000 \n" 432 - " lld %1, %2 # atomic64_add_return \n" 433 - " daddu %0, %1, %3 \n" 434 - " scd %0, %2 \n" 435 - " .set mips0 \n" 436 - : "=&r" (result), "=&r" (temp), "=m" (v->counter) 437 - : "Ir" (i), "m" (v->counter) 438 - : "memory"); 439 - } while (unlikely(!result)); 440 - 441 - result = temp + i; 442 - } else { 443 - unsigned long flags; 444 - 445 - raw_local_irq_save(flags); 446 - result = v->counter; 447 - result += i; 448 - v->counter = result; 449 - raw_local_irq_restore(flags); 450 - } 451 - 452 - smp_llsc_mb(); 453 - 454 - return result; 455 - } 456 - 457 - static __inline__ long atomic64_sub_return(long i, atomic64_t * v) 458 - { 459 - long result; 460 - 461 - smp_mb__before_llsc(); 462 - 463 - if (kernel_uses_llsc && R10000_LLSC_WAR) { 464 - long temp; 465 - 466 - __asm__ __volatile__( 467 - " .set arch=r4000 \n" 468 - "1: lld %1, %2 # atomic64_sub_return \n" 469 - " dsubu %0, %1, %3 \n" 470 - " scd %0, %2 \n" 471 - " 
beqzl %0, 1b \n" 472 - " dsubu %0, %1, %3 \n" 473 - " .set mips0 \n" 474 - : "=&r" (result), "=&r" (temp), "=m" (v->counter) 475 - : "Ir" (i), "m" (v->counter) 476 - : "memory"); 477 - } else if (kernel_uses_llsc) { 478 - long temp; 479 - 480 - do { 481 - __asm__ __volatile__( 482 - " .set arch=r4000 \n" 483 - " lld %1, %2 # atomic64_sub_return \n" 484 - " dsubu %0, %1, %3 \n" 485 - " scd %0, %2 \n" 486 - " .set mips0 \n" 487 - : "=&r" (result), "=&r" (temp), "=m" (v->counter) 488 - : "Ir" (i), "m" (v->counter) 489 - : "memory"); 490 - } while (unlikely(!result)); 491 - 492 - result = temp - i; 493 - } else { 494 - unsigned long flags; 495 - 496 - raw_local_irq_save(flags); 497 - result = v->counter; 498 - result -= i; 499 - v->counter = result; 500 - raw_local_irq_restore(flags); 501 - } 502 - 503 - smp_llsc_mb(); 504 - 505 - return result; 506 - } 505 + #undef ATOMIC64_OPS 506 + #undef ATOMIC64_OP_RETURN 507 + #undef ATOMIC64_OP 507 508 508 509 /* 509 510 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
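The MIPS macros carry three arguments: the assembler mnemonic feeds the two LL/SC paths, the C operator feeds the interrupt-disabled fallback, and the first argument only names the generated functions. In the plain LL/SC return path the new value is recomputed in C after the sc succeeds ("result = temp; result c_op i;"), which for add yields the same value as the old "result = temp + i". A short annotated sketch of one instantiation:

	ATOMIC_OPS(sub, -=, subu)
	/* generates atomic_sub() and atomic_sub_return():
	 *   "subu" is pasted into both ll/sc loops,
	 *   "-="   drives the raw_local_irq_save() fallback (v->counter -= i),
	 *   "sub"  only appears in the function names and the asm comments. */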
+44 -85
arch/mn10300/include/asm/atomic.h
··· 33 33 * @v: pointer of type atomic_t 34 34 * 35 35 * Atomically reads the value of @v. Note that the guaranteed 36 - * useful range of an atomic_t is only 24 bits. 37 36 */ 38 37 #define atomic_read(v) (ACCESS_ONCE((v)->counter)) 39 38 ··· 42 43 * @i: required value 43 44 * 44 45 * Atomically sets the value of @v to @i. Note that the guaranteed 45 - * useful range of an atomic_t is only 24 bits. 46 46 */ 47 47 #define atomic_set(v, i) (((v)->counter) = (i)) 48 48 49 - /** 50 - * atomic_add_return - add integer to atomic variable 51 - * @i: integer value to add 52 - * @v: pointer of type atomic_t 53 - * 54 - * Atomically adds @i to @v and returns the result 55 - * Note that the guaranteed useful range of an atomic_t is only 24 bits. 56 - */ 57 - static inline int atomic_add_return(int i, atomic_t *v) 58 - { 59 - int retval; 60 - #ifdef CONFIG_SMP 61 - int status; 62 - 63 - asm volatile( 64 - "1: mov %4,(_AAR,%3) \n" 65 - " mov (_ADR,%3),%1 \n" 66 - " add %5,%1 \n" 67 - " mov %1,(_ADR,%3) \n" 68 - " mov (_ADR,%3),%0 \n" /* flush */ 69 - " mov (_ASR,%3),%0 \n" 70 - " or %0,%0 \n" 71 - " bne 1b \n" 72 - : "=&r"(status), "=&r"(retval), "=m"(v->counter) 73 - : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) 74 - : "memory", "cc"); 75 - 76 - #else 77 - unsigned long flags; 78 - 79 - flags = arch_local_cli_save(); 80 - retval = v->counter; 81 - retval += i; 82 - v->counter = retval; 83 - arch_local_irq_restore(flags); 84 - #endif 85 - return retval; 49 + #define ATOMIC_OP(op) \ 50 + static inline void atomic_##op(int i, atomic_t *v) \ 51 + { \ 52 + int retval, status; \ 53 + \ 54 + asm volatile( \ 55 + "1: mov %4,(_AAR,%3) \n" \ 56 + " mov (_ADR,%3),%1 \n" \ 57 + " " #op " %5,%1 \n" \ 58 + " mov %1,(_ADR,%3) \n" \ 59 + " mov (_ADR,%3),%0 \n" /* flush */ \ 60 + " mov (_ASR,%3),%0 \n" \ 61 + " or %0,%0 \n" \ 62 + " bne 1b \n" \ 63 + : "=&r"(status), "=&r"(retval), "=m"(v->counter) \ 64 + : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \ 65 + : "memory", "cc"); \ 86 66 } 87 67 88 - /** 89 - * atomic_sub_return - subtract integer from atomic variable 90 - * @i: integer value to subtract 91 - * @v: pointer of type atomic_t 92 - * 93 - * Atomically subtracts @i from @v and returns the result 94 - * Note that the guaranteed useful range of an atomic_t is only 24 bits. 
95 - */ 96 - static inline int atomic_sub_return(int i, atomic_t *v) 97 - { 98 - int retval; 99 - #ifdef CONFIG_SMP 100 - int status; 101 - 102 - asm volatile( 103 - "1: mov %4,(_AAR,%3) \n" 104 - " mov (_ADR,%3),%1 \n" 105 - " sub %5,%1 \n" 106 - " mov %1,(_ADR,%3) \n" 107 - " mov (_ADR,%3),%0 \n" /* flush */ 108 - " mov (_ASR,%3),%0 \n" 109 - " or %0,%0 \n" 110 - " bne 1b \n" 111 - : "=&r"(status), "=&r"(retval), "=m"(v->counter) 112 - : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) 113 - : "memory", "cc"); 114 - 115 - #else 116 - unsigned long flags; 117 - flags = arch_local_cli_save(); 118 - retval = v->counter; 119 - retval -= i; 120 - v->counter = retval; 121 - arch_local_irq_restore(flags); 122 - #endif 123 - return retval; 68 + #define ATOMIC_OP_RETURN(op) \ 69 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 70 + { \ 71 + int retval, status; \ 72 + \ 73 + asm volatile( \ 74 + "1: mov %4,(_AAR,%3) \n" \ 75 + " mov (_ADR,%3),%1 \n" \ 76 + " " #op " %5,%1 \n" \ 77 + " mov %1,(_ADR,%3) \n" \ 78 + " mov (_ADR,%3),%0 \n" /* flush */ \ 79 + " mov (_ASR,%3),%0 \n" \ 80 + " or %0,%0 \n" \ 81 + " bne 1b \n" \ 82 + : "=&r"(status), "=&r"(retval), "=m"(v->counter) \ 83 + : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \ 84 + : "memory", "cc"); \ 85 + return retval; \ 124 86 } 87 + 88 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) 89 + 90 + ATOMIC_OPS(add) 91 + ATOMIC_OPS(sub) 92 + 93 + #undef ATOMIC_OPS 94 + #undef ATOMIC_OP_RETURN 95 + #undef ATOMIC_OP 125 96 126 97 static inline int atomic_add_negative(int i, atomic_t *v) 127 98 { 128 99 return atomic_add_return(i, v) < 0; 129 - } 130 - 131 - static inline void atomic_add(int i, atomic_t *v) 132 - { 133 - atomic_add_return(i, v); 134 - } 135 - 136 - static inline void atomic_sub(int i, atomic_t *v) 137 - { 138 - atomic_sub_return(i, v); 139 100 } 140 101 141 102 static inline void atomic_inc(atomic_t *v)
+71 -46
arch/parisc/include/asm/atomic.h
··· 55 55 * are atomic, so a reader never sees inconsistent values. 56 56 */ 57 57 58 - /* It's possible to reduce all atomic operations to either 59 - * __atomic_add_return, atomic_set and atomic_read (the latter 60 - * is there only for consistency). 61 - */ 62 - 63 - static __inline__ int __atomic_add_return(int i, atomic_t *v) 64 - { 65 - int ret; 66 - unsigned long flags; 67 - _atomic_spin_lock_irqsave(v, flags); 68 - 69 - ret = (v->counter += i); 70 - 71 - _atomic_spin_unlock_irqrestore(v, flags); 72 - return ret; 73 - } 74 - 75 - static __inline__ void atomic_set(atomic_t *v, int i) 58 + static __inline__ void atomic_set(atomic_t *v, int i) 76 59 { 77 60 unsigned long flags; 78 61 _atomic_spin_lock_irqsave(v, flags); ··· 67 84 68 85 static __inline__ int atomic_read(const atomic_t *v) 69 86 { 70 - return (*(volatile int *)&(v)->counter); 87 + return ACCESS_ONCE((v)->counter); 71 88 } 72 89 73 90 /* exported interface */ ··· 98 115 return c; 99 116 } 100 117 118 + #define ATOMIC_OP(op, c_op) \ 119 + static __inline__ void atomic_##op(int i, atomic_t *v) \ 120 + { \ 121 + unsigned long flags; \ 122 + \ 123 + _atomic_spin_lock_irqsave(v, flags); \ 124 + v->counter c_op i; \ 125 + _atomic_spin_unlock_irqrestore(v, flags); \ 126 + } \ 101 127 102 - #define atomic_add(i,v) ((void)(__atomic_add_return( (i),(v)))) 103 - #define atomic_sub(i,v) ((void)(__atomic_add_return(-((int) (i)),(v)))) 104 - #define atomic_inc(v) ((void)(__atomic_add_return( 1,(v)))) 105 - #define atomic_dec(v) ((void)(__atomic_add_return( -1,(v)))) 128 + #define ATOMIC_OP_RETURN(op, c_op) \ 129 + static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ 130 + { \ 131 + unsigned long flags; \ 132 + int ret; \ 133 + \ 134 + _atomic_spin_lock_irqsave(v, flags); \ 135 + ret = (v->counter c_op i); \ 136 + _atomic_spin_unlock_irqrestore(v, flags); \ 137 + \ 138 + return ret; \ 139 + } 106 140 107 - #define atomic_add_return(i,v) (__atomic_add_return( (i),(v))) 108 - #define atomic_sub_return(i,v) (__atomic_add_return(-(i),(v))) 109 - #define atomic_inc_return(v) (__atomic_add_return( 1,(v))) 110 - #define atomic_dec_return(v) (__atomic_add_return( -1,(v))) 141 + #define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op) 142 + 143 + ATOMIC_OPS(add, +=) 144 + ATOMIC_OPS(sub, -=) 145 + 146 + #undef ATOMIC_OPS 147 + #undef ATOMIC_OP_RETURN 148 + #undef ATOMIC_OP 149 + 150 + #define atomic_inc(v) (atomic_add( 1,(v))) 151 + #define atomic_dec(v) (atomic_add( -1,(v))) 152 + 153 + #define atomic_inc_return(v) (atomic_add_return( 1,(v))) 154 + #define atomic_dec_return(v) (atomic_add_return( -1,(v))) 111 155 112 156 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) 113 157 ··· 158 148 159 149 #define ATOMIC64_INIT(i) { (i) } 160 150 161 - static __inline__ s64 162 - __atomic64_add_return(s64 i, atomic64_t *v) 163 - { 164 - s64 ret; 165 - unsigned long flags; 166 - _atomic_spin_lock_irqsave(v, flags); 151 + #define ATOMIC64_OP(op, c_op) \ 152 + static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \ 153 + { \ 154 + unsigned long flags; \ 155 + \ 156 + _atomic_spin_lock_irqsave(v, flags); \ 157 + v->counter c_op i; \ 158 + _atomic_spin_unlock_irqrestore(v, flags); \ 159 + } \ 167 160 168 - ret = (v->counter += i); 169 - 170 - _atomic_spin_unlock_irqrestore(v, flags); 171 - return ret; 161 + #define ATOMIC64_OP_RETURN(op, c_op) \ 162 + static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \ 163 + { \ 164 + unsigned long flags; \ 165 + s64 ret; \ 166 + \ 167 + 
_atomic_spin_lock_irqsave(v, flags); \ 168 + ret = (v->counter c_op i); \ 169 + _atomic_spin_unlock_irqrestore(v, flags); \ 170 + \ 171 + return ret; \ 172 172 } 173 + 174 + #define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op) 175 + 176 + ATOMIC64_OPS(add, +=) 177 + ATOMIC64_OPS(sub, -=) 178 + 179 + #undef ATOMIC64_OPS 180 + #undef ATOMIC64_OP_RETURN 181 + #undef ATOMIC64_OP 173 182 174 183 static __inline__ void 175 184 atomic64_set(atomic64_t *v, s64 i) ··· 204 175 static __inline__ s64 205 176 atomic64_read(const atomic64_t *v) 206 177 { 207 - return (*(volatile long *)&(v)->counter); 178 + return ACCESS_ONCE((v)->counter); 208 179 } 209 180 210 - #define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)(i)),(v)))) 211 - #define atomic64_sub(i,v) ((void)(__atomic64_add_return(-((s64)(i)),(v)))) 212 - #define atomic64_inc(v) ((void)(__atomic64_add_return( 1,(v)))) 213 - #define atomic64_dec(v) ((void)(__atomic64_add_return( -1,(v)))) 181 + #define atomic64_inc(v) (atomic64_add( 1,(v))) 182 + #define atomic64_dec(v) (atomic64_add( -1,(v))) 214 183 215 - #define atomic64_add_return(i,v) (__atomic64_add_return( ((s64)(i)),(v))) 216 - #define atomic64_sub_return(i,v) (__atomic64_add_return(-((s64)(i)),(v))) 217 - #define atomic64_inc_return(v) (__atomic64_add_return( 1,(v))) 218 - #define atomic64_dec_return(v) (__atomic64_add_return( -1,(v))) 184 + #define atomic64_inc_return(v) (atomic64_add_return( 1,(v))) 185 + #define atomic64_dec_return(v) (atomic64_add_return( -1,(v))) 219 186 220 187 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) 221 188
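On parisc the __atomic_add_return() helper disappears: atomic_add()/atomic_sub() and their _return forms are now generated directly under the hashed spinlock, and atomic_inc()/atomic_dec() are layered on the generated names. A sketch of what ATOMIC_OP(sub, -=) produces:

	static __inline__ void atomic_sub(int i, atomic_t *v)
	{
		unsigned long flags;

		_atomic_spin_lock_irqsave(v, flags);
		v->counter -= i;		/* c_op */
		_atomic_spin_unlock_irqrestore(v, flags);
	}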
+78 -122
arch/powerpc/include/asm/atomic.h
··· 26 26 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); 27 27 } 28 28 29 - static __inline__ void atomic_add(int a, atomic_t *v) 30 - { 31 - int t; 29 + #define ATOMIC_OP(op, asm_op) \ 30 + static __inline__ void atomic_##op(int a, atomic_t *v) \ 31 + { \ 32 + int t; \ 33 + \ 34 + __asm__ __volatile__( \ 35 + "1: lwarx %0,0,%3 # atomic_" #op "\n" \ 36 + #asm_op " %0,%2,%0\n" \ 37 + PPC405_ERR77(0,%3) \ 38 + " stwcx. %0,0,%3 \n" \ 39 + " bne- 1b\n" \ 40 + : "=&r" (t), "+m" (v->counter) \ 41 + : "r" (a), "r" (&v->counter) \ 42 + : "cc"); \ 43 + } \ 32 44 33 - __asm__ __volatile__( 34 - "1: lwarx %0,0,%3 # atomic_add\n\ 35 - add %0,%2,%0\n" 36 - PPC405_ERR77(0,%3) 37 - " stwcx. %0,0,%3 \n\ 38 - bne- 1b" 39 - : "=&r" (t), "+m" (v->counter) 40 - : "r" (a), "r" (&v->counter) 41 - : "cc"); 45 + #define ATOMIC_OP_RETURN(op, asm_op) \ 46 + static __inline__ int atomic_##op##_return(int a, atomic_t *v) \ 47 + { \ 48 + int t; \ 49 + \ 50 + __asm__ __volatile__( \ 51 + PPC_ATOMIC_ENTRY_BARRIER \ 52 + "1: lwarx %0,0,%2 # atomic_" #op "_return\n" \ 53 + #asm_op " %0,%1,%0\n" \ 54 + PPC405_ERR77(0,%2) \ 55 + " stwcx. %0,0,%2 \n" \ 56 + " bne- 1b\n" \ 57 + PPC_ATOMIC_EXIT_BARRIER \ 58 + : "=&r" (t) \ 59 + : "r" (a), "r" (&v->counter) \ 60 + : "cc", "memory"); \ 61 + \ 62 + return t; \ 42 63 } 43 64 44 - static __inline__ int atomic_add_return(int a, atomic_t *v) 45 - { 46 - int t; 65 + #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op) 47 66 48 - __asm__ __volatile__( 49 - PPC_ATOMIC_ENTRY_BARRIER 50 - "1: lwarx %0,0,%2 # atomic_add_return\n\ 51 - add %0,%1,%0\n" 52 - PPC405_ERR77(0,%2) 53 - " stwcx. %0,0,%2 \n\ 54 - bne- 1b" 55 - PPC_ATOMIC_EXIT_BARRIER 56 - : "=&r" (t) 57 - : "r" (a), "r" (&v->counter) 58 - : "cc", "memory"); 67 + ATOMIC_OPS(add, add) 68 + ATOMIC_OPS(sub, subf) 59 69 60 - return t; 61 - } 70 + #undef ATOMIC_OPS 71 + #undef ATOMIC_OP_RETURN 72 + #undef ATOMIC_OP 62 73 63 74 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) 64 - 65 - static __inline__ void atomic_sub(int a, atomic_t *v) 66 - { 67 - int t; 68 - 69 - __asm__ __volatile__( 70 - "1: lwarx %0,0,%3 # atomic_sub\n\ 71 - subf %0,%2,%0\n" 72 - PPC405_ERR77(0,%3) 73 - " stwcx. %0,0,%3 \n\ 74 - bne- 1b" 75 - : "=&r" (t), "+m" (v->counter) 76 - : "r" (a), "r" (&v->counter) 77 - : "cc"); 78 - } 79 - 80 - static __inline__ int atomic_sub_return(int a, atomic_t *v) 81 - { 82 - int t; 83 - 84 - __asm__ __volatile__( 85 - PPC_ATOMIC_ENTRY_BARRIER 86 - "1: lwarx %0,0,%2 # atomic_sub_return\n\ 87 - subf %0,%1,%0\n" 88 - PPC405_ERR77(0,%2) 89 - " stwcx. %0,0,%2 \n\ 90 - bne- 1b" 91 - PPC_ATOMIC_EXIT_BARRIER 92 - : "=&r" (t) 93 - : "r" (a), "r" (&v->counter) 94 - : "cc", "memory"); 95 - 96 - return t; 97 - } 98 75 99 76 static __inline__ void atomic_inc(atomic_t *v) 100 77 { ··· 266 289 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); 267 290 } 268 291 269 - static __inline__ void atomic64_add(long a, atomic64_t *v) 270 - { 271 - long t; 272 - 273 - __asm__ __volatile__( 274 - "1: ldarx %0,0,%3 # atomic64_add\n\ 275 - add %0,%2,%0\n\ 276 - stdcx. %0,0,%3 \n\ 277 - bne- 1b" 278 - : "=&r" (t), "+m" (v->counter) 279 - : "r" (a), "r" (&v->counter) 280 - : "cc"); 292 + #define ATOMIC64_OP(op, asm_op) \ 293 + static __inline__ void atomic64_##op(long a, atomic64_t *v) \ 294 + { \ 295 + long t; \ 296 + \ 297 + __asm__ __volatile__( \ 298 + "1: ldarx %0,0,%3 # atomic64_" #op "\n" \ 299 + #asm_op " %0,%2,%0\n" \ 300 + " stdcx. 
%0,0,%3 \n" \ 301 + " bne- 1b\n" \ 302 + : "=&r" (t), "+m" (v->counter) \ 303 + : "r" (a), "r" (&v->counter) \ 304 + : "cc"); \ 281 305 } 282 306 283 - static __inline__ long atomic64_add_return(long a, atomic64_t *v) 284 - { 285 - long t; 286 - 287 - __asm__ __volatile__( 288 - PPC_ATOMIC_ENTRY_BARRIER 289 - "1: ldarx %0,0,%2 # atomic64_add_return\n\ 290 - add %0,%1,%0\n\ 291 - stdcx. %0,0,%2 \n\ 292 - bne- 1b" 293 - PPC_ATOMIC_EXIT_BARRIER 294 - : "=&r" (t) 295 - : "r" (a), "r" (&v->counter) 296 - : "cc", "memory"); 297 - 298 - return t; 307 + #define ATOMIC64_OP_RETURN(op, asm_op) \ 308 + static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \ 309 + { \ 310 + long t; \ 311 + \ 312 + __asm__ __volatile__( \ 313 + PPC_ATOMIC_ENTRY_BARRIER \ 314 + "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \ 315 + #asm_op " %0,%1,%0\n" \ 316 + " stdcx. %0,0,%2 \n" \ 317 + " bne- 1b\n" \ 318 + PPC_ATOMIC_EXIT_BARRIER \ 319 + : "=&r" (t) \ 320 + : "r" (a), "r" (&v->counter) \ 321 + : "cc", "memory"); \ 322 + \ 323 + return t; \ 299 324 } 325 + 326 + #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op) 327 + 328 + ATOMIC64_OPS(add, add) 329 + ATOMIC64_OPS(sub, subf) 330 + 331 + #undef ATOMIC64_OPS 332 + #undef ATOMIC64_OP_RETURN 333 + #undef ATOMIC64_OP 300 334 301 335 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) 302 - 303 - static __inline__ void atomic64_sub(long a, atomic64_t *v) 304 - { 305 - long t; 306 - 307 - __asm__ __volatile__( 308 - "1: ldarx %0,0,%3 # atomic64_sub\n\ 309 - subf %0,%2,%0\n\ 310 - stdcx. %0,0,%3 \n\ 311 - bne- 1b" 312 - : "=&r" (t), "+m" (v->counter) 313 - : "r" (a), "r" (&v->counter) 314 - : "cc"); 315 - } 316 - 317 - static __inline__ long atomic64_sub_return(long a, atomic64_t *v) 318 - { 319 - long t; 320 - 321 - __asm__ __volatile__( 322 - PPC_ATOMIC_ENTRY_BARRIER 323 - "1: ldarx %0,0,%2 # atomic64_sub_return\n\ 324 - subf %0,%1,%0\n\ 325 - stdcx. %0,0,%2 \n\ 326 - bne- 1b" 327 - PPC_ATOMIC_EXIT_BARRIER 328 - : "=&r" (t) 329 - : "r" (a), "r" (&v->counter) 330 - : "cc", "memory"); 331 - 332 - return t; 333 - } 334 336 335 337 static __inline__ void atomic64_inc(atomic64_t *v) 336 338 {
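For powerpc the second macro argument is the machine mnemonic, which is why subtraction is instantiated as ATOMIC_OPS(sub, subf): the generated functions keep the atomic_sub*/atomic64_sub* names while the lwarx/stwcx. (ldarx/stdcx.) loops, the PPC405_ERR77 workaround and the entry/exit barriers on the return variants stay as before. For example:

	ATOMIC64_OPS(sub, subf)	/* -> atomic64_sub(), atomic64_sub_return(), both built around a "subf" */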
+45 -74
arch/sh/include/asm/atomic-grb.h
··· 1 1 #ifndef __ASM_SH_ATOMIC_GRB_H 2 2 #define __ASM_SH_ATOMIC_GRB_H 3 3 4 - static inline void atomic_add(int i, atomic_t *v) 5 - { 6 - int tmp; 4 + #define ATOMIC_OP(op) \ 5 + static inline void atomic_##op(int i, atomic_t *v) \ 6 + { \ 7 + int tmp; \ 8 + \ 9 + __asm__ __volatile__ ( \ 10 + " .align 2 \n\t" \ 11 + " mova 1f, r0 \n\t" /* r0 = end point */ \ 12 + " mov r15, r1 \n\t" /* r1 = saved sp */ \ 13 + " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \ 14 + " mov.l @%1, %0 \n\t" /* load old value */ \ 15 + " " #op " %2, %0 \n\t" /* $op */ \ 16 + " mov.l %0, @%1 \n\t" /* store new value */ \ 17 + "1: mov r1, r15 \n\t" /* LOGOUT */ \ 18 + : "=&r" (tmp), \ 19 + "+r" (v) \ 20 + : "r" (i) \ 21 + : "memory" , "r0", "r1"); \ 22 + } \ 7 23 8 - __asm__ __volatile__ ( 9 - " .align 2 \n\t" 10 - " mova 1f, r0 \n\t" /* r0 = end point */ 11 - " mov r15, r1 \n\t" /* r1 = saved sp */ 12 - " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ 13 - " mov.l @%1, %0 \n\t" /* load old value */ 14 - " add %2, %0 \n\t" /* add */ 15 - " mov.l %0, @%1 \n\t" /* store new value */ 16 - "1: mov r1, r15 \n\t" /* LOGOUT */ 17 - : "=&r" (tmp), 18 - "+r" (v) 19 - : "r" (i) 20 - : "memory" , "r0", "r1"); 24 + #define ATOMIC_OP_RETURN(op) \ 25 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 26 + { \ 27 + int tmp; \ 28 + \ 29 + __asm__ __volatile__ ( \ 30 + " .align 2 \n\t" \ 31 + " mova 1f, r0 \n\t" /* r0 = end point */ \ 32 + " mov r15, r1 \n\t" /* r1 = saved sp */ \ 33 + " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \ 34 + " mov.l @%1, %0 \n\t" /* load old value */ \ 35 + " " #op " %2, %0 \n\t" /* $op */ \ 36 + " mov.l %0, @%1 \n\t" /* store new value */ \ 37 + "1: mov r1, r15 \n\t" /* LOGOUT */ \ 38 + : "=&r" (tmp), \ 39 + "+r" (v) \ 40 + : "r" (i) \ 41 + : "memory" , "r0", "r1"); \ 42 + \ 43 + return tmp; \ 21 44 } 22 45 23 - static inline void atomic_sub(int i, atomic_t *v) 24 - { 25 - int tmp; 46 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) 26 47 27 - __asm__ __volatile__ ( 28 - " .align 2 \n\t" 29 - " mova 1f, r0 \n\t" /* r0 = end point */ 30 - " mov r15, r1 \n\t" /* r1 = saved sp */ 31 - " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ 32 - " mov.l @%1, %0 \n\t" /* load old value */ 33 - " sub %2, %0 \n\t" /* sub */ 34 - " mov.l %0, @%1 \n\t" /* store new value */ 35 - "1: mov r1, r15 \n\t" /* LOGOUT */ 36 - : "=&r" (tmp), 37 - "+r" (v) 38 - : "r" (i) 39 - : "memory" , "r0", "r1"); 40 - } 48 + ATOMIC_OPS(add) 49 + ATOMIC_OPS(sub) 41 50 42 - static inline int atomic_add_return(int i, atomic_t *v) 43 - { 44 - int tmp; 45 - 46 - __asm__ __volatile__ ( 47 - " .align 2 \n\t" 48 - " mova 1f, r0 \n\t" /* r0 = end point */ 49 - " mov r15, r1 \n\t" /* r1 = saved sp */ 50 - " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ 51 - " mov.l @%1, %0 \n\t" /* load old value */ 52 - " add %2, %0 \n\t" /* add */ 53 - " mov.l %0, @%1 \n\t" /* store new value */ 54 - "1: mov r1, r15 \n\t" /* LOGOUT */ 55 - : "=&r" (tmp), 56 - "+r" (v) 57 - : "r" (i) 58 - : "memory" , "r0", "r1"); 59 - 60 - return tmp; 61 - } 62 - 63 - static inline int atomic_sub_return(int i, atomic_t *v) 64 - { 65 - int tmp; 66 - 67 - __asm__ __volatile__ ( 68 - " .align 2 \n\t" 69 - " mova 1f, r0 \n\t" /* r0 = end point */ 70 - " mov r15, r1 \n\t" /* r1 = saved sp */ 71 - " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ 72 - " mov.l @%1, %0 \n\t" /* load old value */ 73 - " sub %2, %0 \n\t" /* sub */ 74 - " mov.l %0, @%1 \n\t" /* store new value */ 75 - "1: mov r1, r15 \n\t" /* LOGOUT */ 76 - : "=&r" (tmp), 77 - "+r" (v) 78 - : "r" (i) 79 - : "memory", 
"r0", "r1"); 80 - 81 - return tmp; 82 - } 51 + #undef ATOMIC_OPS 52 + #undef ATOMIC_OP_RETURN 53 + #undef ATOMIC_OP 83 54 84 55 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 85 56 {
+26 -36
arch/sh/include/asm/atomic-irq.h
··· 8 8 * forward to code at the end of this object's .text section, then 9 9 * branch back to restart the operation. 10 10 */ 11 - static inline void atomic_add(int i, atomic_t *v) 12 - { 13 - unsigned long flags; 14 11 15 - raw_local_irq_save(flags); 16 - v->counter += i; 17 - raw_local_irq_restore(flags); 12 + #define ATOMIC_OP(op, c_op) \ 13 + static inline void atomic_##op(int i, atomic_t *v) \ 14 + { \ 15 + unsigned long flags; \ 16 + \ 17 + raw_local_irq_save(flags); \ 18 + v->counter c_op i; \ 19 + raw_local_irq_restore(flags); \ 18 20 } 19 21 20 - static inline void atomic_sub(int i, atomic_t *v) 21 - { 22 - unsigned long flags; 23 - 24 - raw_local_irq_save(flags); 25 - v->counter -= i; 26 - raw_local_irq_restore(flags); 22 + #define ATOMIC_OP_RETURN(op, c_op) \ 23 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 24 + { \ 25 + unsigned long temp, flags; \ 26 + \ 27 + raw_local_irq_save(flags); \ 28 + temp = v->counter; \ 29 + temp c_op i; \ 30 + v->counter = temp; \ 31 + raw_local_irq_restore(flags); \ 32 + \ 33 + return temp; \ 27 34 } 28 35 29 - static inline int atomic_add_return(int i, atomic_t *v) 30 - { 31 - unsigned long temp, flags; 36 + #define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op) 32 37 33 - raw_local_irq_save(flags); 34 - temp = v->counter; 35 - temp += i; 36 - v->counter = temp; 37 - raw_local_irq_restore(flags); 38 + ATOMIC_OPS(add, +=) 39 + ATOMIC_OPS(sub, -=) 38 40 39 - return temp; 40 - } 41 - 42 - static inline int atomic_sub_return(int i, atomic_t *v) 43 - { 44 - unsigned long temp, flags; 45 - 46 - raw_local_irq_save(flags); 47 - temp = v->counter; 48 - temp -= i; 49 - v->counter = temp; 50 - raw_local_irq_restore(flags); 51 - 52 - return temp; 53 - } 41 + #undef ATOMIC_OPS 42 + #undef ATOMIC_OP_RETURN 43 + #undef ATOMIC_OP 54 44 55 45 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 56 46 {
+43 -62
arch/sh/include/asm/atomic-llsc.h
··· 2 2 #define __ASM_SH_ATOMIC_LLSC_H 3 3 4 4 /* 5 - * To get proper branch prediction for the main line, we must branch 6 - * forward to code at the end of this object's .text section, then 7 - * branch back to restart the operation. 8 - */ 9 - static inline void atomic_add(int i, atomic_t *v) 10 - { 11 - unsigned long tmp; 12 - 13 - __asm__ __volatile__ ( 14 - "1: movli.l @%2, %0 ! atomic_add \n" 15 - " add %1, %0 \n" 16 - " movco.l %0, @%2 \n" 17 - " bf 1b \n" 18 - : "=&z" (tmp) 19 - : "r" (i), "r" (&v->counter) 20 - : "t"); 21 - } 22 - 23 - static inline void atomic_sub(int i, atomic_t *v) 24 - { 25 - unsigned long tmp; 26 - 27 - __asm__ __volatile__ ( 28 - "1: movli.l @%2, %0 ! atomic_sub \n" 29 - " sub %1, %0 \n" 30 - " movco.l %0, @%2 \n" 31 - " bf 1b \n" 32 - : "=&z" (tmp) 33 - : "r" (i), "r" (&v->counter) 34 - : "t"); 35 - } 36 - 37 - /* 38 5 * SH-4A note: 39 6 * 40 7 * We basically get atomic_xxx_return() for free compared with ··· 9 42 * encoding, so the retval is automatically set without having to 10 43 * do any special work. 11 44 */ 12 - static inline int atomic_add_return(int i, atomic_t *v) 13 - { 14 - unsigned long temp; 45 + /* 46 + * To get proper branch prediction for the main line, we must branch 47 + * forward to code at the end of this object's .text section, then 48 + * branch back to restart the operation. 49 + */ 15 50 16 - __asm__ __volatile__ ( 17 - "1: movli.l @%2, %0 ! atomic_add_return \n" 18 - " add %1, %0 \n" 19 - " movco.l %0, @%2 \n" 20 - " bf 1b \n" 21 - " synco \n" 22 - : "=&z" (temp) 23 - : "r" (i), "r" (&v->counter) 24 - : "t"); 25 - 26 - return temp; 51 + #define ATOMIC_OP(op) \ 52 + static inline void atomic_##op(int i, atomic_t *v) \ 53 + { \ 54 + unsigned long tmp; \ 55 + \ 56 + __asm__ __volatile__ ( \ 57 + "1: movli.l @%2, %0 ! atomic_" #op "\n" \ 58 + " " #op " %1, %0 \n" \ 59 + " movco.l %0, @%2 \n" \ 60 + " bf 1b \n" \ 61 + : "=&z" (tmp) \ 62 + : "r" (i), "r" (&v->counter) \ 63 + : "t"); \ 27 64 } 28 65 29 - static inline int atomic_sub_return(int i, atomic_t *v) 30 - { 31 - unsigned long temp; 32 - 33 - __asm__ __volatile__ ( 34 - "1: movli.l @%2, %0 ! atomic_sub_return \n" 35 - " sub %1, %0 \n" 36 - " movco.l %0, @%2 \n" 37 - " bf 1b \n" 38 - " synco \n" 39 - : "=&z" (temp) 40 - : "r" (i), "r" (&v->counter) 41 - : "t"); 42 - 43 - return temp; 66 + #define ATOMIC_OP_RETURN(op) \ 67 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 68 + { \ 69 + unsigned long temp; \ 70 + \ 71 + __asm__ __volatile__ ( \ 72 + "1: movli.l @%2, %0 ! atomic_" #op "_return \n" \ 73 + " " #op " %1, %0 \n" \ 74 + " movco.l %0, @%2 \n" \ 75 + " bf 1b \n" \ 76 + " synco \n" \ 77 + : "=&z" (temp) \ 78 + : "r" (i), "r" (&v->counter) \ 79 + : "t"); \ 80 + \ 81 + return temp; \ 44 82 } 83 + 84 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) 85 + 86 + ATOMIC_OPS(add) 87 + ATOMIC_OPS(sub) 88 + 89 + #undef ATOMIC_OPS 90 + #undef ATOMIC_OP_RETURN 91 + #undef ATOMIC_OP 45 92 46 93 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 47 94 {
+1 -1
arch/sh/include/asm/atomic.h
··· 14 14 15 15 #define ATOMIC_INIT(i) { (i) } 16 16 17 - #define atomic_read(v) (*(volatile int *)&(v)->counter) 17 + #define atomic_read(v) ACCESS_ONCE((v)->counter) 18 18 #define atomic_set(v,i) ((v)->counter = (i)) 19 19 20 20 #if defined(CONFIG_GUSA_RB)
+9 -10
arch/sparc/include/asm/atomic_32.h
··· 20 20 21 21 #define ATOMIC_INIT(i) { (i) } 22 22 23 - int __atomic_add_return(int, atomic_t *); 23 + int atomic_add_return(int, atomic_t *); 24 24 int atomic_cmpxchg(atomic_t *, int, int); 25 25 #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 26 26 int __atomic_add_unless(atomic_t *, int, int); 27 27 void atomic_set(atomic_t *, int); 28 28 29 - #define atomic_read(v) (*(volatile int *)&(v)->counter) 29 + #define atomic_read(v) ACCESS_ONCE((v)->counter) 30 30 31 - #define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v))) 32 - #define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v))) 33 - #define atomic_inc(v) ((void)__atomic_add_return( 1, (v))) 34 - #define atomic_dec(v) ((void)__atomic_add_return( -1, (v))) 31 + #define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v))) 32 + #define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v))) 33 + #define atomic_inc(v) ((void)atomic_add_return( 1, (v))) 34 + #define atomic_dec(v) ((void)atomic_add_return( -1, (v))) 35 35 36 - #define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v))) 37 - #define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v))) 38 - #define atomic_inc_return(v) (__atomic_add_return( 1, (v))) 39 - #define atomic_dec_return(v) (__atomic_add_return( -1, (v))) 36 + #define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v))) 37 + #define atomic_inc_return(v) (atomic_add_return( 1, (v))) 38 + #define atomic_dec_return(v) (atomic_add_return( -1, (v))) 40 39 41 40 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) 42 41
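On 32-bit sparc the out-of-line helper is renamed from __atomic_add_return() to atomic_add_return(), and every other operation remains a macro over it, so callers are unaffected. A hypothetical caller for illustration (example() is not from the patch):

	static int example(atomic_t *v)
	{
		atomic_inc(v);			/* (void)atomic_add_return(1, v) */
		return atomic_sub_return(3, v);	/* atomic_add_return(-3, v)      */
	}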
+25 -24
arch/sparc/include/asm/atomic_64.h
··· 14 14 #define ATOMIC_INIT(i) { (i) } 15 15 #define ATOMIC64_INIT(i) { (i) } 16 16 17 - #define atomic_read(v) (*(volatile int *)&(v)->counter) 18 - #define atomic64_read(v) (*(volatile long *)&(v)->counter) 17 + #define atomic_read(v) ACCESS_ONCE((v)->counter) 18 + #define atomic64_read(v) ACCESS_ONCE((v)->counter) 19 19 20 20 #define atomic_set(v, i) (((v)->counter) = i) 21 21 #define atomic64_set(v, i) (((v)->counter) = i) 22 22 23 - void atomic_add(int, atomic_t *); 24 - void atomic64_add(long, atomic64_t *); 25 - void atomic_sub(int, atomic_t *); 26 - void atomic64_sub(long, atomic64_t *); 23 + #define ATOMIC_OP(op) \ 24 + void atomic_##op(int, atomic_t *); \ 25 + void atomic64_##op(long, atomic64_t *); 27 26 28 - int atomic_add_ret(int, atomic_t *); 29 - long atomic64_add_ret(long, atomic64_t *); 30 - int atomic_sub_ret(int, atomic_t *); 31 - long atomic64_sub_ret(long, atomic64_t *); 27 + #define ATOMIC_OP_RETURN(op) \ 28 + int atomic_##op##_return(int, atomic_t *); \ 29 + long atomic64_##op##_return(long, atomic64_t *); 32 30 33 - #define atomic_dec_return(v) atomic_sub_ret(1, v) 34 - #define atomic64_dec_return(v) atomic64_sub_ret(1, v) 31 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) 35 32 36 - #define atomic_inc_return(v) atomic_add_ret(1, v) 37 - #define atomic64_inc_return(v) atomic64_add_ret(1, v) 33 + ATOMIC_OPS(add) 34 + ATOMIC_OPS(sub) 38 35 39 - #define atomic_sub_return(i, v) atomic_sub_ret(i, v) 40 - #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v) 36 + #undef ATOMIC_OPS 37 + #undef ATOMIC_OP_RETURN 38 + #undef ATOMIC_OP 41 39 42 - #define atomic_add_return(i, v) atomic_add_ret(i, v) 43 - #define atomic64_add_return(i, v) atomic64_add_ret(i, v) 40 + #define atomic_dec_return(v) atomic_sub_return(1, v) 41 + #define atomic64_dec_return(v) atomic64_sub_return(1, v) 42 + 43 + #define atomic_inc_return(v) atomic_add_return(1, v) 44 + #define atomic64_inc_return(v) atomic64_add_return(1, v) 44 45 45 46 /* 46 47 * atomic_inc_and_test - increment and test ··· 54 53 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) 55 54 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) 56 55 57 - #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0) 58 - #define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0) 56 + #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) 57 + #define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0) 59 58 60 - #define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0) 61 - #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0) 59 + #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) 60 + #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0) 62 61 63 62 #define atomic_inc(v) atomic_add(1, v) 64 63 #define atomic64_inc(v) atomic64_add(1, v) ··· 66 65 #define atomic_dec(v) atomic_sub(1, v) 67 66 #define atomic64_dec(v) atomic64_sub(1, v) 68 67 69 - #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) 70 - #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) 68 + #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0) 69 + #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0) 71 70 72 71 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) 73 72 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
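On sparc64 the same macros only declare prototypes; the bodies live in arch/sparc/lib/atomic_64.S below, and the old atomic*_add_ret()/atomic*_sub_ret() spellings become *_return(), which ripples into smp_64.c and ksyms.c further down. Expanding one invocation:

	ATOMIC_OPS(add)
	/* declares:
	 *   void atomic_add(int, atomic_t *);
	 *   void atomic64_add(long, atomic64_t *);
	 *   int  atomic_add_return(int, atomic_t *);
	 *   long atomic64_add_return(long, atomic64_t *);
	 */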
+1 -1
arch/sparc/kernel/smp_64.c
··· 1138 1138 1139 1139 void smp_capture(void) 1140 1140 { 1141 - int result = atomic_add_ret(1, &smp_capture_depth); 1141 + int result = atomic_add_return(1, &smp_capture_depth); 1142 1142 1143 1143 if (result == 1) { 1144 1144 int ncpus = num_online_cpus();
+15 -10
arch/sparc/lib/atomic32.c
··· 27 27 28 28 #endif /* SMP */ 29 29 30 - int __atomic_add_return(int i, atomic_t *v) 31 - { 32 - int ret; 33 - unsigned long flags; 34 - spin_lock_irqsave(ATOMIC_HASH(v), flags); 30 + #define ATOMIC_OP(op, cop) \ 31 + int atomic_##op##_return(int i, atomic_t *v) \ 32 + { \ 33 + int ret; \ 34 + unsigned long flags; \ 35 + spin_lock_irqsave(ATOMIC_HASH(v), flags); \ 36 + \ 37 + ret = (v->counter cop i); \ 38 + \ 39 + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ 40 + return ret; \ 41 + } \ 42 + EXPORT_SYMBOL(atomic_##op##_return); 35 43 36 - ret = (v->counter += i); 44 + ATOMIC_OP(add, +=) 37 45 38 - spin_unlock_irqrestore(ATOMIC_HASH(v), flags); 39 - return ret; 40 - } 41 - EXPORT_SYMBOL(__atomic_add_return); 46 + #undef ATOMIC_OP 42 47 43 48 int atomic_cmpxchg(atomic_t *v, int old, int new) 44 49 {
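The 32-bit sparc library only instantiates the add variant; everything else, including subtraction, is derived in atomic_32.h by negating the argument, e.g.:

	#define atomic_sub_return(i, v)	(atomic_add_return(-(int)(i), (v)))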
+67 -96
arch/sparc/lib/atomic_64.S
··· 14 14 * memory barriers, and a second which returns 15 15 * a value and does the barriers. 16 16 */ 17 - ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */ 18 - BACKOFF_SETUP(%o2) 19 - 1: lduw [%o1], %g1 20 - add %g1, %o0, %g7 21 - cas [%o1], %g1, %g7 22 - cmp %g1, %g7 23 - bne,pn %icc, BACKOFF_LABEL(2f, 1b) 24 - nop 25 - retl 26 - nop 27 - 2: BACKOFF_SPIN(%o2, %o3, 1b) 28 - ENDPROC(atomic_add) 29 17 30 - ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */ 31 - BACKOFF_SETUP(%o2) 32 - 1: lduw [%o1], %g1 33 - sub %g1, %o0, %g7 34 - cas [%o1], %g1, %g7 35 - cmp %g1, %g7 36 - bne,pn %icc, BACKOFF_LABEL(2f, 1b) 37 - nop 38 - retl 39 - nop 40 - 2: BACKOFF_SPIN(%o2, %o3, 1b) 41 - ENDPROC(atomic_sub) 18 + #define ATOMIC_OP(op) \ 19 + ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ 20 + BACKOFF_SETUP(%o2); \ 21 + 1: lduw [%o1], %g1; \ 22 + op %g1, %o0, %g7; \ 23 + cas [%o1], %g1, %g7; \ 24 + cmp %g1, %g7; \ 25 + bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ 26 + nop; \ 27 + retl; \ 28 + nop; \ 29 + 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 30 + ENDPROC(atomic_##op); \ 42 31 43 - ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ 44 - BACKOFF_SETUP(%o2) 45 - 1: lduw [%o1], %g1 46 - add %g1, %o0, %g7 47 - cas [%o1], %g1, %g7 48 - cmp %g1, %g7 49 - bne,pn %icc, BACKOFF_LABEL(2f, 1b) 50 - add %g1, %o0, %g1 51 - retl 52 - sra %g1, 0, %o0 53 - 2: BACKOFF_SPIN(%o2, %o3, 1b) 54 - ENDPROC(atomic_add_ret) 32 + #define ATOMIC_OP_RETURN(op) \ 33 + ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ 34 + BACKOFF_SETUP(%o2); \ 35 + 1: lduw [%o1], %g1; \ 36 + op %g1, %o0, %g7; \ 37 + cas [%o1], %g1, %g7; \ 38 + cmp %g1, %g7; \ 39 + bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ 40 + op %g1, %o0, %g1; \ 41 + retl; \ 42 + sra %g1, 0, %o0; \ 43 + 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 44 + ENDPROC(atomic_##op##_return); 55 45 56 - ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */ 57 - BACKOFF_SETUP(%o2) 58 - 1: lduw [%o1], %g1 59 - sub %g1, %o0, %g7 60 - cas [%o1], %g1, %g7 61 - cmp %g1, %g7 62 - bne,pn %icc, BACKOFF_LABEL(2f, 1b) 63 - sub %g1, %o0, %g1 64 - retl 65 - sra %g1, 0, %o0 66 - 2: BACKOFF_SPIN(%o2, %o3, 1b) 67 - ENDPROC(atomic_sub_ret) 46 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) 68 47 69 - ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */ 70 - BACKOFF_SETUP(%o2) 71 - 1: ldx [%o1], %g1 72 - add %g1, %o0, %g7 73 - casx [%o1], %g1, %g7 74 - cmp %g1, %g7 75 - bne,pn %xcc, BACKOFF_LABEL(2f, 1b) 76 - nop 77 - retl 78 - nop 79 - 2: BACKOFF_SPIN(%o2, %o3, 1b) 80 - ENDPROC(atomic64_add) 48 + ATOMIC_OPS(add) 49 + ATOMIC_OPS(sub) 81 50 82 - ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */ 83 - BACKOFF_SETUP(%o2) 84 - 1: ldx [%o1], %g1 85 - sub %g1, %o0, %g7 86 - casx [%o1], %g1, %g7 87 - cmp %g1, %g7 88 - bne,pn %xcc, BACKOFF_LABEL(2f, 1b) 89 - nop 90 - retl 91 - nop 92 - 2: BACKOFF_SPIN(%o2, %o3, 1b) 93 - ENDPROC(atomic64_sub) 51 + #undef ATOMIC_OPS 52 + #undef ATOMIC_OP_RETURN 53 + #undef ATOMIC_OP 94 54 95 - ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ 96 - BACKOFF_SETUP(%o2) 97 - 1: ldx [%o1], %g1 98 - add %g1, %o0, %g7 99 - casx [%o1], %g1, %g7 100 - cmp %g1, %g7 101 - bne,pn %xcc, BACKOFF_LABEL(2f, 1b) 102 - nop 103 - retl 104 - add %g1, %o0, %o0 105 - 2: BACKOFF_SPIN(%o2, %o3, 1b) 106 - ENDPROC(atomic64_add_ret) 55 + #define ATOMIC64_OP(op) \ 56 + ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ 57 + BACKOFF_SETUP(%o2); \ 58 + 1: ldx [%o1], %g1; \ 59 + op %g1, %o0, %g7; \ 60 + casx [%o1], %g1, %g7; 
\ 61 + cmp %g1, %g7; \ 62 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ 63 + nop; \ 64 + retl; \ 65 + nop; \ 66 + 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 67 + ENDPROC(atomic64_##op); \ 107 68 108 - ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */ 109 - BACKOFF_SETUP(%o2) 110 - 1: ldx [%o1], %g1 111 - sub %g1, %o0, %g7 112 - casx [%o1], %g1, %g7 113 - cmp %g1, %g7 114 - bne,pn %xcc, BACKOFF_LABEL(2f, 1b) 115 - nop 116 - retl 117 - sub %g1, %o0, %o0 118 - 2: BACKOFF_SPIN(%o2, %o3, 1b) 119 - ENDPROC(atomic64_sub_ret) 69 + #define ATOMIC64_OP_RETURN(op) \ 70 + ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ 71 + BACKOFF_SETUP(%o2); \ 72 + 1: ldx [%o1], %g1; \ 73 + op %g1, %o0, %g7; \ 74 + casx [%o1], %g1, %g7; \ 75 + cmp %g1, %g7; \ 76 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ 77 + nop; \ 78 + retl; \ 79 + op %g1, %o0, %o0; \ 80 + 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 81 + ENDPROC(atomic64_##op##_return); 82 + 83 + #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) 84 + 85 + ATOMIC64_OPS(add) 86 + ATOMIC64_OPS(sub) 87 + 88 + #undef ATOMIC64_OPS 89 + #undef ATOMIC64_OP_RETURN 90 + #undef ATOMIC64_OP 120 91 121 92 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ 122 93 BACKOFF_SETUP(%o2)
+17 -8
arch/sparc/lib/ksyms.c
··· 99 99 EXPORT_SYMBOL(__clear_user); 100 100 101 101 /* Atomic counter implementation. */ 102 - EXPORT_SYMBOL(atomic_add); 103 - EXPORT_SYMBOL(atomic_add_ret); 104 - EXPORT_SYMBOL(atomic_sub); 105 - EXPORT_SYMBOL(atomic_sub_ret); 106 - EXPORT_SYMBOL(atomic64_add); 107 - EXPORT_SYMBOL(atomic64_add_ret); 108 - EXPORT_SYMBOL(atomic64_sub); 109 - EXPORT_SYMBOL(atomic64_sub_ret); 102 + #define ATOMIC_OP(op) \ 103 + EXPORT_SYMBOL(atomic_##op); \ 104 + EXPORT_SYMBOL(atomic64_##op); 105 + 106 + #define ATOMIC_OP_RETURN(op) \ 107 + EXPORT_SYMBOL(atomic_##op##_return); \ 108 + EXPORT_SYMBOL(atomic64_##op##_return); 109 + 110 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) 111 + 112 + ATOMIC_OPS(add) 113 + ATOMIC_OPS(sub) 114 + 115 + #undef ATOMIC_OPS 116 + #undef ATOMIC_OP_RETURN 117 + #undef ATOMIC_OP 118 + 110 119 EXPORT_SYMBOL(atomic64_dec_if_positive); 111 120 112 121 /* Atomic bit operations. */
+1 -16
arch/x86/include/asm/atomic.h
··· 24 24 */ 25 25 static inline int atomic_read(const atomic_t *v) 26 26 { 27 - return (*(volatile int *)&(v)->counter); 27 + return ACCESS_ONCE((v)->counter); 28 28 } 29 29 30 30 /** ··· 218 218 asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); 219 219 return *v; 220 220 } 221 - 222 - #ifdef CONFIG_X86_64 223 - /** 224 - * atomic_or_long - OR of two long integers 225 - * @v1: pointer to type unsigned long 226 - * @v2: pointer to type unsigned long 227 - * 228 - * Atomically ORs @v1 and @v2 229 - * Returns the result of the OR 230 - */ 231 - static inline void atomic_or_long(unsigned long *v1, unsigned long v2) 232 - { 233 - asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2)); 234 - } 235 - #endif 236 221 237 222 /* These are x86-specific, used by some header files */ 238 223 #define atomic_clear_mask(mask, addr) \
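The x86 change also drops the unused atomic_or_long() helper, matching the commit message. The atomic_read()/atomic64_read() conversions here and in the other headers are behaviour-preserving: ACCESS_ONCE() was defined at the time as essentially the same volatile cast, it just states the intent explicitly. Roughly, from include/linux/compiler.h:

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))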
+1 -1
arch/x86/include/asm/atomic64_64.h
··· 18 18 */ 19 19 static inline long atomic64_read(const atomic64_t *v) 20 20 { 21 - return (*(volatile long *)&(v)->counter); 21 + return ACCESS_ONCE((v)->counter); 22 22 } 23 23 24 24 /**
+80 -149
arch/xtensa/include/asm/atomic.h
··· 47 47 * 48 48 * Atomically reads the value of @v. 49 49 */ 50 - #define atomic_read(v) (*(volatile int *)&(v)->counter) 50 + #define atomic_read(v) ACCESS_ONCE((v)->counter) 51 51 52 52 /** 53 53 * atomic_set - set atomic variable ··· 58 58 */ 59 59 #define atomic_set(v,i) ((v)->counter = (i)) 60 60 61 - /** 62 - * atomic_add - add integer to atomic variable 63 - * @i: integer value to add 64 - * @v: pointer of type atomic_t 65 - * 66 - * Atomically adds @i to @v. 67 - */ 68 - static inline void atomic_add(int i, atomic_t * v) 69 - { 70 61 #if XCHAL_HAVE_S32C1I 71 - unsigned long tmp; 72 - int result; 62 + #define ATOMIC_OP(op) \ 63 + static inline void atomic_##op(int i, atomic_t * v) \ 64 + { \ 65 + unsigned long tmp; \ 66 + int result; \ 67 + \ 68 + __asm__ __volatile__( \ 69 + "1: l32i %1, %3, 0\n" \ 70 + " wsr %1, scompare1\n" \ 71 + " " #op " %0, %1, %2\n" \ 72 + " s32c1i %0, %3, 0\n" \ 73 + " bne %0, %1, 1b\n" \ 74 + : "=&a" (result), "=&a" (tmp) \ 75 + : "a" (i), "a" (v) \ 76 + : "memory" \ 77 + ); \ 78 + } \ 73 79 74 - __asm__ __volatile__( 75 - "1: l32i %1, %3, 0\n" 76 - " wsr %1, scompare1\n" 77 - " add %0, %1, %2\n" 78 - " s32c1i %0, %3, 0\n" 79 - " bne %0, %1, 1b\n" 80 - : "=&a" (result), "=&a" (tmp) 81 - : "a" (i), "a" (v) 82 - : "memory" 83 - ); 84 - #else 85 - unsigned int vval; 86 - 87 - __asm__ __volatile__( 88 - " rsil a15, "__stringify(LOCKLEVEL)"\n" 89 - " l32i %0, %2, 0\n" 90 - " add %0, %0, %1\n" 91 - " s32i %0, %2, 0\n" 92 - " wsr a15, ps\n" 93 - " rsync\n" 94 - : "=&a" (vval) 95 - : "a" (i), "a" (v) 96 - : "a15", "memory" 97 - ); 98 - #endif 80 + #define ATOMIC_OP_RETURN(op) \ 81 + static inline int atomic_##op##_return(int i, atomic_t * v) \ 82 + { \ 83 + unsigned long tmp; \ 84 + int result; \ 85 + \ 86 + __asm__ __volatile__( \ 87 + "1: l32i %1, %3, 0\n" \ 88 + " wsr %1, scompare1\n" \ 89 + " " #op " %0, %1, %2\n" \ 90 + " s32c1i %0, %3, 0\n" \ 91 + " bne %0, %1, 1b\n" \ 92 + " " #op " %0, %0, %2\n" \ 93 + : "=&a" (result), "=&a" (tmp) \ 94 + : "a" (i), "a" (v) \ 95 + : "memory" \ 96 + ); \ 97 + \ 98 + return result; \ 99 99 } 100 100 101 - /** 102 - * atomic_sub - subtract the atomic variable 103 - * @i: integer value to subtract 104 - * @v: pointer of type atomic_t 105 - * 106 - * Atomically subtracts @i from @v. 
107 - */ 108 - static inline void atomic_sub(int i, atomic_t *v) 109 - { 110 - #if XCHAL_HAVE_S32C1I 111 - unsigned long tmp; 112 - int result; 101 + #else /* XCHAL_HAVE_S32C1I */ 113 102 114 - __asm__ __volatile__( 115 - "1: l32i %1, %3, 0\n" 116 - " wsr %1, scompare1\n" 117 - " sub %0, %1, %2\n" 118 - " s32c1i %0, %3, 0\n" 119 - " bne %0, %1, 1b\n" 120 - : "=&a" (result), "=&a" (tmp) 121 - : "a" (i), "a" (v) 122 - : "memory" 123 - ); 124 - #else 125 - unsigned int vval; 103 + #define ATOMIC_OP(op) \ 104 + static inline void atomic_##op(int i, atomic_t * v) \ 105 + { \ 106 + unsigned int vval; \ 107 + \ 108 + __asm__ __volatile__( \ 109 + " rsil a15, "__stringify(LOCKLEVEL)"\n"\ 110 + " l32i %0, %2, 0\n" \ 111 + " " #op " %0, %0, %1\n" \ 112 + " s32i %0, %2, 0\n" \ 113 + " wsr a15, ps\n" \ 114 + " rsync\n" \ 115 + : "=&a" (vval) \ 116 + : "a" (i), "a" (v) \ 117 + : "a15", "memory" \ 118 + ); \ 119 + } \ 126 120 127 - __asm__ __volatile__( 128 - " rsil a15, "__stringify(LOCKLEVEL)"\n" 129 - " l32i %0, %2, 0\n" 130 - " sub %0, %0, %1\n" 131 - " s32i %0, %2, 0\n" 132 - " wsr a15, ps\n" 133 - " rsync\n" 134 - : "=&a" (vval) 135 - : "a" (i), "a" (v) 136 - : "a15", "memory" 137 - ); 138 - #endif 121 + #define ATOMIC_OP_RETURN(op) \ 122 + static inline int atomic_##op##_return(int i, atomic_t * v) \ 123 + { \ 124 + unsigned int vval; \ 125 + \ 126 + __asm__ __volatile__( \ 127 + " rsil a15,"__stringify(LOCKLEVEL)"\n" \ 128 + " l32i %0, %2, 0\n" \ 129 + " " #op " %0, %0, %1\n" \ 130 + " s32i %0, %2, 0\n" \ 131 + " wsr a15, ps\n" \ 132 + " rsync\n" \ 133 + : "=&a" (vval) \ 134 + : "a" (i), "a" (v) \ 135 + : "a15", "memory" \ 136 + ); \ 137 + \ 138 + return vval; \ 139 139 } 140 140 141 - /* 142 - * We use atomic_{add|sub}_return to define other functions. 143 - */ 141 + #endif /* XCHAL_HAVE_S32C1I */ 144 142 145 - static inline int atomic_add_return(int i, atomic_t * v) 146 - { 147 - #if XCHAL_HAVE_S32C1I 148 - unsigned long tmp; 149 - int result; 143 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) 150 144 151 - __asm__ __volatile__( 152 - "1: l32i %1, %3, 0\n" 153 - " wsr %1, scompare1\n" 154 - " add %0, %1, %2\n" 155 - " s32c1i %0, %3, 0\n" 156 - " bne %0, %1, 1b\n" 157 - " add %0, %0, %2\n" 158 - : "=&a" (result), "=&a" (tmp) 159 - : "a" (i), "a" (v) 160 - : "memory" 161 - ); 145 + ATOMIC_OPS(add) 146 + ATOMIC_OPS(sub) 162 147 163 - return result; 164 - #else 165 - unsigned int vval; 166 - 167 - __asm__ __volatile__( 168 - " rsil a15,"__stringify(LOCKLEVEL)"\n" 169 - " l32i %0, %2, 0\n" 170 - " add %0, %0, %1\n" 171 - " s32i %0, %2, 0\n" 172 - " wsr a15, ps\n" 173 - " rsync\n" 174 - : "=&a" (vval) 175 - : "a" (i), "a" (v) 176 - : "a15", "memory" 177 - ); 178 - 179 - return vval; 180 - #endif 181 - } 182 - 183 - static inline int atomic_sub_return(int i, atomic_t * v) 184 - { 185 - #if XCHAL_HAVE_S32C1I 186 - unsigned long tmp; 187 - int result; 188 - 189 - __asm__ __volatile__( 190 - "1: l32i %1, %3, 0\n" 191 - " wsr %1, scompare1\n" 192 - " sub %0, %1, %2\n" 193 - " s32c1i %0, %3, 0\n" 194 - " bne %0, %1, 1b\n" 195 - " sub %0, %0, %2\n" 196 - : "=&a" (result), "=&a" (tmp) 197 - : "a" (i), "a" (v) 198 - : "memory" 199 - ); 200 - 201 - return result; 202 - #else 203 - unsigned int vval; 204 - 205 - __asm__ __volatile__( 206 - " rsil a15,"__stringify(LOCKLEVEL)"\n" 207 - " l32i %0, %2, 0\n" 208 - " sub %0, %0, %1\n" 209 - " s32i %0, %2, 0\n" 210 - " wsr a15, ps\n" 211 - " rsync\n" 212 - : "=&a" (vval) 213 - : "a" (i), "a" (v) 214 - : "a15", "memory" 215 - ); 216 - 217 - return vval; 
218 - #endif 219 - } 148 + #undef ATOMIC_OPS 149 + #undef ATOMIC_OP_RETURN 150 + #undef ATOMIC_OP 220 151 221 152 /** 222 153 * atomic_sub_and_test - subtract value from variable and test result
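The xtensa fold keeps both code paths: with XCHAL_HAVE_S32C1I the op becomes a compare-and-swap retry loop (l32i / wsr scompare1 / s32c1i / bne), otherwise interrupts are masked around a plain load-op-store via rsil / wsr ps. Either way, the two ATOMIC_OPS() lines instantiate the same four functions the removed open-coded versions provided; sketching the result (signatures only, my illustration, bodies come from whichever #if branch is selected):

/* Illustrative: what ATOMIC_OPS(add) and ATOMIC_OPS(sub) instantiate */
static inline void atomic_add(int i, atomic_t *v);        /* ATOMIC_OP(add)        */
static inline int  atomic_add_return(int i, atomic_t *v); /* ATOMIC_OP_RETURN(add) */
static inline void atomic_sub(int i, atomic_t *v);        /* ATOMIC_OP(sub)        */
static inline int  atomic_sub_return(int i, atomic_t *v); /* ATOMIC_OP_RETURN(sub) */

Note the extra " #op " after the bne in the S32C1I _return variant: s32c1i writes the value it found in memory back into %0 (that is what the bne compares against), so once the loop exits the op is applied one more time to turn the old value into the post-operation result that _return hands back.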
+97 -97
include/asm-generic/atomic.h
··· 18 18 #include <asm/cmpxchg.h> 19 19 #include <asm/barrier.h> 20 20 21 + /* 22 + * atomic_$op() - $op integer to atomic variable 23 + * @i: integer value to $op 24 + * @v: pointer to the atomic variable 25 + * 26 + * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use 27 + * smp_mb__{before,after}_atomic(). 28 + */ 29 + 30 + /* 31 + * atomic_$op_return() - $op integer to atomic variable and returns the result 32 + * @i: integer value to $op 33 + * @v: pointer to the atomic variable 34 + * 35 + * Atomically $ops @i to @v. Does imply a full memory barrier. 36 + */ 37 + 21 38 #ifdef CONFIG_SMP 22 - /* Force people to define core atomics */ 23 - # if !defined(atomic_add_return) || !defined(atomic_sub_return) || \ 24 - !defined(atomic_clear_mask) || !defined(atomic_set_mask) 25 - # error "SMP requires a little arch-specific magic" 26 - # endif 39 + 40 + /* we can build all atomic primitives from cmpxchg */ 41 + 42 + #define ATOMIC_OP(op, c_op) \ 43 + static inline void atomic_##op(int i, atomic_t *v) \ 44 + { \ 45 + int c, old; \ 46 + \ 47 + c = v->counter; \ 48 + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ 49 + c = old; \ 50 + } 51 + 52 + #define ATOMIC_OP_RETURN(op, c_op) \ 53 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 54 + { \ 55 + int c, old; \ 56 + \ 57 + c = v->counter; \ 58 + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ 59 + c = old; \ 60 + \ 61 + return c c_op i; \ 62 + } 63 + 64 + #else 65 + 66 + #include <linux/irqflags.h> 67 + 68 + #define ATOMIC_OP(op, c_op) \ 69 + static inline void atomic_##op(int i, atomic_t *v) \ 70 + { \ 71 + unsigned long flags; \ 72 + \ 73 + raw_local_irq_save(flags); \ 74 + v->counter = v->counter c_op i; \ 75 + raw_local_irq_restore(flags); \ 76 + } 77 + 78 + #define ATOMIC_OP_RETURN(op, c_op) \ 79 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 80 + { \ 81 + unsigned long flags; \ 82 + int ret; \ 83 + \ 84 + raw_local_irq_save(flags); \ 85 + ret = (v->counter = v->counter c_op i); \ 86 + raw_local_irq_restore(flags); \ 87 + \ 88 + return ret; \ 89 + } 90 + 91 + #endif /* CONFIG_SMP */ 92 + 93 + #ifndef atomic_add_return 94 + ATOMIC_OP_RETURN(add, +) 27 95 #endif 96 + 97 + #ifndef atomic_sub_return 98 + ATOMIC_OP_RETURN(sub, -) 99 + #endif 100 + 101 + #ifndef atomic_clear_mask 102 + ATOMIC_OP(and, &) 103 + #define atomic_clear_mask(i, v) atomic_and(~(i), (v)) 104 + #endif 105 + 106 + #ifndef atomic_set_mask 107 + #define CONFIG_ARCH_HAS_ATOMIC_OR 108 + ATOMIC_OP(or, |) 109 + #define atomic_set_mask(i, v) atomic_or((i), (v)) 110 + #endif 111 + 112 + #undef ATOMIC_OP_RETURN 113 + #undef ATOMIC_OP 28 114 29 115 /* 30 116 * Atomic operations that C can't guarantee us. Useful for ··· 119 33 120 34 #define ATOMIC_INIT(i) { (i) } 121 35 122 - #ifdef __KERNEL__ 123 - 124 36 /** 125 37 * atomic_read - read atomic variable 126 38 * @v: pointer of type atomic_t ··· 126 42 * Atomically reads the value of @v. 
127 43 */ 128 44 #ifndef atomic_read 129 - #define atomic_read(v) (*(volatile int *)&(v)->counter) 45 + #define atomic_read(v) ACCESS_ONCE((v)->counter) 130 46 #endif 131 47 132 48 /** ··· 139 55 #define atomic_set(v, i) (((v)->counter) = (i)) 140 56 141 57 #include <linux/irqflags.h> 142 - 143 - /** 144 - * atomic_add_return - add integer to atomic variable 145 - * @i: integer value to add 146 - * @v: pointer of type atomic_t 147 - * 148 - * Atomically adds @i to @v and returns the result 149 - */ 150 - #ifndef atomic_add_return 151 - static inline int atomic_add_return(int i, atomic_t *v) 152 - { 153 - unsigned long flags; 154 - int temp; 155 - 156 - raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ 157 - temp = v->counter; 158 - temp += i; 159 - v->counter = temp; 160 - raw_local_irq_restore(flags); 161 - 162 - return temp; 163 - } 164 - #endif 165 - 166 - /** 167 - * atomic_sub_return - subtract integer from atomic variable 168 - * @i: integer value to subtract 169 - * @v: pointer of type atomic_t 170 - * 171 - * Atomically subtracts @i from @v and returns the result 172 - */ 173 - #ifndef atomic_sub_return 174 - static inline int atomic_sub_return(int i, atomic_t *v) 175 - { 176 - unsigned long flags; 177 - int temp; 178 - 179 - raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ 180 - temp = v->counter; 181 - temp -= i; 182 - v->counter = temp; 183 - raw_local_irq_restore(flags); 184 - 185 - return temp; 186 - } 187 - #endif 188 58 189 59 static inline int atomic_add_negative(int i, atomic_t *v) 190 60 { ··· 177 139 178 140 static inline int __atomic_add_unless(atomic_t *v, int a, int u) 179 141 { 180 - int c, old; 181 - c = atomic_read(v); 182 - while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) 183 - c = old; 184 - return c; 142 + int c, old; 143 + c = atomic_read(v); 144 + while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) 145 + c = old; 146 + return c; 185 147 } 186 148 187 - /** 188 - * atomic_clear_mask - Atomically clear bits in atomic variable 189 - * @mask: Mask of the bits to be cleared 190 - * @v: pointer of type atomic_t 191 - * 192 - * Atomically clears the bits set in @mask from @v 193 - */ 194 - #ifndef atomic_clear_mask 195 - static inline void atomic_clear_mask(unsigned long mask, atomic_t *v) 196 - { 197 - unsigned long flags; 198 - 199 - mask = ~mask; 200 - raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ 201 - v->counter &= mask; 202 - raw_local_irq_restore(flags); 203 - } 204 - #endif 205 - 206 - /** 207 - * atomic_set_mask - Atomically set bits in atomic variable 208 - * @mask: Mask of the bits to be set 209 - * @v: pointer of type atomic_t 210 - * 211 - * Atomically sets the bits set in @mask in @v 212 - */ 213 - #ifndef atomic_set_mask 214 - static inline void atomic_set_mask(unsigned int mask, atomic_t *v) 215 - { 216 - unsigned long flags; 217 - 218 - raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ 219 - v->counter |= mask; 220 - raw_local_irq_restore(flags); 221 - } 222 - #endif 223 - 224 - #endif /* __KERNEL__ */ 225 149 #endif /* __ASM_GENERIC_ATOMIC_H */
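To make the new generic SMP path concrete, here is a rough user-space rendering of the loop that ATOMIC_OP_RETURN(add, +) expands to. This is only a sketch: atomic_t is reduced to its counter field, and GCC's __sync_val_compare_and_swap() builtin stands in for the kernel's arch-provided cmpxchg().

#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Sketch of the ATOMIC_OP_RETURN(add, +) expansion from the hunk above */
static int atomic_add_return(int i, atomic_t *v)
{
	int c, old;

	c = v->counter;
	/* retry until the compare-and-swap sees the value we based the update on */
	while ((old = __sync_val_compare_and_swap(&v->counter, c, c + i)) != c)
		c = old;

	return c + i;
}

int main(void)
{
	atomic_t v = { .counter = 40 };

	printf("%d\n", atomic_add_return(2, &v));	/* prints 42 */
	return 0;
}

The same compare-and-swap loop, minus the return value, is what ATOMIC_OP(and, &) and ATOMIC_OP(or, |) generate on SMP for the new atomic_clear_mask()/atomic_set_mask() fallbacks; on !SMP both macros fall back to the raw_local_irq_save()/restore() variants instead.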
+16 -4
include/asm-generic/atomic64.h
··· 20 20 21 21 extern long long atomic64_read(const atomic64_t *v); 22 22 extern void atomic64_set(atomic64_t *v, long long i); 23 - extern void atomic64_add(long long a, atomic64_t *v); 24 - extern long long atomic64_add_return(long long a, atomic64_t *v); 25 - extern void atomic64_sub(long long a, atomic64_t *v); 26 - extern long long atomic64_sub_return(long long a, atomic64_t *v); 23 + 24 + #define ATOMIC64_OP(op) \ 25 + extern void atomic64_##op(long long a, atomic64_t *v); 26 + 27 + #define ATOMIC64_OP_RETURN(op) \ 28 + extern long long atomic64_##op##_return(long long a, atomic64_t *v); 29 + 30 + #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) 31 + 32 + ATOMIC64_OPS(add) 33 + ATOMIC64_OPS(sub) 34 + 35 + #undef ATOMIC64_OPS 36 + #undef ATOMIC64_OP_RETURN 37 + #undef ATOMIC64_OP 38 + 27 39 extern long long atomic64_dec_if_positive(atomic64_t *v); 28 40 extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n); 29 41 extern long long atomic64_xchg(atomic64_t *v, long long new);
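The asm-generic/atomic64.h side is the matching set of declarations; keeping them macro-generated means a future op needs one ATOMIC64_OPS() line here and one in lib/atomic64.c below. For instance (illustrative expansion, not part of the patch), ATOMIC64_OPS(add) declares:

/* Sketch: what ATOMIC64_OPS(add) above expands to */
extern void atomic64_add(long long a, atomic64_t *v);
extern long long atomic64_add_return(long long a, atomic64_t *v);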
+32 -43
lib/atomic64.c
··· 70 70 } 71 71 EXPORT_SYMBOL(atomic64_set); 72 72 73 - void atomic64_add(long long a, atomic64_t *v) 74 - { 75 - unsigned long flags; 76 - raw_spinlock_t *lock = lock_addr(v); 73 + #define ATOMIC64_OP(op, c_op) \ 74 + void atomic64_##op(long long a, atomic64_t *v) \ 75 + { \ 76 + unsigned long flags; \ 77 + raw_spinlock_t *lock = lock_addr(v); \ 78 + \ 79 + raw_spin_lock_irqsave(lock, flags); \ 80 + v->counter c_op a; \ 81 + raw_spin_unlock_irqrestore(lock, flags); \ 82 + } \ 83 + EXPORT_SYMBOL(atomic64_##op); 77 84 78 - raw_spin_lock_irqsave(lock, flags); 79 - v->counter += a; 80 - raw_spin_unlock_irqrestore(lock, flags); 81 - } 82 - EXPORT_SYMBOL(atomic64_add); 85 + #define ATOMIC64_OP_RETURN(op, c_op) \ 86 + long long atomic64_##op##_return(long long a, atomic64_t *v) \ 87 + { \ 88 + unsigned long flags; \ 89 + raw_spinlock_t *lock = lock_addr(v); \ 90 + long long val; \ 91 + \ 92 + raw_spin_lock_irqsave(lock, flags); \ 93 + val = (v->counter c_op a); \ 94 + raw_spin_unlock_irqrestore(lock, flags); \ 95 + return val; \ 96 + } \ 97 + EXPORT_SYMBOL(atomic64_##op##_return); 83 98 84 - long long atomic64_add_return(long long a, atomic64_t *v) 85 - { 86 - unsigned long flags; 87 - raw_spinlock_t *lock = lock_addr(v); 88 - long long val; 99 + #define ATOMIC64_OPS(op, c_op) \ 100 + ATOMIC64_OP(op, c_op) \ 101 + ATOMIC64_OP_RETURN(op, c_op) 89 102 90 - raw_spin_lock_irqsave(lock, flags); 91 - val = v->counter += a; 92 - raw_spin_unlock_irqrestore(lock, flags); 93 - return val; 94 - } 95 - EXPORT_SYMBOL(atomic64_add_return); 103 + ATOMIC64_OPS(add, +=) 104 + ATOMIC64_OPS(sub, -=) 96 105 97 - void atomic64_sub(long long a, atomic64_t *v) 98 - { 99 - unsigned long flags; 100 - raw_spinlock_t *lock = lock_addr(v); 101 - 102 - raw_spin_lock_irqsave(lock, flags); 103 - v->counter -= a; 104 - raw_spin_unlock_irqrestore(lock, flags); 105 - } 106 - EXPORT_SYMBOL(atomic64_sub); 107 - 108 - long long atomic64_sub_return(long long a, atomic64_t *v) 109 - { 110 - unsigned long flags; 111 - raw_spinlock_t *lock = lock_addr(v); 112 - long long val; 113 - 114 - raw_spin_lock_irqsave(lock, flags); 115 - val = v->counter -= a; 116 - raw_spin_unlock_irqrestore(lock, flags); 117 - return val; 118 - } 119 - EXPORT_SYMBOL(atomic64_sub_return); 106 + #undef ATOMIC64_OPS 107 + #undef ATOMIC64_OP_RETURN 108 + #undef ATOMIC64_OP 120 109 121 110 long long atomic64_dec_if_positive(atomic64_t *v) 122 111 {
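Finally, the spinlock-based library implementation: each op body is now stamped out once, with the C operator substituted via c_op. Expanding ATOMIC64_OP(add, +=) by hand (again an illustration, not part of the patch), the add case comes out as the same lock_addr()-protected sequence the removed open-coded function had:

/* Sketch: hand expansion of ATOMIC64_OP(add, +=) from lib/atomic64.c above */
void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);	/* lock_addr() picks the spinlock for this variable */

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;			/* "v->counter c_op a" with c_op = += */
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);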