Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

locking/atomic, arch/sparc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()

Implement FETCH-OP atomic primitives, these are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops (because it becomes impossible to reconstruct the state prior
to modification).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: James Y Knight <jyknight@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: sparclinux@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Peter Zijlstra, committed by Ingo Molnar
commit 3a1adb23 (parent 7d9794e7)

+109 -38
+1 -0
arch/sparc/include/asm/atomic.h
··· 5 5 #else 6 6 #include <asm/atomic_32.h> 7 7 #endif 8 + #define atomic_fetch_or atomic_fetch_or 8 9 #endif
+12 -3
arch/sparc/include/asm/atomic_32.h
··· 20 20 #define ATOMIC_INIT(i) { (i) } 21 21 22 22 int atomic_add_return(int, atomic_t *); 23 - void atomic_and(int, atomic_t *); 24 - void atomic_or(int, atomic_t *); 25 - void atomic_xor(int, atomic_t *); 23 + int atomic_fetch_add(int, atomic_t *); 24 + int atomic_fetch_and(int, atomic_t *); 25 + int atomic_fetch_or(int, atomic_t *); 26 + int atomic_fetch_xor(int, atomic_t *); 26 27 int atomic_cmpxchg(atomic_t *, int, int); 27 28 int atomic_xchg(atomic_t *, int); 28 29 int __atomic_add_unless(atomic_t *, int, int); ··· 36 35 #define atomic_inc(v) ((void)atomic_add_return( 1, (v))) 37 36 #define atomic_dec(v) ((void)atomic_add_return( -1, (v))) 38 37 38 + #define atomic_fetch_or atomic_fetch_or 39 + 40 + #define atomic_and(i, v) ((void)atomic_fetch_and((i), (v))) 41 + #define atomic_or(i, v) ((void)atomic_fetch_or((i), (v))) 42 + #define atomic_xor(i, v) ((void)atomic_fetch_xor((i), (v))) 43 + 39 44 #define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v))) 45 + #define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v))) 46 + 40 47 #define atomic_inc_return(v) (atomic_add_return( 1, (v))) 41 48 #define atomic_dec_return(v) (atomic_add_return( -1, (v))) 42 49
+12 -4
arch/sparc/include/asm/atomic_64.h
··· 28 28 int atomic_##op##_return(int, atomic_t *); \ 29 29 long atomic64_##op##_return(long, atomic64_t *); 30 30 31 - #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) 31 + #define ATOMIC_FETCH_OP(op) \ 32 + int atomic_fetch_##op(int, atomic_t *); \ 33 + long atomic64_fetch_##op(long, atomic64_t *); 34 + 35 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) 32 36 33 37 ATOMIC_OPS(add) 34 38 ATOMIC_OPS(sub) 35 39 36 - ATOMIC_OP(and) 37 - ATOMIC_OP(or) 38 - ATOMIC_OP(xor) 40 + #undef ATOMIC_OPS 41 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) 42 + 43 + ATOMIC_OPS(and) 44 + ATOMIC_OPS(or) 45 + ATOMIC_OPS(xor) 39 46 40 47 #undef ATOMIC_OPS 48 + #undef ATOMIC_FETCH_OP 41 49 #undef ATOMIC_OP_RETURN 42 50 #undef ATOMIC_OP 43 51
+21 -16
arch/sparc/lib/atomic32.c
··· 27 27 28 28 #endif /* SMP */ 29 29 30 + #define ATOMIC_FETCH_OP(op, c_op) \ 31 + int atomic_fetch_##op(int i, atomic_t *v) \ 32 + { \ 33 + int ret; \ 34 + unsigned long flags; \ 35 + spin_lock_irqsave(ATOMIC_HASH(v), flags); \ 36 + \ 37 + ret = v->counter; \ 38 + v->counter c_op i; \ 39 + \ 40 + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ 41 + return ret; \ 42 + } \ 43 + EXPORT_SYMBOL(atomic_fetch_##op); 44 + 30 45 #define ATOMIC_OP_RETURN(op, c_op) \ 31 46 int atomic_##op##_return(int i, atomic_t *v) \ 32 47 { \ ··· 56 41 } \ 57 42 EXPORT_SYMBOL(atomic_##op##_return); 58 43 59 - #define ATOMIC_OP(op, c_op) \ 60 - void atomic_##op(int i, atomic_t *v) \ 61 - { \ 62 - unsigned long flags; \ 63 - spin_lock_irqsave(ATOMIC_HASH(v), flags); \ 64 - \ 65 - v->counter c_op i; \ 66 - \ 67 - spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ 68 - } \ 69 - EXPORT_SYMBOL(atomic_##op); 70 - 71 44 ATOMIC_OP_RETURN(add, +=) 72 - ATOMIC_OP(and, &=) 73 - ATOMIC_OP(or, |=) 74 - ATOMIC_OP(xor, ^=) 75 45 46 + ATOMIC_FETCH_OP(add, +=) 47 + ATOMIC_FETCH_OP(and, &=) 48 + ATOMIC_FETCH_OP(or, |=) 49 + ATOMIC_FETCH_OP(xor, ^=) 50 + 51 + #undef ATOMIC_FETCH_OP 76 52 #undef ATOMIC_OP_RETURN 77 - #undef ATOMIC_OP 78 53 79 54 int atomic_xchg(atomic_t *v, int new) 80 55 {
+50 -11
arch/sparc/lib/atomic_64.S
··· 9 9 10 10 .text 11 11 12 - /* Two versions of the atomic routines, one that 12 + /* Three versions of the atomic routines, one that 13 13 * does not return a value and does not perform 14 - * memory barriers, and a second which returns 15 - * a value and does the barriers. 14 + * memory barriers, and a two which return 15 + * a value, the new and old value resp. and does the 16 + * barriers. 16 17 */ 17 18 18 19 #define ATOMIC_OP(op) \ ··· 44 43 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 45 44 ENDPROC(atomic_##op##_return); 46 45 47 - #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) 46 + #define ATOMIC_FETCH_OP(op) \ 47 + ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ 48 + BACKOFF_SETUP(%o2); \ 49 + 1: lduw [%o1], %g1; \ 50 + op %g1, %o0, %g7; \ 51 + cas [%o1], %g1, %g7; \ 52 + cmp %g1, %g7; \ 53 + bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ 54 + nop; \ 55 + retl; \ 56 + sra %g1, 0, %o0; \ 57 + 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 58 + ENDPROC(atomic_fetch_##op); 59 + 60 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) 48 61 49 62 ATOMIC_OPS(add) 50 63 ATOMIC_OPS(sub) 51 - ATOMIC_OP(and) 52 - ATOMIC_OP(or) 53 - ATOMIC_OP(xor) 54 64 55 65 #undef ATOMIC_OPS 66 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) 67 + 68 + ATOMIC_OPS(and) 69 + ATOMIC_OPS(or) 70 + ATOMIC_OPS(xor) 71 + 72 + #undef ATOMIC_OPS 73 + #undef ATOMIC_FETCH_OP 56 74 #undef ATOMIC_OP_RETURN 57 75 #undef ATOMIC_OP 58 76 ··· 103 83 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 104 84 ENDPROC(atomic64_##op##_return); 105 85 106 - #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) 86 + #define ATOMIC64_FETCH_OP(op) \ 87 + ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ 88 + BACKOFF_SETUP(%o2); \ 89 + 1: ldx [%o1], %g1; \ 90 + op %g1, %o0, %g7; \ 91 + casx [%o1], %g1, %g7; \ 92 + cmp %g1, %g7; \ 93 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ 94 + nop; \ 95 + retl; \ 96 + mov %g1, %o0; \ 97 + 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 98 + ENDPROC(atomic64_fetch_##op); 99 + 100 + #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) 107 101 108 102 ATOMIC64_OPS(add) 109 103 ATOMIC64_OPS(sub) 110 - ATOMIC64_OP(and) 111 - ATOMIC64_OP(or) 112 - ATOMIC64_OP(xor) 113 104 114 105 #undef ATOMIC64_OPS 106 + #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op) 107 + 108 + ATOMIC64_OPS(and) 109 + ATOMIC64_OPS(or) 110 + ATOMIC64_OPS(xor) 111 + 112 + #undef ATOMIC64_OPS 113 + #undef ATOMIC64_FETCH_OP 115 114 #undef ATOMIC64_OP_RETURN 116 115 #undef ATOMIC64_OP 117 116
+13 -4
arch/sparc/lib/ksyms.c
··· 107 107 EXPORT_SYMBOL(atomic_##op##_return); \ 108 108 EXPORT_SYMBOL(atomic64_##op##_return); 109 109 110 - #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) 110 + #define ATOMIC_FETCH_OP(op) \ 111 + EXPORT_SYMBOL(atomic_fetch_##op); \ 112 + EXPORT_SYMBOL(atomic64_fetch_##op); 113 + 114 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) 111 115 112 116 ATOMIC_OPS(add) 113 117 ATOMIC_OPS(sub) 114 - ATOMIC_OP(and) 115 - ATOMIC_OP(or) 116 - ATOMIC_OP(xor) 117 118 118 119 #undef ATOMIC_OPS 120 + #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) 121 + 122 + ATOMIC_OPS(and) 123 + ATOMIC_OPS(or) 124 + ATOMIC_OPS(xor) 125 + 126 + #undef ATOMIC_OPS 127 + #undef ATOMIC_FETCH_OP 119 128 #undef ATOMIC_OP_RETURN 120 129 #undef ATOMIC_OP 121 130