Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/atomic, arch/avr32: Implement atomic_fetch_{add,sub,and,or,xor}()

Implement FETCH-OP atomic primitives, these are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops (because it becomes impossible to reconstruct the state prior
to modification).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Peter Zijlstra; committed by Ingo Molnar.
1a6eafac 2efe95fe

+51 −5
arch/avr32/include/asm/atomic.h
···
 41  41		return result;					\
 42  42	}
 43  43
     44	+#define ATOMIC_FETCH_OP(op, asm_op, asm_con)			\
     45	+static inline int __atomic_fetch_##op(int i, atomic_t *v)	\
     46	+{								\
     47	+	int result, val;					\
     48	+								\
     49	+	asm volatile(						\
     50	+		"/* atomic_fetch_" #op " */\n"			\
     51	+		"1:	ssrf	5\n"				\
     52	+		"	ld.w	%0, %3\n"			\
     53	+		"	mov	%1, %0\n"			\
     54	+		"	" #asm_op "	%1, %4\n"		\
     55	+		"	stcond	%2, %1\n"			\
     56	+		"	brne	1b"				\
     57	+		: "=&r" (result), "=&r" (val), "=o" (v->counter)	\
     58	+		: "m" (v->counter), #asm_con (i)		\
     59	+		: "cc");					\
     60	+								\
     61	+	return result;						\
     62	+}
     63	+
 44  64	ATOMIC_OP_RETURN(sub, sub, rKs21)
 45  65	ATOMIC_OP_RETURN(add, add, r)
     66	+ATOMIC_FETCH_OP (sub, sub, rKs21)
     67	+ATOMIC_FETCH_OP (add, add, r)
 46  68
 47	-#define ATOMIC_OP(op, asm_op)					\
     69	+#define atomic_fetch_or atomic_fetch_or
     70	+
     71	+#define ATOMIC_OPS(op, asm_op)					\
 48  72		ATOMIC_OP_RETURN(op, asm_op, r)				\
 49  73	static inline void atomic_##op(int i, atomic_t *v)		\
 50  74	{								\
 51  75		(void)__atomic_##op##_return(i, v);			\
     76	+}								\
     77	+ATOMIC_FETCH_OP(op, asm_op, r)					\
     78	+static inline int atomic_fetch_##op(int i, atomic_t *v)	\
     79	+{								\
     80	+	return __atomic_fetch_##op(i, v);			\
 52  81	}
 53  82
 54	-ATOMIC_OP(and, and)
 55	-ATOMIC_OP(or, or)
 56	-ATOMIC_OP(xor, eor)
     83	+ATOMIC_OPS(and, and)
     84	+ATOMIC_OPS(or, or)
     85	+ATOMIC_OPS(xor, eor)
 57  86
 58	-#undef ATOMIC_OP
     87	+#undef ATOMIC_OPS
     88	+#undef ATOMIC_FETCH_OP
 59  89	#undef ATOMIC_OP_RETURN
 60  90
 61  91	/*
···
117  87		return __atomic_add_return(i, v);
118  88	}
119  89
     90	+static inline int atomic_fetch_add(int i, atomic_t *v)
     91	+{
     92	+	if (IS_21BIT_CONST(i))
     93	+		return __atomic_fetch_sub(-i, v);
     94	+
     95	+	return __atomic_fetch_add(i, v);
     96	+}
     97	+
120  98	/*
121  99	 * atomic_sub_return - subtract the atomic variable
122 100	 * @i: integer value to subtract
···
138 100		return __atomic_sub_return(i, v);
139 101
140 102		return __atomic_add_return(-i, v);
    103	+}
    104	+
    105	+static inline int atomic_fetch_sub(int i, atomic_t *v)
    106	+{
    107	+	if (IS_21BIT_CONST(i))
    108	+		return __atomic_fetch_sub(i, v);
    109	+
    110	+	return __atomic_fetch_add(-i, v);
    111	+}
141 111	}
142 112
143 113	/*