
tile: Provide atomic_{or,xor,and}

Implement atomic logic ops -- atomic_{or,xor,and}.

For tilegx, these are relatively straightforward; the architecture
provides atomic "or" and "and", both 32-bit and 64-bit. To support
xor we provide a loop using "cmpexch".
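In sketch form, that xor fallback is a compare-exchange retry loop. The annotated version below is taken from the tilegx hunk further down (illustrative sketch only; it uses the __insn_mtspr/__insn_cmpexch4 intrinsics that appear in the diff):

        /* Sketch of the tilegx atomic_xor retry loop (see atomic_64.h below). */
        static inline void atomic_xor(int i, atomic_t *v)
        {
                int guess, oldval = v->counter;
                do {
                        guess = oldval;                         /* value we expect to find */
                        __insn_mtspr(SPR_CMPEXCH_VALUE, guess); /* arm the compare value */
                        oldval = __insn_cmpexch4(&v->counter, guess ^ i); /* try to install guess ^ i */
                } while (guess != oldval);                      /* someone raced us; retry */
        }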

For the older 32-bit tilepro architecture, we have to extend
the set of low-level assembly routines to include 32-bit "and",
as well as all three 64-bit routines. Somewhat confusingly,
some 32-bit versions are already used by the bitops inlines, with
parameter types appropriate for bitops, so we have to do a bit of
casting to match "int" to "unsigned long".
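Concretely, the 32-bit wrapper in atomic_32.h (see the ATOMIC_OP macro in the diff below) does exactly that cast. A sketch of what ATOMIC_OP(and) expands to, with the reasoning as comments:

        /* Sketch of the expansion of ATOMIC_OP(and) from the diff below. */
        unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask);

        static inline void atomic_and(int i, atomic_t *v)
        {
                /*
                 * v->counter is an int, but the shared low-level routine
                 * (also used by the bitops inlines) takes unsigned long *,
                 * hence the cast.
                 */
                _atomic_and((unsigned long *)&v->counter, i);
        }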

Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1436474297-32187-1-git-send-email-cmetcalf@ezchip.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Authored by Chris Metcalf and committed by Thomas Gleixner (2957c035, 73ada370).

99 lines added across 4 files.

arch/tile/include/asm/atomic_32.h (+30)
···
 	_atomic_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC_OP(op) \
+unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+	_atomic_##op((unsigned long *)&v->counter, i); \
+}
+
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * atomic_add_return - add integer and return
  * @v: pointer of type atomic_t
···
 {
 	_atomic64_xchg_add(&v->counter, i);
 }
+
+#define ATOMIC64_OP(op) \
+long long _atomic64_##op(long long *v, long long n); \
+static inline void atomic64_##op(long long i, atomic64_t *v) \
+{ \
+	_atomic64_##op(&v->counter, i); \
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
 
 /**
  * atomic64_add_return - add integer and return
···
 extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 						  int *lock, int o, int n);
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
···
 					long long n);
 extern long long __atomic64_xchg_add_unless(volatile long long *p,
 					int *lock, long long o, long long n);
+extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
arch/tile/include/asm/atomic_64.h (+42)
···
 	return oldval;
 }
 
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+static inline void atomic_and(int i, atomic_t *v)
+{
+	__insn_fetchand4((void *)&v->counter, i);
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	__insn_fetchor4((void *)&v->counter, i);
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	int guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 /* Now the true 64-bit operations. */
 
 #define ATOMIC64_INIT(i)	{ (i) }
···
 		oldval = cmpxchg(&v->counter, guess, guess + a);
 	} while (guess != oldval);
 	return oldval != u;
+}
+
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	__insn_fetchand((void *)&v->counter, i);
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	__insn_fetchor((void *)&v->counter, i);
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	long guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch(&v->counter, guess ^ i);
+	} while (guess != oldval);
 }
 
 #define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
arch/tile/lib/atomic_32.c (+23)
···
 }
 EXPORT_SYMBOL(_atomic_or);
 
+unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+{
+	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+}
+EXPORT_SYMBOL(_atomic_and);
+
 unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
 {
 	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
···
 }
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
+long long _atomic64_and(long long *v, long long n)
+{
+	return __atomic64_and(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_and);
+
+long long _atomic64_or(long long *v, long long n)
+{
+	return __atomic64_or(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_or);
+
+long long _atomic64_xor(long long *v, long long n)
+{
+	return __atomic64_xor(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_xor);
 
 /*
  * If any of the atomic or futex routines hit a bad address (not in
arch/tile/lib/atomic_asm_32.S (+4)
···
 atomic_op _xchg_add_unless, 32, \
 	"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
 atomic_op _or, 32, "or r24, r22, r2"
+atomic_op _and, 32, "and r24, r22, r2"
 atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
 atomic_op _xor, 32, "xor r24, r22, r2"
 
···
 	{ bbns r26, 3f; add r24, r22, r4 }; \
 	{ bbns r27, 3f; add r25, r23, r5 }; \
 	slt_u r26, r24, r22; add r25, r25, r26"
+atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
+atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
+atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
 
 	jrp lr /* happy backtracer */
 
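For context, a hypothetical caller of the new generic ops could look like the following (illustrative only; the example function and mask values are made up and not part of the patch):

        #include <linux/atomic.h>

        static atomic_t example_flags = ATOMIC_INIT(0);

        static void example_update(void)
        {
                atomic_or(0x4, &example_flags);   /* atomically set bit 2 */
                atomic_and(~0x1, &example_flags); /* atomically clear bit 0 */
                atomic_xor(0x2, &example_flags);  /* atomically toggle bit 1 */
        }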