Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/atomic: refactor atomic primitives

Rework atomic.h to make the low level functions available for use
in other headers without using atomic_t, e.g. in bitops.h.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

+170 -169
+39 -168
arch/s390/include/asm/atomic.h
··· 1 1 /* 2 - * Copyright IBM Corp. 1999, 2009 2 + * Copyright IBM Corp. 1999, 2016 3 3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, 4 4 * Denis Joseph Barrow, 5 - * Arnd Bergmann <arndb@de.ibm.com>, 6 - * 7 - * Atomic operations that C can't guarantee us. 8 - * Useful for resource counting etc. 9 - * s390 uses 'Compare And Swap' for atomicity in SMP environment. 10 - * 5 + * Arnd Bergmann, 11 6 */ 12 7 13 8 #ifndef __ARCH_S390_ATOMIC__ ··· 10 15 11 16 #include <linux/compiler.h> 12 17 #include <linux/types.h> 18 + #include <asm/atomic_ops.h> 13 19 #include <asm/barrier.h> 14 20 #include <asm/cmpxchg.h> 15 21 16 22 #define ATOMIC_INIT(i) { (i) } 17 - 18 - #define __ATOMIC_NO_BARRIER "\n" 19 - 20 - #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 21 - 22 - #define __ATOMIC_OR "lao" 23 - #define __ATOMIC_AND "lan" 24 - #define __ATOMIC_ADD "laa" 25 - #define __ATOMIC_XOR "lax" 26 - #define __ATOMIC_BARRIER "bcr 14,0\n" 27 - 28 - #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \ 29 - ({ \ 30 - int old_val; \ 31 - \ 32 - typecheck(atomic_t *, ptr); \ 33 - asm volatile( \ 34 - op_string " %0,%2,%1\n" \ 35 - __barrier \ 36 - : "=d" (old_val), "+Q" ((ptr)->counter) \ 37 - : "d" (op_val) \ 38 - : "cc", "memory"); \ 39 - old_val; \ 40 - }) 41 - 42 - #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 43 - 44 - #define __ATOMIC_OR "or" 45 - #define __ATOMIC_AND "nr" 46 - #define __ATOMIC_ADD "ar" 47 - #define __ATOMIC_XOR "xr" 48 - #define __ATOMIC_BARRIER "\n" 49 - 50 - #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \ 51 - ({ \ 52 - int old_val, new_val; \ 53 - \ 54 - typecheck(atomic_t *, ptr); \ 55 - asm volatile( \ 56 - " l %0,%2\n" \ 57 - "0: lr %1,%0\n" \ 58 - op_string " %1,%3\n" \ 59 - " cs %0,%1,%2\n" \ 60 - " jl 0b" \ 61 - : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\ 62 - : "d" (op_val) \ 63 - : "cc", "memory"); \ 64 - old_val; \ 65 - }) 66 - 67 - #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 68 23 69 24 static inline int 
atomic_read(const atomic_t *v) 70 25 { ··· 35 90 36 91 static inline int atomic_add_return(int i, atomic_t *v) 37 92 { 38 - return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i; 93 + return __atomic_add_barrier(i, &v->counter) + i; 39 94 } 40 95 41 96 static inline int atomic_fetch_add(int i, atomic_t *v) 42 97 { 43 - return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER); 98 + return __atomic_add_barrier(i, &v->counter); 44 99 } 45 100 46 101 static inline void atomic_add(int i, atomic_t *v) 47 102 { 48 103 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 49 104 if (__builtin_constant_p(i) && (i > -129) && (i < 128)) { 50 - asm volatile( 51 - "asi %0,%1\n" 52 - : "+Q" (v->counter) 53 - : "i" (i) 54 - : "cc", "memory"); 105 + __atomic_add_const(i, &v->counter); 55 106 return; 56 107 } 57 108 #endif 58 - __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER); 109 + __atomic_add(i, &v->counter); 59 110 } 60 111 61 112 #define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0) ··· 66 125 #define atomic_dec_return(_v) atomic_sub_return(1, _v) 67 126 #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) 68 127 69 - #define ATOMIC_OPS(op, OP) \ 128 + #define ATOMIC_OPS(op) \ 70 129 static inline void atomic_##op(int i, atomic_t *v) \ 71 130 { \ 72 - __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \ 131 + __atomic_##op(i, &v->counter); \ 73 132 } \ 74 133 static inline int atomic_fetch_##op(int i, atomic_t *v) \ 75 134 { \ 76 - return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER); \ 135 + return __atomic_##op##_barrier(i, &v->counter); \ 77 136 } 78 137 79 - ATOMIC_OPS(and, AND) 80 - ATOMIC_OPS(or, OR) 81 - ATOMIC_OPS(xor, XOR) 138 + ATOMIC_OPS(and) 139 + ATOMIC_OPS(or) 140 + ATOMIC_OPS(xor) 82 141 83 142 #undef ATOMIC_OPS 84 143 ··· 86 145 87 146 static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 88 147 { 89 - asm volatile( 90 - " cs %0,%2,%1" 91 - : "+d" (old), "+Q" (v->counter) 92 - : "d" (new) 93 - : "cc", "memory"); 
94 - return old; 148 + return __atomic_cmpxchg(&v->counter, old, new); 95 149 } 96 150 97 151 static inline int __atomic_add_unless(atomic_t *v, int a, int u) ··· 104 168 return c; 105 169 } 106 170 107 - 108 - #undef __ATOMIC_LOOP 109 - 110 171 #define ATOMIC64_INIT(i) { (i) } 111 172 112 - #define __ATOMIC64_NO_BARRIER "\n" 113 - 114 - #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 115 - 116 - #define __ATOMIC64_OR "laog" 117 - #define __ATOMIC64_AND "lang" 118 - #define __ATOMIC64_ADD "laag" 119 - #define __ATOMIC64_XOR "laxg" 120 - #define __ATOMIC64_BARRIER "bcr 14,0\n" 121 - 122 - #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \ 123 - ({ \ 124 - long long old_val; \ 125 - \ 126 - typecheck(atomic64_t *, ptr); \ 127 - asm volatile( \ 128 - op_string " %0,%2,%1\n" \ 129 - __barrier \ 130 - : "=d" (old_val), "+Q" ((ptr)->counter) \ 131 - : "d" (op_val) \ 132 - : "cc", "memory"); \ 133 - old_val; \ 134 - }) 135 - 136 - #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 137 - 138 - #define __ATOMIC64_OR "ogr" 139 - #define __ATOMIC64_AND "ngr" 140 - #define __ATOMIC64_ADD "agr" 141 - #define __ATOMIC64_XOR "xgr" 142 - #define __ATOMIC64_BARRIER "\n" 143 - 144 - #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \ 145 - ({ \ 146 - long long old_val, new_val; \ 147 - \ 148 - typecheck(atomic64_t *, ptr); \ 149 - asm volatile( \ 150 - " lg %0,%2\n" \ 151 - "0: lgr %1,%0\n" \ 152 - op_string " %1,%3\n" \ 153 - " csg %0,%1,%2\n" \ 154 - " jl 0b" \ 155 - : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\ 156 - : "d" (op_val) \ 157 - : "cc", "memory"); \ 158 - old_val; \ 159 - }) 160 - 161 - #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 162 - 163 - static inline long long atomic64_read(const atomic64_t *v) 173 + static inline long atomic64_read(const atomic64_t *v) 164 174 { 165 - long long c; 175 + long c; 166 176 167 177 asm volatile( 168 178 " lg %0,%1\n" ··· 116 234 return c; 117 235 } 118 236 119 - static inline void atomic64_set(atomic64_t *v, long long 
i) 237 + static inline void atomic64_set(atomic64_t *v, long i) 120 238 { 121 239 asm volatile( 122 240 " stg %1,%0\n" 123 241 : "=Q" (v->counter) : "d" (i)); 124 242 } 125 243 126 - static inline long long atomic64_add_return(long long i, atomic64_t *v) 244 + static inline long atomic64_add_return(long i, atomic64_t *v) 127 245 { 128 - return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i; 246 + return __atomic64_add_barrier(i, &v->counter) + i; 129 247 } 130 248 131 - static inline long long atomic64_fetch_add(long long i, atomic64_t *v) 249 + static inline long atomic64_fetch_add(long i, atomic64_t *v) 132 250 { 133 - return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER); 251 + return __atomic64_add_barrier(i, &v->counter); 134 252 } 135 253 136 - static inline void atomic64_add(long long i, atomic64_t *v) 254 + static inline void atomic64_add(long i, atomic64_t *v) 137 255 { 138 256 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 139 257 if (__builtin_constant_p(i) && (i > -129) && (i < 128)) { 140 - asm volatile( 141 - "agsi %0,%1\n" 142 - : "+Q" (v->counter) 143 - : "i" (i) 144 - : "cc", "memory"); 258 + __atomic64_add_const(i, &v->counter); 145 259 return; 146 260 } 147 261 #endif 148 - __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER); 262 + __atomic64_add(i, &v->counter); 149 263 } 150 264 151 265 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) 152 266 153 - static inline long long atomic64_cmpxchg(atomic64_t *v, 154 - long long old, long long new) 267 + static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) 155 268 { 156 - asm volatile( 157 - " csg %0,%2,%1" 158 - : "+d" (old), "+Q" (v->counter) 159 - : "d" (new) 160 - : "cc", "memory"); 161 - return old; 269 + return __atomic64_cmpxchg(&v->counter, old, new); 162 270 } 163 271 164 - #define ATOMIC64_OPS(op, OP) \ 272 + #define ATOMIC64_OPS(op) \ 165 273 static inline void atomic64_##op(long i, atomic64_t *v) \ 166 274 { \ 167 - __ATOMIC64_LOOP(v, i, 
__ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \ 275 + __atomic64_##op(i, &v->counter); \ 168 276 } \ 169 277 static inline long atomic64_fetch_##op(long i, atomic64_t *v) \ 170 278 { \ 171 - return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \ 279 + return __atomic64_##op##_barrier(i, &v->counter); \ 172 280 } 173 281 174 - ATOMIC64_OPS(and, AND) 175 - ATOMIC64_OPS(or, OR) 176 - ATOMIC64_OPS(xor, XOR) 282 + ATOMIC64_OPS(and) 283 + ATOMIC64_OPS(or) 284 + ATOMIC64_OPS(xor) 177 285 178 286 #undef ATOMIC64_OPS 179 - #undef __ATOMIC64_LOOP 180 287 181 - static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) 288 + static inline int atomic64_add_unless(atomic64_t *v, long i, long u) 182 289 { 183 - long long c, old; 290 + long c, old; 184 291 185 292 c = atomic64_read(v); 186 293 for (;;) { ··· 183 312 return c != u; 184 313 } 185 314 186 - static inline long long atomic64_dec_if_positive(atomic64_t *v) 315 + static inline long atomic64_dec_if_positive(atomic64_t *v) 187 316 { 188 - long long c, old, dec; 317 + long c, old, dec; 189 318 190 319 c = atomic64_read(v); 191 320 for (;;) { ··· 204 333 #define atomic64_inc(_v) atomic64_add(1, _v) 205 334 #define atomic64_inc_return(_v) atomic64_add_return(1, _v) 206 335 #define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) 207 - #define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v) 208 - #define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long long)(_i), _v) 209 - #define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v) 336 + #define atomic64_sub_return(_i, _v) atomic64_add_return(-(long)(_i), _v) 337 + #define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long)(_i), _v) 338 + #define atomic64_sub(_i, _v) atomic64_add(-(long)(_i), _v) 210 339 #define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) 211 340 #define atomic64_dec(_v) atomic64_sub(1, _v) 212 341 #define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
+130
arch/s390/include/asm/atomic_ops.h
··· 1 + /* 2 + * Low level function for atomic operations 3 + * 4 + * Copyright IBM Corp. 1999, 2016 5 + */ 6 + 7 + #ifndef __ARCH_S390_ATOMIC_OPS__ 8 + #define __ARCH_S390_ATOMIC_OPS__ 9 + 10 + #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 11 + 12 + #define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \ 13 + static inline op_type op_name(op_type val, op_type *ptr) \ 14 + { \ 15 + op_type old; \ 16 + \ 17 + asm volatile( \ 18 + op_string " %[old],%[val],%[ptr]\n" \ 19 + op_barrier \ 20 + : [old] "=d" (old), [ptr] "+Q" (*ptr) \ 21 + : [val] "d" (val) : "cc", "memory"); \ 22 + return old; \ 23 + } \ 24 + 25 + #define __ATOMIC_OPS(op_name, op_type, op_string) \ 26 + __ATOMIC_OP(op_name, op_type, op_string, "\n") \ 27 + __ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n") 28 + 29 + __ATOMIC_OPS(__atomic_add, int, "laa") 30 + __ATOMIC_OPS(__atomic_and, int, "lan") 31 + __ATOMIC_OPS(__atomic_or, int, "lao") 32 + __ATOMIC_OPS(__atomic_xor, int, "lax") 33 + 34 + __ATOMIC_OPS(__atomic64_add, long, "laag") 35 + __ATOMIC_OPS(__atomic64_and, long, "lang") 36 + __ATOMIC_OPS(__atomic64_or, long, "laog") 37 + __ATOMIC_OPS(__atomic64_xor, long, "laxg") 38 + 39 + #undef __ATOMIC_OPS 40 + #undef __ATOMIC_OP 41 + 42 + static inline void __atomic_add_const(int val, int *ptr) 43 + { 44 + asm volatile( 45 + " asi %[ptr],%[val]\n" 46 + : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc"); 47 + } 48 + 49 + static inline void __atomic64_add_const(long val, long *ptr) 50 + { 51 + asm volatile( 52 + " agsi %[ptr],%[val]\n" 53 + : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc"); 54 + } 55 + 56 + #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 57 + 58 + #define __ATOMIC_OP(op_name, op_string) \ 59 + static inline int op_name(int val, int *ptr) \ 60 + { \ 61 + int old, new; \ 62 + \ 63 + asm volatile( \ 64 + "0: lr %[new],%[old]\n" \ 65 + op_string " %[new],%[val]\n" \ 66 + " cs %[old],%[new],%[ptr]\n" \ 67 + " jl 0b" \ 68 + : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\ 69 + : [val] 
"d" (val), "0" (*ptr) : "cc", "memory"); \ 70 + return old; \ 71 + } 72 + 73 + #define __ATOMIC_OPS(op_name, op_string) \ 74 + __ATOMIC_OP(op_name, op_string) \ 75 + __ATOMIC_OP(op_name##_barrier, op_string) 76 + 77 + __ATOMIC_OPS(__atomic_add, "ar") 78 + __ATOMIC_OPS(__atomic_and, "nr") 79 + __ATOMIC_OPS(__atomic_or, "or") 80 + __ATOMIC_OPS(__atomic_xor, "xr") 81 + 82 + #undef __ATOMIC_OPS 83 + 84 + #define __ATOMIC64_OP(op_name, op_string) \ 85 + static inline long op_name(long val, long *ptr) \ 86 + { \ 87 + long old, new; \ 88 + \ 89 + asm volatile( \ 90 + "0: lgr %[new],%[old]\n" \ 91 + op_string " %[new],%[val]\n" \ 92 + " csg %[old],%[new],%[ptr]\n" \ 93 + " jl 0b" \ 94 + : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\ 95 + : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \ 96 + return old; \ 97 + } 98 + 99 + #define __ATOMIC64_OPS(op_name, op_string) \ 100 + __ATOMIC64_OP(op_name, op_string) \ 101 + __ATOMIC64_OP(op_name##_barrier, op_string) 102 + 103 + __ATOMIC64_OPS(__atomic64_add, "agr") 104 + __ATOMIC64_OPS(__atomic64_and, "ngr") 105 + __ATOMIC64_OPS(__atomic64_or, "ogr") 106 + __ATOMIC64_OPS(__atomic64_xor, "xgr") 107 + 108 + #undef __ATOMIC64_OPS 109 + 110 + #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 111 + 112 + static inline int __atomic_cmpxchg(int *ptr, int old, int new) 113 + { 114 + asm volatile( 115 + " cs %[old],%[new],%[ptr]" 116 + : [old] "+d" (old), [ptr] "+Q" (*ptr) 117 + : [new] "d" (new) : "cc", "memory"); 118 + return old; 119 + } 120 + 121 + static inline long __atomic64_cmpxchg(long *ptr, long old, long new) 122 + { 123 + asm volatile( 124 + " csg %[old],%[new],%[ptr]" 125 + : [old] "+d" (old), [ptr] "+Q" (*ptr) 126 + : [new] "d" (new) : "cc", "memory"); 127 + return old; 128 + } 129 + 130 + #endif /* __ARCH_S390_ATOMIC_OPS__ */
+1 -1
arch/s390/pci/pci_debug.c
··· 69 69 int i; 70 70 71 71 for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++) 72 - seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i], 72 + seq_printf(m, "%26s:\t%lu\n", pci_sw_names[i], 73 73 atomic64_read(counter)); 74 74 } 75 75