Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/atomic: Provide arch_atomic_*_and_test() implementations

Provide arch_atomic_*_and_test() implementations which make use of flag
output constraints, and allow the compiler to generate slightly better
code.

Reviewed-by: Juergen Christ <jchrist@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>

authored by

Heiko Carstens and committed by
Alexander Gordeev
a53f5d24 7c7f32c9

+109
+36
arch/s390/include/asm/atomic.h
··· 57 57 } 58 58 #define arch_atomic_dec arch_atomic_dec 59 59 60 + static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) 61 + { 62 + return __atomic_add_and_test_barrier(-i, &v->counter); 63 + } 64 + #define arch_atomic_sub_and_test arch_atomic_sub_and_test 65 + 66 + static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) 67 + { 68 + return __atomic_add_const_and_test_barrier(-1, &v->counter); 69 + } 70 + #define arch_atomic_dec_and_test arch_atomic_dec_and_test 71 + 72 + static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) 73 + { 74 + return __atomic_add_const_and_test_barrier(1, &v->counter); 75 + } 76 + #define arch_atomic_inc_and_test arch_atomic_inc_and_test 77 + 60 78 #define arch_atomic_sub(_i, _v) arch_atomic_add(-(int)(_i), _v) 61 79 #define arch_atomic_sub_return(_i, _v) arch_atomic_add_return(-(int)(_i), _v) 62 80 #define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v) ··· 163 145 __atomic64_add_const(-1, (long *)&v->counter); 164 146 } 165 147 #define arch_atomic64_dec arch_atomic64_dec 148 + 149 + static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v) 150 + { 151 + return __atomic64_add_and_test_barrier(-i, (long *)&v->counter); 152 + } 153 + #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test 154 + 155 + static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v) 156 + { 157 + return __atomic64_add_const_and_test_barrier(-1, (long *)&v->counter); 158 + } 159 + #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test 160 + 161 + static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v) 162 + { 163 + return __atomic64_add_const_and_test_barrier(1, (long *)&v->counter); 164 + } 165 + #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test 166 166 167 167 static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new) 168 168 {
+73
arch/s390/include/asm/atomic_ops.h
··· 10 10 11 11 #include <linux/limits.h> 12 12 #include <asm/march.h> 13 + #include <asm/asm.h> 13 14 14 15 static __always_inline int __atomic_read(const int *ptr) 15 16 { ··· 169 168 #define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr) 170 169 171 170 #endif /* MARCH_HAS_Z196_FEATURES */ 171 + 172 + #if defined(MARCH_HAS_Z196_FEATURES) && defined(__HAVE_ASM_FLAG_OUTPUTS__) 173 + 174 + #define __ATOMIC_TEST_OP(op_name, op_type, op_string, op_barrier) \ 175 + static __always_inline bool op_name(op_type val, op_type *ptr) \ 176 + { \ 177 + op_type tmp; \ 178 + int cc; \ 179 + \ 180 + asm volatile( \ 181 + op_string " %[tmp],%[val],%[ptr]\n" \ 182 + op_barrier \ 183 + : "=@cc" (cc), [tmp] "=d" (tmp), [ptr] "+QS" (*ptr) \ 184 + : [val] "d" (val) \ 185 + : "memory"); \ 186 + return (cc == 0) || (cc == 2); \ 187 + } \ 188 + 189 + #define __ATOMIC_TEST_OPS(op_name, op_type, op_string) \ 190 + __ATOMIC_TEST_OP(op_name, op_type, op_string, "") \ 191 + __ATOMIC_TEST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n") 192 + 193 + __ATOMIC_TEST_OPS(__atomic_add_and_test, int, "laal") 194 + __ATOMIC_TEST_OPS(__atomic64_add_and_test, long, "laalg") 195 + 196 + #undef __ATOMIC_TEST_OPS 197 + #undef __ATOMIC_TEST_OP 198 + 199 + #define __ATOMIC_CONST_TEST_OP(op_name, op_type, op_string, op_barrier) \ 200 + static __always_inline bool op_name(op_type val, op_type *ptr) \ 201 + { \ 202 + int cc; \ 203 + \ 204 + asm volatile( \ 205 + op_string " %[ptr],%[val]\n" \ 206 + op_barrier \ 207 + : "=@cc" (cc), [ptr] "+QS" (*ptr) \ 208 + : [val] "i" (val) \ 209 + : "memory"); \ 210 + return (cc == 0) || (cc == 2); \ 211 + } 212 + 213 + #define __ATOMIC_CONST_TEST_OPS(op_name, op_type, op_string) \ 214 + __ATOMIC_CONST_TEST_OP(op_name, op_type, op_string, "") \ 215 + __ATOMIC_CONST_TEST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n") 216 + 217 + __ATOMIC_CONST_TEST_OPS(__atomic_add_const_and_test, int, "alsi") 218 + __ATOMIC_CONST_TEST_OPS(__atomic64_add_const_and_test, long, "algsi") 219 + 220 + #undef __ATOMIC_CONST_TEST_OPS 221 + #undef __ATOMIC_CONST_TEST_OP 222 + 223 + #else /* defined(MARCH_HAS_Z196_FEATURES) && defined(__HAVE_ASM_FLAG_OUTPUTS__) */ 224 + 225 + #define __ATOMIC_TEST_OP(op_name, op_func, op_type) \ 226 + static __always_inline bool op_name(op_type val, op_type *ptr) \ 227 + { \ 228 + return op_func(val, ptr) == -val; \ 229 + } 230 + 231 + __ATOMIC_TEST_OP(__atomic_add_and_test, __atomic_add, int) 232 + __ATOMIC_TEST_OP(__atomic_add_and_test_barrier, __atomic_add_barrier, int) 233 + __ATOMIC_TEST_OP(__atomic_add_const_and_test, __atomic_add, int) 234 + __ATOMIC_TEST_OP(__atomic_add_const_and_test_barrier, __atomic_add_barrier, int) 235 + __ATOMIC_TEST_OP(__atomic64_add_and_test, __atomic64_add, long) 236 + __ATOMIC_TEST_OP(__atomic64_add_and_test_barrier, __atomic64_add_barrier, long) 237 + __ATOMIC_TEST_OP(__atomic64_add_const_and_test, __atomic64_add, long) 238 + __ATOMIC_TEST_OP(__atomic64_add_const_and_test_barrier, __atomic64_add_barrier, long) 239 + 240 + #undef __ATOMIC_TEST_OP 241 + 242 + #endif /* defined(MARCH_HAS_Z196_FEATURES) && defined(__HAVE_ASM_FLAG_OUTPUTS__) */ 172 243 173 244 #endif /* __ARCH_S390_ATOMIC_OPS__ */