/*
 * Low level functions for atomic operations
 *
 * Copyright IBM Corp. 1999, 2016
 */

#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__

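/*
 * Two implementations are provided below: with
 * CONFIG_HAVE_MARCH_Z196_FEATURES the interlocked-access facility
 * instructions are used, otherwise the operations fall back to
 * compare-and-swap loops.  The __atomic{,64}_{add,and,or,xor}
 * helpers return the old value of *ptr, e.g. (illustrative only):
 *         old = __atomic_add(1, &counter);
 */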
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

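/*
 * z196 and newer: the load-and-{add,and,or,xor} instructions
 * (laa/lan/lao/lax and their 64-bit ..g forms) perform the
 * read-modify-write as a single interlocked operation and return
 * the previous contents of *ptr in the first operand.
 */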
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
static inline op_type op_name(op_type val, op_type *ptr) \
{ \
        op_type old; \
        \
        asm volatile( \
                op_string " %[old],%[val],%[ptr]\n" \
                op_barrier \
                : [old] "=d" (old), [ptr] "+Q" (*ptr) \
                : [val] "d" (val) : "cc", "memory"); \
        return old; \
} \

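/*
 * The _barrier variants append "bcr 14,0", which serializes the CPU
 * (fast-BCR-serialization) and thus also acts as a memory barrier.
 */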
#define __ATOMIC_OPS(op_name, op_type, op_string) \
        __ATOMIC_OP(op_name, op_type, op_string, "\n") \
        __ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")

__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")
__ATOMIC_OPS(__atomic_or, int, "lao")
__ATOMIC_OPS(__atomic_xor, int, "lax")

__ATOMIC_OPS(__atomic64_add, long, "laag")
__ATOMIC_OPS(__atomic64_and, long, "lang")
__ATOMIC_OPS(__atomic64_or, long, "laog")
__ATOMIC_OPS(__atomic64_xor, long, "laxg")

#undef __ATOMIC_OPS
#undef __ATOMIC_OP

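/*
 * asi/agsi add a signed immediate to memory; with the
 * interlocked-access facility the update of a naturally aligned
 * operand is interlocked, and no old value is returned.
 */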
static inline void __atomic_add_const(int val, int *ptr)
{
        asm volatile(
                " asi %[ptr],%[val]\n"
                : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}

static inline void __atomic64_add_const(long val, long *ptr)
{
        asm volatile(
                " agsi %[ptr],%[val]\n"
                : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

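/*
 * Pre-z196 fallback: emulate the operations with a compare-and-swap
 * (cs/csg) retry loop.  The operation is applied to a copy of the
 * old value and cs/csg stores the result only if *ptr still holds
 * the old value; otherwise the condition code makes "jl 0b" retry.
 */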
#define __ATOMIC_OP(op_name, op_string) \
static inline int op_name(int val, int *ptr) \
{ \
        int old, new; \
        \
        asm volatile( \
                "0: lr %[new],%[old]\n" \
                op_string " %[new],%[val]\n" \
                " cs %[old],%[new],%[ptr]\n" \
                " jl 0b" \
                : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
                : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
        return old; \
}

#define __ATOMIC_OPS(op_name, op_string) \
        __ATOMIC_OP(op_name, op_string) \
        __ATOMIC_OP(op_name##_barrier, op_string)

__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or, "or")
__ATOMIC_OPS(__atomic_xor, "xr")

#undef __ATOMIC_OPS

#define __ATOMIC64_OP(op_name, op_string) \
static inline long op_name(long val, long *ptr) \
{ \
        long old, new; \
        \
        asm volatile( \
                "0: lgr %[new],%[old]\n" \
                op_string " %[new],%[val]\n" \
                " csg %[old],%[new],%[ptr]\n" \
                " jl 0b" \
                : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
                : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
        return old; \
}

#define __ATOMIC64_OPS(op_name, op_string) \
        __ATOMIC64_OP(op_name, op_string) \
        __ATOMIC64_OP(op_name##_barrier, op_string)

__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or, "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")

#undef __ATOMIC64_OPS

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

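/*
 * The compare-and-swap wrappers use the compiler's __sync builtins,
 * which are implemented with cs/csg on s390.
 */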
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
        return __sync_val_compare_and_swap(ptr, old, new);
}

static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
        return __sync_bool_compare_and_swap(ptr, old, new);
}

static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
        return __sync_val_compare_and_swap(ptr, old, new);
}

static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
        return __sync_bool_compare_and_swap(ptr, old, new);
}

#endif /* __ARCH_S390_ATOMIC_OPS__ */