/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/* Normal writes in our arch don't clear lock reservations */

static inline void atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!P0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

/**
 * atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

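/*
 * Usage sketch (illustrative only, not part of this header's API): a
 * counter is initialized statically with ATOMIC_INIT() or at run time
 * with atomic_set(), and sampled with atomic_read().  The variable and
 * function names below are hypothetical.
 */
#if 0	/* example */
static atomic_t example_counter = ATOMIC_INIT(0);

static int example_reset_and_read(void)
{
	atomic_set(&example_counter, 0);	/* runtime (re)initialization */
	return atomic_read(&example_counter);	/* plain atomic word read */
}
#endif
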
/**
 * atomic_xchg - atomically exchange a new value with memory
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 *
 * Returns the value previously stored in @v.
 */
#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))

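/*
 * Usage sketch (illustrative only): because atomic_xchg() returns the
 * old value, it can hand a "pending" flag to exactly one caller.  The
 * names below are hypothetical.
 */
#if 0	/* example */
static atomic_t example_pending = ATOMIC_INIT(0);

static int example_try_claim(void)
{
	/* Only the caller that observes the old value 1 does the work. */
	return atomic_xchg(&example_pending, 0) == 1;
}
#endif
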
/**
 * atomic_cmpxchg - atomic compare-and-exchange values
 * @v: pointer to value to change
 * @old: desired old value to match
 * @new: new value to put in
 *
 * Parameters are pointer, value-in-register, value-in-register, and
 * the return value is the old value.
 *
 * This is more involved on archs that don't support memw_locked the
 * way we do (or where it's broken).
 *
 * Kind of the linchpin of the rest of the generically defined routines.
 * Remember V2 had that bug with the dotnew predicate set by memw_locked.
 *
 * @old is the "expected" old value; __oldval is the actual old value
 * read back from memory.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ P0 = cmp.eq(%0,%2);\n"
		"	  if (!P0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,P0) = %3;\n"
		"	if (!P0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}

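/*
 * Usage sketch (illustrative only): the classic compare-and-exchange
 * retry loop; the generic helpers in <linux/atomic.h> are built on this
 * primitive in much the same way.  The function name and the clamping
 * policy below are hypothetical.
 */
#if 0	/* example */
static int example_add_clamped(atomic_t *v, int a, int max)
{
	int old, new;

	do {
		old = atomic_read(v);
		new = old + a;
		if (new > max)
			new = max;
		/* Retry if another CPU changed *v between the read and the CAS. */
	} while (atomic_cmpxchg(v, old, new) != old);

	return old;
}
#endif
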
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if !P3 jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if !P3 jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int output, val;						\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%2);\n"			\
		"	%1 = "#op "(%0,%3);\n"				\
		"	memw_locked(%2,P3)=%1;\n"			\
		"	if !P3 jump 1b;\n"				\
		: "=&r" (output), "=&r" (val)				\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

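/*
 * For reference (not additional definitions): the ATOMIC_OPS()
 * instantiations above generate, for example,
 *
 *	void atomic_add(int i, atomic_t *v);
 *	int  atomic_add_return(int i, atomic_t *v);	-- returns the new value
 *	int  atomic_fetch_add(int i, atomic_t *v);	-- returns the old value
 *
 * and likewise for sub, plus atomic_and/or/xor and their fetch_ variants
 * (which have no _return form here).
 */
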
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: value to compare against; no add is done if the current value is @u
 *
 * Returns the old value.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if !p3 jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}

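/*
 * Usage sketch (illustrative only): the generic atomic_add_unless() in
 * <linux/atomic.h> wraps this primitive, and atomic_inc_not_zero() below
 * builds on that to take a reference only while the count is non-zero.
 * The structure and function names here are hypothetical.
 */
#if 0	/* example */
struct example_obj {
	atomic_t refcount;
};

static bool example_obj_tryget(struct example_obj *obj)
{
	/* Fails (returns false) once the count has already dropped to zero. */
	return atomic_inc_not_zero(&obj->refcount);
}
#endif
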
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, (v)) == 0)
#define atomic_add_negative(i, v)	(atomic_add_return(i, (v)) < 0)

#define atomic_inc_return(v)		(atomic_add_return(1, v))
#define atomic_dec_return(v)		(atomic_sub_return(1, v))

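/*
 * Usage sketch (illustrative only): atomic_dec_and_test() is the usual
 * way to drop a reference and free the object on the final put.  The
 * names reuse the hypothetical example above; real code would typically
 * call kfree() from <linux/slab.h>.
 */
#if 0	/* example */
static void example_obj_put(struct example_obj *obj)
{
	/* True only for the caller that drops the count to zero. */
	if (atomic_dec_and_test(&obj->refcount))
		kfree(obj);
}
#endif
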
#endif	/* _ASM_ATOMIC_H */