/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#endif

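/*
 * Worked example (illustrative addition, not part of the original header):
 * with _MIPS_SZLONG == 32, SZLONG_LOG is 5 and SZLONG_MASK is 31UL, so for
 * nr = 37 the containing word is addr[nr >> SZLONG_LOG] == addr[1] and the
 * bit within that word is nr & SZLONG_MASK == 5, i.e. the mask 1UL << 5.
 */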
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a |= mask;
		local_irq_restore(flags);
	}
}

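/*
 * Usage sketch (illustrative addition, not part of the original header;
 * the array below is hypothetical):
 *
 *	static unsigned long device_flags[2];
 *
 *	set_bit(0, device_flags);	- atomically sets bit 0 of word 0
 *	set_bit(40, device_flags);	- on a 32-bit kernel this lands in
 *					  device_flags[1] as bit 8
 */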
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a &= ~mask;
		local_irq_restore(flags);
	}
}

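/*
 * Illustrative unlock-style sequence (an assumed usage pattern, not part
 * of the original header; LOCK_BIT, lock_word and shared_data are
 * hypothetical):
 *
 *	shared_data = new_value;
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &lock_word);
 *
 * The barrier orders the store to shared_data before the bit is released,
 * as required by the comment above clear_bit().
 */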
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a ^= mask;
		local_irq_restore(flags);
	}
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		smp_mb();

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		smp_mb();

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		local_irq_restore(flags);

		smp_mb();

		return retval;
	}
}

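/*
 * Illustrative try-lock pattern (an assumed usage, not part of the
 * original header; BUSY_BIT and state are hypothetical):
 *
 *	if (!test_and_set_bit(BUSY_BIT, &state)) {
 *		... the bit was clear, so this caller now owns it ...
 *	} else {
 *		... somebody else had already set BUSY_BIT ...
 *	}
 */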
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		smp_mb();

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		smp_mb();

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		local_irq_restore(flags);

		smp_mb();

		return retval;
	}
}

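/*
 * Illustrative "consume a pending flag" pattern (an assumed usage, not
 * part of the original header; PENDING_BIT, pending and
 * handle_pending_work() are hypothetical):
 *
 *	if (test_and_clear_bit(PENDING_BIT, &pending))
 *		handle_pending_work();
 *
 * Only the caller that observed the bit as set performs the work, even
 * if several CPUs race on the same flag.
 */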
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		smp_mb();

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		smp_mb();

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		local_irq_restore(flags);

		smp_mb();

		return retval;
	}
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	"	.set	push						\n"
	"	.set	mips64						\n"
	"	dclz	%0, %1						\n"
	"	.set	pop						\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}

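/*
 * Worked example (illustrative addition, not part of the original header):
 * for x = 0x50 a 32-bit clz counts 25 leading zero bits, so __ilog2()
 * returns 31 - 25 = 6, the index of the most significant set bit.  For
 * x = 0, clz yields 32 (dclz yields 64), which gives the documented -1.
 */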
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}

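/*
 * Worked example (illustrative addition, not part of the original header):
 * word & -word isolates the lowest set bit, so for word = 0x18 (bits 3
 * and 4 set) the expression yields 0x08 and __ffs() returns
 * __ilog2(0x08) == 3.
 */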
/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}

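/*
 * Worked example (illustrative addition, not part of the original header):
 * for word = 0x10 the clz result is 27, so fls() returns 32 - 27 = 5;
 * for word = 0 clz yields 32 and fls() returns 0, matching the comment
 * above.
 */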
#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}

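/*
 * Worked example (illustrative addition, not part of the original header):
 * ffs(0x18) isolates the lowest set bit (0x08) and returns fls(0x08) == 4,
 * a 1-based bit position, while ffs(0) returns 0.
 */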
#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */