/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL	"ll	"
#define __SC	"sc	"
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL	"lld	"
#define __SC	"scd	"
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupts for kernel-mode code; this keeps user-mode
 * code that dares to include kernel headers alive.
 */

#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}
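
/*
 * Illustrative sketch, not part of the original header: set_bit() is
 * typically used on a flag word shared between contexts, e.g. set from
 * an interrupt handler and tested elsewhere.  The MYDEV_IRQ_PENDING bit
 * and mydev_flags word below are hypothetical names:
 *
 *	#define MYDEV_IRQ_PENDING	0
 *
 *	static unsigned long mydev_flags;
 *
 *	// in the interrupt handler: atomically record the pending event
 *	set_bit(MYDEV_IRQ_PENDING, &mydev_flags);
 */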

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}
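
/*
 * Illustrative sketch, not part of the original header: since clear_bit()
 * has no implied barrier, releasing a bit that other CPUs synchronize on
 * needs the explicit barrier macro first.  MYDEV_OWNED and mydev_flags
 * are hypothetical names:
 *
 *	// make all writes in the critical section visible before release
 *	smp_mb__before_clear_bit();
 *	clear_bit(MYDEV_OWNED, &mydev_flags);
 */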

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
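
/*
 * Illustrative sketch, not part of the original header: because
 * test_and_set_bit() returns the *old* bit value and implies a memory
 * barrier, it can serve as a crude spinning bit lock.  MYDEV_LOCK and
 * mydev_flags are hypothetical names:
 *
 *	while (test_and_set_bit(MYDEV_LOCK, &mydev_flags))
 *		cpu_relax();
 *	// ... critical section ...
 *	smp_mb__before_clear_bit();
 *	clear_bit(MYDEV_LOCK, &mydev_flags);
 */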

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

#include <asm-generic/bitops/non-atomic.h>

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	"	.set	push					\n"
	"	.set	mips64					\n"
	"	dclz	%0, %1					\n"
	"	.set	pop					\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long ffs(unsigned long word)
{
	if (!word)
		return 0;

	return __ffs(word) + 1;
}

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffs(~word);
}

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long fls(unsigned long word)
{
#ifdef CONFIG_CPU_MIPS32
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
#endif

#ifdef CONFIG_CPU_MIPS64
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
#endif
}
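
/*
 * Illustrative sketch, not part of the original header: ffz() combined
 * with test_and_set_bit() can claim a free slot in a one-word bitmap.
 * The helper below is hypothetical; it returns -1 when the map is full,
 * and the retry loop handles losing a race to another CPU:
 *
 *	static int mydev_get_slot(unsigned long *map)
 *	{
 *		unsigned long bit;
 *
 *		do {
 *			if (*map == ~0UL)
 *				return -1;
 *			bit = ffz(*map);
 *		} while (test_and_set_bit(bit, map));
 *
 *		return bit;
 *	}
 */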

#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */