/* include/asm-arm/bitops.h — from Linux v2.6.13 (421 lines, 12 kB) */
1/* 2 * Copyright 1995, Russell King. 3 * Various bits and pieces copyrights include: 4 * Linus Torvalds (test_bit). 5 * Big endian support: Copyright 2001, Nicolas Pitre 6 * reworked by rmk. 7 * 8 * bit 0 is the LSB of an "unsigned long" quantity. 9 * 10 * Please note that the code in this file should never be included 11 * from user space. Many of these are not implemented in assembler 12 * since they would be too costly. Also, they require privileged 13 * instructions (which are not available from user mode) to ensure 14 * that they are atomic. 15 */ 16 17#ifndef __ASM_ARM_BITOPS_H 18#define __ASM_ARM_BITOPS_H 19 20#ifdef __KERNEL__ 21 22#include <asm/system.h> 23 24#define smp_mb__before_clear_bit() mb() 25#define smp_mb__after_clear_bit() mb() 26 27/* 28 * These functions are the basis of our bit ops. 29 * 30 * First, the atomic bitops. These use native endian. 31 */ 32static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p) 33{ 34 unsigned long flags; 35 unsigned long mask = 1UL << (bit & 31); 36 37 p += bit >> 5; 38 39 local_irq_save(flags); 40 *p |= mask; 41 local_irq_restore(flags); 42} 43 44static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) 45{ 46 unsigned long flags; 47 unsigned long mask = 1UL << (bit & 31); 48 49 p += bit >> 5; 50 51 local_irq_save(flags); 52 *p &= ~mask; 53 local_irq_restore(flags); 54} 55 56static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) 57{ 58 unsigned long flags; 59 unsigned long mask = 1UL << (bit & 31); 60 61 p += bit >> 5; 62 63 local_irq_save(flags); 64 *p ^= mask; 65 local_irq_restore(flags); 66} 67 68static inline int 69____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) 70{ 71 unsigned long flags; 72 unsigned int res; 73 unsigned long mask = 1UL << (bit & 31); 74 75 p += bit >> 5; 76 77 local_irq_save(flags); 78 res = *p; 79 *p = res | mask; 80 local_irq_restore(flags); 81 82 return res & mask; 83} 84 
85static inline int 86____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) 87{ 88 unsigned long flags; 89 unsigned int res; 90 unsigned long mask = 1UL << (bit & 31); 91 92 p += bit >> 5; 93 94 local_irq_save(flags); 95 res = *p; 96 *p = res & ~mask; 97 local_irq_restore(flags); 98 99 return res & mask; 100} 101 102static inline int 103____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) 104{ 105 unsigned long flags; 106 unsigned int res; 107 unsigned long mask = 1UL << (bit & 31); 108 109 p += bit >> 5; 110 111 local_irq_save(flags); 112 res = *p; 113 *p = res ^ mask; 114 local_irq_restore(flags); 115 116 return res & mask; 117} 118 119/* 120 * Now the non-atomic variants. We let the compiler handle all 121 * optimisations for these. These are all _native_ endian. 122 */ 123static inline void __set_bit(int nr, volatile unsigned long *p) 124{ 125 p[nr >> 5] |= (1UL << (nr & 31)); 126} 127 128static inline void __clear_bit(int nr, volatile unsigned long *p) 129{ 130 p[nr >> 5] &= ~(1UL << (nr & 31)); 131} 132 133static inline void __change_bit(int nr, volatile unsigned long *p) 134{ 135 p[nr >> 5] ^= (1UL << (nr & 31)); 136} 137 138static inline int __test_and_set_bit(int nr, volatile unsigned long *p) 139{ 140 unsigned long oldval, mask = 1UL << (nr & 31); 141 142 p += nr >> 5; 143 144 oldval = *p; 145 *p = oldval | mask; 146 return oldval & mask; 147} 148 149static inline int __test_and_clear_bit(int nr, volatile unsigned long *p) 150{ 151 unsigned long oldval, mask = 1UL << (nr & 31); 152 153 p += nr >> 5; 154 155 oldval = *p; 156 *p = oldval & ~mask; 157 return oldval & mask; 158} 159 160static inline int __test_and_change_bit(int nr, volatile unsigned long *p) 161{ 162 unsigned long oldval, mask = 1UL << (nr & 31); 163 164 p += nr >> 5; 165 166 oldval = *p; 167 *p = oldval ^ mask; 168 return oldval & mask; 169} 170 171/* 172 * This routine doesn't need to be atomic. 
173 */ 174static inline int __test_bit(int nr, const volatile unsigned long * p) 175{ 176 return (p[nr >> 5] >> (nr & 31)) & 1UL; 177} 178 179/* 180 * A note about Endian-ness. 181 * ------------------------- 182 * 183 * When the ARM is put into big endian mode via CR15, the processor 184 * merely swaps the order of bytes within words, thus: 185 * 186 * ------------ physical data bus bits ----------- 187 * D31 ... D24 D23 ... D16 D15 ... D8 D7 ... D0 188 * little byte 3 byte 2 byte 1 byte 0 189 * big byte 0 byte 1 byte 2 byte 3 190 * 191 * This means that reading a 32-bit word at address 0 returns the same 192 * value irrespective of the endian mode bit. 193 * 194 * Peripheral devices should be connected with the data bus reversed in 195 * "Big Endian" mode. ARM Application Note 61 is applicable, and is 196 * available from http://www.arm.com/. 197 * 198 * The following assumes that the data bus connectivity for big endian 199 * mode has been followed. 200 * 201 * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0. 202 */ 203 204/* 205 * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. 206 */ 207extern void _set_bit_le(int nr, volatile unsigned long * p); 208extern void _clear_bit_le(int nr, volatile unsigned long * p); 209extern void _change_bit_le(int nr, volatile unsigned long * p); 210extern int _test_and_set_bit_le(int nr, volatile unsigned long * p); 211extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p); 212extern int _test_and_change_bit_le(int nr, volatile unsigned long * p); 213extern int _find_first_zero_bit_le(const void * p, unsigned size); 214extern int _find_next_zero_bit_le(const void * p, int size, int offset); 215extern int _find_first_bit_le(const unsigned long *p, unsigned size); 216extern int _find_next_bit_le(const unsigned long *p, int size, int offset); 217 218/* 219 * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. 
220 */ 221extern void _set_bit_be(int nr, volatile unsigned long * p); 222extern void _clear_bit_be(int nr, volatile unsigned long * p); 223extern void _change_bit_be(int nr, volatile unsigned long * p); 224extern int _test_and_set_bit_be(int nr, volatile unsigned long * p); 225extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p); 226extern int _test_and_change_bit_be(int nr, volatile unsigned long * p); 227extern int _find_first_zero_bit_be(const void * p, unsigned size); 228extern int _find_next_zero_bit_be(const void * p, int size, int offset); 229extern int _find_first_bit_be(const unsigned long *p, unsigned size); 230extern int _find_next_bit_be(const unsigned long *p, int size, int offset); 231 232#ifndef CONFIG_SMP 233/* 234 * The __* form of bitops are non-atomic and may be reordered. 235 */ 236#define ATOMIC_BITOP_LE(name,nr,p) \ 237 (__builtin_constant_p(nr) ? \ 238 ____atomic_##name(nr, p) : \ 239 _##name##_le(nr,p)) 240 241#define ATOMIC_BITOP_BE(name,nr,p) \ 242 (__builtin_constant_p(nr) ? \ 243 ____atomic_##name(nr, p) : \ 244 _##name##_be(nr,p)) 245#else 246#define ATOMIC_BITOP_LE(name,nr,p) _##name##_le(nr,p) 247#define ATOMIC_BITOP_BE(name,nr,p) _##name##_be(nr,p) 248#endif 249 250#define NONATOMIC_BITOP(name,nr,p) \ 251 (____nonatomic_##name(nr, p)) 252 253#ifndef __ARMEB__ 254/* 255 * These are the little endian, atomic definitions. 
256 */ 257#define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p) 258#define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p) 259#define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p) 260#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) 261#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) 262#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) 263#define test_bit(nr,p) __test_bit(nr,p) 264#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) 265#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) 266#define find_first_bit(p,sz) _find_first_bit_le(p,sz) 267#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) 268 269#define WORD_BITOFF_TO_LE(x) ((x)) 270 271#else 272 273/* 274 * These are the big endian, atomic definitions. 275 */ 276#define set_bit(nr,p) ATOMIC_BITOP_BE(set_bit,nr,p) 277#define clear_bit(nr,p) ATOMIC_BITOP_BE(clear_bit,nr,p) 278#define change_bit(nr,p) ATOMIC_BITOP_BE(change_bit,nr,p) 279#define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) 280#define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) 281#define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) 282#define test_bit(nr,p) __test_bit(nr,p) 283#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) 284#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) 285#define find_first_bit(p,sz) _find_first_bit_be(p,sz) 286#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) 287 288#define WORD_BITOFF_TO_LE(x) ((x) ^ 0x18) 289 290#endif 291 292#if __LINUX_ARM_ARCH__ < 5 293 294/* 295 * ffz = Find First Zero in word. Undefined if no zero exists, 296 * so code should check against ~0UL first.. 
297 */ 298static inline unsigned long ffz(unsigned long word) 299{ 300 int k; 301 302 word = ~word; 303 k = 31; 304 if (word & 0x0000ffff) { k -= 16; word <<= 16; } 305 if (word & 0x00ff0000) { k -= 8; word <<= 8; } 306 if (word & 0x0f000000) { k -= 4; word <<= 4; } 307 if (word & 0x30000000) { k -= 2; word <<= 2; } 308 if (word & 0x40000000) { k -= 1; } 309 return k; 310} 311 312/* 313 * ffz = Find First Zero in word. Undefined if no zero exists, 314 * so code should check against ~0UL first.. 315 */ 316static inline unsigned long __ffs(unsigned long word) 317{ 318 int k; 319 320 k = 31; 321 if (word & 0x0000ffff) { k -= 16; word <<= 16; } 322 if (word & 0x00ff0000) { k -= 8; word <<= 8; } 323 if (word & 0x0f000000) { k -= 4; word <<= 4; } 324 if (word & 0x30000000) { k -= 2; word <<= 2; } 325 if (word & 0x40000000) { k -= 1; } 326 return k; 327} 328 329/* 330 * fls: find last bit set. 331 */ 332 333#define fls(x) generic_fls(x) 334 335/* 336 * ffs: find first bit set. This is defined the same way as 337 * the libc and compiler builtin ffs routines, therefore 338 * differs in spirit from the above ffz (man ffs). 339 */ 340 341#define ffs(x) generic_ffs(x) 342 343#else 344 345/* 346 * On ARMv5 and above those functions can be implemented around 347 * the clz instruction for much better code efficiency. 348 */ 349 350static __inline__ int generic_fls(int x); 351#define fls(x) \ 352 ( __builtin_constant_p(x) ? generic_fls(x) : \ 353 ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) ) 354#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) 355#define __ffs(x) (ffs(x) - 1) 356#define ffz(x) __ffs( ~(x) ) 357 358#endif 359 360/* 361 * Find first bit set in a 168-bit bitmap, where the first 362 * 128 bits are unlikely to be set. 
363 */ 364static inline int sched_find_first_bit(const unsigned long *b) 365{ 366 unsigned long v; 367 unsigned int off; 368 369 for (off = 0; v = b[off], off < 4; off++) { 370 if (unlikely(v)) 371 break; 372 } 373 return __ffs(v) + off * 32; 374} 375 376/* 377 * hweightN: returns the hamming weight (i.e. the number 378 * of bits set) of a N-bit word 379 */ 380 381#define hweight32(x) generic_hweight32(x) 382#define hweight16(x) generic_hweight16(x) 383#define hweight8(x) generic_hweight8(x) 384 385/* 386 * Ext2 is defined to use little-endian byte ordering. 387 * These do not need to be atomic. 388 */ 389#define ext2_set_bit(nr,p) \ 390 __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 391#define ext2_set_bit_atomic(lock,nr,p) \ 392 test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 393#define ext2_clear_bit(nr,p) \ 394 __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 395#define ext2_clear_bit_atomic(lock,nr,p) \ 396 test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 397#define ext2_test_bit(nr,p) \ 398 __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 399#define ext2_find_first_zero_bit(p,sz) \ 400 _find_first_zero_bit_le(p,sz) 401#define ext2_find_next_zero_bit(p,sz,off) \ 402 _find_next_zero_bit_le(p,sz,off) 403 404/* 405 * Minix is defined to use little-endian byte ordering. 406 * These do not need to be atomic. 407 */ 408#define minix_set_bit(nr,p) \ 409 __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 410#define minix_test_bit(nr,p) \ 411 __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 412#define minix_test_and_set_bit(nr,p) \ 413 __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 414#define minix_test_and_clear_bit(nr,p) \ 415 __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) 416#define minix_find_first_zero_bit(p,sz) \ 417 _find_first_zero_bit_le(p,sz) 418 419#endif /* __KERNEL__ */ 420 421#endif /* _ARM_BITOPS_H */