/*
 * Source: Linux kernel mirror (for testing),
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * File: include/asm-arm/bitops.h at tag v2.6.15-rc2 (421 lines, 12 kB)
 */
/*
 * Copyright 1995, Russell King.
 * Various bits and pieces copyrights include:
 *  Linus Torvalds (test_bit).
 * Big endian support: Copyright 2001, Nicolas Pitre
 *  reworked by rmk.
 *
 * bit 0 is the LSB of an "unsigned long" quantity.
 *
 * Please note that the code in this file should never be included
 * from user space.  Many of these are not implemented in assembler
 * since they would be too costly.  Also, they require privileged
 * instructions (which are not available from user mode) to ensure
 * that they are atomic.
 */

#ifndef __ASM_ARM_BITOPS_H
#define __ASM_ARM_BITOPS_H

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <asm/system.h>

#define smp_mb__before_clear_bit()	mb()
#define smp_mb__after_clear_bit()	mb()

/*
 * These functions are the basis of our bit ops.
 *
 * First, the atomic bitops. These use native endian.
 *
 * Atomicity is achieved by disabling interrupts around the
 * read-modify-write sequence (local_irq_save/local_irq_restore),
 * which is sufficient on UP; SMP uses the assembler _*_le/_*_be
 * variants selected further below.
 */
static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	local_irq_save(flags);
	*p |= mask;
	local_irq_restore(flags);
}

static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	local_irq_save(flags);
	*p &= ~mask;
	local_irq_restore(flags);
}

static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	local_irq_save(flags);
	*p ^= mask;
	local_irq_restore(flags);
}

static inline int
____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned int res;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	local_irq_save(flags);
	res = *p;
	*p = res | mask;
	local_irq_restore(flags);

	return res & mask;
}

static inline int
____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned int res;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	local_irq_save(flags);
	res = *p;
	*p = res & ~mask;
	local_irq_restore(flags);

	return res & mask;
}

static inline int
____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned int res;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	local_irq_save(flags);
	res = *p;
	*p = res ^ mask;
	local_irq_restore(flags);

	return res & mask;
}

/*
 * Now the non-atomic variants.  We let the compiler handle all
 * optimisations for these.  These are all _native_ endian.
 */
static inline void __set_bit(int nr, volatile unsigned long *p)
{
	p[nr >> 5] |= (1UL << (nr & 31));
}

static inline void __clear_bit(int nr, volatile unsigned long *p)
{
	p[nr >> 5] &= ~(1UL << (nr & 31));
}

static inline void __change_bit(int nr, volatile unsigned long *p)
{
	p[nr >> 5] ^= (1UL << (nr & 31));
}

static inline int __test_and_set_bit(int nr, volatile unsigned long *p)
{
	unsigned long oldval, mask = 1UL << (nr & 31);

	p += nr >> 5;

	oldval = *p;
	*p = oldval | mask;
	return oldval & mask;
}

static inline int __test_and_clear_bit(int nr, volatile unsigned long *p)
{
	unsigned long oldval, mask = 1UL << (nr & 31);

	p += nr >> 5;

	oldval = *p;
	*p = oldval & ~mask;
	return oldval & mask;
}

static inline int __test_and_change_bit(int nr, volatile unsigned long *p)
{
	unsigned long oldval, mask = 1UL << (nr & 31);

	p += nr >> 5;

	oldval = *p;
	*p = oldval ^ mask;
	return oldval & mask;
}

/*
 * This routine doesn't need to be atomic.
 */
static inline int __test_bit(int nr, const volatile unsigned long * p)
{
	return (p[nr >> 5] >> (nr & 31)) & 1UL;
}

/*
 * A note about Endian-ness.
 * -------------------------
 *
 * When the ARM is put into big endian mode via CR15, the processor
 * merely swaps the order of bytes within words, thus:
 *
 *          ------------ physical data bus bits -----------
 *          D31 ... D24  D23 ... D16  D15 ... D8  D7 ... D0
 * little     byte 3       byte 2       byte 1      byte 0
 * big        byte 0       byte 1       byte 2      byte 3
 *
 * This means that reading a 32-bit word at address 0 returns the same
 * value irrespective of the endian mode bit.
 *
 * Peripheral devices should be connected with the data bus reversed in
 * "Big Endian" mode.  ARM Application Note 61 is applicable, and is
 * available from http://www.arm.com/.
 *
 * The following assumes that the data bus connectivity for big endian
 * mode has been followed.
 *
 * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
 */

/*
 * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
 */
extern void _set_bit_le(int nr, volatile unsigned long * p);
extern void _clear_bit_le(int nr, volatile unsigned long * p);
extern void _change_bit_le(int nr, volatile unsigned long * p);
extern int _test_and_set_bit_le(int nr, volatile unsigned long * p);
extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p);
extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
extern int _find_first_zero_bit_le(const void * p, unsigned size);
extern int _find_next_zero_bit_le(const void * p, int size, int offset);
extern int _find_first_bit_le(const unsigned long *p, unsigned size);
extern int _find_next_bit_le(const unsigned long *p, int size, int offset);

/*
 * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
 */
extern void _set_bit_be(int nr, volatile unsigned long * p);
extern void _clear_bit_be(int nr, volatile unsigned long * p);
extern void _change_bit_be(int nr, volatile unsigned long * p);
extern int _test_and_set_bit_be(int nr, volatile unsigned long * p);
extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p);
extern int _test_and_change_bit_be(int nr, volatile unsigned long * p);
extern int _find_first_zero_bit_be(const void * p, unsigned size);
extern int _find_next_zero_bit_be(const void * p, int size, int offset);
extern int _find_first_bit_be(const unsigned long *p, unsigned size);
extern int _find_next_bit_be(const unsigned long *p, int size, int offset);

#ifndef CONFIG_SMP
/*
 * The __* form of bitops are non-atomic and may be reordered.
 *
 * On UP, a compile-time constant bit number lets the compiler inline
 * the C ____atomic_* variant; otherwise we call the assembler routine.
 */
#define	ATOMIC_BITOP_LE(name,nr,p)		\
	(__builtin_constant_p(nr) ?		\
	 ____atomic_##name(nr, p) :		\
	 _##name##_le(nr,p))

#define	ATOMIC_BITOP_BE(name,nr,p)		\
	(__builtin_constant_p(nr) ?		\
	 ____atomic_##name(nr, p) :		\
	 _##name##_be(nr,p))
#else
#define ATOMIC_BITOP_LE(name,nr,p)	_##name##_le(nr,p)
#define ATOMIC_BITOP_BE(name,nr,p)	_##name##_be(nr,p)
#endif

/*
 * NOTE(review): ____nonatomic_##name has no definition in this file;
 * this macro only resolves if a definition is provided elsewhere —
 * it appears to be unused here, confirm before relying on it.
 */
#define NONATOMIC_BITOP(name,nr,p)		\
	(____nonatomic_##name(nr, p))

#ifndef __ARMEB__
/*
 * These are the little endian, atomic definitions.
 */
#define set_bit(nr,p)			ATOMIC_BITOP_LE(set_bit,nr,p)
#define clear_bit(nr,p)			ATOMIC_BITOP_LE(clear_bit,nr,p)
#define change_bit(nr,p)		ATOMIC_BITOP_LE(change_bit,nr,p)
#define test_and_set_bit(nr,p)		ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p)	ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
#define test_bit(nr,p)			__test_bit(nr,p)
#define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)
#define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)
#define find_first_bit(p,sz)		_find_first_bit_le(p,sz)
#define find_next_bit(p,sz,off)		_find_next_bit_le(p,sz,off)

#define WORD_BITOFF_TO_LE(x)		((x))

#else

/*
 * These are the big endian, atomic definitions.
 */
#define set_bit(nr,p)			ATOMIC_BITOP_BE(set_bit,nr,p)
#define clear_bit(nr,p)			ATOMIC_BITOP_BE(clear_bit,nr,p)
#define change_bit(nr,p)		ATOMIC_BITOP_BE(change_bit,nr,p)
#define test_and_set_bit(nr,p)		ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p)	ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
#define test_bit(nr,p)			__test_bit(nr,p)
#define find_first_zero_bit(p,sz)	_find_first_zero_bit_be(p,sz)
#define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_be(p,sz,off)
#define find_first_bit(p,sz)		_find_first_bit_be(p,sz)
#define find_next_bit(p,sz,off)		_find_next_bit_be(p,sz,off)

/* Convert a word-relative bit offset to its little-endian equivalent
 * by flipping the byte index within the 32-bit word (0x18 = 24). */
#define WORD_BITOFF_TO_LE(x)		((x) ^ 0x18)

#endif

#if __LINUX_ARM_ARCH__ < 5

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
static inline unsigned long ffz(unsigned long word)
{
	int k;

	word = ~word;
	k = 31;
	if (word & 0x0000ffff) { k -= 16; word <<= 16; }
	if (word & 0x00ff0000) { k -= 8;  word <<= 8;  }
	if (word & 0x0f000000) { k -= 4;  word <<= 4;  }
	if (word & 0x30000000) { k -= 2;  word <<= 2;  }
	if (word & 0x40000000) { k -= 1; }
	return k;
}

/*
 * __ffs = Find First Set bit in word.  Undefined if no bit is set,
 * so code should check against 0 first..
 */
static inline unsigned long __ffs(unsigned long word)
{
	int k;

	k = 31;
	if (word & 0x0000ffff) { k -= 16; word <<= 16; }
	if (word & 0x00ff0000) { k -= 8;  word <<= 8;  }
	if (word & 0x0f000000) { k -= 4;  word <<= 4;  }
	if (word & 0x30000000) { k -= 2;  word <<= 2;  }
	if (word & 0x40000000) { k -= 1; }
	return k;
}

/*
 * fls: find last bit set.
 */

#define fls(x) generic_fls(x)

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

#define ffs(x) generic_ffs(x)

#else

/*
 * On ARMv5 and above those functions can be implemented around
 * the clz instruction for much better code efficiency.
 */

#define fls(x) \
	( __builtin_constant_p(x) ? generic_fls(x) : \
	  ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) )
#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
#define __ffs(x) (ffs(x) - 1)
#define ffz(x) __ffs( ~(x) )

#endif

/*
 * Find first bit set in a 168-bit bitmap, where the first
 * 128 bits are unlikely to be set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
	unsigned long v;
	unsigned int off;

	/* Comma operator: v is loaded before the bound check, so on
	 * fall-through (off == 4) v holds b[4] for the final __ffs. */
	for (off = 0; v = b[off], off < 4; off++) {
		if (unlikely(v))
			break;
	}
	return __ffs(v) + off * 32;
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

/*
 * Ext2 is defined to use little-endian byte ordering.
 * These do not need to be atomic.
 */
#define ext2_set_bit(nr,p)			\
		__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_set_bit_atomic(lock,nr,p)          \
                test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_clear_bit(nr,p)			\
		__test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_clear_bit_atomic(lock,nr,p)        \
                test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_test_bit(nr,p)			\
		__test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_find_first_zero_bit(p,sz)		\
		_find_first_zero_bit_le(p,sz)
#define ext2_find_next_zero_bit(p,sz,off)	\
		_find_next_zero_bit_le(p,sz,off)

/*
 * Minix is defined to use little-endian byte ordering.
 * These do not need to be atomic.
 */
#define minix_set_bit(nr,p)			\
		__set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_test_bit(nr,p)			\
		__test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_test_and_set_bit(nr,p)		\
		__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_test_and_clear_bit(nr,p)		\
		__test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_find_first_zero_bit(p,sz)		\
		_find_first_zero_bit_le(p,sz)

#endif /* __KERNEL__ */

#endif /* __ASM_ARM_BITOPS_H */