/*
 * Copyright 1995, Russell King.
 *
 * Based on the arm32 version by RMK (and others). Their copyrights apply to
 * those parts.
 * Modified for arm26 by Ian Molton on 25/11/04
 *
 * bit 0 is the LSB of an "unsigned long" quantity.
 *
 * Please note that the code in this file should never be included
 * from user space.  Many of these are not implemented in assembler
 * since they would be too costly.  Also, they require privileged
 * instructions (which are not available from user mode) to ensure
 * that they are atomic.
 */

#ifndef __ASM_ARM_BITOPS_H
#define __ASM_ARM_BITOPS_H

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <asm/system.h>

#define smp_mb__before_clear_bit()      do { } while (0)
#define smp_mb__after_clear_bit()       do { } while (0)

/*
 * These functions are the basis of our bit ops.
 *
 * First, the atomic bitops. These use native endian.  They are made
 * atomic by disabling interrupts around the read-modify-write
 * sequence, which is sufficient on this uniprocessor architecture.
 */
static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
{
        unsigned long flags;
        unsigned long mask = 1UL << (bit & 31);

        p += bit >> 5;

        local_irq_save(flags);
        *p |= mask;
        local_irq_restore(flags);
}

static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
{
        unsigned long flags;
        unsigned long mask = 1UL << (bit & 31);

        p += bit >> 5;

        local_irq_save(flags);
        *p &= ~mask;
        local_irq_restore(flags);
}

static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
{
        unsigned long flags;
        unsigned long mask = 1UL << (bit & 31);

        p += bit >> 5;

        local_irq_save(flags);
        *p ^= mask;
        local_irq_restore(flags);
}

static inline int
____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
        unsigned long flags;
        unsigned int res;
        unsigned long mask = 1UL << (bit & 31);

        p += bit >> 5;

        local_irq_save(flags);
        res = *p;
        *p = res | mask;
        local_irq_restore(flags);

        return res & mask;
}

static inline int
____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
        unsigned long flags;
        unsigned int res;
        unsigned long mask = 1UL << (bit & 31);

        p += bit >> 5;

        local_irq_save(flags);
        res = *p;
        *p = res & ~mask;
        local_irq_restore(flags);

        return res & mask;
}

static inline int
____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
        unsigned long flags;
        unsigned int res;
        unsigned long mask = 1UL << (bit & 31);

        p += bit >> 5;

        local_irq_save(flags);
        res = *p;
        *p = res ^ mask;
        local_irq_restore(flags);

        return res & mask;
}
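
/*
 * Illustrative usage (a minimal sketch, not a definition from this
 * file): the test-and-set primitive above returns the *old* value of
 * the bit, which is what makes a simple bit-based lock possible.
 * Assuming a caller-owned word "lockword" with bit 0 used as the lock:
 *
 *      static unsigned long lockword;
 *
 *      if (!____atomic_test_and_set_bit(0, &lockword)) {
 *              ... critical section: we made the 0 -> 1 transition ...
 *              ____atomic_clear_bit(0, &lockword);
 *      }
 *
 * A non-zero return means the bit was already set, i.e. the "lock"
 * was already held by someone else.
 */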

/*
 * Now the non-atomic variants.  We let the compiler handle all
 * optimisations for these.  These are all _native_ endian.
 */
static inline void __set_bit(int nr, volatile unsigned long *p)
{
        p[nr >> 5] |= (1UL << (nr & 31));
}

static inline void __clear_bit(int nr, volatile unsigned long *p)
{
        p[nr >> 5] &= ~(1UL << (nr & 31));
}

static inline void __change_bit(int nr, volatile unsigned long *p)
{
        p[nr >> 5] ^= (1UL << (nr & 31));
}

static inline int __test_and_set_bit(int nr, volatile unsigned long *p)
{
        unsigned long oldval, mask = 1UL << (nr & 31);

        p += nr >> 5;

        oldval = *p;
        *p = oldval | mask;
        return oldval & mask;
}

static inline int __test_and_clear_bit(int nr, volatile unsigned long *p)
{
        unsigned long oldval, mask = 1UL << (nr & 31);

        p += nr >> 5;

        oldval = *p;
        *p = oldval & ~mask;
        return oldval & mask;
}

static inline int __test_and_change_bit(int nr, volatile unsigned long *p)
{
        unsigned long oldval, mask = 1UL << (nr & 31);

        p += nr >> 5;

        oldval = *p;
        *p = oldval ^ mask;
        return oldval & mask;
}

/*
 * This routine doesn't need to be atomic.
 */
static inline int __test_bit(int nr, const volatile unsigned long *p)
{
        return (p[nr >> 5] >> (nr & 31)) & 1UL;
}

/*
 * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
 */
extern void _set_bit_le(int nr, volatile unsigned long *p);
extern void _clear_bit_le(int nr, volatile unsigned long *p);
extern void _change_bit_le(int nr, volatile unsigned long *p);
extern int _test_and_set_bit_le(int nr, volatile unsigned long *p);
extern int _test_and_clear_bit_le(int nr, volatile unsigned long *p);
extern int _test_and_change_bit_le(int nr, volatile unsigned long *p);
extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
extern int _find_next_zero_bit_le(void *p, int size, int offset);
extern int _find_first_bit_le(const unsigned long *p, unsigned size);
extern int _find_next_bit_le(const unsigned long *p, int size, int offset);

/*
 * Both branches below are atomic: constant bit numbers use the inline
 * C implementations above (which the compiler can fold), while
 * variable bit numbers call the out-of-line assembly versions.
 */
#define ATOMIC_BITOP_LE(name,nr,p)              \
        (__builtin_constant_p(nr) ?             \
         ____atomic_##name(nr, p) :             \
         _##name##_le(nr,p))

/*
 * Note: no ____nonatomic_* helpers are defined in this file, so this
 * macro is unused here; non-atomic operations go through the __*
 * functions above directly.
 */
#define NONATOMIC_BITOP(name,nr,p)              \
        (____nonatomic_##name(nr, p))

/*
 * These are the little endian, atomic definitions.
 */
#define set_bit(nr,p)                   ATOMIC_BITOP_LE(set_bit,nr,p)
#define clear_bit(nr,p)                 ATOMIC_BITOP_LE(clear_bit,nr,p)
#define change_bit(nr,p)                ATOMIC_BITOP_LE(change_bit,nr,p)
#define test_and_set_bit(nr,p)          ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
#define test_and_clear_bit(nr,p)        ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p)       ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
#define test_bit(nr,p)                  __test_bit(nr,p)
#define find_first_zero_bit(p,sz)       _find_first_zero_bit_le(p,sz)
#define find_next_zero_bit(p,sz,off)    _find_next_zero_bit_le(p,sz,off)
#define find_first_bit(p,sz)            _find_first_bit_le(p,sz)
#define find_next_bit(p,sz,off)         _find_next_bit_le(p,sz,off)

#define WORD_BITOFF_TO_LE(x)            ((x))
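
/*
 * Illustrative usage (a minimal sketch): pairing the find and set
 * operations above is the usual pattern for allocation bitmaps.
 * Assuming a caller-owned 128-bit bitmap "map":
 *
 *      unsigned long map[4];
 *      int bit = find_first_zero_bit(map, 128);
 *
 *      if (bit < 128)
 *              set_bit(bit, map);
 *
 * The find functions return a value >= the size argument when no
 * suitable bit exists, so the result must be range-checked before
 * use.  The find/set sequence as a whole is not atomic; callers that
 * can race against each other need a lock around it.
 */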

/*
 * ffz = Find First Zero in word.  Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        int k;

        word = ~word;
        k = 31;
        if (word & 0x0000ffff) { k -= 16; word <<= 16; }
        if (word & 0x00ff0000) { k -= 8;  word <<= 8;  }
        if (word & 0x0f000000) { k -= 4;  word <<= 4;  }
        if (word & 0x30000000) { k -= 2;  word <<= 2;  }
        if (word & 0x40000000) { k -= 1; }
        return k;
}

/*
 * __ffs = Find First Set bit in word.  Undefined if no bit is set,
 * so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        int k;

        k = 31;
        if (word & 0x0000ffff) { k -= 16; word <<= 16; }
        if (word & 0x00ff0000) { k -= 8;  word <<= 8;  }
        if (word & 0x0f000000) { k -= 4;  word <<= 4;  }
        if (word & 0x30000000) { k -= 2;  word <<= 2;  }
        if (word & 0x40000000) { k -= 1; }
        return k;
}

/*
 * fls: find last bit set.
 */
#define fls(x)          generic_fls(x)
#define fls64(x)        generic_fls64(x)

/*
 * ffs: find first bit set.  This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x)          generic_ffs(x)

/*
 * Find first bit set in a 168-bit bitmap, where the first
 * 128 bits are unlikely to be set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
        unsigned long v;
        unsigned int off;

        /*
         * The comma in the loop condition loads b[off] before testing
         * off < 4, so if nothing is found in the first four words, v
         * holds b[4] on exit and the fifth word is still searched by
         * the __ffs() below.
         */
        for (off = 0; v = b[off], off < 4; off++) {
                if (unlikely(v))
                        break;
        }
        return __ffs(v) + off * 32;
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight32(x)    generic_hweight32(x)
#define hweight16(x)    generic_hweight16(x)
#define hweight8(x)     generic_hweight8(x)

/*
 * Ext2 is defined to use little-endian byte ordering.  The plain
 * forms do not need to be atomic; the *_atomic forms use the atomic
 * bitops (their lock argument is unused here).
 */
#define ext2_set_bit(nr,p)                      \
        __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_set_bit_atomic(lock,nr,p)          \
        test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_clear_bit(nr,p)                    \
        __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_clear_bit_atomic(lock,nr,p)        \
        test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_test_bit(nr,p)                     \
        __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define ext2_find_first_zero_bit(p,sz)          \
        _find_first_zero_bit_le(p,sz)
#define ext2_find_next_zero_bit(p,sz,off)       \
        _find_next_zero_bit_le(p,sz,off)

/*
 * Minix is defined to use little-endian byte ordering.
 * These do not need to be atomic.
 */
#define minix_set_bit(nr,p)                     \
        __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_test_bit(nr,p)                    \
        __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_test_and_set_bit(nr,p)            \
        __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_test_and_clear_bit(nr,p)          \
        __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
#define minix_find_first_zero_bit(p,sz)         \
        _find_first_zero_bit_le((unsigned long *)(p),sz)

#endif /* __KERNEL__ */

#endif /* __ASM_ARM_BITOPS_H */
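
/*
 * Worked examples for the bit-search helpers above, derived from
 * their definitions (illustrative only):
 *
 *      ffz(0x0000ffff)   == 16   bits 0-15 set; bit 16 is the first zero
 *      __ffs(0x00010000) == 16   bit 16 is the lowest set bit
 *      ffs(0x00010000)   == 17   ffs() numbers bits from 1, as in the
 *                                libc routine (man ffs)
 *
 * ffz(~0UL) and __ffs(0) are undefined, so callers must rule those
 * inputs out first.
 */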