/*
 * include/asm-xtensa/bitops.h
 *
 * Atomic operations that C can't guarantee us. Useful for resource counting etc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_BITOPS_H
#define _XTENSA_BITOPS_H

#ifdef __KERNEL__

#include <asm/processor.h>
#include <asm/byteorder.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
# error SMP not supported on this architecture
#endif

static __inline__ void set_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	*a |= mask;
	local_irq_restore(flags);
}

static __inline__ void __set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);

	*a |= mask;
}

static __inline__ void clear_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);

	*a &= ~mask;
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static __inline__ void change_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	*a ^= mask;
	local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);

	*a ^= mask;
}

static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	unsigned long retval;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	unsigned long retval;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);

	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	unsigned long retval;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *a;

	*a = old & ~mask;
	return (old & mask) != 0;
}

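/*
 * Note on bit addressing (illustrative worked example, not part of the
 * original comments): all of the operations above locate bit 'nr' in
 * 32-bit word (nr >> 5) of the bitmap, at position (nr & 0x1f) inside
 * that word.  For instance, nr = 70 selects word 2 (70 >> 5) and mask
 * 1 << 6 = 0x40 (since 70 & 0x1f = 6).  The non-underscore variants are
 * made atomic by masking local interrupts, which suffices only because
 * SMP is not supported (see the #error above).
 */
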
static __inline__ int
test_and_change_bit(int nr, volatile void * addr)
{
	unsigned long retval;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long flags;

	local_irq_save(flags);

	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore(flags);

	return retval;
}

/*
 * non-atomic version; can be reordered
 */

static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *a;

	*a = old ^ mask;
	return (old & mask) != 0;
}

static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return 1UL & (((const volatile unsigned int *)addr)[nr >> 5] >> (nr & 31));
}

#if XCHAL_HAVE_NSA

static __inline__ int __cntlz (unsigned long x)
{
	int lz;
	asm ("nsau %0, %1" : "=r" (lz) : "r" (x));
	return 31 - lz;
}

#else

static __inline__ int __cntlz (unsigned long x)
{
	unsigned long sum, x1, x2, x4, x8, x16;
	x1 = x & 0xAAAAAAAA;
	x2 = x & 0xCCCCCCCC;
	x4 = x & 0xF0F0F0F0;
	x8 = x & 0xFF00FF00;
	x16 = x & 0xFFFF0000;
	sum = x2 ? 2 : 0;
	sum += (x16 != 0) * 16;
	sum += (x8 != 0) * 8;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}

#endif

/*
 * ffz: Find first zero in word. Undefined if no zero exists.
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

static __inline__ int ffz(unsigned long x)
{
	if ((x = ~x) == 0)
		return 32;
	return __cntlz(x & -x);
}

/*
 * __ffs: Find first bit set in word. Return 0 for bit 0.
 */

static __inline__ int __ffs(unsigned long x)
{
	return __cntlz(x & -x);
}

/*
 * ffs: Find first bit set in word. This is defined the same way as
 * the libc and compiler builtin ffs routines, and therefore
 * differs in spirit from the ffz above (man ffs).
 */

static __inline__ int ffs(unsigned long x)
{
	return __cntlz(x & -x) + 1;
}

/*
 * fls: Find last (most-significant) bit set in word.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */

static __inline__ int fls (unsigned int x)
{
	return x ? __cntlz(x) + 1 : 0;
}
#define fls64(x)	generic_fls64(x)

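/*
 * Worked example for the primitives above (illustrative values only),
 * with x = 0x30 (bits 4 and 5 set):
 *
 *	__cntlz(x) == 5		index of the most-significant set bit
 *	__ffs(x)   == 4		index of the least-significant set bit
 *	ffs(x)     == 5		like __ffs(), but 1-based (libc convention)
 *	fls(x)     == 6		1-based index of the most-significant set bit
 *	ffz(0x0f)  == 4		index of the first zero bit
 */
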
static __inline__ int
find_next_bit(const unsigned long *addr, int size, int offset)
{
	const unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */

#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

static __inline__ int
find_next_zero_bit(const unsigned long *addr, int size, int offset)
{
	const unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp |= ~0UL >> (32 - offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *p++))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#ifdef __XTENSA_EL__
# define ext2_set_bit(nr,addr)			__test_and_set_bit((nr), (addr))
# define ext2_set_bit_atomic(lock,nr,addr)	test_and_set_bit((nr), (addr))
# define ext2_clear_bit(nr,addr)		__test_and_clear_bit((nr), (addr))
# define ext2_clear_bit_atomic(lock,nr,addr)	test_and_clear_bit((nr), (addr))
# define ext2_test_bit(nr,addr)			test_bit((nr), (addr))
# define ext2_find_first_zero_bit(addr, size)	find_first_zero_bit((addr), (size))
# define ext2_find_next_zero_bit(addr, size, offset) \
		find_next_zero_bit((addr), (size), (offset))
#elif defined(__XTENSA_EB__)
# define ext2_set_bit(nr,addr)			__test_and_set_bit((nr) ^ 0x18, (addr))
# define ext2_set_bit_atomic(lock,nr,addr)	test_and_set_bit((nr) ^ 0x18, (addr))
# define ext2_clear_bit(nr,addr)		__test_and_clear_bit((nr) ^ 0x18, (addr))
# define ext2_clear_bit_atomic(lock,nr,addr)	test_and_clear_bit((nr) ^ 0x18, (addr))
# define ext2_test_bit(nr,addr)			test_bit((nr) ^ 0x18, (addr))
# define ext2_find_first_zero_bit(addr, size) \
		ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 *	tmp = __swab32(*(p++));
		 *	tmp |= ~0UL >> (32 - offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift instead:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32 - offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}

#else
# error processor byte order undefined!
#endif

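/*
 * Note on the big-endian ext2 helpers above (illustrative, not part of
 * the original comments): XORing the bit number with 0x18 flips bits 3
 * and 4 of the index, which within a 32-bit word maps bits 0..7 to
 * 24..31 and bits 8..15 to 16..23 (and vice versa).  That is exactly
 * the byte swap needed so that the little-endian on-disk ext2 bit
 * numbering is preserved when the bitmap is accessed word-by-word on a
 * big-endian CPU.  For example, disk bit 3 (byte 0, bit 3) becomes CPU
 * word bit 3 ^ 0x18 = 27, i.e. bit 3 of the byte that a big-endian load
 * places in the top eight bits of the word.
 */
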
#define hweight32(x)	generic_hweight32(x)
#define hweight16(x)	generic_hweight16(x)
#define hweight8(x)	generic_hweight8(x)

/*
 * Find the first bit set in a 140-bit bitmap.
 * The first 100 bits are unlikely to be set.
 */

static inline int sched_find_first_bit(const unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}


/* Bitmap functions for the minix filesystem. */

#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)

#endif	/* __KERNEL__ */

#endif	/* _XTENSA_BITOPS_H */