#ifndef __ASM_SH64_BITOPS_H
#define __ASM_SH64_BITOPS_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/bitops.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 */

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/system.h>
/* For __swab32 */
#include <asm/byteorder.h>

static __inline__ void set_bit(int nr, volatile void * addr)
{
	int mask;
	volatile unsigned int *a = addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a |= mask;
	local_irq_restore(flags);
}

static inline void __set_bit(int nr, void *addr)
{
	int mask;
	unsigned int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
static inline void clear_bit(int nr, volatile unsigned long *a)
{
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}

static inline void __clear_bit(int nr, volatile unsigned long *a)
{
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}

static __inline__ void change_bit(int nr, volatile void * addr)
{
	int mask;
	volatile unsigned int *a = addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a ^= mask;
	local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile void * addr)
{
	int mask;
	volatile unsigned int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a ^= mask;
}

static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile unsigned int *a = addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile unsigned int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile unsigned int *a = addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile unsigned int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile unsigned int *a = addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore(flags);

	return retval;
}
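/*
 * Usage sketch (illustrative, not part of the original header): the
 * operations above are made atomic by disabling local interrupts,
 * while the __-prefixed variants below are non-atomic and rely on the
 * caller for serialization.  The flag word and bit number here are
 * hypothetical.
 *
 *	static unsigned long resource_flags;	// hypothetical flag word
 *
 *	if (!test_and_set_bit(0, &resource_flags)) {
 *		// bit 0 was clear; this caller has now claimed it
 *	}
 *	...
 *	clear_bit(0, &resource_flags);		// release the claim
 */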
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile unsigned int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
}

static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result, __d2, __d3;

	/*
	 * Count the trailing 1 bits: shift right until the low bit is
	 * clear.  tr0 is saved on entry and restored on exit.
	 */
	__asm__("gettr	tr0, %2\n\t"
		"pta	$+32, tr0\n\t"
		"andi	%1, 1, %3\n\t"
		"beq	%3, r63, tr0\n\t"
		"pta	$+4, tr0\n"
		"0:\n\t"
		"shlri.l	%1, 1, %1\n\t"
		"addi	%0, 1, %0\n\t"
		"andi	%1, 1, %3\n\t"
		"beqi	%3, 1, tr0\n"
		"1:\n\t"
		"ptabs	%2, tr0\n\t"
		: "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
		: "0" (0L), "1" (word));

	return result;
}

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Returns 0 if no bit is set, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	int r = 0;

	if (!word)
		return 0;
	if (!(word & 0xffff)) {
		word >>= 16;
		r += 16;
	}
	if (!(word & 0xff)) {
		word >>= 8;
		r += 8;
	}
	if (!(word & 0xf)) {
		word >>= 4;
		r += 4;
	}
	if (!(word & 3)) {
		word >>= 2;
		r += 2;
	}
	if (!(word & 1)) {
		word >>= 1;
		r += 1;
	}
	return r;
}
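/*
 * Illustrative note (not part of the original header): for any word
 * that actually contains a zero bit, ffz() is equivalent to finding
 * the first set bit of the complement:
 *
 *	ffz(0x0000000f) == 4	// bits 0-3 set, bit 4 is the first zero
 *	ffz(x) == __ffs(~x)	// whenever x != ~0UL
 *
 * while __ffs() itself binary-searches down through halfword, byte,
 * nibble and bit-pair boundaries.
 */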
/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)


static inline int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#define hweight32(x)	generic_hweight32(x)
#define hweight16(x)	generic_hweight16(x)
#define hweight8(x)	generic_hweight8(x)

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x)	generic_ffs(x)
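/*
 * Usage sketch (illustrative, not part of the original header): the
 * find_*_bit() helpers are typically used to walk a bitmap.  The
 * bitmap and its size below are hypothetical.
 *
 *	unsigned long map[4];		// hypothetical 128-bit bitmap
 *	unsigned long bit;
 *
 *	for (bit = find_first_bit(map, 128);
 *	     bit < 128;
 *	     bit = find_next_bit(map, 128, bit + 1)) {
 *		// process set bit 'bit'
 *	}
 */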
#ifdef __LITTLE_ENDIAN__
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
		find_next_zero_bit((addr), (size), (offset))
#else
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	local_irq_save(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	local_irq_restore(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	local_irq_save(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	local_irq_restore(flags);
	return retval;
}

static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	int mask;
	const volatile unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* tmp holds the little-endian value, so the shift below
		 * cannot be applied to it directly.  We could keep a
		 * big-endian value in tmp instead, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we swab the
		 * shifted mask instead:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above.  But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
#endif

#define ext2_set_bit_atomic(lock, nr, addr) \
	({ \
		int ret; \
		spin_lock(lock); \
		ret = ext2_set_bit((nr), (addr)); \
		spin_unlock(lock); \
		ret; \
	})

#define ext2_clear_bit_atomic(lock, nr, addr) \
	({ \
		int ret; \
		spin_lock(lock); \
		ret = ext2_clear_bit((nr), (addr)); \
		spin_unlock(lock); \
		ret; \
	})
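/*
 * Usage sketch (illustrative, not part of the original header): the
 * ext2_*_atomic() wrappers take a caller-supplied spinlock so that
 * on-disk (little-endian) bitmaps can be updated safely.  The lock
 * and bitmap below are hypothetical.
 *
 *	static DEFINE_SPINLOCK(bitmap_lock);	// hypothetical lock
 *	unsigned long bitmap[4];		// hypothetical on-disk bitmap
 *
 *	if (!ext2_set_bit_atomic(&bitmap_lock, 12, bitmap)) {
 *		// bit 12 was previously clear; we allocated it
 *	}
 */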
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#define fls(x) generic_fls(x)
#define fls64(x) generic_fls64(x)

#endif /* __KERNEL__ */

#endif /* __ASM_SH64_BITOPS_H */