#ifndef _H8300_BITOPS_H
#define _H8300_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 * Copyright 2002, Yoshinori Sato
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>

#ifdef __KERNEL__

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so callers should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result;

	/* shift right, counting shifts, until a 0 bit falls into carry */
	result = -1;
	__asm__("1:\n\t"
		"shlr.l %2\n\t"
		"adds #1,%0\n\t"
		"bcs 1b"
		: "=r" (result)
		: "0" (result),"r" (word));
	return result;
}

#define H8300_GEN_BITOP_CONST(OP,BIT)				\
	case BIT:						\
		__asm__(OP " #" #BIT ",@%0"::"r"(b_addr):"memory"); \
		break;

#define H8300_GEN_BITOP(FNAME,OP)				\
static __inline__ void FNAME(int nr, volatile unsigned long* addr) \
{								\
	volatile unsigned char *b_addr;				\
	/* select the byte that holds bit nr (big-endian word layout) */ \
	b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \
	if (__builtin_constant_p(nr)) {				\
		switch(nr & 7) {				\
			H8300_GEN_BITOP_CONST(OP,0)		\
			H8300_GEN_BITOP_CONST(OP,1)		\
			H8300_GEN_BITOP_CONST(OP,2)		\
			H8300_GEN_BITOP_CONST(OP,3)		\
			H8300_GEN_BITOP_CONST(OP,4)		\
			H8300_GEN_BITOP_CONST(OP,5)		\
			H8300_GEN_BITOP_CONST(OP,6)		\
			H8300_GEN_BITOP_CONST(OP,7)		\
		}						\
	} else {						\
		__asm__(OP " %w0,@%1"::"r"(nr),"r"(b_addr):"memory"); \
	}							\
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

H8300_GEN_BITOP(set_bit	  ,"bset")
H8300_GEN_BITOP(clear_bit ,"bclr")
H8300_GEN_BITOP(change_bit,"bnot")
#define __set_bit(nr,addr)    set_bit((nr),(addr))
#define __clear_bit(nr,addr)  clear_bit((nr),(addr))
#define __change_bit(nr,addr) change_bit((nr),(addr))

#undef H8300_GEN_BITOP
#undef H8300_GEN_BITOP_CONST

static __inline__ int test_bit(int nr, const unsigned long* addr)
{
	return (*((volatile unsigned char *)addr +
	       ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0;
}

#define __test_bit(nr, addr) test_bit(nr, addr)
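/*
 * Illustrative note (not part of the original file): the ((nr >> 3) ^ 3)
 * arithmetic above selects, on this big-endian CPU, the byte of the
 * 32-bit word that holds bit nr under the kernel's little-endian bit
 * numbering.  A hypothetical portable equivalent of test_bit() that
 * reads the whole word instead would be:
 *
 *	static inline int generic_test_bit(int nr, const unsigned long *addr)
 *	{
 *		return (addr[nr >> 5] >> (nr & 31)) & 1;
 *	}
 *
 * Addressing a single byte lets the H8/300 bit instructions
 * (bset/bclr/bnot/btst/bld) operate directly on an @ERd byte operand.
 */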
/*
 * The test-and-modify helpers below mask interrupts around the
 * read-modify-write by setting the I bit in CCR (orc #0x80,ccr) and
 * restoring the saved CCR afterwards; the __-prefixed variants skip
 * that and are therefore not interrupt-safe.
 */
#define H8300_GEN_TEST_BITOP_CONST_INT(OP,BIT)			\
	case BIT:						\
		__asm__("stc ccr,%w1\n\t"			\
			"orc #0x80,ccr\n\t"			\
			"bld #" #BIT ",@%4\n\t"			\
			OP " #" #BIT ",@%4\n\t"			\
			"rotxl.l %0\n\t"			\
			"ldc %w1,ccr"				\
			: "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \
			: "0" (retval),"r" (b_addr)		\
			: "memory");				\
		break;

#define H8300_GEN_TEST_BITOP_CONST(OP,BIT)			\
	case BIT:						\
		__asm__("bld #" #BIT ",@%3\n\t"			\
			OP " #" #BIT ",@%3\n\t"			\
			"rotxl.l %0\n\t"			\
			: "=r"(retval),"=m"(*b_addr)		\
			: "0" (retval),"r" (b_addr)		\
			: "memory");				\
		break;

#define H8300_GEN_TEST_BITOP(FNNAME,OP)				\
static __inline__ int FNNAME(int nr, volatile void * addr)	\
{								\
	int retval = 0;						\
	char ccrsave;						\
	volatile unsigned char *b_addr;				\
	b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \
	if (__builtin_constant_p(nr)) {				\
		switch(nr & 7) {				\
			H8300_GEN_TEST_BITOP_CONST_INT(OP,0)	\
			H8300_GEN_TEST_BITOP_CONST_INT(OP,1)	\
			H8300_GEN_TEST_BITOP_CONST_INT(OP,2)	\
			H8300_GEN_TEST_BITOP_CONST_INT(OP,3)	\
			H8300_GEN_TEST_BITOP_CONST_INT(OP,4)	\
			H8300_GEN_TEST_BITOP_CONST_INT(OP,5)	\
			H8300_GEN_TEST_BITOP_CONST_INT(OP,6)	\
			H8300_GEN_TEST_BITOP_CONST_INT(OP,7)	\
		}						\
	} else {						\
		__asm__("stc ccr,%w1\n\t"			\
			"orc #0x80,ccr\n\t"			\
			"btst %w5,@%4\n\t"			\
			OP " %w5,@%4\n\t"			\
			"beq 1f\n\t"				\
			"inc.l #1,%0\n"				\
			"1:\n\t"				\
			"ldc %w1,ccr"				\
			: "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \
			: "0" (retval),"r" (b_addr),"r"(nr)	\
			: "memory");				\
	}							\
	return retval;						\
}								\
								\
static __inline__ int __ ## FNNAME(int nr, volatile void * addr) \
{								\
	int retval = 0;						\
	volatile unsigned char *b_addr;				\
	b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \
	if (__builtin_constant_p(nr)) {				\
		switch(nr & 7) {				\
			H8300_GEN_TEST_BITOP_CONST(OP,0)	\
			H8300_GEN_TEST_BITOP_CONST(OP,1)	\
			H8300_GEN_TEST_BITOP_CONST(OP,2)	\
			H8300_GEN_TEST_BITOP_CONST(OP,3)	\
			H8300_GEN_TEST_BITOP_CONST(OP,4)	\
			H8300_GEN_TEST_BITOP_CONST(OP,5)	\
			H8300_GEN_TEST_BITOP_CONST(OP,6)	\
			H8300_GEN_TEST_BITOP_CONST(OP,7)	\
		}						\
	} else {						\
		__asm__("btst %w4,@%3\n\t"			\
			OP " %w4,@%3\n\t"			\
			"beq 1f\n\t"				\
			"inc.l #1,%0\n"				\
			"1:"					\
			: "=r"(retval),"=m"(*b_addr)		\
			: "0" (retval),"r" (b_addr),"r"(nr)	\
			: "memory");				\
	}							\
	return retval;						\
}

H8300_GEN_TEST_BITOP(test_and_set_bit,	  "bset")
H8300_GEN_TEST_BITOP(test_and_clear_bit,  "bclr")
H8300_GEN_TEST_BITOP(test_and_change_bit, "bnot")
#undef H8300_GEN_TEST_BITOP_CONST
#undef H8300_GEN_TEST_BITOP_CONST_INT
#undef H8300_GEN_TEST_BITOP

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#define ffs(x) generic_ffs(x)

/* like ffz(), but finds the first set bit; undefined for word == 0 */
static __inline__ unsigned long __ffs(unsigned long word)
{
	unsigned long result;

	result = -1;
	__asm__("1:\n\t"
		"shlr.l %2\n\t"
		"adds #1,%0\n\t"
		"bcc 1b"
		: "=r" (result)
		: "0"(result),"r"(word));
	return result;
}

static __inline__ int find_next_zero_bit (const unsigned long * addr, int size, int offset)
{
	/* offset >> 3 is a byte offset; round down to a word boundary */
	unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL >> size;
found_middle:
	return result + ffz(tmp);
}

static __inline__ unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)
		return result + size;
found_middle:
	return result + __ffs(tmp);
}

#define find_first_bit(addr, size) find_next_bit(addr, size, 0)
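/*
 * Usage sketch (illustrative, not from the original file): a typical
 * allocate-a-free-slot loop built from the primitives above.  The
 * 64-slot map, alloc_slot() name, and -1 error return are hypothetical.
 * test_and_set_bit() re-checks the bit atomically in case another
 * context claimed it between the search and the update:
 *
 *	static unsigned long slot_map[2];	// 64 slots, 0 = free
 *
 *	static int alloc_slot(void)
 *	{
 *		int slot;
 *		do {
 *			slot = find_first_zero_bit(slot_map, 64);
 *			if (slot >= 64)
 *				return -1;	// map full
 *		} while (test_and_set_bit(slot, slot_map));
 *		return slot;
 *	}
 */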
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)

/*
 * ext2 bitmaps are little-endian on disk, so these helpers index
 * bytes directly (byte nr >> 3, bit nr & 7) instead of using the
 * native big-endian word layout.
 */
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	local_irq_save(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	local_irq_restore(flags);
	return retval;
}
#define ext2_set_bit_atomic(lock, nr, addr)	ext2_set_bit(nr, addr)

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	local_irq_save(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	local_irq_restore(flags);
	return retval;
}
#define ext2_clear_bit_atomic(lock, nr, addr)	ext2_clear_bit(nr, addr)

static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	int mask;
	const volatile unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* tmp holds the raw little-endian word, so the plain
		 * shift would mask the wrong bits.  We could keep a
		 * byte-swapped (big-endian) value in tmp instead:
		 *
		 *	tmp = __swab32(*(p++));
		 *	tmp |= ~0UL >> (32-offset);
		 *
		 * but swapping every word would cost performance, so
		 * we swap the constant mask instead:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift
	 * (see above).  But we have to swab tmp for ffz() anyway, so
	 * we might as well do both swaps here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
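/*
 * Illustrative example (the buffer contents are hypothetical): because
 * ext2 bit numbering is byte-based it is endian-independent, while the
 * native helpers above use the big-endian word layout:
 *
 *	unsigned char buf[4] = { 0x01, 0x00, 0x00, 0x80 };
 *
 *	ext2_test_bit(0, buf);			// 1: bit 0 of byte 0
 *	ext2_test_bit(31, buf);			// 1: bit 7 of byte 3
 *	test_bit(31, (unsigned long *)buf);	// 0: native numbering
 *						//    puts bit 31 in byte 0
 */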
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#define fls(x) generic_fls(x)

#endif /* _H8300_BITOPS_H */