#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#include <asm/compiler.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */

static inline void
set_bit(unsigned long nr, volatile void * addr)
{
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1:     ldl_l %0,%3\n"
        "       bis %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m)
        :"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
        int *m = ((int *) addr) + (nr >> 5);

        *m |= 1 << (nr & 31);
}

#define smp_mb__before_clear_bit()      smp_mb()
#define smp_mb__after_clear_bit()       smp_mb()

static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1:     ldl_l %0,%3\n"
        "       bic %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m)
        :"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
        int *m = ((int *) addr) + (nr >> 5);

        *m &= ~(1 << (nr & 31));
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1:     ldl_l %0,%3\n"
        "       xor %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m)
        :"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
        int *m = ((int *) addr) + (nr >> 5);

        *m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
        unsigned long oldbit;
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1:     ldl_l %0,%4\n"
        "       and %0,%3,%2\n"
        "       bne %2,2f\n"
        "       xor %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,3f\n"
        "2:\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
        :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

        return oldbit != 0;
}
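/*
 * Illustrative sketch (not part of the original header, not compiled):
 * each ldl_l/stl_c sequence above is a load-locked/store-conditional
 * retry loop.  stl_c writes 0 into its source register when the
 * conditional store fails, so the "beq %0,3f" falls through to an
 * out-of-line "br 1b" that restarts the sequence.  Assuming a GCC
 * that provides __sync_val_compare_and_swap, the test_and_set_bit
 * logic corresponds roughly to:
 */
#if 0
static inline int
test_and_set_bit_sketch(unsigned long nr, volatile void *addr)
{
        int *m = ((int *) addr) + (nr >> 5);
        int mask = 1 << (nr & 31);
        int old, new;

        do {
                old = *m;
                if (old & mask)         /* fast path: like "bne %2,2f" */
                        return 1;       /* bit already set, skip store */
                new = old | mask;
        } while (__sync_val_compare_and_swap(m, old, new) != old);
        return 0;
}
#endif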
/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        int *m = ((int *) addr) + (nr >> 5);
        int old = *m;

        *m = old | mask;
        return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
        unsigned long oldbit;
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1:     ldl_l %0,%4\n"
        "       and %0,%3,%2\n"
        "       beq %2,2f\n"
        "       xor %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,3f\n"
        "2:\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
        :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

        return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        int *m = ((int *) addr) + (nr >> 5);
        int old = *m;

        *m = old & ~mask;
        return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
        unsigned long oldbit;
        unsigned long temp;
        int *m = ((int *) addr) + (nr >> 5);

        __asm__ __volatile__(
        "1:     ldl_l %0,%4\n"
        "       and %0,%3,%2\n"
        "       xor %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,3f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
        :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

        return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        int *m = ((int *) addr) + (nr >> 5);
        int old = *m;

        *m = old ^ mask;
        return (old & mask) != 0;
}

static inline int
test_bit(int nr, const volatile void * addr)
{
        return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits. Due to the nature of large
 * constants on the Alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
        unsigned long sum, x1, x2, x4;

        x = ~x & -~x;           /* set first 0 bit, clear others */
        x1 = x & 0xAA;
        x2 = x & 0xCC;
        x4 = x & 0xF0;
        sum = x2 ? 2 : 0;
        sum += (x4 != 0) * 4;
        sum += (x1 != 0);

        return sum;
}

static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
        /* Whee.  EV67 can calculate it directly.  */
        return __kernel_cttz(~word);
#else
        unsigned long bits, qofs, bofs;

        bits = __kernel_cmpbge(word, ~0UL);
        qofs = ffz_b(bits);
        bits = __kernel_extbl(word, qofs);
        bofs = ffz_b(bits);

        return qofs*8 + bofs;
#endif
}
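/*
 * Worked example for the non-EV67 ffz() path above, for illustration:
 * with word = 0x00000000000000ff, cmpbge(word, ~0UL) sets bit i of its
 * result iff byte i of word is 0xff, giving bits = 0x01.  ffz_b(0x01)
 * finds the first zero bit of that byte mask, so qofs = 1: byte 1 is
 * the lowest byte still containing a zero.  extbl(word, 1) extracts
 * that byte (0x00), ffz_b(0x00) gives bofs = 0, and the result is
 * qofs*8 + bofs = 8 -- the first zero bit of 0xff is indeed bit 8.
 */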
/*
 * __ffs = Find first set bit in word.  Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
        /* Whee.  EV67 can calculate it directly.  */
        return __kernel_cttz(word);
#else
        unsigned long bits, qofs, bofs;

        bits = __kernel_cmpbge(0, word);
        qofs = ffz_b(bits);
        bits = __kernel_extbl(word, qofs);
        bofs = ffz_b(~bits);

        return qofs*8 + bofs;
#endif
}

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
        int result = __ffs(word) + 1;
        return word ? result : 0;
}

/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls(int word)
{
        return 64 - __kernel_ctlz(word & 0xffffffff);
}
#else
#include <asm-generic/bitops/fls.h>
#endif
#include <asm-generic/bitops/fls64.h>

/* Compute the floor and ceiling of log2 for the given integer. */
static inline long floor_log2(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
        return 63 - __kernel_ctlz(word);
#else
        long bit;
        for (bit = -1; word; bit++)
                word >>= 1;
        return bit;
#endif
}

static inline long ceil_log2(unsigned long word)
{
        long bit = floor_log2(word);
        return bit + (word > (1UL << bit));
}

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long hweight64(unsigned long w)
{
        return __kernel_ctpop(w);
}

#define hweight32(x)    (unsigned int) hweight64((x) & 0xfffffffful)
#define hweight16(x)    (unsigned int) hweight64((x) & 0xfffful)
#define hweight8(x)     (unsigned int) hweight64((x) & 0xfful)
#else
#include <asm-generic/bitops/hweight.h>
#endif

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline unsigned long
sched_find_first_bit(unsigned long b[3])
{
        unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
        unsigned long ofs;

        ofs = (b1 ? 64 : 128);
        b1 = (b1 ? b1 : b2);
        ofs = (b0 ? 0 : ofs);
        b0 = (b0 ? b0 : b1);

        return __ffs(b0) + ofs;
}

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(l,n,a)      test_and_set_bit(n,a)
#define ext2_clear_bit_atomic(l,n,a)    test_and_clear_bit(n,a)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */
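/*
 * Worked example for sched_find_first_bit() above, for illustration:
 * with b[0] = 0, b[1] = 0x10, b[2] = 0, the first pair of selects
 * yields ofs = 64 and b1 = 0x10 (b[1] is the first nonzero word);
 * the second pair keeps those values because b0 is zero, so the
 * result is __ffs(0x10) + 64 = 4 + 64 = 68.  The conditional selects
 * can compile to Alpha cmov instructions, keeping the search free of
 * hard-to-predict branches.
 */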