Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at commit c9a28fa7b9ac19b676deefa0a171ce7df8755c08 (446 lines, 11 kB)
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 * O(1) scheduler patch
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations to
 * operate on hw-defined data-structures, so we can't easily change these
 * operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

/*
 * clear_bit() has "acquire" semantics.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}
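
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): typical use of the atomic set_bit()/clear_bit() pair on a
 * flags word shared between an interrupt handler and a thread.  The
 * names `rx_flags' and `RX_PENDING' are hypothetical; the block is
 * kept under "#if 0" so it is never compiled.
 */
#if 0
static volatile unsigned long rx_flags;	/* shared flag word */
#define RX_PENDING	0

static void mark_rx_pending(void)
{
	/* Atomic read-modify-write via the cmpxchg_acq retry loop
	 * above; safe against concurrent updaters of rx_flags. */
	set_bit(RX_PENDING, &rx_flags);
}

static void ack_rx_pending(void)
{
	/* On ia64, clear_bit() has acquire semantics (cmpxchg_acq);
	 * add smp_mb__before_clear_bit() if used for unlocking. */
	clear_bit(RX_PENDING, &rx_flags);
}
#endif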
/**
 * clear_bit_unlock - Clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and may not be reordered.  It does
 * contain a memory barrier suitable for unlock type operations.
 */
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_rel(m, old, new) != old);
}

/**
 * __clear_bit_unlock - Non-atomically clear a bit with release
 *
 * This is like clear_bit_unlock, but the implementation uses a store
 * with release semantics.  See also __raw_spin_unlock().
 */
static __inline__ void
__clear_bit_unlock(int nr, volatile void *addr)
{
	__u32 mask, new;
	volatile __u32 *m;

	m = (volatile __u32 *)addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	new = *m & mask;
	barrier();
	ia64_st4_rel_nta(m, new);
}

/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
	volatile __u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	*p &= ~m;
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit() on ia64.
 */
#define test_and_set_bit_lock test_and_set_bit
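
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): building a simple claim/release flag from
 * test_and_set_bit_lock() and clear_bit_unlock().  `dev_flags' and
 * `DEV_BUSY' are hypothetical; the block is never compiled.
 */
#if 0
static volatile unsigned long dev_flags;
#define DEV_BUSY	0

static int try_claim_device(void)
{
	/* test_and_set_bit_lock() returns the old bit value, so a
	 * zero return means the bit was clear and we won the race.
	 * The underlying cmpxchg_acq orders later accesses after
	 * the successful claim. */
	return !test_and_set_bit_lock(DEV_BUSY, &dev_flags);
}

static void release_device(void)
{
	/* cmpxchg_rel provides the release ordering an unlock needs. */
	clear_bit_unlock(DEV_BUSY, &dev_flags);
}
#endif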
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = *p & m;

	*p &= ~m;
	return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/*
 * WARNING: non-atomic version.
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
	__u32 old, bit = (1 << (nr & 31));
	__u32 *m = (__u32 *) addr + (nr >> 5);

	old = *m;
	*m = old ^ bit;
	return (old & bit) != 0;
}

static __inline__ int
test_bit (int nr, const volatile void *addr)
{
	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt(x & (~x - 1));
	return result;
}

/**
 * __ffs - find the first set bit in a word
 * @x: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt((x - 1) & ~x);
	return result;
}
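
/*
 * Editor's walk-through of the popcount trick used by ffz()/__ffs()
 * above (not part of the original header).  For x = 0xb8 (1011 1000):
 * (x-1) & ~x isolates a run of ones below the lowest set bit, so
 * popcnt gives its index: 0xb7 & ~0xb8 = 0x07, popcnt(0x07) = 3.
 * Likewise x & (~x - 1) marks the bits below the lowest zero bit.
 * A portable self-check, with __builtin_popcountl standing in for the
 * ia64_popcnt intrinsic; never compiled here:
 */
#if 0
#include <assert.h>

static void ffs_selfcheck(void)
{
	unsigned long x = 0xb8;	/* lowest set bit = 3, lowest zero bit = 0 */

	assert(__builtin_popcountl((x - 1) & ~x) == 3);	/* __ffs(x) */
	assert(__builtin_popcountl(x & (~x - 1)) == 0);	/* ffz(x) */
}
#endif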
#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
	long double d = x;
	long exp;

	exp = ia64_getf_exp(d);
	return exp - 0xffff;
}

/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int
fls (int t)
{
	unsigned long x = t & 0xffffffffu;

	if (!x)
		return 0;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return ia64_popcnt(x);
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs: find first bit set.  This is defined the same way as the libc and
 * compiler builtin ffs routines, therefore differs in spirit from the above
 * ffz (man ffs): it operates on "int" values only and the result value is
 * the bit number + 1.  ffs(0) is defined to return zero.
 */
#define ffs(x)	__builtin_ffs(x)

/*
 * hweightN: returns the Hamming weight (i.e., the number
 * of bits set) of an N-bit word
 */
static __inline__ unsigned long
hweight64 (unsigned long x)
{
	unsigned long result;
	result = ia64_popcnt(x);
	return result;
}

#define hweight32(x)	(unsigned int) hweight64((x) & 0xfffffffful)
#define hweight16(x)	(unsigned int) hweight64((x) & 0xfffful)
#define hweight8(x)	(unsigned int) hweight64((x) & 0xfful)

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
#define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)

#include <asm-generic/bitops/minix.h>
#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */
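
/*
 * Editor's note (appended, not part of the original header): how the
 * fls() smearing loop works for fls(9).  x = 9 = 0b1001; the shift-or
 * cascade propagates the highest set bit downward, giving x = 0b1111,
 * and popcnt(0b1111) = 4, matching the documented fls(9) == 4.  A
 * portable self-check with __builtin_popcount standing in for
 * ia64_popcnt; never compiled here:
 */
#if 0
#include <assert.h>

static int generic_fls32(unsigned int x)
{
	/* Smear the highest set bit into all lower positions, then
	 * count the resulting ones. */
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return __builtin_popcount(x);
}

static void fls_selfcheck(void)
{
	assert(generic_fls32(9) == 4);
	assert(generic_fls32(0) == 0);	/* fls(0) is defined as 0 */
}
#endif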