Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.17-rc6 (278 lines, 8.1 kB)
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/config.h>
#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.  prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
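/*
 * Illustrative user-space sketch of the hashed-lock scheme above; it is
 * not part of this header, and every name in it is invented for the
 * example.  A counter's address is hashed to one of a small array of
 * POSIX mutexes, one per (assumed) 64-byte cache line, so unrelated
 * counters rarely contend on the same lock - the same idea ATOMIC_HASH()
 * implements with raw spinlocks.
 */
#include <pthread.h>

#define EXAMPLE_HASH_SIZE	4
#define EXAMPLE_LINE_BYTES	64	/* stand-in for L1_CACHE_BYTES */

static pthread_mutex_t example_hash[EXAMPLE_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Map an address to a lock, exactly like ATOMIC_HASH() above. */
static pthread_mutex_t *example_lock_for(const void *addr)
{
	unsigned long a = (unsigned long)addr;

	return &example_hash[(a / EXAMPLE_LINE_BYTES) & (EXAMPLE_HASH_SIZE - 1)];
}

/* A locked add in the spirit of __atomic_add_return() further down. */
static int example_add_return(int i, int *counter)
{
	pthread_mutex_t *m = example_lock_for(counter);
	int ret;

	pthread_mutex_lock(m);
	ret = (*counter += i);
	pthread_mutex_unlock(m);
	return ret;
}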
/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);


/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef __LP64__
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch(size) {
#ifdef __LP64__
	case 8: return __xchg64(x,(unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}


/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could __LP64__ code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))


#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef __LP64__
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
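/*
 * Illustrative usage sketch for the two macros above; not part of this
 * header, and 'example_state', example_try_claim() and example_release()
 * are invented names.  cmpxchg() publishes a transition only if the old
 * value is still in place, while xchg() swaps unconditionally.
 */
static int example_state;		/* 0 = idle, 1 = busy */

static __inline__ int example_try_claim(void)
{
	/* Move 0 -> 1 atomically; cmpxchg() returns the value it saw,
	 * so seeing 0 means we won the race. */
	return cmpxchg(&example_state, 0, 1) == 0;
}

static __inline__ void example_release(void)
{
	/* Unconditionally store 0, discarding whatever was there. */
	(void)xchg(&example_state, 0);
}

/* An operand size with no matching case in __xchg() or __cmpxchg()
 * (a 2-byte short, for instance) leaves the "bug catcher" reference in
 * place, so the build fails at link time instead of silently touching
 * adjacent bytes. */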
/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
 */

typedef struct { volatile int counter; } atomic_t;

/* It's possible to reduce all atomic operations to either
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_add(i,v)	((void)(__atomic_add_return( ((int)i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int)i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( ((int)i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-((int)i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	((atomic_t) { (i) })

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef __LP64__

typedef struct { volatile s64 counter; } atomic64_t;

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return v->counter;
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)i),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)i),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)i),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)i),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

#endif /* __LP64__ */

#include <asm-generic/atomic.h>

#endif /* _ASM_PARISC_ATOMIC_H_ */
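A typical consumer of this interface is a reference count. The sketch below is illustrative only - the structure and function names are invented and example_free() is assumed to exist - but it uses nothing beyond the operations defined above: atomic_inc() takes a reference, atomic_dec_and_test() detects the final put, and atomic_inc_not_zero() refuses to take a reference on an object whose count has already dropped to zero.

struct example_obj {
	atomic_t refcount;	/* typically started at ATOMIC_INIT(1) */
	/* payload... */
};

extern void example_free(struct example_obj *obj);	/* hypothetical destructor */

static void example_get(struct example_obj *obj)
{
	/* Caller must already hold a reference. */
	atomic_inc(&obj->refcount);
}

static int example_tryget(struct example_obj *obj)
{
	/* Fails (returns 0) once the count has hit zero, so a dying
	 * object cannot be resurrected from a lookup path. */
	return atomic_inc_not_zero(&obj->refcount);
}

static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount))
		example_free(obj);	/* last reference is gone */
}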