/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);


/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __xchg64(x,(unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}


/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))


#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
	({								\
		__typeof__(*(ptr)) _o_ = (o);				\
		__typeof__(*(ptr)) _n_ = (n);				\
		(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				    (unsigned long)_n_, sizeof(*(ptr)));\
	})

/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
 */

typedef struct { volatile int counter; } atomic_t;

/* It's possible to reduce all atomic operations to __atomic_add_return,
 * atomic_set and atomic_read (the latter is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
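 *
 * Illustrative usage sketch, not part of the original header: assuming a
 * hypothetical "struct foo { atomic_t refs; } *p", take a reference only
 * while the count is still non-zero, i.e. while the object is still live:
 *
 *	if (!atomic_add_unless(&p->refs, 1, 0))
 *		return NULL;	(the refcount had already dropped to zero)
 *
 * atomic_inc_not_zero() below expands to exactly this call.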
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_add(i,v)	((void)(__atomic_add_return( ((int)i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int)i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( ((int)i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-((int)i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	((atomic_t) { (i) })

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef CONFIG_64BIT

typedef struct { volatile s64 counter; } atomic64_t;

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return v->counter;
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)i),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)i),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)i),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)i),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

#endif /* CONFIG_64BIT */

#include <asm-generic/atomic.h>

#endif /* _ASM_PARISC_ATOMIC_H_ */
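
/*
 * Illustrative usage sketch, not part of the original header.  It only
 * exercises the interfaces defined above; the names "count", "flags_word"
 * and "do_cleanup" are hypothetical.  On SMP every operation grabs one of
 * the four hashed spinlocks chosen by ATOMIC_HASH(); on UP it merely
 * disables interrupts around the update.
 *
 *	static atomic_t count = ATOMIC_INIT(0);
 *	static unsigned long flags_word;
 *	int old;
 *	unsigned long prev;
 *
 *	atomic_inc(&count);				(count += 1)
 *	if (atomic_dec_and_test(&count))		(true when count hits 0)
 *		do_cleanup();
 *	old  = atomic_cmpxchg(&count, 0, 1);		(0 -> 1, returns old value)
 *	prev = xchg(&flags_word, 0UL);			(swap in 0, returns old word)
 */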