/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 *
 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

#include <linux/config.h>

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__

#define ATOMIC_INIT(i)	{ (i) }

extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);

#define atomic_read(v)		((v)->counter)

#define atomic_add(i, v)	((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v)	((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v)		((void)__atomic_add_return(        1, (v)))
#define atomic_dec(v)		((void)__atomic_add_return(       -1, (v)))

#define atomic_add_return(i, v)	(__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v)	(__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v)	(__atomic_add_return(        1, (v)))
#define atomic_dec_return(v)	(__atomic_add_return(       -1, (v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

/* This is the old 24-bit implementation.  It's still used internally
 * by some sparc-specific code, notably the semaphore implementation.
 */
typedef struct { volatile int counter; } atomic24_t;

#ifndef CONFIG_SMP

#define ATOMIC24_INIT(i)	{ (i) }
#define atomic24_read(v)	((v)->counter)
#define atomic24_set(v, i)	(((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is that you embed the spin lock byte within
 * the word, using the low byte so signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic24_t
 *	----------------------------------------
 *	 31                          8 7      0
 */

#define ATOMIC24_INIT(i)	{ ((i) << 8) }

static inline int atomic24_read(const atomic24_t *v)
{
	int ret = v->counter;

	/* Spin until the lock byte is clear; only then do the
	 * upper 24 bits hold a stable counter value.
	 */
	while (ret & 0xff)
		ret = v->counter;

	return ret >> 8;
}

#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
#endif
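
/* To make the layout above concrete: a counter value of -5 is stored
 * as (-5) << 8, i.e. 0xfffffb00, with the lock byte (bits 7:0) clear.
 * A small sketch of the encoding in plain C follows; it is for
 * illustration only, not part of the API, and assumes a 32-bit int
 * and an arithmetic right shift on signed values, as sparc/gcc does:
 *
 *	int word = (-5) << 8;	   (0xfffffb00: counter -5, unlocked)
 *	int val  = word >> 8;	   (arithmetic shift: val == -5 again)
 *	int lock = word & 0xff;	   (nonzero while some CPU holds the lock)
 *
 * The out-of-line code can take the lock with ldstub on the low byte
 * and release it simply by storing the new (value << 8) word, since
 * that store writes a zero lock byte back at the same time.
 */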

static inline int __atomic24_add(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	/* ___atomic24_add (arch/sparc/lib/atomic.S) uses a private
	 * calling convention: pointer in %g1, increment in %g2, new
	 * counter value handed back in %g2.  %g3, %g4 and %g7 appear
	 * as outputs because the helper clobbers them.
	 */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_add\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

static inline int __atomic24_sub(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	/* Same private calling convention as __atomic24_add() above. */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_sub\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

#define atomic24_add(i, v)	((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v)	((void)__atomic24_sub((i), (v)))

#define atomic24_dec_return(v)	__atomic24_sub(1, (v))
#define atomic24_inc_return(v)	__atomic24_add(1, (v))

#define atomic24_sub_and_test(i, v)	(__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v)	(__atomic24_sub(1, (v)) == 0)

#define atomic24_inc(v)	((void)__atomic24_add(1, (v)))
#define atomic24_dec(v)	((void)__atomic24_sub(1, (v)))

#define atomic24_add_negative(i, v)	(__atomic24_add((i), (v)) < 0)

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* !(__KERNEL__) */

#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */
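
/* A closing note on the lockless-update API declared near the top:
 * atomic_cmpxchg() and atomic_add_unless() are only declared in this
 * header; sparc32 has no compare-and-swap instruction, so they are
 * implemented out of line with lock-based protection.  Their
 * semantics nevertheless match the usual cmpxchg loop.  A minimal
 * sketch using only the declarations above (this is NOT the sparc
 * implementation, and the _sketch name is hypothetical):
 *
 *	static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
 *	{
 *		int c, old;
 *
 *		c = atomic_read(v);
 *		while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
 *			c = old;
 *		return c != u;	   (true if the add was performed)
 *	}
 *
 * atomic_inc_not_zero(v) is then atomic_add_unless(v, 1, 0): take a
 * reference only if the count has not already dropped to zero.
 */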