/* at v2.6.15-rc1 */
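/*
 * Every operation below is built on Alpha's load-locked/store-conditional
 * pair: ldl_l/ldq_l loads the counter and starts watching its cache line,
 * and stl_c/stq_c writes the updated value back, clearing its register
 * operand if another CPU touched the line in between; the "beq %0,2f"
 * then branches out of line (.subsection 2) and retries from label 1.
 * The value-returning variants additionally issue smp_mb() before and
 * after the sequence, so they behave as full memory barriers.
 */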
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */


/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us. We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

#define atomic_read(v)		((v)->counter + 0)
#define atomic64_read(v)	((v)->counter + 0)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}


/*
 * Same as above, but return the result value
 */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
147 "1: ldl_l %0,%1\n" 148 " subl %0,%3,%2\n" 149 " subl %0,%3,%0\n" 150 " stl_c %0,%1\n" 151 " beq %0,2f\n" 152 ".subsection 2\n" 153 "2: br 1b\n" 154 ".previous" 155 :"=&r" (temp), "=m" (v->counter), "=&r" (result) 156 :"Ir" (i), "m" (v->counter) : "memory"); 157 smp_mb(); 158 return result; 159} 160 161static __inline__ long atomic64_sub_return(long i, atomic64_t * v) 162{ 163 long temp, result; 164 smp_mb(); 165 __asm__ __volatile__( 166 "1: ldq_l %0,%1\n" 167 " subq %0,%3,%2\n" 168 " subq %0,%3,%0\n" 169 " stq_c %0,%1\n" 170 " beq %0,2f\n" 171 ".subsection 2\n" 172 "2: br 1b\n" 173 ".previous" 174 :"=&r" (temp), "=m" (v->counter), "=&r" (result) 175 :"Ir" (i), "m" (v->counter) : "memory"); 176 smp_mb(); 177 return result; 178} 179 180#define atomic_dec_return(v) atomic_sub_return(1,(v)) 181#define atomic64_dec_return(v) atomic64_sub_return(1,(v)) 182 183#define atomic_inc_return(v) atomic_add_return(1,(v)) 184#define atomic64_inc_return(v) atomic64_add_return(1,(v)) 185 186#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) 187#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) 188 189#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) 190#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) 191#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0) 192 193#define atomic_inc(v) atomic_add(1,(v)) 194#define atomic64_inc(v) atomic64_add(1,(v)) 195 196#define atomic_dec(v) atomic_sub(1,(v)) 197#define atomic64_dec(v) atomic64_sub(1,(v)) 198 199#define smp_mb__before_atomic_dec() smp_mb() 200#define smp_mb__after_atomic_dec() smp_mb() 201#define smp_mb__before_atomic_inc() smp_mb() 202#define smp_mb__after_atomic_inc() smp_mb() 203 204#endif /* _ALPHA_ATOMIC_H */