/* include/asm-sh/atomic.h at v2.6.16-rc2 */
#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))

#include <asm/system.h>

/*
 * On SH these operations are implemented by disabling local interrupts
 * around a plain read-modify-write.  That is sufficient on uniprocessor
 * systems, but provides no cross-CPU atomicity.
 */

static __inline__ void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v += i;
	local_irq_restore(flags);
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v -= i;
	local_irq_restore(flags);
}

/* Add @i to @v and return the new value. */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp += i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/* Subtract @i from @v and return the new value. */
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp -= i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

/*
 * If @v holds @old, replace it with @new.  Returns the value @v held
 * before the operation.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

/*
 * Add @a to @v unless @v is @u.  Returns non-zero if the addition was
 * performed.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

/* Clear in @v the bits set in @mask. */
static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v &= ~mask;
	local_irq_restore(flags);
}

/* Set in @v the bits set in @mask. */
static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v |= mask;
	local_irq_restore(flags);
}

/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif /* __ASM_SH_ATOMIC_H */
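
/*
 * Illustrative usage sketch (not part of the original header): a minimal
 * reference-count pattern built on the primitives above, showing how
 * atomic_set(), atomic_inc_not_zero() and atomic_dec_and_test() fit
 * together.  The structure and function names (my_object, my_object_init,
 * my_object_get, my_object_put) are hypothetical and exist only for this
 * example.
 */
#include <asm/atomic.h>

struct my_object {
	atomic_t refcount;	/* number of outstanding references */
	/* ... payload ... */
};

/* A freshly created object starts with a single reference. */
static inline void my_object_init(struct my_object *obj)
{
	atomic_set(&obj->refcount, 1);
}

/*
 * Take a reference only if the object is still live (count != 0).
 * Returns non-zero on success, zero if the object is already dying.
 */
static inline int my_object_get(struct my_object *obj)
{
	return atomic_inc_not_zero(&obj->refcount);
}

/* Drop a reference; release the object when the last one goes away. */
static inline void my_object_put(struct my_object *obj,
				 void (*release)(struct my_object *))
{
	if (atomic_dec_and_test(&obj->refcount))
		release(obj);
}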