#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/config.h>

#include <asm/system.h>	/* local_irq_XXX() */

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
}

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

/* "seq" sets c to 0xff if the decrement left the counter at zero. */
static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

#ifdef CONFIG_RMW_INSNS

/*
 * On CPUs that have "casl" (68020 and later), implement the *_return
 * variants as a compare-and-swap retry loop.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"		/* t = tmp (last observed value) */
			"	addl %3,%1\n"		/* t += i */
			"	casl %2,%1,%0\n"	/* if (*v == tmp) *v = t; else tmp = *v */
			"	jne 1b"			/* raced with another update: retry */
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

#else /* !CONFIG_RMW_INSNS */

/*
 * Without "casl", fall back to disabling interrupts around a plain
 * read-modify-write; safe because these machines are uniprocessor.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v) : "g" (i));
	return c != 0;
}

/* "smi" sets c to 0xff if the addition produced a negative result. */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v) : "g" (i));
	return c != 0;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __ARCH_M68K_ATOMIC__ */
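
For context, a minimal sketch of the reference-counting idiom these primitives exist for, as hinted at by the "resource counting" comment above. The struct my_buf type and the my_buf_* helpers are hypothetical illustrations, not part of the kernel source; only the atomic_* calls and kfree() are real APIs.

/* Hypothetical example: a refcounted buffer built on atomic_t. */
#include <asm/atomic.h>
#include <linux/slab.h>		/* kfree() */

struct my_buf {
	atomic_t refcount;
	char data[64];
};

static void my_buf_init(struct my_buf *b)
{
	atomic_set(&b->refcount, 1);	/* one reference, held by the creator */
}

static void my_buf_get(struct my_buf *b)
{
	atomic_inc(&b->refcount);	/* take an additional reference */
}

static void my_buf_put(struct my_buf *b)
{
	/* atomic_dec_and_test() returns true only for the final reference,
	 * so exactly one caller frees the object (assumed kmalloc()ed). */
	if (atomic_dec_and_test(&b->refcount))
		kfree(b);
}

Note that plain atomic_dec() would not work in my_buf_put(): a separate atomic_read() after the decrement would race with other droppers, which is exactly why the decrement and the zero test are fused into one asm sequence in atomic_dec_and_test().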