Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

h8300: Provide atomic_{or,xor,and}

Implement atomic logic ops -- atomic_{or,xor,and}

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.

Also rework the atomic implementation in terms of CPP macros to avoid
the typical repetition -- I seem to have missed this arch the last
time around when I did that.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
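
For illustration only (not part of the patch): a minimal user-space sketch of what one instantiation of the new macro produces. ATOMIC_OP(or, |=) expands to an atomic_or() that disables interrupts around the read-modify-write, which is what makes the operation atomic on this uniprocessor-only architecture. atomic_t, h8300flags and the arch_local_irq_*() helpers are mocked here so the expansion compiles and runs stand-alone.

#include <stdio.h>

typedef struct { int counter; } atomic_t;   /* mock of the kernel type */
typedef unsigned short h8300flags;          /* mock of the arch type */

/* stubs: the real helpers disable/restore interrupts via the CCR */
static h8300flags arch_local_irq_save(void) { return 0; }
static void arch_local_irq_restore(h8300flags f) { (void)f; }

/* ATOMIC_OP(or, |=) from the patch expands to this shape: */
static inline void atomic_or(int i, atomic_t *v)
{
        h8300flags flags;

        flags = arch_local_irq_save();
        v->counter |= i;
        arch_local_irq_restore(flags);
}

int main(void)
{
        atomic_t v = { .counter = 0x0f };

        atomic_or(0xf0, &v);
        printf("counter = 0x%x\n", v.counter);  /* prints counter = 0xff */
        return 0;
}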

authored by Peter Zijlstra, committed by Thomas Gleixner
73ada370 b0d8003e

+45 -100
arch/h8300/include/asm/atomic.h
···
 
 #include <linux/kernel.h>
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        h8300flags flags;
-        int ret;
-
-        flags = arch_local_irq_save();
-        ret = v->counter += i;
-        arch_local_irq_restore(flags);
-        return ret;
+#define ATOMIC_OP_RETURN(op, c_op)                              \
+static inline int atomic_##op##_return(int i, atomic_t *v)     \
+{                                                               \
+        h8300flags flags;                                       \
+        int ret;                                                \
+                                                                \
+        flags = arch_local_irq_save();                          \
+        ret = v->counter c_op i;                                \
+        arch_local_irq_restore(flags);                          \
+        return ret;                                             \
 }
 
-#define atomic_add(i, v) atomic_add_return(i, v)
+#define ATOMIC_OP(op, c_op)                                     \
+static inline void atomic_##op(int i, atomic_t *v)              \
+{                                                               \
+        h8300flags flags;                                       \
+                                                                \
+        flags = arch_local_irq_save();                          \
+        v->counter c_op i;                                      \
+        arch_local_irq_restore(flags);                          \
+}
+
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP_RETURN(sub, -=)
+
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
+
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define atomic_add(i, v)                (void)atomic_add_return(i, v)
 #define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        h8300flags flags;
-        int ret;
+#define atomic_sub(i, v)                (void)atomic_sub_return(i, v)
+#define atomic_sub_and_test(i, v)       (atomic_sub_return(i, v) == 0)
 
-        flags = arch_local_irq_save();
-        ret = v->counter -= i;
-        arch_local_irq_restore(flags);
-        return ret;
-}
+#define atomic_inc_return(v)            atomic_add_return(1, v)
+#define atomic_dec_return(v)            atomic_sub_return(1, v)
 
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define atomic_inc(v)                   (void)atomic_inc_return(v)
+#define atomic_inc_and_test(v)          (atomic_inc_return(v) == 0)
 
-static inline int atomic_inc_return(atomic_t *v)
-{
-        h8300flags flags;
-        int ret;
-
-        flags = arch_local_irq_save();
-        v->counter++;
-        ret = v->counter;
-        arch_local_irq_restore(flags);
-        return ret;
-}
-
-#define atomic_inc(v) atomic_inc_return(v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static inline int atomic_dec_return(atomic_t *v)
-{
-        h8300flags flags;
-        int ret;
-
-        flags = arch_local_irq_save();
-        --v->counter;
-        ret = v->counter;
-        arch_local_irq_restore(flags);
-        return ret;
-}
-
-#define atomic_dec(v) atomic_dec_return(v)
-
-static inline int atomic_dec_and_test(atomic_t *v)
-{
-        h8300flags flags;
-        int ret;
-
-        flags = arch_local_irq_save();
-        --v->counter;
-        ret = v->counter;
-        arch_local_irq_restore(flags);
-        return ret == 0;
-}
+#define atomic_dec(v)                   (void)atomic_dec_return(v)
+#define atomic_dec_and_test(v)          (atomic_dec_return(v) == 0)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
···
         return ret;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-        unsigned char ccr;
-        unsigned long tmp;
-
-        __asm__ __volatile__("stc ccr,%w3\n\t"
-                             "orc #0x80,ccr\n\t"
-                             "mov.l %0,%1\n\t"
-                             "and.l %2,%1\n\t"
-                             "mov.l %1,%0\n\t"
-                             "ldc %w3,ccr"
-                             : "=m"(*v), "=r"(tmp)
-                             : "g"(~(mask)), "r"(ccr));
+        atomic_and(~mask, v);
 }
 
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-        unsigned char ccr;
-        unsigned long tmp;
-
-        __asm__ __volatile__("stc ccr,%w3\n\t"
-                             "orc #0x80,ccr\n\t"
-                             "mov.l %0,%1\n\t"
-                             "or.l %2,%1\n\t"
-                             "mov.l %1,%0\n\t"
-                             "ldc %w3,ccr"
-                             : "=m"(*v), "=r"(tmp)
-                             : "g"(~(mask)), "r"(ccr));
+        atomic_or(mask, v);
 }
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()     barrier()
-#define smp_mb__after_atomic_dec()      barrier()
-#define smp_mb__before_atomic_inc()     barrier()
-#define smp_mb__after_atomic_inc()      barrier()
 
 #endif /* __ARCH_H8300_ATOMIC __ */
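
A caller-side sketch of the migration the deprecated wrappers point at, again as a stand-alone user-space mock rather than kernel code; IRQ_PENDING and irq_status are hypothetical names used only for illustration:

#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* minimal stand-ins for the irq-disable based ops the patch generates */
static void atomic_or(int i, atomic_t *v)  { v->counter |= i; }
static void atomic_and(int i, atomic_t *v) { v->counter &= i; }

#define IRQ_PENDING 0x01        /* hypothetical flag bit */

int main(void)
{
        atomic_t irq_status = { .counter = 0 }; /* hypothetical state word */

        /* was: atomic_set_mask(IRQ_PENDING, &irq_status); */
        atomic_or(IRQ_PENDING, &irq_status);
        printf("after set:   0x%x\n", irq_status.counter);     /* 0x1 */

        /* was: atomic_clear_mask(IRQ_PENDING, &irq_status); */
        atomic_and(~IRQ_PENDING, &irq_status);
        printf("after clear: 0x%x\n", irq_status.counter);     /* 0x0 */
        return 0;
}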