Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Blackfin: SMP: convert to common asm-generic/atomic.h

Now that common code supports SMP systems, switch our SMP atomic logic
over to it to avoid code duplication.

Signed-off-by: Mike Frysinger <vapier@gentoo.org>

+10 -94
arch/blackfin/include/asm/atomic.h
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2009 Analog Devices Inc.
+ * Copyright 2004-2011 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,111 +7,27 @@
 #ifndef __ARCH_BLACKFIN_ATOMIC__
 #define __ARCH_BLACKFIN_ATOMIC__
 
-#ifndef CONFIG_SMP
-# include <asm-generic/atomic.h>
-#else
+#ifdef CONFIG_SMP
 
-#include <linux/types.h>
-#include <asm/system.h>	/* local_irq_XXX() */
-
-/*
- * Atomic operations that C can't guarantee us.  Useful for
- * resource counting etc..
- */
-
-#define ATOMIC_INIT(i)	{ (i) }
-#define atomic_set(v, i)	(((v)->counter) = i)
-
-#define atomic_read(v)	__raw_uncached_fetch_asm(&(v)->counter)
+#include <linux/linkage.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-
 asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-
 asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-
 asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
-
 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
-
 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-	__raw_atomic_update_asm(&v->counter, i);
-}
+#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
 
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	__raw_atomic_update_asm(&v->counter, -i);
-}
+#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	return __raw_atomic_update_asm(&v->counter, i);
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	return __raw_atomic_update_asm(&v->counter, -i);
-}
-
-static inline void atomic_inc(volatile atomic_t *v)
-{
-	__raw_atomic_update_asm(&v->counter, 1);
-}
-
-static inline void atomic_dec(volatile atomic_t *v)
-{
-	__raw_atomic_update_asm(&v->counter, -1);
-}
-
-static inline void atomic_clear_mask(int mask, atomic_t *v)
-{
-	__raw_atomic_clear_asm(&v->counter, mask);
-}
-
-static inline void atomic_set_mask(int mask, atomic_t *v)
-{
-	__raw_atomic_set_asm(&v->counter, mask);
-}
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
-#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
-#define atomic_dec_return(v)	atomic_sub_return(1,(v))
-#define atomic_inc_return(v)	atomic_add_return(1,(v))
-
-#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
-
-#define __atomic_add_unless(v, a, u)				\
-({								\
-	int c, old;						\
-	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old;					\
-	c;							\
-})
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
+#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
+#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m)
 
 #endif
+
+#include <asm-generic/atomic.h>
 
 #endif