Fix FRV cmpxchg_local

Fix the FRV cmpxchg_local by breaking the following header dependency loop:

linux/kernel.h -> linux/bitops.h -> asm-frv/bitops.h -> asm-frv/atomic.h
-> asm-frv/system.h ->
asm-generic/cmpxchg_local.h -> typecheck() defined in linux/kernel.h

and

linux/kernel.h -> linux/bitops.h -> asm-frv/bitops.h -> asm-frv/atomic.h ->
asm-generic/cmpxchg_local.h -> typecheck() defined in linux/kernel.h

In order to fix this:
- Move the atomic_test_and_*_mask inlines from asm-frv/atomic.h (why are they
there at all anyway? They are not touching atomic_t variables!) to
asm-frv/bitops.h.

Also fix a build issue with cmpxchg: it does not cast to (unsigned long *)
like other architectures, so deal with it in the cmpxchg_local macro.

FRV builds fine with this patch.

Thanks to Adrian Bunk <bunk@kernel.org> for spotting this bug.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Adrian Bunk <bunk@kernel.org>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by Mathieu Desnoyers and committed by Linus Torvalds 6784fd59 b55fcb22

+83 -84
-81
include/asm-frv/atomic.h
··· 125 125 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) 126 126 #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) 127 127 128 - #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 129 - static inline 130 - unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v) 131 - { 132 - unsigned long old, tmp; 133 - 134 - asm volatile( 135 - "0: \n" 136 - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 137 - " ckeq icc3,cc7 \n" 138 - " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ 139 - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 140 - " and%I3 %1,%3,%2 \n" 141 - " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ 142 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ 143 - " beq icc3,#0,0b \n" 144 - : "+U"(*v), "=&r"(old), "=r"(tmp) 145 - : "NPr"(~mask) 146 - : "memory", "cc7", "cc3", "icc3" 147 - ); 148 - 149 - return old; 150 - } 151 - 152 - static inline 153 - unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v) 154 - { 155 - unsigned long old, tmp; 156 - 157 - asm volatile( 158 - "0: \n" 159 - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 160 - " ckeq icc3,cc7 \n" 161 - " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ 162 - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 163 - " or%I3 %1,%3,%2 \n" 164 - " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ 165 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... 
clear ICC3.Z */ 166 - " beq icc3,#0,0b \n" 167 - : "+U"(*v), "=&r"(old), "=r"(tmp) 168 - : "NPr"(mask) 169 - : "memory", "cc7", "cc3", "icc3" 170 - ); 171 - 172 - return old; 173 - } 174 - 175 - static inline 176 - unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v) 177 - { 178 - unsigned long old, tmp; 179 - 180 - asm volatile( 181 - "0: \n" 182 - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 183 - " ckeq icc3,cc7 \n" 184 - " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ 185 - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 186 - " xor%I3 %1,%3,%2 \n" 187 - " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ 188 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ 189 - " beq icc3,#0,0b \n" 190 - : "+U"(*v), "=&r"(old), "=r"(tmp) 191 - : "NPr"(mask) 192 - : "memory", "cc7", "cc3", "icc3" 193 - ); 194 - 195 - return old; 196 - } 197 - 198 - #else 199 - 200 - extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v); 201 - extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v); 202 - extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v); 203 - 204 - #endif 205 - 206 - #define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v)) 207 - #define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v)) 208 - 209 128 /*****************************************************************************/ 210 129 /* 211 130 * exchange value with memory
+81 -2
include/asm-frv/bitops.h
··· 16 16 17 17 #include <linux/compiler.h> 18 18 #include <asm/byteorder.h> 19 - #include <asm/system.h> 20 - #include <asm/atomic.h> 21 19 22 20 #ifdef __KERNEL__ 23 21 ··· 30 32 */ 31 33 #define smp_mb__before_clear_bit() barrier() 32 34 #define smp_mb__after_clear_bit() barrier() 35 + 36 + #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 37 + static inline 38 + unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v) 39 + { 40 + unsigned long old, tmp; 41 + 42 + asm volatile( 43 + "0: \n" 44 + " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 45 + " ckeq icc3,cc7 \n" 46 + " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ 47 + " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 48 + " and%I3 %1,%3,%2 \n" 49 + " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ 50 + " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ 51 + " beq icc3,#0,0b \n" 52 + : "+U"(*v), "=&r"(old), "=r"(tmp) 53 + : "NPr"(~mask) 54 + : "memory", "cc7", "cc3", "icc3" 55 + ); 56 + 57 + return old; 58 + } 59 + 60 + static inline 61 + unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v) 62 + { 63 + unsigned long old, tmp; 64 + 65 + asm volatile( 66 + "0: \n" 67 + " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 68 + " ckeq icc3,cc7 \n" 69 + " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ 70 + " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 71 + " or%I3 %1,%3,%2 \n" 72 + " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ 73 + " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... 
clear ICC3.Z */ 74 + " beq icc3,#0,0b \n" 75 + : "+U"(*v), "=&r"(old), "=r"(tmp) 76 + : "NPr"(mask) 77 + : "memory", "cc7", "cc3", "icc3" 78 + ); 79 + 80 + return old; 81 + } 82 + 83 + static inline 84 + unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v) 85 + { 86 + unsigned long old, tmp; 87 + 88 + asm volatile( 89 + "0: \n" 90 + " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 91 + " ckeq icc3,cc7 \n" 92 + " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ 93 + " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 94 + " xor%I3 %1,%3,%2 \n" 95 + " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ 96 + " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ 97 + " beq icc3,#0,0b \n" 98 + : "+U"(*v), "=&r"(old), "=r"(tmp) 99 + : "NPr"(mask) 100 + : "memory", "cc7", "cc3", "icc3" 101 + ); 102 + 103 + return old; 104 + } 105 + 106 + #else 107 + 108 + extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v); 109 + extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v); 110 + extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v); 111 + 112 + #endif 113 + 114 + #define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v)) 115 + #define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v)) 33 116 34 117 static inline int test_and_clear_bit(int nr, volatile void *addr) 35 118 {
+2 -1
include/asm-frv/system.h
··· 14 14 15 15 #include <linux/types.h> 16 16 #include <linux/linkage.h> 17 + #include <linux/kernel.h> 17 18 18 19 struct thread_struct; 19 20 ··· 277 276 { 278 277 switch (size) { 279 278 case 4: 280 - return cmpxchg(ptr, old, new); 279 + return cmpxchg((unsigned long *)ptr, old, new); 281 280 default: 282 281 return __cmpxchg_local_generic(ptr, old, new, size); 283 282 }