Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking, sparc: Rename __spin_try_lock() and friends

Needed to avoid namespace conflicts when the common code
function bodies of _spin_try_lock() etc. are moved to a header
file where the function name would be __spin_try_lock().

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124416.306495811@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Heiko Carstens and committed by Ingo Molnar.
9f34ceb6 8307a980

+20 -20
+6 -6
arch/sparc/include/asm/spinlock_32.h
··· 76 76 * 77 77 * Unfortunately this scheme limits us to ~16,000,000 cpus. 78 78 */ 79 - static inline void __read_lock(raw_rwlock_t *rw) 79 + static inline void arch_read_lock(raw_rwlock_t *rw) 80 80 { 81 81 register raw_rwlock_t *lp asm("g1"); 82 82 lp = rw; ··· 92 92 #define __raw_read_lock(lock) \ 93 93 do { unsigned long flags; \ 94 94 local_irq_save(flags); \ 95 - __read_lock(lock); \ 95 + arch_read_lock(lock); \ 96 96 local_irq_restore(flags); \ 97 97 } while(0) 98 98 99 - static inline void __read_unlock(raw_rwlock_t *rw) 99 + static inline void arch_read_unlock(raw_rwlock_t *rw) 100 100 { 101 101 register raw_rwlock_t *lp asm("g1"); 102 102 lp = rw; ··· 112 112 #define __raw_read_unlock(lock) \ 113 113 do { unsigned long flags; \ 114 114 local_irq_save(flags); \ 115 - __read_unlock(lock); \ 115 + arch_read_unlock(lock); \ 116 116 local_irq_restore(flags); \ 117 117 } while(0) 118 118 ··· 150 150 return (val == 0); 151 151 } 152 152 153 - static inline int __read_trylock(raw_rwlock_t *rw) 153 + static inline int arch_read_trylock(raw_rwlock_t *rw) 154 154 { 155 155 register raw_rwlock_t *lp asm("g1"); 156 156 register int res asm("o0"); ··· 169 169 ({ unsigned long flags; \ 170 170 int res; \ 171 171 local_irq_save(flags); \ 172 - res = __read_trylock(lock); \ 172 + res = arch_read_trylock(lock); \ 173 173 local_irq_restore(flags); \ 174 174 res; \ 175 175 })
+14 -14
arch/sparc/include/asm/spinlock_64.h
··· 92 92 93 93 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ 94 94 95 - static void inline __read_lock(raw_rwlock_t *lock) 95 + static void inline arch_read_lock(raw_rwlock_t *lock) 96 96 { 97 97 unsigned long tmp1, tmp2; 98 98 ··· 115 115 : "memory"); 116 116 } 117 117 118 - static int inline __read_trylock(raw_rwlock_t *lock) 118 + static int inline arch_read_trylock(raw_rwlock_t *lock) 119 119 { 120 120 int tmp1, tmp2; 121 121 ··· 136 136 return tmp1; 137 137 } 138 138 139 - static void inline __read_unlock(raw_rwlock_t *lock) 139 + static void inline arch_read_unlock(raw_rwlock_t *lock) 140 140 { 141 141 unsigned long tmp1, tmp2; 142 142 ··· 152 152 : "memory"); 153 153 } 154 154 155 - static void inline __write_lock(raw_rwlock_t *lock) 155 + static void inline arch_write_lock(raw_rwlock_t *lock) 156 156 { 157 157 unsigned long mask, tmp1, tmp2; 158 158 ··· 177 177 : "memory"); 178 178 } 179 179 180 - static void inline __write_unlock(raw_rwlock_t *lock) 180 + static void inline arch_write_unlock(raw_rwlock_t *lock) 181 181 { 182 182 __asm__ __volatile__( 183 183 " stw %%g0, [%0]" ··· 186 186 : "memory"); 187 187 } 188 188 189 - static int inline __write_trylock(raw_rwlock_t *lock) 189 + static int inline arch_write_trylock(raw_rwlock_t *lock) 190 190 { 191 191 unsigned long mask, tmp1, tmp2, result; 192 192 ··· 210 210 return result; 211 211 } 212 212 213 - #define __raw_read_lock(p) __read_lock(p) 214 - #define __raw_read_lock_flags(p, f) __read_lock(p) 215 - #define __raw_read_trylock(p) __read_trylock(p) 216 - #define __raw_read_unlock(p) __read_unlock(p) 217 - #define __raw_write_lock(p) __write_lock(p) 218 - #define __raw_write_lock_flags(p, f) __write_lock(p) 219 - #define __raw_write_unlock(p) __write_unlock(p) 220 - #define __raw_write_trylock(p) __write_trylock(p) 213 + #define __raw_read_lock(p) arch_read_lock(p) 214 + #define __raw_read_lock_flags(p, f) arch_read_lock(p) 215 + #define __raw_read_trylock(p) arch_read_trylock(p) 216 + #define __raw_read_unlock(p) arch_read_unlock(p) 217 + #define __raw_write_lock(p) arch_write_lock(p) 218 + #define __raw_write_lock_flags(p, f) arch_write_lock(p) 219 + #define __raw_write_unlock(p) arch_write_unlock(p) 220 + #define __raw_write_trylock(p) arch_write_trylock(p) 221 221 222 222 #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) 223 223 #define __raw_write_can_lock(rw) (!(rw)->lock)