Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

locking: Convert __raw_spin* functions to arch_spin*

Name space cleanup. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org

+319 -319
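Every per-file hunk below applies the same namespace change: the lock primitives that each architecture's <asm/spinlock.h> provides are renamed from __raw_spin_*() and _raw_*_relax() to arch_spin_*() and arch_*_relax(), and the generic wrappers in include/linux/spinlock.h are updated to call the new names (powerpc additionally renames its internal trylock helper to __arch_spin_trylock() to free up the public name). For orientation, here is a minimal sketch of what one arch header looks like after the patch. It is loosely modeled on the alpha hunk below; the test_and_set_bit() lock loop stands in for alpha's ll/sc assembly and the include lines are approximate for this era of the tree, so treat it as an illustration, not code from the patch.

	#include <asm/spinlock_types.h>		/* arch_spinlock_t */
	#include <asm/bitops.h>			/* test_and_set_bit() */
	#include <asm/processor.h>		/* cpu_relax() */
	#include <asm/system.h>			/* mb() */

	/* was: static inline void __raw_spin_lock(arch_spinlock_t *lock) */
	static inline void arch_spin_lock(arch_spinlock_t *lock)
	{
		/* simplified stand-in for the arch-specific locked-exchange loop */
		while (test_and_set_bit(0, &lock->lock))
			cpu_relax();
	}

	/* was: static inline int __raw_spin_trylock(arch_spinlock_t *lock) */
	static inline int arch_spin_trylock(arch_spinlock_t *lock)
	{
		return !test_and_set_bit(0, &lock->lock);
	}

	/* was: static inline void __raw_spin_unlock(arch_spinlock_t *lock) */
	static inline void arch_spin_unlock(arch_spinlock_t *lock)
	{
		mb();
		lock->lock = 0;
	}

	/* was: _raw_spin_relax() / _raw_read_relax() / _raw_write_relax() */
	#define arch_spin_relax(lock)	cpu_relax()
	#define arch_read_relax(lock)	cpu_relax()
	#define arch_write_relax(lock)	cpu_relax()

The generic layer keeps its _raw_spin_*() entry points; only their expansions change, e.g. _raw_spin_lock(lock) now maps to arch_spin_lock(&(lock)->raw_lock), as the include/linux/spinlock.h hunk below shows.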
+9 -9
arch/alpha/include/asm/spinlock.h
··· 12 12 * We make no fairness assumptions. They have a cost. 13 13 */ 14 14 15 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 16 - #define __raw_spin_is_locked(x) ((x)->lock != 0) 17 - #define __raw_spin_unlock_wait(x) \ 15 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 16 + #define arch_spin_is_locked(x) ((x)->lock != 0) 17 + #define arch_spin_unlock_wait(x) \ 18 18 do { cpu_relax(); } while ((x)->lock) 19 19 20 - static inline void __raw_spin_unlock(arch_spinlock_t * lock) 20 + static inline void arch_spin_unlock(arch_spinlock_t * lock) 21 21 { 22 22 mb(); 23 23 lock->lock = 0; 24 24 } 25 25 26 - static inline void __raw_spin_lock(arch_spinlock_t * lock) 26 + static inline void arch_spin_lock(arch_spinlock_t * lock) 27 27 { 28 28 long tmp; 29 29 ··· 43 43 : "m"(lock->lock) : "memory"); 44 44 } 45 45 46 - static inline int __raw_spin_trylock(arch_spinlock_t *lock) 46 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 47 47 { 48 48 return !test_and_set_bit(0, &lock->lock); 49 49 } ··· 169 169 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 170 170 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 171 171 172 - #define _raw_spin_relax(lock) cpu_relax() 173 - #define _raw_read_relax(lock) cpu_relax() 174 - #define _raw_write_relax(lock) cpu_relax() 172 + #define arch_spin_relax(lock) cpu_relax() 173 + #define arch_read_relax(lock) cpu_relax() 174 + #define arch_write_relax(lock) cpu_relax() 175 175 176 176 #endif /* _ALPHA_SPINLOCK_H */
+10 -10
arch/arm/include/asm/spinlock.h
··· 17 17 * Locked value: 1 18 18 */ 19 19 20 - #define __raw_spin_is_locked(x) ((x)->lock != 0) 21 - #define __raw_spin_unlock_wait(lock) \ 22 - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 20 + #define arch_spin_is_locked(x) ((x)->lock != 0) 21 + #define arch_spin_unlock_wait(lock) \ 22 + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) 23 23 24 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 24 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 25 25 26 - static inline void __raw_spin_lock(arch_spinlock_t *lock) 26 + static inline void arch_spin_lock(arch_spinlock_t *lock) 27 27 { 28 28 unsigned long tmp; 29 29 ··· 43 43 smp_mb(); 44 44 } 45 45 46 - static inline int __raw_spin_trylock(arch_spinlock_t *lock) 46 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 47 47 { 48 48 unsigned long tmp; 49 49 ··· 63 63 } 64 64 } 65 65 66 - static inline void __raw_spin_unlock(arch_spinlock_t *lock) 66 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 67 67 { 68 68 smp_mb(); 69 69 ··· 220 220 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 221 221 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 222 222 223 - #define _raw_spin_relax(lock) cpu_relax() 224 - #define _raw_read_relax(lock) cpu_relax() 225 - #define _raw_write_relax(lock) cpu_relax() 223 + #define arch_spin_relax(lock) cpu_relax() 224 + #define arch_read_relax(lock) cpu_relax() 225 + #define arch_write_relax(lock) cpu_relax() 226 226 227 227 #endif /* __ASM_SPINLOCK_H */
+10 -10
arch/blackfin/include/asm/spinlock.h
··· 24 24 asmlinkage int __raw_write_trylock_asm(volatile int *ptr); 25 25 asmlinkage void __raw_write_unlock_asm(volatile int *ptr); 26 26 27 - static inline int __raw_spin_is_locked(arch_spinlock_t *lock) 27 + static inline int arch_spin_is_locked(arch_spinlock_t *lock) 28 28 { 29 29 return __raw_spin_is_locked_asm(&lock->lock); 30 30 } 31 31 32 - static inline void __raw_spin_lock(arch_spinlock_t *lock) 32 + static inline void arch_spin_lock(arch_spinlock_t *lock) 33 33 { 34 34 __raw_spin_lock_asm(&lock->lock); 35 35 } 36 36 37 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 37 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 38 38 39 - static inline int __raw_spin_trylock(arch_spinlock_t *lock) 39 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 40 40 { 41 41 return __raw_spin_trylock_asm(&lock->lock); 42 42 } 43 43 44 - static inline void __raw_spin_unlock(arch_spinlock_t *lock) 44 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 45 45 { 46 46 __raw_spin_unlock_asm(&lock->lock); 47 47 } 48 48 49 - static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) 49 + static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) 50 50 { 51 - while (__raw_spin_is_locked(lock)) 51 + while (arch_spin_is_locked(lock)) 52 52 cpu_relax(); 53 53 } 54 54 ··· 92 92 __raw_write_unlock_asm(&rw->lock); 93 93 } 94 94 95 - #define _raw_spin_relax(lock) cpu_relax() 96 - #define _raw_read_relax(lock) cpu_relax() 97 - #define _raw_write_relax(lock) cpu_relax() 95 + #define arch_spin_relax(lock) cpu_relax() 96 + #define arch_read_relax(lock) cpu_relax() 97 + #define arch_write_relax(lock) cpu_relax() 98 98 99 99 #endif 100 100
+23 -23
arch/cris/include/arch-v32/arch/spinlock.h
··· 9 9 extern void cris_spin_lock(void *l); 10 10 extern int cris_spin_trylock(void *l); 11 11 12 - static inline int __raw_spin_is_locked(arch_spinlock_t *x) 12 + static inline int arch_spin_is_locked(arch_spinlock_t *x) 13 13 { 14 14 return *(volatile signed char *)(&(x)->slock) <= 0; 15 15 } 16 16 17 - static inline void __raw_spin_unlock(arch_spinlock_t *lock) 17 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 18 18 { 19 19 __asm__ volatile ("move.d %1,%0" \ 20 20 : "=m" (lock->slock) \ ··· 22 22 : "memory"); 23 23 } 24 24 25 - static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) 25 + static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) 26 26 { 27 - while (__raw_spin_is_locked(lock)) 27 + while (arch_spin_is_locked(lock)) 28 28 cpu_relax(); 29 29 } 30 30 31 - static inline int __raw_spin_trylock(arch_spinlock_t *lock) 31 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 32 32 { 33 33 return cris_spin_trylock((void *)&lock->slock); 34 34 } 35 35 36 - static inline void __raw_spin_lock(arch_spinlock_t *lock) 36 + static inline void arch_spin_lock(arch_spinlock_t *lock) 37 37 { 38 38 cris_spin_lock((void *)&lock->slock); 39 39 } 40 40 41 41 static inline void 42 - __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 42 + arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 43 43 { 44 - __raw_spin_lock(lock); 44 + arch_spin_lock(lock); 45 45 } 46 46 47 47 /* ··· 68 68 69 69 static inline void __raw_read_lock(raw_rwlock_t *rw) 70 70 { 71 - __raw_spin_lock(&rw->slock); 71 + arch_spin_lock(&rw->slock); 72 72 while (rw->lock == 0); 73 73 rw->lock--; 74 - __raw_spin_unlock(&rw->slock); 74 + arch_spin_unlock(&rw->slock); 75 75 } 76 76 77 77 static inline void __raw_write_lock(raw_rwlock_t *rw) 78 78 { 79 - __raw_spin_lock(&rw->slock); 79 + arch_spin_lock(&rw->slock); 80 80 while (rw->lock != RW_LOCK_BIAS); 81 81 rw->lock = 0; 82 - __raw_spin_unlock(&rw->slock); 82 + arch_spin_unlock(&rw->slock); 83 83 } 84 84 85 85 static inline void __raw_read_unlock(raw_rwlock_t *rw) 86 86 { 87 - __raw_spin_lock(&rw->slock); 87 + arch_spin_lock(&rw->slock); 88 88 rw->lock++; 89 - __raw_spin_unlock(&rw->slock); 89 + arch_spin_unlock(&rw->slock); 90 90 } 91 91 92 92 static inline void __raw_write_unlock(raw_rwlock_t *rw) 93 93 { 94 - __raw_spin_lock(&rw->slock); 94 + arch_spin_lock(&rw->slock); 95 95 while (rw->lock != RW_LOCK_BIAS); 96 96 rw->lock = RW_LOCK_BIAS; 97 - __raw_spin_unlock(&rw->slock); 97 + arch_spin_unlock(&rw->slock); 98 98 } 99 99 100 100 static inline int __raw_read_trylock(raw_rwlock_t *rw) 101 101 { 102 102 int ret = 0; 103 - __raw_spin_lock(&rw->slock); 103 + arch_spin_lock(&rw->slock); 104 104 if (rw->lock != 0) { 105 105 rw->lock--; 106 106 ret = 1; 107 107 } 108 - __raw_spin_unlock(&rw->slock); 108 + arch_spin_unlock(&rw->slock); 109 109 return ret; 110 110 } 111 111 112 112 static inline int __raw_write_trylock(raw_rwlock_t *rw) 113 113 { 114 114 int ret = 0; 115 - __raw_spin_lock(&rw->slock); 115 + arch_spin_lock(&rw->slock); 116 116 if (rw->lock == RW_LOCK_BIAS) { 117 117 rw->lock = 0; 118 118 ret = 1; 119 119 } 120 - __raw_spin_unlock(&rw->slock); 120 + arch_spin_unlock(&rw->slock); 121 121 return 1; 122 122 } 123 123 124 124 #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) 125 125 #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) 126 126 127 - #define _raw_spin_relax(lock) cpu_relax() 128 - #define _raw_read_relax(lock) cpu_relax() 129 - #define _raw_write_relax(lock) cpu_relax() 127 + #define arch_spin_relax(lock) cpu_relax() 128 + #define arch_read_relax(lock) cpu_relax() 129 + #define arch_write_relax(lock) cpu_relax() 130 130 131 131 #endif /* __ASM_ARCH_SPINLOCK_H */
+1 -1
arch/ia64/include/asm/bitops.h
··· 127 127 * @addr: Address to start counting from 128 128 * 129 129 * Similarly to clear_bit_unlock, the implementation uses a store 130 - * with release semantics. See also __raw_spin_unlock(). 130 + * with release semantics. See also arch_spin_unlock(). 131 131 */ 132 132 static __inline__ void 133 133 __clear_bit_unlock(int nr, void *addr)
+13 -13
arch/ia64/include/asm/spinlock.h
··· 17 17 #include <asm/intrinsics.h> 18 18 #include <asm/system.h> 19 19 20 - #define __raw_spin_lock_init(x) ((x)->lock = 0) 20 + #define arch_spin_lock_init(x) ((x)->lock = 0) 21 21 22 22 /* 23 23 * Ticket locks are conceptually two parts, one indicating the current head of ··· 103 103 return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; 104 104 } 105 105 106 - static inline int __raw_spin_is_locked(arch_spinlock_t *lock) 106 + static inline int arch_spin_is_locked(arch_spinlock_t *lock) 107 107 { 108 108 return __ticket_spin_is_locked(lock); 109 109 } 110 110 111 - static inline int __raw_spin_is_contended(arch_spinlock_t *lock) 111 + static inline int arch_spin_is_contended(arch_spinlock_t *lock) 112 112 { 113 113 return __ticket_spin_is_contended(lock); 114 114 } 115 - #define __raw_spin_is_contended __raw_spin_is_contended 115 + #define arch_spin_is_contended arch_spin_is_contended 116 116 117 - static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) 117 + static __always_inline void arch_spin_lock(arch_spinlock_t *lock) 118 118 { 119 119 __ticket_spin_lock(lock); 120 120 } 121 121 122 - static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) 122 + static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) 123 123 { 124 124 return __ticket_spin_trylock(lock); 125 125 } 126 126 127 - static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) 127 + static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) 128 128 { 129 129 __ticket_spin_unlock(lock); 130 130 } 131 131 132 - static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, 132 + static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, 133 133 unsigned long flags) 134 134 { 135 - __raw_spin_lock(lock); 135 + arch_spin_lock(lock); 136 136 } 137 137 138 - static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) 138 + static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) 139 139 { 140 140 __ticket_spin_unlock_wait(lock); 141 141 } ··· 285 285 return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; 286 286 } 287 287 288 - #define _raw_spin_relax(lock) cpu_relax() 289 - #define _raw_read_relax(lock) cpu_relax() 290 - #define _raw_write_relax(lock) cpu_relax() 288 + #define arch_spin_relax(lock) cpu_relax() 289 + #define arch_read_relax(lock) cpu_relax() 290 + #define arch_write_relax(lock) cpu_relax() 291 291 292 292 #endif /* _ASM_IA64_SPINLOCK_H */
+14 -14
arch/m32r/include/asm/spinlock.h
··· 24 24 * We make no fairness assumptions. They have a cost. 25 25 */ 26 26 27 - #define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) 28 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 29 - #define __raw_spin_unlock_wait(x) \ 30 - do { cpu_relax(); } while (__raw_spin_is_locked(x)) 27 + #define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) 28 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 29 + #define arch_spin_unlock_wait(x) \ 30 + do { cpu_relax(); } while (arch_spin_is_locked(x)) 31 31 32 32 /** 33 - * __raw_spin_trylock - Try spin lock and return a result 33 + * arch_spin_trylock - Try spin lock and return a result 34 34 * @lock: Pointer to the lock variable 35 35 * 36 - * __raw_spin_trylock() tries to get the lock and returns a result. 36 + * arch_spin_trylock() tries to get the lock and returns a result. 37 37 * On the m32r, the result value is 1 (= Success) or 0 (= Failure). 38 38 */ 39 - static inline int __raw_spin_trylock(arch_spinlock_t *lock) 39 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 40 40 { 41 41 int oldval; 42 42 unsigned long tmp1, tmp2; ··· 50 50 * } 51 51 */ 52 52 __asm__ __volatile__ ( 53 - "# __raw_spin_trylock \n\t" 53 + "# arch_spin_trylock \n\t" 54 54 "ldi %1, #0; \n\t" 55 55 "mvfc %2, psw; \n\t" 56 56 "clrpsw #0x40 -> nop; \n\t" ··· 69 69 return (oldval > 0); 70 70 } 71 71 72 - static inline void __raw_spin_lock(arch_spinlock_t *lock) 72 + static inline void arch_spin_lock(arch_spinlock_t *lock) 73 73 { 74 74 unsigned long tmp0, tmp1; 75 75 ··· 84 84 * } 85 85 */ 86 86 __asm__ __volatile__ ( 87 - "# __raw_spin_lock \n\t" 87 + "# arch_spin_lock \n\t" 88 88 ".fillinsn \n" 89 89 "1: \n\t" 90 90 "mvfc %1, psw; \n\t" ··· 111 111 ); 112 112 } 113 113 114 - static inline void __raw_spin_unlock(arch_spinlock_t *lock) 114 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 115 115 { 116 116 mb(); 117 117 lock->slock = 1; ··· 319 319 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 320 320 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 321 321 322 - #define _raw_spin_relax(lock) cpu_relax() 323 - #define _raw_read_relax(lock) cpu_relax() 324 - #define _raw_write_relax(lock) cpu_relax() 322 + #define arch_spin_relax(lock) cpu_relax() 323 + #define arch_read_relax(lock) cpu_relax() 324 + #define arch_write_relax(lock) cpu_relax() 325 325 326 326 #endif /* _ASM_M32R_SPINLOCK_H */
+18 -18
arch/mips/include/asm/spinlock.h
··· 34 34 * becomes equal to the the initial value of the tail. 35 35 */ 36 36 37 - static inline int __raw_spin_is_locked(arch_spinlock_t *lock) 37 + static inline int arch_spin_is_locked(arch_spinlock_t *lock) 38 38 { 39 39 unsigned int counters = ACCESS_ONCE(lock->lock); 40 40 41 41 return ((counters >> 14) ^ counters) & 0x1fff; 42 42 } 43 43 44 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 45 - #define __raw_spin_unlock_wait(x) \ 46 - while (__raw_spin_is_locked(x)) { cpu_relax(); } 44 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 45 + #define arch_spin_unlock_wait(x) \ 46 + while (arch_spin_is_locked(x)) { cpu_relax(); } 47 47 48 - static inline int __raw_spin_is_contended(arch_spinlock_t *lock) 48 + static inline int arch_spin_is_contended(arch_spinlock_t *lock) 49 49 { 50 50 unsigned int counters = ACCESS_ONCE(lock->lock); 51 51 52 52 return (((counters >> 14) - counters) & 0x1fff) > 1; 53 53 } 54 - #define __raw_spin_is_contended __raw_spin_is_contended 54 + #define arch_spin_is_contended arch_spin_is_contended 55 55 56 - static inline void __raw_spin_lock(arch_spinlock_t *lock) 56 + static inline void arch_spin_lock(arch_spinlock_t *lock) 57 57 { 58 58 int my_ticket; 59 59 int tmp; 60 60 61 61 if (R10000_LLSC_WAR) { 62 62 __asm__ __volatile__ ( 63 - " .set push # __raw_spin_lock \n" 63 + " .set push # arch_spin_lock \n" 64 64 " .set noreorder \n" 65 65 " \n" 66 66 "1: ll %[ticket], %[ticket_ptr] \n" ··· 94 94 [my_ticket] "=&r" (my_ticket)); 95 95 } else { 96 96 __asm__ __volatile__ ( 97 - " .set push # __raw_spin_lock \n" 97 + " .set push # arch_spin_lock \n" 98 98 " .set noreorder \n" 99 99 " \n" 100 100 " ll %[ticket], %[ticket_ptr] \n" ··· 134 134 smp_llsc_mb(); 135 135 } 136 136 137 - static inline void __raw_spin_unlock(arch_spinlock_t *lock) 137 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 138 138 { 139 139 int tmp; 140 140 ··· 142 142 143 143 if (R10000_LLSC_WAR) { 144 144 __asm__ __volatile__ ( 145 - " # __raw_spin_unlock \n" 145 + " # arch_spin_unlock \n" 146 146 "1: ll %[ticket], %[ticket_ptr] \n" 147 147 " addiu %[ticket], %[ticket], 1 \n" 148 148 " ori %[ticket], %[ticket], 0x2000 \n" ··· 153 153 [ticket] "=&r" (tmp)); 154 154 } else { 155 155 __asm__ __volatile__ ( 156 - " .set push # __raw_spin_unlock \n" 156 + " .set push # arch_spin_unlock \n" 157 157 " .set noreorder \n" 158 158 " \n" 159 159 " ll %[ticket], %[ticket_ptr] \n" ··· 174 174 } 175 175 } 176 176 177 - static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock) 177 + static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) 178 178 { 179 179 int tmp, tmp2, tmp3; 180 180 181 181 if (R10000_LLSC_WAR) { 182 182 __asm__ __volatile__ ( 183 - " .set push # __raw_spin_trylock \n" 183 + " .set push # arch_spin_trylock \n" 184 184 " .set noreorder \n" 185 185 " \n" 186 186 "1: ll %[ticket], %[ticket_ptr] \n" ··· 204 204 [now_serving] "=&r" (tmp3)); 205 205 } else { 206 206 __asm__ __volatile__ ( 207 - " .set push # __raw_spin_trylock \n" 207 + " .set push # arch_spin_trylock \n" 208 208 " .set noreorder \n" 209 209 " \n" 210 210 " ll %[ticket], %[ticket_ptr] \n" ··· 483 483 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 484 484 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 485 485 486 - #define _raw_spin_relax(lock) cpu_relax() 487 - #define _raw_read_relax(lock) cpu_relax() 488 - #define _raw_write_relax(lock) cpu_relax() 486 + #define arch_spin_relax(lock) cpu_relax() 487 + #define arch_read_relax(lock) cpu_relax() 488 + #define arch_write_relax(lock) cpu_relax() 489 489 490 490 #endif /* _ASM_SPINLOCK_H */
+2 -2
arch/parisc/include/asm/atomic.h
··· 34 34 #define _atomic_spin_lock_irqsave(l,f) do { \ 35 35 arch_spinlock_t *s = ATOMIC_HASH(l); \ 36 36 local_irq_save(f); \ 37 - __raw_spin_lock(s); \ 37 + arch_spin_lock(s); \ 38 38 } while(0) 39 39 40 40 #define _atomic_spin_unlock_irqrestore(l,f) do { \ 41 41 arch_spinlock_t *s = ATOMIC_HASH(l); \ 42 - __raw_spin_unlock(s); \ 42 + arch_spin_unlock(s); \ 43 43 local_irq_restore(f); \ 44 44 } while(0) 45 45
+22 -22
arch/parisc/include/asm/spinlock.h
··· 5 5 #include <asm/processor.h> 6 6 #include <asm/spinlock_types.h> 7 7 8 - static inline int __raw_spin_is_locked(arch_spinlock_t *x) 8 + static inline int arch_spin_is_locked(arch_spinlock_t *x) 9 9 { 10 10 volatile unsigned int *a = __ldcw_align(x); 11 11 return *a == 0; 12 12 } 13 13 14 - #define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) 15 - #define __raw_spin_unlock_wait(x) \ 16 - do { cpu_relax(); } while (__raw_spin_is_locked(x)) 14 + #define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0) 15 + #define arch_spin_unlock_wait(x) \ 16 + do { cpu_relax(); } while (arch_spin_is_locked(x)) 17 17 18 - static inline void __raw_spin_lock_flags(arch_spinlock_t *x, 18 + static inline void arch_spin_lock_flags(arch_spinlock_t *x, 19 19 unsigned long flags) 20 20 { 21 21 volatile unsigned int *a; ··· 33 33 mb(); 34 34 } 35 35 36 - static inline void __raw_spin_unlock(arch_spinlock_t *x) 36 + static inline void arch_spin_unlock(arch_spinlock_t *x) 37 37 { 38 38 volatile unsigned int *a; 39 39 mb(); ··· 42 42 mb(); 43 43 } 44 44 45 - static inline int __raw_spin_trylock(arch_spinlock_t *x) 45 + static inline int arch_spin_trylock(arch_spinlock_t *x) 46 46 { 47 47 volatile unsigned int *a; 48 48 int ret; ··· 73 73 { 74 74 unsigned long flags; 75 75 local_irq_save(flags); 76 - __raw_spin_lock_flags(&rw->lock, flags); 76 + arch_spin_lock_flags(&rw->lock, flags); 77 77 rw->counter++; 78 - __raw_spin_unlock(&rw->lock); 78 + arch_spin_unlock(&rw->lock); 79 79 local_irq_restore(flags); 80 80 } 81 81 ··· 85 85 { 86 86 unsigned long flags; 87 87 local_irq_save(flags); 88 - __raw_spin_lock_flags(&rw->lock, flags); 88 + arch_spin_lock_flags(&rw->lock, flags); 89 89 rw->counter--; 90 - __raw_spin_unlock(&rw->lock); 90 + arch_spin_unlock(&rw->lock); 91 91 local_irq_restore(flags); 92 92 } 93 93 ··· 98 98 unsigned long flags; 99 99 retry: 100 100 local_irq_save(flags); 101 - if (__raw_spin_trylock(&rw->lock)) { 101 + if (arch_spin_trylock(&rw->lock)) { 102 102 rw->counter++; 103 - __raw_spin_unlock(&rw->lock); 103 + arch_spin_unlock(&rw->lock); 104 104 local_irq_restore(flags); 105 105 return 1; 106 106 } ··· 111 111 return 0; 112 112 113 113 /* Wait until we have a realistic chance at the lock */ 114 - while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0) 114 + while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) 115 115 cpu_relax(); 116 116 117 117 goto retry; ··· 124 124 unsigned long flags; 125 125 retry: 126 126 local_irq_save(flags); 127 - __raw_spin_lock_flags(&rw->lock, flags); 127 + arch_spin_lock_flags(&rw->lock, flags); 128 128 129 129 if (rw->counter != 0) { 130 - __raw_spin_unlock(&rw->lock); 130 + arch_spin_unlock(&rw->lock); 131 131 local_irq_restore(flags); 132 132 133 133 while (rw->counter != 0) ··· 144 144 static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) 145 145 { 146 146 rw->counter = 0; 147 - __raw_spin_unlock(&rw->lock); 147 + arch_spin_unlock(&rw->lock); 148 148 } 149 149 150 150 /* Note that we have to ensure interrupts are disabled in case we're ··· 155 155 int result = 0; 156 156 157 157 local_irq_save(flags); 158 - if (__raw_spin_trylock(&rw->lock)) { 158 + if (arch_spin_trylock(&rw->lock)) { 159 159 if (rw->counter == 0) { 160 160 rw->counter = -1; 161 161 result = 1; 162 162 } else { 163 163 /* Read-locked. Oh well. */ 164 - __raw_spin_unlock(&rw->lock); 164 + arch_spin_unlock(&rw->lock); 165 165 } 166 166 } 167 167 local_irq_restore(flags); ··· 190 190 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 191 191 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 192 192 193 - #define _raw_spin_relax(lock) cpu_relax() 194 - #define _raw_read_relax(lock) cpu_relax() 195 - #define _raw_write_relax(lock) cpu_relax() 193 + #define arch_spin_relax(lock) cpu_relax() 194 + #define arch_read_relax(lock) cpu_relax() 195 + #define arch_write_relax(lock) cpu_relax() 196 196 197 197 #endif /* __ASM_SPINLOCK_H */
+16 -16
arch/powerpc/include/asm/spinlock.h
··· 28 28 #include <asm/asm-compat.h> 29 29 #include <asm/synch.h> 30 30 31 - #define __raw_spin_is_locked(x) ((x)->slock != 0) 31 + #define arch_spin_is_locked(x) ((x)->slock != 0) 32 32 33 33 #ifdef CONFIG_PPC64 34 34 /* use 0x800000yy when locked, where yy == CPU number */ ··· 54 54 * This returns the old value in the lock, so we succeeded 55 55 * in getting the lock if the return value is 0. 56 56 */ 57 - static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock) 57 + static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) 58 58 { 59 59 unsigned long tmp, token; 60 60 ··· 73 73 return tmp; 74 74 } 75 75 76 - static inline int __raw_spin_trylock(arch_spinlock_t *lock) 76 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 77 77 { 78 78 CLEAR_IO_SYNC; 79 - return arch_spin_trylock(lock) == 0; 79 + return __arch_spin_trylock(lock) == 0; 80 80 } 81 81 82 82 /* ··· 104 104 #define SHARED_PROCESSOR 0 105 105 #endif 106 106 107 - static inline void __raw_spin_lock(arch_spinlock_t *lock) 107 + static inline void arch_spin_lock(arch_spinlock_t *lock) 108 108 { 109 109 CLEAR_IO_SYNC; 110 110 while (1) { 111 - if (likely(arch_spin_trylock(lock) == 0)) 111 + if (likely(__arch_spin_trylock(lock) == 0)) 112 112 break; 113 113 do { 114 114 HMT_low(); ··· 120 120 } 121 121 122 122 static inline 123 - void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 123 + void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 124 124 { 125 125 unsigned long flags_dis; 126 126 127 127 CLEAR_IO_SYNC; 128 128 while (1) { 129 - if (likely(arch_spin_trylock(lock) == 0)) 129 + if (likely(__arch_spin_trylock(lock) == 0)) 130 130 break; 131 131 local_save_flags(flags_dis); 132 132 local_irq_restore(flags); ··· 140 140 } 141 141 } 142 142 143 - static inline void __raw_spin_unlock(arch_spinlock_t *lock) 143 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 144 144 { 145 145 SYNC_IO; 146 - __asm__ __volatile__("# __raw_spin_unlock\n\t" 146 + __asm__ __volatile__("# arch_spin_unlock\n\t" 147 147 LWSYNC_ON_SMP: : :"memory"); 148 148 lock->slock = 0; 149 149 } 150 150 151 151 #ifdef CONFIG_PPC64 152 - extern void __raw_spin_unlock_wait(arch_spinlock_t *lock); 152 + extern void arch_spin_unlock_wait(arch_spinlock_t *lock); 153 153 #else 154 - #define __raw_spin_unlock_wait(lock) \ 155 - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 154 + #define arch_spin_unlock_wait(lock) \ 155 + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) 156 156 #endif 157 157 158 158 /* ··· 290 290 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 291 291 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 292 292 293 - #define _raw_spin_relax(lock) __spin_yield(lock) 294 - #define _raw_read_relax(lock) __rw_yield(lock) 295 - #define _raw_write_relax(lock) __rw_yield(lock) 293 + #define arch_spin_relax(lock) __spin_yield(lock) 294 + #define arch_read_relax(lock) __rw_yield(lock) 295 + #define arch_write_relax(lock) __rw_yield(lock) 296 296 297 297 #endif /* __KERNEL__ */ 298 298 #endif /* __ASM_SPINLOCK_H */
+6 -6
arch/powerpc/kernel/rtas.c
··· 80 80 81 81 local_irq_save(flags); 82 82 preempt_disable(); 83 - __raw_spin_lock_flags(&rtas.lock, flags); 83 + arch_spin_lock_flags(&rtas.lock, flags); 84 84 return flags; 85 85 } 86 86 87 87 static void unlock_rtas(unsigned long flags) 88 88 { 89 - __raw_spin_unlock(&rtas.lock); 89 + arch_spin_unlock(&rtas.lock); 90 90 local_irq_restore(flags); 91 91 preempt_enable(); 92 92 } ··· 987 987 988 988 local_irq_save(flags); 989 989 hard_irq_disable(); 990 - __raw_spin_lock(&timebase_lock); 990 + arch_spin_lock(&timebase_lock); 991 991 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); 992 992 timebase = get_tb(); 993 - __raw_spin_unlock(&timebase_lock); 993 + arch_spin_unlock(&timebase_lock); 994 994 995 995 while (timebase) 996 996 barrier(); ··· 1002 1002 { 1003 1003 while (!timebase) 1004 1004 barrier(); 1005 - __raw_spin_lock(&timebase_lock); 1005 + arch_spin_lock(&timebase_lock); 1006 1006 set_tb(timebase >> 32, timebase & 0xffffffff); 1007 1007 timebase = 0; 1008 - __raw_spin_unlock(&timebase_lock); 1008 + arch_spin_unlock(&timebase_lock); 1009 1009 }
+2 -2
arch/powerpc/lib/locks.c
··· 82 82 } 83 83 #endif 84 84 85 - void __raw_spin_unlock_wait(arch_spinlock_t *lock) 85 + void arch_spin_unlock_wait(arch_spinlock_t *lock) 86 86 { 87 87 while (lock->slock) { 88 88 HMT_low(); ··· 92 92 HMT_medium(); 93 93 } 94 94 95 - EXPORT_SYMBOL(__raw_spin_unlock_wait); 95 + EXPORT_SYMBOL(arch_spin_unlock_wait);
+4 -4
arch/powerpc/platforms/pasemi/setup.c
··· 80 80 81 81 local_irq_save(flags); 82 82 hard_irq_disable(); 83 - __raw_spin_lock(&timebase_lock); 83 + arch_spin_lock(&timebase_lock); 84 84 mtspr(SPRN_TBCTL, TBCTL_FREEZE); 85 85 isync(); 86 86 timebase = get_tb(); 87 - __raw_spin_unlock(&timebase_lock); 87 + arch_spin_unlock(&timebase_lock); 88 88 89 89 while (timebase) 90 90 barrier(); ··· 97 97 while (!timebase) 98 98 smp_rmb(); 99 99 100 - __raw_spin_lock(&timebase_lock); 100 + arch_spin_lock(&timebase_lock); 101 101 set_tb(timebase >> 32, timebase & 0xffffffff); 102 102 timebase = 0; 103 - __raw_spin_unlock(&timebase_lock); 103 + arch_spin_unlock(&timebase_lock); 104 104 } 105 105 106 106 struct smp_ops_t pas_smp_ops = {
+17 -17
arch/s390/include/asm/spinlock.h
··· 52 52 * (the type definitions are in asm/spinlock_types.h) 53 53 */ 54 54 55 - #define __raw_spin_is_locked(x) ((x)->owner_cpu != 0) 56 - #define __raw_spin_unlock_wait(lock) \ 57 - do { while (__raw_spin_is_locked(lock)) \ 58 - _raw_spin_relax(lock); } while (0) 55 + #define arch_spin_is_locked(x) ((x)->owner_cpu != 0) 56 + #define arch_spin_unlock_wait(lock) \ 57 + do { while (arch_spin_is_locked(lock)) \ 58 + arch_spin_relax(lock); } while (0) 59 59 60 - extern void _raw_spin_lock_wait(arch_spinlock_t *); 61 - extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); 62 - extern int _raw_spin_trylock_retry(arch_spinlock_t *); 63 - extern void _raw_spin_relax(arch_spinlock_t *lock); 60 + extern void arch_spin_lock_wait(arch_spinlock_t *); 61 + extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); 62 + extern int arch_spin_trylock_retry(arch_spinlock_t *); 63 + extern void arch_spin_relax(arch_spinlock_t *lock); 64 64 65 - static inline void __raw_spin_lock(arch_spinlock_t *lp) 65 + static inline void arch_spin_lock(arch_spinlock_t *lp) 66 66 { 67 67 int old; 68 68 69 69 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); 70 70 if (likely(old == 0)) 71 71 return; 72 - _raw_spin_lock_wait(lp); 72 + arch_spin_lock_wait(lp); 73 73 } 74 74 75 - static inline void __raw_spin_lock_flags(arch_spinlock_t *lp, 75 + static inline void arch_spin_lock_flags(arch_spinlock_t *lp, 76 76 unsigned long flags) 77 77 { 78 78 int old; ··· 80 80 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); 81 81 if (likely(old == 0)) 82 82 return; 83 - _raw_spin_lock_wait_flags(lp, flags); 83 + arch_spin_lock_wait_flags(lp, flags); 84 84 } 85 85 86 - static inline int __raw_spin_trylock(arch_spinlock_t *lp) 86 + static inline int arch_spin_trylock(arch_spinlock_t *lp) 87 87 { 88 88 int old; 89 89 90 90 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); 91 91 if (likely(old == 0)) 92 92 return 1; 93 - return _raw_spin_trylock_retry(lp); 93 + return arch_spin_trylock_retry(lp); 94 94 } 95 95 96 - static inline void __raw_spin_unlock(arch_spinlock_t *lp) 96 + static inline void arch_spin_unlock(arch_spinlock_t *lp) 97 97 { 98 98 _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); 99 99 } ··· 188 188 return _raw_write_trylock_retry(rw); 189 189 } 190 190 191 - #define _raw_read_relax(lock) cpu_relax() 192 - #define _raw_write_relax(lock) cpu_relax() 191 + #define arch_read_relax(lock) cpu_relax() 192 + #define arch_write_relax(lock) cpu_relax() 193 193 194 194 #endif /* __ASM_SPINLOCK_H */
+11 -11
arch/s390/lib/spinlock.c
··· 39 39 _raw_yield(); 40 40 } 41 41 42 - void _raw_spin_lock_wait(arch_spinlock_t *lp) 42 + void arch_spin_lock_wait(arch_spinlock_t *lp) 43 43 { 44 44 int count = spin_retry; 45 45 unsigned int cpu = ~smp_processor_id(); ··· 51 51 _raw_yield_cpu(~owner); 52 52 count = spin_retry; 53 53 } 54 - if (__raw_spin_is_locked(lp)) 54 + if (arch_spin_is_locked(lp)) 55 55 continue; 56 56 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 57 57 return; 58 58 } 59 59 } 60 - EXPORT_SYMBOL(_raw_spin_lock_wait); 60 + EXPORT_SYMBOL(arch_spin_lock_wait); 61 61 62 - void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) 62 + void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) 63 63 { 64 64 int count = spin_retry; 65 65 unsigned int cpu = ~smp_processor_id(); ··· 72 72 _raw_yield_cpu(~owner); 73 73 count = spin_retry; 74 74 } 75 - if (__raw_spin_is_locked(lp)) 75 + if (arch_spin_is_locked(lp)) 76 76 continue; 77 77 local_irq_disable(); 78 78 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) ··· 80 80 local_irq_restore(flags); 81 81 } 82 82 } 83 - EXPORT_SYMBOL(_raw_spin_lock_wait_flags); 83 + EXPORT_SYMBOL(arch_spin_lock_wait_flags); 84 84 85 - int _raw_spin_trylock_retry(arch_spinlock_t *lp) 85 + int arch_spin_trylock_retry(arch_spinlock_t *lp) 86 86 { 87 87 unsigned int cpu = ~smp_processor_id(); 88 88 int count; 89 89 90 90 for (count = spin_retry; count > 0; count--) { 91 - if (__raw_spin_is_locked(lp)) 91 + if (arch_spin_is_locked(lp)) 92 92 continue; 93 93 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 94 94 return 1; 95 95 } 96 96 return 0; 97 97 } 98 - EXPORT_SYMBOL(_raw_spin_trylock_retry); 98 + EXPORT_SYMBOL(arch_spin_trylock_retry); 99 99 100 - void _raw_spin_relax(arch_spinlock_t *lock) 100 + void arch_spin_relax(arch_spinlock_t *lock) 101 101 { 102 102 unsigned int cpu = lock->owner_cpu; 103 103 if (cpu != 0) 104 104 _raw_yield_cpu(~cpu); 105 105 } 106 - EXPORT_SYMBOL(_raw_spin_relax); 106 + EXPORT_SYMBOL(arch_spin_relax); 107 107 108 108 void _raw_read_lock_wait(raw_rwlock_t *rw) 109 109 {
+13 -13
arch/sh/include/asm/spinlock.h
··· 23 23 * Your basic SMP spinlocks, allowing only a single CPU anywhere 24 24 */ 25 25 26 - #define __raw_spin_is_locked(x) ((x)->lock <= 0) 27 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 28 - #define __raw_spin_unlock_wait(x) \ 29 - do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0) 26 + #define arch_spin_is_locked(x) ((x)->lock <= 0) 27 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 28 + #define arch_spin_unlock_wait(x) \ 29 + do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) 30 30 31 31 /* 32 32 * Simple spin lock operations. There are two variants, one clears IRQ's ··· 34 34 * 35 35 * We make no fairness assumptions. They have a cost. 36 36 */ 37 - static inline void __raw_spin_lock(arch_spinlock_t *lock) 37 + static inline void arch_spin_lock(arch_spinlock_t *lock) 38 38 { 39 39 unsigned long tmp; 40 40 unsigned long oldval; 41 41 42 42 __asm__ __volatile__ ( 43 43 "1: \n\t" 44 - "movli.l @%2, %0 ! __raw_spin_lock \n\t" 44 + "movli.l @%2, %0 ! arch_spin_lock \n\t" 45 45 "mov %0, %1 \n\t" 46 46 "mov #0, %0 \n\t" 47 47 "movco.l %0, @%2 \n\t" ··· 54 54 ); 55 55 } 56 56 57 - static inline void __raw_spin_unlock(arch_spinlock_t *lock) 57 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 58 58 { 59 59 unsigned long tmp; 60 60 61 61 __asm__ __volatile__ ( 62 - "mov #1, %0 ! __raw_spin_unlock \n\t" 62 + "mov #1, %0 ! arch_spin_unlock \n\t" 63 63 "mov.l %0, @%1 \n\t" 64 64 : "=&z" (tmp) 65 65 : "r" (&lock->lock) ··· 67 67 ); 68 68 } 69 69 70 - static inline int __raw_spin_trylock(arch_spinlock_t *lock) 70 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 71 71 { 72 72 unsigned long tmp, oldval; 73 73 74 74 __asm__ __volatile__ ( 75 75 "1: \n\t" 76 - "movli.l @%2, %0 ! __raw_spin_trylock \n\t" 76 + "movli.l @%2, %0 ! arch_spin_trylock \n\t" 77 77 "mov %0, %1 \n\t" 78 78 "mov #0, %0 \n\t" 79 79 "movco.l %0, @%2 \n\t" ··· 219 219 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 220 220 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 221 221 222 - #define _raw_spin_relax(lock) cpu_relax() 223 - #define _raw_read_relax(lock) cpu_relax() 224 - #define _raw_write_relax(lock) cpu_relax() 222 + #define arch_spin_relax(lock) cpu_relax() 223 + #define arch_read_relax(lock) cpu_relax() 224 + #define arch_write_relax(lock) cpu_relax() 225 225 226 226 #endif /* __ASM_SH_SPINLOCK_H */
+10 -10
arch/sparc/include/asm/spinlock_32.h
··· 10 10 11 11 #include <asm/psr.h> 12 12 13 - #define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) 13 + #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) 14 14 15 - #define __raw_spin_unlock_wait(lock) \ 16 - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 15 + #define arch_spin_unlock_wait(lock) \ 16 + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) 17 17 18 - static inline void __raw_spin_lock(arch_spinlock_t *lock) 18 + static inline void arch_spin_lock(arch_spinlock_t *lock) 19 19 { 20 20 __asm__ __volatile__( 21 21 "\n1:\n\t" ··· 35 35 : "g2", "memory", "cc"); 36 36 } 37 37 38 - static inline int __raw_spin_trylock(arch_spinlock_t *lock) 38 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 39 39 { 40 40 unsigned int result; 41 41 __asm__ __volatile__("ldstub [%1], %0" ··· 45 45 return (result == 0); 46 46 } 47 47 48 - static inline void __raw_spin_unlock(arch_spinlock_t *lock) 48 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 49 49 { 50 50 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); 51 51 } ··· 176 176 177 177 #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) 178 178 179 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 179 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 180 180 #define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) 181 181 #define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) 182 182 183 - #define _raw_spin_relax(lock) cpu_relax() 184 - #define _raw_read_relax(lock) cpu_relax() 185 - #define _raw_write_relax(lock) cpu_relax() 183 + #define arch_spin_relax(lock) cpu_relax() 184 + #define arch_read_relax(lock) cpu_relax() 185 + #define arch_write_relax(lock) cpu_relax() 186 186 187 187 #define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) 188 188 #define __raw_write_can_lock(rw) (!(rw)->lock)
+9 -9
arch/sparc/include/asm/spinlock_64.h
··· 21 21 * the spinner sections must be pre-V9 branches. 22 22 */ 23 23 24 - #define __raw_spin_is_locked(lp) ((lp)->lock != 0) 24 + #define arch_spin_is_locked(lp) ((lp)->lock != 0) 25 25 26 - #define __raw_spin_unlock_wait(lp) \ 26 + #define arch_spin_unlock_wait(lp) \ 27 27 do { rmb(); \ 28 28 } while((lp)->lock) 29 29 30 - static inline void __raw_spin_lock(arch_spinlock_t *lock) 30 + static inline void arch_spin_lock(arch_spinlock_t *lock) 31 31 { 32 32 unsigned long tmp; 33 33 ··· 46 46 : "memory"); 47 47 } 48 48 49 - static inline int __raw_spin_trylock(arch_spinlock_t *lock) 49 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 50 50 { 51 51 unsigned long result; 52 52 ··· 59 59 return (result == 0UL); 60 60 } 61 61 62 - static inline void __raw_spin_unlock(arch_spinlock_t *lock) 62 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 63 63 { 64 64 __asm__ __volatile__( 65 65 " stb %%g0, [%0]" ··· 68 68 : "memory"); 69 69 } 70 70 71 - static inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 71 + static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 72 72 { 73 73 unsigned long tmp1, tmp2; 74 74 ··· 222 222 #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) 223 223 #define __raw_write_can_lock(rw) (!(rw)->lock) 224 224 225 - #define _raw_spin_relax(lock) cpu_relax() 226 - #define _raw_read_relax(lock) cpu_relax() 227 - #define _raw_write_relax(lock) cpu_relax() 225 + #define arch_spin_relax(lock) cpu_relax() 226 + #define arch_read_relax(lock) cpu_relax() 227 + #define arch_write_relax(lock) cpu_relax() 228 228 229 229 #endif /* !(__ASSEMBLY__) */ 230 230
+7 -7
arch/x86/include/asm/paravirt.h
··· 731 731 732 732 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) 733 733 734 - static inline int __raw_spin_is_locked(struct arch_spinlock *lock) 734 + static inline int arch_spin_is_locked(struct arch_spinlock *lock) 735 735 { 736 736 return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); 737 737 } 738 738 739 - static inline int __raw_spin_is_contended(struct arch_spinlock *lock) 739 + static inline int arch_spin_is_contended(struct arch_spinlock *lock) 740 740 { 741 741 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); 742 742 } 743 - #define __raw_spin_is_contended __raw_spin_is_contended 743 + #define arch_spin_is_contended arch_spin_is_contended 744 744 745 - static __always_inline void __raw_spin_lock(struct arch_spinlock *lock) 745 + static __always_inline void arch_spin_lock(struct arch_spinlock *lock) 746 746 { 747 747 PVOP_VCALL1(pv_lock_ops.spin_lock, lock); 748 748 } 749 749 750 - static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock, 750 + static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock, 751 751 unsigned long flags) 752 752 { 753 753 PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); 754 754 } 755 755 756 - static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock) 756 + static __always_inline int arch_spin_trylock(struct arch_spinlock *lock) 757 757 { 758 758 return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); 759 759 } 760 760 761 - static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock) 761 + static __always_inline void arch_spin_unlock(struct arch_spinlock *lock) 762 762 { 763 763 PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); 764 764 }
+13 -13
arch/x86/include/asm/spinlock.h
··· 174 174 175 175 #ifndef CONFIG_PARAVIRT_SPINLOCKS 176 176 177 - static inline int __raw_spin_is_locked(arch_spinlock_t *lock) 177 + static inline int arch_spin_is_locked(arch_spinlock_t *lock) 178 178 { 179 179 return __ticket_spin_is_locked(lock); 180 180 } 181 181 182 - static inline int __raw_spin_is_contended(arch_spinlock_t *lock) 182 + static inline int arch_spin_is_contended(arch_spinlock_t *lock) 183 183 { 184 184 return __ticket_spin_is_contended(lock); 185 185 } 186 - #define __raw_spin_is_contended __raw_spin_is_contended 186 + #define arch_spin_is_contended arch_spin_is_contended 187 187 188 - static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) 188 + static __always_inline void arch_spin_lock(arch_spinlock_t *lock) 189 189 { 190 190 __ticket_spin_lock(lock); 191 191 } 192 192 193 - static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) 193 + static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) 194 194 { 195 195 return __ticket_spin_trylock(lock); 196 196 } 197 197 198 - static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) 198 + static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) 199 199 { 200 200 __ticket_spin_unlock(lock); 201 201 } 202 202 203 - static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, 203 + static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, 204 204 unsigned long flags) 205 205 { 206 - __raw_spin_lock(lock); 206 + arch_spin_lock(lock); 207 207 } 208 208 209 209 #endif /* CONFIG_PARAVIRT_SPINLOCKS */ 210 210 211 - static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) 211 + static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) 212 212 { 213 - while (__raw_spin_is_locked(lock)) 213 + while (arch_spin_is_locked(lock)) 214 214 cpu_relax(); 215 215 } 216 216 ··· 298 298 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 299 299 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 300 300 301 - #define _raw_spin_relax(lock) cpu_relax() 302 - #define _raw_read_relax(lock) cpu_relax() 303 - #define _raw_write_relax(lock) cpu_relax() 301 + #define arch_spin_relax(lock) cpu_relax() 302 + #define arch_read_relax(lock) cpu_relax() 303 + #define arch_write_relax(lock) cpu_relax() 304 304 305 305 /* The {read|write|spin}_lock() on x86 are full memory barriers. */ 306 306 static inline void smp_mb__after_lock(void) { }
+3 -3
arch/x86/kernel/dumpstack.c
··· 207 207 /* racy, but better than risking deadlock. */ 208 208 raw_local_irq_save(flags); 209 209 cpu = smp_processor_id(); 210 - if (!__raw_spin_trylock(&die_lock)) { 210 + if (!arch_spin_trylock(&die_lock)) { 211 211 if (cpu == die_owner) 212 212 /* nested oops. should stop eventually */; 213 213 else 214 - __raw_spin_lock(&die_lock); 214 + arch_spin_lock(&die_lock); 215 215 } 216 216 die_nest_count++; 217 217 die_owner = cpu; ··· 231 231 die_nest_count--; 232 232 if (!die_nest_count) 233 233 /* Nest count reaches zero, release the lock. */ 234 - __raw_spin_unlock(&die_lock); 234 + arch_spin_unlock(&die_lock); 235 235 raw_local_irq_restore(flags); 236 236 oops_exit(); 237 237
+1 -1
arch/x86/kernel/paravirt-spinlocks.c
··· 10 10 static inline void 11 11 default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 12 12 { 13 - __raw_spin_lock(lock); 13 + arch_spin_lock(lock); 14 14 } 15 15 16 16 struct pv_lock_ops pv_lock_ops = {
+4 -4
arch/x86/kernel/tsc_sync.c
··· 62 62 * previous TSC that was measured (possibly on 63 63 * another CPU) and update the previous TSC timestamp. 64 64 */ 65 - __raw_spin_lock(&sync_lock); 65 + arch_spin_lock(&sync_lock); 66 66 prev = last_tsc; 67 67 rdtsc_barrier(); 68 68 now = get_cycles(); 69 69 rdtsc_barrier(); 70 70 last_tsc = now; 71 - __raw_spin_unlock(&sync_lock); 71 + arch_spin_unlock(&sync_lock); 72 72 73 73 /* 74 74 * Be nice every now and then (and also check whether ··· 87 87 * we saw a time-warp of the TSC going backwards: 88 88 */ 89 89 if (unlikely(prev > now)) { 90 - __raw_spin_lock(&sync_lock); 90 + arch_spin_lock(&sync_lock); 91 91 max_warp = max(max_warp, prev - now); 92 92 nr_warps++; 93 - __raw_spin_unlock(&sync_lock); 93 + arch_spin_unlock(&sync_lock); 94 94 } 95 95 } 96 96 WARN(!(now-start),
+2 -2
include/asm-generic/bitops/atomic.h
··· 22 22 #define _atomic_spin_lock_irqsave(l,f) do { \ 23 23 arch_spinlock_t *s = ATOMIC_HASH(l); \ 24 24 local_irq_save(f); \ 25 - __raw_spin_lock(s); \ 25 + arch_spin_lock(s); \ 26 26 } while(0) 27 27 28 28 #define _atomic_spin_unlock_irqrestore(l,f) do { \ 29 29 arch_spinlock_t *s = ATOMIC_HASH(l); \ 30 - __raw_spin_unlock(s); \ 30 + arch_spin_unlock(s); \ 31 31 local_irq_restore(f); \ 32 32 } while(0) 33 33
+11 -11
include/linux/spinlock.h
··· 14 14 * linux/spinlock_types.h: 15 15 * defines the generic type and initializers 16 16 * 17 - * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel 17 + * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel 18 18 * implementations, mostly inline assembly code 19 19 * 20 20 * (also included on UP-debug builds:) ··· 34 34 * defines the generic type and initializers 35 35 * 36 36 * linux/spinlock_up.h: 37 - * contains the __raw_spin_*()/etc. version of UP 37 + * contains the arch_spin_*()/etc. version of UP 38 38 * builds. (which are NOPs on non-debug, non-preempt 39 39 * builds) 40 40 * ··· 103 103 do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) 104 104 #endif 105 105 106 - #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) 106 + #define spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) 107 107 108 108 #ifdef CONFIG_GENERIC_LOCKBREAK 109 109 #define spin_is_contended(lock) ((lock)->break_lock) 110 110 #else 111 111 112 - #ifdef __raw_spin_is_contended 113 - #define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) 112 + #ifdef arch_spin_is_contended 113 + #define spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) 114 114 #else 115 115 #define spin_is_contended(lock) (((void)(lock), 0)) 116 - #endif /*__raw_spin_is_contended*/ 116 + #endif /*arch_spin_is_contended*/ 117 117 #endif 118 118 119 119 /* The lock does not imply full memory barrier. */ ··· 125 125 * spin_unlock_wait - wait until the spinlock gets unlocked 126 126 * @lock: the spinlock in question. 127 127 */ 128 - #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) 128 + #define spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) 129 129 130 130 #ifdef CONFIG_DEBUG_SPINLOCK 131 131 extern void _raw_spin_lock(spinlock_t *lock); ··· 133 133 extern int _raw_spin_trylock(spinlock_t *lock); 134 134 extern void _raw_spin_unlock(spinlock_t *lock); 135 135 #else 136 - # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) 136 + # define _raw_spin_lock(lock) arch_spin_lock(&(lock)->raw_lock) 137 137 # define _raw_spin_lock_flags(lock, flags) \ 138 - __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) 139 - # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) 140 - # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) 138 + arch_spin_lock_flags(&(lock)->raw_lock, *(flags)) 139 + # define _raw_spin_trylock(lock) arch_spin_trylock(&(lock)->raw_lock) 140 + # define _raw_spin_unlock(lock) arch_spin_unlock(&(lock)->raw_lock) 141 141 #endif 142 142 143 143 /*
+13 -13
include/linux/spinlock_up.h
··· 18 18 */ 19 19 20 20 #ifdef CONFIG_DEBUG_SPINLOCK 21 - #define __raw_spin_is_locked(x) ((x)->slock == 0) 21 + #define arch_spin_is_locked(x) ((x)->slock == 0) 22 22 23 - static inline void __raw_spin_lock(arch_spinlock_t *lock) 23 + static inline void arch_spin_lock(arch_spinlock_t *lock) 24 24 { 25 25 lock->slock = 0; 26 26 } 27 27 28 28 static inline void 29 - __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 29 + arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 30 30 { 31 31 local_irq_save(flags); 32 32 lock->slock = 0; 33 33 } 34 34 35 - static inline int __raw_spin_trylock(arch_spinlock_t *lock) 35 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 36 36 { 37 37 char oldval = lock->slock; 38 38 ··· 41 41 return oldval > 0; 42 42 } 43 43 44 - static inline void __raw_spin_unlock(arch_spinlock_t *lock) 44 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 45 45 { 46 46 lock->slock = 1; 47 47 } ··· 57 57 #define __raw_write_unlock(lock) do { (void)(lock); } while (0) 58 58 59 59 #else /* DEBUG_SPINLOCK */ 60 - #define __raw_spin_is_locked(lock) ((void)(lock), 0) 60 + #define arch_spin_is_locked(lock) ((void)(lock), 0) 61 61 /* for sched.c and kernel_lock.c: */ 62 - # define __raw_spin_lock(lock) do { (void)(lock); } while (0) 63 - # define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) 64 - # define __raw_spin_unlock(lock) do { (void)(lock); } while (0) 65 - # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) 62 + # define arch_spin_lock(lock) do { (void)(lock); } while (0) 63 + # define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) 64 + # define arch_spin_unlock(lock) do { (void)(lock); } while (0) 65 + # define arch_spin_trylock(lock) ({ (void)(lock); 1; }) 66 66 #endif /* DEBUG_SPINLOCK */ 67 67 68 - #define __raw_spin_is_contended(lock) (((void)(lock), 0)) 68 + #define arch_spin_is_contended(lock) (((void)(lock), 0)) 69 69 70 70 #define __raw_read_can_lock(lock) (((void)(lock), 1)) 71 71 #define __raw_write_can_lock(lock) (((void)(lock), 1)) 72 72 73 - #define __raw_spin_unlock_wait(lock) \ 74 - do { cpu_relax(); } while (__raw_spin_is_locked(lock)) 73 + #define arch_spin_unlock_wait(lock) \ 74 + do { cpu_relax(); } while (arch_spin_is_locked(lock)) 75 75 76 76 #endif /* __LINUX_SPINLOCK_UP_H */
+9 -9
kernel/lockdep.c
··· 77 77 78 78 static int graph_lock(void) 79 79 { 80 - __raw_spin_lock(&lockdep_lock); 80 + arch_spin_lock(&lockdep_lock); 81 81 /* 82 82 * Make sure that if another CPU detected a bug while 83 83 * walking the graph we dont change it (while the other ··· 85 85 * dropped already) 86 86 */ 87 87 if (!debug_locks) { 88 - __raw_spin_unlock(&lockdep_lock); 88 + arch_spin_unlock(&lockdep_lock); 89 89 return 0; 90 90 } 91 91 /* prevent any recursions within lockdep from causing deadlocks */ ··· 95 95 96 96 static inline int graph_unlock(void) 97 97 { 98 - if (debug_locks && !__raw_spin_is_locked(&lockdep_lock)) 98 + if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) 99 99 return DEBUG_LOCKS_WARN_ON(1); 100 100 101 101 current->lockdep_recursion--; 102 - __raw_spin_unlock(&lockdep_lock); 102 + arch_spin_unlock(&lockdep_lock); 103 103 return 0; 104 104 } 105 105 ··· 111 111 { 112 112 int ret = debug_locks_off(); 113 113 114 - __raw_spin_unlock(&lockdep_lock); 114 + arch_spin_unlock(&lockdep_lock); 115 115 116 116 return ret; 117 117 } ··· 1170 1170 this.class = class; 1171 1171 1172 1172 local_irq_save(flags); 1173 - __raw_spin_lock(&lockdep_lock); 1173 + arch_spin_lock(&lockdep_lock); 1174 1174 ret = __lockdep_count_forward_deps(&this); 1175 - __raw_spin_unlock(&lockdep_lock); 1175 + arch_spin_unlock(&lockdep_lock); 1176 1176 local_irq_restore(flags); 1177 1177 1178 1178 return ret; ··· 1197 1197 this.class = class; 1198 1198 1199 1199 local_irq_save(flags); 1200 - __raw_spin_lock(&lockdep_lock); 1200 + arch_spin_lock(&lockdep_lock); 1201 1201 ret = __lockdep_count_backward_deps(&this); 1202 - __raw_spin_unlock(&lockdep_lock); 1202 + arch_spin_unlock(&lockdep_lock); 1203 1203 local_irq_restore(flags); 1204 1204 1205 1205 return ret;
+2 -2
kernel/mutex-debug.h
··· 43 43 \ 44 44 DEBUG_LOCKS_WARN_ON(in_interrupt()); \ 45 45 local_irq_save(flags); \ 46 - __raw_spin_lock(&(lock)->raw_lock); \ 46 + arch_spin_lock(&(lock)->raw_lock); \ 47 47 DEBUG_LOCKS_WARN_ON(l->magic != l); \ 48 48 } while (0) 49 49 50 50 #define spin_unlock_mutex(lock, flags) \ 51 51 do { \ 52 - __raw_spin_unlock(&(lock)->raw_lock); \ 52 + arch_spin_unlock(&(lock)->raw_lock); \ 53 53 local_irq_restore(flags); \ 54 54 preempt_check_resched(); \ 55 55 } while (0)
+2 -2
kernel/spinlock.c
··· 53 53 if (!(lock)->break_lock) \ 54 54 (lock)->break_lock = 1; \ 55 55 while (!op##_can_lock(lock) && (lock)->break_lock) \ 56 - _raw_##op##_relax(&lock->raw_lock); \ 56 + arch_##op##_relax(&lock->raw_lock); \ 57 57 } \ 58 58 (lock)->break_lock = 0; \ 59 59 } \ ··· 73 73 if (!(lock)->break_lock) \ 74 74 (lock)->break_lock = 1; \ 75 75 while (!op##_can_lock(lock) && (lock)->break_lock) \ 76 - _raw_##op##_relax(&lock->raw_lock); \ 76 + arch_##op##_relax(&lock->raw_lock); \ 77 77 } \ 78 78 (lock)->break_lock = 0; \ 79 79 return flags; \
+6 -6
kernel/trace/ring_buffer.c
··· 2834 2834 int ret; 2835 2835 2836 2836 local_irq_save(flags); 2837 - __raw_spin_lock(&cpu_buffer->lock); 2837 + arch_spin_lock(&cpu_buffer->lock); 2838 2838 2839 2839 again: 2840 2840 /* ··· 2923 2923 goto again; 2924 2924 2925 2925 out: 2926 - __raw_spin_unlock(&cpu_buffer->lock); 2926 + arch_spin_unlock(&cpu_buffer->lock); 2927 2927 local_irq_restore(flags); 2928 2928 2929 2929 return reader; ··· 3286 3286 synchronize_sched(); 3287 3287 3288 3288 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3289 - __raw_spin_lock(&cpu_buffer->lock); 3289 + arch_spin_lock(&cpu_buffer->lock); 3290 3290 rb_iter_reset(iter); 3291 - __raw_spin_unlock(&cpu_buffer->lock); 3291 + arch_spin_unlock(&cpu_buffer->lock); 3292 3292 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3293 3293 3294 3294 return iter; ··· 3408 3408 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 3409 3409 goto out; 3410 3410 3411 - __raw_spin_lock(&cpu_buffer->lock); 3411 + arch_spin_lock(&cpu_buffer->lock); 3412 3412 3413 3413 rb_reset_cpu(cpu_buffer); 3414 3414 3415 - __raw_spin_unlock(&cpu_buffer->lock); 3415 + arch_spin_unlock(&cpu_buffer->lock); 3416 3416 3417 3417 out: 3418 3418 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+16 -16
kernel/trace/trace.c
··· 555 555 return; 556 556 557 557 WARN_ON_ONCE(!irqs_disabled()); 558 - __raw_spin_lock(&ftrace_max_lock); 558 + arch_spin_lock(&ftrace_max_lock); 559 559 560 560 tr->buffer = max_tr.buffer; 561 561 max_tr.buffer = buf; 562 562 563 563 __update_max_tr(tr, tsk, cpu); 564 - __raw_spin_unlock(&ftrace_max_lock); 564 + arch_spin_unlock(&ftrace_max_lock); 565 565 } 566 566 567 567 /** ··· 581 581 return; 582 582 583 583 WARN_ON_ONCE(!irqs_disabled()); 584 - __raw_spin_lock(&ftrace_max_lock); 584 + arch_spin_lock(&ftrace_max_lock); 585 585 586 586 ftrace_disable_cpu(); 587 587 ··· 603 603 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 604 604 605 605 __update_max_tr(tr, tsk, cpu); 606 - __raw_spin_unlock(&ftrace_max_lock); 606 + arch_spin_unlock(&ftrace_max_lock); 607 607 } 608 608 #endif /* CONFIG_TRACER_MAX_TRACE */ 609 609 ··· 915 915 * nor do we want to disable interrupts, 916 916 * so if we miss here, then better luck next time. 917 917 */ 918 - if (!__raw_spin_trylock(&trace_cmdline_lock)) 918 + if (!arch_spin_trylock(&trace_cmdline_lock)) 919 919 return; 920 920 921 921 idx = map_pid_to_cmdline[tsk->pid]; ··· 940 940 941 941 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); 942 942 943 - __raw_spin_unlock(&trace_cmdline_lock); 943 + arch_spin_unlock(&trace_cmdline_lock); 944 944 } 945 945 946 946 void trace_find_cmdline(int pid, char comm[]) ··· 958 958 } 959 959 960 960 preempt_disable(); 961 - __raw_spin_lock(&trace_cmdline_lock); 961 + arch_spin_lock(&trace_cmdline_lock); 962 962 map = map_pid_to_cmdline[pid]; 963 963 if (map != NO_CMDLINE_MAP) 964 964 strcpy(comm, saved_cmdlines[map]); 965 965 else 966 966 strcpy(comm, "<...>"); 967 967 968 - __raw_spin_unlock(&trace_cmdline_lock); 968 + arch_spin_unlock(&trace_cmdline_lock); 969 969 preempt_enable(); 970 970 } 971 971 ··· 1283 1283 1284 1284 /* Lockdep uses trace_printk for lock tracing */ 1285 1285 local_irq_save(flags); 1286 - __raw_spin_lock(&trace_buf_lock); 1286 + arch_spin_lock(&trace_buf_lock); 1287 1287 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1288 1288 1289 1289 if (len > TRACE_BUF_SIZE || len < 0) ··· 1304 1304 ring_buffer_unlock_commit(buffer, event); 1305 1305 1306 1306 out_unlock: 1307 - __raw_spin_unlock(&trace_buf_lock); 1307 + arch_spin_unlock(&trace_buf_lock); 1308 1308 local_irq_restore(flags); 1309 1309 1310 1310 out: ··· 1360 1360 1361 1361 pause_graph_tracing(); 1362 1362 raw_local_irq_save(irq_flags); 1363 - __raw_spin_lock(&trace_buf_lock); 1363 + arch_spin_lock(&trace_buf_lock); 1364 1364 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1365 1365 1366 1366 size = sizeof(*entry) + len + 1; ··· 1378 1378 ring_buffer_unlock_commit(buffer, event); 1379 1379 1380 1380 out_unlock: 1381 - __raw_spin_unlock(&trace_buf_lock); 1381 + arch_spin_unlock(&trace_buf_lock); 1382 1382 raw_local_irq_restore(irq_flags); 1383 1383 unpause_graph_tracing(); 1384 1384 out: ··· 2279 2279 mutex_lock(&tracing_cpumask_update_lock); 2280 2280 2281 2281 local_irq_disable(); 2282 - __raw_spin_lock(&ftrace_max_lock); 2282 + arch_spin_lock(&ftrace_max_lock); 2283 2283 for_each_tracing_cpu(cpu) { 2284 2284 /* 2285 2285 * Increase/decrease the disabled counter if we are ··· 2294 2294 atomic_dec(&global_trace.data[cpu]->disabled); 2295 2295 } 2296 2296 } 2297 - __raw_spin_unlock(&ftrace_max_lock); 2297 + arch_spin_unlock(&ftrace_max_lock); 2298 2298 local_irq_enable(); 2299 2299 2300 2300 cpumask_copy(tracing_cpumask, tracing_cpumask_new); ··· 4318 4318 4319 4319 /* only one dump */ 4320 4320 local_irq_save(flags); 4321 - __raw_spin_lock(&ftrace_dump_lock); 4321 + arch_spin_lock(&ftrace_dump_lock); 4322 4322 if (dump_ran) 4323 4323 goto out; 4324 4324 ··· 4393 4393 } 4394 4394 4395 4395 out: 4396 - __raw_spin_unlock(&ftrace_dump_lock); 4396 + arch_spin_unlock(&ftrace_dump_lock); 4397 4397 local_irq_restore(flags); 4398 4398 } 4399 4399
+2 -2
kernel/trace/trace_clock.c
··· 94 94 if (unlikely(in_nmi())) 95 95 goto out; 96 96 97 - __raw_spin_lock(&trace_clock_struct.lock); 97 + arch_spin_lock(&trace_clock_struct.lock); 98 98 99 99 /* 100 100 * TODO: if this happens often then maybe we should reset ··· 106 106 107 107 trace_clock_struct.prev_time = now; 108 108 109 - __raw_spin_unlock(&trace_clock_struct.lock); 109 + arch_spin_unlock(&trace_clock_struct.lock); 110 110 111 111 out: 112 112 raw_local_irq_restore(flags);
+6 -6
kernel/trace/trace_sched_wakeup.c
··· 143 143 goto out; 144 144 145 145 local_irq_save(flags); 146 - __raw_spin_lock(&wakeup_lock); 146 + arch_spin_lock(&wakeup_lock); 147 147 148 148 /* We could race with grabbing wakeup_lock */ 149 149 if (unlikely(!tracer_enabled || next != wakeup_task)) ··· 169 169 170 170 out_unlock: 171 171 __wakeup_reset(wakeup_trace); 172 - __raw_spin_unlock(&wakeup_lock); 172 + arch_spin_unlock(&wakeup_lock); 173 173 local_irq_restore(flags); 174 174 out: 175 175 atomic_dec(&wakeup_trace->data[cpu]->disabled); ··· 193 193 tracing_reset_online_cpus(tr); 194 194 195 195 local_irq_save(flags); 196 - __raw_spin_lock(&wakeup_lock); 196 + arch_spin_lock(&wakeup_lock); 197 197 __wakeup_reset(tr); 198 - __raw_spin_unlock(&wakeup_lock); 198 + arch_spin_unlock(&wakeup_lock); 199 199 local_irq_restore(flags); 200 200 } 201 201 ··· 225 225 goto out; 226 226 227 227 /* interrupts should be off from try_to_wake_up */ 228 - __raw_spin_lock(&wakeup_lock); 228 + arch_spin_lock(&wakeup_lock); 229 229 230 230 /* check for races. */ 231 231 if (!tracer_enabled || p->prio >= wakeup_prio) ··· 255 255 trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); 256 256 257 257 out_locked: 258 - __raw_spin_unlock(&wakeup_lock); 258 + arch_spin_unlock(&wakeup_lock); 259 259 out: 260 260 atomic_dec(&wakeup_trace->data[cpu]->disabled); 261 261 }
+2 -2
kernel/trace/trace_selftest.c
··· 67 67 68 68 /* Don't allow flipping of max traces now */ 69 69 local_irq_save(flags); 70 - __raw_spin_lock(&ftrace_max_lock); 70 + arch_spin_lock(&ftrace_max_lock); 71 71 72 72 cnt = ring_buffer_entries(tr->buffer); 73 73 ··· 85 85 break; 86 86 } 87 87 tracing_on(); 88 - __raw_spin_unlock(&ftrace_max_lock); 88 + arch_spin_unlock(&ftrace_max_lock); 89 89 local_irq_restore(flags); 90 90 91 91 if (count)
+6 -6
kernel/trace/trace_stack.c
··· 54 54 return; 55 55 56 56 local_irq_save(flags); 57 - __raw_spin_lock(&max_stack_lock); 57 + arch_spin_lock(&max_stack_lock); 58 58 59 59 /* a race could have already updated it */ 60 60 if (this_size <= max_stack_size) ··· 103 103 } 104 104 105 105 out: 106 - __raw_spin_unlock(&max_stack_lock); 106 + arch_spin_unlock(&max_stack_lock); 107 107 local_irq_restore(flags); 108 108 } 109 109 ··· 171 171 return ret; 172 172 173 173 local_irq_save(flags); 174 - __raw_spin_lock(&max_stack_lock); 174 + arch_spin_lock(&max_stack_lock); 175 175 *ptr = val; 176 - __raw_spin_unlock(&max_stack_lock); 176 + arch_spin_unlock(&max_stack_lock); 177 177 local_irq_restore(flags); 178 178 179 179 return count; ··· 207 207 static void *t_start(struct seq_file *m, loff_t *pos) 208 208 { 209 209 local_irq_disable(); 210 - __raw_spin_lock(&max_stack_lock); 210 + arch_spin_lock(&max_stack_lock); 211 211 212 212 if (*pos == 0) 213 213 return SEQ_START_TOKEN; ··· 217 217 218 218 static void t_stop(struct seq_file *m, void *p) 219 219 { 220 - __raw_spin_unlock(&max_stack_lock); 220 + arch_spin_unlock(&max_stack_lock); 221 221 local_irq_enable(); 222 222 } 223 223
+4 -4
lib/spinlock_debug.c
··· 106 106 107 107 for (;;) { 108 108 for (i = 0; i < loops; i++) { 109 - if (__raw_spin_trylock(&lock->raw_lock)) 109 + if (arch_spin_trylock(&lock->raw_lock)) 110 110 return; 111 111 __delay(1); 112 112 } ··· 128 128 void _raw_spin_lock(spinlock_t *lock) 129 129 { 130 130 debug_spin_lock_before(lock); 131 - if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) 131 + if (unlikely(!arch_spin_trylock(&lock->raw_lock))) 132 132 __spin_lock_debug(lock); 133 133 debug_spin_lock_after(lock); 134 134 } 135 135 136 136 int _raw_spin_trylock(spinlock_t *lock) 137 137 { 138 - int ret = __raw_spin_trylock(&lock->raw_lock); 138 + int ret = arch_spin_trylock(&lock->raw_lock); 139 139 140 140 if (ret) 141 141 debug_spin_lock_after(lock); ··· 151 151 void _raw_spin_unlock(spinlock_t *lock) 152 152 { 153 153 debug_spin_unlock(lock); 154 - __raw_spin_unlock(&lock->raw_lock); 154 + arch_spin_unlock(&lock->raw_lock); 155 155 } 156 156 157 157 static void rwlock_bug(rwlock_t *lock, const char *msg)