
locking: Convert raw_rwlock functions to arch_rwlock

Namespace cleanup for the rwlock functions: the __raw_ prefixed arch-level functions become arch_ prefixed, matching the arch_rwlock_t type they operate on. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org

18 files changed, 215 insertions(+), 215 deletions(-)
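The conversion is mechanical: each architecture's rwlock primitives drop the __raw_ prefix in favour of arch_, and the generic layer in include/linux/rwlock.h is repointed at the new names. A toy model of the resulting layering, patterned on the UP stubs from include/linux/spinlock_up.h further down (this compiles standalone; the real generic layer also runs preempt and lockdep hooks, elided here):

```c
#include <stdio.h>

typedef struct { volatile int lock; } arch_rwlock_t;

/* Per-arch primitives: formerly __raw_read_lock()/__raw_read_unlock().
 * Modelled on the UP no-ops in spinlock_up.h below. */
#define arch_read_lock(l)	do { (void)(l); } while (0)
#define arch_read_unlock(l)	do { (void)(l); } while (0)

typedef struct { arch_rwlock_t raw_lock; } rwlock_t;

/* Generic layer, shaped like include/linux/rwlock.h after the patch. */
#define read_lock(rw)		arch_read_lock(&(rw)->raw_lock)
#define read_unlock(rw)		arch_read_unlock(&(rw)->raw_lock)

int main(void)
{
	rwlock_t rw = { { 0 } };

	read_lock(&rw);
	puts("read-side critical section");
	read_unlock(&rw);
	return 0;
}
```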
arch/alpha/include/asm/spinlock.h (+10 -10)
···
 
 /***********************************************************/
 
-static inline int __raw_read_can_lock(arch_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
 	return (lock->lock & 1) == 0;
 }
 
-static inline int __raw_write_can_lock(arch_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
 	return lock->lock == 0;
 }
 
-static inline void __raw_read_lock(arch_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
 {
 	long regx;
···
 	: "m" (*lock) : "memory");
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
 {
 	long regx;
···
 	: "m" (*lock) : "memory");
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t * lock)
+static inline int arch_read_trylock(arch_rwlock_t * lock)
 {
 	long regx;
 	int success;
···
 	return success;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t * lock)
+static inline int arch_write_trylock(arch_rwlock_t * lock)
 {
 	long regx;
 	int success;
···
 	return success;
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t * lock)
+static inline void arch_read_unlock(arch_rwlock_t * lock)
 {
 	long regx;
 	__asm__ __volatile__(
···
 	: "m" (*lock) : "memory");
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t * lock)
+static inline void arch_write_unlock(arch_rwlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock) cpu_relax()
 #define arch_read_relax(lock) cpu_relax()
arch/arm/include/asm/spinlock.h (+10 -10)
···
  * just write zero since the lock is exclusively held.
  */
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
···
 	smp_mb();
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
···
 	}
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	smp_mb();
 
···
 }
 
 /* write_can_lock - would write_trylock() succeed? */
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
 
 /*
  * Read locks are a bit more hairy:
···
  * currently active. However, we know we won't have any write
  * locks.
  */
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
···
 	smp_mb();
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
···
 	: "cc");
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2 = 1;
 
···
 }
 
 /* read_can_lock - would read_trylock() succeed? */
-#define __raw_read_can_lock(x) ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock) cpu_relax()
 #define arch_read_relax(lock) cpu_relax()
arch/blackfin/include/asm/spinlock.h (+20 -20)
···
 asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
 asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_read_lock_asm(volatile int *ptr);
-asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_write_lock_asm(volatile int *ptr);
-asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
+asmlinkage void arch_read_lock_asm(volatile int *ptr);
+asmlinkage int arch_read_trylock_asm(volatile int *ptr);
+asmlinkage void arch_read_unlock_asm(volatile int *ptr);
+asmlinkage void arch_write_lock_asm(volatile int *ptr);
+asmlinkage int arch_write_trylock_asm(volatile int *ptr);
+asmlinkage void arch_write_unlock_asm(volatile int *ptr);
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
···
 		cpu_relax();
 }
 
-static inline int __raw_read_can_lock(arch_rwlock_t *rw)
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
 {
 	return __raw_uncached_fetch_asm(&rw->lock) > 0;
 }
 
-static inline int __raw_write_can_lock(arch_rwlock_t *rw)
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
 {
 	return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	__raw_read_lock_asm(&rw->lock);
+	arch_read_lock_asm(&rw->lock);
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	return __raw_read_trylock_asm(&rw->lock);
+	return arch_read_trylock_asm(&rw->lock);
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	__raw_read_unlock_asm(&rw->lock);
+	arch_read_unlock_asm(&rw->lock);
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	__raw_write_lock_asm(&rw->lock);
+	arch_write_lock_asm(&rw->lock);
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	return __raw_write_trylock_asm(&rw->lock);
+	return arch_write_trylock_asm(&rw->lock);
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	__raw_write_unlock_asm(&rw->lock);
+	arch_write_unlock_asm(&rw->lock);
 }
 
 #define arch_spin_relax(lock) cpu_relax()
arch/cris/include/arch-v32/arch/spinlock.h (+8 -8)
···
  *
  */
 
-static inline int __raw_read_can_lock(arch_rwlock_t *x)
+static inline int arch_read_can_lock(arch_rwlock_t *x)
 {
 	return (int)(x)->lock > 0;
 }
 
-static inline int __raw_write_can_lock(arch_rwlock_t *x)
+static inline int arch_write_can_lock(arch_rwlock_t *x)
 {
 	return (x)->lock == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	arch_spin_lock(&rw->slock);
 	while (rw->lock == 0);
···
 	arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	arch_spin_lock(&rw->slock);
 	while (rw->lock != RW_LOCK_BIAS);
···
 	arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	arch_spin_lock(&rw->slock);
 	rw->lock++;
 	arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	arch_spin_lock(&rw->slock);
 	while (rw->lock != RW_LOCK_BIAS);
···
 	arch_spin_unlock(&rw->slock);
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
 	arch_spin_lock(&rw->slock);
···
 	return ret;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
 	arch_spin_lock(&rw->slock);
arch/ia64/include/asm/spinlock.h (+16 -16)
···
 	__ticket_spin_unlock_wait(lock);
 }
 
-#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1,%2\n"
···
 		: "p6", "p7", "r2", "memory");
 }
 
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
 
-#define __raw_read_lock(rw) \
+#define arch_read_lock(rw) \
 do { \
 	arch_rwlock_t *__read_lock_ptr = (rw); \
 \
···
 
 #endif /* !ASM_SUPPORTED */
 
-#define __raw_read_unlock(rw) \
+#define arch_read_unlock(rw) \
 do { \
 	arch_rwlock_t *__read_lock_ptr = (rw); \
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
···
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1, %2\n"
···
 		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
 
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
 ({ \
 	register long result; \
 \
···
 	(result == 0); \
 })
 
-static inline void __raw_write_unlock(arch_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	u8 *y = (u8 *)x;
 	barrier();
···
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
 
-#define __raw_write_lock(l) \
+#define arch_write_lock(l) \
 ({ \
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
 	__u32 *ia64_write_lock_ptr = (__u32 *) (l); \
···
 	} while (ia64_val); \
 })
 
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
 ({ \
 	__u64 ia64_val; \
 	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
···
 	(ia64_val == 0); \
 })
 
-static inline void __raw_write_unlock(arch_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	barrier();
 	x->write_lock = 0;
···
 
 #endif /* !ASM_SUPPORTED */
 
-static inline int __raw_read_trylock(arch_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
 {
 	union {
 		arch_rwlock_t lock;
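ia64 is one of the few architectures with a real arch_read_lock_flags()/arch_write_lock_flags(): while spinning it re-enables interrupts if the caller's saved flags had them enabled, then disables them again before retrying, which bounds interrupt latency under contention. Most other architectures simply alias the _flags form to the plain lock, as the `#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)` lines elsewhere in this patch show. A schematic of the ia64 behaviour in portable C (the irq helpers and the single-shot trylock below are stand-ins for the PSR.i manipulation and fetchadd asm, not kernel APIs):

```c
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_int lock; } arch_rwlock_t;

#define IRQS_ENABLED	1UL		/* stand-in for the PSR.i bit */

static void local_irq_enable(void)  { /* stub: would set PSR.i */ }
static void local_irq_disable(void) { /* stub: would clear PSR.i */ }
static void cpu_relax(void)         { /* stub: pause/spin hint */ }

/* One acquisition attempt: negative lock word models "writer held". */
static bool read_trylock_once(arch_rwlock_t *l)
{
	int old = atomic_load(&l->lock);
	return old >= 0 &&
	       atomic_compare_exchange_strong(&l->lock, &old, old + 1);
}

static void arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	while (!read_trylock_once(lock)) {
		if (flags & IRQS_ENABLED)
			local_irq_enable();	/* spin with irqs back on */
		while (atomic_load(&lock->lock) < 0)
			cpu_relax();
		local_irq_disable();		/* retry with irqs off */
	}
}
```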
arch/m32r/include/asm/spinlock.h (+10 -10)
···
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1;
 
···
 	);
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1, tmp2;
 
···
 	);
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1;
 
···
 	);
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1, tmp2;
 
···
 	);
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t*)lock;
 	if (atomic_dec_return(count) >= 0)
···
 	return 0;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
···
 	return 0;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock) cpu_relax()
 #define arch_read_relax(lock) cpu_relax()
arch/mips/include/asm/spinlock.h (+21 -21)
···
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
+#define arch_read_can_lock(rw) ((rw)->lock >= 0)
 
 /*
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_write_can_lock(rw) (!(rw)->lock)
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		" .set noreorder # __raw_read_lock \n"
+		" .set noreorder # arch_read_lock \n"
 		"1: ll %1, %2 \n"
 		" bltz %1, 1b \n"
 		" addu %1, 1 \n"
···
 		: "memory");
 	} else {
 		__asm__ __volatile__(
-		" .set noreorder # __raw_read_lock \n"
+		" .set noreorder # arch_read_lock \n"
 		"1: ll %1, %2 \n"
 		" bltz %1, 2f \n"
 		" addu %1, 1 \n"
···
 /* Note the use of sub, not subu which will make the kernel die with an
    overflow exception if we ever try to unlock an rwlock that is already
    unlocked or is being held by a writer. */
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 
···
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"1: ll %1, %2 # __raw_read_unlock \n"
+		"1: ll %1, %2 # arch_read_unlock \n"
 		" sub %1, 1 \n"
 		" sc %1, %0 \n"
 		" beqzl %1, 1b \n"
···
 		: "memory");
 	} else {
 		__asm__ __volatile__(
-		" .set noreorder # __raw_read_unlock \n"
+		" .set noreorder # arch_read_unlock \n"
 		"1: ll %1, %2 \n"
 		" sub %1, 1 \n"
 		" sc %1, %0 \n"
···
 	}
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		" .set noreorder # __raw_write_lock \n"
+		" .set noreorder # arch_write_lock \n"
 		"1: ll %1, %2 \n"
 		" bnez %1, 1b \n"
 		" lui %1, 0x8000 \n"
···
 		: "memory");
 	} else {
 		__asm__ __volatile__(
-		" .set noreorder # __raw_write_lock \n"
+		" .set noreorder # arch_write_lock \n"
 		"1: ll %1, %2 \n"
 		" bnez %1, 2f \n"
 		" lui %1, 0x8000 \n"
···
 	smp_llsc_mb();
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	smp_mb();
 
 	__asm__ __volatile__(
-	" # __raw_write_unlock \n"
+	" # arch_write_unlock \n"
 	" sw $0, %0 \n"
 	: "=m" (rw->lock)
 	: "m" (rw->lock)
 	: "memory");
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 	int ret;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		" .set noreorder # __raw_read_trylock \n"
+		" .set noreorder # arch_read_trylock \n"
 		" li %2, 0 \n"
 		"1: ll %1, %3 \n"
 		" bltz %1, 2f \n"
···
 		: "memory");
 	} else {
 		__asm__ __volatile__(
-		" .set noreorder # __raw_read_trylock \n"
+		" .set noreorder # arch_read_trylock \n"
 		" li %2, 0 \n"
 		"1: ll %1, %3 \n"
 		" bltz %1, 2f \n"
···
 	return ret;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 	int ret;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		" .set noreorder # __raw_write_trylock \n"
+		" .set noreorder # arch_write_trylock \n"
 		" li %2, 0 \n"
 		"1: ll %1, %3 \n"
 		" bnez %1, 2f \n"
···
 		: "memory");
 	} else {
 		__asm__ __volatile__(
-		" .set noreorder # __raw_write_trylock \n"
+		" .set noreorder # arch_write_trylock \n"
 		" li %2, 0 \n"
 		"1: ll %1, %3 \n"
 		" bnez %1, 2f \n"
···
 	return ret;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock) cpu_relax()
 #define arch_read_relax(lock) cpu_relax()
arch/parisc/include/asm/spinlock.h (+10 -10)
···
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(arch_rwlock_t *rw)
+static __inline__ void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
···
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(arch_rwlock_t *rw)
+static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
···
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(arch_rwlock_t *rw)
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 retry:
···
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(arch_rwlock_t *rw)
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
retry:
···
 	local_irq_restore(flags);
 }
 
-static __inline__ void __raw_write_unlock(arch_rwlock_t *rw)
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
 {
 	rw->counter = 0;
 	arch_spin_unlock(&rw->lock);
···
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(arch_rwlock_t *rw)
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 	int result = 0;
···
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw)
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
 {
 	return rw->counter >= 0;
 }
···
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_write_can_lock(arch_rwlock_t *rw)
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
 {
 	return !rw->counter;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock) cpu_relax()
 #define arch_read_relax(lock) cpu_relax()
arch/powerpc/include/asm/spinlock.h (+16 -16)
···
  * read-locks.
  */
 
-#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) ((rw)->lock >= 0)
+#define arch_write_can_lock(rw) (!(rw)->lock)
 
 #ifdef CONFIG_PPC64
 #define __DO_SIGN_EXTEND "extsw %0,%0\n"
···
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
-static inline long arch_read_trylock(arch_rwlock_t *rw)
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
 {
 	long tmp;
 
···
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static inline long arch_write_trylock(arch_rwlock_t *rw)
+static inline long __arch_write_trylock(arch_rwlock_t *rw)
 {
 	long tmp, token;
 
···
 	return tmp;
 }
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(arch_read_trylock(rw) > 0))
+		if (likely(__arch_read_trylock(rw) > 0))
 			break;
 		do {
 			HMT_low();
···
 	}
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(arch_write_trylock(rw) == 0))
+		if (likely(__arch_write_trylock(rw) == 0))
 			break;
 		do {
 			HMT_low();
···
 	}
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	return arch_read_trylock(rw) > 0;
+	return __arch_read_trylock(rw) > 0;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	return arch_write_trylock(rw) == 0;
+	return __arch_write_trylock(rw) == 0;
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	long tmp;
 
···
 	: "cr0", "xer", "memory");
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__asm__ __volatile__("# write_unlock\n\t"
 				LWSYNC_ON_SMP: : :"memory");
 	rw->lock = 0;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock) __spin_yield(lock)
 #define arch_read_relax(lock) __rw_yield(lock)
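powerpc needs an extra step: it already had internal, value-returning helpers named arch_read_trylock()/arch_write_trylock() (they return the raw result of the ll/sc sequence, not a boolean), so those move to __arch_read_trylock()/__arch_write_trylock() to free the arch_* names for the generic-facing boolean API. A compilable sketch of that two-level structure, with GCC's __atomic builtins standing in for the lwarx/stwcx. loop:

```c
typedef struct { long lock; } arch_rwlock_t;

/* Internal helper, value-returning as on powerpc: returns the old lock
 * word + 1, so a result > 0 means the read lock was taken. */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);

	while (old >= 0) {	/* no writer holds the lock */
		if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1, 0,
						__ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return old + 1;
		/* a failed CAS reloads 'old'; retry, like a stwcx. loop */
	}
	return old + 1;		/* <= 0: writer held, lock not taken */
}

/* Generic-facing boolean wrapper, exactly the shape the diff shows. */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}
```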
arch/s390/include/asm/spinlock.h (+10 -10)
···
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)
+#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
 
 extern void _raw_read_lock_wait(arch_rwlock_t *lp);
 extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
···
 extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
···
 		_raw_read_lock_wait(rw);
 }
 
-static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
+static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
···
 		_raw_read_lock_wait_flags(rw, flags);
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned int old, cmp;
 
···
 	} while (cmp != old);
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
 		_raw_write_lock_wait(rw);
 }
 
-static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
+static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
 		_raw_write_lock_wait_flags(rw, flags);
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
···
 	return _raw_read_trylock_retry(rw);
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
 		return 1;
arch/s390/lib/spinlock.c (+6 -6)
···
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_read_can_lock(rw))
+		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
···
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_read_can_lock(rw))
+		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		local_irq_disable();
···
 	int count = spin_retry;
 
 	while (count-- > 0) {
-		if (!__raw_read_can_lock(rw))
+		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
···
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_write_can_lock(rw))
+		if (!arch_write_can_lock(rw))
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
 			return;
···
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_write_can_lock(rw))
+		if (!arch_write_can_lock(rw))
 			continue;
 		local_irq_disable();
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
···
 	int count = spin_retry;
 
 	while (count-- > 0) {
-		if (!__raw_write_can_lock(rw))
+		if (!arch_write_can_lock(rw))
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
 			return 1;
arch/sh/include/asm/spinlock.h (+16 -16)
···
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x) ((x)->lock > 0)
+#define arch_read_can_lock(x) ((x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%1, %0 ! __raw_read_lock \n\t"
+		"movli.l @%1, %0 ! arch_read_lock \n\t"
 		"cmp/pl %0 \n\t"
 		"bf 1b \n\t"
 		"add #-1, %0 \n\t"
···
 	);
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%1, %0 ! __raw_read_unlock \n\t"
+		"movli.l @%1, %0 ! arch_read_unlock \n\t"
 		"add #1, %0 \n\t"
 		"movco.l %0, @%1 \n\t"
 		"bf 1b \n\t"
···
 	);
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%1, %0 ! __raw_write_lock \n\t"
+		"movli.l @%1, %0 ! arch_write_lock \n\t"
 		"cmp/hs %2, %0 \n\t"
 		"bf 1b \n\t"
 		"sub %2, %0 \n\t"
···
 	);
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__asm__ __volatile__ (
-		"mov.l %1, @%0 ! __raw_write_unlock \n\t"
+		"mov.l %1, @%0 ! arch_write_unlock \n\t"
 		:
 		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
 		: "t", "memory"
 	);
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%2, %0 ! __raw_read_trylock \n\t"
+		"movli.l @%2, %0 ! arch_read_trylock \n\t"
 		"mov %0, %1 \n\t"
 		"cmp/pl %0 \n\t"
 		"bf 2f \n\t"
···
 	return (oldval > 0);
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%2, %0 ! __raw_write_trylock \n\t"
+		"movli.l @%2, %0 ! arch_write_trylock \n\t"
 		"mov %0, %1 \n\t"
 		"cmp/hs %3, %0 \n\t"
 		"bf 2f \n\t"
···
 	return (oldval > (RW_LOCK_BIAS - 1));
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock) cpu_relax()
 #define arch_read_relax(lock) cpu_relax()
arch/sparc/include/asm/spinlock_32.h (+16 -16)
···
  *
  * Unfortunately this scheme limits us to ~16,000,000 cpus.
  */
-static inline void arch_read_lock(arch_rwlock_t *rw)
+static inline void __arch_read_lock(arch_rwlock_t *rw)
 {
 	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
···
 	: "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_lock(lock) \
+#define arch_read_lock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	arch_read_lock(lock); \
+	__arch_read_lock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-static inline void arch_read_unlock(arch_rwlock_t *rw)
+static inline void __arch_read_unlock(arch_rwlock_t *rw)
 {
 	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
···
 	: "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_unlock(lock) \
+#define arch_read_unlock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	arch_read_unlock(lock); \
+	__arch_read_unlock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
···
 	*(volatile __u32 *)&lp->lock = ~0U;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int val;
 
···
 	return (val == 0);
 }
 
-static inline int arch_read_trylock(arch_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
 {
 	register arch_rwlock_t *lp asm("g1");
 	register int res asm("o0");
···
 	return res;
 }
 
-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
 ({	unsigned long flags; \
 	int res; \
 	local_irq_save(flags); \
-	res = arch_read_trylock(lock); \
+	res = __arch_read_trylock(lock); \
 	local_irq_restore(flags); \
 	res; \
 })
 
-#define __raw_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
 
 #define arch_spin_relax(lock) cpu_relax()
 #define arch_read_relax(lock) cpu_relax()
 #define arch_write_relax(lock) cpu_relax()
 
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)
 
 #endif /* !(__ASSEMBLY__) */
 
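sparc32 is the mirror image of powerpc: here the public entry points are macros that wrap low-level inline helpers in local_irq_save()/local_irq_restore(), presumably because the %g1-register helpers are not re-entrant on the local CPU. The wrapper macros take over the public arch_* names, and the inner helpers retreat to __arch_*. A compilable sketch of that wrap-with-irqs-off shape (the irq macros are stubs, and the inner body is a placeholder for the real register asm):

```c
typedef struct { volatile unsigned int lock; } arch_rwlock_t;

/* Stubs standing in for the kernel's local_irq_save()/restore(). */
#define local_irq_save(flags)		do { (flags) = 0; } while (0)
#define local_irq_restore(flags)	do { (void)(flags); } while (0)

/* Inner helper: formerly arch_read_lock(), now __arch_read_lock(). */
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	rw->lock++;	/* placeholder for the real g1/g2 register asm */
}

/* Public name: formerly __raw_read_lock(), now arch_read_lock(). */
#define arch_read_lock(lock)			\
do {	unsigned long flags;			\
	local_irq_save(flags);			\
	__arch_read_lock(lock);			\
	local_irq_restore(flags);		\
} while (0)
```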
arch/sparc/include/asm/spinlock_64.h (+10 -10)
···
 	return result;
 }
 
-#define __raw_read_lock(p)	arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p)	arch_read_trylock(p)
-#define __raw_read_unlock(p)	arch_read_unlock(p)
-#define __raw_write_lock(p)	arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p)	arch_write_unlock(p)
-#define __raw_write_trylock(p)	arch_write_trylock(p)
+#define arch_read_lock(p)	arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p)	arch_read_trylock(p)
+#define arch_read_unlock(p)	arch_read_unlock(p)
+#define arch_write_lock(p)	arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p)	arch_write_unlock(p)
+#define arch_write_trylock(p)	arch_write_trylock(p)
 
-#define __raw_read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw)	(!(rw)->lock)
 
 #define arch_spin_relax(lock)	cpu_relax()
 #define arch_read_relax(lock)	cpu_relax()
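The sparc64 result looks peculiar: `#define arch_read_lock(p) arch_read_lock(p)` and friends. The static inline functions above these defines (outside the visible hunk) already carried the arch_* names, so the old __raw_* aliases collapse into self-referential macros. That is harmless C: a function-like macro is not re-expanded inside its own expansion, so the call lands on the identically named inline function, and the macro's continued existence lets other code probe for an arch-provided override with #ifdef. A minimal demonstration of the idiom:

```c
#include <stdio.h>

static inline int answer(void)
{
	return 42;
}

/* Self-referential define: expansion stops at the macro's own name,
 * so answer() below calls the inline function above. */
#define answer() answer()

int main(void)
{
#ifdef answer			/* code can test for the override */
	printf("%d\n", answer());	/* prints 42 */
#endif
	return 0;
}
```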
arch/x86/include/asm/spinlock.h (+10 -10)
···
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static inline int __raw_read_can_lock(arch_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
 	return (int)(lock)->lock > 0;
 }
···
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static inline int __raw_write_can_lock(arch_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
 	return (lock)->lock == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
 		     "jns 1f\n"
···
 		     ::LOCK_PTR_REG (rw) : "memory");
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
 		     "jz 1f\n"
···
 		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
···
 	return 0;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
···
 	return 0;
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "addl %1, %0"
 		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock) cpu_relax()
 #define arch_read_relax(lock) cpu_relax()
include/linux/rwlock.h (+10 -10)
···
 extern int _raw_write_trylock(rwlock_t *lock);
 extern void _raw_write_unlock(rwlock_t *lock);
 #else
-# define _raw_read_lock(rwlock)	__raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_read_lock(rwlock)	arch_read_lock(&(rwlock)->raw_lock)
 # define _raw_read_lock_flags(lock, flags) \
-	__raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock)	__raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock)	__raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock)	__raw_write_lock(&(rwlock)->raw_lock)
+	arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_read_trylock(rwlock)	arch_read_trylock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock)	arch_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock)	arch_write_lock(&(rwlock)->raw_lock)
 # define _raw_write_lock_flags(lock, flags) \
-	__raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock)	__raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock)	__raw_write_unlock(&(rwlock)->raw_lock)
+	arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_write_trylock(rwlock)	arch_write_trylock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock)	arch_write_unlock(&(rwlock)->raw_lock)
 #endif
 
-#define read_can_lock(rwlock)		__raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock)		__raw_write_can_lock(&(rwlock)->raw_lock)
+#define read_can_lock(rwlock)		arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock)		arch_write_can_lock(&(rwlock)->raw_lock)
 
 /*
  * Define the various rw_lock methods. Note we define these
include/linux/spinlock_up.h (+8 -8)
···
 /*
  * Read-write spinlocks. No debug version.
  */
-#define __raw_read_lock(lock)		do { (void)(lock); } while (0)
-#define __raw_write_lock(lock)		do { (void)(lock); } while (0)
-#define __raw_read_trylock(lock)	({ (void)(lock); 1; })
-#define __raw_write_trylock(lock)	({ (void)(lock); 1; })
-#define __raw_read_unlock(lock)		do { (void)(lock); } while (0)
-#define __raw_write_unlock(lock)	do { (void)(lock); } while (0)
+#define arch_read_lock(lock)		do { (void)(lock); } while (0)
+#define arch_write_lock(lock)		do { (void)(lock); } while (0)
+#define arch_read_trylock(lock)		({ (void)(lock); 1; })
+#define arch_write_trylock(lock)	({ (void)(lock); 1; })
+#define arch_read_unlock(lock)		do { (void)(lock); } while (0)
+#define arch_write_unlock(lock)		do { (void)(lock); } while (0)
 
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock)	((void)(lock), 0)
···
 
 #define arch_spin_is_contended(lock)	(((void)(lock), 0))
 
-#define __raw_read_can_lock(lock)	(((void)(lock), 1))
-#define __raw_write_can_lock(lock)	(((void)(lock), 1))
+#define arch_read_can_lock(lock)	(((void)(lock), 1))
+#define arch_write_can_lock(lock)	(((void)(lock), 1))
 
 #define arch_spin_unlock_wait(lock) \
 	do { cpu_relax(); } while (arch_spin_is_locked(lock))
lib/spinlock_debug.c (+8 -8)
···
 
 	for (;;) {
 		for (i = 0; i < loops; i++) {
-			if (__raw_read_trylock(&lock->raw_lock))
+			if (arch_read_trylock(&lock->raw_lock))
 				return;
 			__delay(1);
 		}
···
 void _raw_read_lock(rwlock_t *lock)
 {
 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
-	__raw_read_lock(&lock->raw_lock);
+	arch_read_lock(&lock->raw_lock);
 }
 
 int _raw_read_trylock(rwlock_t *lock)
 {
-	int ret = __raw_read_trylock(&lock->raw_lock);
+	int ret = arch_read_trylock(&lock->raw_lock);
 
 #ifndef CONFIG_SMP
 	/*
···
 void _raw_read_unlock(rwlock_t *lock)
 {
 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
-	__raw_read_unlock(&lock->raw_lock);
+	arch_read_unlock(&lock->raw_lock);
 }
 
 static inline void debug_write_lock_before(rwlock_t *lock)
···
 
 	for (;;) {
 		for (i = 0; i < loops; i++) {
-			if (__raw_write_trylock(&lock->raw_lock))
+			if (arch_write_trylock(&lock->raw_lock))
 				return;
 			__delay(1);
 		}
···
 void _raw_write_lock(rwlock_t *lock)
 {
 	debug_write_lock_before(lock);
-	__raw_write_lock(&lock->raw_lock);
+	arch_write_lock(&lock->raw_lock);
 	debug_write_lock_after(lock);
 }
 
 int _raw_write_trylock(rwlock_t *lock)
 {
-	int ret = __raw_write_trylock(&lock->raw_lock);
+	int ret = arch_write_trylock(&lock->raw_lock);
 
 	if (ret)
 		debug_write_lock_after(lock);
···
 void _raw_write_unlock(rwlock_t *lock)
 {
 	debug_write_unlock(lock);
-	__raw_write_unlock(&lock->raw_lock);
+	arch_write_unlock(&lock->raw_lock);
 }
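The debug wrappers in lib/spinlock_debug.c show why the trylock variants matter at this layer: the lockup-detection loops visible above never block in the arch code; they spin on arch_read_trylock()/arch_write_trylock() in bounded bursts so a suspected lockup can be reported between bursts. A user-space reimplementation of that loop shape (the loop budget and the reporting are simplifications of the kernel's loops_per_jiffy math and once-per-second printk):

```c
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int lock; } arch_rwlock_t;

static int arch_read_trylock(arch_rwlock_t *rw)
{
	int old = atomic_load(&rw->lock);

	/* a negative lock word models "write-locked" */
	return old >= 0 &&
	       atomic_compare_exchange_strong(&rw->lock, &old, old + 1);
}

static void read_lock_debug(arch_rwlock_t *rw)
{
	const long loops = 1000000;	/* stand-in for loops_per_jiffy */

	for (;;) {
		for (long i = 0; i < loops; i++) {
			if (arch_read_trylock(rw))
				return;
			/* __delay(1) in the kernel */
		}
		fprintf(stderr, "rwlock: possible lockup suspected\n");
	}
}

int main(void)
{
	arch_rwlock_t rw = { 0 };

	read_lock_debug(&rw);	/* uncontended: acquires immediately */
	puts("acquired");
	return 0;
}
```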