Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
asm/spinlock.h (powerpc) at v4.8, 338 lines, 7.7 kB
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO	do {						\
			if (unlikely(get_paca()->io_sync)) {	\
				mb();				\
				get_paca()->io_sync = 0;	\
			}					\
		} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}
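/*
 * Illustrative sketch (not part of this header): the lwarx/stwcx.
 * pair above is a load-reserve/store-conditional sequence; the store
 * only succeeds if nothing has touched the word since the reserving
 * load.  A rough user-space analogue of __arch_spin_trylock, written
 * with the GCC __atomic builtins (the demo_* names and flat types are
 * hypothetical, chosen for illustration only):
 *
 *	// Returns 0 on success, like __arch_spin_trylock().
 *	static inline unsigned int demo_trylock(unsigned int *slock,
 *						unsigned int token)
 *	{
 *		unsigned int expected = 0;
 *
 *		// The compare-and-swap stands in for the lwarx/stwcx.
 *		// retry loop; __ATOMIC_ACQUIRE on the success path plays
 *		// the role of PPC_ACQUIRE_BARRIER.
 *		if (__atomic_compare_exchange_n(slock, &expected, token,
 *						false, __ATOMIC_ACQUIRE,
 *						__ATOMIC_RELAXED))
 *			return 0;
 *		return expected;	// old, non-zero lock value
 *	}
 */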
/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	arch_spinlock_t lock_val;

	smp_mb();

	/*
	 * Atomically load and store back the lock value (unchanged). This
	 * ensures that our observation of the lock value is ordered with
	 * respect to other lock operations.
	 */
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0, 0, %2, 0) "\n"
"	stwcx. %0, 0, %2\n"
"	bne- 1b\n"
	: "=&r" (lock_val), "+m" (*lock)
	: "r" (lock)
	: "cr0", "xer");

	if (arch_spin_value_unlocked(lock_val))
		goto out;

	while (lock->slock) {
		HMT_low();
		if (SHARED_PROCESSOR)
			__spin_yield(lock);
	}
	HMT_medium();

out:
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
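/*
 * Illustrative sketch (not part of this header): the rwlock word is a
 * signed count -- 0 means unlocked, a positive value is the number of
 * active readers, and a negative value (WRLOCK_TOKEN) means a writer
 * holds it.  A hypothetical user-space analogue of __arch_read_trylock
 * using the GCC __atomic builtins (demo_* names are made up; unlike
 * the asm above, this version gives up on a lost race instead of
 * retrying the store-conditional):
 *
 *	// Returns > 0 on success, like __arch_read_trylock().
 *	static inline long demo_read_trylock(int *lock)
 *	{
 *		int old = __atomic_load_n(lock, __ATOMIC_RELAXED);
 *
 *		// A writer holds the lock: old < 0, so old + 1 <= 0.
 *		if (old < 0)
 *			return old + 1;
 *
 *		// CAS stands in for the lwarx/stwcx. loop.
 *		if (__atomic_compare_exchange_n(lock, &old, old + 1,
 *						false, __ATOMIC_ACQUIRE,
 *						__ATOMIC_RELAXED))
 *			return old + 1;
 *		return 0;	// contention; caller treats as failure
 *	}
 */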
/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
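/*
 * Usage sketch (not part of this header): kernel code never calls the
 * arch_* primitives directly; they sit underneath the generic API in
 * <linux/spinlock.h>.  A minimal, conventional caller (the demo_*
 * names are hypothetical):
 *
 *	#include <linux/spinlock.h>
 *
 *	static DEFINE_SPINLOCK(demo_lock);
 *	static unsigned long demo_counter;
 *
 *	static void demo_increment(void)
 *	{
 *		unsigned long flags;
 *
 *		// Disables local interrupts and takes the lock; the
 *		// slowpath eventually reaches the arch_spin_lock*()
 *		// implementations above.
 *		spin_lock_irqsave(&demo_lock, flags);
 *		demo_counter++;
 *		spin_unlock_irqrestore(&demo_lock, flags);
 *	}
 */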