Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/powerpc/include/asm/spinlock.h at v4.5-rc7
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO	do {						\
			if (unlikely(get_paca()->io_sync)) {	\
				mb();				\
				get_paca()->io_sync = 0;	\
			}					\
		} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}
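/*
 * Illustrative sketch (not part of the original file): the
 * lwarx/stwcx. loop in __arch_spin_trylock() above is logically an
 * atomic compare-and-swap of 0 -> LOCK_TOKEN with acquire semantics.
 * In portable C, using GCC's __atomic builtins, it is roughly:
 *
 *	u32 old = 0;
 *	if (__atomic_compare_exchange_n(&lock->slock, &old, token,
 *			false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		return 0;	succeeded: we now hold the lock
 *	return old;		failed: old is the holder's token
 *
 * Returning the holder's token (0x800000yy, yy == CPU number on
 * ppc64) is what lets the shared-processor slow path below work out
 * which virtual processor to yield to.
 */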
/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor. Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held. Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

#ifdef CONFIG_PPC64
extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
#else
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
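/*
 * Illustrative usage sketch (not part of the original file): these
 * arch_* primitives sit underneath the generic spinlock API, so
 * ordinary kernel code never calls them directly. With a hypothetical
 * lock named my_lock:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);	eventually reaches arch_spin_lock()
 *	...critical section...
 *	spin_unlock(&my_lock);	PPC_RELEASE_BARRIER, then slock = 0
 *
 * While waiting, the slow path drops SMT thread priority via
 * HMT_low(), and on shared-processor LPARs __spin_yield() asks the
 * hypervisor to give the remaining timeslice to the lock holder.
 */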
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
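The read-write lock above packs its whole state into one signed 32-bit word: 0 means free, a positive value is the reader count, and a negative value (WRLOCK_TOKEN) marks a writer. Below is a minimal userspace sketch of that encoding, using GCC's __atomic builtins in place of the lwarx/stwcx. reservation loops; the rw_word type and the rw_* names are hypothetical illustrations, not kernel API, and the HMT/yield slow paths are omitted.

	#include <stdbool.h>
	#include <stdint.h>

	/* One signed word: 0 = free, > 0 = reader count, < 0 = writer token. */
	typedef struct { int32_t lock; } rw_word;

	/* Mirrors __arch_read_trylock(): bump the count unless a writer holds it. */
	static bool rw_read_trylock(rw_word *rw)
	{
		int32_t old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);

		while (old >= 0) {	/* negative would mean a writer is in */
			if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1,
					false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
				return true;	/* old is refreshed on failure */
		}
		return false;
	}

	/* Mirrors __arch_write_trylock(): swap 0 for a negative token. */
	static bool rw_write_trylock(rw_word *rw)
	{
		int32_t old = 0;

		return __atomic_compare_exchange_n(&rw->lock, &old, -1,
				false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
	}

	/* Mirrors arch_read_unlock(): drop one reader, with release ordering. */
	static void rw_read_unlock(rw_word *rw)
	{
		__atomic_fetch_sub(&rw->lock, 1, __ATOMIC_RELEASE);
	}

	/* Mirrors arch_write_unlock(): release ordering, then clear the word. */
	static void rw_write_unlock(rw_word *rw)
	{
		__atomic_store_n(&rw->lock, 0, __ATOMIC_RELEASE);
	}

The single-word design keeps both flavours cheap: the sign bit doubles as the writer flag, so readers need only one atomic add on the fast path, while the writer's negative token still records who holds the lock for the hypervisor-yield slow path.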