#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>

#define __raw_spin_is_locked(x)		((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	1
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
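/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): how the lock word and the trylock convention above fit
 * together.  On 32-bit, LOCK_TOKEN is simply 1; on 64-bit it is
 * 0x800000yy with yy == the holder's CPU number, read from the paca,
 * which is what lets __spin_yield() (declared below) identify the
 * holding virtual processor and donate the rest of our timeslice to it.
 *
 *	if (__spin_trylock(lock) == 0) {
 *		// old value was 0: the lock is now ours
 *	} else {
 *		// old value was the holder's token; keep spinning or yield
 *	}
 */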
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(raw_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif

static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
}

#ifdef CONFIG_PPC64
extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
#else
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static long __inline__ __read_trylock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
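/*
 * Illustrative note (editorial addition, not part of the original
 * header): the rwlock word is a signed count.  Zero means unlocked, a
 * positive value is the number of read holders, and a negative value
 * (WRLOCK_TOKEN: -1 on 32-bit, the sign-bit-set 0x800000yy token on
 * 64-bit, sign-extended by __DO_SIGN_EXTEND above) marks a write holder.
 * That is why __raw_read_can_lock() is a sign test and
 * __raw_write_can_lock() a test for zero, and why:
 *
 *	if (__read_trylock(rw) > 0)
 *		// read lock taken: old count was >= 0, now incremented
 *	if (__write_trylock(rw) == 0)
 *		// write lock taken: word went from 0 to WRLOCK_TOKEN
 */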
/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(raw_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static void __inline__ __raw_read_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static void __inline__ __raw_write_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static int __inline__ __raw_read_trylock(raw_rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}

static int __inline__ __raw_write_trylock(raw_rwlock_t *rw)
{
	return __write_trylock(rw) == 0;
}

static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	LWSYNC_ON_SMP
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	rw->lock = 0;
}

#define _raw_spin_relax(lock)	__spin_yield(lock)
#define _raw_read_relax(lock)	__rw_yield(lock)
#define _raw_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
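/*
 * Usage sketch (editorial addition, not part of the original header):
 * the __raw_* primitives above are not called directly; the generic
 * spinlock layer in <linux/spinlock.h> wraps them, roughly as below for
 * a kernel of this vintage.  "my_lock" is a hypothetical example name.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);	// ends up in __raw_spin_lock()
 *	...critical section...
 *	spin_unlock(&my_lock);	// ends up in __raw_spin_unlock()
 */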