Linux kernel mirror: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v2.6.27-rc2
#ifndef _X86_SPINLOCK_H_
#define _X86_SPINLOCK_H_

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
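/*
 * Illustrative only, compiled out: a minimal user-space sketch of the same
 * head/tail ticket idea using GCC __atomic builtins instead of inline asm.
 * Unlike the real lock below, which packs both halves into one word and
 * grabs them with a single xadd, this sketch keeps head and tail as two
 * fields purely for readability.  All example_* names are hypothetical and
 * are not part of this header.
 */
#if 0
#include <stdint.h>

struct example_ticket_lock {
	uint16_t head;		/* ticket currently being served */
	uint16_t tail;		/* next ticket to hand out */
};

static void example_ticket_lock_acquire(struct example_ticket_lock *lk)
{
	/* take a ticket: atomically read the tail and bump it by one */
	uint16_t me = __atomic_fetch_add(&lk->tail, 1, __ATOMIC_RELAXED);

	/* spin until the head catches up with our ticket */
	while (__atomic_load_n(&lk->head, __ATOMIC_ACQUIRE) != me)
		;	/* a pause/rep;nop hint would go here */
}

static void example_ticket_lock_release(struct example_ticket_lock *lk)
{
	/* serve the next waiter in FIFO order */
	__atomic_fetch_add(&lk->head, 1, __ATOMIC_RELEASE);
}
#endif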
#if (NR_CPUS < 256)
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	short new;

	asm volatile("movw %2,%w0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "jne 1f\n\t"
		     "movw %w0,%w1\n\t"
		     "incb %h1\n\t"
		     "lock ; cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
}

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile("lock ; xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "jne 1f\n\t"
		     "addl $0x00010000, %1\n\t"
		     "lock ; cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

#ifdef CONFIG_PARAVIRT
/*
 * Define virtualization-friendly old-style lock byte lock, for use in
 * pv_lock_ops if desired.
 *
 * This differs from the pre-2.6.24 spinlock by always using xchgb
 * rather than decb to take the lock; this allows it to use a
 * zero-initialized lock structure.  It also maintains a 1-byte
 * contention counter, so that we can implement
 * __byte_spin_is_contended.
 */
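/*
 * Illustrative only, compiled out: the same xchg-based byte lock expressed
 * with GCC __atomic builtins.  A zero-initialized structure is unlocked, and
 * a small "spinners" counter tracks waiters so contention can be reported,
 * mirroring the incb/decb pair in the asm below.  The example_* names are
 * hypothetical and not part of this header.
 */
#if 0
#include <stdint.h>

struct example_byte_lock {
	int8_t lock;		/* 0 = free, 1 = held */
	int8_t spinners;	/* CPUs currently waiting */
};

static void example_byte_lock_acquire(struct example_byte_lock *bl)
{
	/* exchange in a 1; a returned 0 means we took a free lock */
	while (__atomic_exchange_n(&bl->lock, 1, __ATOMIC_ACQUIRE) != 0) {
		/* lost the race: advertise ourselves as a spinner and wait */
		__atomic_fetch_add(&bl->spinners, 1, __ATOMIC_RELAXED);
		while (__atomic_load_n(&bl->lock, __ATOMIC_RELAXED) != 0)
			;	/* rep;nop / pause hint would go here */
		__atomic_fetch_sub(&bl->spinners, 1, __ATOMIC_RELAXED);
	}
}

static int example_byte_lock_trylock(struct example_byte_lock *bl)
{
	return __atomic_exchange_n(&bl->lock, 1, __ATOMIC_ACQUIRE) == 0;
}

static void example_byte_lock_release(struct example_byte_lock *bl)
{
	__atomic_store_n(&bl->lock, 0, __ATOMIC_RELEASE);
}
#endif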
struct __byte_spinlock {
	s8 lock;
	s8 spinners;
};

static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->lock != 0;
}

static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->spinners != 0;
}

static inline void __byte_spin_lock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	s8 val = 1;

	asm("1: xchgb %1, %0\n"
	    "   test %1,%1\n"
	    "   jz 3f\n"
	    "   " LOCK_PREFIX "incb %2\n"
	    "2: rep;nop\n"
	    "   cmpb $1, %0\n"
	    "   je 2b\n"
	    "   " LOCK_PREFIX "decb %2\n"
	    "   jmp 1b\n"
	    "3:"
	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
}

static inline int __byte_spin_trylock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %1,%0"
	    : "+m" (bl->lock), "+q" (old) : : "memory");

	return old == 0;
}

static inline void __byte_spin_unlock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	smp_wmb();
	bl->lock = 0;
}
#else  /* !CONFIG_PARAVIRT */
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}
#endif	/* CONFIG_PARAVIRT */

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
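/*
 * Illustrative only, compiled out: a user-space sketch of the biased-counter
 * trick described above, using GCC __atomic builtins.  The counter starts at
 * the bias; each reader subtracts 1, a writer subtracts the whole bias, and a
 * negative (sign-bit) value means a writer is involved.  EXAMPLE_RW_BIAS and
 * the example_* names are hypothetical; the real bias comes from asm/rwlock.h
 * and the real trylock paths are the atomic_t versions further down.
 */
#if 0
#include <stdint.h>

#define EXAMPLE_RW_BIAS 0x01000000

static int32_t example_rw = EXAMPLE_RW_BIAS;

static int example_read_trylock(void)
{
	/* a reader takes one unit; any non-negative result means no writer */
	if (__atomic_sub_fetch(&example_rw, 1, __ATOMIC_ACQUIRE) >= 0)
		return 1;
	__atomic_add_fetch(&example_rw, 1, __ATOMIC_RELAXED);	/* undo */
	return 0;
}

static int example_write_trylock(void)
{
	/* a writer needs the whole bias: no readers and no other writer */
	if (__atomic_sub_fetch(&example_rw, EXAMPLE_RW_BIAS, __ATOMIC_ACQUIRE) == 0)
		return 1;
	__atomic_add_fetch(&example_rw, EXAMPLE_RW_BIAS, __ATOMIC_RELAXED);	/* undo */
	return 0;
}
#endif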
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif