Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v2.6.14
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/config.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * Simple spin lock operations. There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (The type definitions are in asm/spinlock_types.h.)
 */

#define __raw_spin_is_locked(x) \
	(*(volatile signed char *)(&(x)->slock) <= 0)

#define __raw_spin_lock_string \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"js 2f\n" \
	LOCK_SECTION_START("") \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpb $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	LOCK_SECTION_END

#define __raw_spin_unlock_string \
	"movb $1,%0" \
		:"=m" (lock->slock) : : "memory"

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		__raw_spin_lock_string
		:"=m" (lock->slock) : : "memory");
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	char oldval;

	/* Atomically swap in 0 ("held"); the old value is positive
	 * only if the lock was free. */
	__asm__ __volatile__(
		"xchgb %b0,%1"
		:"=q" (oldval), "=m" (lock->slock)
		:"0" (0) : "memory");

	return oldval > 0;
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		__raw_spin_unlock_string
	);
}

#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores. See
 * semaphore.h for details. -ben
 *
 * The helpers are in arch/i386/kernel/semaphore.c.
 */

#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__build_read_lock(rw, "__read_lock_failed");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__build_write_lock(rw, "__write_lock_failed");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	/* Take one reader count; if the counter went negative a writer
	 * holds the lock, so give the count back and fail. */
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	/* Subtracting the full bias leaves zero only if no reader or
	 * writer held the lock; otherwise restore it and fail. */
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
				 : "=m" (rw->lock) : : "memory");
}

#endif /* __ASM_SPINLOCK_H */
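
For readers tracing the "lock ; decb" / "xchgb" protocol above, here is a minimal userspace sketch of the same byte-counter scheme (1 = free, <= 0 = held) written with C11 atomics instead of inline assembly. The byte_spinlock_t type and function names are invented for illustration only; they are not kernel API, and the sketch keeps the original's property that waiters spin read-only until the byte turns positive before retrying the decrement.

/* Sketch of the 2.6-era i386 byte spinlock protocol in C11 atomics.
 * Initialize to free, e.g.:  byte_spinlock_t l = { 1 };              */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_schar slock; } byte_spinlock_t;  /* 1 = free, <= 0 = held */

static void byte_spin_lock(byte_spinlock_t *l)
{
	for (;;) {
		/* "lock ; decb %0": free (1) -> held (0); if it was already
		 * held the byte just goes more negative ("contended"). */
		if (atomic_fetch_sub_explicit(&l->slock, 1, memory_order_acquire) == 1)
			return;
		/* "rep;nop" wait loop: spin read-only until positive, then
		 * jump back and retry the decrement ("jle 2b; jmp 1b"). */
		while (atomic_load_explicit(&l->slock, memory_order_relaxed) <= 0)
			;
	}
}

static bool byte_spin_trylock(byte_spinlock_t *l)
{
	/* "xchgb": swap in 0; the lock was free iff the old value was positive */
	return atomic_exchange_explicit(&l->slock, 0, memory_order_acquire) > 0;
}

static void byte_spin_unlock(byte_spinlock_t *l)
{
	/* "movb $1,%0": a plain release store frees the lock */
	atomic_store_explicit(&l->slock, 1, memory_order_release);
}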
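
The RW_LOCK_BIAS counter scheme can be sketched the same way. RW_LOCK_BIAS is 0x01000000 in the matching asm-i386/rwlock.h; each reader takes one count, a writer takes the whole bias, and the sign bit signals contention. The bias_rwlock_t names below are invented for illustration, and the trylock paths fold the kernel's separate dec-and-read into a single fetch_sub, which makes the decision atomic rather than reproducing the original's two-step test.

/* Sketch of the RW_LOCK_BIAS read-write lock scheme in C11 atomics.
 * Initialize to uncontended, e.g.:  bias_rwlock_t rw = { RW_BIAS };  */
#include <stdatomic.h>
#include <stdbool.h>

#define RW_BIAS 0x01000000          /* counter value of an uncontended lock */

typedef struct { atomic_int lock; } bias_rwlock_t;

static bool bias_read_trylock(bias_rwlock_t *rw)
{
	/* Take one reader count; a writer has subtracted the whole bias,
	 * so the counter goes negative and we must back out. */
	if (atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire) > 0)
		return true;
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
	return false;
}

static bool bias_write_trylock(bias_rwlock_t *rw)
{
	/* The subtraction leaves exactly 0 only if no reader or writer
	 * held any count (lock == RW_BIAS beforehand). */
	if (atomic_fetch_sub_explicit(&rw->lock, RW_BIAS, memory_order_acquire) == RW_BIAS)
		return true;
	atomic_fetch_add_explicit(&rw->lock, RW_BIAS, memory_order_relaxed);
	return false;
}

static void bias_read_unlock(bias_rwlock_t *rw)
{
	/* "lock ; incl": give back one reader count */
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
}

static void bias_write_unlock(bias_rwlock_t *rw)
{
	/* "lock ; addl $RW_LOCK_BIAS": restore the full bias */
	atomic_fetch_add_explicit(&rw->lock, RW_BIAS, memory_order_release);
}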