Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v2.6.20 184 lines 4.1 kB view raw
/*
 * include/asm-sh/rwsem.h: R/W semaphores for SH using the stuff
 * in lib/rwsem.c.
 */

#ifndef _ASM_SH_RWSEM_H
#define _ASM_SH_RWSEM_H

#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * The semaphore definition.
 *
 * The signed 32-bit count encodes both parts of the state:
 * the low 16 bits (RWSEM_ACTIVE_MASK) count active holders, while
 * RWSEM_WAITING_BIAS pulls the whole word negative when a writer
 * holds the lock or tasks are queued on wait_list.
 */
struct rw_semaphore {
	long			count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) \
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name)		\
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

/* Contended slow paths, implemented in lib/rwsem.c */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

/*
 * init_rwsem() must be a macro rather than an inline function so that
 * each call site gets its own static lock_class_key for lockdep.
 *
 * NOTE: the old `static inline void init_rwsem()` that used to follow
 * this macro has been removed: with the function-like macro already
 * defined, the preprocessor mangled the inline's definition into a
 * compile error, and the inline bypassed lockdep initialization anyway.
 */
#define init_rwsem(sem)				\
do {						\
	static struct lock_class_key __key;	\
						\
	__init_rwsem((sem), #sem, &__key);	\
} while (0)

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	/* count > 0 after the increment means no writer was active */
	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	/* retry the cmpxchg as long as no writer is active (count >= 0) */
	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	/* exact match means the count was previously zero: we own it */
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	/* negative with no active holders left: a writer is queued, wake it */
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	/* still negative after dropping the write bias: waiters are queued */
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	/* convert the write bias into a single read bias */
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	/* subclass is only meaningful to lockdep; plain write lock here */
	__down_write(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */