Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.31 173 lines 4.1 kB view raw
#ifndef _ASM_POWERPC_RWSEM_H
#define _ASM_POWERPC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 *
 * ->count encodes the lock state in one word, using the bias values
 * defined below:
 *   - RWSEM_UNLOCKED_VALUE (0): nobody holds or waits for the lock.
 *   - Each reader adds RWSEM_ACTIVE_READ_BIAS (+1); the low 16 bits
 *     (RWSEM_ACTIVE_MASK) therefore count the active lockers.
 *   - A writer adds RWSEM_ACTIVE_WRITE_BIAS (-0x10000 + 1), driving the
 *     count negative while it holds the lock exclusively.
 *   - RWSEM_WAITING_BIAS (-0x10000) flags the presence of sleepers.
 * The contended (slow) paths are the rwsem_*() functions declared
 * further down and implemented in lib/rwsem.c.
 */
struct rw_semaphore {
	/* XXX this should be able to be an atomic_t -- paulus */
	signed int		count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;	/* protects wait_list */
	struct list_head	wait_list;	/* tasks sleeping on this sem */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;	/* lockdep tracking state */
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname)	, .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

/* Static initializer: unlocked count, unlocked spinlock, empty wait list. */
#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
	  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name)		\
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

/* Slow paths, implemented in lib/rwsem.c; entered when a fast path fails. */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

/*
 * Runtime initializer.  The static __key gives each init_rwsem() call
 * site its own lockdep class key.
 */
#define init_rwsem(sem)					\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_rwsem((sem), #sem, &__key);	\
	} while (0)

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	/*
	 * Atomically add the reader bias (+1); the cast works around
	 * ->count not being an atomic_t (see the XXX in the struct).
	 * A non-positive result means a writer is active or waiting,
	 * so fall back to the slow path and sleep.
	 */
	if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	/*
	 * cmpxchg loop: retry while the count stays non-negative (no
	 * active or waiting writer).  Returns 1 if the reader bias was
	 * installed, 0 if a writer got in the way.
	 */
	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	int tmp;

	/* subclass is a lockdep annotation only; unused in this fast path */
	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	/*
	 * Only if the count was exactly 0 beforehand (result equals
	 * RWSEM_ACTIVE_WRITE_BIAS) was the lock taken uncontended;
	 * otherwise sleep in the slow path.
	 */
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	/* Succeeds only if the sem was completely unlocked (count == 0). */
	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	/*
	 * A negative result with no active lockers left (active mask
	 * clear) means we were the last reader and tasks are queued on
	 * the waiting bias: wake them.
	 */
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	/*
	 * Remove the writer bias.  A still-negative result means the
	 * waiting bias is set, i.e. tasks are queued: wake them.
	 */
	if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
				       (atomic_t *)(&sem->count)) < 0))
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	/*
	 * Subtracting RWSEM_WAITING_BIAS turns the writer's bias
	 * (-0x10000 + 1) into a plain reader bias (+1).  A negative
	 * result means other tasks are still queued, so let the slow
	 * path wake any waiting readers.
	 */
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	/* Any non-zero count means readers and/or a writer hold or wait. */
	return (sem->count != 0);
}

#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_RWSEM_H */