Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.15 168 lines 3.8 kB view raw
#ifndef _ASM_POWERPC_RWSEM_H
#define _ASM_POWERPC_RWSEM_H

#ifdef __KERNEL__

/*
 * R/W semaphores for PPC, built on the generic slow paths in
 * lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * The semaphore definition.  The count word encodes the active holders
 * in its low 16 bits and a per-waiter bias in the high bits.
 */
struct rw_semaphore {
	/* XXX this should be able to be an atomic_t -- paulus */
	signed int count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t wait_lock;
	struct list_head wait_list;
#if RWSEM_DEBUG
	int debug;
#endif
};

/*
 * Static initialisation.
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT	, 0
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) \
	  __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

/* Slow paths, implemented in lib/rwsem.c. */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

/* Run-time initialisation of a semaphore to the unlocked state. */
static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	/* A non-positive result means a writer holds or awaits the lock. */
	if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int old;

	/* Keep retrying while no writer is active or queued (count >= 0). */
	while ((old = sem->count) >= 0) {
		if (old == cmpxchg(&sem->count, old,
				   old + RWSEM_ACTIVE_READ_BIAS))
			return 1;
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int result;

	result = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				   (atomic_t *)(&sem->count));
	/* Anything but a clean transition from 0 means contention. */
	if (unlikely(result != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	/* Succeeds only if the semaphore was completely unlocked. */
	return cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		       RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int newval;

	newval = atomic_dec_return((atomic_t *)(&sem->count));
	/* Wake waiters when the last active reader leaves and writers wait. */
	if (unlikely(newval < -1 && (newval & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
				       (atomic_t *)(&sem->count)) < 0))
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int newval;

	newval = atomic_add_return(-RWSEM_WAITING_BIAS,
				   (atomic_t *)(&sem->count));
	if (newval < 0)
		rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return sem->count != 0;
}

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_RWSEM_H */