#ifndef _ASM_GENERIC_RWSEM_H
#define _ASM_GENERIC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
				tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
			return -EINTR;
	return 0;
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
					  RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_dec_return_release(&sem->count);
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
						    &sem->count) < 0))
		rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

#endif /* __KERNEL__ */
#endif /* _ASM_GENERIC_RWSEM_H */
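
The bias constants above pack the whole lock state into one signed counter: each active reader or writer adds RWSEM_ACTIVE_BIAS, and a writer (or any sleeping waiter) also adds RWSEM_WAITING_BIAS, which is what lets the fast paths decide success from a single atomic add. The following standalone user-space sketch is not part of the header; it simply mirrors the CONFIG_64BIT constants with locally defined names to show why "<= 0" in __down_read and "!= RWSEM_ACTIVE_WRITE_BIAS" in __down_write indicate contention.

/*
 * Illustrative user-space sketch only; mirrors the 64-bit bias constants
 * from asm-generic/rwsem.h with local names. Not kernel code.
 */
#include <stdio.h>

#define ACTIVE_MASK	0xffffffffL
#define UNLOCKED	0x00000000L
#define ACTIVE_BIAS	0x00000001L
#define WAITING_BIAS	(-ACTIVE_MASK - 1)
#define READ_BIAS	ACTIVE_BIAS
#define WRITE_BIAS	(WAITING_BIAS + ACTIVE_BIAS)

static void show(const char *state, long count)
{
	/* Low 32 bits count active holders; a negative value means the
	 * waiting/write bias is present. */
	printf("%-28s count=%ld (0x%lx) active=%ld negative=%s\n",
	       state, count, (unsigned long)count, count & ACTIVE_MASK,
	       count < 0 ? "yes" : "no");
}

int main(void)
{
	long count = UNLOCKED;

	show("unlocked", count);

	count += READ_BIAS;		/* __down_read: result > 0, fast path */
	show("one reader", count);

	count += READ_BIAS;		/* readers just keep incrementing */
	show("two readers", count);

	count = UNLOCKED + WRITE_BIAS;	/* __down_write on an unlocked sem: */
	show("one writer", count);	/* result == WRITE_BIAS, fast path   */

	count += WRITE_BIAS;		/* a second writer contends:          */
	show("contending writer", count);/* result != WRITE_BIAS, slow path   */

	return 0;
}

Compiled as an ordinary C program, this prints a positive count equal to the number of readers while only readers hold the lock, and a negative count as soon as the write/waiting bias is mixed in, which is exactly the distinction the unlock paths (__up_read, __up_write) use to decide whether rwsem_wake() needs to run.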