Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

parisc: Rework arch_rw locking functions

Clean up the arch read/write locking functions based on the arc
implementation. This improves readability of those functions.

Signed-off-by: Helge Deller <deller@gmx.de>

+72 -87
+61 -84
arch/parisc/include/asm/spinlock.h
··· 67 67 68 68 /* 69 69 * Read-write spinlocks, allowing multiple readers but only one writer. 70 - * Linux rwlocks are unfair to writers; they can be starved for an indefinite 71 - * time by readers. With care, they can also be taken in interrupt context. 70 + * Unfair locking as Writers could be starved indefinitely by Reader(s) 72 71 * 73 - * In the PA-RISC implementation, we have a spinlock and a counter. 74 - * Readers use the lock to serialise their access to the counter (which 75 - * records how many readers currently hold the lock). 76 - * Writers hold the spinlock, preventing any readers or other writers from 77 - * grabbing the rwlock. 72 + * The spinlock itself is contained in @counter and access to it is 73 + * serialized with @lock_mutex. 78 74 */ 79 75 80 - /* Note that we have to ensure interrupts are disabled in case we're 81 - * interrupted by some other code that wants to grab the same read lock */ 82 - static __inline__ void arch_read_lock(arch_rwlock_t *rw) 76 + /* 1 - lock taken successfully */ 77 + static inline int arch_read_trylock(arch_rwlock_t *rw) 83 78 { 79 + int ret = 0; 84 80 unsigned long flags; 85 - local_irq_save(flags); 86 - arch_spin_lock_flags(&rw->lock, flags); 87 - rw->counter++; 88 - arch_spin_unlock(&rw->lock); 89 - local_irq_restore(flags); 90 - } 91 81 92 - /* Note that we have to ensure interrupts are disabled in case we're 93 - * interrupted by some other code that wants to grab the same read lock */ 94 - static __inline__ void arch_read_unlock(arch_rwlock_t *rw) 95 - { 96 - unsigned long flags; 97 82 local_irq_save(flags); 98 - arch_spin_lock_flags(&rw->lock, flags); 99 - rw->counter--; 100 - arch_spin_unlock(&rw->lock); 101 - local_irq_restore(flags); 102 - } 83 + arch_spin_lock(&(rw->lock_mutex)); 103 84 104 - /* Note that we have to ensure interrupts are disabled in case we're 105 - * interrupted by some other code that wants to grab the same read lock */ 106 - static __inline__ int arch_read_trylock(arch_rwlock_t 
*rw) 107 - { 108 - unsigned long flags; 109 - retry: 110 - local_irq_save(flags); 111 - if (arch_spin_trylock(&rw->lock)) { 112 - rw->counter++; 113 - arch_spin_unlock(&rw->lock); 114 - local_irq_restore(flags); 115 - return 1; 85 + /* 86 + * zero means writer holds the lock exclusively, deny Reader. 87 + * Otherwise grant lock to first/subseq reader 88 + */ 89 + if (rw->counter > 0) { 90 + rw->counter--; 91 + ret = 1; 116 92 } 117 93 94 + arch_spin_unlock(&(rw->lock_mutex)); 118 95 local_irq_restore(flags); 119 - /* If write-locked, we fail to acquire the lock */ 120 - if (rw->counter < 0) 121 - return 0; 122 96 123 - /* Wait until we have a realistic chance at the lock */ 124 - while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) 97 + return ret; 98 + } 99 + 100 + /* 1 - lock taken successfully */ 101 + static inline int arch_write_trylock(arch_rwlock_t *rw) 102 + { 103 + int ret = 0; 104 + unsigned long flags; 105 + 106 + local_irq_save(flags); 107 + arch_spin_lock(&(rw->lock_mutex)); 108 + 109 + /* 110 + * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), 111 + * deny writer. Otherwise if unlocked grant to writer 112 + * Hence the claim that Linux rwlocks are unfair to writers. 113 + * (can be starved for an indefinite time by readers). 
114 + */ 115 + if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { 116 + rw->counter = 0; 117 + ret = 1; 118 + } 119 + arch_spin_unlock(&(rw->lock_mutex)); 120 + local_irq_restore(flags); 121 + 122 + return ret; 123 + } 124 + 125 + static inline void arch_read_lock(arch_rwlock_t *rw) 126 + { 127 + while (!arch_read_trylock(rw)) 125 128 cpu_relax(); 126 - 127 - goto retry; 128 129 } 129 130 130 - /* Note that we have to ensure interrupts are disabled in case we're 131 - * interrupted by some other code that wants to read_trylock() this lock */ 132 - static __inline__ void arch_write_lock(arch_rwlock_t *rw) 131 + static inline void arch_write_lock(arch_rwlock_t *rw) 132 + { 133 + while (!arch_write_trylock(rw)) 134 + cpu_relax(); 135 + } 136 + 137 + static inline void arch_read_unlock(arch_rwlock_t *rw) 133 138 { 134 139 unsigned long flags; 135 - retry: 140 + 136 141 local_irq_save(flags); 137 - arch_spin_lock_flags(&rw->lock, flags); 138 - 139 - if (rw->counter != 0) { 140 - arch_spin_unlock(&rw->lock); 141 - local_irq_restore(flags); 142 - 143 - while (rw->counter != 0) 144 - cpu_relax(); 145 - 146 - goto retry; 147 - } 148 - 149 - rw->counter = -1; /* mark as write-locked */ 150 - mb(); 142 + arch_spin_lock(&(rw->lock_mutex)); 143 + rw->counter++; 144 + arch_spin_unlock(&(rw->lock_mutex)); 151 145 local_irq_restore(flags); 152 146 } 153 147 154 - static __inline__ void arch_write_unlock(arch_rwlock_t *rw) 155 - { 156 - rw->counter = 0; 157 - arch_spin_unlock(&rw->lock); 158 - } 159 - 160 - /* Note that we have to ensure interrupts are disabled in case we're 161 - * interrupted by some other code that wants to read_trylock() this lock */ 162 - static __inline__ int arch_write_trylock(arch_rwlock_t *rw) 148 + static inline void arch_write_unlock(arch_rwlock_t *rw) 163 149 { 164 150 unsigned long flags; 165 - int result = 0; 166 151 167 152 local_irq_save(flags); 168 - if (arch_spin_trylock(&rw->lock)) { 169 - if (rw->counter == 0) { 170 - rw->counter = -1; 171 - result 
= 1; 172 - } else { 173 - /* Read-locked. Oh well. */ 174 - arch_spin_unlock(&rw->lock); 175 - } 176 - } 153 + arch_spin_lock(&(rw->lock_mutex)); 154 + rw->counter = __ARCH_RW_LOCK_UNLOCKED__; 155 + arch_spin_unlock(&(rw->lock_mutex)); 177 156 local_irq_restore(flags); 178 - 179 - return result; 180 157 } 181 158 182 159 #endif /* __ASM_SPINLOCK_H */
+11 -3
arch/parisc/include/asm/spinlock_types.h
··· 12 12 #endif 13 13 } arch_spinlock_t; 14 14 15 + 16 + /* counter: 17 + * Unlocked : 0x0100_0000 18 + * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it) 19 + * Write lock : 0x0, but only if prior value is "unlocked" 0x0100_0000 20 + */ 15 21 typedef struct { 16 - arch_spinlock_t lock; 17 - volatile int counter; 22 + arch_spinlock_t lock_mutex; 23 + volatile unsigned int counter; 18 24 } arch_rwlock_t; 19 25 20 - #define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 } 26 + #define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 27 + #define __ARCH_RW_LOCK_UNLOCKED { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \ 28 + .counter = __ARCH_RW_LOCK_UNLOCKED__ } 21 29 22 30 #endif