Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/qrwlock: Rename functions to queued_*()

To sync up with the naming convention used in qspinlock, all the
qrwlock functions were renamed to start with "queued" instead of
"queue".

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/1434729002-57724-2-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Waiman Long and committed by
Ingo Molnar
f7d71f20 1c4c7159

+37 -37
+2 -2
arch/x86/include/asm/qrwlock.h
··· 4 4 #include <asm-generic/qrwlock_types.h> 5 5 6 6 #ifndef CONFIG_X86_PPRO_FENCE 7 - #define queue_write_unlock queue_write_unlock 8 - static inline void queue_write_unlock(struct qrwlock *lock) 7 + #define queued_write_unlock queued_write_unlock 8 + static inline void queued_write_unlock(struct qrwlock *lock) 9 9 { 10 10 barrier(); 11 11 ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
+29 -29
include/asm-generic/qrwlock.h
··· 36 36 /* 37 37 * External function declarations 38 38 */ 39 - extern void queue_read_lock_slowpath(struct qrwlock *lock); 40 - extern void queue_write_lock_slowpath(struct qrwlock *lock); 39 + extern void queued_read_lock_slowpath(struct qrwlock *lock); 40 + extern void queued_write_lock_slowpath(struct qrwlock *lock); 41 41 42 42 /** 43 - * queue_read_can_lock- would read_trylock() succeed? 43 + * queued_read_can_lock- would read_trylock() succeed? 44 44 * @lock: Pointer to queue rwlock structure 45 45 */ 46 - static inline int queue_read_can_lock(struct qrwlock *lock) 46 + static inline int queued_read_can_lock(struct qrwlock *lock) 47 47 { 48 48 return !(atomic_read(&lock->cnts) & _QW_WMASK); 49 49 } 50 50 51 51 /** 52 - * queue_write_can_lock- would write_trylock() succeed? 52 + * queued_write_can_lock- would write_trylock() succeed? 53 53 * @lock: Pointer to queue rwlock structure 54 54 */ 55 - static inline int queue_write_can_lock(struct qrwlock *lock) 55 + static inline int queued_write_can_lock(struct qrwlock *lock) 56 56 { 57 57 return !atomic_read(&lock->cnts); 58 58 } 59 59 60 60 /** 61 - * queue_read_trylock - try to acquire read lock of a queue rwlock 61 + * queued_read_trylock - try to acquire read lock of a queue rwlock 62 62 * @lock : Pointer to queue rwlock structure 63 63 * Return: 1 if lock acquired, 0 if failed 64 64 */ 65 - static inline int queue_read_trylock(struct qrwlock *lock) 65 + static inline int queued_read_trylock(struct qrwlock *lock) 66 66 { 67 67 u32 cnts; 68 68 ··· 77 77 } 78 78 79 79 /** 80 - * queue_write_trylock - try to acquire write lock of a queue rwlock 80 + * queued_write_trylock - try to acquire write lock of a queue rwlock 81 81 * @lock : Pointer to queue rwlock structure 82 82 * Return: 1 if lock acquired, 0 if failed 83 83 */ 84 - static inline int queue_write_trylock(struct qrwlock *lock) 84 + static inline int queued_write_trylock(struct qrwlock *lock) 85 85 { 86 86 u32 cnts; 87 87 ··· 93 93 cnts, cnts | 
_QW_LOCKED) == cnts); 94 94 } 95 95 /** 96 - * queue_read_lock - acquire read lock of a queue rwlock 96 + * queued_read_lock - acquire read lock of a queue rwlock 97 97 * @lock: Pointer to queue rwlock structure 98 98 */ 99 - static inline void queue_read_lock(struct qrwlock *lock) 99 + static inline void queued_read_lock(struct qrwlock *lock) 100 100 { 101 101 u32 cnts; 102 102 ··· 105 105 return; 106 106 107 107 /* The slowpath will decrement the reader count, if necessary. */ 108 - queue_read_lock_slowpath(lock); 108 + queued_read_lock_slowpath(lock); 109 109 } 110 110 111 111 /** 112 - * queue_write_lock - acquire write lock of a queue rwlock 112 + * queued_write_lock - acquire write lock of a queue rwlock 113 113 * @lock : Pointer to queue rwlock structure 114 114 */ 115 - static inline void queue_write_lock(struct qrwlock *lock) 115 + static inline void queued_write_lock(struct qrwlock *lock) 116 116 { 117 117 /* Optimize for the unfair lock case where the fair flag is 0. */ 118 118 if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0) 119 119 return; 120 120 121 - queue_write_lock_slowpath(lock); 121 + queued_write_lock_slowpath(lock); 122 122 } 123 123 124 124 /** 125 - * queue_read_unlock - release read lock of a queue rwlock 125 + * queued_read_unlock - release read lock of a queue rwlock 126 126 * @lock : Pointer to queue rwlock structure 127 127 */ 128 - static inline void queue_read_unlock(struct qrwlock *lock) 128 + static inline void queued_read_unlock(struct qrwlock *lock) 129 129 { 130 130 /* 131 131 * Atomically decrement the reader count ··· 134 134 atomic_sub(_QR_BIAS, &lock->cnts); 135 135 } 136 136 137 - #ifndef queue_write_unlock 137 + #ifndef queued_write_unlock 138 138 /** 139 - * queue_write_unlock - release write lock of a queue rwlock 139 + * queued_write_unlock - release write lock of a queue rwlock 140 140 * @lock : Pointer to queue rwlock structure 141 141 */ 142 - static inline void queue_write_unlock(struct qrwlock *lock) 142 + 
static inline void queued_write_unlock(struct qrwlock *lock) 143 143 { 144 144 /* 145 145 * If the writer field is atomic, it can be cleared directly. ··· 154 154 * Remapping rwlock architecture specific functions to the corresponding 155 155 * queue rwlock functions. 156 156 */ 157 - #define arch_read_can_lock(l) queue_read_can_lock(l) 158 - #define arch_write_can_lock(l) queue_write_can_lock(l) 159 - #define arch_read_lock(l) queue_read_lock(l) 160 - #define arch_write_lock(l) queue_write_lock(l) 161 - #define arch_read_trylock(l) queue_read_trylock(l) 162 - #define arch_write_trylock(l) queue_write_trylock(l) 163 - #define arch_read_unlock(l) queue_read_unlock(l) 164 - #define arch_write_unlock(l) queue_write_unlock(l) 157 + #define arch_read_can_lock(l) queued_read_can_lock(l) 158 + #define arch_write_can_lock(l) queued_write_can_lock(l) 159 + #define arch_read_lock(l) queued_read_lock(l) 160 + #define arch_write_lock(l) queued_write_lock(l) 161 + #define arch_read_trylock(l) queued_read_trylock(l) 162 + #define arch_write_trylock(l) queued_write_trylock(l) 163 + #define arch_read_unlock(l) queued_read_unlock(l) 164 + #define arch_write_unlock(l) queued_write_unlock(l) 165 165 166 166 #endif /* __ASM_GENERIC_QRWLOCK_H */
+6 -6
kernel/locking/qrwlock.c
··· 60 60 } 61 61 62 62 /** 63 - * queue_read_lock_slowpath - acquire read lock of a queue rwlock 63 + * queued_read_lock_slowpath - acquire read lock of a queue rwlock 64 64 * @lock: Pointer to queue rwlock structure 65 65 */ 66 - void queue_read_lock_slowpath(struct qrwlock *lock) 66 + void queued_read_lock_slowpath(struct qrwlock *lock) 67 67 { 68 68 u32 cnts; 69 69 ··· 104 104 */ 105 105 arch_spin_unlock(&lock->lock); 106 106 } 107 - EXPORT_SYMBOL(queue_read_lock_slowpath); 107 + EXPORT_SYMBOL(queued_read_lock_slowpath); 108 108 109 109 /** 110 - * queue_write_lock_slowpath - acquire write lock of a queue rwlock 110 + * queued_write_lock_slowpath - acquire write lock of a queue rwlock 111 111 * @lock : Pointer to queue rwlock structure 112 112 */ 113 - void queue_write_lock_slowpath(struct qrwlock *lock) 113 + void queued_write_lock_slowpath(struct qrwlock *lock) 114 114 { 115 115 u32 cnts; 116 116 ··· 149 149 unlock: 150 150 arch_spin_unlock(&lock->lock); 151 151 } 152 - EXPORT_SYMBOL(queue_write_lock_slowpath); 152 + EXPORT_SYMBOL(queued_write_lock_slowpath);