Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking, rwsem: Annotate inner lock as raw

There is no reason to allow the lock protecting rwsems (the
ownerless variant) to be preemptible on -rt. Convert it to raw.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Thomas Gleixner; committed by Ingo Molnar.
ddb6c9b5 8292c9e1

+33 -31
+1 -1
include/linux/rwsem-spinlock.h
··· 22 22 */ 23 23 struct rw_semaphore { 24 24 __s32 activity; 25 - spinlock_t wait_lock; 25 + raw_spinlock_t wait_lock; 26 26 struct list_head wait_list; 27 27 #ifdef CONFIG_DEBUG_LOCK_ALLOC 28 28 struct lockdep_map dep_map;
+6 -4
include/linux/rwsem.h
··· 25 25 /* All arch specific implementations share the same struct */ 26 26 struct rw_semaphore { 27 27 long count; 28 - spinlock_t wait_lock; 28 + raw_spinlock_t wait_lock; 29 29 struct list_head wait_list; 30 30 #ifdef CONFIG_DEBUG_LOCK_ALLOC 31 31 struct lockdep_map dep_map; ··· 56 56 # define __RWSEM_DEP_MAP_INIT(lockname) 57 57 #endif 58 58 59 - #define __RWSEM_INITIALIZER(name) \ 60 - { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \ 61 - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } 59 + #define __RWSEM_INITIALIZER(name) \ 60 + { RWSEM_UNLOCKED_VALUE, \ 61 + __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ 62 + LIST_HEAD_INIT((name).wait_list) \ 63 + __RWSEM_DEP_MAP_INIT(name) } 62 64 63 65 #define DECLARE_RWSEM(name) \ 64 66 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+19 -19
lib/rwsem-spinlock.c
··· 22 22 int ret = 1; 23 23 unsigned long flags; 24 24 25 - if (spin_trylock_irqsave(&sem->wait_lock, flags)) { 25 + if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { 26 26 ret = (sem->activity != 0); 27 - spin_unlock_irqrestore(&sem->wait_lock, flags); 27 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 28 28 } 29 29 return ret; 30 30 } ··· 44 44 lockdep_init_map(&sem->dep_map, name, key, 0); 45 45 #endif 46 46 sem->activity = 0; 47 - spin_lock_init(&sem->wait_lock); 47 + raw_spin_lock_init(&sem->wait_lock); 48 48 INIT_LIST_HEAD(&sem->wait_list); 49 49 } 50 50 EXPORT_SYMBOL(__init_rwsem); ··· 145 145 struct task_struct *tsk; 146 146 unsigned long flags; 147 147 148 - spin_lock_irqsave(&sem->wait_lock, flags); 148 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 149 149 150 150 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 151 151 /* granted */ 152 152 sem->activity++; 153 - spin_unlock_irqrestore(&sem->wait_lock, flags); 153 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 154 154 goto out; 155 155 } 156 156 ··· 165 165 list_add_tail(&waiter.list, &sem->wait_list); 166 166 167 167 /* we don't need to touch the semaphore struct anymore */ 168 - spin_unlock_irqrestore(&sem->wait_lock, flags); 168 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 169 169 170 170 /* wait to be given the lock */ 171 171 for (;;) { ··· 189 189 int ret = 0; 190 190 191 191 192 - spin_lock_irqsave(&sem->wait_lock, flags); 192 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 193 193 194 194 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 195 195 /* granted */ ··· 197 197 ret = 1; 198 198 } 199 199 200 - spin_unlock_irqrestore(&sem->wait_lock, flags); 200 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 201 201 202 202 return ret; 203 203 } ··· 212 212 struct task_struct *tsk; 213 213 unsigned long flags; 214 214 215 - spin_lock_irqsave(&sem->wait_lock, flags); 215 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 216 216 217 217 if (sem->activity == 0 && list_empty(&sem->wait_list)) { 218 218 /* granted */ 219 219 sem->activity = -1; 220 - spin_unlock_irqrestore(&sem->wait_lock, flags); 220 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 221 221 goto out; 222 222 } 223 223 ··· 232 232 list_add_tail(&waiter.list, &sem->wait_list); 233 233 234 234 /* we don't need to touch the semaphore struct anymore */ 235 - spin_unlock_irqrestore(&sem->wait_lock, flags); 235 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 236 236 237 237 /* wait to be given the lock */ 238 238 for (;;) { ··· 260 260 unsigned long flags; 261 261 int ret = 0; 262 262 263 - spin_lock_irqsave(&sem->wait_lock, flags); 263 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 264 264 265 265 if (sem->activity == 0 && list_empty(&sem->wait_list)) { 266 266 /* granted */ ··· 268 268 ret = 1; 269 269 } 270 270 271 - spin_unlock_irqrestore(&sem->wait_lock, flags); 271 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 272 272 273 273 return ret; 274 274 } ··· 280 280 { 281 281 unsigned long flags; 282 282 283 - spin_lock_irqsave(&sem->wait_lock, flags); 283 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 284 284 285 285 if (--sem->activity == 0 && !list_empty(&sem->wait_list)) 286 286 sem = __rwsem_wake_one_writer(sem); 287 287 288 - spin_unlock_irqrestore(&sem->wait_lock, flags); 288 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 289 289 } 290 290 291 291 /* ··· 295 295 { 296 296 unsigned long flags; 297 297 298 - spin_lock_irqsave(&sem->wait_lock, flags); 298 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 299 299 300 300 sem->activity = 0; 301 301 if (!list_empty(&sem->wait_list)) 302 302 sem = __rwsem_do_wake(sem, 1); 303 303 304 - spin_unlock_irqrestore(&sem->wait_lock, flags); 304 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 305 305 } 306 306 307 307 /* ··· 312 312 { 313 313 unsigned long flags; 314 314 315 - spin_lock_irqsave(&sem->wait_lock, flags); 315 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 316 316 317 317 sem->activity = 1; 318 318 if (!list_empty(&sem->wait_list)) 319 319 sem = __rwsem_do_wake(sem, 0); 320 320 321 - spin_unlock_irqrestore(&sem->wait_lock, flags); 321 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 322 322 323 323
+7 -7
lib/rwsem.c
··· 22 22 lockdep_init_map(&sem->dep_map, name, key, 0); 23 23 #endif 24 24 sem->count = RWSEM_UNLOCKED_VALUE; 25 - spin_lock_init(&sem->wait_lock); 25 + raw_spin_lock_init(&sem->wait_lock); 26 26 INIT_LIST_HEAD(&sem->wait_list); 27 27 } 28 28 ··· 180 180 set_task_state(tsk, TASK_UNINTERRUPTIBLE); 181 181 182 182 /* set up my own style of waitqueue */ 183 - spin_lock_irq(&sem->wait_lock); 183 + raw_spin_lock_irq(&sem->wait_lock); 184 184 waiter.task = tsk; 185 185 waiter.flags = flags; 186 186 get_task_struct(tsk); ··· 204 204 adjustment == -RWSEM_ACTIVE_WRITE_BIAS) 205 205 sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); 206 206 207 - spin_unlock_irq(&sem->wait_lock); 207 + raw_spin_unlock_irq(&sem->wait_lock); 208 208 209 209 /* wait to be given the lock */ 210 210 for (;;) { ··· 245 245 { 246 246 unsigned long flags; 247 247 248 - spin_lock_irqsave(&sem->wait_lock, flags); 248 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 249 249 250 250 /* do nothing if list empty */ 251 251 if (!list_empty(&sem->wait_list)) 252 252 sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); 253 253 254 - spin_unlock_irqrestore(&sem->wait_lock, flags); 254 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 255 255 256 256 return sem; 257 257 } ··· 265 265 { 266 266 unsigned long flags; 267 267 268 - spin_lock_irqsave(&sem->wait_lock, flags); 268 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 269 269 270 270 /* do nothing if list empty */ 271 271 if (!list_empty(&sem->wait_list)) 272 272 sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); 273 273 274 - spin_unlock_irqrestore(&sem->wait_lock, flags); 274 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 275 275 276 276 return sem; 277 277 }