Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/rwlocks: introduce write_lock_nested

In preparation for converting bit_spin_lock to rwlock in zsmalloc, so
that multiple writers of zspages can run at the same time. Those
zspages are supposed to be different zspage instances, so it is not a
deadlock. This patch adds write_lock_nested to support this case for
LOCKDEP.

[minchan@kernel.org: fix write_lock_nested for RT]
Link: https://lkml.kernel.org/r/YZfrMTAXV56HFWJY@google.com
[bigeasy@linutronix.de: fixup write_lock_nested() implementation]
Link: https://lkml.kernel.org/r/20211123170134.y6xb7pmpgdn4m3bn@linutronix.de

Link: https://lkml.kernel.org/r/20211115185909.3949505-8-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Naresh Kamboju <naresh.kamboju@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Minchan Kim and committed by
Linus Torvalds
4a57d6bb c4549b87

+47
+6
include/linux/rwlock.h
··· 55 55 #define write_lock(lock) _raw_write_lock(lock) 56 56 #define read_lock(lock) _raw_read_lock(lock) 57 57 58 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 59 + #define write_lock_nested(lock, subclass) _raw_write_lock_nested(lock, subclass) 60 + #else 61 + #define write_lock_nested(lock, subclass) _raw_write_lock(lock) 62 + #endif 63 + 58 64 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 59 65 60 66 #define read_lock_irqsave(lock, flags) \
+8
include/linux/rwlock_api_smp.h
··· 17 17 18 18 void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock); 19 19 void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock); 20 + void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock); 20 21 void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock); 21 22 void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock); 22 23 void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock); ··· 207 206 { 208 207 preempt_disable(); 209 208 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); 209 + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); 210 + } 211 + 212 + static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass) 213 + { 214 + preempt_disable(); 215 + rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_); 210 216 LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); 211 217 } 212 218
+10
include/linux/rwlock_rt.h
··· 28 28 extern int rt_read_trylock(rwlock_t *rwlock); 29 29 extern void rt_read_unlock(rwlock_t *rwlock); 30 30 extern void rt_write_lock(rwlock_t *rwlock); 31 + extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass); 31 32 extern int rt_write_trylock(rwlock_t *rwlock); 32 33 extern void rt_write_unlock(rwlock_t *rwlock); 33 34 ··· 83 82 { 84 83 rt_write_lock(rwlock); 85 84 } 85 + 86 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 87 + static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass) 88 + { 89 + rt_write_lock_nested(rwlock, subclass); 90 + } 91 + #else 92 + #define write_lock_nested(lock, subclass) rt_write_lock(((void)(subclass), (lock))) 93 + #endif 86 94 87 95 static __always_inline void write_lock_bh(rwlock_t *rwlock) 88 96 {
+1
include/linux/spinlock_api_up.h
··· 59 59 #define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) 60 60 #define _raw_read_lock(lock) __LOCK(lock) 61 61 #define _raw_write_lock(lock) __LOCK(lock) 62 + #define _raw_write_lock_nested(lock, subclass) __LOCK(lock) 62 63 #define _raw_spin_lock_bh(lock) __LOCK_BH(lock) 63 64 #define _raw_read_lock_bh(lock) __LOCK_BH(lock) 64 65 #define _raw_write_lock_bh(lock) __LOCK_BH(lock)
+10
kernel/locking/spinlock.c
··· 300 300 __raw_write_lock(lock); 301 301 } 302 302 EXPORT_SYMBOL(_raw_write_lock); 303 + 304 + #ifndef CONFIG_DEBUG_LOCK_ALLOC 305 + #define __raw_write_lock_nested(lock, subclass) __raw_write_lock(((void)(subclass), (lock))) 306 + #endif 307 + 308 + void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) 309 + { 310 + __raw_write_lock_nested(lock, subclass); 311 + } 312 + EXPORT_SYMBOL(_raw_write_lock_nested); 303 313 #endif 304 314 305 315 #ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+12
kernel/locking/spinlock_rt.c
··· 239 239 } 240 240 EXPORT_SYMBOL(rt_write_lock); 241 241 242 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 243 + void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass) 244 + { 245 + rtlock_might_resched(); 246 + rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_); 247 + rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT); 248 + rcu_read_lock(); 249 + migrate_disable(); 250 + } 251 + EXPORT_SYMBOL(rt_write_lock_nested); 252 + #endif 253 + 242 254 void __sched rt_read_unlock(rwlock_t *rwlock) 243 255 { 244 256 rwlock_release(&rwlock->dep_map, _RET_IP_);