Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/local_lock: Add local nested BH locking infrastructure.

Add local_lock_nested_bh() locking. It is based on local_lock_t and the
naming follows the preempt_disable_nested() example.

For !PREEMPT_RT + !LOCKDEP it is a per-CPU annotation for locking
assumptions based on local_bh_disable(). The macro is optimized away
during compilation.
For !PREEMPT_RT + LOCKDEP the local_lock_nested_bh() is reduced to
the usual lock-acquire plus lockdep_assert_in_softirq() - ensuring that
BH is disabled.

For PREEMPT_RT local_lock_nested_bh() acquires the specified per-CPU
lock. It does not disable CPU migration because it relies on
local_bh_disable() disabling CPU migration.
With LOCKDEP it performs the usual lockdep checks as with !PREEMPT_RT.
Due to include hell the softirq check has been moved to spinlock.c.

The intention is to use this locking in places where locking of a per-CPU
variable relies on BH being disabled. Instead of treating disabled
bottom halves as a big per-CPU lock, PREEMPT_RT can use this to reduce
the locking scope to what actually needs protecting.
A side effect is that it also documents the protection scope of the
per-CPU variables.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20240620132727.660738-3-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Sebastian Andrzej Siewior and committed by
Jakub Kicinski
c5bcab75 07e4fd4c

+52
+10
include/linux/local_lock.h
··· 62 62 local_unlock_irqrestore(_T->lock, _T->flags), 63 63 unsigned long flags) 64 64 65 + #define local_lock_nested_bh(_lock) \ 66 + __local_lock_nested_bh(_lock) 67 + 68 + #define local_unlock_nested_bh(_lock) \ 69 + __local_unlock_nested_bh(_lock) 70 + 71 + DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*, 72 + local_lock_nested_bh(_T), 73 + local_unlock_nested_bh(_T)) 74 + 65 75 #endif
+31
include/linux/local_lock_internal.h
··· 62 62 local_lock_debug_init(lock); \ 63 63 } while (0) 64 64 65 + #define __spinlock_nested_bh_init(lock) \ 66 + do { \ 67 + static struct lock_class_key __key; \ 68 + \ 69 + debug_check_no_locks_freed((void *)lock, sizeof(*lock));\ 70 + lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \ 71 + 0, LD_WAIT_CONFIG, LD_WAIT_INV, \ 72 + LD_LOCK_NORMAL); \ 73 + local_lock_debug_init(lock); \ 74 + } while (0) 75 + 65 76 #define __local_lock(lock) \ 66 77 do { \ 67 78 preempt_disable(); \ ··· 108 97 local_lock_release(this_cpu_ptr(lock)); \ 109 98 local_irq_restore(flags); \ 110 99 } while (0) 100 + 101 + #define __local_lock_nested_bh(lock) \ 102 + do { \ 103 + lockdep_assert_in_softirq(); \ 104 + local_lock_acquire(this_cpu_ptr(lock)); \ 105 + } while (0) 106 + 107 + #define __local_unlock_nested_bh(lock) \ 108 + local_lock_release(this_cpu_ptr(lock)) 111 109 112 110 #else /* !CONFIG_PREEMPT_RT */ 113 111 ··· 157 137 #define __local_unlock_irq(lock) __local_unlock(lock) 158 138 159 139 #define __local_unlock_irqrestore(lock, flags) __local_unlock(lock) 140 + 141 + #define __local_lock_nested_bh(lock) \ 142 + do { \ 143 + lockdep_assert_in_softirq_func(); \ 144 + spin_lock(this_cpu_ptr(lock)); \ 145 + } while (0) 146 + 147 + #define __local_unlock_nested_bh(lock) \ 148 + do { \ 149 + spin_unlock(this_cpu_ptr((lock))); \ 150 + } while (0) 160 151 161 152 #endif /* CONFIG_PREEMPT_RT */
+3
include/linux/lockdep.h
··· 600 600 (!in_softirq() || in_irq() || in_nmi())); \ 601 601 } while (0) 602 602 603 + extern void lockdep_assert_in_softirq_func(void); 604 + 603 605 #else 604 606 # define might_lock(lock) do { } while (0) 605 607 # define might_lock_read(lock) do { } while (0) ··· 615 613 # define lockdep_assert_preemption_enabled() do { } while (0) 616 614 # define lockdep_assert_preemption_disabled() do { } while (0) 617 615 # define lockdep_assert_in_softirq() do { } while (0) 616 + # define lockdep_assert_in_softirq_func() do { } while (0) 618 617 #endif 619 618 620 619 #ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+8
kernel/locking/spinlock.c
··· 413 413 && addr < (unsigned long)__lock_text_end; 414 414 } 415 415 EXPORT_SYMBOL(in_lock_functions); 416 + 417 + #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_PREEMPT_RT) 418 + void notrace lockdep_assert_in_softirq_func(void) 419 + { 420 + lockdep_assert_in_softirq(); 421 + } 422 + EXPORT_SYMBOL(lockdep_assert_in_softirq_func); 423 + #endif