Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lockdep: Change hardirq{s_enabled,_context} to per-cpu variables

Currently all IRQ-tracking state is in task_struct, this means that
task_struct needs to be defined before we use it.

Especially for lockdep_assert_irq*() this can lead to header-hell.

Move the hardirq state into per-cpu variables to avoid the task_struct
dependency.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20200623083721.512673481@infradead.org

+52 -43
+12 -7
include/linux/irqflags.h
··· 14 14 15 15 #include <linux/typecheck.h> 16 16 #include <asm/irqflags.h> 17 + #include <asm/percpu.h> 17 18 18 19 /* Currently lockdep_softirqs_on/off is used only by lockdep */ 19 20 #ifdef CONFIG_PROVE_LOCKING ··· 32 31 #endif 33 32 34 33 #ifdef CONFIG_TRACE_IRQFLAGS 34 + 35 + DECLARE_PER_CPU(int, hardirqs_enabled); 36 + DECLARE_PER_CPU(int, hardirq_context); 37 + 35 38 extern void trace_hardirqs_on_prepare(void); 36 39 extern void trace_hardirqs_off_finish(void); 37 40 extern void trace_hardirqs_on(void); 38 41 extern void trace_hardirqs_off(void); 39 - # define lockdep_hardirq_context(p) ((p)->hardirq_context) 42 + # define lockdep_hardirq_context(p) (this_cpu_read(hardirq_context)) 40 43 # define lockdep_softirq_context(p) ((p)->softirq_context) 41 - # define lockdep_hardirqs_enabled(p) ((p)->hardirqs_enabled) 44 + # define lockdep_hardirqs_enabled(p) (this_cpu_read(hardirqs_enabled)) 42 45 # define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled) 43 - # define lockdep_hardirq_enter() \ 44 - do { \ 45 - if (!current->hardirq_context++) \ 46 - current->hardirq_threaded = 0; \ 46 + # define lockdep_hardirq_enter() \ 47 + do { \ 48 + if (this_cpu_inc_return(hardirq_context) == 1) \ 49 + current->hardirq_threaded = 0; \ 47 50 } while (0) 48 51 # define lockdep_hardirq_threaded() \ 49 52 do { \ ··· 55 50 } while (0) 56 51 # define lockdep_hardirq_exit() \ 57 52 do { \ 58 - current->hardirq_context--; \ 53 + this_cpu_dec(hardirq_context); \ 59 54 } while (0) 60 55 # define lockdep_softirq_enter() \ 61 56 do { \
+18 -16
include/linux/lockdep.h
··· 11 11 #define __LINUX_LOCKDEP_H 12 12 13 13 #include <linux/lockdep_types.h> 14 + #include <asm/percpu.h> 14 15 15 16 struct task_struct; 16 17 ··· 530 529 lock_release(&(lock)->dep_map, _THIS_IP_); \ 531 530 } while (0) 532 531 533 - #define lockdep_assert_irqs_enabled() do { \ 534 - WARN_ONCE(debug_locks && !current->lockdep_recursion && \ 535 - !current->hardirqs_enabled, \ 536 - "IRQs not enabled as expected\n"); \ 537 - } while (0) 532 + DECLARE_PER_CPU(int, hardirqs_enabled); 533 + DECLARE_PER_CPU(int, hardirq_context); 538 534 539 - #define lockdep_assert_irqs_disabled() do { \ 540 - WARN_ONCE(debug_locks && !current->lockdep_recursion && \ 541 - current->hardirqs_enabled, \ 542 - "IRQs not disabled as expected\n"); \ 543 - } while (0) 535 + #define lockdep_assert_irqs_enabled() \ 536 + do { \ 537 + WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled)); \ 538 + } while (0) 544 539 545 - #define lockdep_assert_in_irq() do { \ 546 - WARN_ONCE(debug_locks && !current->lockdep_recursion && \ 547 - !current->hardirq_context, \ 548 - "Not in hardirq as expected\n"); \ 549 - } while (0) 540 + #define lockdep_assert_irqs_disabled() \ 541 + do { \ 542 + WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled)); \ 543 + } while (0) 544 + 545 + #define lockdep_assert_in_irq() \ 546 + do { \ 547 + WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context)); \ 548 + } while (0) 550 549 551 550 #else 552 551 # define might_lock(lock) do { } while (0) 553 552 # define might_lock_read(lock) do { } while (0) 554 553 # define might_lock_nested(lock, subclass) do { } while (0) 554 + 555 555 # define lockdep_assert_irqs_enabled() do { } while (0) 556 556 # define lockdep_assert_irqs_disabled() do { } while (0) 557 557 # define lockdep_assert_in_irq() do { } while (0) ··· 562 560 563 561 # define lockdep_assert_RT_in_threaded_ctx() do { \ 564 562 WARN_ONCE(debug_locks && !current->lockdep_recursion && \ 565 - current->hardirq_context && \ 563 - lockdep_hardirq_context(current) && \ 566 564 !(current->hardirq_threaded || current->irq_config), \ 567 565 "Not in threaded context on PREEMPT_RT as expected\n"); \ 568 566 } while (0)
-2
include/linux/sched.h
··· 990 990 unsigned long hardirq_disable_ip; 991 991 unsigned int hardirq_enable_event; 992 992 unsigned int hardirq_disable_event; 993 - int hardirqs_enabled; 994 - int hardirq_context; 995 993 u64 hardirq_chain_key; 996 994 unsigned long softirq_disable_ip; 997 995 unsigned long softirq_enable_ip;
+1 -3
kernel/fork.c
··· 1954 1954 1955 1955 rt_mutex_init_task(p); 1956 1956 1957 + lockdep_assert_irqs_enabled(); 1957 1958 #ifdef CONFIG_PROVE_LOCKING 1958 - DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); 1959 1959 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); 1960 1960 #endif 1961 1961 retval = -EAGAIN; ··· 2036 2036 #endif 2037 2037 #ifdef CONFIG_TRACE_IRQFLAGS 2038 2038 p->irq_events = 0; 2039 - p->hardirqs_enabled = 0; 2040 2039 p->hardirq_enable_ip = 0; 2041 2040 p->hardirq_enable_event = 0; 2042 2041 p->hardirq_disable_ip = _THIS_IP_; ··· 2045 2046 p->softirq_enable_event = 0; 2046 2047 p->softirq_disable_ip = 0; 2047 2048 p->softirq_disable_event = 0; 2048 - p->hardirq_context = 0; 2049 2049 p->softirq_context = 0; 2050 2050 #endif 2051 2051
+15 -15
kernel/locking/lockdep.c
··· 2062 2062 pr_warn("-----------------------------------------------------\n"); 2063 2063 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", 2064 2064 curr->comm, task_pid_nr(curr), 2065 - curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, 2065 lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, 2066 2066 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, 2067 - curr->hardirqs_enabled, 2067 lockdep_hardirqs_enabled(curr), 2068 2068 curr->softirqs_enabled); 2069 2069 print_lock(next); 2070 2070 ··· 3658 3658 if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK)) 3659 3659 return; 3660 3660 3661 - if (unlikely(current->hardirqs_enabled)) { 3661 if (unlikely(lockdep_hardirqs_enabled(current))) { 3662 3662 /* 3663 3663 * Neither irq nor preemption are disabled here 3664 3664 * so this is racy by nature but losing one hit ··· 3686 3686 * Can't allow enabling interrupts while in an interrupt handler, 3687 3687 * that's general bad form and such. Recursion, limited stack etc.. 3688 3688 */ 3689 - if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) 3689 if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context(current))) 3690 3690 return; 3691 3691 3692 3692 current->hardirq_chain_key = current->curr_chain_key; ··· 3724 3724 if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK)) 3725 3725 return; 3726 3726 3727 - if (curr->hardirqs_enabled) { 3727 if (lockdep_hardirqs_enabled(curr)) { 3728 3728 /* 3729 3729 * Neither irq nor preemption are disabled here 3730 3730 * so this is racy by nature but losing one hit ··· 3751 3751 3752 3752 skip_checks: 3753 3753 /* we'll do an OFF -> ON transition: */ 3754 - curr->hardirqs_enabled = 1; 3754 this_cpu_write(hardirqs_enabled, 1); 3755 3755 curr->hardirq_enable_ip = ip; 3756 3756 curr->hardirq_enable_event = ++curr->irq_events; 3757 3757 debug_atomic_inc(hardirqs_on_events); ··· 3783 3783 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 3784 3784 return; 3785 3785 3786 - if (curr->hardirqs_enabled) { 3786 if (lockdep_hardirqs_enabled(curr)) { 3787 3787 /* 3788 3788 * We have done an ON -> OFF transition: 3789 3789 */ 3790 - curr->hardirqs_enabled = 0; 3790 this_cpu_write(hardirqs_enabled, 0); 3791 3791 curr->hardirq_disable_ip = ip; 3792 3792 curr->hardirq_disable_event = ++curr->irq_events; 3793 3793 debug_atomic_inc(hardirqs_off_events); ··· 3832 3832 * usage bit for all held locks, if hardirqs are 3833 3833 * enabled too: 3834 3834 */ 3835 - if (curr->hardirqs_enabled) 3835 if (lockdep_hardirqs_enabled(curr)) 3836 3836 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ); 3837 3837 lockdep_recursion_finish(); 3838 3838 } ··· 3881 3881 */ 3882 3882 if (!hlock->trylock) { 3883 3883 if (hlock->read) { 3884 - if (curr->hardirq_context) 3884 if (lockdep_hardirq_context(curr)) 3885 3885 if (!mark_lock(curr, hlock, 3886 3886 LOCK_USED_IN_HARDIRQ_READ)) 3887 3887 return 0; ··· 3890 3890 LOCK_USED_IN_SOFTIRQ_READ)) 3891 3891 return 0; 3892 3892 } else { 3893 - if (lockdep_hardirq_context(curr)) 3894 3894 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) 3895 3895 return 0; 3896 3896 if (curr->softirq_context) ··· 3928 3928 3929 3929 static inline unsigned int task_irq_context(struct task_struct *task) 3930 3930 { 3931 - return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context + 3931 return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context(task) + 3932 3932 LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context; 3933 3933 } 3934 3934 ··· 4021 4021 * Set appropriate wait type for the context; for IRQs we have to take 4022 4022 * into account force_irqthread as that is implied by PREEMPT_RT. 4023 4023 */ 4024 - if (curr->hardirq_context) { 4024 if (lockdep_hardirq_context(curr)) { 4025 4025 /* 4026 4026 * Check if force_irqthreads will run us threaded. 4027 4027 */ ··· 4864 4864 return; 4865 4865 4866 4866 if (irqs_disabled_flags(flags)) { 4867 - if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) { 4867 if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled(current))) { 4868 4868 printk("possible reason: unannotated irqs-off.\n"); 4869 4869 } 4870 4870 } else { 4871 - if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) { 4871 if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled(current))) { 4872 4872 printk("possible reason: unannotated irqs-on.\n"); 4873 4873 } 4874 4874 }
+6
kernel/softirq.c
··· 107 107 * where hardirqs are disabled legitimately: 108 108 */ 109 109 #ifdef CONFIG_TRACE_IRQFLAGS 110 + 111 + DEFINE_PER_CPU(int, hardirqs_enabled); 112 + DEFINE_PER_CPU(int, hardirq_context); 113 + EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled); 114 + EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context); 115 + 110 116 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) 111 117 { 112 118 unsigned long flags;