locking/lockdep: Improve 'invalid wait context' splat

The 'invalid wait context' splat doesn't print all the information
required to reconstruct / validate the error, specifically the
irq-context state is missing.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Peter Zijlstra, committed by Ingo Molnar (commit 9a019db0, parent a13f58a0).

1 file changed, 31 insertions(+), 20 deletions(-)
kernel/locking/lockdep.c
@@ -3952,6 +3952,30 @@
 		return ret;
 }

+static inline short task_wait_context(struct task_struct *curr)
+{
+	/*
+	 * Set appropriate wait type for the context; for IRQs we have to take
+	 * into account force_irqthread as that is implied by PREEMPT_RT.
+	 */
+	if (curr->hardirq_context) {
+		/*
+		 * Check if force_irqthreads will run us threaded.
+		 */
+		if (curr->hardirq_threaded || curr->irq_config)
+			return LD_WAIT_CONFIG;
+
+		return LD_WAIT_SPIN;
+	} else if (curr->softirq_context) {
+		/*
+		 * Softirqs are always threaded.
+		 */
+		return LD_WAIT_CONFIG;
+	}
+
+	return LD_WAIT_MAX;
+}
+
 static int
 print_lock_invalid_wait_context(struct task_struct *curr,
 				struct held_lock *hlock)
 {
+	short curr_inner;
+
 	if (!debug_locks_off())
 		return 0;
 	if (debug_locks_silent)
@@ -3971,6 +3997,10 @@
 	print_lock(hlock);

 	pr_warn("other info that might help us debug this:\n");
+
+	curr_inner = task_wait_context(curr);
+	pr_warn("context-{%d:%d}\n", curr_inner, curr_inner);
+
 	lockdep_print_held_locks(curr);

 	pr_warn("stack backtrace:\n");
@@ -4017,26 +4047,7 @@
 	}
 	depth++;

-	/*
-	 * Set appropriate wait type for the context; for IRQs we have to take
-	 * into account force_irqthread as that is implied by PREEMPT_RT.
-	 */
-	if (curr->hardirq_context) {
-		/*
-		 * Check if force_irqthreads will run us threaded.
-		 */
-		if (curr->hardirq_threaded || curr->irq_config)
-			curr_inner = LD_WAIT_CONFIG;
-		else
-			curr_inner = LD_WAIT_SPIN;
-	} else if (curr->softirq_context) {
-		/*
-		 * Softirqs are always threaded.
-		 */
-		curr_inner = LD_WAIT_CONFIG;
-	} else {
-		curr_inner = LD_WAIT_MAX;
-	}
+	curr_inner = task_wait_context(curr);

 	for (; depth < curr->lockdep_depth; depth++) {
 		struct held_lock *prev = curr->held_locks + depth;