Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

debug lockups: Improve lockup detection, fix generic arch fallback

As Andrew noted, my previous patch ("debug lockups: Improve lockup
detection") broke/removed SysRq-L support from architectures that do
not provide a __trigger_all_cpu_backtrace implementation.

Restore a fallback path and clean up the SysRq-L machinery a bit:

- Rename the arch method to arch_trigger_all_cpu_backtrace()

- Simplify the define

- Document the method a bit - in the hope of more architectures
adding support for it.

[ The patch touches Sparc code for the rename. ]

Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "David S. Miller" <davem@davemloft.net>
LKML-Reference: <20090802140809.7ec4bb6b.akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

+38 -10
+2 -2
arch/sparc/include/asm/irq_64.h
··· 89 89 return retval; 90 90 } 91 91 92 - void __trigger_all_cpu_backtrace(void); 93 - #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() 92 + void arch_trigger_all_cpu_backtrace(void); 93 + #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace 94 94 95 95 extern void *hardirq_stack[NR_CPUS]; 96 96 extern void *softirq_stack[NR_CPUS];
+2 -2
arch/sparc/kernel/process_64.c
··· 251 251 } 252 252 } 253 253 254 - void __trigger_all_cpu_backtrace(void) 254 + void arch_trigger_all_cpu_backtrace(void) 255 255 { 256 256 struct thread_info *tp = current_thread_info(); 257 257 struct pt_regs *regs = get_irq_regs(); ··· 304 304 305 305 static void sysrq_handle_globreg(int key, struct tty_struct *tty) 306 306 { 307 - __trigger_all_cpu_backtrace(); 307 + arch_trigger_all_cpu_backtrace(); 308 308 } 309 309 310 310 static struct sysrq_key_op sparc_globalreg_op = {
+2 -2
arch/x86/include/asm/nmi.h
··· 45 45 void __user *, size_t *, loff_t *); 46 46 extern int unknown_nmi_panic; 47 47 48 - void __trigger_all_cpu_backtrace(void); 49 - #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() 48 + void arch_trigger_all_cpu_backtrace(void); 49 + #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace 50 50 51 51 static inline void localise_nmi_watchdog(void) 52 52 {
+1 -1
arch/x86/kernel/apic/nmi.c
··· 554 554 return 0; 555 555 } 556 556 557 - void __trigger_all_cpu_backtrace(void) 557 + void arch_trigger_all_cpu_backtrace(void) 558 558 { 559 559 int i; 560 560
+14 -1
drivers/char/sysrq.c
··· 223 223 224 224 static void sysrq_handle_showallcpus(int key, struct tty_struct *tty) 225 225 { 226 - trigger_all_cpu_backtrace(); 226 + /* 227 + * Fall back to the workqueue based printing if the 228 + * backtrace printing did not succeed or the 229 + * architecture has no support for it: 230 + */ 231 + if (!trigger_all_cpu_backtrace()) { 232 + struct pt_regs *regs = get_irq_regs(); 233 + 234 + if (regs) { 235 + printk(KERN_INFO "CPU%d:\n", smp_processor_id()); 236 + show_regs(regs); 237 + } 238 + schedule_work(&sysrq_showallcpus); 239 + } 227 240 } 228 241 229 242 static struct sysrq_key_op sysrq_showallcpus_op = {
+17 -2
include/linux/nmi.h
··· 28 28 static inline void acpi_nmi_enable(void) { } 29 29 #endif 30 30 31 - #ifndef trigger_all_cpu_backtrace 32 - #define trigger_all_cpu_backtrace() do { } while (0) 31 + /* 32 + * Create trigger_all_cpu_backtrace() out of the arch-provided 33 + * base function. Return whether such support was available, 34 + * to allow calling code to fall back to some other mechanism: 35 + */ 36 + #ifdef arch_trigger_all_cpu_backtrace 37 + static inline bool trigger_all_cpu_backtrace(void) 38 + { 39 + arch_trigger_all_cpu_backtrace(); 40 + 41 + return true; 42 + } 43 + #else 44 + static inline bool trigger_all_cpu_backtrace(void) 45 + { 46 + return false; 47 + } 33 48 #endif 34 49 35 50 #endif