/*
 * linux/kernel/panic.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/interrupt.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/dmi.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

int panic_on_oops;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);

int panic_timeout;

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

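/*
 * Example (illustrative only, not part of this file): other code can hook
 * the panic path through this atomic notifier chain.  The callback runs
 * in atomic context and receives the panic message string as its data
 * argument (the event number passed by panic() is always 0):
 *
 *	static int my_panic_event(struct notifier_block *nb,
 *				  unsigned long event, void *msg)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_panic_block = {
 *		.notifier_call	= my_panic_event,
 *	};
 *	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_block);
 */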

static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);

/**
 * panic - halt the system
 * @fmt: The text string to print
 *
 * Display a message, then perform cleanups.
 *
 * This function never returns.
 */
NORET_TYPE void panic(const char * fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0;
	int state = 0;

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 */
	preempt_disable();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	dump_stack();
#endif

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * Do we want to call this before we try to display a message?
	 */
	crash_kexec(NULL);

	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();

	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	bust_spinlocks(0);

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down.  But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	{
		unsigned long caller;

		caller = (unsigned long)__builtin_return_address(0);
		disabled_wait(caller);
	}
#endif
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);

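/*
 * Kernel taint tracking: each TAINT_* flag is one bit in tainted_mask.
 * The table below maps every bit to the character that print_tainted()
 * emits in oops and panic reports ('P' once a proprietary module is
 * loaded, 'W' once a warning has fired, and so on).
 */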
struct tnt {
	u8	bit;
	char	true;
	char	false;
};

static const struct tnt tnts[] = {
	{ TAINT_PROPRIETARY_MODULE,	'P', 'G' },
	{ TAINT_FORCED_MODULE,		'F', ' ' },
	{ TAINT_UNSAFE_SMP,		'S', ' ' },
	{ TAINT_FORCED_RMMOD,		'R', ' ' },
	{ TAINT_MACHINE_CHECK,		'M', ' ' },
	{ TAINT_BAD_PAGE,		'B', ' ' },
	{ TAINT_USER,			'U', ' ' },
	{ TAINT_DIE,			'D', ' ' },
	{ TAINT_OVERRIDDEN_ACPI_TABLE,	'A', ' ' },
	{ TAINT_WARN,			'W', ' ' },
	{ TAINT_CRAP,			'C', ' ' },
	{ TAINT_FIRMWARE_WORKAROUND,	'I', ' ' },
};

/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 * 'P' - Proprietary module has been loaded.
 * 'F' - Module has been forcibly loaded.
 * 'S' - SMP with CPUs not designed for SMP.
 * 'R' - User forced a module unload.
 * 'M' - System experienced a machine check exception.
 * 'B' - System has hit bad_page.
 * 'U' - Userspace-defined naughtiness.
 * 'D' - Kernel has oopsed before.
 * 'A' - ACPI table overridden.
 * 'W' - Taint on warning.
 * 'C' - Modules from drivers/staging are loaded.
 * 'I' - Working around severe firmware bug.
 *
 * The string is overwritten by the next call to print_tainted().
 */
const char *print_tainted(void)
{
	static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ") + 1];

	if (tainted_mask) {
		char *s;
		int i;

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < ARRAY_SIZE(tnts); i++) {
			const struct tnt *t = &tnts[i];
			*s++ = test_bit(t->bit, &tainted_mask) ?
					t->true : t->false;
		}
		*s = 0;
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}

int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
	return tainted_mask;
}

void add_taint(unsigned flag)
{
	/*
	 * Can't trust the integrity of the kernel anymore.
	 * We don't call debug_locks_off() directly because the issue
	 * is not necessarily serious enough to set oops_in_progress to 1.
	 * Also we want to keep lockdep alive for the staging-development
	 * and post-warning cases.
	 */
	if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
		printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);

static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
int oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();
}

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

static int init_oops_id(void)
{
	if (!oops_id)
		get_random_bytes(&oops_id, sizeof(oops_id));
	else
		oops_id++;

	return 0;
}
late_initcall(init_oops_id);

void print_oops_end_marker(void)
{
	init_oops_id();
	printk(KERN_WARNING "---[ end trace %016llx ]---\n",
		(unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	kmsg_dump(KMSG_DUMP_OOPS);
}

#ifdef WANT_WARN_ON_SLOWPATH
struct slowpath_args {
	const char *fmt;
	va_list args;
};

static void warn_slowpath_common(const char *file, int line, void *caller,
				 unsigned taint, struct slowpath_args *args)
{
	const char *board;

	printk(KERN_WARNING "------------[ cut here ]------------\n");
	printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
	board = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (board)
		printk(KERN_WARNING "Hardware name: %s\n", board);

	if (args)
		vprintk(args->fmt, args->args);

	print_modules();
	dump_stack();
	print_oops_end_marker();
	add_taint(taint);
}

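/*
 * The three wrappers below back the generic WARN() machinery on
 * configurations where include/asm-generic/bug.h defines
 * WANT_WARN_ON_SLOWPATH: WARN(cond, fmt, ...) expands to a call to
 * warn_slowpath_fmt() with __FILE__ and __LINE__, WARN_TAINT() to
 * warn_slowpath_fmt_taint(), and a bare WARN_ON() takes the cheaper
 * warn_slowpath_null() path with no format string at all.
 */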
void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
{
	struct slowpath_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	warn_slowpath_common(file, line, __builtin_return_address(0),
			     TAINT_WARN, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);

void warn_slowpath_fmt_taint(const char *file, int line,
			     unsigned taint, const char *fmt, ...)
{
	struct slowpath_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	warn_slowpath_common(file, line, __builtin_return_address(0),
			     taint, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt_taint);

void warn_slowpath_null(const char *file, int line)
{
	warn_slowpath_common(file, line, __builtin_return_address(0),
			     TAINT_WARN, NULL);
}
EXPORT_SYMBOL(warn_slowpath_null);
#endif

#ifdef CONFIG_CC_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
void __stack_chk_fail(void)
{
	panic("stack-protector: Kernel stack is corrupted in: %p\n",
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

core_param(panic, panic_timeout, int, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
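
/*
 * Note: the two core_param() lines above also make these settable at
 * boot time: "panic=<seconds>" fills panic_timeout and
 * "pause_on_oops=<seconds>" fills pause_on_oops.  As core parameters
 * not tied to any module, they appear under
 * /sys/module/kernel/parameters/ with the 0644 permissions given here.
 */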