x86: whitespace cleanup of mce_64.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

+87 -79
arch/x86/kernel/cpu/mcheck/mce_64.c
··· 1 /* 2 * Machine check handler. 3 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. 4 - * Rest from unknown author(s). 5 - * 2004 Andi Kleen. Rewrote most of it. 6 */ 7 8 #include <linux/init.h> ··· 23 #include <linux/ctype.h> 24 #include <linux/kmod.h> 25 #include <linux/kdebug.h> 26 - #include <asm/processor.h> 27 #include <asm/msr.h> 28 #include <asm/mce.h> 29 #include <asm/uaccess.h> ··· 63 * separate MCEs from kernel messages to avoid bogus bug reports. 64 */ 65 66 - struct mce_log mcelog = { 67 MCE_LOG_SIGNATURE, 68 MCE_LOG_LEN, 69 - }; 70 71 void mce_log(struct mce *mce) 72 { ··· 111 "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n", 112 m->cpu, m->mcgstatus, m->bank, m->status); 113 if (m->rip) { 114 - printk(KERN_EMERG 115 - "RIP%s %02x:<%016Lx> ", 116 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", 117 m->cs, m->rip); 118 if (m->cs == __KERNEL_CS) 119 print_symbol("{%s}", m->rip); 120 printk("\n"); 121 } 122 - printk(KERN_EMERG "TSC %Lx ", m->tsc); 123 if (m->addr) 124 printk("ADDR %Lx ", m->addr); 125 if (m->misc) 126 - printk("MISC %Lx ", m->misc); 127 printk("\n"); 128 printk(KERN_EMERG "This is not a software problem!\n"); 129 - printk(KERN_EMERG 130 - "Run through mcelog --ascii to decode and contact your hardware vendor\n"); 131 } 132 133 static void mce_panic(char *msg, struct mce *backup, unsigned long start) 134 - { 135 int i; 136 137 oops_begin(); 138 for (i = 0; i < MCE_LOG_LEN; i++) { 139 unsigned long tsc = mcelog.entry[i].tsc; 140 if (time_before(tsc, start)) 141 continue; 142 - print_mce(&mcelog.entry[i]); 143 if (backup && mcelog.entry[i].tsc == backup->tsc) 144 backup = NULL; 145 } 146 if (backup) 147 print_mce(backup); 148 panic(msg); 149 - } 150 151 static int mce_available(struct cpuinfo_x86 *c) 152 { ··· 170 } 171 } 172 173 - /* 174 * The actual machine check handler 175 */ 176 - 177 void do_machine_check(struct pt_regs * regs, long error_code) 178 { 179 struct mce m, panicm; ··· 193 atomic_inc(&mce_entry); 194 
195 if (regs) 196 - notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL); 197 if (!banks) 198 goto out2; 199 ··· 204 /* if the restart IP is not valid, we're done for */ 205 if (!(m.mcgstatus & MCG_STATUS_RIPV)) 206 no_way_out = 1; 207 - 208 rdtscll(mcestart); 209 barrier(); 210 211 for (i = 0; i < banks; i++) { 212 if (!bank[i]) 213 continue; 214 - 215 - m.misc = 0; 216 m.addr = 0; 217 m.bank = i; 218 m.tsc = 0; ··· 372 if (mce_notify_user()) { 373 next_interval = max(next_interval/2, HZ/100); 374 } else { 375 - next_interval = min(next_interval*2, 376 (int)round_jiffies_relative(check_interval*HZ)); 377 } 378 ··· 423 }; 424 425 static __init int periodic_mcheck_init(void) 426 - { 427 next_interval = check_interval * HZ; 428 if (next_interval) 429 schedule_delayed_work(&mcheck_work, 430 round_jiffies_relative(next_interval)); 431 idle_notifier_register(&mce_idle_notifier); 432 return 0; 433 - } 434 __initcall(periodic_mcheck_init); 435 436 437 - /* 438 * Initialize Machine Checks for a CPU. 439 */ 440 static void mce_init(void *dummy) ··· 444 445 rdmsrl(MSR_IA32_MCG_CAP, cap); 446 banks = cap & 0xff; 447 - if (banks > NR_BANKS) { 448 printk(KERN_INFO "MCE: warning: using only %d banks\n", banks); 449 - banks = NR_BANKS; 450 } 451 /* Use accurate RIP reporting if available. */ 452 if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9) ··· 464 for (i = 0; i < banks; i++) { 465 wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); 466 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); 467 - } 468 } 469 470 /* Add per CPU specific workarounds here */ 471 static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) 472 - { 473 /* This should be disabled by the BIOS, but isn't always */ 474 if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) { 475 - /* disable GART TBL walk error reporting, which trips off 476 incorrectly with the IOMMU & 3ware & Cerberus. 
*/ 477 clear_bit(10, &bank[4]); 478 /* Lots of broken BIOS around that don't clear them ··· 480 mce_bootlog = 0; 481 } 482 483 - } 484 485 static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c) 486 { ··· 496 } 497 } 498 499 - /* 500 * Called for each booted CPU to set up machine checks. 501 - * Must be called with preempt off. 502 */ 503 void __cpuinit mcheck_init(struct cpuinfo_x86 *c) 504 { 505 static cpumask_t mce_cpus = CPU_MASK_NONE; 506 507 - mce_cpu_quirks(c); 508 509 if (mce_dont_init || 510 cpu_test_and_set(smp_processor_id(), mce_cpus) || ··· 553 return 0; 554 } 555 556 - static void collect_tscs(void *data) 557 - { 558 unsigned long *cpu_tsc = (unsigned long *)data; 559 - rdtscll(cpu_tsc[smp_processor_id()]); 560 - } 561 562 - static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off) 563 { 564 unsigned long *cpu_tsc; 565 static DECLARE_MUTEX(mce_read_sem); ··· 573 if (!cpu_tsc) 574 return -ENOMEM; 575 576 - down(&mce_read_sem); 577 next = rcu_dereference(mcelog.next); 578 579 /* Only supports full reads right now */ 580 - if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { 581 up(&mce_read_sem); 582 kfree(cpu_tsc); 583 return -EINVAL; 584 } 585 586 err = 0; 587 - for (i = 0; i < next; i++) { 588 unsigned long start = jiffies; 589 while (!mcelog.entry[i].finished) { 590 if (time_after_eq(jiffies, start + 2)) { 591 memset(mcelog.entry + i,0, sizeof(struct mce)); ··· 596 } 597 smp_rmb(); 598 err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce)); 599 - buf += sizeof(struct mce); 600 timeout: 601 ; 602 - } 603 604 memset(mcelog.entry, 0, next * sizeof(struct mce)); 605 mcelog.next = 0; 606 607 synchronize_sched(); 608 609 - /* Collect entries that were still getting written before the synchronize. 
*/ 610 - 611 on_each_cpu(collect_tscs, cpu_tsc, 1, 1); 612 - for (i = next; i < MCE_LOG_LEN; i++) { 613 - if (mcelog.entry[i].finished && 614 - mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { 615 - err |= copy_to_user(buf, mcelog.entry+i, sizeof(struct mce)); 616 smp_rmb(); 617 buf += sizeof(struct mce); 618 memset(&mcelog.entry[i], 0, sizeof(struct mce)); 619 } 620 - } 621 up(&mce_read_sem); 622 kfree(cpu_tsc); 623 - return err ? -EFAULT : buf - ubuf; 624 } 625 626 static unsigned int mce_poll(struct file *file, poll_table *wait) ··· 634 return 0; 635 } 636 637 - static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg) 638 { 639 int __user *p = (int __user *)arg; 640 if (!capable(CAP_SYS_ADMIN)) 641 - return -EPERM; 642 switch (cmd) { 643 - case MCE_GET_RECORD_LEN: 644 return put_user(sizeof(struct mce), p); 645 case MCE_GET_LOG_LEN: 646 - return put_user(MCE_LOG_LEN, p); 647 case MCE_GETCLEAR_FLAGS: { 648 unsigned flags; 649 - do { 650 flags = mcelog.flags; 651 - } while (cmpxchg(&mcelog.flags, flags, 0) != flags); 652 - return put_user(flags, p); 653 } 654 default: 655 - return -ENOTTY; 656 - } 657 } 658 659 static const struct file_operations mce_chrdev_ops = { ··· 687 set_in_cr4(X86_CR4_MCE); 688 } 689 690 - /* 691 - * Old style boot options parsing. Only for compatibility. 692 */ 693 - 694 static int __init mcheck_disable(char *str) 695 { 696 mce_dont_init = 1; ··· 710 else if (isdigit(str[0])) 711 get_option(&str, &tolerant); 712 else 713 - printk("mce= argument %s ignored. Please use /sys", str); 714 return 1; 715 } 716 717 __setup("nomce", mcheck_disable); 718 __setup("mce=", mcheck_enable); 719 720 - /* 721 * Sysfs support 722 - */ 723 724 /* On resume clear all MCE state. Don't want to see leftovers from the BIOS. 
725 Only one CPU is active at this time, the others get readded later using ··· 731 } 732 733 /* Reinit MCEs after user configuration changes */ 734 - static void mce_restart(void) 735 - { 736 if (next_interval) 737 cancel_delayed_work(&mcheck_work); 738 /* Timer race is harmless here */ 739 - on_each_cpu(mce_init, NULL, 1, 1); 740 next_interval = check_interval * HZ; 741 if (next_interval) 742 schedule_delayed_work(&mcheck_work, ··· 752 753 /* Why are there no generic functions for this? */ 754 #define ACCESSOR(name, var, start) \ 755 - static ssize_t show_ ## name(struct sys_device *s, char *buf) { \ 756 - return sprintf(buf, "%lx\n", (unsigned long)var); \ 757 - } \ 758 static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \ 759 - char *end; \ 760 - unsigned long new = simple_strtoul(buf, &end, 0); \ 761 - if (end == buf) return -EINVAL; \ 762 - var = new; \ 763 - start; \ 764 - return end-buf; \ 765 - } \ 766 static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); 767 768 /* TBD should generate these dynamically based on number of available banks */
··· 1 /* 2 * Machine check handler. 3 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. 4 + * Rest from unknown author(s). 5 + * 2004 Andi Kleen. Rewrote most of it. 6 */ 7 8 #include <linux/init.h> ··· 23 #include <linux/ctype.h> 24 #include <linux/kmod.h> 25 #include <linux/kdebug.h> 26 + #include <asm/processor.h> 27 #include <asm/msr.h> 28 #include <asm/mce.h> 29 #include <asm/uaccess.h> ··· 63 * separate MCEs from kernel messages to avoid bogus bug reports. 64 */ 65 66 + struct mce_log mcelog = { 67 MCE_LOG_SIGNATURE, 68 MCE_LOG_LEN, 69 + }; 70 71 void mce_log(struct mce *mce) 72 { ··· 111 "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n", 112 m->cpu, m->mcgstatus, m->bank, m->status); 113 if (m->rip) { 114 + printk(KERN_EMERG "RIP%s %02x:<%016Lx> ", 115 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", 116 m->cs, m->rip); 117 if (m->cs == __KERNEL_CS) 118 print_symbol("{%s}", m->rip); 119 printk("\n"); 120 } 121 + printk(KERN_EMERG "TSC %Lx ", m->tsc); 122 if (m->addr) 123 printk("ADDR %Lx ", m->addr); 124 if (m->misc) 125 + printk("MISC %Lx ", m->misc); 126 printk("\n"); 127 printk(KERN_EMERG "This is not a software problem!\n"); 128 + printk(KERN_EMERG "Run through mcelog --ascii to decode " 129 + "and contact your hardware vendor\n"); 130 } 131 132 static void mce_panic(char *msg, struct mce *backup, unsigned long start) 133 + { 134 int i; 135 136 oops_begin(); 137 for (i = 0; i < MCE_LOG_LEN; i++) { 138 unsigned long tsc = mcelog.entry[i].tsc; 139 + 140 if (time_before(tsc, start)) 141 continue; 142 + print_mce(&mcelog.entry[i]); 143 if (backup && mcelog.entry[i].tsc == backup->tsc) 144 backup = NULL; 145 } 146 if (backup) 147 print_mce(backup); 148 panic(msg); 149 + } 150 151 static int mce_available(struct cpuinfo_x86 *c) 152 { ··· 170 } 171 } 172 173 + /* 174 * The actual machine check handler 175 */ 176 void do_machine_check(struct pt_regs * regs, long error_code) 177 { 178 struct mce m, panicm; ··· 194 atomic_inc(&mce_entry); 195 
196 if (regs) 197 + notify_die(DIE_NMI, "machine check", regs, error_code, 18, 198 + SIGKILL); 199 if (!banks) 200 goto out2; 201 ··· 204 /* if the restart IP is not valid, we're done for */ 205 if (!(m.mcgstatus & MCG_STATUS_RIPV)) 206 no_way_out = 1; 207 + 208 rdtscll(mcestart); 209 barrier(); 210 211 for (i = 0; i < banks; i++) { 212 if (!bank[i]) 213 continue; 214 + 215 + m.misc = 0; 216 m.addr = 0; 217 m.bank = i; 218 m.tsc = 0; ··· 372 if (mce_notify_user()) { 373 next_interval = max(next_interval/2, HZ/100); 374 } else { 375 + next_interval = min(next_interval * 2, 376 (int)round_jiffies_relative(check_interval*HZ)); 377 } 378 ··· 423 }; 424 425 static __init int periodic_mcheck_init(void) 426 + { 427 next_interval = check_interval * HZ; 428 if (next_interval) 429 schedule_delayed_work(&mcheck_work, 430 round_jiffies_relative(next_interval)); 431 idle_notifier_register(&mce_idle_notifier); 432 return 0; 433 + } 434 __initcall(periodic_mcheck_init); 435 436 437 + /* 438 * Initialize Machine Checks for a CPU. 439 */ 440 static void mce_init(void *dummy) ··· 444 445 rdmsrl(MSR_IA32_MCG_CAP, cap); 446 banks = cap & 0xff; 447 + if (banks > NR_BANKS) { 448 printk(KERN_INFO "MCE: warning: using only %d banks\n", banks); 449 + banks = NR_BANKS; 450 } 451 /* Use accurate RIP reporting if available. */ 452 if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9) ··· 464 for (i = 0; i < banks; i++) { 465 wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); 466 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); 467 + } 468 } 469 470 /* Add per CPU specific workarounds here */ 471 static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) 472 + { 473 /* This should be disabled by the BIOS, but isn't always */ 474 if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) { 475 + /* disable GART TBL walk error reporting, which trips off 476 incorrectly with the IOMMU & 3ware & Cerberus. 
*/ 477 clear_bit(10, &bank[4]); 478 /* Lots of broken BIOS around that don't clear them ··· 480 mce_bootlog = 0; 481 } 482 483 + } 484 485 static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c) 486 { ··· 496 } 497 } 498 499 + /* 500 * Called for each booted CPU to set up machine checks. 501 + * Must be called with preempt off. 502 */ 503 void __cpuinit mcheck_init(struct cpuinfo_x86 *c) 504 { 505 static cpumask_t mce_cpus = CPU_MASK_NONE; 506 507 + mce_cpu_quirks(c); 508 509 if (mce_dont_init || 510 cpu_test_and_set(smp_processor_id(), mce_cpus) || ··· 553 return 0; 554 } 555 556 + static void collect_tscs(void *data) 557 + { 558 unsigned long *cpu_tsc = (unsigned long *)data; 559 560 + rdtscll(cpu_tsc[smp_processor_id()]); 561 + } 562 + 563 + static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, 564 + loff_t *off) 565 { 566 unsigned long *cpu_tsc; 567 static DECLARE_MUTEX(mce_read_sem); ··· 571 if (!cpu_tsc) 572 return -ENOMEM; 573 574 + down(&mce_read_sem); 575 next = rcu_dereference(mcelog.next); 576 577 /* Only supports full reads right now */ 578 + if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { 579 up(&mce_read_sem); 580 kfree(cpu_tsc); 581 return -EINVAL; 582 } 583 584 err = 0; 585 + for (i = 0; i < next; i++) { 586 unsigned long start = jiffies; 587 + 588 while (!mcelog.entry[i].finished) { 589 if (time_after_eq(jiffies, start + 2)) { 590 memset(mcelog.entry + i,0, sizeof(struct mce)); ··· 593 } 594 smp_rmb(); 595 err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce)); 596 + buf += sizeof(struct mce); 597 timeout: 598 ; 599 + } 600 601 memset(mcelog.entry, 0, next * sizeof(struct mce)); 602 mcelog.next = 0; 603 604 synchronize_sched(); 605 606 + /* 607 + * Collect entries that were still getting written before the 608 + * synchronize. 
609 + */ 610 on_each_cpu(collect_tscs, cpu_tsc, 1, 1); 611 + for (i = next; i < MCE_LOG_LEN; i++) { 612 + if (mcelog.entry[i].finished && 613 + mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { 614 + err |= copy_to_user(buf, mcelog.entry+i, 615 + sizeof(struct mce)); 616 smp_rmb(); 617 buf += sizeof(struct mce); 618 memset(&mcelog.entry[i], 0, sizeof(struct mce)); 619 } 620 + } 621 up(&mce_read_sem); 622 kfree(cpu_tsc); 623 + return err ? -EFAULT : buf - ubuf; 624 } 625 626 static unsigned int mce_poll(struct file *file, poll_table *wait) ··· 628 return 0; 629 } 630 631 + static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, 632 + unsigned long arg) 633 { 634 int __user *p = (int __user *)arg; 635 + 636 if (!capable(CAP_SYS_ADMIN)) 637 + return -EPERM; 638 switch (cmd) { 639 + case MCE_GET_RECORD_LEN: 640 return put_user(sizeof(struct mce), p); 641 case MCE_GET_LOG_LEN: 642 + return put_user(MCE_LOG_LEN, p); 643 case MCE_GETCLEAR_FLAGS: { 644 unsigned flags; 645 + 646 + do { 647 flags = mcelog.flags; 648 + } while (cmpxchg(&mcelog.flags, flags, 0) != flags); 649 + return put_user(flags, p); 650 } 651 default: 652 + return -ENOTTY; 653 + } 654 } 655 656 static const struct file_operations mce_chrdev_ops = { ··· 678 set_in_cr4(X86_CR4_MCE); 679 } 680 681 + /* 682 + * Old style boot options parsing. Only for compatibility. 683 */ 684 static int __init mcheck_disable(char *str) 685 { 686 mce_dont_init = 1; ··· 702 else if (isdigit(str[0])) 703 get_option(&str, &tolerant); 704 else 705 + printk("mce= argument %s ignored. Please use /sys", str); 706 return 1; 707 } 708 709 __setup("nomce", mcheck_disable); 710 __setup("mce=", mcheck_enable); 711 712 + /* 713 * Sysfs support 714 + */ 715 716 /* On resume clear all MCE state. Don't want to see leftovers from the BIOS. 
717 Only one CPU is active at this time, the others get readded later using ··· 723 } 724 725 /* Reinit MCEs after user configuration changes */ 726 + static void mce_restart(void) 727 + { 728 if (next_interval) 729 cancel_delayed_work(&mcheck_work); 730 /* Timer race is harmless here */ 731 + on_each_cpu(mce_init, NULL, 1, 1); 732 next_interval = check_interval * HZ; 733 if (next_interval) 734 schedule_delayed_work(&mcheck_work, ··· 744 745 /* Why are there no generic functions for this? */ 746 #define ACCESSOR(name, var, start) \ 747 + static ssize_t show_ ## name(struct sys_device *s, char *buf) { \ 748 + return sprintf(buf, "%lx\n", (unsigned long)var); \ 749 + } \ 750 static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \ 751 + char *end; \ 752 + unsigned long new = simple_strtoul(buf, &end, 0); \ 753 + if (end == buf) return -EINVAL; \ 754 + var = new; \ 755 + start; \ 756 + return end-buf; \ 757 + } \ 758 static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); 759 760 /* TBD should generate these dynamically based on number of available banks */