x86: whitespace cleanup of mce_64.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

+87 -79
arch/x86/kernel/cpu/mcheck/mce_64.c
··· 1 1 /* 2 2 * Machine check handler. 3 3 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. 4 - * Rest from unknown author(s). 5 - * 2004 Andi Kleen. Rewrote most of it. 4 + * Rest from unknown author(s). 5 + * 2004 Andi Kleen. Rewrote most of it. 6 6 */ 7 7 8 8 #include <linux/init.h> ··· 23 23 #include <linux/ctype.h> 24 24 #include <linux/kmod.h> 25 25 #include <linux/kdebug.h> 26 - #include <asm/processor.h> 26 + #include <asm/processor.h> 27 27 #include <asm/msr.h> 28 28 #include <asm/mce.h> 29 29 #include <asm/uaccess.h> ··· 63 63 * separate MCEs from kernel messages to avoid bogus bug reports. 64 64 */ 65 65 66 - struct mce_log mcelog = { 66 + struct mce_log mcelog = { 67 67 MCE_LOG_SIGNATURE, 68 68 MCE_LOG_LEN, 69 - }; 69 + }; 70 70 71 71 void mce_log(struct mce *mce) 72 72 { ··· 111 111 "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n", 112 112 m->cpu, m->mcgstatus, m->bank, m->status); 113 113 if (m->rip) { 114 - printk(KERN_EMERG 115 - "RIP%s %02x:<%016Lx> ", 114 + printk(KERN_EMERG "RIP%s %02x:<%016Lx> ", 116 115 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" 
: "", 117 116 m->cs, m->rip); 118 117 if (m->cs == __KERNEL_CS) 119 118 print_symbol("{%s}", m->rip); 120 119 printk("\n"); 121 120 } 122 - printk(KERN_EMERG "TSC %Lx ", m->tsc); 121 + printk(KERN_EMERG "TSC %Lx ", m->tsc); 123 122 if (m->addr) 124 123 printk("ADDR %Lx ", m->addr); 125 124 if (m->misc) 126 - printk("MISC %Lx ", m->misc); 125 + printk("MISC %Lx ", m->misc); 127 126 printk("\n"); 128 127 printk(KERN_EMERG "This is not a software problem!\n"); 129 - printk(KERN_EMERG 130 - "Run through mcelog --ascii to decode and contact your hardware vendor\n"); 128 + printk(KERN_EMERG "Run through mcelog --ascii to decode " 129 + "and contact your hardware vendor\n"); 131 130 } 132 131 133 132 static void mce_panic(char *msg, struct mce *backup, unsigned long start) 134 - { 133 + { 135 134 int i; 136 135 137 136 oops_begin(); 138 137 for (i = 0; i < MCE_LOG_LEN; i++) { 139 138 unsigned long tsc = mcelog.entry[i].tsc; 139 + 140 140 if (time_before(tsc, start)) 141 141 continue; 142 - print_mce(&mcelog.entry[i]); 142 + print_mce(&mcelog.entry[i]); 143 143 if (backup && mcelog.entry[i].tsc == backup->tsc) 144 144 backup = NULL; 145 145 } 146 146 if (backup) 147 147 print_mce(backup); 148 148 panic(msg); 149 - } 149 + } 150 150 151 151 static int mce_available(struct cpuinfo_x86 *c) 152 152 { ··· 170 170 } 171 171 } 172 172 173 - /* 173 + /* 174 174 * The actual machine check handler 175 175 */ 176 - 177 176 void do_machine_check(struct pt_regs * regs, long error_code) 178 177 { 179 178 struct mce m, panicm; ··· 193 194 atomic_inc(&mce_entry); 194 195 195 196 if (regs) 196 - notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL); 197 + notify_die(DIE_NMI, "machine check", regs, error_code, 18, 198 + SIGKILL); 197 199 if (!banks) 198 200 goto out2; 199 201 ··· 204 204 /* if the restart IP is not valid, we're done for */ 205 205 if (!(m.mcgstatus & MCG_STATUS_RIPV)) 206 206 no_way_out = 1; 207 - 207 + 208 208 rdtscll(mcestart); 209 209 barrier(); 210 210 211 
211 for (i = 0; i < banks; i++) { 212 212 if (!bank[i]) 213 213 continue; 214 - 215 - m.misc = 0; 214 + 215 + m.misc = 0; 216 216 m.addr = 0; 217 217 m.bank = i; 218 218 m.tsc = 0; ··· 372 372 if (mce_notify_user()) { 373 373 next_interval = max(next_interval/2, HZ/100); 374 374 } else { 375 - next_interval = min(next_interval*2, 375 + next_interval = min(next_interval * 2, 376 376 (int)round_jiffies_relative(check_interval*HZ)); 377 377 } 378 378 ··· 423 423 }; 424 424 425 425 static __init int periodic_mcheck_init(void) 426 - { 426 + { 427 427 next_interval = check_interval * HZ; 428 428 if (next_interval) 429 429 schedule_delayed_work(&mcheck_work, 430 430 round_jiffies_relative(next_interval)); 431 431 idle_notifier_register(&mce_idle_notifier); 432 432 return 0; 433 - } 433 + } 434 434 __initcall(periodic_mcheck_init); 435 435 436 436 437 - /* 437 + /* 438 438 * Initialize Machine Checks for a CPU. 439 439 */ 440 440 static void mce_init(void *dummy) ··· 444 444 445 445 rdmsrl(MSR_IA32_MCG_CAP, cap); 446 446 banks = cap & 0xff; 447 - if (banks > NR_BANKS) { 447 + if (banks > NR_BANKS) { 448 448 printk(KERN_INFO "MCE: warning: using only %d banks\n", banks); 449 - banks = NR_BANKS; 449 + banks = NR_BANKS; 450 450 } 451 451 /* Use accurate RIP reporting if available. */ 452 452 if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9) ··· 464 464 for (i = 0; i < banks; i++) { 465 465 wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); 466 466 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); 467 - } 467 + } 468 468 } 469 469 470 470 /* Add per CPU specific workarounds here */ 471 471 static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) 472 - { 472 + { 473 473 /* This should be disabled by the BIOS, but isn't always */ 474 474 if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) { 475 - /* disable GART TBL walk error reporting, which trips off 475 + /* disable GART TBL walk error reporting, which trips off 476 476 incorrectly with the IOMMU & 3ware & Cerberus. 
*/ 477 477 clear_bit(10, &bank[4]); 478 478 /* Lots of broken BIOS around that don't clear them ··· 480 480 mce_bootlog = 0; 481 481 } 482 482 483 - } 483 + } 484 484 485 485 static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c) 486 486 { ··· 496 496 } 497 497 } 498 498 499 - /* 499 + /* 500 500 * Called for each booted CPU to set up machine checks. 501 - * Must be called with preempt off. 501 + * Must be called with preempt off. 502 502 */ 503 503 void __cpuinit mcheck_init(struct cpuinfo_x86 *c) 504 504 { 505 505 static cpumask_t mce_cpus = CPU_MASK_NONE; 506 506 507 - mce_cpu_quirks(c); 507 + mce_cpu_quirks(c); 508 508 509 509 if (mce_dont_init || 510 510 cpu_test_and_set(smp_processor_id(), mce_cpus) || ··· 553 553 return 0; 554 554 } 555 555 556 - static void collect_tscs(void *data) 557 - { 556 + static void collect_tscs(void *data) 557 + { 558 558 unsigned long *cpu_tsc = (unsigned long *)data; 559 - rdtscll(cpu_tsc[smp_processor_id()]); 560 - } 561 559 562 - static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off) 560 + rdtscll(cpu_tsc[smp_processor_id()]); 561 + } 562 + 563 + static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, 564 + loff_t *off) 563 565 { 564 566 unsigned long *cpu_tsc; 565 567 static DECLARE_MUTEX(mce_read_sem); ··· 573 571 if (!cpu_tsc) 574 572 return -ENOMEM; 575 573 576 - down(&mce_read_sem); 574 + down(&mce_read_sem); 577 575 next = rcu_dereference(mcelog.next); 578 576 579 577 /* Only supports full reads right now */ 580 - if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { 578 + if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { 581 579 up(&mce_read_sem); 582 580 kfree(cpu_tsc); 583 581 return -EINVAL; 584 582 } 585 583 586 584 err = 0; 587 - for (i = 0; i < next; i++) { 585 + for (i = 0; i < next; i++) { 588 586 unsigned long start = jiffies; 587 + 589 588 while (!mcelog.entry[i].finished) { 590 589 if (time_after_eq(jiffies, start + 2)) { 591 590 
memset(mcelog.entry + i,0, sizeof(struct mce)); ··· 596 593 } 597 594 smp_rmb(); 598 595 err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce)); 599 - buf += sizeof(struct mce); 596 + buf += sizeof(struct mce); 600 597 timeout: 601 598 ; 602 - } 599 + } 603 600 604 601 memset(mcelog.entry, 0, next * sizeof(struct mce)); 605 602 mcelog.next = 0; 606 603 607 604 synchronize_sched(); 608 605 609 - /* Collect entries that were still getting written before the synchronize. */ 610 - 606 + /* 607 + * Collect entries that were still getting written before the 608 + * synchronize. 609 + */ 611 610 on_each_cpu(collect_tscs, cpu_tsc, 1, 1); 612 - for (i = next; i < MCE_LOG_LEN; i++) { 613 - if (mcelog.entry[i].finished && 614 - mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { 615 - err |= copy_to_user(buf, mcelog.entry+i, sizeof(struct mce)); 611 + for (i = next; i < MCE_LOG_LEN; i++) { 612 + if (mcelog.entry[i].finished && 613 + mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { 614 + err |= copy_to_user(buf, mcelog.entry+i, 615 + sizeof(struct mce)); 616 616 smp_rmb(); 617 617 buf += sizeof(struct mce); 618 618 memset(&mcelog.entry[i], 0, sizeof(struct mce)); 619 619 } 620 - } 620 + } 621 621 up(&mce_read_sem); 622 622 kfree(cpu_tsc); 623 - return err ? -EFAULT : buf - ubuf; 623 + return err ? 
-EFAULT : buf - ubuf; 624 624 } 625 625 626 626 static unsigned int mce_poll(struct file *file, poll_table *wait) ··· 634 628 return 0; 635 629 } 636 630 637 - static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg) 631 + static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, 632 + unsigned long arg) 638 633 { 639 634 int __user *p = (int __user *)arg; 635 + 640 636 if (!capable(CAP_SYS_ADMIN)) 641 - return -EPERM; 637 + return -EPERM; 642 638 switch (cmd) { 643 - case MCE_GET_RECORD_LEN: 639 + case MCE_GET_RECORD_LEN: 644 640 return put_user(sizeof(struct mce), p); 645 641 case MCE_GET_LOG_LEN: 646 - return put_user(MCE_LOG_LEN, p); 642 + return put_user(MCE_LOG_LEN, p); 647 643 case MCE_GETCLEAR_FLAGS: { 648 644 unsigned flags; 649 - do { 645 + 646 + do { 650 647 flags = mcelog.flags; 651 - } while (cmpxchg(&mcelog.flags, flags, 0) != flags); 652 - return put_user(flags, p); 648 + } while (cmpxchg(&mcelog.flags, flags, 0) != flags); 649 + return put_user(flags, p); 653 650 } 654 651 default: 655 - return -ENOTTY; 656 - } 652 + return -ENOTTY; 653 + } 657 654 } 658 655 659 656 static const struct file_operations mce_chrdev_ops = { ··· 687 678 set_in_cr4(X86_CR4_MCE); 688 679 } 689 680 690 - /* 691 - * Old style boot options parsing. Only for compatibility. 681 + /* 682 + * Old style boot options parsing. Only for compatibility. 692 683 */ 693 - 694 684 static int __init mcheck_disable(char *str) 695 685 { 696 686 mce_dont_init = 1; ··· 710 702 else if (isdigit(str[0])) 711 703 get_option(&str, &tolerant); 712 704 else 713 - printk("mce= argument %s ignored. Please use /sys", str); 705 + printk("mce= argument %s ignored. Please use /sys", str); 714 706 return 1; 715 707 } 716 708 717 709 __setup("nomce", mcheck_disable); 718 710 __setup("mce=", mcheck_enable); 719 711 720 - /* 712 + /* 721 713 * Sysfs support 722 - */ 714 + */ 723 715 724 716 /* On resume clear all MCE state. 
Don't want to see leftovers from the BIOS. 725 717 Only one CPU is active at this time, the others get readded later using ··· 731 723 } 732 724 733 725 /* Reinit MCEs after user configuration changes */ 734 - static void mce_restart(void) 735 - { 726 + static void mce_restart(void) 727 + { 736 728 if (next_interval) 737 729 cancel_delayed_work(&mcheck_work); 738 730 /* Timer race is harmless here */ 739 - on_each_cpu(mce_init, NULL, 1, 1); 731 + on_each_cpu(mce_init, NULL, 1, 1); 740 732 next_interval = check_interval * HZ; 741 733 if (next_interval) 742 734 schedule_delayed_work(&mcheck_work, ··· 752 744 753 745 /* Why are there no generic functions for this? */ 754 746 #define ACCESSOR(name, var, start) \ 755 - static ssize_t show_ ## name(struct sys_device *s, char *buf) { \ 756 - return sprintf(buf, "%lx\n", (unsigned long)var); \ 757 - } \ 747 + static ssize_t show_ ## name(struct sys_device *s, char *buf) { \ 748 + return sprintf(buf, "%lx\n", (unsigned long)var); \ 749 + } \ 758 750 static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \ 759 - char *end; \ 760 - unsigned long new = simple_strtoul(buf, &end, 0); \ 761 - if (end == buf) return -EINVAL; \ 762 - var = new; \ 763 - start; \ 764 - return end-buf; \ 765 - } \ 751 + char *end; \ 752 + unsigned long new = simple_strtoul(buf, &end, 0); \ 753 + if (end == buf) return -EINVAL; \ 754 + var = new; \ 755 + start; \ 756 + return end-buf; \ 757 + } \ 766 758 static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); 767 759 768 760 /* TBD should generate these dynamically based on number of available banks */