Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 RAS update from Ingo Molnar:
"Rework all config variables used throughout the MCA code and collect
them together into a mca_config struct. This keeps them tightly and
neatly packed together instead of spilled all over the place.

Then, convert those which are used as booleans into real booleans and
save some space. These bits are exposed via
/sys/devices/system/machinecheck/machinecheck*/"

* 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86, MCA: Finish mca_config conversion
x86, MCA: Convert the next three variables batch
x86, MCA: Convert rip_msr, mce_bootlog, monarch_timeout
x86, MCA: Convert dont_log_ce, banks and tolerant
drivers/base: Add a DEVICE_BOOL_ATTR macro

+158 -114
+17 -4
arch/x86/include/asm/mce.h
··· 119 119 #define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) 120 120 121 121 #ifdef __KERNEL__ 122 + 123 + struct mca_config { 124 + bool dont_log_ce; 125 + bool cmci_disabled; 126 + bool ignore_ce; 127 + bool disabled; 128 + bool ser; 129 + bool bios_cmci_threshold; 130 + u8 banks; 131 + s8 bootlog; 132 + int tolerant; 133 + int monarch_timeout; 134 + int panic_timeout; 135 + u32 rip_msr; 136 + }; 137 + 138 + extern struct mca_config mca_cfg; 122 139 extern void mce_register_decode_chain(struct notifier_block *nb); 123 140 extern void mce_unregister_decode_chain(struct notifier_block *nb); 124 141 ··· 143 126 #include <linux/init.h> 144 127 #include <linux/atomic.h> 145 128 146 - extern int mce_disabled; 147 129 extern int mce_p5_enabled; 148 130 149 131 #ifdef CONFIG_X86_MCE ··· 175 159 #define MAX_NR_BANKS 32 176 160 177 161 #ifdef CONFIG_X86_MCE_INTEL 178 - extern int mce_cmci_disabled; 179 - extern int mce_ignore_ce; 180 - extern int mce_bios_cmci_threshold; 181 162 void mce_intel_feature_init(struct cpuinfo_x86 *c); 182 163 void cmci_clear(void); 183 164 void cmci_reenable(void);
-2
arch/x86/kernel/cpu/mcheck/mce-internal.h
··· 24 24 int mce_severity(struct mce *a, int tolerant, char **msg); 25 25 struct dentry *mce_get_debugfs_dir(void); 26 26 27 - extern int mce_ser; 28 - 29 27 extern struct mce_bank *mce_banks; 30 28 31 29 #ifdef CONFIG_X86_MCE_INTEL
+2 -2
arch/x86/kernel/cpu/mcheck/mce-severity.c
··· 193 193 continue; 194 194 if ((m->mcgstatus & s->mcgmask) != s->mcgres) 195 195 continue; 196 - if (s->ser == SER_REQUIRED && !mce_ser) 196 + if (s->ser == SER_REQUIRED && !mca_cfg.ser) 197 197 continue; 198 - if (s->ser == NO_SER && mce_ser) 198 + if (s->ser == NO_SER && mca_cfg.ser) 199 199 continue; 200 200 if (s->context && ctx != s->context) 201 201 continue;
+106 -101
arch/x86/kernel/cpu/mcheck/mce.c
··· 58 58 #define CREATE_TRACE_POINTS 59 59 #include <trace/events/mce.h> 60 60 61 - int mce_disabled __read_mostly; 62 - 63 61 #define SPINUNIT 100 /* 100ns */ 64 62 65 63 atomic_t mce_entry; 66 64 67 65 DEFINE_PER_CPU(unsigned, mce_exception_count); 68 66 69 - /* 70 - * Tolerant levels: 71 - * 0: always panic on uncorrected errors, log corrected errors 72 - * 1: panic or SIGBUS on uncorrected errors, log corrected errors 73 - * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors 74 - * 3: never panic or SIGBUS, log all errors (for testing only) 75 - */ 76 - static int tolerant __read_mostly = 1; 77 - static int banks __read_mostly; 78 - static int rip_msr __read_mostly; 79 - static int mce_bootlog __read_mostly = -1; 80 - static int monarch_timeout __read_mostly = -1; 81 - static int mce_panic_timeout __read_mostly; 82 - static int mce_dont_log_ce __read_mostly; 83 - int mce_cmci_disabled __read_mostly; 84 - int mce_ignore_ce __read_mostly; 85 - int mce_ser __read_mostly; 86 - int mce_bios_cmci_threshold __read_mostly; 67 + struct mce_bank *mce_banks __read_mostly; 87 68 88 - struct mce_bank *mce_banks __read_mostly; 69 + struct mca_config mca_cfg __read_mostly = { 70 + .bootlog = -1, 71 + /* 72 + * Tolerant levels: 73 + * 0: always panic on uncorrected errors, log corrected errors 74 + * 1: panic or SIGBUS on uncorrected errors, log corrected errors 75 + * 2: SIGBUS or log uncorrected errors (if possible), log corr. 
errors 76 + * 3: never panic or SIGBUS, log all errors (for testing only) 77 + */ 78 + .tolerant = 1, 79 + .monarch_timeout = -1 80 + }; 89 81 90 82 /* User mode helper program triggered by machine check event */ 91 83 static unsigned long mce_need_notify; ··· 294 302 while (timeout-- > 0) 295 303 udelay(1); 296 304 if (panic_timeout == 0) 297 - panic_timeout = mce_panic_timeout; 305 + panic_timeout = mca_cfg.panic_timeout; 298 306 panic("Panicing machine check CPU died"); 299 307 } 300 308 ··· 352 360 pr_emerg(HW_ERR "Machine check: %s\n", exp); 353 361 if (!fake_panic) { 354 362 if (panic_timeout == 0) 355 - panic_timeout = mce_panic_timeout; 363 + panic_timeout = mca_cfg.panic_timeout; 356 364 panic(msg); 357 365 } else 358 366 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg); ··· 364 372 { 365 373 unsigned bank = __this_cpu_read(injectm.bank); 366 374 367 - if (msr == rip_msr) 375 + if (msr == mca_cfg.rip_msr) 368 376 return offsetof(struct mce, ip); 369 377 if (msr == MSR_IA32_MCx_STATUS(bank)) 370 378 return offsetof(struct mce, status); ··· 443 451 m->cs |= 3; 444 452 } 445 453 /* Use accurate RIP reporting if available. */ 446 - if (rip_msr) 447 - m->ip = mce_rdmsrl(rip_msr); 454 + if (mca_cfg.rip_msr) 455 + m->ip = mce_rdmsrl(mca_cfg.rip_msr); 448 456 } 449 457 } 450 458 ··· 505 513 506 514 int mce_available(struct cpuinfo_x86 *c) 507 515 { 508 - if (mce_disabled) 516 + if (mca_cfg.disabled) 509 517 return 0; 510 518 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); 511 519 } ··· 557 565 /* 558 566 * Mask the reported address by the reported granularity. 
559 567 */ 560 - if (mce_ser && (m->status & MCI_STATUS_MISCV)) { 568 + if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) { 561 569 u8 shift = MCI_MISC_ADDR_LSB(m->misc); 562 570 m->addr >>= shift; 563 571 m->addr <<= shift; ··· 591 599 592 600 mce_gather_info(&m, NULL); 593 601 594 - for (i = 0; i < banks; i++) { 602 + for (i = 0; i < mca_cfg.banks; i++) { 595 603 if (!mce_banks[i].ctl || !test_bit(i, *b)) 596 604 continue; 597 605 ··· 612 620 * TBD do the same check for MCI_STATUS_EN here? 613 621 */ 614 622 if (!(flags & MCP_UC) && 615 - (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC))) 623 + (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC))) 616 624 continue; 617 625 618 626 mce_read_aux(&m, i); ··· 623 631 * Don't get the IP here because it's unlikely to 624 632 * have anything to do with the actual error location. 625 633 */ 626 - if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) 634 + if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce) 627 635 mce_log(&m); 628 636 629 637 /* ··· 650 658 { 651 659 int i, ret = 0; 652 660 653 - for (i = 0; i < banks; i++) { 661 + for (i = 0; i < mca_cfg.banks; i++) { 654 662 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); 655 663 if (m->status & MCI_STATUS_VAL) { 656 664 __set_bit(i, validp); 657 665 if (quirk_no_way_out) 658 666 quirk_no_way_out(i, m, regs); 659 667 } 660 - if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY) 668 + if (mce_severity(m, mca_cfg.tolerant, msg) >= MCE_PANIC_SEVERITY) 661 669 ret = 1; 662 670 } 663 671 return ret; ··· 688 696 rmb(); 689 697 if (atomic_read(&mce_paniced)) 690 698 wait_for_panic(); 691 - if (!monarch_timeout) 699 + if (!mca_cfg.monarch_timeout) 692 700 goto out; 693 701 if ((s64)*t < SPINUNIT) { 694 702 /* CHECKME: Make panic default for 1 too? 
*/ 695 - if (tolerant < 1) 703 + if (mca_cfg.tolerant < 1) 696 704 mce_panic("Timeout synchronizing machine check over CPUs", 697 705 NULL, NULL); 698 706 cpu_missing = 1; ··· 742 750 * Grade the severity of the errors of all the CPUs. 743 751 */ 744 752 for_each_possible_cpu(cpu) { 745 - int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant, 753 + int severity = mce_severity(&per_cpu(mces_seen, cpu), 754 + mca_cfg.tolerant, 746 755 &nmsg); 747 756 if (severity > global_worst) { 748 757 msg = nmsg; ··· 757 764 * This dumps all the mces in the log buffer and stops the 758 765 * other CPUs. 759 766 */ 760 - if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3) 767 + if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) 761 768 mce_panic("Fatal Machine check", m, msg); 762 769 763 770 /* ··· 770 777 * No machine check event found. Must be some external 771 778 * source or one CPU is hung. Panic. 772 779 */ 773 - if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3) 780 + if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3) 774 781 mce_panic("Machine check from unknown source", NULL, NULL); 775 782 776 783 /* ··· 794 801 { 795 802 int order; 796 803 int cpus = num_online_cpus(); 797 - u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC; 804 + u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; 798 805 799 806 if (!timeout) 800 807 return -1; ··· 858 865 static int mce_end(int order) 859 866 { 860 867 int ret = -1; 861 - u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC; 868 + u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; 862 869 863 870 if (!timeout) 864 871 goto reset; ··· 939 946 { 940 947 int i; 941 948 942 - for (i = 0; i < banks; i++) { 949 + for (i = 0; i < mca_cfg.banks; i++) { 943 950 if (test_bit(i, toclear)) 944 951 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0); 945 952 } ··· 1004 1011 */ 1005 1012 void do_machine_check(struct pt_regs *regs, long error_code) 1006 1013 { 1014 + struct mca_config *cfg = 
&mca_cfg; 1007 1015 struct mce m, *final; 1008 1016 int i; 1009 1017 int worst = 0; ··· 1016 1022 int order; 1017 1023 /* 1018 1024 * If no_way_out gets set, there is no safe way to recover from this 1019 - * MCE. If tolerant is cranked up, we'll try anyway. 1025 + * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway. 1020 1026 */ 1021 1027 int no_way_out = 0; 1022 1028 /* ··· 1032 1038 1033 1039 this_cpu_inc(mce_exception_count); 1034 1040 1035 - if (!banks) 1041 + if (!cfg->banks) 1036 1042 goto out; 1037 1043 1038 1044 mce_gather_info(&m, regs); ··· 1059 1065 * because the first one to see it will clear it. 1060 1066 */ 1061 1067 order = mce_start(&no_way_out); 1062 - for (i = 0; i < banks; i++) { 1068 + for (i = 0; i < cfg->banks; i++) { 1063 1069 __clear_bit(i, toclear); 1064 1070 if (!test_bit(i, valid_banks)) 1065 1071 continue; ··· 1078 1084 * Non uncorrected or non signaled errors are handled by 1079 1085 * machine_check_poll. Leave them alone, unless this panics. 1080 1086 */ 1081 - if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) && 1087 + if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) && 1082 1088 !no_way_out) 1083 1089 continue; 1084 1090 ··· 1087 1093 */ 1088 1094 add_taint(TAINT_MACHINE_CHECK); 1089 1095 1090 - severity = mce_severity(&m, tolerant, NULL); 1096 + severity = mce_severity(&m, cfg->tolerant, NULL); 1091 1097 1092 1098 /* 1093 1099 * When machine check was for corrected handler don't touch, ··· 1111 1117 * When the ring overflows we just ignore the AO error. 1112 1118 * RED-PEN add some logging mechanism when 1113 1119 * usable_address or mce_add_ring fails. 1114 - * RED-PEN don't ignore overflow for tolerant == 0 1120 + * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0 1115 1121 */ 1116 1122 if (severity == MCE_AO_SEVERITY && mce_usable_address(&m)) 1117 1123 mce_ring_add(m.addr >> PAGE_SHIFT); ··· 1143 1149 * issues we try to recover, or limit damage to the current 1144 1150 * process. 
1145 1151 */ 1146 - if (tolerant < 3) { 1152 + if (cfg->tolerant < 3) { 1147 1153 if (no_way_out) 1148 1154 mce_panic("Fatal machine check on current CPU", &m, msg); 1149 1155 if (worst == MCE_AR_SEVERITY) { ··· 1371 1377 static int __cpuinit __mcheck_cpu_mce_banks_init(void) 1372 1378 { 1373 1379 int i; 1380 + u8 num_banks = mca_cfg.banks; 1374 1381 1375 - mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL); 1382 + mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL); 1376 1383 if (!mce_banks) 1377 1384 return -ENOMEM; 1378 - for (i = 0; i < banks; i++) { 1385 + 1386 + for (i = 0; i < num_banks; i++) { 1379 1387 struct mce_bank *b = &mce_banks[i]; 1380 1388 1381 1389 b->ctl = -1ULL; ··· 1397 1401 rdmsrl(MSR_IA32_MCG_CAP, cap); 1398 1402 1399 1403 b = cap & MCG_BANKCNT_MASK; 1400 - if (!banks) 1404 + if (!mca_cfg.banks) 1401 1405 pr_info("CPU supports %d MCE banks\n", b); 1402 1406 1403 1407 if (b > MAX_NR_BANKS) { ··· 1407 1411 } 1408 1412 1409 1413 /* Don't support asymmetric configurations today */ 1410 - WARN_ON(banks != 0 && b != banks); 1411 - banks = b; 1414 + WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks); 1415 + mca_cfg.banks = b; 1416 + 1412 1417 if (!mce_banks) { 1413 1418 int err = __mcheck_cpu_mce_banks_init(); 1414 1419 ··· 1419 1422 1420 1423 /* Use accurate RIP reporting if available. */ 1421 1424 if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9) 1422 - rip_msr = MSR_IA32_MCG_EIP; 1425 + mca_cfg.rip_msr = MSR_IA32_MCG_EIP; 1423 1426 1424 1427 if (cap & MCG_SER_P) 1425 - mce_ser = 1; 1428 + mca_cfg.ser = true; 1426 1429 1427 1430 return 0; 1428 1431 } 1429 1432 1430 1433 static void __mcheck_cpu_init_generic(void) 1431 1434 { 1435 + enum mcp_flags m_fl = 0; 1432 1436 mce_banks_t all_banks; 1433 1437 u64 cap; 1434 1438 int i; 1439 + 1440 + if (!mca_cfg.bootlog) 1441 + m_fl = MCP_DONTLOG; 1435 1442 1436 1443 /* 1437 1444 * Log the machine checks left over from the previous reset. 
1438 1445 */ 1439 1446 bitmap_fill(all_banks, MAX_NR_BANKS); 1440 - machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks); 1447 + machine_check_poll(MCP_UC | m_fl, &all_banks); 1441 1448 1442 1449 set_in_cr4(X86_CR4_MCE); 1443 1450 ··· 1449 1448 if (cap & MCG_CTL_P) 1450 1449 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); 1451 1450 1452 - for (i = 0; i < banks; i++) { 1451 + for (i = 0; i < mca_cfg.banks; i++) { 1453 1452 struct mce_bank *b = &mce_banks[i]; 1454 1453 1455 1454 if (!b->init) ··· 1490 1489 /* Add per CPU specific workarounds here */ 1491 1490 static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) 1492 1491 { 1492 + struct mca_config *cfg = &mca_cfg; 1493 + 1493 1494 if (c->x86_vendor == X86_VENDOR_UNKNOWN) { 1494 1495 pr_info("unknown CPU type - not enabling MCE support\n"); 1495 1496 return -EOPNOTSUPP; ··· 1499 1496 1500 1497 /* This should be disabled by the BIOS, but isn't always */ 1501 1498 if (c->x86_vendor == X86_VENDOR_AMD) { 1502 - if (c->x86 == 15 && banks > 4) { 1499 + if (c->x86 == 15 && cfg->banks > 4) { 1503 1500 /* 1504 1501 * disable GART TBL walk error reporting, which 1505 1502 * trips off incorrectly with the IOMMU & 3ware ··· 1507 1504 */ 1508 1505 clear_bit(10, (unsigned long *)&mce_banks[4].ctl); 1509 1506 } 1510 - if (c->x86 <= 17 && mce_bootlog < 0) { 1507 + if (c->x86 <= 17 && cfg->bootlog < 0) { 1511 1508 /* 1512 1509 * Lots of broken BIOS around that don't clear them 1513 1510 * by default and leave crap in there. Don't log: 1514 1511 */ 1515 - mce_bootlog = 0; 1512 + cfg->bootlog = 0; 1516 1513 } 1517 1514 /* 1518 1515 * Various K7s with broken bank 0 around. Always disable 1519 1516 * by default. 1520 1517 */ 1521 - if (c->x86 == 6 && banks > 0) 1518 + if (c->x86 == 6 && cfg->banks > 0) 1522 1519 mce_banks[0].ctl = 0; 1523 1520 1524 1521 /* ··· 1569 1566 * valid event later, merely don't write CTL0. 
1570 1567 */ 1571 1568 1572 - if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0) 1569 + if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0) 1573 1570 mce_banks[0].init = 0; 1574 1571 1575 1572 /* ··· 1577 1574 * synchronization with a one second timeout. 1578 1575 */ 1579 1576 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) && 1580 - monarch_timeout < 0) 1581 - monarch_timeout = USEC_PER_SEC; 1577 + cfg->monarch_timeout < 0) 1578 + cfg->monarch_timeout = USEC_PER_SEC; 1582 1579 1583 1580 /* 1584 1581 * There are also broken BIOSes on some Pentium M and 1585 1582 * earlier systems: 1586 1583 */ 1587 - if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0) 1588 - mce_bootlog = 0; 1584 + if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0) 1585 + cfg->bootlog = 0; 1589 1586 1590 1587 if (c->x86 == 6 && c->x86_model == 45) 1591 1588 quirk_no_way_out = quirk_sandybridge_ifu; 1592 1589 } 1593 - if (monarch_timeout < 0) 1594 - monarch_timeout = 0; 1595 - if (mce_bootlog != 0) 1596 - mce_panic_timeout = 30; 1590 + if (cfg->monarch_timeout < 0) 1591 + cfg->monarch_timeout = 0; 1592 + if (cfg->bootlog != 0) 1593 + cfg->panic_timeout = 30; 1597 1594 1598 1595 return 0; 1599 1596 } ··· 1638 1635 1639 1636 __this_cpu_write(mce_next_interval, iv); 1640 1637 1641 - if (mce_ignore_ce || !iv) 1638 + if (mca_cfg.ignore_ce || !iv) 1642 1639 return; 1643 1640 1644 1641 t->expires = round_jiffies(jiffies + iv); ··· 1671 1668 */ 1672 1669 void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) 1673 1670 { 1674 - if (mce_disabled) 1671 + if (mca_cfg.disabled) 1675 1672 return; 1676 1673 1677 1674 if (__mcheck_cpu_ancient_init(c)) ··· 1681 1678 return; 1682 1679 1683 1680 if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) { 1684 - mce_disabled = 1; 1681 + mca_cfg.disabled = true; 1685 1682 return; 1686 1683 } 1687 1684 ··· 1954 1951 */ 1955 1952 static int __init mcheck_enable(char *str) 1956 1953 { 1954 + struct mca_config *cfg = &mca_cfg; 1955 + 
1957 1956 if (*str == 0) { 1958 1957 enable_p5_mce(); 1959 1958 return 1; ··· 1963 1958 if (*str == '=') 1964 1959 str++; 1965 1960 if (!strcmp(str, "off")) 1966 - mce_disabled = 1; 1961 + cfg->disabled = true; 1967 1962 else if (!strcmp(str, "no_cmci")) 1968 - mce_cmci_disabled = 1; 1963 + cfg->cmci_disabled = true; 1969 1964 else if (!strcmp(str, "dont_log_ce")) 1970 - mce_dont_log_ce = 1; 1965 + cfg->dont_log_ce = true; 1971 1966 else if (!strcmp(str, "ignore_ce")) 1972 - mce_ignore_ce = 1; 1967 + cfg->ignore_ce = true; 1973 1968 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) 1974 - mce_bootlog = (str[0] == 'b'); 1969 + cfg->bootlog = (str[0] == 'b'); 1975 1970 else if (!strcmp(str, "bios_cmci_threshold")) 1976 - mce_bios_cmci_threshold = 1; 1971 + cfg->bios_cmci_threshold = true; 1977 1972 else if (isdigit(str[0])) { 1978 - get_option(&str, &tolerant); 1973 + get_option(&str, &(cfg->tolerant)); 1979 1974 if (*str == ',') { 1980 1975 ++str; 1981 - get_option(&str, &monarch_timeout); 1976 + get_option(&str, &(cfg->monarch_timeout)); 1982 1977 } 1983 1978 } else { 1984 1979 pr_info("mce argument %s ignored. 
Please use /sys\n", str); ··· 2007 2002 { 2008 2003 int i; 2009 2004 2010 - for (i = 0; i < banks; i++) { 2005 + for (i = 0; i < mca_cfg.banks; i++) { 2011 2006 struct mce_bank *b = &mce_banks[i]; 2012 2007 2013 2008 if (b->init) ··· 2147 2142 if (strict_strtoull(buf, 0, &new) < 0) 2148 2143 return -EINVAL; 2149 2144 2150 - if (mce_ignore_ce ^ !!new) { 2145 + if (mca_cfg.ignore_ce ^ !!new) { 2151 2146 if (new) { 2152 2147 /* disable ce features */ 2153 2148 mce_timer_delete_all(); 2154 2149 on_each_cpu(mce_disable_cmci, NULL, 1); 2155 - mce_ignore_ce = 1; 2150 + mca_cfg.ignore_ce = true; 2156 2151 } else { 2157 2152 /* enable ce features */ 2158 - mce_ignore_ce = 0; 2153 + mca_cfg.ignore_ce = false; 2159 2154 on_each_cpu(mce_enable_ce, (void *)1, 1); 2160 2155 } 2161 2156 } ··· 2171 2166 if (strict_strtoull(buf, 0, &new) < 0) 2172 2167 return -EINVAL; 2173 2168 2174 - if (mce_cmci_disabled ^ !!new) { 2169 + if (mca_cfg.cmci_disabled ^ !!new) { 2175 2170 if (new) { 2176 2171 /* disable cmci */ 2177 2172 on_each_cpu(mce_disable_cmci, NULL, 1); 2178 - mce_cmci_disabled = 1; 2173 + mca_cfg.cmci_disabled = true; 2179 2174 } else { 2180 2175 /* enable cmci */ 2181 - mce_cmci_disabled = 0; 2176 + mca_cfg.cmci_disabled = false; 2182 2177 on_each_cpu(mce_enable_ce, NULL, 1); 2183 2178 } 2184 2179 } ··· 2195 2190 } 2196 2191 2197 2192 static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger); 2198 - static DEVICE_INT_ATTR(tolerant, 0644, tolerant); 2199 - static DEVICE_INT_ATTR(monarch_timeout, 0644, monarch_timeout); 2200 - static DEVICE_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce); 2193 + static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant); 2194 + static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout); 2195 + static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce); 2201 2196 2202 2197 static struct dev_ext_attribute dev_attr_check_interval = { 2203 2198 __ATTR(check_interval, 0644, device_show_int, store_int_with_restart), ··· 2205 2200 }; 
2206 2201 2207 2202 static struct dev_ext_attribute dev_attr_ignore_ce = { 2208 - __ATTR(ignore_ce, 0644, device_show_int, set_ignore_ce), 2209 - &mce_ignore_ce 2203 + __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce), 2204 + &mca_cfg.ignore_ce 2210 2205 }; 2211 2206 2212 2207 static struct dev_ext_attribute dev_attr_cmci_disabled = { 2213 - __ATTR(cmci_disabled, 0644, device_show_int, set_cmci_disabled), 2214 - &mce_cmci_disabled 2208 + __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled), 2209 + &mca_cfg.cmci_disabled 2215 2210 }; 2216 2211 2217 2212 static struct device_attribute *mce_device_attrs[] = { ··· 2258 2253 if (err) 2259 2254 goto error; 2260 2255 } 2261 - for (j = 0; j < banks; j++) { 2256 + for (j = 0; j < mca_cfg.banks; j++) { 2262 2257 err = device_create_file(dev, &mce_banks[j].attr); 2263 2258 if (err) 2264 2259 goto error2; ··· 2290 2285 for (i = 0; mce_device_attrs[i]; i++) 2291 2286 device_remove_file(dev, mce_device_attrs[i]); 2292 2287 2293 - for (i = 0; i < banks; i++) 2288 + for (i = 0; i < mca_cfg.banks; i++) 2294 2289 device_remove_file(dev, &mce_banks[i].attr); 2295 2290 2296 2291 device_unregister(dev); ··· 2309 2304 2310 2305 if (!(action & CPU_TASKS_FROZEN)) 2311 2306 cmci_clear(); 2312 - for (i = 0; i < banks; i++) { 2307 + for (i = 0; i < mca_cfg.banks; i++) { 2313 2308 struct mce_bank *b = &mce_banks[i]; 2314 2309 2315 2310 if (b->init) ··· 2327 2322 2328 2323 if (!(action & CPU_TASKS_FROZEN)) 2329 2324 cmci_reenable(); 2330 - for (i = 0; i < banks; i++) { 2325 + for (i = 0; i < mca_cfg.banks; i++) { 2331 2326 struct mce_bank *b = &mce_banks[i]; 2332 2327 2333 2328 if (b->init) ··· 2380 2375 { 2381 2376 int i; 2382 2377 2383 - for (i = 0; i < banks; i++) { 2378 + for (i = 0; i < mca_cfg.banks; i++) { 2384 2379 struct mce_bank *b = &mce_banks[i]; 2385 2380 struct device_attribute *a = &b->attr; 2386 2381 ··· 2431 2426 */ 2432 2427 static int __init mcheck_disable(char *str) 2433 2428 { 2434 - mce_disabled = 1; 
2429 + mca_cfg.disabled = true; 2435 2430 return 1; 2436 2431 } 2437 2432 __setup("nomce", mcheck_disable);
+4 -4
arch/x86/kernel/cpu/mcheck/mce_intel.c
··· 53 53 { 54 54 u64 cap; 55 55 56 - if (mce_cmci_disabled || mce_ignore_ce) 56 + if (mca_cfg.cmci_disabled || mca_cfg.ignore_ce) 57 57 return 0; 58 58 59 59 /* ··· 200 200 continue; 201 201 } 202 202 203 - if (!mce_bios_cmci_threshold) { 203 + if (!mca_cfg.bios_cmci_threshold) { 204 204 val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK; 205 205 val |= CMCI_THRESHOLD; 206 206 } else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) { ··· 227 227 * set the thresholds properly or does not work with 228 228 * this boot option. Note down now and report later. 229 229 */ 230 - if (mce_bios_cmci_threshold && bios_zero_thresh && 230 + if (mca_cfg.bios_cmci_threshold && bios_zero_thresh && 231 231 (val & MCI_CTL2_CMCI_THRESHOLD_MASK)) 232 232 bios_wrong_thresh = 1; 233 233 } else { ··· 235 235 } 236 236 } 237 237 raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); 238 - if (mce_bios_cmci_threshold && bios_wrong_thresh) { 238 + if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) { 239 239 pr_info_once( 240 240 "bios_cmci_threshold: Some banks do not have valid thresholds set\n"); 241 241 pr_info_once(
+1 -1
arch/x86/lguest/boot.c
··· 1412 1412 1413 1413 /* We don't have features. We have puppies! Puppies! */ 1414 1414 #ifdef CONFIG_X86_MCE 1415 - mce_disabled = 1; 1415 + mca_cfg.disabled = true; 1416 1416 #endif 1417 1417 #ifdef CONFIG_ACPI 1418 1418 acpi_disabled = 1;
+21
drivers/base/core.c
··· 171 171 } 172 172 EXPORT_SYMBOL_GPL(device_show_int); 173 173 174 + ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, 175 + const char *buf, size_t size) 176 + { 177 + struct dev_ext_attribute *ea = to_ext_attr(attr); 178 + 179 + if (strtobool(buf, ea->var) < 0) 180 + return -EINVAL; 181 + 182 + return size; 183 + } 184 + EXPORT_SYMBOL_GPL(device_store_bool); 185 + 186 + ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, 187 + char *buf) 188 + { 189 + struct dev_ext_attribute *ea = to_ext_attr(attr); 190 + 191 + return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var)); 192 + } 193 + EXPORT_SYMBOL_GPL(device_show_bool); 194 + 174 195 /** 175 196 * device_release - free device structure. 176 197 * @kobj: device's kobject.
+7
include/linux/device.h
··· 498 498 char *buf); 499 499 ssize_t device_store_int(struct device *dev, struct device_attribute *attr, 500 500 const char *buf, size_t count); 501 + ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, 502 + char *buf); 503 + ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, 504 + const char *buf, size_t count); 501 505 502 506 #define DEVICE_ATTR(_name, _mode, _show, _store) \ 503 507 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) ··· 511 507 #define DEVICE_INT_ATTR(_name, _mode, _var) \ 512 508 struct dev_ext_attribute dev_attr_##_name = \ 513 509 { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) } 510 + #define DEVICE_BOOL_ATTR(_name, _mode, _var) \ 511 + struct dev_ext_attribute dev_attr_##_name = \ 512 + { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) } 514 513 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ 515 514 struct device_attribute dev_attr_##_name = \ 516 515 __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)