Merge branch 'ras-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull RAS fix from Thomas Gleixner:
"Fix a regression in the new AMD SMCA code which issues an SMP function
call from the early interrupt disabled region of CPU hotplug. To avoid
that, use cached block addresses which can be used directly"

* 'ras-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/MCE/AMD: Cache SMCA MISC block addresses

+14 -15
arch/x86/kernel/cpu/mcheck/mce_amd.c
··· 94 [SMCA_SMU] = { "smu", "System Management Unit" }, 95 }; 96 97 const char *smca_get_name(enum smca_bank_types t) 98 { 99 if (t >= N_SMCA_BANK_TYPES) ··· 448 if (!block) 449 return MSR_AMD64_SMCA_MCx_MISC(bank); 450 451 /* 452 * For SMCA enabled processors, BLKPTR field of the first MISC register 453 * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4). 454 */ 455 if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) 456 - return addr; 457 458 if (!(low & MCI_CONFIG_MCAX)) 459 - return addr; 460 461 if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && 462 (low & MASK_BLKPTR_LO)) 463 - return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); 464 465 return addr; 466 } 467 ··· 478 479 if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) 480 return addr; 481 - 482 - /* Get address from already initialized block. */ 483 - if (per_cpu(threshold_banks, cpu)) { 484 - struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank]; 485 - 486 - if (bankp && bankp->blocks) { 487 - struct threshold_block *blockp = &bankp->blocks[block]; 488 - 489 - if (blockp) 490 - return blockp->address; 491 - } 492 - } 493 494 if (mce_flags.smca) 495 return smca_get_block_address(cpu, bank, block);
··· 94 [SMCA_SMU] = { "smu", "System Management Unit" }, 95 }; 96 97 + static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init = 98 + { 99 + [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 } 100 + }; 101 + 102 const char *smca_get_name(enum smca_bank_types t) 103 { 104 if (t >= N_SMCA_BANK_TYPES) ··· 443 if (!block) 444 return MSR_AMD64_SMCA_MCx_MISC(bank); 445 446 + /* Check our cache first: */ 447 + if (smca_bank_addrs[bank][block] != -1) 448 + return smca_bank_addrs[bank][block]; 449 + 450 /* 451 * For SMCA enabled processors, BLKPTR field of the first MISC register 452 * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4). 453 */ 454 if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) 455 + goto out; 456 457 if (!(low & MCI_CONFIG_MCAX)) 458 + goto out; 459 460 if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && 461 (low & MASK_BLKPTR_LO)) 462 + addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); 463 464 + out: 465 + smca_bank_addrs[bank][block] = addr; 466 return addr; 467 } 468 ··· 467 468 if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) 469 return addr; 470 471 if (mce_flags.smca) 472 return smca_get_block_address(cpu, bank, block);