Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/book3s: Recover from MC in sapphire on SCOM read via MMIO.

Detect and recover from a machine check that occurs inside OPAL on special
SCOM load instructions. On a specific SCOM read via MMIO we may get a machine
check exception with SRR0 pointing inside OPAL. To recover from the MC in
this scenario, look up a recovery instruction address for the faulting
location and return to it from the MC handler.

OPAL exports the machine check recoverable ranges through the
device-tree node mcheck-recoverable-ranges under ibm,opal:

# hexdump /proc/device-tree/ibm,opal/mcheck-recoverable-ranges
0000000 0000 0000 3000 2804 0000 000c 0000 0000
0000010 3000 2814 0000 0000 3000 27f0 0000 000c
0000020 0000 0000 3000 2814 xxxx xxxx xxxx xxxx
0000030 llll llll yyyy yyyy yyyy yyyy
...
...
#

where:
xxxx xxxx xxxx xxxx = Starting instruction address
llll llll = Length of the address range.
yyyy yyyy yyyy yyyy = recovery address

Each recoverable address range entry is (start address, len,
recovery address), 2 cells each for start and recovery address, 1 cell for
len, totalling 5 cells per entry. During kernel boot time, build up the
recovery table with the list of recovery ranges from device-tree node which
will be used during machine check exception to recover from MMIO SCOM UE.

Signed-off-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

authored by

Mahesh Salgaonkar and committed by
Benjamin Herrenschmidt
55672ecf d2a36071

+146 -10
+3
arch/powerpc/include/asm/machdep.h
··· 170 170 int (*system_reset_exception)(struct pt_regs *regs); 171 171 int (*machine_check_exception)(struct pt_regs *regs); 172 172 173 + /* Called during machine check exception to retrive fixup address. */ 174 + bool (*mce_check_early_recovery)(struct pt_regs *regs); 175 + 173 176 /* Motherboard/chipset features. This is a kind of general purpose 174 177 * hook used to control some machine specific features (like reset 175 178 * lines, chip power control, etc...).
+2 -1
arch/powerpc/include/asm/mce.h
··· 187 187 #define MCE_EVENT_DONTRELEASE false 188 188 189 189 extern void save_mce_event(struct pt_regs *regs, long handled, 190 - struct mce_error_info *mce_err, uint64_t addr); 190 + struct mce_error_info *mce_err, uint64_t nip, 191 + uint64_t addr); 191 192 extern int get_mce_event(struct machine_check_event *mce, bool release); 192 193 extern void release_mce_event(void); 193 194 extern void machine_check_queue_event(void);
+3
arch/powerpc/include/asm/opal.h
··· 833 833 834 834 /* Internal functions */ 835 835 extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data); 836 + extern int early_init_dt_scan_recoverable_ranges(unsigned long node, 837 + const char *uname, int depth, void *data); 836 838 837 839 extern int opal_get_chars(uint32_t vtermno, char *buf, int count); 838 840 extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len); ··· 865 863 extern void opal_flash_init(void); 866 864 867 865 extern int opal_machine_check(struct pt_regs *regs); 866 + extern bool opal_mce_check_early_recovery(struct pt_regs *regs); 868 867 869 868 extern void opal_shutdown(void); 870 869
+2 -2
arch/powerpc/kernel/mce.c
··· 70 70 */ 71 71 void save_mce_event(struct pt_regs *regs, long handled, 72 72 struct mce_error_info *mce_err, 73 - uint64_t addr) 73 + uint64_t nip, uint64_t addr) 74 74 { 75 75 uint64_t srr1; 76 76 int index = __get_cpu_var(mce_nest_count)++; ··· 86 86 87 87 /* Populate generic machine check info */ 88 88 mce->version = MCE_V1; 89 - mce->srr0 = regs->nip; 89 + mce->srr0 = nip; 90 90 mce->srr1 = regs->msr; 91 91 mce->gpr3 = regs->gpr[3]; 92 92 mce->in_use = 1;
+33 -4
arch/powerpc/kernel/mce_power.c
··· 26 26 #include <linux/ptrace.h> 27 27 #include <asm/mmu.h> 28 28 #include <asm/mce.h> 29 + #include <asm/machdep.h> 29 30 30 31 /* flush SLBs and reload */ 31 32 static void flush_and_reload_slb(void) ··· 198 197 } 199 198 } 200 199 200 + static long mce_handle_ue_error(struct pt_regs *regs) 201 + { 202 + long handled = 0; 203 + 204 + /* 205 + * On specific SCOM read via MMIO we may get a machine check 206 + * exception with SRR0 pointing inside opal. If that is the 207 + * case OPAL may have recovery address to re-read SCOM data in 208 + * different way and hence we can recover from this MC. 209 + */ 210 + 211 + if (ppc_md.mce_check_early_recovery) { 212 + if (ppc_md.mce_check_early_recovery(regs)) 213 + handled = 1; 214 + } 215 + return handled; 216 + } 217 + 201 218 long __machine_check_early_realmode_p7(struct pt_regs *regs) 202 219 { 203 - uint64_t srr1, addr; 220 + uint64_t srr1, nip, addr; 204 221 long handled = 1; 205 222 struct mce_error_info mce_error_info = { 0 }; 206 223 207 224 srr1 = regs->msr; 225 + nip = regs->nip; 208 226 209 227 /* 210 228 * Handle memory errors depending whether this was a load/store or ··· 241 221 addr = regs->nip; 242 222 } 243 223 244 - save_mce_event(regs, handled, &mce_error_info, addr); 224 + /* Handle UE error. 
*/ 225 + if (mce_error_info.error_type == MCE_ERROR_TYPE_UE) 226 + handled = mce_handle_ue_error(regs); 227 + 228 + save_mce_event(regs, handled, &mce_error_info, nip, addr); 245 229 return handled; 246 230 } 247 231 ··· 287 263 288 264 long __machine_check_early_realmode_p8(struct pt_regs *regs) 289 265 { 290 - uint64_t srr1, addr; 266 + uint64_t srr1, nip, addr; 291 267 long handled = 1; 292 268 struct mce_error_info mce_error_info = { 0 }; 293 269 294 270 srr1 = regs->msr; 271 + nip = regs->nip; 295 272 296 273 if (P7_SRR1_MC_LOADSTORE(srr1)) { 297 274 handled = mce_handle_derror_p8(regs->dsisr); ··· 304 279 addr = regs->nip; 305 280 } 306 281 307 - save_mce_event(regs, handled, &mce_error_info, addr); 282 + /* Handle UE error. */ 283 + if (mce_error_info.error_type == MCE_ERROR_TYPE_UE) 284 + handled = mce_handle_ue_error(regs); 285 + 286 + save_mce_event(regs, handled, &mce_error_info, nip, addr); 308 287 return handled; 309 288 }
+5
arch/powerpc/kernel/prom.c
··· 752 752 spinning_secondaries = boot_cpu_count - 1; 753 753 #endif 754 754 755 + #ifdef CONFIG_PPC_POWERNV 756 + /* Scan and build the list of machine check recoverable ranges */ 757 + of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL); 758 + #endif 759 + 755 760 DBG(" <- early_init_devtree()\n"); 756 761 } 757 762
+97 -3
arch/powerpc/platforms/powernv/opal.c
··· 21 21 #include <linux/sched.h> 22 22 #include <linux/kobject.h> 23 23 #include <linux/delay.h> 24 + #include <linux/memblock.h> 24 25 #include <asm/opal.h> 25 26 #include <asm/firmware.h> 26 27 #include <asm/mce.h> ··· 34 33 struct opal { 35 34 u64 base; 36 35 u64 entry; 36 + u64 size; 37 37 } opal; 38 + 39 + struct mcheck_recoverable_range { 40 + u64 start_addr; 41 + u64 end_addr; 42 + u64 recover_addr; 43 + }; 44 + 45 + static struct mcheck_recoverable_range *mc_recoverable_range; 46 + static int mc_recoverable_range_len; 38 47 39 48 static struct device_node *opal_node; 40 49 static DEFINE_SPINLOCK(opal_write_lock); ··· 60 49 int __init early_init_dt_scan_opal(unsigned long node, 61 50 const char *uname, int depth, void *data) 62 51 { 63 - const void *basep, *entryp; 64 - unsigned long basesz, entrysz; 52 + const void *basep, *entryp, *sizep; 53 + unsigned long basesz, entrysz, runtimesz; 65 54 66 55 if (depth != 1 || strcmp(uname, "ibm,opal") != 0) 67 56 return 0; 68 57 69 58 basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz); 70 59 entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz); 60 + sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz); 71 61 72 - if (!basep || !entryp) 62 + if (!basep || !entryp || !sizep) 73 63 return 1; 74 64 75 65 opal.base = of_read_number(basep, basesz/4); 76 66 opal.entry = of_read_number(entryp, entrysz/4); 67 + opal.size = of_read_number(sizep, runtimesz/4); 77 68 78 69 pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%ld)\n", 79 70 opal.base, basep, basesz); 80 71 pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%ld)\n", 81 72 opal.entry, entryp, entrysz); 73 + pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%ld)\n", 74 + opal.size, sizep, runtimesz); 82 75 83 76 powerpc_firmware_features |= FW_FEATURE_OPAL; 84 77 if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { ··· 96 81 printk("OPAL V1 detected !\n"); 97 82 } 98 83 84 + return 1; 85 + } 86 + 87 + int __init 
early_init_dt_scan_recoverable_ranges(unsigned long node, 88 + const char *uname, int depth, void *data) 89 + { 90 + unsigned long i, size; 91 + const __be32 *prop; 92 + 93 + if (depth != 1 || strcmp(uname, "ibm,opal") != 0) 94 + return 0; 95 + 96 + prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &size); 97 + 98 + if (!prop) 99 + return 1; 100 + 101 + pr_debug("Found machine check recoverable ranges.\n"); 102 + 103 + /* 104 + * Allocate a buffer to hold the MC recoverable ranges. We would be 105 + * accessing them in real mode, hence it needs to be within 106 + * RMO region. 107 + */ 108 + mc_recoverable_range =__va(memblock_alloc_base(size, __alignof__(u64), 109 + ppc64_rma_size)); 110 + memset(mc_recoverable_range, 0, size); 111 + 112 + /* 113 + * Each recoverable address entry is an (start address,len, 114 + * recover address) pair, * 2 cells each, totalling 4 cells per entry. 115 + */ 116 + for (i = 0; i < size / (sizeof(*prop) * 5); i++) { 117 + mc_recoverable_range[i].start_addr = 118 + of_read_number(prop + (i * 5) + 0, 2); 119 + mc_recoverable_range[i].end_addr = 120 + mc_recoverable_range[i].start_addr + 121 + of_read_number(prop + (i * 5) + 2, 1); 122 + mc_recoverable_range[i].recover_addr = 123 + of_read_number(prop + (i * 5) + 3, 2); 124 + 125 + pr_debug("Machine check recoverable range: %llx..%llx: %llx\n", 126 + mc_recoverable_range[i].start_addr, 127 + mc_recoverable_range[i].end_addr, 128 + mc_recoverable_range[i].recover_addr); 129 + } 130 + mc_recoverable_range_len = i; 99 131 return 1; 100 132 } 101 133 ··· 461 399 if (opal_recover_mce(regs, &evt)) 462 400 return 1; 463 401 return 0; 402 + } 403 + 404 + static uint64_t find_recovery_address(uint64_t nip) 405 + { 406 + int i; 407 + 408 + for (i = 0; i < mc_recoverable_range_len; i++) 409 + if ((nip >= mc_recoverable_range[i].start_addr) && 410 + (nip < mc_recoverable_range[i].end_addr)) 411 + return mc_recoverable_range[i].recover_addr; 412 + return 0; 413 + } 414 + 415 + bool 
opal_mce_check_early_recovery(struct pt_regs *regs) 416 + { 417 + uint64_t recover_addr = 0; 418 + 419 + if (!opal.base || !opal.size) 420 + goto out; 421 + 422 + if ((regs->nip >= opal.base) && 423 + (regs->nip <= (opal.base + opal.size))) 424 + recover_addr = find_recovery_address(regs->nip); 425 + 426 + /* 427 + * Setup regs->nip to rfi into fixup address. 428 + */ 429 + if (recover_addr) 430 + regs->nip = recover_addr; 431 + 432 + out: 433 + return !!recover_addr; 464 434 } 465 435 466 436 static irqreturn_t opal_interrupt(int irq, void *data)
+1
arch/powerpc/platforms/powernv/setup.c
··· 188 188 ppc_md.power_off = pnv_power_off; 189 189 ppc_md.halt = pnv_halt; 190 190 ppc_md.machine_check_exception = opal_machine_check; 191 + ppc_md.mce_check_early_recovery = opal_mce_check_early_recovery; 191 192 } 192 193 193 194 #ifdef CONFIG_PPC_POWERNV_RTAS