Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

- fix various clang build and cppcheck issues

- switch ARM to use new common outgoing-CPU-notification code

- add some additional explanation about the boot code

- kbuild "make clean" fixes

- get rid of another "(____ptrval____)", this time for the VDSO code

- avoid treating cache maintenance faults as a write

- add a frame pointer unwinder implementation for clang

- add EDAC support for Aurora L2 cache

- improve robustness of adjust_lowmem_bounds() finding the bounds of
lowmem.

- add reset control for AMBA primecell devices

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (24 commits)
ARM: 8906/1: drivers/amba: add reset control to amba bus probe
ARM: 8905/1: Emit __gnu_mcount_nc when using Clang 10.0.0 or newer
ARM: 8904/1: skip nomap memblocks while finding the lowmem/highmem boundary
ARM: 8903/1: ensure that usable memory in bank 0 starts from a PMD-aligned address
ARM: 8891/1: EDAC: armada_xp: Add support for more SoCs
ARM: 8888/1: EDAC: Add driver for the Marvell Armada XP SDRAM and L2 cache ECC
ARM: 8892/1: EDAC: Add missing debugfs_create_x32 wrapper
ARM: 8890/1: l2x0: add marvell,ecc-enable property for aurora
ARM: 8889/1: dt-bindings: document marvell,ecc-enable binding
ARM: 8886/1: l2x0: support parity-enable/disable on aurora
ARM: 8885/1: aurora-l2: add defines for parity and ECC registers
ARM: 8887/1: aurora-l2: add prefix to MAX_RANGE_SIZE
ARM: 8902/1: l2c: move cache-aurora-l2.h to asm/hardware
ARM: 8900/1: UNWINDER_FRAME_POINTER implementation for Clang
ARM: 8898/1: mm: Don't treat faults reported from cache maintenance as writes
ARM: 8896/1: VDSO: Don't leak kernel addresses
ARM: 8895/1: visit mach-* and plat-* directories when cleaning
ARM: 8894/1: boot: Replace open-coded nop with macro
ARM: 8893/1: boot: Explain the 8 nops
ARM: 8876/1: fix O= building with CONFIG_FPE_FASTFPE
...

+1027 -24
+4
Documentation/devicetree/bindings/arm/l2c2x0.yaml
··· 176 176 description: disable parity checking on the L2 cache (L220 or PL310). 177 177 type: boolean 178 178 179 + marvell,ecc-enable: 180 + description: enable ECC protection on the L2 cache 181 + type: boolean 182 + 179 183 arm,outer-sync-disable: 180 184 description: disable the outer sync operation on the L2 cache. 181 185 Some core tiles, especially ARM PB11MPCore have a faulty L220 cache that
+6
MAINTAINERS
··· 5802 5802 S: Maintained 5803 5803 F: drivers/edac/amd64_edac* 5804 5804 5805 + EDAC-ARMADA 5806 + M: Jan Luebbe <jlu@pengutronix.de> 5807 + L: linux-edac@vger.kernel.org 5808 + S: Maintained 5809 + F: drivers/edac/armada_xp_* 5810 + 5805 5811 EDAC-AST2500 5806 5812 M: Stefan Schaeckeler <sschaeck@cisco.com> 5807 5813 S: Supported
+4 -3
arch/arm/Kconfig
··· 82 82 select HAVE_FAST_GUP if ARM_LPAE 83 83 select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL 84 84 select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG 85 - select HAVE_FUNCTION_TRACER if !XIP_KERNEL 85 + select HAVE_FUNCTION_TRACER if !XIP_KERNEL && (CC_IS_GCC || CLANG_VERSION >= 100000) 86 86 select HAVE_GCC_PLUGINS 87 87 select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7) 88 88 select HAVE_IDE if PCI || ISA || PCMCIA ··· 1476 1476 code to do integer division. 1477 1477 1478 1478 config AEABI 1479 - bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K 1480 - default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K 1479 + bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && \ 1480 + !CPU_V7M && !CPU_V6 && !CPU_V6K && !CC_IS_CLANG 1481 + default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K || CC_IS_CLANG 1481 1482 help 1482 1483 This option allows for the kernel to be compiled using the latest 1483 1484 ARM ABI (aka EABI). This is only useful if you are using a user
+1 -1
arch/arm/Kconfig.debug
··· 56 56 57 57 config UNWINDER_FRAME_POINTER 58 58 bool "Frame pointer unwinder" 59 - depends on !THUMB2_KERNEL && !CC_IS_CLANG 59 + depends on !THUMB2_KERNEL 60 60 select ARCH_WANT_FRAME_POINTERS 61 61 select FRAME_POINTER 62 62 help
+14 -8
arch/arm/Makefile
··· 36 36 endif 37 37 38 38 ifeq ($(CONFIG_FRAME_POINTER),y) 39 - KBUILD_CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog 39 + KBUILD_CFLAGS +=-fno-omit-frame-pointer 40 + ifeq ($(CONFIG_CC_IS_GCC),y) 41 + KBUILD_CFLAGS += -mapcs -mno-sched-prolog 42 + endif 40 43 endif 41 44 42 45 ifeq ($(CONFIG_CPU_BIG_ENDIAN),y) ··· 113 110 114 111 ifeq ($(CONFIG_ARM_UNWIND),y) 115 112 CFLAGS_ABI +=-funwind-tables 113 + endif 114 + 115 + ifeq ($(CONFIG_CC_IS_CLANG),y) 116 + CFLAGS_ABI += -meabi gnu 116 117 endif 117 118 118 119 # Accept old syntax despite ".syntax unified" ··· 273 266 274 267 export TEXT_OFFSET GZFLAGS MMUEXT 275 268 276 - # Do we have FASTFPE? 277 - FASTFPE :=arch/arm/fastfpe 278 - ifeq ($(FASTFPE),$(wildcard $(FASTFPE))) 279 - FASTFPE_OBJ :=$(FASTFPE)/ 280 - endif 281 - 282 269 core-$(CONFIG_FPE_NWFPE) += arch/arm/nwfpe/ 283 - core-$(CONFIG_FPE_FASTFPE) += $(FASTFPE_OBJ) 270 + # Put arch/arm/fastfpe/ to use this. 271 + core-$(CONFIG_FPE_FASTFPE) += $(patsubst $(srctree)/%,%,$(wildcard $(srctree)/arch/arm/fastfpe/)) 284 272 core-$(CONFIG_VFP) += arch/arm/vfp/ 285 273 core-$(CONFIG_XEN) += arch/arm/xen/ 286 274 core-$(CONFIG_KVM_ARM_HOST) += arch/arm/kvm/ ··· 287 285 core-y += arch/arm/net/ 288 286 core-y += arch/arm/crypto/ 289 287 core-y += $(machdirs) $(platdirs) 288 + 289 + # For cleaning 290 + core- += $(patsubst %,arch/arm/mach-%/, $(machine-)) 291 + core- += $(patsubst %,arch/arm/plat-%/, $(plat-)) 290 292 291 293 drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/ 292 294
+13 -1
arch/arm/boot/compressed/head.S
··· 153 153 AR_CLASS( .arm ) 154 154 start: 155 155 .type start,#function 156 + /* 157 + * These 7 nops along with the 1 nop immediately below for 158 + * !THUMB2 form 8 nops that make the compressed kernel bootable 159 + * on legacy ARM systems that were assuming the kernel in a.out 160 + * binary format. The boot loaders on these systems would 161 + * jump 32 bytes into the image to skip the a.out header. 162 + * with these 8 nops filling exactly 32 bytes, things still 163 + * work as expected on these legacy systems. Thumb2 mode keeps 164 + * 7 of the nops as it turns out that some boot loaders 165 + * were patching the initial instructions of the kernel, i.e 166 + * had started to exploit this "patch area". 167 + */ 156 168 .rept 7 157 169 __nop 158 170 .endr 159 171 #ifndef CONFIG_THUMB2_KERNEL 160 - mov r0, r0 172 + __nop 161 173 #else 162 174 AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode 163 175 M_CLASS( nop.w ) @ M: already in Thumb2 mode
+3 -3
arch/arm/kernel/perf_event_v7.c
··· 697 697 /* 698 698 * Event filters for PMUv2 699 699 */ 700 - #define ARMV7_EXCLUDE_PL1 (1 << 31) 701 - #define ARMV7_EXCLUDE_USER (1 << 30) 702 - #define ARMV7_INCLUDE_HYP (1 << 27) 700 + #define ARMV7_EXCLUDE_PL1 BIT(31) 701 + #define ARMV7_EXCLUDE_USER BIT(30) 702 + #define ARMV7_INCLUDE_HYP BIT(27) 703 703 704 704 /* 705 705 * Secure debug enable reg
-1
arch/arm/kernel/vdso.c
··· 194 194 } 195 195 196 196 text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; 197 - pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start); 198 197 199 198 /* Allocate the VDSO text pagelist */ 200 199 vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
+7 -1
arch/arm/lib/Makefile
··· 5 5 # Copyright (C) 1995-2000 Russell King 6 6 # 7 7 8 - lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ 8 + lib-y := changebit.o csumipv6.o csumpartial.o \ 9 9 csumpartialcopy.o csumpartialcopyuser.o clearbit.o \ 10 10 delay.o delay-loop.o findbit.o memchr.o memcpy.o \ 11 11 memmove.o memset.o setbit.o \ ··· 18 18 19 19 mmu-y := clear_user.o copy_page.o getuser.o putuser.o \ 20 20 copy_from_user.o copy_to_user.o 21 + 22 + ifdef CONFIG_CC_IS_CLANG 23 + lib-y += backtrace-clang.o 24 + else 25 + lib-y += backtrace.o 26 + endif 21 27 22 28 # using lib_ here won't override already available weak symbols 23 29 obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
+217
arch/arm/lib/backtrace-clang.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * linux/arch/arm/lib/backtrace-clang.S 4 + * 5 + * Copyright (C) 2019 Nathan Huckleberry 6 + * 7 + */ 8 + #include <linux/kern_levels.h> 9 + #include <linux/linkage.h> 10 + #include <asm/assembler.h> 11 + .text 12 + 13 + /* fp is 0 or stack frame */ 14 + 15 + #define frame r4 16 + #define sv_fp r5 17 + #define sv_pc r6 18 + #define mask r7 19 + #define sv_lr r8 20 + 21 + ENTRY(c_backtrace) 22 + 23 + #if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK) 24 + ret lr 25 + ENDPROC(c_backtrace) 26 + #else 27 + 28 + 29 + /* 30 + * Clang does not store pc or sp in function prologues so we don't know exactly 31 + * where the function starts. 32 + * 33 + * We can treat the current frame's lr as the saved pc and the preceding 34 + * frame's lr as the current frame's lr, but we can't trace the most recent 35 + * call. Inserting a false stack frame allows us to reference the function 36 + * called last in the stacktrace. 37 + * 38 + * If the call instruction was a bl we can look at the callers branch 39 + * instruction to calculate the saved pc. We can recover the pc in most cases, 40 + * but in cases such as calling function pointers we cannot. In this case, 41 + * default to using the lr. This will be some address in the function, but will 42 + * not be the function start. 43 + * 44 + * Unfortunately due to the stack frame layout we can't dump r0 - r3, but these 45 + * are less frequently saved. 
46 + * 47 + * Stack frame layout: 48 + * <larger addresses> 49 + * saved lr 50 + * frame=> saved fp 51 + * optionally saved caller registers (r4 - r10) 52 + * optionally saved arguments (r0 - r3) 53 + * <top of stack frame> 54 + * <smaller addresses> 55 + * 56 + * Functions start with the following code sequence: 57 + * corrected pc => stmfd sp!, {..., fp, lr} 58 + * add fp, sp, #x 59 + * stmfd sp!, {r0 - r3} (optional) 60 + * 61 + * 62 + * 63 + * 64 + * 65 + * 66 + * The diagram below shows an example stack setup for dump_stack. 67 + * 68 + * The frame for c_backtrace has pointers to the code of dump_stack. This is 69 + * why the frame of c_backtrace is used to for the pc calculation of 70 + * dump_stack. This is why we must move back a frame to print dump_stack. 71 + * 72 + * The stored locals for dump_stack are in dump_stack's frame. This means that 73 + * to fully print dump_stack's frame we need both the frame for dump_stack (for 74 + * locals) and the frame that was called by dump_stack (for pc). 75 + * 76 + * To print locals we must know where the function start is. If we read the 77 + * function prologue opcodes we can determine which variables are stored in the 78 + * stack frame. 79 + * 80 + * To find the function start of dump_stack we can look at the stored LR of 81 + * show_stack. It points at the instruction directly after the bl dump_stack. 82 + * We can then read the offset from the bl opcode to determine where the branch 83 + * takes us. The address calculated must be the start of dump_stack. 84 + * 85 + * c_backtrace frame dump_stack: 86 + * {[LR] } ============| ... 87 + * {[FP] } =======| | bl c_backtrace 88 + * | |=> ... 89 + * {[R4-R10]} | 90 + * {[R0-R3] } | show_stack: 91 + * dump_stack frame | ... 92 + * {[LR] } =============| bl dump_stack 93 + * {[FP] } <=======| |=> ... 
94 + * {[R4-R10]} 95 + * {[R0-R3] } 96 + */ 97 + 98 + stmfd sp!, {r4 - r9, fp, lr} @ Save an extra register 99 + @ to ensure 8 byte alignment 100 + movs frame, r0 @ if frame pointer is zero 101 + beq no_frame @ we have no stack frames 102 + tst r1, #0x10 @ 26 or 32-bit mode? 103 + moveq mask, #0xfc000003 104 + movne mask, #0 @ mask for 32-bit 105 + 106 + /* 107 + * Switches the current frame to be the frame for dump_stack. 108 + */ 109 + add frame, sp, #24 @ switch to false frame 110 + for_each_frame: tst frame, mask @ Check for address exceptions 111 + bne no_frame 112 + 113 + /* 114 + * sv_fp is the stack frame with the locals for the current considered 115 + * function. 116 + * 117 + * sv_pc is the saved lr frame the frame above. This is a pointer to a code 118 + * address within the current considered function, but it is not the function 119 + * start. This value gets updated to be the function start later if it is 120 + * possible. 121 + */ 122 + 1001: ldr sv_pc, [frame, #4] @ get saved 'pc' 123 + 1002: ldr sv_fp, [frame, #0] @ get saved fp 124 + 125 + teq sv_fp, mask @ make sure next frame exists 126 + beq no_frame 127 + 128 + /* 129 + * sv_lr is the lr from the function that called the current function. This is 130 + * a pointer to a code address in the current function's caller. sv_lr-4 is 131 + * the instruction used to call the current function. 132 + * 133 + * This sv_lr can be used to calculate the function start if the function was 134 + * called using a bl instruction. If the function start can be recovered sv_pc 135 + * is overwritten with the function start. 136 + * 137 + * If the current function was called using a function pointer we cannot 138 + * recover the function start and instead continue with sv_pc as an arbitrary 139 + * value within the current function. If this is the case we cannot print 140 + * registers for the current function, but the stacktrace is still printed 141 + * properly. 
142 + */ 143 + 1003: ldr sv_lr, [sv_fp, #4] @ get saved lr from next frame 144 + 145 + ldr r0, [sv_lr, #-4] @ get call instruction 146 + ldr r3, .Lopcode+4 147 + and r2, r3, r0 @ is this a bl call 148 + teq r2, r3 149 + bne finished_setup @ give up if it's not 150 + and r0, #0xffffff @ get call offset 24-bit int 151 + lsl r0, r0, #8 @ sign extend offset 152 + asr r0, r0, #8 153 + ldr sv_pc, [sv_fp, #4] @ get lr address 154 + add sv_pc, sv_pc, #-4 @ get call instruction address 155 + add sv_pc, sv_pc, #8 @ take care of prefetch 156 + add sv_pc, sv_pc, r0, lsl #2@ find function start 157 + 158 + finished_setup: 159 + 160 + bic sv_pc, sv_pc, mask @ mask PC/LR for the mode 161 + 162 + /* 163 + * Print the function (sv_pc) and where it was called from (sv_lr). 164 + */ 165 + 1004: mov r0, sv_pc 166 + 167 + mov r1, sv_lr 168 + mov r2, frame 169 + bic r1, r1, mask @ mask PC/LR for the mode 170 + bl dump_backtrace_entry 171 + 172 + /* 173 + * Test if the function start is a stmfd instruction to determine which 174 + * registers were stored in the function prologue. 175 + * 176 + * If we could not recover the sv_pc because we were called through a function 177 + * pointer the comparison will fail and no registers will print. Unwinding will 178 + * continue as if there had been no registers stored in this frame. 179 + */ 180 + 1005: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, lr} 181 + ldr r3, .Lopcode @ instruction exists, 182 + teq r3, r1, lsr #11 183 + ldr r0, [frame] @ locals are stored in 184 + @ the preceding frame 185 + subeq r0, r0, #4 186 + bleq dump_backtrace_stm @ dump saved registers 187 + 188 + /* 189 + * If we are out of frames or if the next frame is invalid. 
190 + */ 191 + teq sv_fp, #0 @ zero saved fp means 192 + beq no_frame @ no further frames 193 + 194 + cmp sv_fp, frame @ next frame must be 195 + mov frame, sv_fp @ above the current frame 196 + bhi for_each_frame 197 + 198 + 1006: adr r0, .Lbad 199 + mov r1, frame 200 + bl printk 201 + no_frame: ldmfd sp!, {r4 - r9, fp, pc} 202 + ENDPROC(c_backtrace) 203 + .pushsection __ex_table,"a" 204 + .align 3 205 + .long 1001b, 1006b 206 + .long 1002b, 1006b 207 + .long 1003b, 1006b 208 + .long 1004b, 1006b 209 + .long 1005b, 1006b 210 + .popsection 211 + 212 + .Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n" 213 + .align 214 + .Lopcode: .word 0xe92d4800 >> 11 @ stmfd sp!, {... fp, lr} 215 + .word 0x0b000000 @ bl if these bits are set 216 + 217 + #endif
+49 -1
arch/arm/mm/cache-aurora-l2.h arch/arm/include/asm/hardware/cache-aurora-l2.h
··· 31 31 #define AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU \ 32 32 (3 << AURORA_ACR_REPLACEMENT_OFFSET) 33 33 34 + #define AURORA_ACR_PARITY_EN (1 << 21) 35 + #define AURORA_ACR_ECC_EN (1 << 20) 36 + 34 37 #define AURORA_ACR_FORCE_WRITE_POLICY_OFFSET 0 35 38 #define AURORA_ACR_FORCE_WRITE_POLICY_MASK \ 36 39 (0x3 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET) ··· 44 41 #define AURORA_ACR_FORCE_WRITE_THRO_POLICY \ 45 42 (2 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET) 46 43 47 - #define MAX_RANGE_SIZE 1024 44 + #define AURORA_ERR_CNT_REG 0x600 45 + #define AURORA_ERR_ATTR_CAP_REG 0x608 46 + #define AURORA_ERR_ADDR_CAP_REG 0x60c 47 + #define AURORA_ERR_WAY_CAP_REG 0x610 48 + #define AURORA_ERR_INJECT_CTL_REG 0x614 49 + #define AURORA_ERR_INJECT_MASK_REG 0x618 50 + 51 + #define AURORA_ERR_CNT_CLR_OFFSET 31 52 + #define AURORA_ERR_CNT_CLR \ 53 + (0x1 << AURORA_ERR_CNT_CLR_OFFSET) 54 + #define AURORA_ERR_CNT_UE_OFFSET 16 55 + #define AURORA_ERR_CNT_UE_MASK \ 56 + (0x7fff << AURORA_ERR_CNT_UE_OFFSET) 57 + #define AURORA_ERR_CNT_CE_OFFSET 0 58 + #define AURORA_ERR_CNT_CE_MASK \ 59 + (0xffff << AURORA_ERR_CNT_CE_OFFSET) 60 + 61 + #define AURORA_ERR_ATTR_SRC_OFF 16 62 + #define AURORA_ERR_ATTR_SRC_MSK \ 63 + (0x7 << AURORA_ERR_ATTR_SRC_OFF) 64 + #define AURORA_ERR_ATTR_TXN_OFF 12 65 + #define AURORA_ERR_ATTR_TXN_MSK \ 66 + (0xf << AURORA_ERR_ATTR_TXN_OFF) 67 + #define AURORA_ERR_ATTR_ERR_OFF 8 68 + #define AURORA_ERR_ATTR_ERR_MSK \ 69 + (0x3 << AURORA_ERR_ATTR_ERR_OFF) 70 + #define AURORA_ERR_ATTR_CAP_VALID_OFF 0 71 + #define AURORA_ERR_ATTR_CAP_VALID \ 72 + (0x1 << AURORA_ERR_ATTR_CAP_VALID_OFF) 73 + 74 + #define AURORA_ERR_ADDR_CAP_ADDR_MASK 0xffffffe0 75 + 76 + #define AURORA_ERR_WAY_IDX_OFF 8 77 + #define AURORA_ERR_WAY_IDX_MSK \ 78 + (0xfff << AURORA_ERR_WAY_IDX_OFF) 79 + #define AURORA_ERR_WAY_CAP_WAY_OFFSET 1 80 + #define AURORA_ERR_WAY_CAP_WAY_MASK \ 81 + (0xf << AURORA_ERR_WAY_CAP_WAY_OFFSET) 82 + 83 + #define AURORA_ERR_INJECT_CTL_ADDR_MASK 0xfffffff0 84 + #define 
AURORA_ERR_ATTR_TXN_OFF 12 85 + #define AURORA_ERR_INJECT_CTL_EN_MASK 0x3 86 + #define AURORA_ERR_INJECT_CTL_EN_PARITY 0x2 87 + #define AURORA_ERR_INJECT_CTL_EN_ECC 0x1 88 + 89 + #define AURORA_MAX_RANGE_SIZE 1024 48 90 49 91 #define AURORA_WAY_SIZE_SHIFT 2 50 92
+15 -3
arch/arm/mm/cache-l2x0.c
··· 18 18 #include <asm/cp15.h> 19 19 #include <asm/cputype.h> 20 20 #include <asm/hardware/cache-l2x0.h> 21 + #include <asm/hardware/cache-aurora-l2.h> 21 22 #include "cache-tauros3.h" 22 - #include "cache-aurora-l2.h" 23 23 24 24 struct l2c_init_data { 25 25 const char *type; ··· 1352 1352 * since cache range operations stall the CPU pipeline 1353 1353 * until completion. 1354 1354 */ 1355 - if (end > start + MAX_RANGE_SIZE) 1356 - end = start + MAX_RANGE_SIZE; 1355 + if (end > start + AURORA_MAX_RANGE_SIZE) 1356 + end = start + AURORA_MAX_RANGE_SIZE; 1357 1357 1358 1358 /* 1359 1359 * Cache range operations can't straddle a page boundary. ··· 1491 1491 if (l2_wt_override) { 1492 1492 val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; 1493 1493 mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; 1494 + } 1495 + 1496 + if (of_property_read_bool(np, "marvell,ecc-enable")) { 1497 + mask |= AURORA_ACR_ECC_EN; 1498 + val |= AURORA_ACR_ECC_EN; 1499 + } 1500 + 1501 + if (of_property_read_bool(np, "arm,parity-enable")) { 1502 + mask |= AURORA_ACR_PARITY_EN; 1503 + val |= AURORA_ACR_PARITY_EN; 1504 + } else if (of_property_read_bool(np, "arm,parity-disable")) { 1505 + mask |= AURORA_ACR_PARITY_EN; 1494 1506 } 1495 1507 1496 1508 *aux_val &= ~mask;
+2 -2
arch/arm/mm/fault.c
··· 191 191 { 192 192 unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; 193 193 194 - if (fsr & FSR_WRITE) 194 + if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) 195 195 mask = VM_WRITE; 196 196 if (fsr & FSR_LNX_PF) 197 197 mask = VM_EXEC; ··· 262 262 263 263 if (user_mode(regs)) 264 264 flags |= FAULT_FLAG_USER; 265 - if (fsr & FSR_WRITE) 265 + if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) 266 266 flags |= FAULT_FLAG_WRITE; 267 267 268 268 /*
+1
arch/arm/mm/fault.h
··· 6 6 * Fault status register encodings. We steal bit 31 for our own purposes. 7 7 */ 8 8 #define FSR_LNX_PF (1 << 31) 9 + #define FSR_CM (1 << 13) 9 10 #define FSR_WRITE (1 << 11) 10 11 #define FSR_FS4 (1 << 10) 11 12 #define FSR_FS3_0 (15)
+19
arch/arm/mm/mmu.c
··· 1177 1177 */ 1178 1178 vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET; 1179 1179 1180 + /* 1181 + * The first usable region must be PMD aligned. Mark its start 1182 + * as MEMBLOCK_NOMAP if it isn't 1183 + */ 1184 + for_each_memblock(memory, reg) { 1185 + if (!memblock_is_nomap(reg)) { 1186 + if (!IS_ALIGNED(reg->base, PMD_SIZE)) { 1187 + phys_addr_t len; 1188 + 1189 + len = round_up(reg->base, PMD_SIZE) - reg->base; 1190 + memblock_mark_nomap(reg->base, len); 1191 + } 1192 + break; 1193 + } 1194 + } 1195 + 1180 1196 for_each_memblock(memory, reg) { 1181 1197 phys_addr_t block_start = reg->base; 1182 1198 phys_addr_t block_end = reg->base + reg->size; 1199 + 1200 + if (memblock_is_nomap(reg)) 1201 + continue; 1183 1202 1184 1203 if (reg->base < vmalloc_limit) { 1185 1204 if (block_end > lowmem_limit)
+14
drivers/amba/bus.c
··· 18 18 #include <linux/limits.h> 19 19 #include <linux/clk/clk-conf.h> 20 20 #include <linux/platform_device.h> 21 + #include <linux/reset.h> 21 22 22 23 #include <asm/irq.h> 23 24 ··· 402 401 ret = amba_get_enable_pclk(dev); 403 402 if (ret == 0) { 404 403 u32 pid, cid; 404 + struct reset_control *rstc; 405 + 406 + /* 407 + * Find reset control(s) of the amba bus and de-assert them. 408 + */ 409 + rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node); 410 + if (IS_ERR(rstc)) { 411 + if (PTR_ERR(rstc) != -EPROBE_DEFER) 412 + dev_err(&dev->dev, "Can't get amba reset!\n"); 413 + return PTR_ERR(rstc); 414 + } 415 + reset_control_deassert(rstc); 416 + reset_control_put(rstc); 405 417 406 418 /* 407 419 * Read pid and cid based on size of resource
+7
drivers/edac/Kconfig
··· 466 466 help 467 467 Support for error detection and correction on the SiFive SoCs. 468 468 469 + config EDAC_ARMADA_XP 470 + bool "Marvell Armada XP DDR and L2 Cache ECC" 471 + depends on MACH_MVEBU_V7 472 + help 473 + Support for error correction and detection on the Marvell Aramada XP 474 + DDR RAM and L2 cache controllers. 475 + 469 476 config EDAC_SYNOPSYS 470 477 tristate "Synopsys DDR Memory Controller" 471 478 depends on ARCH_ZYNQ || ARCH_ZYNQMP
+1
drivers/edac/Makefile
··· 80 80 81 81 obj-$(CONFIG_EDAC_ALTERA) += altera_edac.o 82 82 obj-$(CONFIG_EDAC_SIFIVE) += sifive_edac.o 83 + obj-$(CONFIG_EDAC_ARMADA_XP) += armada_xp_edac.o 83 84 obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o 84 85 obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o 85 86 obj-$(CONFIG_EDAC_TI) += ti_edac.o
+635
drivers/edac/armada_xp_edac.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2017 Pengutronix, Jan Luebbe <kernel@pengutronix.de> 4 + */ 5 + 6 + #include <linux/kernel.h> 7 + #include <linux/edac.h> 8 + #include <linux/of_platform.h> 9 + 10 + #include <asm/hardware/cache-l2x0.h> 11 + #include <asm/hardware/cache-aurora-l2.h> 12 + 13 + #include "edac_mc.h" 14 + #include "edac_device.h" 15 + #include "edac_module.h" 16 + 17 + /************************ EDAC MC (DDR RAM) ********************************/ 18 + 19 + #define SDRAM_NUM_CS 4 20 + 21 + #define SDRAM_CONFIG_REG 0x0 22 + #define SDRAM_CONFIG_ECC_MASK BIT(18) 23 + #define SDRAM_CONFIG_REGISTERED_MASK BIT(17) 24 + #define SDRAM_CONFIG_BUS_WIDTH_MASK BIT(15) 25 + 26 + #define SDRAM_ADDR_CTRL_REG 0x10 27 + #define SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs) (20+cs) 28 + #define SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(cs) (0x1 << SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs)) 29 + #define SDRAM_ADDR_CTRL_ADDR_SEL_MASK(cs) BIT(16+cs) 30 + #define SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs) (cs*4+2) 31 + #define SDRAM_ADDR_CTRL_SIZE_LOW_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs)) 32 + #define SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs) (cs*4) 33 + #define SDRAM_ADDR_CTRL_STRUCT_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs)) 34 + 35 + #define SDRAM_ERR_DATA_H_REG 0x40 36 + #define SDRAM_ERR_DATA_L_REG 0x44 37 + 38 + #define SDRAM_ERR_RECV_ECC_REG 0x48 39 + #define SDRAM_ERR_RECV_ECC_VALUE_MASK 0xff 40 + 41 + #define SDRAM_ERR_CALC_ECC_REG 0x4c 42 + #define SDRAM_ERR_CALC_ECC_ROW_OFFSET 8 43 + #define SDRAM_ERR_CALC_ECC_ROW_MASK (0xffff << SDRAM_ERR_CALC_ECC_ROW_OFFSET) 44 + #define SDRAM_ERR_CALC_ECC_VALUE_MASK 0xff 45 + 46 + #define SDRAM_ERR_ADDR_REG 0x50 47 + #define SDRAM_ERR_ADDR_BANK_OFFSET 23 48 + #define SDRAM_ERR_ADDR_BANK_MASK (0x7 << SDRAM_ERR_ADDR_BANK_OFFSET) 49 + #define SDRAM_ERR_ADDR_COL_OFFSET 8 50 + #define SDRAM_ERR_ADDR_COL_MASK (0x7fff << SDRAM_ERR_ADDR_COL_OFFSET) 51 + #define SDRAM_ERR_ADDR_CS_OFFSET 1 52 + #define 
SDRAM_ERR_ADDR_CS_MASK (0x3 << SDRAM_ERR_ADDR_CS_OFFSET) 53 + #define SDRAM_ERR_ADDR_TYPE_MASK BIT(0) 54 + 55 + #define SDRAM_ERR_CTRL_REG 0x54 56 + #define SDRAM_ERR_CTRL_THR_OFFSET 16 57 + #define SDRAM_ERR_CTRL_THR_MASK (0xff << SDRAM_ERR_CTRL_THR_OFFSET) 58 + #define SDRAM_ERR_CTRL_PROP_MASK BIT(9) 59 + 60 + #define SDRAM_ERR_SBE_COUNT_REG 0x58 61 + #define SDRAM_ERR_DBE_COUNT_REG 0x5c 62 + 63 + #define SDRAM_ERR_CAUSE_ERR_REG 0xd0 64 + #define SDRAM_ERR_CAUSE_MSG_REG 0xd8 65 + #define SDRAM_ERR_CAUSE_DBE_MASK BIT(1) 66 + #define SDRAM_ERR_CAUSE_SBE_MASK BIT(0) 67 + 68 + #define SDRAM_RANK_CTRL_REG 0x1e0 69 + #define SDRAM_RANK_CTRL_EXIST_MASK(cs) BIT(cs) 70 + 71 + struct axp_mc_drvdata { 72 + void __iomem *base; 73 + /* width in bytes */ 74 + unsigned int width; 75 + /* bank interleaving */ 76 + bool cs_addr_sel[SDRAM_NUM_CS]; 77 + 78 + char msg[128]; 79 + }; 80 + 81 + /* derived from "DRAM Address Multiplexing" in the ARAMDA XP Functional Spec */ 82 + static uint32_t axp_mc_calc_address(struct axp_mc_drvdata *drvdata, 83 + uint8_t cs, uint8_t bank, uint16_t row, 84 + uint16_t col) 85 + { 86 + if (drvdata->width == 8) { 87 + /* 64 bit */ 88 + if (drvdata->cs_addr_sel[cs]) 89 + /* bank interleaved */ 90 + return (((row & 0xfff8) << 16) | 91 + ((bank & 0x7) << 16) | 92 + ((row & 0x7) << 13) | 93 + ((col & 0x3ff) << 3)); 94 + else 95 + return (((row & 0xffff << 16) | 96 + ((bank & 0x7) << 13) | 97 + ((col & 0x3ff)) << 3)); 98 + } else if (drvdata->width == 4) { 99 + /* 32 bit */ 100 + if (drvdata->cs_addr_sel[cs]) 101 + /* bank interleaved */ 102 + return (((row & 0xfff0) << 15) | 103 + ((bank & 0x7) << 16) | 104 + ((row & 0xf) << 12) | 105 + ((col & 0x3ff) << 2)); 106 + else 107 + return (((row & 0xffff << 15) | 108 + ((bank & 0x7) << 12) | 109 + ((col & 0x3ff)) << 2)); 110 + } else { 111 + /* 16 bit */ 112 + if (drvdata->cs_addr_sel[cs]) 113 + /* bank interleaved */ 114 + return (((row & 0xffe0) << 14) | 115 + ((bank & 0x7) << 16) | 116 + ((row & 0x1f) << 11) | 
117 + ((col & 0x3ff) << 1)); 118 + else 119 + return (((row & 0xffff << 14) | 120 + ((bank & 0x7) << 11) | 121 + ((col & 0x3ff)) << 1)); 122 + } 123 + } 124 + 125 + static void axp_mc_check(struct mem_ctl_info *mci) 126 + { 127 + struct axp_mc_drvdata *drvdata = mci->pvt_info; 128 + uint32_t data_h, data_l, recv_ecc, calc_ecc, addr; 129 + uint32_t cnt_sbe, cnt_dbe, cause_err, cause_msg; 130 + uint32_t row_val, col_val, bank_val, addr_val; 131 + uint8_t syndrome_val, cs_val; 132 + char *msg = drvdata->msg; 133 + 134 + data_h = readl(drvdata->base + SDRAM_ERR_DATA_H_REG); 135 + data_l = readl(drvdata->base + SDRAM_ERR_DATA_L_REG); 136 + recv_ecc = readl(drvdata->base + SDRAM_ERR_RECV_ECC_REG); 137 + calc_ecc = readl(drvdata->base + SDRAM_ERR_CALC_ECC_REG); 138 + addr = readl(drvdata->base + SDRAM_ERR_ADDR_REG); 139 + cnt_sbe = readl(drvdata->base + SDRAM_ERR_SBE_COUNT_REG); 140 + cnt_dbe = readl(drvdata->base + SDRAM_ERR_DBE_COUNT_REG); 141 + cause_err = readl(drvdata->base + SDRAM_ERR_CAUSE_ERR_REG); 142 + cause_msg = readl(drvdata->base + SDRAM_ERR_CAUSE_MSG_REG); 143 + 144 + /* clear cause registers */ 145 + writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), 146 + drvdata->base + SDRAM_ERR_CAUSE_ERR_REG); 147 + writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), 148 + drvdata->base + SDRAM_ERR_CAUSE_MSG_REG); 149 + 150 + /* clear error counter registers */ 151 + if (cnt_sbe) 152 + writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG); 153 + if (cnt_dbe) 154 + writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG); 155 + 156 + if (!cnt_sbe && !cnt_dbe) 157 + return; 158 + 159 + if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) { 160 + if (cnt_sbe) 161 + cnt_sbe--; 162 + else 163 + dev_warn(mci->pdev, "inconsistent SBE count detected"); 164 + } else { 165 + if (cnt_dbe) 166 + cnt_dbe--; 167 + else 168 + dev_warn(mci->pdev, "inconsistent DBE count detected"); 169 + } 170 + 171 + /* report earlier errors */ 172 + if (cnt_sbe) 173 + 
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 174 + cnt_sbe, /* error count */ 175 + 0, 0, 0, /* pfn, offset, syndrome */ 176 + -1, -1, -1, /* top, mid, low layer */ 177 + mci->ctl_name, 178 + "details unavailable (multiple errors)"); 179 + if (cnt_dbe) 180 + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 181 + cnt_sbe, /* error count */ 182 + 0, 0, 0, /* pfn, offset, syndrome */ 183 + -1, -1, -1, /* top, mid, low layer */ 184 + mci->ctl_name, 185 + "details unavailable (multiple errors)"); 186 + 187 + /* report details for most recent error */ 188 + cs_val = (addr & SDRAM_ERR_ADDR_CS_MASK) >> SDRAM_ERR_ADDR_CS_OFFSET; 189 + bank_val = (addr & SDRAM_ERR_ADDR_BANK_MASK) >> SDRAM_ERR_ADDR_BANK_OFFSET; 190 + row_val = (calc_ecc & SDRAM_ERR_CALC_ECC_ROW_MASK) >> SDRAM_ERR_CALC_ECC_ROW_OFFSET; 191 + col_val = (addr & SDRAM_ERR_ADDR_COL_MASK) >> SDRAM_ERR_ADDR_COL_OFFSET; 192 + syndrome_val = (recv_ecc ^ calc_ecc) & 0xff; 193 + addr_val = axp_mc_calc_address(drvdata, cs_val, bank_val, row_val, 194 + col_val); 195 + msg += sprintf(msg, "row=0x%04x ", row_val); /* 11 chars */ 196 + msg += sprintf(msg, "bank=0x%x ", bank_val); /* 9 chars */ 197 + msg += sprintf(msg, "col=0x%04x ", col_val); /* 11 chars */ 198 + msg += sprintf(msg, "cs=%d", cs_val); /* 4 chars */ 199 + 200 + if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) { 201 + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 202 + 1, /* error count */ 203 + addr_val >> PAGE_SHIFT, 204 + addr_val & ~PAGE_MASK, 205 + syndrome_val, 206 + cs_val, -1, -1, /* top, mid, low layer */ 207 + mci->ctl_name, drvdata->msg); 208 + } else { 209 + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 210 + 1, /* error count */ 211 + addr_val >> PAGE_SHIFT, 212 + addr_val & ~PAGE_MASK, 213 + syndrome_val, 214 + cs_val, -1, -1, /* top, mid, low layer */ 215 + mci->ctl_name, drvdata->msg); 216 + } 217 + } 218 + 219 + static void axp_mc_read_config(struct mem_ctl_info *mci) 220 + { 221 + struct axp_mc_drvdata *drvdata = mci->pvt_info; 222 + 
uint32_t config, addr_ctrl, rank_ctrl; 223 + unsigned int i, cs_struct, cs_size; 224 + struct dimm_info *dimm; 225 + 226 + config = readl(drvdata->base + SDRAM_CONFIG_REG); 227 + if (config & SDRAM_CONFIG_BUS_WIDTH_MASK) 228 + /* 64 bit */ 229 + drvdata->width = 8; 230 + else 231 + /* 32 bit */ 232 + drvdata->width = 4; 233 + 234 + addr_ctrl = readl(drvdata->base + SDRAM_ADDR_CTRL_REG); 235 + rank_ctrl = readl(drvdata->base + SDRAM_RANK_CTRL_REG); 236 + for (i = 0; i < SDRAM_NUM_CS; i++) { 237 + dimm = mci->dimms[i]; 238 + 239 + if (!(rank_ctrl & SDRAM_RANK_CTRL_EXIST_MASK(i))) 240 + continue; 241 + 242 + drvdata->cs_addr_sel[i] = 243 + !!(addr_ctrl & SDRAM_ADDR_CTRL_ADDR_SEL_MASK(i)); 244 + 245 + cs_struct = (addr_ctrl & SDRAM_ADDR_CTRL_STRUCT_MASK(i)) >> SDRAM_ADDR_CTRL_STRUCT_OFFSET(i); 246 + cs_size = ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(i)) >> (SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(i) - 2) | 247 + ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_LOW_MASK(i)) >> SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(i))); 248 + 249 + switch (cs_size) { 250 + case 0: /* 2GBit */ 251 + dimm->nr_pages = 524288; 252 + break; 253 + case 1: /* 256MBit */ 254 + dimm->nr_pages = 65536; 255 + break; 256 + case 2: /* 512MBit */ 257 + dimm->nr_pages = 131072; 258 + break; 259 + case 3: /* 1GBit */ 260 + dimm->nr_pages = 262144; 261 + break; 262 + case 4: /* 4GBit */ 263 + dimm->nr_pages = 1048576; 264 + break; 265 + case 5: /* 8GBit */ 266 + dimm->nr_pages = 2097152; 267 + break; 268 + } 269 + dimm->grain = 8; 270 + dimm->dtype = cs_struct ? DEV_X16 : DEV_X8; 271 + dimm->mtype = (config & SDRAM_CONFIG_REGISTERED_MASK) ? 
272 + MEM_RDDR3 : MEM_DDR3; 273 + dimm->edac_mode = EDAC_SECDED; 274 + } 275 + } 276 + 277 + static const struct of_device_id axp_mc_of_match[] = { 278 + {.compatible = "marvell,armada-xp-sdram-controller",}, 279 + {}, 280 + }; 281 + MODULE_DEVICE_TABLE(of, axp_mc_of_match); 282 + 283 + static int axp_mc_probe(struct platform_device *pdev) 284 + { 285 + struct axp_mc_drvdata *drvdata; 286 + struct edac_mc_layer layers[1]; 287 + const struct of_device_id *id; 288 + struct mem_ctl_info *mci; 289 + struct resource *r; 290 + void __iomem *base; 291 + uint32_t config; 292 + 293 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 294 + if (!r) { 295 + dev_err(&pdev->dev, "Unable to get mem resource\n"); 296 + return -ENODEV; 297 + } 298 + 299 + base = devm_ioremap_resource(&pdev->dev, r); 300 + if (IS_ERR(base)) { 301 + dev_err(&pdev->dev, "Unable to map regs\n"); 302 + return PTR_ERR(base); 303 + } 304 + 305 + config = readl(base + SDRAM_CONFIG_REG); 306 + if (!(config & SDRAM_CONFIG_ECC_MASK)) { 307 + dev_warn(&pdev->dev, "SDRAM ECC is not enabled"); 308 + return -EINVAL; 309 + } 310 + 311 + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 312 + layers[0].size = SDRAM_NUM_CS; 313 + layers[0].is_virt_csrow = true; 314 + 315 + mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*drvdata)); 316 + if (!mci) 317 + return -ENOMEM; 318 + 319 + drvdata = mci->pvt_info; 320 + drvdata->base = base; 321 + mci->pdev = &pdev->dev; 322 + platform_set_drvdata(pdev, mci); 323 + 324 + id = of_match_device(axp_mc_of_match, &pdev->dev); 325 + mci->edac_check = axp_mc_check; 326 + mci->mtype_cap = MEM_FLAG_DDR3; 327 + mci->edac_cap = EDAC_FLAG_SECDED; 328 + mci->mod_name = pdev->dev.driver->name; 329 + mci->ctl_name = id ? 
id->compatible : "unknown"; 330 + mci->dev_name = dev_name(&pdev->dev); 331 + mci->scrub_mode = SCRUB_NONE; 332 + 333 + axp_mc_read_config(mci); 334 + 335 + /* These SoCs have a reduced width bus */ 336 + if (of_machine_is_compatible("marvell,armada380") || 337 + of_machine_is_compatible("marvell,armadaxp-98dx3236")) 338 + drvdata->width /= 2; 339 + 340 + /* configure SBE threshold */ 341 + /* it seems that SBEs are not captured otherwise */ 342 + writel(1 << SDRAM_ERR_CTRL_THR_OFFSET, drvdata->base + SDRAM_ERR_CTRL_REG); 343 + 344 + /* clear cause registers */ 345 + writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_ERR_REG); 346 + writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_MSG_REG); 347 + 348 + /* clear counter registers */ 349 + writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG); 350 + writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG); 351 + 352 + if (edac_mc_add_mc(mci)) { 353 + edac_mc_free(mci); 354 + return -EINVAL; 355 + } 356 + edac_op_state = EDAC_OPSTATE_POLL; 357 + 358 + return 0; 359 + } 360 + 361 + static int axp_mc_remove(struct platform_device *pdev) 362 + { 363 + struct mem_ctl_info *mci = platform_get_drvdata(pdev); 364 + 365 + edac_mc_del_mc(&pdev->dev); 366 + edac_mc_free(mci); 367 + platform_set_drvdata(pdev, NULL); 368 + 369 + return 0; 370 + } 371 + 372 + static struct platform_driver axp_mc_driver = { 373 + .probe = axp_mc_probe, 374 + .remove = axp_mc_remove, 375 + .driver = { 376 + .name = "armada_xp_mc_edac", 377 + .of_match_table = of_match_ptr(axp_mc_of_match), 378 + }, 379 + }; 380 + 381 + /************************ EDAC Device (L2 Cache) ***************************/ 382 + 383 + struct aurora_l2_drvdata { 384 + void __iomem *base; 385 + 386 + char msg[128]; 387 + 388 + /* error injection via debugfs */ 389 + uint32_t inject_addr; 390 + uint32_t inject_mask; 391 + uint8_t inject_ctl; 392 + 393 + struct dentry *debugfs; 394 + }; 395 + 396 + 
#ifdef CONFIG_EDAC_DEBUG 397 + static void aurora_l2_inject(struct aurora_l2_drvdata *drvdata) 398 + { 399 + drvdata->inject_addr &= AURORA_ERR_INJECT_CTL_ADDR_MASK; 400 + drvdata->inject_ctl &= AURORA_ERR_INJECT_CTL_EN_MASK; 401 + writel(0, drvdata->base + AURORA_ERR_INJECT_CTL_REG); 402 + writel(drvdata->inject_mask, drvdata->base + AURORA_ERR_INJECT_MASK_REG); 403 + writel(drvdata->inject_addr | drvdata->inject_ctl, drvdata->base + AURORA_ERR_INJECT_CTL_REG); 404 + } 405 + #endif 406 + 407 + static void aurora_l2_check(struct edac_device_ctl_info *dci) 408 + { 409 + struct aurora_l2_drvdata *drvdata = dci->pvt_info; 410 + uint32_t cnt, src, txn, err, attr_cap, addr_cap, way_cap; 411 + unsigned int cnt_ce, cnt_ue; 412 + char *msg = drvdata->msg; 413 + size_t size = sizeof(drvdata->msg); 414 + size_t len = 0; 415 + 416 + cnt = readl(drvdata->base + AURORA_ERR_CNT_REG); 417 + attr_cap = readl(drvdata->base + AURORA_ERR_ATTR_CAP_REG); 418 + addr_cap = readl(drvdata->base + AURORA_ERR_ADDR_CAP_REG); 419 + way_cap = readl(drvdata->base + AURORA_ERR_WAY_CAP_REG); 420 + 421 + cnt_ce = (cnt & AURORA_ERR_CNT_CE_MASK) >> AURORA_ERR_CNT_CE_OFFSET; 422 + cnt_ue = (cnt & AURORA_ERR_CNT_UE_MASK) >> AURORA_ERR_CNT_UE_OFFSET; 423 + /* clear error counter registers */ 424 + if (cnt_ce || cnt_ue) 425 + writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG); 426 + 427 + if (!(attr_cap & AURORA_ERR_ATTR_CAP_VALID)) 428 + goto clear_remaining; 429 + 430 + src = (attr_cap & AURORA_ERR_ATTR_SRC_MSK) >> AURORA_ERR_ATTR_SRC_OFF; 431 + if (src <= 3) 432 + len += snprintf(msg+len, size-len, "src=CPU%d ", src); 433 + else 434 + len += snprintf(msg+len, size-len, "src=IO "); 435 + 436 + txn = (attr_cap & AURORA_ERR_ATTR_TXN_MSK) >> AURORA_ERR_ATTR_TXN_OFF; 437 + switch (txn) { 438 + case 0: 439 + len += snprintf(msg+len, size-len, "txn=Data-Read "); 440 + break; 441 + case 1: 442 + len += snprintf(msg+len, size-len, "txn=Isn-Read "); 443 + break; 444 + case 2: 445 + len += 
snprintf(msg+len, size-len, "txn=Clean-Flush "); 446 + break; 447 + case 3: 448 + len += snprintf(msg+len, size-len, "txn=Eviction "); 449 + break; 450 + case 4: 451 + len += snprintf(msg+len, size-len, 452 + "txn=Read-Modify-Write "); 453 + break; 454 + } 455 + 456 + err = (attr_cap & AURORA_ERR_ATTR_ERR_MSK) >> AURORA_ERR_ATTR_ERR_OFF; 457 + switch (err) { 458 + case 0: 459 + len += snprintf(msg+len, size-len, "err=CorrECC "); 460 + break; 461 + case 1: 462 + len += snprintf(msg+len, size-len, "err=UnCorrECC "); 463 + break; 464 + case 2: 465 + len += snprintf(msg+len, size-len, "err=TagParity "); 466 + break; 467 + } 468 + 469 + len += snprintf(msg+len, size-len, "addr=0x%x ", addr_cap & AURORA_ERR_ADDR_CAP_ADDR_MASK); 470 + len += snprintf(msg+len, size-len, "index=0x%x ", (way_cap & AURORA_ERR_WAY_IDX_MSK) >> AURORA_ERR_WAY_IDX_OFF); 471 + len += snprintf(msg+len, size-len, "way=0x%x", (way_cap & AURORA_ERR_WAY_CAP_WAY_MASK) >> AURORA_ERR_WAY_CAP_WAY_OFFSET); 472 + 473 + /* clear error capture registers */ 474 + writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG); 475 + if (err) { 476 + /* UnCorrECC or TagParity */ 477 + if (cnt_ue) 478 + cnt_ue--; 479 + edac_device_handle_ue(dci, 0, 0, drvdata->msg); 480 + } else { 481 + if (cnt_ce) 482 + cnt_ce--; 483 + edac_device_handle_ce(dci, 0, 0, drvdata->msg); 484 + } 485 + 486 + clear_remaining: 487 + /* report remaining errors */ 488 + while (cnt_ue--) 489 + edac_device_handle_ue(dci, 0, 0, "details unavailable (multiple errors)"); 490 + while (cnt_ce--) 491 + edac_device_handle_ue(dci, 0, 0, "details unavailable (multiple errors)"); 492 + } 493 + 494 + static void aurora_l2_poll(struct edac_device_ctl_info *dci) 495 + { 496 + #ifdef CONFIG_EDAC_DEBUG 497 + struct aurora_l2_drvdata *drvdata = dci->pvt_info; 498 + #endif 499 + 500 + aurora_l2_check(dci); 501 + #ifdef CONFIG_EDAC_DEBUG 502 + aurora_l2_inject(drvdata); 503 + #endif 504 + } 505 + 506 + static const struct of_device_id 
aurora_l2_of_match[] = { 507 + {.compatible = "marvell,aurora-system-cache",}, 508 + {}, 509 + }; 510 + MODULE_DEVICE_TABLE(of, aurora_l2_of_match); 511 + 512 + static int aurora_l2_probe(struct platform_device *pdev) 513 + { 514 + struct aurora_l2_drvdata *drvdata; 515 + struct edac_device_ctl_info *dci; 516 + const struct of_device_id *id; 517 + uint32_t l2x0_aux_ctrl; 518 + void __iomem *base; 519 + struct resource *r; 520 + 521 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 522 + if (!r) { 523 + dev_err(&pdev->dev, "Unable to get mem resource\n"); 524 + return -ENODEV; 525 + } 526 + 527 + base = devm_ioremap_resource(&pdev->dev, r); 528 + if (IS_ERR(base)) { 529 + dev_err(&pdev->dev, "Unable to map regs\n"); 530 + return PTR_ERR(base); 531 + } 532 + 533 + l2x0_aux_ctrl = readl(base + L2X0_AUX_CTRL); 534 + if (!(l2x0_aux_ctrl & AURORA_ACR_PARITY_EN)) 535 + dev_warn(&pdev->dev, "tag parity is not enabled"); 536 + if (!(l2x0_aux_ctrl & AURORA_ACR_ECC_EN)) 537 + dev_warn(&pdev->dev, "data ECC is not enabled"); 538 + 539 + dci = edac_device_alloc_ctl_info(sizeof(*drvdata), 540 + "cpu", 1, "L", 1, 2, NULL, 0, 0); 541 + if (!dci) 542 + return -ENOMEM; 543 + 544 + drvdata = dci->pvt_info; 545 + drvdata->base = base; 546 + dci->dev = &pdev->dev; 547 + platform_set_drvdata(pdev, dci); 548 + 549 + id = of_match_device(aurora_l2_of_match, &pdev->dev); 550 + dci->edac_check = aurora_l2_poll; 551 + dci->mod_name = pdev->dev.driver->name; 552 + dci->ctl_name = id ? 
id->compatible : "unknown"; 553 + dci->dev_name = dev_name(&pdev->dev); 554 + 555 + /* clear registers */ 556 + writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG); 557 + writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG); 558 + 559 + if (edac_device_add_device(dci)) { 560 + edac_device_free_ctl_info(dci); 561 + return -EINVAL; 562 + } 563 + 564 + #ifdef CONFIG_EDAC_DEBUG 565 + drvdata->debugfs = edac_debugfs_create_dir(dev_name(&pdev->dev)); 566 + if (drvdata->debugfs) { 567 + edac_debugfs_create_x32("inject_addr", 0644, 568 + drvdata->debugfs, 569 + &drvdata->inject_addr); 570 + edac_debugfs_create_x32("inject_mask", 0644, 571 + drvdata->debugfs, 572 + &drvdata->inject_mask); 573 + edac_debugfs_create_x8("inject_ctl", 0644, 574 + drvdata->debugfs, &drvdata->inject_ctl); 575 + } 576 + #endif 577 + 578 + return 0; 579 + } 580 + 581 + static int aurora_l2_remove(struct platform_device *pdev) 582 + { 583 + struct edac_device_ctl_info *dci = platform_get_drvdata(pdev); 584 + #ifdef CONFIG_EDAC_DEBUG 585 + struct aurora_l2_drvdata *drvdata = dci->pvt_info; 586 + 587 + edac_debugfs_remove_recursive(drvdata->debugfs); 588 + #endif 589 + edac_device_del_device(&pdev->dev); 590 + edac_device_free_ctl_info(dci); 591 + platform_set_drvdata(pdev, NULL); 592 + 593 + return 0; 594 + } 595 + 596 + static struct platform_driver aurora_l2_driver = { 597 + .probe = aurora_l2_probe, 598 + .remove = aurora_l2_remove, 599 + .driver = { 600 + .name = "aurora_l2_edac", 601 + .of_match_table = of_match_ptr(aurora_l2_of_match), 602 + }, 603 + }; 604 + 605 + /************************ Driver registration ******************************/ 606 + 607 + static struct platform_driver * const drivers[] = { 608 + &axp_mc_driver, 609 + &aurora_l2_driver, 610 + }; 611 + 612 + static int __init armada_xp_edac_init(void) 613 + { 614 + int res; 615 + 616 + /* only polling is supported */ 617 + edac_op_state = EDAC_OPSTATE_POLL; 618 + 619 + res = 
platform_register_drivers(drivers, ARRAY_SIZE(drivers)); 620 + if (res) 621 + pr_warn("Aramda XP EDAC drivers fail to register\n"); 622 + 623 + return 0; 624 + } 625 + module_init(armada_xp_edac_init); 626 + 627 + static void __exit armada_xp_edac_exit(void) 628 + { 629 + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); 630 + } 631 + module_exit(armada_xp_edac_exit); 632 + 633 + MODULE_LICENSE("GPL v2"); 634 + MODULE_AUTHOR("Pengutronix"); 635 + MODULE_DESCRIPTION("EDAC Drivers for Marvell Armada XP SDRAM and L2 Cache Controller");
+11
drivers/edac/debugfs.c
··· 138 138 debugfs_create_x16(name, mode, parent, value); 139 139 } 140 140 EXPORT_SYMBOL_GPL(edac_debugfs_create_x16); 141 + 142 + /* Wrapper for debugfs_create_x32(); falls back to the EDAC debugfs root when parent is NULL */ 143 + void edac_debugfs_create_x32(const char *name, umode_t mode, 144 + struct dentry *parent, u32 *value) 145 + { 146 + if (!parent) 147 + parent = edac_debugfs; 148 + 149 + debugfs_create_x32(name, mode, parent, value); 150 + } 151 + EXPORT_SYMBOL_GPL(edac_debugfs_create_x32);
+4
drivers/edac/edac_module.h
··· 82 82 struct dentry *parent, u8 *value); 83 83 void edac_debugfs_create_x16(const char *name, umode_t mode, 84 84 struct dentry *parent, u16 *value); 85 + void edac_debugfs_create_x32(const char *name, umode_t mode, 86 + struct dentry *parent, u32 *value); 85 87 #else 86 88 static inline void edac_debugfs_init(void) { } 87 89 static inline void edac_debugfs_exit(void) { } ··· 98 96 struct dentry *parent, u8 *value) { } 99 97 static inline void edac_debugfs_create_x16(const char *name, umode_t mode, 100 98 struct dentry *parent, u16 *value) { } 99 + static inline void edac_debugfs_create_x32(const char *name, umode_t mode, 100 + struct dentry *parent, u32 *value) { } 101 101 #endif 102 102 103 103 /*