Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'edac_updates_for_v6.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras

Pull EDAC updates from Borislav Petkov:

- i10nm:
- switch to using scnprintf()
- Add Granite Rapids-D support

- synopsys: Make sure ECC error and counter registers are cleared
during init/probing to avoid reporting stale errors

- igen6: Add Wildcat Lake SoCs support

- Make sure scrub features sysfs attributes are initialized properly

- Allocate memory repair sysfs attributes statically to reduce stack
usage

- Fix DIMM module size computation for DIMMs whose total capacity is a
non-power-of-two number, in amd64_edac

- Do not be too dramatic when reporting disabled memory controllers in
igen6_edac

- Add support to ie31200_edac for the following SoCs:
- Core i5-14600 and i7-14700
- Bartlett Lake-S SoCs
- Raptor Lake-HX

* tag 'edac_updates_for_v6.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras:
EDAC/{skx_common,i10nm}: Use scnprintf() for safer buffer handling
EDAC/synopsys: Clear the ECC counters on init
EDAC/ie31200: Add Intel Raptor Lake-HX SoCs support
EDAC/igen6: Add Intel Wildcat Lake SoCs support
EDAC/i10nm: Add Intel Granite Rapids-D support
EDAC/mem_repair: Reduce stack usage in edac_mem_repair_get_desc()
EDAC/igen6: Reduce log level to debug for absent memory controllers
EDAC/ie31200: Document which CPUs correspond to each Raptor Lake-S device ID
EDAC/ie31200: Enable support for Core i5-14600 and i7-14700
ie31200/EDAC: Add Intel Bartlett Lake-S SoCs support

+138 -100
+20 -10
drivers/edac/i10nm_base.c
··· 62 62 ((GET_BITFIELD(reg, 0, 10) << 12) + 0x140000) 63 63 64 64 #define I10NM_GNR_IMC_MMIO_OFFSET 0x24c000 65 + #define I10NM_GNR_D_IMC_MMIO_OFFSET 0x206000 65 66 #define I10NM_GNR_IMC_MMIO_SIZE 0x4000 66 67 #define I10NM_HBM_IMC_MMIO_SIZE 0x9000 67 68 #define I10NM_DDR_IMC_CH_CNT(reg) GET_BITFIELD(reg, 21, 24) ··· 344 343 345 344 status_mask = rrl->over_mask | rrl->uc_mask | rrl->v_mask; 346 345 347 - n = snprintf(msg, len, " retry_rd_err_log["); 346 + n = scnprintf(msg, len, " retry_rd_err_log["); 348 347 for (i = 0; i < rrl->set_num; i++) { 349 348 scrub = (rrl->modes[i] == FRE_SCRUB || rrl->modes[i] == LRE_SCRUB); 350 349 if (scrub_err != scrub) ··· 356 355 log = read_imc_reg(imc, ch, offset, width); 357 356 358 357 if (width == 4) 359 - n += snprintf(msg + n, len - n, "%.8llx ", log); 358 + n += scnprintf(msg + n, len - n, "%.8llx ", log); 360 359 else 361 - n += snprintf(msg + n, len - n, "%.16llx ", log); 360 + n += scnprintf(msg + n, len - n, "%.16llx ", log); 362 361 363 362 /* Clear RRL status if RRL in Linux control mode. */ 364 363 if (retry_rd_err_log == 2 && !j && (log & status_mask)) ··· 368 367 369 368 /* Move back one space. */ 370 369 n--; 371 - n += snprintf(msg + n, len - n, "]"); 370 + n += scnprintf(msg + n, len - n, "]"); 372 371 373 372 if (len - n > 0) { 374 - n += snprintf(msg + n, len - n, " correrrcnt["); 373 + n += scnprintf(msg + n, len - n, " correrrcnt["); 375 374 for (i = 0; i < rrl->cecnt_num && len - n > 0; i++) { 376 375 offset = rrl->cecnt_offsets[i]; 377 376 width = rrl->cecnt_widths[i]; ··· 379 378 380 379 /* CPUs {ICX,SPR} encode two counters per 4-byte CORRERRCNT register. */ 381 380 if (res_cfg->type <= SPR) { 382 - n += snprintf(msg + n, len - n, "%.4llx %.4llx ", 381 + n += scnprintf(msg + n, len - n, "%.4llx %.4llx ", 383 382 corr & 0xffff, corr >> 16); 384 383 } else { 385 384 /* CPUs {GNR} encode one counter per CORRERRCNT register. 
*/ 386 385 if (width == 4) 387 - n += snprintf(msg + n, len - n, "%.8llx ", corr); 386 + n += scnprintf(msg + n, len - n, "%.8llx ", corr); 388 387 else 389 - n += snprintf(msg + n, len - n, "%.16llx ", corr); 388 + n += scnprintf(msg + n, len - n, "%.16llx ", corr); 390 389 } 391 390 } 392 391 393 392 /* Move back one space. */ 394 393 n--; 395 - n += snprintf(msg + n, len - n, "]"); 394 + n += scnprintf(msg + n, len - n, "]"); 396 395 } 397 396 } 398 397 ··· 688 687 return NULL; 689 688 } 690 689 690 + static u32 get_gnr_imc_mmio_offset(void) 691 + { 692 + if (boot_cpu_data.x86_vfm == INTEL_GRANITERAPIDS_D) 693 + return I10NM_GNR_D_IMC_MMIO_OFFSET; 694 + 695 + return I10NM_GNR_IMC_MMIO_OFFSET; 696 + } 697 + 691 698 /** 692 699 * get_ddr_munit() - Get the resource of the i-th DDR memory controller. 693 700 * ··· 724 715 return NULL; 725 716 726 717 *offset = I10NM_GET_IMC_MMIO_OFFSET(reg) + 727 - I10NM_GNR_IMC_MMIO_OFFSET + 718 + get_gnr_imc_mmio_offset() + 728 719 physical_idx * I10NM_GNR_IMC_MMIO_SIZE; 729 720 *size = I10NM_GNR_IMC_MMIO_SIZE; 730 721 ··· 1039 1030 X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_cfg), 1040 1031 X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_cfg), 1041 1032 X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_cfg), 1033 + X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, &gnr_cfg), 1042 1034 X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_cfg), 1043 1035 X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_cfg), 1044 1036 X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &gnr_cfg),
+34 -4
drivers/edac/ie31200_edac.c
··· 87 87 #define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10 0x3eca 88 88 89 89 /* Raptor Lake-S */ 90 - #define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1 0xa703 91 - #define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2 0x4640 92 - #define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3 0x4630 93 - #define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4 0xa700 90 + #define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1 0xa703 /* 8P+8E, e.g. i7-13700 */ 91 + #define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2 0x4640 /* 6P+8E, e.g. i5-13500, i5-13600, i5-14500 */ 92 + #define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3 0x4630 /* 4P+0E, e.g. i3-13100E */ 93 + #define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4 0xa700 /* 8P+16E, e.g. i9-13900, i9-14900 */ 94 + #define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_5 0xa740 /* 8P+12E, e.g. i7-14700 */ 95 + #define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_6 0xa704 /* 6P+8E, e.g. i5-14600 */ 96 + 97 + /* Raptor Lake-HX */ 98 + #define PCI_DEVICE_ID_INTEL_IE31200_RPL_HX_1 0xa702 /* 8P+16E, e.g. i9-13950HX */ 94 99 95 100 /* Alder Lake-S */ 96 101 #define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1 0x4660 102 + 103 + /* Bartlett Lake-S */ 104 + #define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_1 0x4639 105 + #define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_2 0x463c 106 + #define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_3 0x4642 107 + #define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_4 0x4643 108 + #define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_5 0xa731 109 + #define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_6 0xa732 110 + #define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_7 0xa733 111 + #define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_8 0xa741 112 + #define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_9 0xa744 113 + #define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_10 0xa745 97 114 98 115 #define IE31200_RANKS_PER_CHANNEL 8 99 116 #define IE31200_DIMMS_PER_CHANNEL 2 ··· 757 740 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2), (kernel_ulong_t)&rpl_s_cfg}, 758 741 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3), (kernel_ulong_t)&rpl_s_cfg}, 759 742 { PCI_VDEVICE(INTEL, 
PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4), (kernel_ulong_t)&rpl_s_cfg}, 743 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_5), (kernel_ulong_t)&rpl_s_cfg}, 744 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_6), (kernel_ulong_t)&rpl_s_cfg}, 745 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_HX_1), (kernel_ulong_t)&rpl_s_cfg}, 760 746 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1), (kernel_ulong_t)&rpl_s_cfg}, 747 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_1), (kernel_ulong_t)&rpl_s_cfg}, 748 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_2), (kernel_ulong_t)&rpl_s_cfg}, 749 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_3), (kernel_ulong_t)&rpl_s_cfg}, 750 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_4), (kernel_ulong_t)&rpl_s_cfg}, 751 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_5), (kernel_ulong_t)&rpl_s_cfg}, 752 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_6), (kernel_ulong_t)&rpl_s_cfg}, 753 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_7), (kernel_ulong_t)&rpl_s_cfg}, 754 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_8), (kernel_ulong_t)&rpl_s_cfg}, 755 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_9), (kernel_ulong_t)&rpl_s_cfg}, 756 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_10), (kernel_ulong_t)&rpl_s_cfg}, 761 757 { 0, } /* 0 terminated list. */ 762 758 }; 763 759 MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
+16 -1
drivers/edac/igen6_edac.c
··· 275 275 #define DID_PTL_H_SKU2 0xb001 276 276 #define DID_PTL_H_SKU3 0xb002 277 277 278 + /* Compute die IDs for Wildcat Lake with IBECC */ 279 + #define DID_WCL_SKU1 0xfd00 280 + 278 281 static int get_mchbar(struct pci_dev *pdev, u64 *mchbar) 279 282 { 280 283 union { ··· 572 569 .err_addr_to_imc_addr = adl_err_addr_to_imc_addr, 573 570 }; 574 571 572 + static struct res_config wcl_cfg = { 573 + .machine_check = true, 574 + .num_imc = 1, 575 + .imc_base = 0xd800, 576 + .ibecc_base = 0xd400, 577 + .ibecc_error_log_offset = 0x170, 578 + .ibecc_available = mtl_p_ibecc_available, 579 + .err_addr_to_sys_addr = adl_err_addr_to_sys_addr, 580 + .err_addr_to_imc_addr = adl_err_addr_to_imc_addr, 581 + }; 582 + 575 583 static struct pci_device_id igen6_pci_tbl[] = { 576 584 { PCI_VDEVICE(INTEL, DID_EHL_SKU5), (kernel_ulong_t)&ehl_cfg }, 577 585 { PCI_VDEVICE(INTEL, DID_EHL_SKU6), (kernel_ulong_t)&ehl_cfg }, ··· 636 622 { PCI_VDEVICE(INTEL, DID_PTL_H_SKU1), (kernel_ulong_t)&mtl_p_cfg }, 637 623 { PCI_VDEVICE(INTEL, DID_PTL_H_SKU2), (kernel_ulong_t)&mtl_p_cfg }, 638 624 { PCI_VDEVICE(INTEL, DID_PTL_H_SKU3), (kernel_ulong_t)&mtl_p_cfg }, 625 + { PCI_VDEVICE(INTEL, DID_WCL_SKU1), (kernel_ulong_t)&wcl_cfg }, 639 626 { }, 640 627 }; 641 628 MODULE_DEVICE_TABLE(pci, igen6_pci_tbl); ··· 1366 1351 } 1367 1352 1368 1353 if (lmc < res_cfg->num_imc) { 1369 - igen6_printk(KERN_WARNING, "Expected %d mcs, but only %d detected.", 1354 + igen6_printk(KERN_DEBUG, "Expected %d mcs, but only %d detected.", 1370 1355 res_cfg->num_imc, lmc); 1371 1356 res_cfg->num_imc = lmc; 1372 1357 }
+22 -34
drivers/edac/mem_repair.c
··· 286 286 return 0; 287 287 } 288 288 289 - #define MR_ATTR_RO(_name, _instance) \ 290 - ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_RO(_name), \ 291 - .instance = _instance }) 292 - 293 - #define MR_ATTR_WO(_name, _instance) \ 294 - ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_WO(_name), \ 295 - .instance = _instance }) 296 - 297 - #define MR_ATTR_RW(_name, _instance) \ 298 - ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_RW(_name), \ 299 - .instance = _instance }) 289 + static const struct device_attribute mem_repair_dev_attr[] = { 290 + [MR_TYPE] = __ATTR_RO(repair_type), 291 + [MR_PERSIST_MODE] = __ATTR_RW(persist_mode), 292 + [MR_SAFE_IN_USE] = __ATTR_RO(repair_safe_when_in_use), 293 + [MR_HPA] = __ATTR_RW(hpa), 294 + [MR_MIN_HPA] = __ATTR_RO(min_hpa), 295 + [MR_MAX_HPA] = __ATTR_RO(max_hpa), 296 + [MR_DPA] = __ATTR_RW(dpa), 297 + [MR_MIN_DPA] = __ATTR_RO(min_dpa), 298 + [MR_MAX_DPA] = __ATTR_RO(max_dpa), 299 + [MR_NIBBLE_MASK] = __ATTR_RW(nibble_mask), 300 + [MR_BANK_GROUP] = __ATTR_RW(bank_group), 301 + [MR_BANK] = __ATTR_RW(bank), 302 + [MR_RANK] = __ATTR_RW(rank), 303 + [MR_ROW] = __ATTR_RW(row), 304 + [MR_COLUMN] = __ATTR_RW(column), 305 + [MR_CHANNEL] = __ATTR_RW(channel), 306 + [MR_SUB_CHANNEL] = __ATTR_RW(sub_channel), 307 + [MEM_DO_REPAIR] = __ATTR_WO(repair) 308 + }; 300 309 301 310 static int mem_repair_create_desc(struct device *dev, 302 311 const struct attribute_group **attr_groups, ··· 314 305 struct edac_mem_repair_context *ctx; 315 306 struct attribute_group *group; 316 307 int i; 317 - struct edac_mem_repair_dev_attr dev_attr[] = { 318 - [MR_TYPE] = MR_ATTR_RO(repair_type, instance), 319 - [MR_PERSIST_MODE] = MR_ATTR_RW(persist_mode, instance), 320 - [MR_SAFE_IN_USE] = MR_ATTR_RO(repair_safe_when_in_use, instance), 321 - [MR_HPA] = MR_ATTR_RW(hpa, instance), 322 - [MR_MIN_HPA] = MR_ATTR_RO(min_hpa, instance), 323 - [MR_MAX_HPA] = MR_ATTR_RO(max_hpa, instance), 324 - [MR_DPA] = MR_ATTR_RW(dpa, instance), 325 - 
[MR_MIN_DPA] = MR_ATTR_RO(min_dpa, instance), 326 - [MR_MAX_DPA] = MR_ATTR_RO(max_dpa, instance), 327 - [MR_NIBBLE_MASK] = MR_ATTR_RW(nibble_mask, instance), 328 - [MR_BANK_GROUP] = MR_ATTR_RW(bank_group, instance), 329 - [MR_BANK] = MR_ATTR_RW(bank, instance), 330 - [MR_RANK] = MR_ATTR_RW(rank, instance), 331 - [MR_ROW] = MR_ATTR_RW(row, instance), 332 - [MR_COLUMN] = MR_ATTR_RW(column, instance), 333 - [MR_CHANNEL] = MR_ATTR_RW(channel, instance), 334 - [MR_SUB_CHANNEL] = MR_ATTR_RW(sub_channel, instance), 335 - [MEM_DO_REPAIR] = MR_ATTR_WO(repair, instance) 336 - }; 337 - 338 308 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 339 309 if (!ctx) 340 310 return -ENOMEM; 341 311 342 312 for (i = 0; i < MR_MAX_ATTRS; i++) { 343 - memcpy(&ctx->mem_repair_dev_attr[i], 344 - &dev_attr[i], sizeof(dev_attr[i])); 313 + ctx->mem_repair_dev_attr[i].dev_attr = mem_repair_dev_attr[i]; 314 + ctx->mem_repair_dev_attr[i].instance = instance; 345 315 sysfs_attr_init(&ctx->mem_repair_dev_attr[i].dev_attr.attr); 346 316 ctx->mem_repair_attrs[i] = 347 317 &ctx->mem_repair_dev_attr[i].dev_attr.attr;
+2 -2
drivers/edac/skx_common.c
··· 670 670 } 671 671 672 672 if (res->decoded_by_adxl) { 673 - len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s", 673 + len = scnprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s", 674 674 overflow ? " OVERFLOW" : "", 675 675 (uncorrected_error && recoverable) ? " recoverable" : "", 676 676 mscod, errcode, adxl_msg); 677 677 } else { 678 - len = snprintf(skx_msg, MSG_SIZE, 678 + len = scnprintf(skx_msg, MSG_SIZE, 679 679 "%s%s err_code:0x%04x:0x%04x ProcessorSocketId:0x%x MemoryControllerId:0x%x PhysicalRankId:0x%x Row:0x%x Column:0x%x Bank:0x%x BankGroup:0x%x", 680 680 overflow ? " OVERFLOW" : "", 681 681 (uncorrected_error && recoverable) ? " recoverable" : "",
+44 -49
drivers/edac/synopsys_edac.c
··· 332 332 #endif 333 333 }; 334 334 335 + enum synps_platform_type { 336 + ZYNQ, 337 + ZYNQMP, 338 + SYNPS, 339 + }; 340 + 335 341 /** 336 342 * struct synps_platform_data - synps platform data structure. 343 + * @platform: Identifies the target hardware platform 337 344 * @get_error_info: Get EDAC error info. 338 345 * @get_mtype: Get mtype. 339 346 * @get_dtype: Get dtype. 340 - * @get_ecc_state: Get ECC state. 341 347 * @get_mem_info: Get EDAC memory info 342 348 * @quirks: To differentiate IPs. 343 349 */ 344 350 struct synps_platform_data { 351 + enum synps_platform_type platform; 345 352 int (*get_error_info)(struct synps_edac_priv *priv); 346 353 enum mem_type (*get_mtype)(const void __iomem *base); 347 354 enum dev_type (*get_dtype)(const void __iomem *base); 348 - bool (*get_ecc_state)(void __iomem *base); 349 355 #ifdef CONFIG_EDAC_DEBUG 350 356 u64 (*get_mem_info)(struct synps_edac_priv *priv); 351 357 #endif ··· 726 720 return dt; 727 721 } 728 722 729 - /** 730 - * zynq_get_ecc_state - Return the controller ECC enable/disable status. 731 - * @base: DDR memory controller base address. 732 - * 733 - * Get the ECC enable/disable status of the controller. 734 - * 735 - * Return: true if enabled, otherwise false. 
736 - */ 737 - static bool zynq_get_ecc_state(void __iomem *base) 723 + static bool get_ecc_state(struct synps_edac_priv *priv) 738 724 { 725 + u32 ecctype, clearval; 739 726 enum dev_type dt; 740 - u32 ecctype; 741 727 742 - dt = zynq_get_dtype(base); 743 - if (dt == DEV_UNKNOWN) 744 - return false; 728 + if (priv->p_data->platform == ZYNQ) { 729 + dt = zynq_get_dtype(priv->baseaddr); 730 + if (dt == DEV_UNKNOWN) 731 + return false; 745 732 746 - ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK; 747 - if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2)) 748 - return true; 733 + ecctype = readl(priv->baseaddr + SCRUB_OFST) & SCRUB_MODE_MASK; 734 + if (ecctype == SCRUB_MODE_SECDED && dt == DEV_X2) { 735 + clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_UE_ERR; 736 + writel(clearval, priv->baseaddr + ECC_CTRL_OFST); 737 + writel(0x0, priv->baseaddr + ECC_CTRL_OFST); 738 + return true; 739 + } 740 + } else { 741 + dt = zynqmp_get_dtype(priv->baseaddr); 742 + if (dt == DEV_UNKNOWN) 743 + return false; 749 744 750 - return false; 751 - } 752 - 753 - /** 754 - * zynqmp_get_ecc_state - Return the controller ECC enable/disable status. 755 - * @base: DDR memory controller base address. 756 - * 757 - * Get the ECC enable/disable status for the controller. 758 - * 759 - * Return: a ECC status boolean i.e true/false - enabled/disabled. 
760 - */ 761 - static bool zynqmp_get_ecc_state(void __iomem *base) 762 - { 763 - enum dev_type dt; 764 - u32 ecctype; 765 - 766 - dt = zynqmp_get_dtype(base); 767 - if (dt == DEV_UNKNOWN) 768 - return false; 769 - 770 - ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK; 771 - if ((ecctype == SCRUB_MODE_SECDED) && 772 - ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8))) 773 - return true; 745 + ecctype = readl(priv->baseaddr + ECC_CFG0_OFST) & SCRUB_MODE_MASK; 746 + if (ecctype == SCRUB_MODE_SECDED && 747 + (dt == DEV_X2 || dt == DEV_X4 || dt == DEV_X8)) { 748 + clearval = readl(priv->baseaddr + ECC_CLR_OFST) | 749 + ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT | 750 + ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT; 751 + writel(clearval, priv->baseaddr + ECC_CLR_OFST); 752 + return true; 753 + } 754 + } 774 755 775 756 return false; 776 757 } ··· 927 934 } 928 935 929 936 static const struct synps_platform_data zynq_edac_def = { 937 + .platform = ZYNQ, 930 938 .get_error_info = zynq_get_error_info, 931 939 .get_mtype = zynq_get_mtype, 932 940 .get_dtype = zynq_get_dtype, 933 - .get_ecc_state = zynq_get_ecc_state, 934 941 .quirks = 0, 935 942 }; 936 943 937 944 static const struct synps_platform_data zynqmp_edac_def = { 945 + .platform = ZYNQMP, 938 946 .get_error_info = zynqmp_get_error_info, 939 947 .get_mtype = zynqmp_get_mtype, 940 948 .get_dtype = zynqmp_get_dtype, 941 - .get_ecc_state = zynqmp_get_ecc_state, 942 949 #ifdef CONFIG_EDAC_DEBUG 943 950 .get_mem_info = zynqmp_get_mem_info, 944 951 #endif ··· 950 957 }; 951 958 952 959 static const struct synps_platform_data synopsys_edac_def = { 960 + .platform = SYNPS, 953 961 .get_error_info = zynqmp_get_error_info, 954 962 .get_mtype = zynqmp_get_mtype, 955 963 .get_dtype = zynqmp_get_dtype, 956 - .get_ecc_state = zynqmp_get_ecc_state, 957 964 .quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR 958 965 #ifdef CONFIG_EDAC_DEBUG 959 966 | DDR_ECC_DATA_POISON_SUPPORT ··· 1383 1390 if (!p_data) 1384 
1391 return -ENODEV; 1385 1392 1386 - if (!p_data->get_ecc_state(baseaddr)) { 1387 - edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n"); 1388 - return -ENXIO; 1389 - } 1390 1393 1391 1394 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 1392 1395 layers[0].size = SYNPS_EDAC_NR_CSROWS; ··· 1402 1413 priv = mci->pvt_info; 1403 1414 priv->baseaddr = baseaddr; 1404 1415 priv->p_data = p_data; 1416 + if (!get_ecc_state(priv)) { 1417 + edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n"); 1418 + rc = -ENODEV; 1419 + goto free_edac_mc; 1420 + } 1421 + 1405 1422 spin_lock_init(&priv->reglock); 1406 1423 1407 1424 mc_init(mci, pdev);