/*
 * Generic EDAC defs
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 */
#ifndef _LINUX_EDAC_H_
#define _LINUX_EDAC_H_

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/numa.h>

#define EDAC_DEVICE_NAME_LEN	31

struct device;

#define EDAC_OPSTATE_INVAL	-1
#define EDAC_OPSTATE_POLL	0
#define EDAC_OPSTATE_NMI	1
#define EDAC_OPSTATE_INT	2

extern int edac_op_state;

struct bus_type *edac_get_sysfs_subsys(void);
int edac_get_report_status(void);
void edac_set_report_status(int new);

enum {
	EDAC_REPORTING_ENABLED,
	EDAC_REPORTING_DISABLED,
	EDAC_REPORTING_FORCE
};

static inline void opstate_init(void)
{
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_NMI:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_POLL;
	}
	return;
}
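/*
 * Typical driver usage (illustrative sketch only, nothing here is mandated
 * by this header): an EDAC platform driver usually exposes edac_op_state as
 * a module parameter and normalizes it with opstate_init() before
 * registering its memory controller. All "mydrv" names below are
 * placeholders:
 *
 *	module_param(edac_op_state, int, 0444);
 *	MODULE_PARM_DESC(edac_op_state,
 *			 "EDAC error reporting state: 0=Poll, 1=NMI");
 *
 *	static int mydrv_probe(struct platform_device *pdev)
 *	{
 *		opstate_init();
 *		...
 *	}
 */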
/* Max length of a DIMM label */
#define EDAC_MC_LABEL_LEN	31

/* Maximum size of the location string */
#define LOCATION_SIZE 256

/* Defines the maximum number of labels that can be reported */
#define EDAC_MAX_LABELS		8

/* String used to join two or more labels */
#define OTHER_LABEL " or "

/**
 * enum dev_type - describe the type of memory DRAM chips used at the stick
 * @DEV_UNKNOWN:	Can't be determined, or MC doesn't support detecting it
 * @DEV_X1:		1 bit for data
 * @DEV_X2:		2 bits for data
 * @DEV_X4:		4 bits for data
 * @DEV_X8:		8 bits for data
 * @DEV_X16:		16 bits for data
 * @DEV_X32:		32 bits for data
 * @DEV_X64:		64 bits for data
 *
 * Typical values are x4 and x8.
 */
enum dev_type {
	DEV_UNKNOWN = 0,
	DEV_X1,
	DEV_X2,
	DEV_X4,
	DEV_X8,
	DEV_X16,
	DEV_X32,		/* Do these parts exist? */
	DEV_X64			/* Do these parts exist? */
};

#define DEV_FLAG_UNKNOWN	BIT(DEV_UNKNOWN)
#define DEV_FLAG_X1		BIT(DEV_X1)
#define DEV_FLAG_X2		BIT(DEV_X2)
#define DEV_FLAG_X4		BIT(DEV_X4)
#define DEV_FLAG_X8		BIT(DEV_X8)
#define DEV_FLAG_X16		BIT(DEV_X16)
#define DEV_FLAG_X32		BIT(DEV_X32)
#define DEV_FLAG_X64		BIT(DEV_X64)

/**
 * enum hw_event_mc_err_type - type of the detected error
 *
 * @HW_EVENT_ERR_CORRECTED:	Corrected Error - Indicates that an ECC
 *				corrected error was detected
 * @HW_EVENT_ERR_UNCORRECTED:	Uncorrected Error - Indicates an error that
 *				can't be corrected by ECC, but it is not
 *				fatal (maybe it is on an unused memory area,
 *				or the memory controller could recover from
 *				it, for example, by re-trying the operation).
 * @HW_EVENT_ERR_DEFERRED:	Deferred Error - Indicates an uncorrectable
 *				error whose handling is not urgent. This could
 *				be due to hardware data poisoning where the
 *				system can continue operation until the
 *				poisoned data is consumed. Preemptive measures
 *				may also be taken, e.g. offlining pages, etc.
 * @HW_EVENT_ERR_FATAL:		Fatal Error - Uncorrected error that could not
 *				be recovered.
 * @HW_EVENT_ERR_INFO:		Informational - The CPER spec defines a fourth
 *				type of error: informational logs.
 */
enum hw_event_mc_err_type {
	HW_EVENT_ERR_CORRECTED,
	HW_EVENT_ERR_UNCORRECTED,
	HW_EVENT_ERR_DEFERRED,
	HW_EVENT_ERR_FATAL,
	HW_EVENT_ERR_INFO,
};

static inline char *mc_event_error_type(const unsigned int err_type)
{
	switch (err_type) {
	case HW_EVENT_ERR_CORRECTED:
		return "Corrected";
	case HW_EVENT_ERR_UNCORRECTED:
		return "Uncorrected";
	case HW_EVENT_ERR_DEFERRED:
		return "Deferred";
	case HW_EVENT_ERR_FATAL:
		return "Fatal";
	default:
	case HW_EVENT_ERR_INFO:
		return "Info";
	}
}
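/*
 * Example (illustrative only): mc_event_error_type() is handy when
 * formatting log or trace output for a memory error, e.g. something along
 * the lines of:
 *
 *	pr_err("EDAC MC%d: %u %s error(s): %s\n", mci->mc_idx,
 *	       e->error_count, mc_event_error_type(e->type), e->msg);
 *
 * where "e" is a struct edac_raw_error_desc, defined later in this file.
 */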
/**
 * enum mem_type - memory types. For a more detailed reference, please see
 *			http://en.wikipedia.org/wiki/DRAM
 *
 * @MEM_EMPTY:		Empty csrow
 * @MEM_RESERVED:	Reserved csrow type
 * @MEM_UNKNOWN:	Unknown csrow type
 * @MEM_FPM:		FPM - Fast Page Mode, used on systems up to 1995.
 * @MEM_EDO:		EDO - Extended data out, used on systems up to 1998.
 * @MEM_BEDO:		BEDO - Burst Extended data out, an EDO variant.
 * @MEM_SDR:		SDR - Single data rate SDRAM
 *			http://en.wikipedia.org/wiki/Synchronous_dynamic_random-access_memory
 *			They use 3 pins for chip select: Pins 0 and 2 are
 *			for rank 0; pins 1 and 3 are for rank 1, if the memory
 *			is dual-rank.
 * @MEM_RDR:		Registered SDR SDRAM
 * @MEM_DDR:		Double data rate SDRAM
 *			http://en.wikipedia.org/wiki/DDR_SDRAM
 * @MEM_RDDR:		Registered Double data rate SDRAM
 *			This is a variant of the DDR memories.
 *			A registered memory has a buffer inside it, hiding
 *			part of the memory details from the memory controller.
 * @MEM_RMBS:		Rambus DRAM, used on a few Pentium III/IV controllers.
 * @MEM_DDR2:		DDR2 RAM, as described at JEDEC JESD79-2F.
 *			Those memories are labeled as "PC2-" instead of "PC" to
 *			differentiate from DDR.
 * @MEM_FB_DDR2:	Fully-Buffered DDR2, as described at JEDEC Std No. 205
 *			and JESD206.
 *			Those memories are accessed per DIMM slot, and not by
 *			a chip select signal.
 * @MEM_RDDR2:		Registered DDR2 RAM
 *			This is a variant of the DDR2 memories.
 * @MEM_XDR:		Rambus XDR
 *			It is an evolution of the original RAMBUS memories,
 *			created to compete with DDR2. It wasn't used on any
 *			x86 arch, but the cell_edac PPC memory controller
 *			uses it.
 * @MEM_DDR3:		DDR3 RAM
 * @MEM_RDDR3:		Registered DDR3 RAM
 *			This is a variant of the DDR3 memories.
 * @MEM_LRDDR3:		Load-Reduced DDR3 memory.
 * @MEM_DDR4:		Unbuffered DDR4 RAM
 * @MEM_RDDR4:		Registered DDR4 RAM
 *			This is a variant of the DDR4 memories.
 * @MEM_LRDDR4:		Load-Reduced DDR4 memory.
 * @MEM_NVDIMM:		Non-volatile RAM
 */
enum mem_type {
	MEM_EMPTY = 0,
	MEM_RESERVED,
	MEM_UNKNOWN,
	MEM_FPM,
	MEM_EDO,
	MEM_BEDO,
	MEM_SDR,
	MEM_RDR,
	MEM_DDR,
	MEM_RDDR,
	MEM_RMBS,
	MEM_DDR2,
	MEM_FB_DDR2,
	MEM_RDDR2,
	MEM_XDR,
	MEM_DDR3,
	MEM_RDDR3,
	MEM_LRDDR3,
	MEM_DDR4,
	MEM_RDDR4,
	MEM_LRDDR4,
	MEM_NVDIMM,
};

#define MEM_FLAG_EMPTY		BIT(MEM_EMPTY)
#define MEM_FLAG_RESERVED	BIT(MEM_RESERVED)
#define MEM_FLAG_UNKNOWN	BIT(MEM_UNKNOWN)
#define MEM_FLAG_FPM		BIT(MEM_FPM)
#define MEM_FLAG_EDO		BIT(MEM_EDO)
#define MEM_FLAG_BEDO		BIT(MEM_BEDO)
#define MEM_FLAG_SDR		BIT(MEM_SDR)
#define MEM_FLAG_RDR		BIT(MEM_RDR)
#define MEM_FLAG_DDR		BIT(MEM_DDR)
#define MEM_FLAG_RDDR		BIT(MEM_RDDR)
#define MEM_FLAG_RMBS		BIT(MEM_RMBS)
#define MEM_FLAG_DDR2		BIT(MEM_DDR2)
#define MEM_FLAG_FB_DDR2	BIT(MEM_FB_DDR2)
#define MEM_FLAG_RDDR2		BIT(MEM_RDDR2)
#define MEM_FLAG_XDR		BIT(MEM_XDR)
#define MEM_FLAG_DDR3		BIT(MEM_DDR3)
#define MEM_FLAG_RDDR3		BIT(MEM_RDDR3)
#define MEM_FLAG_DDR4		BIT(MEM_DDR4)
#define MEM_FLAG_RDDR4		BIT(MEM_RDDR4)
#define MEM_FLAG_LRDDR4		BIT(MEM_LRDDR4)
#define MEM_FLAG_NVDIMM		BIT(MEM_NVDIMM)

/**
 * enum edac_type - Error Detection and Correction capabilities and mode
 * @EDAC_UNKNOWN:	Unknown if ECC is available
 * @EDAC_NONE:		Doesn't support ECC
 * @EDAC_RESERVED:	Reserved ECC type
 * @EDAC_PARITY:	Detects parity errors
 * @EDAC_EC:		Error Checking - no correction
 * @EDAC_SECDED:	Single bit error correction, double bit error detection
 * @EDAC_S2ECD2ED:	Chipkill x2 devices - do these exist?
 * @EDAC_S4ECD4ED:	Chipkill x4 devices
 * @EDAC_S8ECD8ED:	Chipkill x8 devices
 * @EDAC_S16ECD16ED:	Chipkill x16 devices
 */
enum edac_type {
	EDAC_UNKNOWN = 0,
	EDAC_NONE,
	EDAC_RESERVED,
	EDAC_PARITY,
	EDAC_EC,
	EDAC_SECDED,
	EDAC_S2ECD2ED,
	EDAC_S4ECD4ED,
	EDAC_S8ECD8ED,
	EDAC_S16ECD16ED,
};

#define EDAC_FLAG_UNKNOWN	BIT(EDAC_UNKNOWN)
#define EDAC_FLAG_NONE		BIT(EDAC_NONE)
#define EDAC_FLAG_PARITY	BIT(EDAC_PARITY)
#define EDAC_FLAG_EC		BIT(EDAC_EC)
#define EDAC_FLAG_SECDED	BIT(EDAC_SECDED)
#define EDAC_FLAG_S2ECD2ED	BIT(EDAC_S2ECD2ED)
#define EDAC_FLAG_S4ECD4ED	BIT(EDAC_S4ECD4ED)
#define EDAC_FLAG_S8ECD8ED	BIT(EDAC_S8ECD8ED)
#define EDAC_FLAG_S16ECD16ED	BIT(EDAC_S16ECD16ED)

/**
 * enum scrub_type - scrubbing capabilities
 * @SCRUB_UNKNOWN:		Unknown if scrubber is available
 * @SCRUB_NONE:			No scrubber
 * @SCRUB_SW_PROG:		SW progressive (sequential) scrubbing
 * @SCRUB_SW_SRC:		Software scrub only errors
 * @SCRUB_SW_PROG_SRC:		Progressive software scrub from an error
 * @SCRUB_SW_TUNABLE:		Software scrub frequency is tunable
 * @SCRUB_HW_PROG:		HW progressive (sequential) scrubbing
 * @SCRUB_HW_SRC:		Hardware scrub only errors
 * @SCRUB_HW_PROG_SRC:		Progressive hardware scrub from an error
 * @SCRUB_HW_TUNABLE:		Hardware scrub frequency is tunable
 */
enum scrub_type {
	SCRUB_UNKNOWN =	0,
	SCRUB_NONE,
	SCRUB_SW_PROG,
	SCRUB_SW_SRC,
	SCRUB_SW_PROG_SRC,
	SCRUB_SW_TUNABLE,
	SCRUB_HW_PROG,
	SCRUB_HW_SRC,
	SCRUB_HW_PROG_SRC,
	SCRUB_HW_TUNABLE
};

#define SCRUB_FLAG_SW_PROG	BIT(SCRUB_SW_PROG)
#define SCRUB_FLAG_SW_SRC	BIT(SCRUB_SW_SRC)
#define SCRUB_FLAG_SW_PROG_SRC	BIT(SCRUB_SW_PROG_SRC)
#define SCRUB_FLAG_SW_TUN	BIT(SCRUB_SW_TUNABLE)
#define SCRUB_FLAG_HW_PROG	BIT(SCRUB_HW_PROG)
#define SCRUB_FLAG_HW_SRC	BIT(SCRUB_HW_SRC)
#define SCRUB_FLAG_HW_PROG_SRC	BIT(SCRUB_HW_PROG_SRC)
#define SCRUB_FLAG_HW_TUN	BIT(SCRUB_HW_TUNABLE)
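/*
 * Illustrative sketch (not mandated by this header): the SCRUB_FLAG_* masks
 * describe what a memory controller *can* do, while enum scrub_type records
 * the mode that is currently active. A driver whose hardware can scrub at
 * the error source and whose scrub rate is tunable might fill its
 * struct mem_ctl_info (defined below) roughly like:
 *
 *	mci->scrub_cap  = SCRUB_FLAG_HW_SRC | SCRUB_FLAG_HW_TUN;
 *	mci->scrub_mode = SCRUB_HW_SRC;
 */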
/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */

/* EDAC internal operation states */
#define	OP_ALLOC		0x100
#define OP_RUNNING_POLL		0x201
#define OP_RUNNING_INTERRUPT	0x202
#define OP_RUNNING_POLL_INTR	0x203
#define OP_OFFLINE		0x300

/**
 * enum edac_mc_layer_type - memory controller hierarchy layer
 *
 * @EDAC_MC_LAYER_BRANCH:	memory layer is named "branch"
 * @EDAC_MC_LAYER_CHANNEL:	memory layer is named "channel"
 * @EDAC_MC_LAYER_SLOT:		memory layer is named "slot"
 * @EDAC_MC_LAYER_CHIP_SELECT:	memory layer is named "chip select"
 * @EDAC_MC_LAYER_ALL_MEM:	memory layout is unknown. All memory is mapped
 *				as a single memory area. This is used when
 *				retrieving errors from a firmware driven driver.
 *
 * This enum is used by the drivers to tell edac_mc_sysfs what name should
 * be used when describing a memory stick location.
 */
enum edac_mc_layer_type {
	EDAC_MC_LAYER_BRANCH,
	EDAC_MC_LAYER_CHANNEL,
	EDAC_MC_LAYER_SLOT,
	EDAC_MC_LAYER_CHIP_SELECT,
	EDAC_MC_LAYER_ALL_MEM,
};

/**
 * struct edac_mc_layer - describes the memory controller hierarchy
 * @type:		layer type
 * @size:		number of components per layer. For example,
 *			if the channel layer has two channels, size = 2
 * @is_virt_csrow:	This layer is part of the "csrow" when old API
 *			compatibility mode is enabled. Otherwise, it is
 *			a channel
 */
struct edac_mc_layer {
	enum edac_mc_layer_type	type;
	unsigned		size;
	bool			is_virt_csrow;
};

/*
 * Maximum number of layers used by the memory controller to uniquely
 * identify a single memory stick.
 * NOTE: Changing this constant requires changing not only the define below,
 * but also the existing code in the EDAC core, as some of it is optimized
 * for 3 layers.
 */
#define EDAC_MAX_LAYERS		3
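/*
 * Illustrative sketch: a driver describes its hierarchy as an array of
 * struct edac_mc_layer, ordered from the topmost layer down, and hands it
 * to the EDAC core when allocating a memory controller descriptor
 * (edac_mc_alloc(), declared in the EDAC core, not in this header). For a
 * controller with 2 channels of 4 DIMM slots each, that might look like:
 *
 *	struct edac_mc_layer layers[2];
 *
 *	layers[0].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[0].size = 2;
 *	layers[0].is_virt_csrow = false;
 *	layers[1].type = EDAC_MC_LAYER_SLOT;
 *	layers[1].size = 4;
 *	layers[1].is_virt_csrow = true;
 *
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct mydrv_pvt));
 *
 * "mydrv_pvt" is a placeholder for the driver's private data type.
 */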
struct dimm_info {
	struct device dev;

	char label[EDAC_MC_LABEL_LEN + 1];	/* DIMM label on motherboard */

	/* Memory location data */
	unsigned int location[EDAC_MAX_LAYERS];

	struct mem_ctl_info *mci;	/* the parent */
	unsigned int idx;		/* index within the parent dimm array */

	u32 grain;		/* granularity of reported error in bytes */
	enum dev_type dtype;	/* memory device type */
	enum mem_type mtype;	/* memory dimm type */
	enum edac_type edac_mode;	/* EDAC mode for this dimm */

	u32 nr_pages;			/* number of pages on this dimm */

	unsigned int csrow, cschannel;	/* Points to the old API data */

	u16 smbios_handle;		/* Handle for SMBIOS type 17 */

	u32 ce_count;
	u32 ue_count;
};

/**
 * struct rank_info - contains the information for one DIMM rank
 *
 * @chan_idx:	channel number where the rank is (typically, 0 or 1)
 * @ce_count:	number of correctable errors for this rank
 * @csrow:	A pointer to the chip select row structure (the parent
 *		structure). The location of the rank is given by
 *		the (csrow->csrow_idx, chan_idx) vector.
 * @dimm:	A pointer to the DIMM structure, where the DIMM label
 *		information is stored.
 *
 * FIXME: Currently, the EDAC core model will assume one DIMM per rank.
 *	  This is a bad assumption, but it makes this patch easier. Later
 *	  patches in this series will fix this issue.
 */
struct rank_info {
	int chan_idx;
	struct csrow_info *csrow;
	struct dimm_info *dimm;

	u32 ce_count;		/* Correctable Errors for this csrow */
};

struct csrow_info {
	struct device dev;

	/* Used only by edac_mc_find_csrow_by_page() */
	unsigned long first_page;	/* first page number in csrow */
	unsigned long last_page;	/* last page number in csrow */
	unsigned long page_mask;	/* used for interleaving -
					 * 0UL for non intlv */

	int csrow_idx;			/* the chip-select row */

	u32 ue_count;		/* Uncorrectable Errors for this csrow */
	u32 ce_count;		/* Correctable Errors for this csrow */

	struct mem_ctl_info *mci;	/* the parent */

	/* channel information for this csrow */
	u32 nr_channels;
	struct rank_info **channels;
};

/*
 * struct errcount_attribute_data - used to store the various error counts
 */
struct errcount_attribute_data {
	int n_layers;
	int pos[EDAC_MAX_LAYERS];
	int layer0, layer1, layer2;
};

/**
 * struct edac_raw_error_desc - Raw error report structure
 * @grain:			minimum granularity for an error report, in bytes
 * @error_count:		number of errors of the same type
 * @type:			severity of the error (CE/UE/Fatal)
 * @top_layer:			top layer of the error (layer[0])
 * @mid_layer:			middle layer of the error (layer[1])
 * @low_layer:			low layer of the error (layer[2])
 * @page_frame_number:		page where the error happened
 * @offset_in_page:		page offset
 * @syndrome:			syndrome of the error (or 0 if unknown or if
 *				the syndrome is not applicable)
 * @msg:			error message
 * @location:			location of the error
 * @label:			label of the affected DIMM(s)
 * @other_detail:		other driver-specific detail about the error
 */
struct edac_raw_error_desc {
	char location[LOCATION_SIZE];
	char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS];
	long grain;

	u16 error_count;
	enum hw_event_mc_err_type type;
	int top_layer;
	int mid_layer;
	int low_layer;
	unsigned long page_frame_number;
	unsigned long offset_in_page;
	unsigned long syndrome;
	const char *msg;
	const char *other_detail;
};

/* MEMORY controller information structure
 */
struct mem_ctl_info {
	struct device			dev;
	struct bus_type			*bus;

	struct list_head link;	/* for global list of mem_ctl_info structs */

	struct module *owner;	/* Module owner of this control struct */

	unsigned long mtype_cap;	/* memory types supported by mc */
	unsigned long edac_ctl_cap;	/* Mem controller EDAC capabilities */
	unsigned long edac_cap;	/* configuration capabilities - this is
				 * closely related to edac_ctl_cap. The
				 * difference is that the controller may be
				 * capable of s4ecd4ed which would be listed
				 * in edac_ctl_cap, but if channels aren't
				 * capable of s4ecd4ed then the edac_cap would
				 * not have that capability.
				 */
	unsigned long scrub_cap;	/* chipset scrub capabilities */
	enum scrub_type scrub_mode;	/* current scrub mode */

	/* Translates the sdram memory scrub rate given in bytes/sec to the
	   internal representation and configures whatever else needs
	   to be configured.
	 */
	int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw);

	/* Gets the current sdram memory scrub rate from the internal
	   representation and converts it to the closest matching
	   bandwidth in bytes/sec.
	 */
	int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci);


	/* pointer to edac checking routine */
	void (*edac_check) (struct mem_ctl_info * mci);

	/*
	 * Remaps memory pages: controller pages to physical pages.
	 * For most MC's, this will be NULL.
	 */
	/* FIXME - why not send the phys page to begin with? */
	unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
					   unsigned long page);
	int mc_idx;
	struct csrow_info **csrows;
	unsigned int nr_csrows, num_cschannel;

	/*
	 * Memory Controller hierarchy
	 *
	 * There are basically two types of memory controller: the ones that
	 * see memory sticks ("dimms"), and the ones that see memory ranks.
	 * All old memory controllers enumerate memory per rank, but most
	 * recent drivers enumerate memory per DIMM instead.
	 * When the memory controller is per rank, csbased is true.
	 */
	unsigned int n_layers;
	struct edac_mc_layer *layers;
	bool csbased;

	/*
	 * DIMM info. Will eventually remove the entire csrows_info some day
	 */
	unsigned int tot_dimms;
	struct dimm_info **dimms;

	/*
	 * FIXME - what about controllers on other busses? - IDs must be
	 * unique.  dev pointer should be sufficiently unique, but
	 * BUS:SLOT.FUNC numbers may not be unique.
	 */
	struct device *pdev;
	const char *mod_name;
	const char *ctl_name;
	const char *dev_name;
	void *pvt_info;
	unsigned long start_time;	/* mci load start time (in jiffies) */

	/*
	 * drivers shouldn't access those fields directly, as the core
	 * already handles that.
	 */
	u32 ce_noinfo_count, ue_noinfo_count;
	u32 ue_mc, ce_mc;

	struct completion complete;

	/* Additional top controller level attributes, but specified
	 * by the low level driver.
	 *
	 * Set by the low level driver to provide attributes at the
	 * controller level.
	 * An array of structures, NULL terminated
	 *
	 * If attributes are desired, then set to array of attributes
	 * If no attributes are desired, leave NULL
	 */
	const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;

	/* work struct for this MC */
	struct delayed_work work;

	/*
	 * Used to report an error. Keeping it inside this global struct
	 * means the memory is allocated by the EDAC core.
	 */
	struct edac_raw_error_desc error_desc;

	/* the internal state of this controller instance */
	int op_state;

	struct dentry *debugfs;
	u8 fake_inject_layer[EDAC_MAX_LAYERS];
	bool fake_inject_ue;
	u16 fake_inject_count;
};
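/*
 * Illustrative sketch of how a driver typically fills the fields it owns
 * before handing the controller to the EDAC core (the registration helpers
 * themselves, e.g. edac_mc_add_mc(), live in the EDAC core and are not
 * declared here). All names starting with "mydrv" are placeholders:
 *
 *	mci->pdev = &pdev->dev;
 *	mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
 *	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
 *	mci->edac_cap = EDAC_FLAG_SECDED;
 *	mci->mod_name = "mydrv_edac";
 *	mci->ctl_name = "mydrv_mc";
 *	mci->dev_name = dev_name(&pdev->dev);
 *	mci->edac_check = mydrv_check;	(optional polling callback)
 */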
#define mci_for_each_dimm(mci, dimm)				\
	for ((dimm) = (mci)->dimms[0];				\
	     (dimm);						\
	     (dimm) = (dimm)->idx + 1 < (mci)->tot_dimms	\
		     ? (mci)->dimms[(dimm)->idx + 1]		\
		     : NULL)
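/*
 * Example usage (illustrative only): walk every DIMM known to a memory
 * controller and sum up the pages it contributes:
 *
 *	struct dimm_info *dimm;
 *	u32 nr_pages = 0;
 *
 *	mci_for_each_dimm(mci, dimm)
 *		nr_pages += dimm->nr_pages;
 */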
/**
 * edac_get_dimm_by_index - Get DIMM info at @index from a memory
 *			    controller
 *
 * @mci:	MC descriptor struct mem_ctl_info
 * @index:	index in the memory controller's DIMM array
 *
 * Returns a struct dimm_info * or NULL on failure.
 */
static inline struct dimm_info *
edac_get_dimm_by_index(struct mem_ctl_info *mci, int index)
{
	if (index < 0 || index >= mci->tot_dimms)
		return NULL;

	if (WARN_ON_ONCE(mci->dimms[index]->idx != index))
		return NULL;

	return mci->dimms[index];
}

/**
 * edac_get_dimm - Get DIMM info from a memory controller given by
 *		   [layer0,layer1,layer2] position
 *
 * @mci:	MC descriptor struct mem_ctl_info
 * @layer0:	layer0 position
 * @layer1:	layer1 position. Unused if n_layers < 2
 * @layer2:	layer2 position. Unused if n_layers < 3
 *
 * For 1 layer, this function returns "dimms[layer0]";
 *
 * For 2 layers, this function is similar to allocating a two-dimensional
 * array and returning "dimms[layer0][layer1]";
 *
 * For 3 layers, this function is similar to allocating a three-dimensional
 * array and returning "dimms[layer0][layer1][layer2]";
 */
static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci,
	int layer0, int layer1, int layer2)
{
	int index;

	if (layer0 < 0
	    || (mci->n_layers > 1 && layer1 < 0)
	    || (mci->n_layers > 2 && layer2 < 0))
		return NULL;

	index = layer0;

	if (mci->n_layers > 1)
		index = index * mci->layers[1].size + layer1;

	if (mci->n_layers > 2)
		index = index * mci->layers[2].size + layer2;

	return edac_get_dimm_by_index(mci, index);
}
#endif /* _LINUX_EDAC_H_ */