Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

EDAC: Fixup scrubrate manipulation

Make the ->{get|set}_sdram_scrub_rate methods return the actual scrub rate
bandwidth they succeeded in setting, and remove the superfluous arg pointer
previously used for that. A negative return value still means that an error
occurred while setting the scrub rate. Document this for future reference.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>

+53 -64
+12 -12
drivers/edac/amd64_edac.c
··· 77 77 *FIXME: Produce a better mapping/linearisation. 78 78 */ 79 79 80 - struct scrubrate scrubrates[] = { 80 + 81 + struct scrubrate { 82 + u32 scrubval; /* bit pattern for scrub rate */ 83 + u32 bandwidth; /* bandwidth consumed (bytes/sec) */ 84 + } scrubrates[] = { 81 85 { 0x01, 1600000000UL}, 82 86 { 0x02, 800000000UL}, 83 87 { 0x03, 400000000UL}, ··· 155 151 } 156 152 157 153 scrubval = scrubrates[i].scrubval; 158 - if (scrubval) 159 - amd64_info("Setting scrub rate bandwidth: %u\n", 160 - scrubrates[i].bandwidth); 161 - else 162 - amd64_info("Turning scrubbing off.\n"); 163 154 164 155 pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F); 156 + 157 + if (scrubval) 158 + return scrubrates[i].bandwidth; 165 159 166 160 return 0; 167 161 } ··· 171 169 return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate); 172 170 } 173 171 174 - static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw) 172 + static int amd64_get_scrub_rate(struct mem_ctl_info *mci) 175 173 { 176 174 struct amd64_pvt *pvt = mci->pvt_info; 177 175 u32 scrubval = 0; 178 - int status = -1, i; 176 + int i, retval = -EINVAL; 179 177 180 178 amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval); 181 179 ··· 185 183 186 184 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { 187 185 if (scrubrates[i].scrubval == scrubval) { 188 - *bw = scrubrates[i].bandwidth; 189 - status = 0; 186 + retval = scrubrates[i].bandwidth; 190 187 break; 191 188 } 192 189 } 193 - 194 - return status; 190 + return retval; 195 191 } 196 192 197 193 /* Map from a CSROW entry to the mask entry that operates on it */
-6
drivers/edac/amd64_edac.h
··· 482 482 } flags; 483 483 }; 484 484 485 - struct scrubrate { 486 - u32 scrubval; /* bit pattern for scrub rate */ 487 - u32 bandwidth; /* bandwidth consumed (bytes/sec) */ 488 - }; 489 - 490 - extern struct scrubrate scrubrates[23]; 491 485 extern const char *tt_msgs[4]; 492 486 extern const char *ll_msgs[4]; 493 487 extern const char *rrrr_msgs[16];
+5 -4
drivers/edac/cpc925_edac.c
··· 818 818 } 819 819 820 820 /* Convert current back-ground scrub rate into byte/sec bandwith */ 821 - static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw) 821 + static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci) 822 822 { 823 823 struct cpc925_mc_pdata *pdata = mci->pvt_info; 824 + int bw; 824 825 u32 mscr; 825 826 u8 si; 826 827 ··· 833 832 if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || 834 833 (si == 0)) { 835 834 cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n"); 836 - *bw = 0; 835 + bw = 0; 837 836 } else 838 - *bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si; 837 + bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si; 839 838 840 - return 0; 839 + return bw; 841 840 } 842 841 843 842 /* Return 0 for single channel; 1 for dual channel */
+3 -5
drivers/edac/e752x_edac.c
··· 983 983 984 984 pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval); 985 985 986 - return 0; 986 + return scrubrates[i].bandwidth; 987 987 } 988 988 989 989 /* Convert current scrub rate value into byte/sec bandwidth */ 990 - static int get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw) 990 + static int get_sdram_scrub_rate(struct mem_ctl_info *mci) 991 991 { 992 992 const struct scrubrate *scrubrates; 993 993 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; ··· 1013 1013 "Invalid sdram scrub control value: 0x%x\n", scrubval); 1014 1014 return -1; 1015 1015 } 1016 + return scrubrates[i].bandwidth; 1016 1017 1017 - *bw = scrubrates[i].bandwidth; 1018 - 1019 - return 0; 1020 1018 } 1021 1019 1022 1020 /* Return 1 if dual channel mode is active. Else return 0. */
+1 -1
drivers/edac/edac_core.h
··· 387 387 representation and converts it to the closest matching 388 388 bandwith in bytes/sec. 389 389 */ 390 - int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw); 390 + int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci); 391 391 392 392 393 393 /* pointer to edac checking routine */
+29 -30
drivers/edac/edac_mc_sysfs.c
··· 436 436 return count; 437 437 } 438 438 439 - /* memory scrubbing */ 439 + /* Memory scrubbing interface: 440 + * 441 + * A MC driver can limit the scrubbing bandwidth based on the CPU type. 442 + * Therefore, ->set_sdram_scrub_rate should be made to return the actual 443 + * bandwidth that is accepted or 0 when scrubbing is to be disabled. 444 + * 445 + * Negative value still means that an error has occurred while setting 446 + * the scrub rate. 447 + */ 440 448 static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci, 441 449 const char *data, size_t count) 442 450 { 443 451 unsigned long bandwidth = 0; 444 - int err; 452 + int new_bw = 0; 445 453 446 - if (!mci->set_sdram_scrub_rate) { 447 - edac_printk(KERN_WARNING, EDAC_MC, 448 - "Memory scrub rate setting not implemented!\n"); 454 + if (!mci->set_sdram_scrub_rate) 449 455 return -EINVAL; 450 - } 451 456 452 457 if (strict_strtoul(data, 10, &bandwidth) < 0) 453 458 return -EINVAL; 454 459 455 - err = mci->set_sdram_scrub_rate(mci, (u32)bandwidth); 456 - if (err) { 457 - edac_printk(KERN_DEBUG, EDAC_MC, 458 - "Failed setting scrub rate to %lu\n", bandwidth); 459 - return -EINVAL; 460 - } 461 - else { 462 - edac_printk(KERN_DEBUG, EDAC_MC, 463 - "Scrub rate set to: %lu\n", bandwidth); 460 + new_bw = mci->set_sdram_scrub_rate(mci, bandwidth); 461 + if (new_bw >= 0) { 462 + edac_printk(KERN_DEBUG, EDAC_MC, "Scrub rate set to %d\n", new_bw); 464 463 return count; 465 464 } 465 + 466 + edac_printk(KERN_DEBUG, EDAC_MC, "Error setting scrub rate to: %lu\n", bandwidth); 467 + return -EINVAL; 466 468 } 467 469 470 + /* 471 + * ->get_sdram_scrub_rate() return value semantics same as above. 
472 + */ 468 473 static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data) 469 474 { 470 - u32 bandwidth = 0; 471 - int err; 475 + int bandwidth = 0; 472 476 473 - if (!mci->get_sdram_scrub_rate) { 474 - edac_printk(KERN_WARNING, EDAC_MC, 475 - "Memory scrub rate reading not implemented\n"); 477 + if (!mci->get_sdram_scrub_rate) 476 478 return -EINVAL; 479 + 480 + bandwidth = mci->get_sdram_scrub_rate(mci); 481 + if (bandwidth < 0) { 482 + edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n"); 483 + return bandwidth; 477 484 } 478 485 479 - err = mci->get_sdram_scrub_rate(mci, &bandwidth); 480 - if (err) { 481 - edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n"); 482 - return err; 483 - } 484 - else { 485 - edac_printk(KERN_DEBUG, EDAC_MC, 486 - "Read scrub rate: %d\n", bandwidth); 487 - return sprintf(data, "%d\n", bandwidth); 488 - } 486 + edac_printk(KERN_DEBUG, EDAC_MC, "Read scrub rate: %d\n", bandwidth); 487 + return sprintf(data, "%d\n", bandwidth); 489 488 } 490 489 491 490 /* default attribute files for the MCI object */
+3 -6
drivers/edac/i5100_edac.c
··· 611 611 612 612 bandwidth = 5900000 * i5100_mc_scrben(dw); 613 613 614 - return 0; 614 + return bandwidth; 615 615 } 616 616 617 - static int i5100_get_scrub_rate(struct mem_ctl_info *mci, 618 - u32 *bandwidth) 617 + static int i5100_get_scrub_rate(struct mem_ctl_info *mci) 619 618 { 620 619 struct i5100_priv *priv = mci->pvt_info; 621 620 u32 dw; 622 621 623 622 pci_read_config_dword(priv->mc, I5100_MC, &dw); 624 623 625 - *bandwidth = 5900000 * i5100_mc_scrben(dw); 626 - 627 - return 0; 624 + return 5900000 * i5100_mc_scrben(dw); 628 625 } 629 626 630 627 static struct pci_dev *pci_get_device_func(unsigned vendor,