Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

amd64_edac: Remove "amd64" prefix from static functions

No need for the namespace tagging there. Cleanup setup_pci_device while
at it.

Signed-off-by: Borislav Petkov <bp@suse.de>

+56 -62
drivers/edac/amd64_edac.c
··· 1 1 #include "amd64_edac.h" 2 2 #include <asm/amd_nb.h> 3 3 4 - static struct edac_pci_ctl_info *amd64_ctl_pci; 4 + static struct edac_pci_ctl_info *pci_ctl; 5 5 6 6 static int report_gart_errors; 7 7 module_param(report_gart_errors, int, 0644); ··· 162 162 * scan the scrub rate mapping table for a close or matching bandwidth value to 163 163 * issue. If requested is too big, then use last maximum value found. 164 164 */ 165 - static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) 165 + static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) 166 166 { 167 167 u32 scrubval; 168 168 int i; ··· 198 198 return 0; 199 199 } 200 200 201 - static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw) 201 + static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw) 202 202 { 203 203 struct amd64_pvt *pvt = mci->pvt_info; 204 204 u32 min_scrubrate = 0x5; ··· 210 210 if (pvt->fam == 0x15 && pvt->model < 0x10) 211 211 f15h_select_dct(pvt, 0); 212 212 213 - return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate); 213 + return __set_scrub_rate(pvt->F3, bw, min_scrubrate); 214 214 } 215 215 216 - static int amd64_get_scrub_rate(struct mem_ctl_info *mci) 216 + static int get_scrub_rate(struct mem_ctl_info *mci) 217 217 { 218 218 struct amd64_pvt *pvt = mci->pvt_info; 219 219 u32 scrubval = 0; ··· 240 240 * returns true if the SysAddr given by sys_addr matches the 241 241 * DRAM base/limit associated with node_id 242 242 */ 243 - static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, 244 - u8 nid) 243 + static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid) 245 244 { 246 245 u64 addr; 247 246 ··· 284 285 285 286 if (intlv_en == 0) { 286 287 for (node_id = 0; node_id < DRAM_RANGES; node_id++) { 287 - if (amd64_base_limit_match(pvt, sys_addr, node_id)) 288 + if (base_limit_match(pvt, sys_addr, node_id)) 288 289 goto found; 289 290 } 290 291 goto err_no_match; ··· 308 309 } 309 310 310 311 /* 
sanity test for sys_addr */ 311 - if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { 312 + if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) { 312 313 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address" 313 314 "range for node %d with node interleaving enabled.\n", 314 315 __func__, sys_addr, node_id); ··· 659 660 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs 660 661 * are ECC capable. 661 662 */ 662 - static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt) 663 + static unsigned long determine_edac_cap(struct amd64_pvt *pvt) 663 664 { 664 665 u8 bit; 665 666 unsigned long edac_cap = EDAC_FLAG_NONE; ··· 674 675 return edac_cap; 675 676 } 676 677 677 - static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8); 678 + static void debug_display_dimm_sizes(struct amd64_pvt *, u8); 678 679 679 - static void amd64_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan) 680 + static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan) 680 681 { 681 682 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); 682 683 ··· 710 711 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", 711 712 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); 712 713 713 - amd64_dump_dramcfg_low(pvt, pvt->dclr0, 0); 714 + debug_dump_dramcfg_low(pvt, pvt->dclr0, 0); 714 715 715 716 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); 716 717 ··· 721 722 722 723 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); 723 724 724 - amd64_debug_display_dimm_sizes(pvt, 0); 725 + debug_display_dimm_sizes(pvt, 0); 725 726 726 727 /* everything below this point is Fam10h and above */ 727 728 if (pvt->fam == 0xf) 728 729 return; 729 730 730 - amd64_debug_display_dimm_sizes(pvt, 1); 731 + debug_display_dimm_sizes(pvt, 1); 731 732 732 733 amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? 
"x8" : "x4")); 733 734 734 735 /* Only if NOT ganged does dclr1 have valid info */ 735 736 if (!dct_ganging_enabled(pvt)) 736 - amd64_dump_dramcfg_low(pvt, pvt->dclr1, 1); 737 + debug_dump_dramcfg_low(pvt, pvt->dclr1, 1); 737 738 } 738 739 739 740 /* ··· 799 800 } 800 801 } 801 802 802 - static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs) 803 + static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs) 803 804 { 804 805 enum mem_type type; 805 806 ··· 1701 1702 * debug routine to display the memory sizes of all logical DIMMs and its 1702 1703 * CSROWs 1703 1704 */ 1704 - static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) 1705 + static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) 1705 1706 { 1706 1707 int dimm, size0, size1; 1707 1708 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; ··· 1743 1744 } 1744 1745 } 1745 1746 1746 - static struct amd64_family_type amd64_family_types[] = { 1747 + static struct amd64_family_type family_types[] = { 1747 1748 [K8_CPUS] = { 1748 1749 .ctl_name = "K8", 1749 1750 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, ··· 2190 2191 * encompasses 2191 2192 * 2192 2193 */ 2193 - static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) 2194 + static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) 2194 2195 { 2195 2196 u32 cs_mode, nr_pages; 2196 2197 u32 dbam = dct ? 
pvt->dbam1 : pvt->dbam0; ··· 2257 2258 pvt->mc_node_id, i); 2258 2259 2259 2260 if (row_dct0) { 2260 - nr_pages = amd64_csrow_nr_pages(pvt, 0, i); 2261 + nr_pages = get_csrow_nr_pages(pvt, 0, i); 2261 2262 csrow->channels[0]->dimm->nr_pages = nr_pages; 2262 2263 } 2263 2264 2264 2265 /* K8 has only one DCT */ 2265 2266 if (pvt->fam != 0xf && row_dct1) { 2266 - int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i); 2267 + int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i); 2267 2268 2268 2269 csrow->channels[1]->dimm->nr_pages = row_dct1_pages; 2269 2270 nr_pages += row_dct1_pages; 2270 2271 } 2271 2272 2272 - mtype = amd64_determine_memory_type(pvt, i); 2273 + mtype = determine_memory_type(pvt, i); 2273 2274 2274 2275 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages); 2275 2276 ··· 2303 2304 } 2304 2305 2305 2306 /* check MCG_CTL on all the cpus on this node */ 2306 - static bool amd64_nb_mce_bank_enabled_on_node(u16 nid) 2307 + static bool nb_mce_bank_enabled_on_node(u16 nid) 2307 2308 { 2308 2309 cpumask_var_t mask; 2309 2310 int cpu, nbe; ··· 2476 2477 ecc_en = !!(value & NBCFG_ECC_ENABLE); 2477 2478 amd64_info("DRAM ECC %s.\n", (ecc_en ? 
"enabled" : "disabled")); 2478 2479 2479 - nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid); 2480 + nb_mce_en = nb_mce_bank_enabled_on_node(nid); 2480 2481 if (!nb_mce_en) 2481 2482 amd64_notice("NB MCE bank disabled, set MSR " 2482 2483 "0x%08x[4] on node %d to enable.\n", ··· 2531 2532 if (pvt->nbcap & NBCAP_CHIPKILL) 2532 2533 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; 2533 2534 2534 - mci->edac_cap = amd64_determine_edac_cap(pvt); 2535 + mci->edac_cap = determine_edac_cap(pvt); 2535 2536 mci->mod_name = EDAC_MOD_STR; 2536 2537 mci->mod_ver = EDAC_AMD64_VERSION; 2537 2538 mci->ctl_name = fam->ctl_name; ··· 2539 2540 mci->ctl_page_to_phys = NULL; 2540 2541 2541 2542 /* memory scrubber interface */ 2542 - mci->set_sdram_scrub_rate = amd64_set_scrub_rate; 2543 - mci->get_sdram_scrub_rate = amd64_get_scrub_rate; 2543 + mci->set_sdram_scrub_rate = set_scrub_rate; 2544 + mci->get_sdram_scrub_rate = get_scrub_rate; 2544 2545 } 2545 2546 2546 2547 /* 2547 2548 * returns a pointer to the family descriptor on success, NULL otherwise. 
2548 2549 */ 2549 - static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) 2550 + static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) 2550 2551 { 2551 2552 struct amd64_family_type *fam_type = NULL; 2552 2553 ··· 2557 2558 2558 2559 switch (pvt->fam) { 2559 2560 case 0xf: 2560 - fam_type = &amd64_family_types[K8_CPUS]; 2561 - pvt->ops = &amd64_family_types[K8_CPUS].ops; 2561 + fam_type = &family_types[K8_CPUS]; 2562 + pvt->ops = &family_types[K8_CPUS].ops; 2562 2563 break; 2563 2564 2564 2565 case 0x10: 2565 - fam_type = &amd64_family_types[F10_CPUS]; 2566 - pvt->ops = &amd64_family_types[F10_CPUS].ops; 2566 + fam_type = &family_types[F10_CPUS]; 2567 + pvt->ops = &family_types[F10_CPUS].ops; 2567 2568 break; 2568 2569 2569 2570 case 0x15: 2570 2571 if (pvt->model == 0x30) { 2571 - fam_type = &amd64_family_types[F15_M30H_CPUS]; 2572 - pvt->ops = &amd64_family_types[F15_M30H_CPUS].ops; 2572 + fam_type = &family_types[F15_M30H_CPUS]; 2573 + pvt->ops = &family_types[F15_M30H_CPUS].ops; 2573 2574 break; 2574 2575 } 2575 2576 2576 - fam_type = &amd64_family_types[F15_CPUS]; 2577 - pvt->ops = &amd64_family_types[F15_CPUS].ops; 2577 + fam_type = &family_types[F15_CPUS]; 2578 + pvt->ops = &family_types[F15_CPUS].ops; 2578 2579 break; 2579 2580 2580 2581 case 0x16: 2581 - fam_type = &amd64_family_types[F16_CPUS]; 2582 - pvt->ops = &amd64_family_types[F16_CPUS].ops; 2582 + fam_type = &family_types[F16_CPUS]; 2583 + pvt->ops = &family_types[F16_CPUS].ops; 2583 2584 break; 2584 2585 2585 2586 default: ··· 2595 2596 return fam_type; 2596 2597 } 2597 2598 2598 - static int amd64_init_one_instance(struct pci_dev *F2) 2599 + static int init_one_instance(struct pci_dev *F2) 2599 2600 { 2600 2601 struct amd64_pvt *pvt = NULL; 2601 2602 struct amd64_family_type *fam_type = NULL; ··· 2613 2614 pvt->F2 = F2; 2614 2615 2615 2616 ret = -EINVAL; 2616 - fam_type = amd64_per_family_init(pvt); 2617 + fam_type = per_family_init(pvt); 2617 2618 if 
(!fam_type) 2618 2619 goto err_free; 2619 2620 ··· 2697 2698 return ret; 2698 2699 } 2699 2700 2700 - static int amd64_probe_one_instance(struct pci_dev *pdev, 2701 - const struct pci_device_id *mc_type) 2701 + static int probe_one_instance(struct pci_dev *pdev, 2702 + const struct pci_device_id *mc_type) 2702 2703 { 2703 2704 u16 nid = amd_get_node_id(pdev); 2704 2705 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; ··· 2730 2731 goto err_enable; 2731 2732 } 2732 2733 2733 - ret = amd64_init_one_instance(pdev); 2734 + ret = init_one_instance(pdev); 2734 2735 if (ret < 0) { 2735 2736 amd64_err("Error probing instance: %d\n", nid); 2736 2737 restore_ecc_error_reporting(s, nid, F3); ··· 2746 2747 return ret; 2747 2748 } 2748 2749 2749 - static void amd64_remove_one_instance(struct pci_dev *pdev) 2750 + static void remove_one_instance(struct pci_dev *pdev) 2750 2751 { 2751 2752 struct mem_ctl_info *mci; 2752 2753 struct amd64_pvt *pvt; ··· 2837 2838 2838 2839 static struct pci_driver amd64_pci_driver = { 2839 2840 .name = EDAC_MOD_STR, 2840 - .probe = amd64_probe_one_instance, 2841 - .remove = amd64_remove_one_instance, 2841 + .probe = probe_one_instance, 2842 + .remove = remove_one_instance, 2842 2843 .id_table = amd64_pci_table, 2843 2844 }; 2844 2845 ··· 2847 2848 struct mem_ctl_info *mci; 2848 2849 struct amd64_pvt *pvt; 2849 2850 2850 - if (amd64_ctl_pci) 2851 + if (pci_ctl) 2851 2852 return; 2852 2853 2853 2854 mci = mcis[0]; 2854 - if (mci) { 2855 + if (!mci) 2856 + return; 2855 2857 2856 - pvt = mci->pvt_info; 2857 - amd64_ctl_pci = 2858 - edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR); 2859 - 2860 - if (!amd64_ctl_pci) { 2861 - pr_warning("%s(): Unable to create PCI control\n", 2862 - __func__); 2863 - 2864 - pr_warning("%s(): PCI error report via EDAC not set\n", 2865 - __func__); 2866 - } 2858 + pvt = mci->pvt_info; 2859 + pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR); 2860 + if (!pci_ctl) { 2861 + pr_warn("%s(): Unable to 
create PCI control\n", __func__); 2862 + pr_warn("%s(): PCI error report via EDAC not set\n", __func__); 2867 2863 } 2868 2864 } 2869 2865 ··· 2914 2920 2915 2921 static void __exit amd64_edac_exit(void) 2916 2922 { 2917 - if (amd64_ctl_pci) 2918 - edac_pci_release_generic_ctl(amd64_ctl_pci); 2923 + if (pci_ctl) 2924 + edac_pci_release_generic_ctl(pci_ctl); 2919 2925 2920 2926 pci_unregister_driver(&amd64_pci_driver); 2921 2927