[PATCH] ppc64: Move ppc64_enable_pmcs() logic into a ppc_md function

This patch moves the POWER4 PMC-enable logic out of ppc64_enable_pmcs() and into power4_enable_pmcs() in arch/ppc64/kernel/pmc.c. Platforms now select the right routine through a new ppc_md.enable_pmcs() hook.

I've tested it on P5 LPAR and P4. It does what it used to.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>


 7 files changed, 55 insertions(+), 50 deletions(-)
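
For orientation, a condensed sketch of the shape of the change, paraphrased from the diffs below rather than quoted from them:

/* include/asm-ppc64/machdep.h: new per-platform hook in the machdep struct (ppc_md) */
void (*enable_pmcs)(void);

/* arch/ppc64/kernel/sysfs.c: the generic path now just calls the hook */
void ppc64_enable_pmcs(void)
{
        if (__get_cpu_var(pmcs_enabled))
                return;
        __get_cpu_var(pmcs_enabled) = 1;

        if (ppc_md.enable_pmcs)
                ppc_md.enable_pmcs();
}

/* platform setup code fills the hook in, e.g. pSeries_setup_arch(): */
if (systemcfg->platform & PLATFORM_LPAR)
        ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
else
        ppc_md.enable_pmcs = power4_enable_pmcs;
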
arch/ppc64/kernel/iSeries_setup.c (+2)

         ppc_md.calibrate_decr = iSeries_calibrate_decr;
         ppc_md.progress = iSeries_progress;
 
+        /* XXX Implement enable_pmcs for iSeries */
+
         if (get_paca()->lppaca.shared_proc) {
                 ppc_md.idle_loop = iseries_shared_idle;
                 printk(KERN_INFO "Using shared processor idle loop\n");

arch/ppc64/kernel/pSeries_setup.c (+21)

 #include <asm/plpar_wrappers.h>
 #include <asm/xics.h>
 #include <asm/firmware.h>
+#include <asm/pmc.h>
 
 #include "i8259.h"
 #include "mpic.h"
···
                " MPIC ");
 }
 
+static void pseries_lpar_enable_pmcs(void)
+{
+        unsigned long set, reset;
+
+        power4_enable_pmcs();
+
+        set = 1UL << 63;
+        reset = 0;
+        plpar_hcall_norets(H_PERFMON, set, reset);
+
+        /* instruct hypervisor to maintain PMCs */
+        if (firmware_has_feature(FW_FEATURE_SPLPAR))
+                get_paca()->lppaca.pmcregs_in_use = 1;
+}
+
 static void __init pSeries_setup_arch(void)
 {
         /* Fixup ppc_md depending on the type of interrupt controller */
···
                 printk(KERN_INFO "Using default idle loop\n");
                 ppc_md.idle_loop = default_idle;
         }
+
+        if (systemcfg->platform & PLATFORM_LPAR)
+                ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
+        else
+                ppc_md.enable_pmcs = power4_enable_pmcs;
 }
 
 static int __init pSeries_init_panel(void)
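
A note on the bit arithmetic in pseries_lpar_enable_pmcs(), illustrative rather than part of the patch: ppc64 documentation numbers register and flag bits MSB-first ("IBM" or MSB-0 numbering), so bit n of a 64-bit value is selected by shifting 1 left by 63 - n. The patch open-codes the shifts; a hypothetical helper makes the two constants easier to read (later kernels grew a PPC_BIT() macro along these lines):

/* Hypothetical helper, not in this patch */
#define IBM_BIT(n)      (1UL << (63 - (n)))

/*
 * the H_PERFMON "set" argument above is IBM bit 0:
 *      IBM_BIT(0)  == 1UL << 63
 * the HID0 bit set by power4_enable_pmcs() below is IBM bit 20:
 *      IBM_BIT(20) == 1UL << 43 == 0x0000080000000000UL
 */
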
arch/ppc64/kernel/pmac_setup.c (+2)

 #include <asm/of_device.h>
 #include <asm/lmb.h>
 #include <asm/smu.h>
+#include <asm/pmc.h>
 
 #include "pmac.h"
 #include "mpic.h"
···
         .progress = pmac_progress,
         .check_legacy_ioport = pmac_check_legacy_ioport,
         .idle_loop = native_idle,
+        .enable_pmcs = power4_enable_pmcs,
 };

arch/ppc64/kernel/pmc.c (+21)

         spin_unlock(&pmc_owner_lock);
 }
 EXPORT_SYMBOL_GPL(release_pmc_hardware);
+
+void power4_enable_pmcs(void)
+{
+        unsigned long hid0;
+
+        hid0 = mfspr(HID0);
+        hid0 |= 1UL << (63 - 20);
+
+        /* POWER4 requires the following sequence */
+        asm volatile(
+                "sync\n"
+                "mtspr %1, %0\n"
+                "mfspr %0, %1\n"
+                "mfspr %0, %1\n"
+                "mfspr %0, %1\n"
+                "mfspr %0, %1\n"
+                "mfspr %0, %1\n"
+                "mfspr %0, %1\n"
+                "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0):
+                "memory");
+}
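
For readers less used to GCC inline asm, a commented restatement of the constraints in power4_enable_pmcs() above; this is description only, not extra code in the patch:

/*
 * asm volatile(... : "=&r" (hid0) : "i" (HID0), "0" (hid0) : "memory");
 *
 *   "=&r" (hid0)   output %0 in a GPR, earlyclobber: written before the
 *                  asm has finished with its inputs, so no other input
 *                  may share its register
 *   "i"   (HID0)   input %1, the SPR number, used as an immediate
 *   "0"   (hid0)   input tied to operand 0, so the new HID0 value starts
 *                  in the register the mtspr reads
 *   "memory"       clobber, stops the compiler from moving memory
 *                  accesses across the sequence
 *
 * The sync / mtspr / six mfspr reads / isync ordering is the HID0 update
 * sequence the code's own comment says POWER4 requires.
 */
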
arch/ppc64/kernel/sysfs.c (+4 -50)

 }
 __setup("smt-snooze-delay=", setup_smt_snooze_delay);
 
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
 /*
  * Enabling PMCs will slow partition context switch times so we only do
  * it the first time we write to the PMCs.
···
 
 void ppc64_enable_pmcs(void)
 {
-        unsigned long hid0;
-#ifdef CONFIG_PPC_PSERIES
-        unsigned long set, reset;
-#endif /* CONFIG_PPC_PSERIES */
-
         /* Only need to enable them once */
         if (__get_cpu_var(pmcs_enabled))
                 return;
 
         __get_cpu_var(pmcs_enabled) = 1;
 
-        switch (systemcfg->platform) {
-        case PLATFORM_PSERIES:
-        case PLATFORM_POWERMAC:
-                hid0 = mfspr(HID0);
-                hid0 |= 1UL << (63 - 20);
-
-                /* POWER4 requires the following sequence */
-                asm volatile(
-                        "sync\n"
-                        "mtspr %1, %0\n"
-                        "mfspr %0, %1\n"
-                        "mfspr %0, %1\n"
-                        "mfspr %0, %1\n"
-                        "mfspr %0, %1\n"
-                        "mfspr %0, %1\n"
-                        "mfspr %0, %1\n"
-                        "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0):
-                        "memory");
-                break;
-
-#ifdef CONFIG_PPC_PSERIES
-        case PLATFORM_PSERIES_LPAR:
-                set = 1UL << 63;
-                reset = 0;
-                plpar_hcall_norets(H_PERFMON, set, reset);
-                break;
-#endif /* CONFIG_PPC_PSERIES */
-
-        default:
-                break;
-        }
-
-        /* instruct hypervisor to maintain PMCs */
-        if (firmware_has_feature(FW_FEATURE_SPLPAR))
-                get_paca()->lppaca.pmcregs_in_use = 1;
+        if (ppc_md.enable_pmcs)
+                ppc_md.enable_pmcs();
 }
-
-#else
-
-/* PMC stuff */
-void ppc64_enable_pmcs(void)
-{
-        /* XXX Implement for iseries */
-}
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
 EXPORT_SYMBOL(ppc64_enable_pmcs);
 
 /* XXX convert to rusty's on_one_cpu */

include/asm-ppc64/machdep.h (+3)

 
         /* Idle loop for this platform, leave empty for default idle loop */
         int (*idle_loop)(void);
+
+        /* Function to enable pmcs for this platform, called once per cpu. */
+        void (*enable_pmcs)(void);
 };
 
 extern int default_idle(void);

include/asm-ppc64/pmc.h (+2)

 int reserve_pmc_hardware(perf_irq_t new_perf_irq);
 void release_pmc_hardware(void);
 
+void power4_enable_pmcs(void);
+
 #endif /* _PPC64_PMC_H */
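
To round the interface out, a minimal sketch of how a PMC user might sit on top of these calls after this patch. This is an assumption-laden example, not code from the tree: the example_ names are invented, and perf_irq_t is assumed to be a void handler taking a struct pt_regs *, as in the ppc64 headers of this era.

#include <asm/ptrace.h>
#include <asm/pmc.h>

/* ppc64_enable_pmcs() is declared elsewhere in the tree */
extern void ppc64_enable_pmcs(void);

/* hypothetical PMC overflow handler */
static void example_perf_irq(struct pt_regs *regs)
{
        /* read and restart the counters here */
}

static int example_start_counting(void)
{
        int err;

        /* claim the PMC hardware for this user */
        err = reserve_pmc_hardware(example_perf_irq);
        if (err)
                return err;

        /*
         * Enable the PMCs on this cpu. The per-cpu pmcs_enabled flag in
         * sysfs.c makes repeat calls cheap, and ppc_md.enable_pmcs() does
         * the platform-specific work (power4_enable_pmcs() or
         * pseries_lpar_enable_pmcs()). Per the machdep.h comment, it is
         * meant to run once on each cpu that will count.
         */
        ppc64_enable_pmcs();

        return 0;
}

static void example_stop_counting(void)
{
        release_pmc_hardware();
}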