[MIPS] Separate performance counter interrupts

Add support for a performance counter overflow interrupt that is delivered
on a separate interrupt line from the timer interrupt.

Signed-off-by: Chris Dearman <chris@mips.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

authored by Chris Dearman and committed by Ralf Baechle ffe9ee47 b72c0526

+136 -47
+6 -6
arch/mips/kernel/smp-mt.c
··· 129 129 130 130 static struct irqaction irq_resched = { 131 131 .handler = ipi_resched_interrupt, 132 - .flags = IRQF_DISABLED, 132 + .flags = IRQF_DISABLED|IRQF_PERCPU, 133 133 .name = "IPI_resched" 134 134 }; 135 135 136 136 static struct irqaction irq_call = { 137 137 .handler = ipi_call_interrupt, 138 - .flags = IRQF_DISABLED, 138 + .flags = IRQF_DISABLED|IRQF_PERCPU, 139 139 .name = "IPI_call" 140 140 }; 141 141 ··· 275 275 setup_irq(cpu_ipi_resched_irq, &irq_resched); 276 276 setup_irq(cpu_ipi_call_irq, &irq_call); 277 277 278 - /* need to mark IPI's as IRQ_PER_CPU */ 279 - irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU; 280 278 set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq); 281 - irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU; 282 279 set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq); 283 280 } 284 281 ··· 323 326 324 327 void prom_init_secondary(void) 325 328 { 329 + /* Enable per-cpu interrupts */ 330 + 331 + /* This is Malta specific: IPI, performance and timer interrupts */ 326 332 write_c0_status((read_c0_status() & ~ST0_IM ) | 327 - (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7)); 333 + (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7)); 328 334 } 329 335 330 336 void prom_smp_finish(void)
+31 -13
arch/mips/kernel/time.c
··· 199 199 EXPORT_SYMBOL(null_perf_irq); 200 200 EXPORT_SYMBOL(perf_irq); 201 201 202 + /* 203 + * Performance counter IRQ or -1 if shared with timer 204 + */ 205 + int mipsxx_perfcount_irq; 206 + EXPORT_SYMBOL(mipsxx_perfcount_irq); 207 + 208 + /* 209 + * Possibly handle a performance counter interrupt. 210 + * Return true if the timer interrupt should not be checked 211 + */ 212 + static inline int handle_perf_irq (int r2) 213 + { 214 + /* 215 + * The performance counter overflow interrupt may be shared with the 216 + * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a 217 + * performance counter has overflowed (perf_irq() == IRQ_HANDLED) 218 + * and we can't reliably determine if a counter interrupt has also 219 + * happened (!r2) then don't check for a timer interrupt. 220 + */ 221 + return (mipsxx_perfcount_irq < 0) && 222 + perf_irq() == IRQ_HANDLED && 223 + !r2; 224 + } 225 + 202 226 asmlinkage void ll_timer_interrupt(int irq) 203 227 { 204 228 int r2 = cpu_has_mips_r2; ··· 230 206 irq_enter(); 231 207 kstat_this_cpu.irqs[irq]++; 232 208 233 - /* 234 - * Suckage alert: 235 - * Before R2 of the architecture there was no way to see if a 236 - * performance counter interrupt was pending, so we have to run the 237 - * performance counter interrupt handler anyway. 238 - */ 239 - if (!r2 || (read_c0_cause() & (1 << 26))) 240 - if (perf_irq()) 241 - goto out; 209 + if (handle_perf_irq(r2)) 210 + goto out; 242 211 243 - /* we keep interrupt disabled all the time */ 244 - if (!r2 || (read_c0_cause() & (1 << 30))) 245 - timer_interrupt(irq, NULL); 212 + if (r2 && ((read_c0_cause() & (1 << 30)) == 0)) 213 + goto out; 214 + 215 + timer_interrupt(irq, NULL); 246 216 247 217 out: 248 218 irq_exit(); ··· 276 258 277 259 static struct irqaction timer_irqaction = { 278 260 .handler = timer_interrupt, 279 - .flags = IRQF_DISABLED, 261 + .flags = IRQF_DISABLED | IRQF_PERCPU, 280 262 .name = "timer", 281 263 }; 282 264
+94 -26
arch/mips/mips-boards/generic/time.c
··· 53 53 54 54 unsigned long cpu_khz; 55 55 56 - #define CPUCTR_IMASKBIT (0x100 << MIPSCPU_INT_CPUCTR) 57 - 58 56 static int mips_cpu_timer_irq; 57 + extern int mipsxx_perfcount_irq; 59 58 extern void smtc_timer_broadcast(int); 60 59 61 60 static void mips_timer_dispatch(void) 62 61 { 63 62 do_IRQ(mips_cpu_timer_irq); 63 + } 64 + 65 + static void mips_perf_dispatch(void) 66 + { 67 + do_IRQ(mipsxx_perfcount_irq); 64 68 } 65 69 66 70 /* ··· 73 69 extern int null_perf_irq(void); 74 70 75 71 extern int (*perf_irq)(void); 72 + 73 + /* 74 + * Possibly handle a performance counter interrupt. 75 + * Return true if the timer interrupt should not be checked 76 + */ 77 + static inline int handle_perf_irq (int r2) 78 + { 79 + /* 80 + * The performance counter overflow interrupt may be shared with the 81 + * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a 82 + * performance counter has overflowed (perf_irq() == IRQ_HANDLED) 83 + * and we can't reliably determine if a counter interrupt has also 84 + * happened (!r2) then don't check for a timer interrupt. 85 + */ 86 + return (mipsxx_perfcount_irq < 0) && 87 + perf_irq() == IRQ_HANDLED && 88 + !r2; 89 + } 76 90 77 91 irqreturn_t mips_timer_interrupt(int irq, void *dev_id) 78 92 { ··· 114 92 * We could be here due to timer interrupt, 115 93 * perf counter overflow, or both. 116 94 */ 117 - if (read_c0_cause() & (1 << 26)) 118 - perf_irq(); 95 + (void) handle_perf_irq(1); 119 96 120 97 if (read_c0_cause() & (1 << 30)) { 121 98 /* ··· 136 115 #else /* CONFIG_MIPS_MT_SMTC */ 137 116 int r2 = cpu_has_mips_r2; 138 117 118 + if (handle_perf_irq(r2)) 119 + goto out; 120 + 121 + if (r2 && ((read_c0_cause() & (1 << 30)) == 0)) 122 + goto out; 123 + 139 124 if (cpu == 0) { 140 125 /* 141 126 * CPU 0 handles the global timer interrupt job and process 142 127 * accounting resets count/compare registers to trigger next 143 128 * timer int. 
144 129 */ 145 - if (!r2 || (read_c0_cause() & (1 << 26))) 146 - if (perf_irq()) 147 - goto out; 148 - 149 - /* we keep interrupt disabled all the time */ 150 - if (!r2 || (read_c0_cause() & (1 << 30))) 151 - timer_interrupt(irq, NULL); 130 + timer_interrupt(irq, NULL); 152 131 } else { 153 132 /* Everyone else needs to reset the timer int here as 154 133 ll_local_timer_interrupt doesn't */ ··· 246 225 mips_scroll_message(); 247 226 } 248 227 228 + irqreturn_t mips_perf_interrupt(int irq, void *dev_id) 229 + { 230 + return perf_irq(); 231 + } 232 + 233 + static struct irqaction perf_irqaction = { 234 + .handler = mips_perf_interrupt, 235 + .flags = IRQF_DISABLED | IRQF_PERCPU, 236 + .name = "performance", 237 + }; 238 + 239 + void __init plat_perf_setup(struct irqaction *irq) 240 + { 241 + int hwint = 0; 242 + mipsxx_perfcount_irq = -1; 243 + 244 + #ifdef MSC01E_INT_BASE 245 + if (cpu_has_veic) { 246 + set_vi_handler (MSC01E_INT_PERFCTR, mips_perf_dispatch); 247 + mipsxx_perfcount_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; 248 + } else 249 + #endif 250 + if (cpu_has_mips_r2) { 251 + /* 252 + * Read IntCtl.IPPCI to determine the performance 253 + * counter interrupt 254 + */ 255 + hwint = (read_c0_intctl () >> 26) & 7; 256 + if (hwint != MIPSCPU_INT_CPUCTR) { 257 + if (cpu_has_vint) 258 + set_vi_handler (hwint, mips_perf_dispatch); 259 + mipsxx_perfcount_irq = MIPSCPU_INT_BASE + hwint; 260 + } 261 + } 262 + if (mipsxx_perfcount_irq >= 0) { 263 + #ifdef CONFIG_MIPS_MT_SMTC 264 + setup_irq_smtc(mipsxx_perfcount_irq, irq, 0x100 << hwint); 265 + #else 266 + setup_irq(mipsxx_perfcount_irq, irq); 267 + #endif /* CONFIG_MIPS_MT_SMTC */ 268 + #ifdef CONFIG_SMP 269 + set_irq_handler(mipsxx_perfcount_irq, handle_percpu_irq); 270 + #endif 271 + } 272 + } 273 + 249 274 void __init plat_timer_setup(struct irqaction *irq) 250 275 { 251 - #ifdef MSC01E_INT_BASE 276 + int hwint = 0; 252 277 if (cpu_has_veic) { 253 278 set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch); 254 
279 mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR; 255 - } else 256 - #endif 257 - { 258 - if (cpu_has_vint) 259 - set_vi_handler (MIPSCPU_INT_CPUCTR, mips_timer_dispatch); 260 - mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR; 261 280 } 262 - 281 + else { 282 + if (cpu_has_mips_r2) 283 + /* 284 + * Read IntCtl.IPTI to determine the timer interrupt 285 + */ 286 + hwint = (read_c0_intctl () >> 29) & 7; 287 + else 288 + hwint = MIPSCPU_INT_CPUCTR; 289 + if (cpu_has_vint) 290 + set_vi_handler (hwint, mips_timer_dispatch); 291 + mips_cpu_timer_irq = MIPSCPU_INT_BASE + hwint; 292 + } 263 293 264 294 /* we are using the cpu counter for timer interrupts */ 265 295 irq->handler = mips_timer_interrupt; /* we use our own handler */ 266 296 #ifdef CONFIG_MIPS_MT_SMTC 267 - setup_irq_smtc(mips_cpu_timer_irq, irq, CPUCTR_IMASKBIT); 297 + setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << hwint); 268 298 #else 269 299 setup_irq(mips_cpu_timer_irq, irq); 270 300 #endif /* CONFIG_MIPS_MT_SMTC */ 271 - 272 301 #ifdef CONFIG_SMP 273 - /* irq_desc(riptor) is a global resource, when the interrupt overlaps 274 - on seperate cpu's the first one tries to handle the second interrupt. 275 - The effect is that the int remains disabled on the second cpu. 276 - Mark the interrupt with IRQ_PER_CPU to avoid any confusion */ 277 - irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU; 278 302 set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq); 279 303 #endif 304 + 305 + plat_perf_setup(&perf_irqaction); 280 306 }
+5 -2
arch/mips/oprofile/op_model_mipsxx.c
··· 177 177 unsigned int counters = op_model_mipsxx_ops.num_counters; 178 178 unsigned int control; 179 179 unsigned int counter; 180 - int handled = 0; 180 + int handled = IRQ_NONE; 181 + 182 + if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26))) 183 + return handled; 181 184 182 185 switch (counters) { 183 186 #define HANDLE_COUNTER(n) \ ··· 191 188 (counter & M_COUNTER_OVERFLOW)) { \ 192 189 oprofile_add_sample(get_irq_regs(), n); \ 193 190 w_c0_perfcntr ## n(reg.counter[n]); \ 194 - handled = 1; \ 191 + handled = IRQ_HANDLED; \ 195 192 } 196 193 HANDLE_COUNTER(3) 197 194 HANDLE_COUNTER(2)