Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[MIPS] IRQ Affinity Support for SMTC on Malta Platform Signed-off-by: Kevin D. Kissell <kevink@mips.com> Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

authored by

Kevin D. Kissell and committed by
Ralf Baechle
f571eff0 bbf25010

+195 -2
+13
arch/mips/Kconfig
··· 1378 1378 impact on interrupt service overhead. Disable it only if you know 1379 1379 what you are doing. 1380 1380 1381 + config MIPS_MT_SMTC_IRQAFF 1382 + bool "Support IRQ affinity API" 1383 + depends on MIPS_MT_SMTC 1384 + default n 1385 + help 1386 + Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.) 1387 + for SMTC Linux kernel. Requires platform support, of which 1388 + an example can be found in the MIPS kernel i8259 and Malta 1389 + platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY 1390 + be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to 1391 + interrupt dispatch, and should be used only if you know what 1392 + you are doing. 1393 + 1381 1394 config MIPS_VPE_LOADER_TOM 1382 1395 bool "Load VPE program into memory hidden from linux" 1383 1396 depends on MIPS_VPE_LOADER
+3
arch/mips/kernel/i8259.c
··· 39 39 .disable = disable_8259A_irq, 40 40 .unmask = enable_8259A_irq, 41 41 .mask_ack = mask_and_ack_8259A, 42 + #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 43 + .set_affinity = plat_set_irq_affinity, 44 + #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 42 45 }; 43 46 44 47 /*
+63
arch/mips/kernel/smtc.c
··· 606 606 return setup_irq(irq, new); 607 607 } 608 608 609 + #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 610 + /* 611 + * Support for IRQ affinity to TCs 612 + */ 613 + 614 + void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity) 615 + { 616 + /* 617 + * If a "fast path" cache of quickly decodable affinity state 618 + * is maintained, this is where it gets done, on a call up 619 + * from the platform affinity code. 620 + */ 621 + } 622 + 623 + void smtc_forward_irq(unsigned int irq) 624 + { 625 + int target; 626 + 627 + /* 628 + * OK wise guy, now figure out how to get the IRQ 629 + * to be serviced on an authorized "CPU". 630 + * 631 + * Ideally, to handle the situation where an IRQ has multiple 632 + * eligible CPUs, we would maintain state per IRQ that would 633 + * allow a fair distribution of service requests. Since the 634 + * expected use model is any-or-only-one, for simplicity 635 + * and efficiency, we just pick the easiest one to find. 636 + */ 637 + 638 + target = first_cpu(irq_desc[irq].affinity); 639 + 640 + /* 641 + * We depend on the platform code to have correctly processed 642 + * IRQ affinity change requests to ensure that the IRQ affinity 643 + * mask has been purged of bits corresponding to nonexistent and 644 + * offline "CPUs", and to TCs bound to VPEs other than the VPE 645 + * connected to the physical interrupt input for the interrupt 646 + * in question. Otherwise we have a nasty problem with interrupt 647 + * mask management. This is best handled in non-performance-critical 648 + * platform IRQ affinity setting code, to minimize interrupt-time 649 + * checks. 650 + */ 651 + 652 + /* If no one is eligible, service locally */ 653 + if (target >= NR_CPUS) { 654 + do_IRQ_no_affinity(irq); 655 + return; 656 + } 657 + 658 + smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq); 659 + } 660 + 661 + #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 662 + 609 663 /* 610 664 * IPI model for SMTC is tricky, because interrupts aren't TC-specific. 
611 665 * Within a VPE one TC can interrupt another by different approaches. ··· 884 830 break; 885 831 } 886 832 break; 833 + #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 834 + case IRQ_AFFINITY_IPI: 835 + /* 836 + * Accept a "forwarded" interrupt that was initially 837 + * taken by a TC who doesn't have affinity for the IRQ. 838 + */ 839 + do_IRQ_no_affinity((int)arg_copy); 840 + break; 841 + #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 887 842 default: 888 843 printk("Impossible SMTC IPI Type 0x%x\n", type_copy); 889 844 break;
+50
arch/mips/mips-boards/malta/malta_smtc.c
··· 88 88 void prom_cpus_done(void) 89 89 { 90 90 } 91 + 92 + #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 93 + /* 94 + * IRQ affinity hook 95 + */ 96 + 97 + 98 + void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity) 99 + { 100 + cpumask_t tmask = affinity; 101 + int cpu = 0; 102 + void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); 103 + 104 + /* 105 + * On the legacy Malta development board, all I/O interrupts 106 + * are routed through the 8259 and combined in a single signal 107 + * to the CPU daughterboard, and on the CoreFPGA2/3 34K models, 108 + * that signal is brought to IP2 of both VPEs. To avoid racing 109 + * concurrent interrupt service events, IP2 is enabled only on 110 + * one VPE, by convention VPE0. So long as no bits are ever 111 + * cleared in the affinity mask, there will never be any 112 + * interrupt forwarding. But as soon as a program or operator 113 + * sets affinity for one of the related IRQs, we need to make 114 + * sure that we don't ever try to forward across the VPE boundary, 115 + * at least not until we engineer a system where the interrupt 116 + * _ack() or _end() function can somehow know that it corresponds 117 + * to an interrupt taken on another VPE, and perform the appropriate 118 + * restoration of Status.IM state using MFTR/MTTR instead of the 119 + * normal local behavior. We also ensure that no attempt will 120 + * be made to forward to an offline "CPU". 
121 + */ 122 + 123 + for_each_cpu_mask(cpu, affinity) { 124 + if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) 125 + cpu_clear(cpu, tmask); 126 + } 127 + irq_desc[irq].affinity = tmask; 128 + 129 + if (cpus_empty(tmask)) 130 + /* 131 + * We could restore a default mask here, but the 132 + * runtime code can anyway deal with the null set 133 + */ 134 + printk(KERN_WARNING 135 + "IRQ affinity leaves no legal CPU for IRQ %d\n", irq); 136 + 137 + /* Do any generic SMTC IRQ affinity setup */ 138 + smtc_set_irq_affinity(irq, tmask); 139 + } 140 + #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+65 -2
include/asm-mips/irq.h
··· 46 46 47 47 #endif /* CONFIG_MIPS_MT_SMTC */ 48 48 49 + #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 50 + #include <linux/cpumask.h> 51 + 52 + extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity); 53 + extern void smtc_forward_irq(unsigned int irq); 54 + 55 + /* 56 + * IRQ affinity hook invoked at the beginning of interrupt dispatch 57 + * if option is enabled. 58 + * 59 + * Up through Linux 2.6.22 (at least) cpumask operations are very 60 + * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity 61 + * used a "fast path" per-IRQ-descriptor cache of affinity information 62 + * to reduce latency. As there is a project afoot to optimize the 63 + * cpumask implementations, this version is optimistically assuming 64 + * that cpumask.h macro overhead is reasonable during interrupt dispatch. 65 + */ 66 + #define IRQ_AFFINITY_HOOK(irq) \ 67 + do { \ 68 + if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) { \ 69 + smtc_forward_irq(irq); \ 70 + irq_exit(); \ 71 + return; \ 72 + } \ 73 + } while (0) 74 + 75 + #else /* Not doing SMTC affinity */ 76 + 77 + #define IRQ_AFFINITY_HOOK(irq) do { } while (0) 78 + 79 + #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 80 + 49 81 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP 50 82 51 83 /* ··· 88 56 */ 89 57 #define __DO_IRQ_SMTC_HOOK(irq) \ 90 58 do { \ 59 + IRQ_AFFINITY_HOOK(irq); \ 91 60 if (irq_hwmask[irq] & 0x0000ff00) \ 92 61 write_c0_tccontext(read_c0_tccontext() & \ 93 - ~(irq_hwmask[irq] & 0x0000ff00)); \ 62 + ~(irq_hwmask[irq] & 0x0000ff00)); \ 94 63 } while (0) 64 + 65 + #define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) \ 66 + do { \ 67 + if (irq_hwmask[irq] & 0x0000ff00) \ 68 + write_c0_tccontext(read_c0_tccontext() & \ 69 + ~(irq_hwmask[irq] & 0x0000ff00)); \ 70 + } while (0) 71 + 95 72 #else 96 73 97 - #define __DO_IRQ_SMTC_HOOK(irq) do { } while (0) 74 + #define __DO_IRQ_SMTC_HOOK(irq) \ 75 + do { \ 76 + IRQ_AFFINITY_HOOK(irq); \ 77 + } while (0) 78 + #define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do { } while (0) 79 + 98 
80 #endif 99 81 100 82 /* ··· 126 80 generic_handle_irq(irq); \ 127 81 irq_exit(); \ 128 82 } while (0) 83 + 84 + #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 85 + /* 86 + * To avoid inefficient and in some cases pathological re-checking of 87 + * IRQ affinity, we have this variant that skips the affinity check. 88 + */ 89 + 90 + 91 + #define do_IRQ_no_affinity(irq) \ 92 + do { \ 93 + irq_enter(); \ 94 + __NO_AFFINITY_IRQ_SMTC_HOOK(irq); \ 95 + generic_handle_irq(irq); \ 96 + irq_exit(); \ 97 + } while (0) 98 + 99 + #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 129 100 130 101 extern void arch_init_irq(void); 131 102 extern void spurious_interrupt(void);
+1
include/asm-mips/smtc_ipi.h
··· 34 34 35 35 #define LINUX_SMP_IPI 1 36 36 #define SMTC_CLOCK_TICK 2 37 + #define IRQ_AFFINITY_IPI 3 37 38 38 39 /* 39 40 * A queue of IPI messages