Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Fix initialization of CMCI/CMCP interrupts

Back in 2010, during a revamp of the irq code some initializations
were moved from ia64_mca_init() to ia64_mca_late_init() in

commit c75f2aa13f5b268aba369b5dc566088b5194377c
Cannot use register_percpu_irq() from ia64_mca_init()

But this was hideously wrong. First of all these initializations
are now done far too late. Specifically after all the other cpus
have been brought up and initialized their own CMC vectors from
smp_callin(). Also ia64_mca_late_init() may be called from any cpu
so the line:
ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */
is generally not executed on the BSP, and so the CMC vector isn't
set up at all on that processor.

Make use of the arch_early_irq_init() hook to get this code executed
at just the right moment: not too early, not too late.

Reported-by: Fred Hartnett <fred.hartnett@hp.com>
Tested-by: Fred Hartnett <fred.hartnett@hp.com>
Cc: stable@kernel.org # v2.6.37+
Signed-off-by: Tony Luck <tony.luck@intel.com>

Tony Luck d303e9e9 96edc754

+35 -15
+1
arch/ia64/include/asm/mca.h
··· 143 143 extern int cpe_vector; 144 144 extern int ia64_cpe_irq; 145 145 extern void ia64_mca_init(void); 146 + extern void ia64_mca_irq_init(void); 146 147 extern void ia64_mca_cpu_init(void *); 147 148 extern void ia64_os_mca_dispatch(void); 148 149 extern void ia64_os_mca_dispatch_end(void);
+8
arch/ia64/kernel/irq.c
··· 23 23 #include <linux/interrupt.h> 24 24 #include <linux/kernel_stat.h> 25 25 26 + #include <asm/mca.h> 27 + 26 28 /* 27 29 * 'what should we do if we get a hw irq event on an illegal vector'. 28 30 * each architecture has to answer this themselves. ··· 84 82 } 85 83 86 84 #endif /* CONFIG_SMP */ 85 + 86 + int __init arch_early_irq_init(void) 87 + { 88 + ia64_mca_irq_init(); 89 + return 0; 90 + } 87 91 88 92 #ifdef CONFIG_HOTPLUG_CPU 89 93 unsigned int vectors_in_migration[NR_IRQS];
+26 -15
arch/ia64/kernel/mca.c
··· 2074 2074 printk(KERN_INFO "MCA related initialization done\n"); 2075 2075 } 2076 2076 2077 - /* 2078 - * ia64_mca_late_init 2079 - * 2080 - * Opportunity to setup things that require initialization later 2081 - * than ia64_mca_init. Setup a timer to poll for CPEs if the 2082 - * platform doesn't support an interrupt driven mechanism. 2083 - * 2084 - * Inputs : None 2085 - * Outputs : Status 2086 - */ 2087 - static int __init 2088 - ia64_mca_late_init(void) 2089 - { 2090 - if (!mca_init) 2091 - return 0; 2092 2077 2078 + /* 2079 + * These pieces cannot be done in ia64_mca_init() because it is called before 2080 + * early_irq_init() which would wipe out our percpu irq registrations. But we 2081 + * cannot leave them until ia64_mca_late_init() because by then all the other 2082 + * processors have been brought online and have set their own CMC vectors to 2083 + * point at a non-existant action. Called from arch_early_irq_init(). 2084 + */ 2085 + void __init ia64_mca_irq_init(void) 2086 + { 2093 2087 /* 2094 2088 * Configure the CMCI/P vector and handler. Interrupts for CMC are 2095 2089 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). ··· 2102 2108 /* Setup the CPEI/P handler */ 2103 2109 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); 2104 2110 #endif 2111 + } 2112 + 2113 + /* 2114 + * ia64_mca_late_init 2115 + * 2116 + * Opportunity to setup things that require initialization later 2117 + * than ia64_mca_init. Setup a timer to poll for CPEs if the 2118 + * platform doesn't support an interrupt driven mechanism. 2119 + * 2120 + * Inputs : None 2121 + * Outputs : Status 2122 + */ 2123 + static int __init 2124 + ia64_mca_late_init(void) 2125 + { 2126 + if (!mca_init) 2127 + return 0; 2105 2128 2106 2129 register_hotcpu_notifier(&mca_cpu_notifier); 2107 2130