Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Consolidate ipi message mux and demux

Consolidate the mux and demux of ipi messages into smp.c and call
a new smp_ops callback to actually trigger the ipi.

The powerpc architecture code is optimised for having 4 distinct
ipi triggers, which are mapped to 4 distinct messages (ipi many, ipi
single, scheduler ipi, and enter debugger). However, several interrupt
controllers only provide a single software triggered interrupt that
can be delivered to each cpu. To resolve this limitation, each smp_ops
implementation created a per-cpu variable that is manipulated with atomic
bitops. Since these lines will be contended they are optimally marked as
shared_aligned and take a full cache line for each cpu. Distro kernels
may have 2 or 3 of these in their config, each taking per-cpu space
even though at most one will be in use.

This consolidation removes smp_message_recv and replaces the single-call
action cases with direct calls from the common message recognition loop.
The complicated debugger ipi case with its muxed crash handling code is
moved to debug_ipi_action which is now called from the demux code (instead
of the multi-message action calling smp_message_recv).

I put a call to reschedule_action to increase the likelihood of correctly
merging the anticipated scheduler_ipi() hook coming from the scheduler
tree; that single required call can be inlined later.

The actual message decode is a copy of the old pseries xics code with its
memory barriers and cache line spacing, augmented with a per-cpu unsigned
long based on the book-e doorbell code. The optional data is set via a
callback from the implementation and is passed to the new cause-ipi hook
along with the logical cpu number. While currently only the doorbell
implementation uses this data it should be almost zero cost to retrieve and
pass it -- it adds a single register load for the argument from the same
cache line to which we just completed a store and the register is dead
on return from the call. I extended the data element from unsigned int
to unsigned long in case some other code wanted to associate a pointer.

The doorbell check_self is replaced by a call to smp_muxed_ipi_resend,
conditioned on the CPU_DBELL feature. The ifdef guard could be relaxed
to CONFIG_SMP but I left it with BOOKE for now.

Also, the doorbell interrupt vector for book-e was not calling irq_enter
and irq_exit, which throws off cpu accounting and causes code to not
realize it is running in interrupt context. Add the missing calls.

Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

authored by

Milton Miller and committed by
Benjamin Herrenschmidt
23d72bfd 17f9c8a7

+126 -176
+1 -2
arch/powerpc/include/asm/dbell.h
··· 27 27 PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */ 28 28 }; 29 29 30 - extern void doorbell_message_pass(int cpu, int msg); 30 + extern void doorbell_cause_ipi(int cpu, unsigned long data); 31 31 extern void doorbell_exception(struct pt_regs *regs); 32 - extern void doorbell_check_self(void); 33 32 extern void doorbell_setup_this_cpu(void); 34 33 35 34 static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
+11 -5
arch/powerpc/include/asm/smp.h
··· 20 20 #include <linux/threads.h> 21 21 #include <linux/cpumask.h> 22 22 #include <linux/kernel.h> 23 + #include <linux/irqreturn.h> 23 24 24 25 #ifndef __ASSEMBLY__ 25 26 ··· 38 37 39 38 struct smp_ops_t { 40 39 void (*message_pass)(int cpu, int msg); 40 + void (*cause_ipi)(int cpu, unsigned long data); 41 41 int (*probe)(void); 42 42 int (*kick_cpu)(int nr); 43 43 void (*setup_cpu)(int nr); ··· 51 49 }; 52 50 53 51 extern void smp_send_debugger_break(void); 54 - extern void smp_message_recv(int); 55 52 extern void start_secondary_resume(void); 56 53 extern void __devinit smp_generic_give_timebase(void); 57 54 extern void __devinit smp_generic_take_timebase(void); ··· 110 109 #define PPC_MSG_CALL_FUNC_SINGLE 2 111 110 #define PPC_MSG_DEBUGGER_BREAK 3 112 111 113 - /* 114 - * irq controllers that have dedicated ipis per message and don't 115 - * need additional code in the action handler may use this 116 - */ 112 + /* for irq controllers that have dedicated ipis per message (4) */ 117 113 extern int smp_request_message_ipi(int virq, int message); 118 114 extern const char *smp_ipi_name[]; 115 + 116 + /* for irq controllers with only a single ipi */ 117 + extern void smp_muxed_ipi_set_data(int cpu, unsigned long data); 118 + extern void smp_muxed_ipi_message_pass(int cpu, int msg); 119 + extern void smp_muxed_ipi_resend(void); 120 + extern irqreturn_t smp_ipi_demux(void); 119 121 120 122 void smp_init_iSeries(void); 121 123 void smp_init_pSeries(void); ··· 188 184 extern unsigned long __secondary_hold_spinloop; 189 185 extern unsigned long __secondary_hold_acknowledge; 190 186 extern char __secondary_hold; 187 + 188 + extern irqreturn_t debug_ipi_action(int irq, void *data); 191 189 192 190 #endif /* __ASSEMBLY__ */ 193 191
+1 -1
arch/powerpc/include/asm/xics.h
··· 40 40 void (*teardown_cpu)(void); 41 41 void (*flush_ipi)(void); 42 42 #ifdef CONFIG_SMP 43 - void (*message_pass)(int cpu, int msg); 43 + void (*cause_ipi)(int cpu, unsigned long data); 44 44 irq_handler_t ipi_action; 45 45 #endif 46 46 };
+8 -38
arch/powerpc/kernel/dbell.c
··· 13 13 #include <linux/kernel.h> 14 14 #include <linux/smp.h> 15 15 #include <linux/threads.h> 16 - #include <linux/percpu.h> 16 + #include <linux/hardirq.h> 17 17 18 18 #include <asm/dbell.h> 19 19 #include <asm/irq_regs.h> 20 20 21 21 #ifdef CONFIG_SMP 22 - struct doorbell_cpu_info { 23 - unsigned long messages; /* current messages bits */ 24 - unsigned int tag; /* tag value */ 25 - }; 26 - 27 - static DEFINE_PER_CPU(struct doorbell_cpu_info, doorbell_cpu_info); 28 - 29 22 void doorbell_setup_this_cpu(void) 30 23 { 31 - struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); 24 + unsigned long tag = mfspr(SPRN_PIR) & 0x3fff; 32 25 33 - info->messages = 0; 34 - info->tag = mfspr(SPRN_PIR) & 0x3fff; 26 + smp_muxed_ipi_set_data(smp_processor_id(), tag); 35 27 } 36 28 37 - void doorbell_message_pass(int cpu, int msg) 29 + void doorbell_cause_ipi(int cpu, unsigned long data) 38 30 { 39 - struct doorbell_cpu_info *info; 40 - 41 - info = &per_cpu(doorbell_cpu_info, cpu); 42 - set_bit(msg, &info->messages); 43 - ppc_msgsnd(PPC_DBELL, 0, info->tag); 31 + ppc_msgsnd(PPC_DBELL, 0, data); 44 32 } 45 33 46 34 void doorbell_exception(struct pt_regs *regs) 47 35 { 48 36 struct pt_regs *old_regs = set_irq_regs(regs); 49 - struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); 50 - int msg; 51 37 52 - /* Warning: regs can be NULL when called from irq enable */ 38 + irq_enter(); 53 39 54 - if (!info->messages || (num_online_cpus() < 2)) 55 - goto out; 40 + smp_ipi_demux(); 56 41 57 - for (msg = 0; msg < 4; msg++) 58 - if (test_and_clear_bit(msg, &info->messages)) 59 - smp_message_recv(msg); 60 - 61 - out: 42 + irq_exit(); 62 43 set_irq_regs(old_regs); 63 44 } 64 - 65 - void doorbell_check_self(void) 66 - { 67 - struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); 68 - 69 - if (!info->messages) 70 - return; 71 - 72 - ppc_msgsnd(PPC_DBELL, 0, info->tag); 73 - } 74 - 75 45 #else /* CONFIG_SMP */ 76 46 void doorbell_exception(struct pt_regs 
*regs) 77 47 {
+2 -2
arch/powerpc/kernel/irq.c
··· 66 66 #include <asm/ptrace.h> 67 67 #include <asm/machdep.h> 68 68 #include <asm/udbg.h> 69 - #include <asm/dbell.h> 70 69 #include <asm/smp.h> 71 70 72 71 #ifdef CONFIG_PPC64 ··· 159 160 160 161 #if defined(CONFIG_BOOKE) && defined(CONFIG_SMP) 161 162 /* Check for pending doorbell interrupts and resend to ourself */ 162 - doorbell_check_self(); 163 + if (cpu_has_feature(CPU_FTR_DBELL)) 164 + smp_muxed_ipi_resend(); 163 165 #endif 164 166 165 167 /*
+63 -31
arch/powerpc/kernel/smp.c
··· 111 111 } 112 112 #endif 113 113 114 - void smp_message_recv(int msg) 115 - { 116 - switch(msg) { 117 - case PPC_MSG_CALL_FUNCTION: 118 - generic_smp_call_function_interrupt(); 119 - break; 120 - case PPC_MSG_RESCHEDULE: 121 - /* we notice need_resched on exit */ 122 - break; 123 - case PPC_MSG_CALL_FUNC_SINGLE: 124 - generic_smp_call_function_single_interrupt(); 125 - break; 126 - case PPC_MSG_DEBUGGER_BREAK: 127 - if (crash_ipi_function_ptr) { 128 - crash_ipi_function_ptr(get_irq_regs()); 129 - break; 130 - } 131 - #ifdef CONFIG_DEBUGGER 132 - debugger_ipi(get_irq_regs()); 133 - break; 134 - #endif /* CONFIG_DEBUGGER */ 135 - /* FALLTHROUGH */ 136 - default: 137 - printk("SMP %d: smp_message_recv(): unknown msg %d\n", 138 - smp_processor_id(), msg); 139 - break; 140 - } 141 - } 142 - 143 114 static irqreturn_t call_function_action(int irq, void *data) 144 115 { 145 116 generic_smp_call_function_interrupt(); ··· 129 158 return IRQ_HANDLED; 130 159 } 131 160 132 - static irqreturn_t debug_ipi_action(int irq, void *data) 161 + irqreturn_t debug_ipi_action(int irq, void *data) 133 162 { 134 - smp_message_recv(PPC_MSG_DEBUGGER_BREAK); 163 + if (crash_ipi_function_ptr) { 164 + crash_ipi_function_ptr(get_irq_regs()); 165 + return IRQ_HANDLED; 166 + } 167 + 168 + #ifdef CONFIG_DEBUGGER 169 + debugger_ipi(get_irq_regs()); 170 + #endif /* CONFIG_DEBUGGER */ 171 + 135 172 return IRQ_HANDLED; 136 173 } 137 174 ··· 176 197 virq, smp_ipi_name[msg], err); 177 198 178 199 return err; 200 + } 201 + 202 + struct cpu_messages { 203 + unsigned long messages; /* current messages bits */ 204 + unsigned long data; /* data for cause ipi */ 205 + }; 206 + static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message); 207 + 208 + void smp_muxed_ipi_set_data(int cpu, unsigned long data) 209 + { 210 + struct cpu_messages *info = &per_cpu(ipi_message, cpu); 211 + 212 + info->data = data; 213 + } 214 + 215 + void smp_muxed_ipi_message_pass(int cpu, int msg) 216 + { 217 + struct 
cpu_messages *info = &per_cpu(ipi_message, cpu); 218 + unsigned long *tgt = &info->messages; 219 + 220 + set_bit(msg, tgt); 221 + mb(); 222 + smp_ops->cause_ipi(cpu, info->data); 223 + } 224 + 225 + void smp_muxed_ipi_resend(void) 226 + { 227 + struct cpu_messages *info = &__get_cpu_var(ipi_message); 228 + unsigned long *tgt = &info->messages; 229 + 230 + if (*tgt) 231 + smp_ops->cause_ipi(smp_processor_id(), info->data); 232 + } 233 + 234 + irqreturn_t smp_ipi_demux(void) 235 + { 236 + struct cpu_messages *info = &__get_cpu_var(ipi_message); 237 + unsigned long *tgt = &info->messages; 238 + 239 + mb(); /* order any irq clear */ 240 + while (*tgt) { 241 + if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) 242 + generic_smp_call_function_interrupt(); 243 + if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) 244 + reschedule_action(0, NULL); /* upcoming sched hook */ 245 + if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) 246 + generic_smp_call_function_single_interrupt(); 247 + #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 248 + if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) 249 + debug_ipi_action(0, NULL); 250 + #endif 251 + } 252 + return IRQ_HANDLED; 179 253 } 180 254 181 255 void smp_send_reschedule(int cpu)
+4 -2
arch/powerpc/platforms/85xx/smp.c
··· 235 235 smp_85xx_ops.message_pass = smp_mpic_message_pass; 236 236 } 237 237 238 - if (cpu_has_feature(CPU_FTR_DBELL)) 239 - smp_85xx_ops.message_pass = doorbell_message_pass; 238 + if (cpu_has_feature(CPU_FTR_DBELL)) { 239 + smp_85xx_ops.message_pass = smp_muxed_ipi_message_pass; 240 + smp_85xx_ops.cause_ipi = doorbell_cause_ipi; 241 + } 240 242 241 243 BUG_ON(!smp_85xx_ops.message_pass); 242 244
+14 -2
arch/powerpc/platforms/cell/interrupt.c
··· 196 196 { 197 197 int ipi = (int)(long)dev_id; 198 198 199 - smp_message_recv(ipi); 200 - 199 + switch(ipi) { 200 + case PPC_MSG_CALL_FUNCTION: 201 + generic_smp_call_function_interrupt(); 202 + break; 203 + case PPC_MSG_RESCHEDULE: 204 + /* Upcoming sched hook */ 205 + break; 206 + case PPC_MSG_CALL_FUNC_SINGLE: 207 + generic_smp_call_function_single_interrupt(); 208 + break; 209 + case PPC_MSG_DEBUGGER_BREAK: 210 + debug_ipi_action(0, NULL); 211 + break; 212 + } 201 213 return IRQ_HANDLED; 202 214 } 203 215 static void iic_request_ipi(int ipi, const char *name)
+1 -2
arch/powerpc/platforms/iseries/irq.c
··· 42 42 #include "irq.h" 43 43 #include "pci.h" 44 44 #include "call_pci.h" 45 - #include "smp.h" 46 45 47 46 #ifdef CONFIG_PCI 48 47 ··· 315 316 #ifdef CONFIG_SMP 316 317 if (get_lppaca()->int_dword.fields.ipi_cnt) { 317 318 get_lppaca()->int_dword.fields.ipi_cnt = 0; 318 - iSeries_smp_message_recv(); 319 + smp_ipi_demux(); 319 320 } 320 321 #endif /* CONFIG_SMP */ 321 322 if (hvlpevent_is_pending())
+3 -20
arch/powerpc/platforms/iseries/smp.c
··· 42 42 #include <asm/cputable.h> 43 43 #include <asm/system.h> 44 44 45 - #include "smp.h" 46 - 47 - static unsigned long iSeries_smp_message[NR_CPUS]; 48 - 49 - void iSeries_smp_message_recv(void) 45 + static void smp_iSeries_cause_ipi(int cpu, unsigned long data) 50 46 { 51 - int cpu = smp_processor_id(); 52 - int msg; 53 - 54 - if (num_online_cpus() < 2) 55 - return; 56 - 57 - for (msg = 0; msg < 4; msg++) 58 - if (test_and_clear_bit(msg, &iSeries_smp_message[cpu])) 59 - smp_message_recv(msg); 60 - } 61 - 62 - static void smp_iSeries_message_pass(int cpu, int msg) 63 - { 64 - set_bit(msg, &iSeries_smp_message[cpu]); 65 47 HvCall_sendIPI(&(paca[cpu])); 66 48 } 67 49 ··· 75 93 } 76 94 77 95 static struct smp_ops_t iSeries_smp_ops = { 78 - .message_pass = smp_iSeries_message_pass, 96 + .message_pass = smp_muxed_ipi_message_pass, 97 + .cause_ipi = smp_iSeries_cause_ipi, 79 98 .probe = smp_iSeries_probe, 80 99 .kick_cpu = smp_iSeries_kick_cpu, 81 100 .setup_cpu = smp_iSeries_setup_cpu,
-6
arch/powerpc/platforms/iseries/smp.h
··· 1 - #ifndef _PLATFORMS_ISERIES_SMP_H 2 - #define _PLATFORMS_ISERIES_SMP_H 3 - 4 - extern void iSeries_smp_message_recv(void); 5 - 6 - #endif /* _PLATFORMS_ISERIES_SMP_H */
+6 -21
arch/powerpc/platforms/powermac/smp.c
··· 156 156 /* 157 157 * On powersurge (old SMP powermac architecture) we don't have 158 158 * separate IPIs for separate messages like openpic does. Instead 159 - * we have a bitmap for each processor, where a 1 bit means that 160 - * the corresponding message is pending for that processor. 161 - * Ideally each cpu's entry would be in a different cache line. 159 + * use the generic demux helpers 162 160 * -- paulus. 163 161 */ 164 - static unsigned long psurge_smp_message[NR_CPUS]; 165 - 166 162 void psurge_smp_message_recv(void) 167 163 { 168 - int cpu = smp_processor_id(); 169 - int msg; 170 - 171 - /* clear interrupt */ 172 - psurge_clr_ipi(cpu); 173 - 174 - if (num_online_cpus() < 2) 175 - return; 176 - 177 - /* make sure there is a message there */ 178 - for (msg = 0; msg < 4; msg++) 179 - if (test_and_clear_bit(msg, &psurge_smp_message[cpu])) 180 - smp_message_recv(msg); 164 + psurge_clr_ipi(smp_processor_id()); 165 + smp_ipi_demux(); 181 166 } 182 167 183 168 irqreturn_t psurge_primary_intr(int irq, void *d) ··· 171 186 return IRQ_HANDLED; 172 187 } 173 188 174 - static void smp_psurge_message_pass(int cpu, int msg) 189 + static void smp_psurge_cause_ipi(int cpu, unsigned long data) 175 190 { 176 - set_bit(msg, &psurge_smp_message[cpu]); 177 191 psurge_set_ipi(cpu); 178 192 } 179 193 ··· 412 428 413 429 /* PowerSurge-style Macs */ 414 430 struct smp_ops_t psurge_smp_ops = { 415 - .message_pass = smp_psurge_message_pass, 431 + .message_pass = smp_muxed_ipi_message_pass, 432 + .cause_ipi = smp_psurge_cause_ipi, 416 433 .probe = smp_psurge_probe, 417 434 .kick_cpu = smp_psurge_kick_cpu, 418 435 .setup_cpu = smp_psurge_setup_cpu,
+2 -1
arch/powerpc/platforms/pseries/smp.c
··· 207 207 }; 208 208 209 209 static struct smp_ops_t pSeries_xics_smp_ops = { 210 - .message_pass = NULL, /* Filled at runtime by xics_smp_probe() */ 210 + .message_pass = smp_muxed_ipi_message_pass, 211 + .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */ 211 212 .probe = xics_smp_probe, 212 213 .kick_cpu = smp_pSeries_kick_cpu, 213 214 .setup_cpu = smp_xics_setup_cpu,
+2 -1
arch/powerpc/platforms/wsp/smp.c
··· 75 75 } 76 76 77 77 static struct smp_ops_t a2_smp_ops = { 78 - .message_pass = doorbell_message_pass, 78 + .message_pass = smp_muxed_ipi_message_pass, 79 + .cause_ipi = doorbell_cause_ipi, 79 80 .probe = smp_a2_probe, 80 81 .kick_cpu = smp_a2_kick_cpu, 81 82 .setup_cpu = smp_a2_setup_cpu,
+3 -7
arch/powerpc/sysdev/xics/icp-hv.c
··· 118 118 119 119 #ifdef CONFIG_SMP 120 120 121 - static void icp_hv_message_pass(int cpu, int msg) 121 + static void icp_hv_cause_ipi(int cpu, unsigned long data) 122 122 { 123 - unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); 124 - 125 - set_bit(msg, tgt); 126 - mb(); 127 123 icp_hv_set_qirr(cpu, IPI_PRIORITY); 128 124 } 129 125 ··· 129 133 130 134 icp_hv_set_qirr(cpu, 0xff); 131 135 132 - return xics_ipi_dispatch(cpu); 136 + return smp_ipi_demux(); 133 137 } 134 138 135 139 #endif /* CONFIG_SMP */ ··· 142 146 .flush_ipi = icp_hv_flush_ipi, 143 147 #ifdef CONFIG_SMP 144 148 .ipi_action = icp_hv_ipi_action, 145 - .message_pass = icp_hv_message_pass, 149 + .cause_ipi = icp_hv_cause_ipi, 146 150 #endif 147 151 }; 148 152
+3 -7
arch/powerpc/sysdev/xics/icp-native.c
··· 134 134 135 135 #ifdef CONFIG_SMP 136 136 137 - static void icp_native_message_pass(int cpu, int msg) 137 + static void icp_native_cause_ipi(int cpu, unsigned long data) 138 138 { 139 - unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); 140 - 141 - set_bit(msg, tgt); 142 - mb(); 143 139 icp_native_set_qirr(cpu, IPI_PRIORITY); 144 140 } 145 141 ··· 145 149 146 150 icp_native_set_qirr(cpu, 0xff); 147 151 148 - return xics_ipi_dispatch(cpu); 152 + return smp_ipi_demux(); 149 153 } 150 154 151 155 #endif /* CONFIG_SMP */ ··· 263 267 .flush_ipi = icp_native_flush_ipi, 264 268 #ifdef CONFIG_SMP 265 269 .ipi_action = icp_native_ipi_action, 266 - .message_pass = icp_native_message_pass, 270 + .cause_ipi = icp_native_cause_ipi, 267 271 #endif 268 272 }; 269 273
+2 -28
arch/powerpc/sysdev/xics/xics-common.c
··· 126 126 127 127 #ifdef CONFIG_SMP 128 128 129 - DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message); 130 - 131 - irqreturn_t xics_ipi_dispatch(int cpu) 132 - { 133 - unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); 134 - 135 - mb(); /* order mmio clearing qirr */ 136 - while (*tgt) { 137 - if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) { 138 - smp_message_recv(PPC_MSG_CALL_FUNCTION); 139 - } 140 - if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) { 141 - smp_message_recv(PPC_MSG_RESCHEDULE); 142 - } 143 - if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) { 144 - smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); 145 - } 146 - #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 147 - if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) { 148 - smp_message_recv(PPC_MSG_DEBUGGER_BREAK); 149 - } 150 - #endif 151 - } 152 - return IRQ_HANDLED; 153 - } 154 - 155 129 static void xics_request_ipi(void) 156 130 { 157 131 unsigned int ipi; ··· 144 170 145 171 int __init xics_smp_probe(void) 146 172 { 147 - /* Setup message_pass callback based on which ICP is used */ 148 - smp_ops->message_pass = icp_ops->message_pass; 173 + /* Setup cause_ipi callback based on which ICP is used */ 174 + smp_ops->cause_ipi = icp_ops->cause_ipi; 149 175 150 176 /* Register all the IPIs */ 151 177 xics_request_ipi();