Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: Add generic SMP IPI support

Use the new generic IPI layer to provide generic SMP IPI support if the irqchip
supports it.

Signed-off-by: Qais Yousef <qais.yousef@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: <jason@lakedaemon.net>
Cc: <marc.zyngier@arm.com>
Cc: <jiang.liu@linux.intel.com>
Cc: <linux-mips@linux-mips.org>
Cc: <lisa.parratt@imgtec.com>
Cc: Qais Yousef <qsyousef@gmail.com>
Link: http://lkml.kernel.org/r/1449580830-23652-17-git-send-email-qais.yousef@imgtec.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Authored by Qais Yousef and committed by Thomas Gleixner.
fbde2d7d 78930f09

+136
+136
arch/mips/kernel/smp.c
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cpc.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

/* ... lines elided in this view ... */

static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
/*
 * IRQ descriptors for the two MIPS IPIs, cached once at init time
 * (see mips_smp_ipi_init) so the send path can call __ipi_send_mask()
 * without a per-send descriptor lookup.
 */
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

static inline void set_cpu_sibling_map(int cpu)
{
	/* ... body continues beyond this view ... */

/* ... lines elided in this view; the closing below is the tail of a
 * different function whose start is not visible (presumably the
 * mp_ops setter — confirm against the full file) ... */

	mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
/* Send a single-CPU IPI by delegating to the mask-based variant. */
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

/*
 * Send an IPI of the given type (SMP_CALL_FUNCTION or
 * SMP_RESCHEDULE_YOURSELF) to every CPU in @mask through the generic
 * IPI layer.  Any other @action is a hard error (BUG()).
 *
 * If a Cluster Power Controller is present, each target CPU on a core
 * other than our own is issued CPC power-up commands until it appears
 * in cpu_coherent_mask, so a powered-down core can still receive the
 * interrupt.  Runs with local interrupts disabled for the duration.
 */
void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			core = cpu_data[cpu].core;

			/* Our own core is necessarily powered and coherent. */
			if (core == current_cpu_data.core)
				continue;

			/*
			 * Spin until the target CPU becomes coherent,
			 * re-issuing the power-up command each iteration
			 * under the CPC "other core" lock.
			 */
			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}


/* Handler for the reschedule IPI: poke this CPU's scheduler. */
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

/* Handler for the call-function IPI: run queued cross-CPU calls. */
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler = ipi_resched_interrupt,
	.flags = IRQF_PERCPU,
	.name = "IPI resched"
};

static struct irqaction irq_call = {
	.handler = ipi_call_interrupt,
	.flags = IRQF_PERCPU,
	.name = "IPI call"
};

/*
 * Wire @action up to @virq as a per-CPU interrupt.  Failure to set up
 * an IPI is fatal (BUG_ON) — SMP cannot operate without them.
 */
static __init void smp_ipi_init_one(unsigned int virq,
				    struct irqaction *action)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = setup_irq(virq, action);
	BUG_ON(ret);
}

/*
 * Early initcall: find an IPI-capable irq domain, reserve virqs for
 * the call-function and reschedule IPIs across all possible CPUs,
 * install their handlers, and cache the irq descriptors used by
 * mips_smp_send_ipi_mask().  Missing domain or failed reservation is
 * fatal (BUG_ON).
 */
static int __init mips_smp_ipi_init(void)
{
	unsigned int call_virq, sched_virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
	BUG_ON(!call_virq);

	sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
	BUG_ON(!sched_virq);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		/*
		 * Per-CPU IPI domains hand back the base of a contiguous
		 * virq range, one entry per possible CPU — set up each.
		 */
		for_each_cpu(cpu, cpu_possible_mask) {
			smp_ipi_init_one(call_virq + cpu, &irq_call);
			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
		}
	} else {
		smp_ipi_init_one(call_virq, &irq_call);
		smp_ipi_init_one(sched_virq, &irq_resched);
	}

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/* ... lines elided in this view ... */

/*
 * First C code run on the secondary CPUs after being started up by